repo_name (stringlengths, 5 to 92) | path (stringlengths, 4 to 232) | copies (stringclasses, 22 values) | size (stringlengths, 4 to 7) | content (stringlengths, 626 to 1.05M) | license (stringclasses, 15 values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 5.21 to 99.9) | line_max (int64, 12 to 999) | alpha_frac (float64, 0.25 to 0.96) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
mdevaev/emonoda | emonoda/web/sockshandler.py | 1 | 4843 |
"""
Emonoda -- A set of tools to organize and manage your torrents
Copyright (C) 2015 Devaev Maxim <[email protected]>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import socket
import urllib.parse
from urllib.request import HTTPHandler
from urllib.request import HTTPSHandler
from urllib.request import Request
from http.client import HTTPConnection
from http.client import HTTPSConnection
from http.client import HTTPResponse
from typing import Tuple
from typing import Optional
from typing import Any
from ..thirdparty import socks
# =====
SCHEME_TO_TYPE = {
"socks4": socks.PROXY_TYPE_SOCKS4,
"socks5": socks.PROXY_TYPE_SOCKS5,
}
SOCKS_PORT = 1080
# =====
class _SocksConnection(HTTPConnection):
def __init__(self, *args: Any, **kwargs: Any) -> None:
kwargs.pop("proxy_url", None) # XXX: Fix for "TypeError: __init__() got an unexpected keyword argument 'proxy_url'"
super().__init__(*args, **kwargs)
self.__proxy_args: Optional[Tuple[
Optional[int],
Optional[str],
Optional[int],
bool,
Optional[str],
Optional[str],
]] = None
# XXX: because proxy args/kwargs break super
def make_proxy_args(
self,
proxy_url: str="",
proxy_type: Optional[int]=None,
proxy_host: Optional[str]=None,
proxy_port: Optional[int]=None,
proxy_user: Optional[str]=None,
proxy_passwd: Optional[str]=None,
rdns: bool=True,
) -> None:
if proxy_url:
parsed = urllib.parse.urlparse(proxy_url)
scheme = parsed.scheme
proxy_user = parsed.username
proxy_passwd = parsed.password
proxy_host = parsed.hostname
proxy_port = (parsed.port or SOCKS_PORT)
proxy_type = SCHEME_TO_TYPE.get((scheme or "").lower())
if proxy_type is None:
raise RuntimeError("Invalid SOCKS protocol: {}".format(scheme))
self.__proxy_args = (proxy_type, proxy_host, proxy_port, rdns, proxy_user, proxy_passwd)
def connect(self) -> None:
assert self.__proxy_args is not None, "Proxy args weren't initialized"
self.sock = socks.socksocket()
self.sock.setproxy(*self.__proxy_args)
timeout = self.timeout # type: ignore
if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT: # type: ignore # pylint: disable=protected-access
self.sock.settimeout(timeout)
self.sock.connect((self.host, self.port)) # type: ignore
class _SocksSecureConnection(HTTPSConnection, _SocksConnection):
def __init__(self, *args: Any, **kwargs: Any) -> None:
kwargs.pop("proxy_url", None) # XXX: Fix for "TypeError: __init__() got an unexpected keyword argument 'proxy_url'"
super().__init__(*args, **kwargs)
# =====
class SocksHandler(HTTPHandler, HTTPSHandler):
def __init__(self, *args: Any, **kwargs: Any) -> None:
self.__args = args
self.__kwargs = kwargs
super().__init__(debuglevel=kwargs.pop("debuglevel", 0))
def http_open(self, req: Request) -> HTTPResponse:
def build(
host: str,
port: Optional[int]=None,
timeout: int=socket._GLOBAL_DEFAULT_TIMEOUT, # type: ignore # pylint: disable=protected-access
) -> _SocksConnection:
connection = _SocksConnection(host, port=port, timeout=timeout, **self.__kwargs)
connection.make_proxy_args(*self.__args, **self.__kwargs)
return connection
return self.do_open(build, req) # type: ignore
def https_open(self, req: Request) -> HTTPResponse:
def build(
host: str,
port: Optional[int]=None,
timeout: int=socket._GLOBAL_DEFAULT_TIMEOUT, # type: ignore # pylint: disable=protected-access
) -> _SocksSecureConnection:
connection = _SocksSecureConnection(host, port=port, timeout=timeout, **self.__kwargs)
connection.make_proxy_args(*self.__args, **self.__kwargs)
return connection
return self.do_open(build, req) # type: ignore
# XXX: vulture hacks
_ = http_open
_ = https_open
del _
| gpl-3.0 | -7,541,916,787,342,319,000 | 34.094203 | 124 | 0.629775 | false |
joinrpg/joinrpg | joinrpg/settings.py | 1 | 2712 |
"""
Django settings for joinrpg project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import socket
import re
import configparser
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '-xg80#^t4gpteyw@226bqt2pdypyzxzq6sj+^*va#*@pi&rwvh'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
AUTH_USER_MODEL = 'claims.User'
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django_extensions',
    'bootstrap3',
    'claims'
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'joinrpg.urls'
WSGI_APPLICATION = 'joinrpg.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
config = configparser.ConfigParser()
config.read("joinrpg.conf")
DATABASES = {
    'default': {
        'ENGINE': config.get('DATABASE', 'ENGINE', fallback='django.db.backends.sqlite3'),
        'HOST': config.get('DATABASE', 'HOST', fallback=''),
        'NAME': config.get('DATABASE', 'NAME', fallback='db.sqlite3'),
        'USER': config.get('DATABASE', 'USER', fallback=''),
        'PASSWORD': config.get('DATABASE', 'PASSWORD', fallback=''),
        'CHARSET': config.get('DATABASE', 'CHARSET', fallback='utf8')
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'ru-RU'
TIME_ZONE = 'Europe/Moscow'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
TEMPLATE_DIRS = (
    os.path.join(BASE_DIR, 'claims/templates/').replace('\\', '/'),
)
STATIC_ROOT=os.path.join(BASE_DIR, "static")
| apache-2.0 | 8,092,926,238,599,004,000 | 24.828571 | 90 | 0.706858 | false |
dongjoon-hyun/tools | cli/dolphin.py | 1 | 5159 |
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Dolphin CLI Fabric File
"""
__author__ = 'Dongjoon Hyun ([email protected])'
__copyright__ = 'Copyright (c) 2015-2016'
__license__ = 'Apache License'
__version__ = '0.1'
from fabric.api import task, run
@task
def pagerank(inpath, outpath, threshold, maxiter, damping):
"""
fab snu.pagerank:/sample/sample_pagerank,/user/hadoop/pagerank_result,0.01,10,0.85
"""
if not (outpath.startswith('/tmp/') or outpath.startswith('/user/hadoop/')):
print 'Unauthorized path: %(outpath)s' % locals()
return
run('''cat <<'EOF' > /home/hadoop/demo/snu.pagerank.sh
java -cp $YARN_CONF_DIR:/home/hadoop/dolphin/target/dolphin-0.1-SNAPSHOT-shaded.jar:/data1/cloudera/parcels/CDH/jars/* \
-Djava.util.logging.config.class=org.apache.reef.util.logging.Config \
edu.snu.reef.dolphin.examples.ml.algorithms.graph.PageRankREEF -convThr %(threshold)s -maxIter %(maxiter)s \
-dampingFactor %(damping)s -split 2 -input %(inpath)s -output /tmp/pagerank &> /dev/null
hadoop fs -rm -r -f -skipTrash %(outpath)s &> /dev/null
hadoop fs -mkdir %(outpath)s
hadoop fs -mv /tmp/pagerank/rank/CtrlTask-0 %(outpath)s
EOF''' % locals())
cmd = '/bin/bash /home/hadoop/demo/snu.pagerank.sh'
run(cmd)
@task
def em(inpath, outpath, cluster, threshold, maxiter):
"""
fab snu.em:/sample/sample_cluster,/user/hadoop/em_result,4,0.01,20
"""
if not (outpath.startswith('/tmp/') or outpath.startswith('/user/hadoop/')):
print 'Unauthorized path: %(outpath)s' % locals()
return
run('''cat <<'EOF' > /home/hadoop/demo/snu.em.sh
java -cp $YARN_CONF_DIR:/home/hadoop/dolphin/target/dolphin-0.1-SNAPSHOT-shaded.jar:/data1/cloudera/parcels/CDH/jars/* \
-Djava.util.logging.config.class=org.apache.reef.util.logging.Config \
edu.snu.reef.dolphin.examples.ml.algorithms.clustering.em.EMREEF -numCls %(cluster)s -convThr %(threshold)s \
-maxIter %(maxiter)s -split 4 -input %(inpath)s -output %(outpath)s &> /dev/null
EOF''' % locals())
cmd = '/bin/bash /home/hadoop/demo/snu.em.sh'
run(cmd)
@task
def kmeans(inpath, outpath, cluster, threshold, maxiter):
"""
fab snu.kmeans:/sample/sample_cluster,/user/hadoop/kmeans_result,4,0.01,20
"""
if not (outpath.startswith('/tmp/') or outpath.startswith('/user/hadoop/')):
print 'Unauthorized path: %(outpath)s' % locals()
return
run('''cat <<'EOF' > /home/hadoop/demo/snu.kmeans.sh
java -cp $YARN_CONF_DIR:/home/hadoop/dolphin/target/dolphin-0.1-SNAPSHOT-shaded.jar:/data1/cloudera/parcels/CDH/jars/* \
-Djava.util.logging.config.class=org.apache.reef.util.logging.Config \
edu.snu.reef.dolphin.examples.ml.algorithms.clustering.kmeans.KMeansREEF -numCls %(cluster)s -convThr %(threshold)s \
-maxIter %(maxiter)s -split 4 -input %(inpath)s -output %(outpath)s &> /dev/null
EOF''' % locals())
cmd = '/bin/bash /home/hadoop/demo/snu.kmeans.sh'
run(cmd)
@task
def lm(inpath, outpath, dim, step, lam, maxiter):
"""
fab snu.lm:/sample/sample_regression,/user/hadoop/lm_result,3,0.001,0.1,20
"""
if not (outpath.startswith('/tmp/') or outpath.startswith('/user/hadoop/')):
print 'Unauthorized path: %(outpath)s' % locals()
return
run('''cat <<'EOF' > /home/hadoop/demo/snu.lm.sh
java -cp $YARN_CONF_DIR:/home/hadoop/dolphin/target/dolphin-0.1-SNAPSHOT-shaded.jar:/data1/cloudera/parcels/CDH/jars/* \
-Djava.util.logging.config.class=org.apache.reef.util.logging.Config \
edu.snu.reef.dolphin.examples.ml.algorithms.regression.LinearRegREEF -dim %(dim)s -stepSize %(step)s -lambda %(lam)s \
-maxIter %(maxiter)s -split 4 -input %(inpath)s -output %(outpath)s &> /dev/null
EOF''' % locals())
cmd = '/bin/bash /home/hadoop/demo/snu.lm.sh'
run(cmd)
@task
def lr(inpath, outpath, dim, step, lam, maxiter):
"""
fab snu.lr:/sample/sample_classification,/user/hadoop/lr_result,3,0.00001,0.1,20
"""
if not (outpath.startswith('/tmp/') or outpath.startswith('/user/hadoop/')):
print 'Unauthorized path: %(outpath)s' % locals()
return
run('''cat <<'EOF' > /home/hadoop/demo/snu.lr.sh
java -cp $YARN_CONF_DIR:/home/hadoop/dolphin/target/dolphin-0.1-SNAPSHOT-shaded.jar:/data1/cloudera/parcels/CDH/jars/* \
-Djava.util.logging.config.class=org.apache.reef.util.logging.Config \
edu.snu.reef.dolphin.examples.ml.algorithms.classification.LogisticRegREEF -dim %(dim)s -stepSize %(step)s \
-lambda %(lam)s -maxIter %(maxiter)s -split 4 -input %(inpath)s -output %(outpath)s &> /dev/null
EOF''' % locals())
cmd = '/bin/bash /home/hadoop/demo/snu.lr.sh'
run(cmd)
| apache-2.0 | 764,760,014,672,197 | 42.720339 | 120 | 0.688893 | false |
DummyDivision/Tsune | cardimporter/markdown_ext/superscript.py | 1 | 1712 |
"""Superscipt extension for Markdown.
To superscript something, place a carat symbol, '^', before and after the
text that you would like in superscript: 6.02 x 10^23^
The '23' in this example will be superscripted. See below.
Examples:
>>> import markdown
>>> md = markdown.Markdown(extensions=['superscript'])
>>> md.convert('This is a reference to a footnote^1^.')
u'<p>This is a reference to a footnote<sup>1</sup>.</p>'
>>> md.convert('This is scientific notation: 6.02 x 10^23^')
u'<p>This is scientific notation: 6.02 x 10<sup>23</sup></p>'
>>> md.convert('This is scientific notation: 6.02 x 10^23. Note lack of second carat.')
u'<p>This is scientific notation: 6.02 x 10^23. Note lack of second carat.</p>'
>>> md.convert('Scientific notation: 6.02 x 10^23. Add carat at end of sentence.^')
u'<p>Scientific notation: 6.02 x 10<sup>23. Add a carat at the end of sentence.</sup>.</p>'
Paragraph breaks will nullify superscripts across paragraphs. Line breaks
within paragraphs will not.
Adapted from https://github.com/sgraber/markdown.superscript/blob/master/superscript.py
"""
from markdown.inlinepatterns import SimpleTagPattern
from markdown import Extension
# Global Vars
SUPERSCRIPT_RE = r'(\^)([^\^]*)(\^)' # the number is a superscript^2^
class SuperscriptExtension(Extension):
    """ Superscript Extension for Python-Markdown. """

    def extendMarkdown(self, md, md_globals):
        """ Replace superscript with SuperscriptPattern """
        sup_tag = SimpleTagPattern(SUPERSCRIPT_RE, "sup")
        md.ESCAPED_CHARS.append('^')
        md.inlinePatterns.add('supscript', sup_tag, "<not_strong")


def makeExtension(configs=None):
    return SuperscriptExtension(configs=configs)
| mit | -2,050,348,514,982,707,200 | 36.23913 | 91 | 0.713785 | false |
TACC/DisplayCluster | examples/screensaver_moveOff.py | 1 | 1656 |
import os
from time import time
from time import sleep
dgm = pydc.pyDisplayGroupPython()
orig = []
deltas = []
if 'DISPLAYCLUSTER_SCREENSAVER_IMAGE' in os.environ:
pongtime = int(os.environ['DISPLAYCLUSTER_SCREENSAVER_PONGTIME'])
else:
pongtime = 0
for i in range(dgm.getNumContentWindowManagers()):
cw = dgm.getPyContentWindowManager(i)
x,y,w,h = cw.getCoordinates()
deltas.append([x,y,w,h])
orig.append(deltas[-1][:2])
cw.setPosition(1,1)
print i
dx = 1.0/500.0
dy = 1.0/500.0
for i in os.environ:
print i, os.environ[i]
pongimage = None
if 'DISPLAYCLUSTER_SCREENSAVER_IMAGE' in os.environ:
print i
fname = os.environ['DISPLAYCLUSTER_SCREENSAVER_IMAGE']
if fname[0] != '/':
fname = os.environ['DISPLAYCLUSTER_DIR'] + '/' + fname
if os.path.isfile(fname):
tt = pydc.pyContent(fname)
pongimage = pydc.pyContentWindowManager(pydc.pyContent(fname))
dgm.addContentWindowManager(pongimage)
t0 = time()
while pydc.pyMyPythonQt().get_idle():
print '.'
if (pongimage != None) and ((time() - t0) < pongtime):
x,y,w,h = pongimage.getCoordinates()
x = x + dx
if (x + w) > 1.0:
x = 2.0 - (x+w) - w
dx = -dx
elif x < 0.0:
x = -x
dx = -dx
y = y + dy
if (y + h) > 1.0:
y = 2.0 - (y+h) - h
dy = -dy
elif y < 0.0:
y = -y
dy = -dy
pongimage.setPosition(x, y)
else:
if pongimage != None:
dgm.removeContentWindowManager(pongimage)
pongimage = None
sleep(0.03333)
if pongimage != None:
dgm.removeContentWindowManager(pongimage)
pongimage = None
for i in range(dgm.getNumContentWindowManagers()):
print i
cw = dgm.getPyContentWindowManager(i)
cw.setPosition(orig[i][0], orig[i][1])
| bsd-2-clause | 7,036,242,030,859,283,000 | 21.08 | 66 | 0.663043 | false |
pwithnall/dunfell | examples/source-names.py | 1 | 1448 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Copyright © Philip Withnall 2015 <[email protected]>
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation; either version 2.1 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# An example log analysis program for Dunfell. It loads a log (specified as the
# first command line argument) then prints out the names of all the GSources
# mentioned in the log file, as a basic example of performing analysis on a
# log file.
#
import gi
gi.require_version('Dunfell', '0')
gi.require_version('DunfellUi', '0')
gi.require_version('Gio', '2.0')
from gi.repository import Dunfell, DunfellUi, Gio
import sys
parser = Dunfell.Parser.new()
parser.load_from_file(sys.argv[1])
model = parser.dup_model()
sources = model.dup_sources()
# Print out the names of all the sources
for source in sources:
    print(source.props.name if source.props.name else "(Unnamed)")
| lgpl-2.1 | -7,741,459,329,963,986,000 | 34.292683 | 79 | 0.74499 | false |
GoogleCloudPlatform/IoT-Icebreaker | appengine/token-vendor/datastore_utils.py | 1 | 1228 |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import time
import logging
import json
import datetime
import time
from google.appengine.ext import ndb
# Database model for permanent storage of the sensor data
class SensorData(ndb.Model):
    message_id = ndb.StringProperty(required=True)
    session_id = ndb.StringProperty(required=True)
    received_date = ndb.IntegerProperty(required=True)
    timestamp = ndb.IntegerProperty(required=True)
    payload = ndb.StringProperty(required=True)


def check_expired_session(session_id):
    record_keys = SensorData.query(SensorData.session_id == session_id).fetch(1, keys_only=True)
    return len(record_keys) > 0
| apache-2.0 | -3,588,659,898,209,417,700 | 34.085714 | 96 | 0.760586 | false |
jantman/RPyMostat-sensor | rpymostat_sensor/tests/test_runner.py | 1 | 14756 |
"""
The latest version of this package is available at:
<http://github.com/jantman/RPyMostat-sensor>
##################################################################################
Copyright 2016 Jason Antman <[email protected]> <http://www.jasonantman.com>
This file is part of RPyMostat-sensor, also known as RPyMostat-sensor.
RPyMostat-sensor is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
RPyMostat-sensor is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with RPyMostat-sensor. If not, see <http://www.gnu.org/licenses/>.
The Copyright and Authors attributions contained herein may not be removed or
otherwise altered, except to add the Author attribution of a contributor to
this work. (Additional Terms pursuant to Section 7b of the AGPL v3)
##################################################################################
While not legally required, I sincerely request that anyone who finds
bugs please submit them at <https://github.com/jantman/RPyMostat-sensor> or
to me via email, and that you send any contributions or improvements
either as a pull request on GitHub, or to me via email.
##################################################################################
AUTHORS:
Jason Antman <[email protected]> <http://www.jasonantman.com>
##################################################################################
"""
import sys
import logging
import argparse
import pytest
from rpymostat_sensor.runner import (
console_entry_point, Runner, StoreKeySubKeyValue
)
# https://code.google.com/p/mock/issues/detail?id=249
# py>=3.4 should use unittest.mock not the mock package on pypi
if (
sys.version_info[0] < 3 or
sys.version_info[0] == 3 and sys.version_info[1] < 4
):
from mock import patch, call, Mock, DEFAULT # noqa
else:
from unittest.mock import patch, call, Mock, DEFAULT # noqa
pbm = 'rpymostat_sensor.runner'
pb = '%s.Runner' % pbm
class TestStoreKeySubKeyValue(object):
def test_argparse_works(self):
parser = argparse.ArgumentParser()
parser.add_argument('--foo', action='store', type=str)
res = parser.parse_args(['--foo=bar'])
assert res.foo == 'bar'
def test_long(self):
parser = argparse.ArgumentParser()
parser.add_argument('--one', action=StoreKeySubKeyValue)
res = parser.parse_args(['--one=foo=bar=baz'])
assert res.one == {'foo': {'bar': 'baz'}}
def test_short(self):
parser = argparse.ArgumentParser()
parser.add_argument('-o', '--one', action=StoreKeySubKeyValue)
res = parser.parse_args(['-o', 'foo=bar=baz'])
assert res.one == {'foo': {'bar': 'baz'}}
def test_multi_long(self):
parser = argparse.ArgumentParser()
parser.add_argument('-o', '--one', action=StoreKeySubKeyValue)
res = parser.parse_args(['--one=foo=bar=baz', '--one=other=k=v'])
assert res.one == {'foo': {'bar': 'baz'}, 'other': {'k': 'v'}}
def test_multi_short(self):
parser = argparse.ArgumentParser()
parser.add_argument('-o', '--one', action=StoreKeySubKeyValue)
res = parser.parse_args(['-o', 'foo=bar=baz', '-o', 'other=k=v'])
assert res.one == {'foo': {'bar': 'baz'}, 'other': {'k': 'v'}}
def test_no_equals(self):
parser = argparse.ArgumentParser()
parser.add_argument('-o', '--one', action=StoreKeySubKeyValue)
with pytest.raises(SystemExit) as excinfo:
parser.parse_args(['-o', 'foobar'])
if sys.version_info[0] > 2:
msg = excinfo.value.args[0]
else:
msg = excinfo.value.message
assert msg == 2
def test_one_equals(self):
parser = argparse.ArgumentParser()
parser.add_argument('-o', '--one', action=StoreKeySubKeyValue)
with pytest.raises(SystemExit) as excinfo:
parser.parse_args(['-o', 'foobar=baz'])
if sys.version_info[0] > 2:
msg = excinfo.value.args[0]
else:
msg = excinfo.value.message
assert msg == 2
def test_quoted(self):
parser = argparse.ArgumentParser()
parser.add_argument('-o', '--one', action=StoreKeySubKeyValue)
res = parser.parse_args([
'-o',
'"foo some"="bar other"=baz',
'--one="baz other"="foo subkey"=blam'
])
assert res.one == {
'foo some': {'bar other': 'baz'},
'baz other': {'foo subkey': 'blam'}
}
class TestConsoleEntryPoint(object):
def test_console_entry_point(self):
with patch(pb, autospec=True) as mock_runner:
console_entry_point()
assert mock_runner.mock_calls == [
call(),
call().console_entry_point()
]
def test_console_entry_point_keyboard_interrupt(self):
def se_exc():
raise KeyboardInterrupt()
with patch(pb, autospec=True) as mock_runner:
mock_runner.return_value.console_entry_point.side_effect = se_exc
with patch('%s.logger' % pbm, autospec=True) as mock_logger:
with pytest.raises(SystemExit):
console_entry_point()
assert mock_runner.mock_calls == [
call(),
call().console_entry_point()
]
assert mock_logger.mock_calls == [
call.warning('Exiting on keyboard interrupt.')
]
class TestRunner(object):
def setup(self):
self.cls = Runner()
def test_parse_args_argparse(self):
argv = Mock()
parse_res = Mock()
with patch('%s.argparse.ArgumentParser' % pbm, autospec=True) as mock_p:
mock_p.return_value.parse_args.return_value = parse_res
res = self.cls.parse_args(argv)
assert res == parse_res
assert mock_p.mock_calls == [
call(description='RPyMostat Sensor Daemon'),
call().add_argument('-v', '--verbose', dest='verbose',
action='count', default=0,
help='verbose output. specify twice for '
'debug-level output.'),
call().add_argument('-d', '--dry-run', dest='dry_run',
action='store_true', default=False,
help='Only log results, do not POST to Engine.'
),
call().add_argument('-a', '--engine-address', dest='engine_addr',
type=str, default=None,
help='Engine API address'),
call().add_argument('-p', '--engine-port', dest='engine_port',
default=8088, type=int, help='Engine API port'),
call().add_argument('--dummy', dest='dummy', action='store_true',
default=False, help='do not discover or read '
'sensors; instead send dummy data'),
call().add_argument('-i', '--interval', dest='interval',
default=60.0, type=float,
help='Float number of seconds to sleep '
'between sensor poll/POST cycles'),
call().add_argument('-l', '--list-sensor-classes',
dest='list_classes', default=False,
action='store_true',
help='list all known sensor classes and '
'their arguments, then exit'),
call().add_argument('-c', '--sensor-class-arg', dest='class_args',
action=StoreKeySubKeyValue,
help='Provide an argument for a specific '
'sensor class, in the form '
'ClassName=arg_name=value; see -l for list '
'of classes and their arguments'),
call().parse_args(argv)
]
def test_parse_args_default(self):
res = self.cls.parse_args([])
assert res.verbose == 0
assert res.dry_run is False
assert res.engine_addr is None
assert res.engine_port == 8088
assert res.dummy is False
assert res.interval == 60.0
def test_parse_args_nondefault(self):
res = self.cls.parse_args([
'-v',
'--dry-run',
'-a', 'foo.bar.baz',
'--engine-port=1234',
'--dummy',
'-i', '12.34',
'-c', 'foo=bar=baz',
'--sensor-class-arg=foo=bar2=baz2',
'--sensor-class-arg=blam=blarg=blamm'
])
assert res.verbose == 1
assert res.dry_run is True
assert res.engine_addr == 'foo.bar.baz'
assert res.engine_port == 1234
assert res.dummy is True
assert res.interval == 12.34
assert res.class_args == {
'foo': {
'bar': 'baz',
'bar2': 'baz2'
},
'blam': {'blarg': 'blamm'}
}
def test_parse_args_verbose2(self):
res = self.cls.parse_args(['-vv'])
assert res.verbose == 2
assert res.dry_run is False
assert res.engine_addr is None
assert res.engine_port == 8088
assert res.dummy is False
assert res.interval == 60.0
def test_console_entry_point_defaults(self):
mock_args = Mock(
verbose=0,
dry_run=False,
engine_addr=None,
engine_port=8088,
dummy=False,
interval=60.0,
class_args={}
)
with patch('%s.logger' % pbm, autospec=True) as mock_logger:
with patch.multiple(
pb,
autospec=True,
parse_args=DEFAULT,
) as mocks:
mocks['parse_args'].return_value = mock_args
with patch('%s.SensorDaemon' % pbm,
autospec=True) as mock_daemon:
self.cls.console_entry_point()
assert mock_logger.mock_calls == []
assert mock_daemon.mock_calls == [
call(
dry_run=False,
dummy_data=False,
engine_port=8088,
engine_addr=None,
interval=60.0,
class_args={}
),
call().run()
]
def test_console_entry_point_list_sensors(self):
mock_args = Mock(
verbose=0,
dry_run=False,
engine_addr=None,
engine_port=8088,
dummy=False,
interval=60.0,
class_args={},
list_classes=True
)
with patch('%s.logger' % pbm, autospec=True) as mock_logger:
with patch.multiple(
pb,
autospec=True,
parse_args=DEFAULT,
) as mocks:
mocks['parse_args'].return_value = mock_args
with patch('%s.SensorDaemon' % pbm,
autospec=True) as mock_daemon:
with pytest.raises(SystemExit):
self.cls.console_entry_point()
assert mock_logger.mock_calls == []
assert mock_daemon.mock_calls == [
call(list_classes=True)
]
def test_console_entry_point_verbose1(self):
mock_args = Mock(
verbose=1,
dry_run=True,
engine_addr='foo.bar.baz',
engine_port=5678,
dummy=True,
interval=123.45,
class_args={'foo': {'bar': 'baz'}}
)
with patch('%s.logger' % pbm, autospec=True) as mock_logger:
with patch.multiple(
pb,
autospec=True,
parse_args=DEFAULT,
) as mocks:
mocks['parse_args'].return_value = mock_args
with patch('%s.SensorDaemon' % pbm,
autospec=True) as mock_daemon:
self.cls.console_entry_point()
assert mock_logger.mock_calls == [
call.setLevel(logging.INFO)
]
assert mock_daemon.mock_calls == [
call(
dry_run=True,
dummy_data=True,
engine_port=5678,
engine_addr='foo.bar.baz',
interval=123.45,
class_args={'foo': {'bar': 'baz'}}
),
call().run()
]
def test_console_entry_point_verbose2(self):
mock_args = Mock(
verbose=2,
dry_run=False,
engine_addr=None,
engine_port=8088,
dummy=False,
interval=60.0,
class_args={}
)
with patch('%s.logger' % pbm, autospec=True) as mock_logger:
with patch.multiple(
pb,
autospec=True,
parse_args=DEFAULT,
) as mocks:
mocks['parse_args'].return_value = mock_args
with patch('%s.SensorDaemon' % pbm,
autospec=True) as mock_daemon:
with patch('%s.logging.Formatter' % pbm,
autospec=True) as mock_formatter:
mock_handler = Mock(spec_set=logging.Handler)
type(mock_logger).handlers = [mock_handler]
self.cls.console_entry_point()
FORMAT = "[%(levelname)s %(filename)s:%(lineno)s - " \
"%(name)s.%(funcName)s() ] %(message)s"
assert mock_formatter.mock_calls == [
call(fmt=FORMAT),
]
assert mock_handler.mock_calls == [
call.setFormatter(mock_formatter.return_value)
]
assert mock_logger.mock_calls == [
call.setLevel(logging.DEBUG)
]
assert mock_daemon.mock_calls == [
call(
dry_run=False,
dummy_data=False,
engine_port=8088,
engine_addr=None,
interval=60.0,
class_args={}
),
call().run()
]
| agpl-3.0 | 1,416,801,490,939,173,600 | 36.73913 | 82 | 0.511792 | false |
bellowsj/aiopogo | aiopogo/pogoprotos/inventory/applied_item_pb2.py | 1 | 3968 |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pogoprotos/inventory/applied_item.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from pogoprotos.inventory.item import item_id_pb2 as pogoprotos_dot_inventory_dot_item_dot_item__id__pb2
from pogoprotos.inventory.item import item_type_pb2 as pogoprotos_dot_inventory_dot_item_dot_item__type__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='pogoprotos/inventory/applied_item.proto',
package='pogoprotos.inventory',
syntax='proto3',
serialized_pb=_b('\n\'pogoprotos/inventory/applied_item.proto\x12\x14pogoprotos.inventory\x1a\'pogoprotos/inventory/item/item_id.proto\x1a)pogoprotos/inventory/item/item_type.proto\"\xa0\x01\n\x0b\x41ppliedItem\x12\x32\n\x07item_id\x18\x01 \x01(\x0e\x32!.pogoprotos.inventory.item.ItemId\x12\x36\n\titem_type\x18\x02 \x01(\x0e\x32#.pogoprotos.inventory.item.ItemType\x12\x11\n\texpire_ms\x18\x03 \x01(\x03\x12\x12\n\napplied_ms\x18\x04 \x01(\x03\x62\x06proto3')
,
dependencies=[pogoprotos_dot_inventory_dot_item_dot_item__id__pb2.DESCRIPTOR,pogoprotos_dot_inventory_dot_item_dot_item__type__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_APPLIEDITEM = _descriptor.Descriptor(
name='AppliedItem',
full_name='pogoprotos.inventory.AppliedItem',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='item_id', full_name='pogoprotos.inventory.AppliedItem.item_id', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='item_type', full_name='pogoprotos.inventory.AppliedItem.item_type', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='expire_ms', full_name='pogoprotos.inventory.AppliedItem.expire_ms', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='applied_ms', full_name='pogoprotos.inventory.AppliedItem.applied_ms', index=3,
number=4, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=150,
serialized_end=310,
)
_APPLIEDITEM.fields_by_name['item_id'].enum_type = pogoprotos_dot_inventory_dot_item_dot_item__id__pb2._ITEMID
_APPLIEDITEM.fields_by_name['item_type'].enum_type = pogoprotos_dot_inventory_dot_item_dot_item__type__pb2._ITEMTYPE
DESCRIPTOR.message_types_by_name['AppliedItem'] = _APPLIEDITEM
AppliedItem = _reflection.GeneratedProtocolMessageType('AppliedItem', (_message.Message,), dict(
DESCRIPTOR = _APPLIEDITEM,
__module__ = 'pogoprotos.inventory.applied_item_pb2'
# @@protoc_insertion_point(class_scope:pogoprotos.inventory.AppliedItem)
))
_sym_db.RegisterMessage(AppliedItem)
# @@protoc_insertion_point(module_scope)
| mit | -2,103,541,498,251,276,500 | 40.768421 | 463 | 0.735383 | false |
libretees/skyrocket | sky/infrastructure.py | 1 | 3161 |
import sys
import logging
from .state import config, mode
logger = logging.getLogger(__name__)
class Infrastructure(object):
_wrapped = None
_dependencies = None
_category = None
_original_creation_mode = None
_locals = None
_result = None
def __init__(self, callable_, *args, **kwargs):
self.__name__ = callable_.__name__ if hasattr(callable_, '__name__') else 'undefined'
self.__doc__ = callable_.__doc__ if hasattr(callable_, '__doc__') else None
self.__module__ = callable_.__module__ if hasattr(callable_, '__module__') else None
self._wrapped = callable_
self.environment = kwargs.get('environment', None)
self.dependencies = kwargs.get('requires', None)
def __repr__(self):
return 'Infrastructure:' + self.__name__
def __call__(self, *args, **kwargs):
# Set the creation mode, if the object specifies one.
self._set_creation_mode()
# Define a source code profiler.
def profiler(frame, event, arg):
if event == 'return':
self._locals = frame.f_locals.copy()
# Activate the profiler on the next call, return or exception.
sys.setprofile(profiler)
try:
# Trace the function call.
self._result = self._wrapped(*args, **kwargs)
finally:
# Disable the source code profiler.
sys.setprofile(None)
# Reset the creation mode, if the object specifies one.
self._reset_creation_mode()
return self._result
def __getattr__(self, attr):
return self._locals[attr] if self._locals else super(Infrastructure, self).__getattr__()
def _set_creation_mode(self):
global config
if self.category:
self._original_creation_mode = config['CREATION_MODE']
config['CREATION_MODE'] = self.category
logger.debug('Set CREATION_MODE to \'%s\'.' % mode(self.category).name.title())
def _reset_creation_mode(self):
global config
if self.category:
config['CREATION_MODE'] = self._original_creation_mode
logger.debug('Set CREATION_MODE to \'%s\'.' % (self._original_creation_mode.title() \
if isinstance(self._original_creation_mode, str) \
else self._original_creation_mode))
@property
def dependencies(self):
return self._dependencies
@dependencies.setter
def dependencies(self, dependencies):
if dependencies:
self._dependencies = set(dependencies)
logger.debug('Set (%s) dependencies to (%s).' % (self, ', '.join(list(dependencies))))
@property
def category(self):
return self._category
@category.setter
def category(self, category):
self._category = category
logger.debug('Set (%s) to \'%s\' Creation Mode.' % (self, mode(self.category).name.title()))
@property
def resources(self):
return self._locals
@property
def result(self):
return self._result
| gpl-3.0 | -6,875,585,762,935,794,000 | 32.273684 | 109 | 0.578931 | false |
jtauber/minilight | raytracer.py | 1 | 2081 |
# MiniLight Python : minimal global illumination renderer
#
# Copyright (c) 2007-2008, Harrison Ainsworth / HXA7241 and Juraj Sukop.
# http://www.hxa7241.org/
#
# Copyright (c) 2009-2012, James Tauber.
from surfacepoint import SurfacePoint
from vector3f import ZERO
class RayTracer(object):

    def __init__(self, scene):
        self.scene = scene

    def get_radiance(self, ray_origin, ray_direction, last_hit=None):
        hit_object, hit_position = self.scene.get_intersection(ray_origin, ray_direction, last_hit)
        if hit_object:
            surface_point = SurfacePoint(hit_object, hit_position)
            if last_hit:
                local_emission = ZERO
            else:
                local_emission = surface_point.get_emission(ray_origin, -ray_direction, False)
            illumination = self.sample_emitters(ray_direction, surface_point)
            next_direction, color = surface_point.get_next_direction(-ray_direction)
            if next_direction.is_zero():
                reflection = ZERO
            else:
                reflection = color * self.get_radiance(surface_point.position, next_direction, surface_point.triangle)
            return reflection + illumination + local_emission
        else:
            return self.scene.get_default_emission(-ray_direction)

    def sample_emitters(self, ray_direction, surface_point):
        emitter_position, emitter = self.scene.get_emitter()
        if emitter:
            emit_direction = (emitter_position - surface_point.position).unitize()
            hit_object, hit_position = self.scene.get_intersection(surface_point.position, emit_direction, surface_point.triangle)
            if not hit_object or emitter == hit_object:
                emission_in = SurfacePoint(emitter, emitter_position).get_emission(surface_point.position, -emit_direction, True)
            else:
                emission_in = ZERO
            return surface_point.get_reflection(emit_direction, emission_in * self.scene.emitters_count(), -ray_direction)
        else:
            return ZERO
| bsd-3-clause | -1,257,308,011,745,464,800 | 34.87931 | 130 | 0.643441 | false |
alfa-addon/addon | plugin.video.alfa/channels/serviporno.py | 1 | 5093 |
# -*- coding: utf-8 -*-
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
if PY3:
import urllib.parse as urlparse # Es muy lento en PY2. En PY3 es nativo
else:
import urlparse # Usamos el nativo de PY2 que es más rápido
import re
from core import httptools
from core import scrapertools
from core.item import Item
from platformcode import logger
host = "https://www.serviporno.com"
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(item.clone(action="videos", title="Útimos videos", url=host))
itemlist.append(item.clone(action="videos", title="Más vistos", url=host + "/mas-vistos/"))
itemlist.append(item.clone(action="videos", title="Más votados", url=host + "/mas-votados/"))
itemlist.append(item.clone(action="chicas", title="Chicas", url=host + "/pornstars/"))
itemlist.append(item.clone(action="categorias", title="Canal", url=host + "/sitios/"))
itemlist.append(item.clone(action="categorias", title="Categorias", url= host + "/categorias/"))
itemlist.append(item.clone(action="search", title="Buscar", last=""))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = '%s/search/?q=%s' % (host, texto)
try:
return videos(item)
# Se captura la excepción, para no interrumpir al buscador global si un canal falla
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div class="wrap-box-escena.*?'
patron += 'data-src="([^"]+)".*?'
patron += '<h4.*?<a href="([^"]+)">([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for thumbnail, url, title in matches:
url = urlparse.urljoin(item.url, url)
itemlist.append(item.clone(action='videos', title=title, url=url, thumbnail=thumbnail, plot=""))
# Paginador "Página Siguiente >>"
next_page = scrapertools.find_single_match(data, '<a href="([^"]+)" class="btn-pagination">Siguiente')
if next_page:
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append(item.clone(action="categorias", title="[COLOR blue]Página Siguiente >>[/COLOR]", url=next_page) )
return itemlist
def chicas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div class="box-chica">.*?'
patron += '<a href="([^"]+)".*?'
patron += 'src=\'([^\']+.jpg)\'.*?'
patron += '<h4><a href="[^"]+">([^<]+)</a></h4>.*?'
patron += '<a class="total-videos".*?>([^<]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for url, thumbnail, title, videos in matches:
url = urlparse.urljoin(item.url, url)
title = "%s (%s)" % (title, videos)
itemlist.append(item.clone(action='videos', title=title, url=url, thumbnail=thumbnail, fanart=thumbnail))
# Paginador
next_page = scrapertools.find_single_match(data, '<a href="([^"]+)" class="btn-pagination">Siguiente')
if next_page:
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append(item.clone(action="chicas", title="[COLOR blue]Página Siguiente >>[/COLOR]", url=next_page) )
return itemlist
def videos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '(?s)<div class="wrap-box-escena">.*?'
patron += '<div class="box-escena">.*?'
patron += '<a href="([^"]+)".*?'
patron += 'src="([^"]+.jpg)".*?'
patron += '<h4><a href="[^"]+">([^<]+)</a></h4>.*?'
patron += '<div class="duracion">([^"]+) min</div>'
matches = re.compile(patron,re.DOTALL).findall(data)
for url, thumbnail, title,duration in matches:
title = "[COLOR yellow]%s[/COLOR] %s" % (duration, title)
url = urlparse.urljoin(item.url, url)
itemlist.append(item.clone(action='play', title=title, contentTitle = title, url=url,
thumbnail=thumbnail, fanart=thumbnail))
# Paginador
next_page = scrapertools.find_single_match(data, '<a href="([^"]+)" class="btn-pagination">Siguiente')
if next_page:
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append(item.clone(action="videos", title="[COLOR blue]Página Siguiente >>[/COLOR]", url=next_page) )
return itemlist
def play(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
url = scrapertools.find_single_match(data, "sendCdnInfo.'([^']+)")
url = url.replace("&", "&")
itemlist.append(
Item(channel=item.channel, action="play", title=item.title, url=url, thumbnail=item.thumbnail,
plot=item.plot, folder=False))
return itemlist
| gpl-3.0 | 1,524,511,089,806,292,000 | 38.991935 | 121 | 0.596301 | false |
ThreeFx/LightingSystem | lightingsystem/transform.py | 1 | 3970 |
import datetime, os, serial, sys, traceback
from subprocess import call
from time import sleep
import numpy as np
import math
# -- Sliding window length -- #
configurelimit = 3 * 250
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
from serialcomm import *
# -- The light calculation function, adapt to sensor sensitivity and parameters -- #
def getLightFromFrequency(data, init):
# return data[1]
frequency = data[1]
darkfrequency = init[1]
temp = data[2]
frequencydiff = frequency - darkfrequency
irradiance = frequencydiff * 10 / 100; # convert to microW/cm^2 and then to W/m^2
lightoutput = irradiance * 0.21 * 0.21; # Watt per sternradian
return lightoutput # watt per sterntradian
def fixpermissions(path):
uid = int(os.environ.get('SUDO_UID'))
gid = int(os.environ.get('SUDO_GID'))
os.chown(os.path.abspath(path), uid, gid)
def fixall(cd):
fixpermissions('logs/log-{}.csv'.format(cd))
fixpermissions('rawlogs/rawlog-{}.csv'.format(cd))
plotfile = os.path.abspath('plots/plot-{}.csv'.format(cd))
if (os.path.exists(plotfile)):
fixpermissions(plotfile)
def makeDirIfExists(path):
if not os.path.exists(os.path.abspath(path)):
os.makedirs(os.path.abspath(path))
fixpermissions(path)
# --------- Main begins here ----------- #
sys.stdout.write("Beginning setup ... ")
makeDirIfExists('logs')
makeDirIfExists('rawlogs')
makeDirIfExists('avg')
ser = serial.Serial(
port = '/dev/ttyACM0',\
baudrate = 115200)
currentdate = getdatefilename()
rawdata = open('rawlogs/rawlog-{}.csv'.format(currentdate), 'w')
f = open('logs/log-{}.csv'.format(currentdate), 'w')
avg = open('avg/avg-{}.csv'.format(currentdate), 'w')
print("done")
sys.stdout.write("Acquiring initial data ... ")
init = readnums(ser)
print("done")
sys.stdout.write("You have ten seconds to plug in the LED ...")
sleep(8)
print
print("configurelimit is set to {}".format(configurelimit))
sys.stdout.write("Acquiring initial reference data ... ")
prevdata = [init]
for x in xrange(0, configurelimit):
prevdata.append(readnums(ser))
print("done")
map(lambda x: writenumsto(rawdata, x), prevdata)
# Compare the data to the data 3 minutes prior
# If there is no significant frequency change in
# the light, the led junction has warmed up
sys.stdout.write("Configuring ... ")
while True:
data = readnums(ser)
writenumsto(rawdata, data)
if abs(prevdata[-configurelimit][1] - data[1]) < 100: # in Hz
break;
prevdata.append(data)
print("done")
print("Acquiring real data (^C to end) ... ")
try:
c = 0;
avgdata = [];
while True:
data = readnums(ser)
writenumsto(rawdata, data)
data[1] = getLightFromFrequency(data, init)
# log the average separately
avgdata.append(data)
c = c + 1
if c >= 250:
average = [np.mean(map(lambda x: x[1], avgdata)), data[2]]
print("{}, {}".format(average[0], average[1]));
writenumsto(avg, average)
avgdata = []
c = 0
writenumsto(f, data)
rawdata.flush()
f.flush()
except KeyboardInterrupt:
print
print("Stopping ... ")
finally:
rawdata.close();
f.close();
fixall(currentdate);
#sys.stdout.write("Do you want a plot of the data? (requires gnuplot) [y/N]: ")
#resp = raw_input();
#if (resp == 'y' or resp == 'Y'):
# makeDirIfExists('plots')
# try:
# with open('plots/plot-{}.png'.format(currentdate), 'w') as plot:
# call([ "gnuplot -e \"datafile='./logs/log-{0}.csv'\" script.plg" ], stdout=plot)
# except Exception, e:
# print("An error occurred:")
# print traceback.format_exc(e)
print("Exiting ... ")
| gpl-2.0 | 4,966,475,604,367,056,000 | 24.644295 | 101 | 0.601259 | false |
Catch-up-TV-and-More/plugin.video.catchuptvandmore | resources/lib/channels/ca/tva.py | 1 | 11972 |
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, SylvainCecchetto
# GNU General Public License v2.0+ (see LICENSE.txt or https://www.gnu.org/licenses/gpl-2.0.txt)
# This file is part of Catch-up TV & More
from __future__ import unicode_literals
from builtins import str
import json
import re
import inputstreamhelper
from codequick import Listitem, Resolver, Route
import urlquick
from resources.lib import web_utils
from resources.lib.addon_utils import get_item_media_path
from resources.lib.kodi_utils import get_selected_item_art, get_selected_item_label, get_selected_item_info, INPUTSTREAM_PROP
from resources.lib.menu_utils import item_post_treatment
# TO DO
# Fix download mode when video is not DRM protected
URL_ROOT = 'https://www.qub.ca'
URL_API = URL_ROOT + '/proxy/pfu/content-delivery-service/v1'
URL_CATEGORIES = URL_API + '/entities?slug=/%s'
URL_LIVE = URL_ROOT + '/tvaplus/%s/en-direct'
URL_INFO_STREAM = URL_ROOT + '/tvaplus%s'
URL_BRIGHTCOVE_POLICY_KEY = 'http://players.brightcove.net/%s/%s_default/index.min.js'
# AccountId, PlayerId
URL_BRIGHTCOVE_VIDEO_JSON = 'https://edge.api.brightcove.com/'\
'playback/v1/accounts/%s/videos/%s'
# AccountId, VideoId
@Route.register
def tva_root(plugin, **kwargs):
# (item_id, label, thumb, fanart)
channels = [
('tva', 'TVA', 'tva.png', 'tva_fanart.jpg'),
('addiktv', 'addikTV', 'addiktv.png', 'addiktv_fanart.jpg'),
('casa', 'Casa', 'casa.png', 'casa_fanart.jpg'),
('evasion', 'Evasion', 'evasion.png', 'evasion_fanart.jpg'),
('moi-et-cie', 'MOI ET CIE', 'moietcie.png', 'moietcie_fanart.jpg'),
('prise2', 'PRISE2', 'prise2.png', 'prise2_fanart.jpg'),
('yoopa', 'Yoopa', 'yoopa.png', 'yoopa_fanart.jpg'),
('zeste', 'Zeste', 'zeste.png', 'zeste_fanart.jpg'),
('tva-sports', 'TVA Sports', 'tvasports.png', 'tvasports_fanart.jpg'),
('lcn', 'LCN', 'lcn.png', 'lcn_fanart.jpg')
]
for channel_infos in channels:
item = Listitem()
item.label = channel_infos[1]
item.art["thumb"] = get_item_media_path('channels/ca/' + channel_infos[2])
item.art["fanart"] = get_item_media_path('channels/ca/' + channel_infos[3])
item.set_callback(list_categories, channel_infos[0])
item_post_treatment(item)
yield item
@Route.register
def list_categories(plugin, item_id, **kwargs):
"""
Build categories listing
- Tous les programmes
- Séries
- Informations
- ...
"""
resp = urlquick.get(URL_CATEGORIES % item_id)
json_parser = json.loads(resp.text)
for category_datas in json_parser['associatedEntities']:
if 'name' in category_datas:
category_name = category_datas['name']
item = Listitem()
item.label = category_name
item.set_callback(
list_programs, item_id=item_id, category_name=category_name, next_url=None)
item_post_treatment(item)
yield item
@Route.register
def list_programs(plugin, item_id, category_name, next_url, **kwargs):
if next_url is None:
resp = urlquick.get(URL_CATEGORIES % item_id)
json_parser = json.loads(resp.text)
for category_datas in json_parser['associatedEntities']:
if 'name' in category_datas:
if category_name == category_datas['name']:
for program_datas in category_datas['associatedEntities']:
program_name = program_datas['label']
program_image = program_datas['mainImage']['url']
program_slug = program_datas['slug']
item = Listitem()
item.label = program_name
item.art['thumb'] = item.art['landscape'] = program_image
item.set_callback(
list_seasons, item_id=item_id, program_slug=program_slug)
item_post_treatment(item)
yield item
if 'next' in category_datas:
yield Listitem.next_page(
item_id=item_id, category_name=category_name, next_url=URL_API + category_datas['next'])
else:
resp = urlquick.get(next_url)
json_parser = json.loads(resp.text)
for program_datas in json_parser['associatedEntities']:
program_name = program_datas['label']
program_image = program_datas['mainImage']['url']
program_slug = program_datas['slug']
item = Listitem()
item.label = program_name
item.art['thumb'] = item.art['landscape'] = program_image
item.set_callback(
list_seasons, item_id=item_id, program_slug=program_slug)
item_post_treatment(item)
yield item
if 'next' in json_parser:
yield Listitem.next_page(
item_id=item_id, category_name=category_name, next_url=URL_API + category_datas['next'])
@Route.register
def list_seasons(plugin, item_id, program_slug, **kwargs):
resp = urlquick.get(URL_API + '/entities?slug=%s' % program_slug)
json_parser = json.loads(resp.text)
if 'seasons' in json_parser['knownEntities']:
for season_datas in json_parser['knownEntities']['seasons']['associatedEntities']:
season_name = json_parser['knownEntities']['seasons']['name'] + ' ' + str(season_datas['seasonNumber'])
season_number = str(season_datas['seasonNumber'])
item = Listitem()
item.label = season_name
item.set_callback(
list_videos_categories, item_id=item_id, program_slug=program_slug, season_number=season_number)
item_post_treatment(item)
yield item
season_name = json_parser['name']
season_number = '-1'
item = Listitem()
item.label = season_name
item.set_callback(
list_videos_categories, item_id=item_id, program_slug=program_slug, season_number=season_number)
item_post_treatment(item)
yield item
@Route.register
def list_videos_categories(plugin, item_id, program_slug, season_number, **kwargs):
resp = urlquick.get(URL_API + '/entities?slug=%s' % program_slug)
json_parser = json.loads(resp.text)
if season_number == '-1':
for video_category_datas in json_parser['associatedEntities']:
if 'associatedEntities' in video_category_datas:
if len(video_category_datas['associatedEntities']) > 0:
video_category_name = video_category_datas['name']
video_category_slug = video_category_datas['slug']
item = Listitem()
item.label = video_category_name
item.set_callback(
list_videos, item_id=item_id, video_category_slug=video_category_slug)
item_post_treatment(item)
yield item
else:
for season_datas in json_parser['knownEntities']['seasons']['associatedEntities']:
if season_number == str(season_datas['seasonNumber']):
for video_category_datas in season_datas['associatedEntities']:
if len(video_category_datas['associatedEntities']) > 0:
video_category_name = video_category_datas['name']
video_category_slug = video_category_datas['slug']
item = Listitem()
item.label = video_category_name
item.set_callback(
list_videos, item_id=item_id, video_category_slug=video_category_slug)
item_post_treatment(item)
yield item
@Route.register
def list_videos(plugin, item_id, video_category_slug, **kwargs):
resp = urlquick.get(URL_API + '/entities?slug=%s' % video_category_slug)
json_parser = json.loads(resp.text)
for video_datas in json_parser['associatedEntities']:
video_name = video_datas['secondaryLabel'] + ' - ' + video_datas['label']
video_image = video_datas['mainImage']['url']
video_plot = ''
if 'description' in video_datas:
video_plot = video_datas['description']
video_duration = video_datas['durationMillis'] / 1000
video_slug = video_datas['slug']
item = Listitem()
item.label = video_name
item.art['thumb'] = item.art['landscape'] = video_image
item.info['plot'] = video_plot
item.info['duration'] = video_duration
item.set_callback(
get_video_url, item_id=item_id, video_slug=video_slug)
item_post_treatment(item, is_playable=True, is_downloadable=False)
yield item
if 'next' in json_parser:
yield Listitem.next_page(
item_id=item_id, video_category_slug=json_parser['next'])
# BRIGHTCOVE Part
def get_brightcove_policy_key(data_account, data_player):
"""Get policy key"""
file_js = urlquick.get(URL_BRIGHTCOVE_POLICY_KEY %
(data_account, data_player))
return re.compile('policyKey:"(.+?)"').findall(file_js.text)[0]
@Resolver.register
def get_video_url(plugin,
item_id,
video_slug,
download_mode=False,
**kwargs):
is_helper = inputstreamhelper.Helper('mpd', drm='widevine')
if not is_helper.check_inputstream():
return False
resp = urlquick.get(URL_INFO_STREAM % video_slug)
data_account = re.compile(
r'data-accound\=\"(.*?)\"').findall(resp.text)[0]
data_player = re.compile(
r'data-player\=\"(.*?)\"').findall(resp.text)[0]
data_video_id = re.compile(
r'data-video-id\=\"(.*?)\"').findall(resp.text)[0]
# Method to get JSON from 'edge.api.brightcove.com'
resp = urlquick.get(
URL_BRIGHTCOVE_VIDEO_JSON % (data_account, data_video_id),
headers={
'User-Agent':
web_utils.get_random_ua(),
'Accept':
'application/json;pk=%s' %
(get_brightcove_policy_key(data_account, data_player)),
'X-Forwarded-For':
plugin.setting.get_string('header_x-forwarded-for')
})
json_parser = json.loads(resp.text)
video_url = ''
licence_url = ''
is_protected_drm = False
if 'sources' in json_parser:
for url in json_parser["sources"]:
if 'src' in url:
if 'manifest.mpd' in url["src"]:
video_url = url["src"]
if 'key_systems' in url:
licence_url = url['key_systems']['com.widevine.alpha']['license_url']
is_protected_drm = True
else:
if json_parser[0]['error_code'] == "ACCESS_DENIED":
plugin.notify('ERROR', plugin.localize(30713))
return False
if video_url == '':
return False
item = Listitem()
item.path = video_url
item.label = get_selected_item_label()
item.art.update(get_selected_item_art())
item.info.update(get_selected_item_info())
item.property[INPUTSTREAM_PROP] = 'inputstream.adaptive'
item.property['inputstream.adaptive.manifest_type'] = 'mpd'
if is_protected_drm:
item.property[
'inputstream.adaptive.license_type'] = 'com.widevine.alpha'
item.property[
'inputstream.adaptive.license_key'] = licence_url + '|Content-Type=&User-Agent=Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3041.0 Safari/537.36|R{SSM}|'
return item
@Resolver.register
def get_live_url(plugin, item_id, **kwargs):
resp = urlquick.get(URL_LIVE % item_id)
return re.compile(
r'videoSourceUrl\"\:\"(.*?)\"').findall(resp.text)[0]
| gpl-2.0 | -468,134,449,925,972,400 | 36.882911 | 208 | 0.588338 | false |
jblupus/PyLoyaltyProject | old/project/integration/to_neo4j.py | 1 | 1096 |
import numpy as np
from neo4jrestclient.client import GraphDatabase
from old.project import list_dirs, read_lines
class IntegrationToNeo:
    def __init__(self, host=None, username=None, password=None):
        self.username = username or 'neo4j'
        self.password = password or 'root'
        self.host = host or 'http://localhost:7474'
        self.db = GraphDatabase(self.host, username=self.username, password=self.password)

    def friends_to_neo(self, user_id, friends_ids):
        lb_user = self.db.labels.create('User')
        u = self.db.nodes.create(id=user_id)
        lb_user.add(u)
        for friend_id in friends_ids:
            v = self.db.nodes.create(id=friend_id)
            lb_user.add(v)
            u.relationships.create('friends', v)


def integrate_friends():
    itn = IntegrationToNeo()
    home_path = '/home/joao/Dev/Data/Twitter/friends/'
    files = np.sort(list_dirs(home_path))
    for _file in files:
        user_id = _file.split('.')[0]
        print _file, user_id
        itn.friends_to_neo(user_id, read_lines(home_path + _file))
        break
|
bsd-2-clause
| 876,676,310,413,979,300 | 32.212121 | 90 | 0.632299 | false |
steppicrew/useful-scripts
|
KindleBooks/lib/genxml.py
|
1
|
3703
|
#! /usr/bin/python2
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
# For use with Topaz Scripts Version 2.6
class Unbuffered:
def __init__(self, stream):
self.stream = stream
def write(self, data):
self.stream.write(data)
self.stream.flush()
def __getattr__(self, attr):
return getattr(self.stream, attr)
import sys
sys.stdout=Unbuffered(sys.stdout)
import os, getopt
# local routines
import convert2xml
import flatxml2html
import decode_meta
def usage():
print 'Usage: '
print ' '
print ' genxml.py dict0000.dat unencryptedBookDir'
print ' '
def main(argv):
bookDir = ''
if len(argv) == 0:
argv = sys.argv
try:
opts, args = getopt.getopt(argv[1:], "h:")
except getopt.GetoptError, err:
print str(err)
usage()
sys.exit(1)
if len(opts) == 0 and len(args) == 0 :
usage()
sys.exit(1)
for o, a in opts:
if o =="-h":
usage()
sys.exit(0)
bookDir = args[0]
if not os.path.exists(bookDir) :
print "Can not find directory with unencrypted book"
sys.exit(1)
dictFile = os.path.join(bookDir,'dict0000.dat')
if not os.path.exists(dictFile) :
print "Can not find dict0000.dat file"
sys.exit(1)
pageDir = os.path.join(bookDir,'page')
if not os.path.exists(pageDir) :
print "Can not find page directory in unencrypted book"
sys.exit(1)
glyphsDir = os.path.join(bookDir,'glyphs')
if not os.path.exists(glyphsDir) :
print "Can not find glyphs directory in unencrypted book"
sys.exit(1)
otherFile = os.path.join(bookDir,'other0000.dat')
if not os.path.exists(otherFile) :
print "Can not find other0000.dat in unencrypted book"
sys.exit(1)
metaFile = os.path.join(bookDir,'metadata0000.dat')
if not os.path.exists(metaFile) :
print "Can not find metadata0000.dat in unencrypted book"
sys.exit(1)
xmlDir = os.path.join(bookDir,'xml')
if not os.path.exists(xmlDir):
os.makedirs(xmlDir)
print 'Processing ... '
print ' ', 'metadata0000.dat'
fname = os.path.join(bookDir,'metadata0000.dat')
xname = os.path.join(xmlDir, 'metadata.txt')
metastr = decode_meta.getMetaData(fname)
file(xname, 'wb').write(metastr)
print ' ', 'other0000.dat'
fname = os.path.join(bookDir,'other0000.dat')
xname = os.path.join(xmlDir, 'stylesheet.xml')
pargv=[]
pargv.append('convert2xml.py')
pargv.append(dictFile)
pargv.append(fname)
xmlstr = convert2xml.main(pargv)
file(xname, 'wb').write(xmlstr)
filenames = os.listdir(pageDir)
filenames = sorted(filenames)
for filename in filenames:
print ' ', filename
fname = os.path.join(pageDir,filename)
xname = os.path.join(xmlDir, filename.replace('.dat','.xml'))
pargv=[]
pargv.append('convert2xml.py')
pargv.append(dictFile)
pargv.append(fname)
xmlstr = convert2xml.main(pargv)
file(xname, 'wb').write(xmlstr)
filenames = os.listdir(glyphsDir)
filenames = sorted(filenames)
for filename in filenames:
print ' ', filename
fname = os.path.join(glyphsDir,filename)
xname = os.path.join(xmlDir, filename.replace('.dat','.xml'))
pargv=[]
pargv.append('convert2xml.py')
pargv.append(dictFile)
pargv.append(fname)
xmlstr = convert2xml.main(pargv)
file(xname, 'wb').write(xmlstr)
print 'Processing Complete'
return 0
if __name__ == '__main__':
sys.exit(main(''))
|
mit
| -3,832,606,348,814,704,600 | 24.537931 | 69 | 0.605725 | false |
beeftornado/sentry
|
src/sentry/api/endpoints/organization_integration_details.py
|
1
|
2518
|
from __future__ import absolute_import
from uuid import uuid4
import six
from sentry.api.bases.organization import OrganizationIntegrationsPermission
from sentry.api.bases.organization_integrations import OrganizationIntegrationBaseEndpoint
from sentry.api.serializers import serialize
from sentry.models import AuditLogEntryEvent, ObjectStatus, OrganizationIntegration
from sentry.shared_integrations.exceptions import IntegrationError
from sentry.tasks.deletion import delete_organization_integration
from sentry.utils.audit import create_audit_entry
class OrganizationIntegrationDetailsEndpoint(OrganizationIntegrationBaseEndpoint):
permission_classes = (OrganizationIntegrationsPermission,)
def get(self, request, organization, integration_id):
org_integration = self.get_organization_integration(organization, integration_id)
return self.respond(serialize(org_integration, request.user))
def delete(self, request, organization, integration_id):
        # Removing the integration removes the organization
        # integration and all linked issues.
org_integration = self.get_organization_integration(organization, integration_id)
updated = OrganizationIntegration.objects.filter(
id=org_integration.id, status=ObjectStatus.VISIBLE
).update(status=ObjectStatus.PENDING_DELETION)
if updated:
delete_organization_integration.apply_async(
kwargs={
"object_id": org_integration.id,
"transaction_id": uuid4().hex,
"actor_id": request.user.id,
},
countdown=0,
)
integration = org_integration.integration
create_audit_entry(
request=request,
organization=organization,
target_object=integration.id,
event=AuditLogEntryEvent.INTEGRATION_REMOVE,
data={"provider": integration.provider, "name": integration.name},
)
return self.respond(status=204)
def post(self, request, organization, integration_id):
integration = self.get_integration(organization, integration_id)
installation = integration.get_installation(organization.id)
try:
installation.update_organization_config(request.data)
except IntegrationError as e:
return self.respond({"detail": six.text_type(e)}, status=400)
return self.respond(status=200)
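# Illustrative request sketch (hypothetical organization slug, integration id,
# token and host; the exact URL pattern is defined in the project's urls.py,
# not in this file):
#
#   curl -X DELETE \
#       'https://sentry.example.com/api/0/organizations/acme/integrations/12/' \
#       -H 'Authorization: Bearer <token>'
#
# A successful delete marks the OrganizationIntegration as PENDING_DELETION,
# schedules the asynchronous cleanup task, and returns HTTP 204.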
|
bsd-3-clause
| 5,551,286,546,587,051,000 | 40.278689 | 90 | 0.692216 | false |
Sainsburys/chef-websphere
|
files/default/virtual_host_alias.py
|
1
|
2608
|
# this script manages virtual hosts.
import sys
if len(sys.argv) < 4:
print "Missing arguments. \n Usage: virtual_hosts.py <action> <alias_host> <alias_port>. \n Action must be add_alias or remove_alias."
sys.exit(1)
action = sys.argv[0] # can be add_alias or remove_alias
vhost = sys.argv[1]
alias_host = sys.argv[2]
alias_port = sys.argv[3]
def addVirtualHostAlias(virtualHostName, alias):
virtualHost = AdminConfig.getid("/VirtualHost:" + virtualHostName)
print "adding vhost alias " + virtualHost + " alias_host: " + alias[0] + " port: " + alias[1]
AdminConfig.create("HostAlias", virtualHost, [["hostname", alias[0]], ["port", alias[1]]])
def removeVirtualHostAlias(virtualHostName, alias):
virtualHost = AdminConfig.getid("/VirtualHost:" + virtualHostName)
for a in toList(AdminConfig.showAttribute(virtualHost, 'aliases')):
if AdminConfig.showAttribute(a, 'hostname') == alias[0] and AdminConfig.showAttribute(a, 'port') == alias[1]:
print "removing vhost alias " + virtualHost + " alias_host: " + alias[0] + " port: " + alias[1]
AdminConfig.remove(a)
def virtualHostExists(virtualHostName):
for vh in toList(AdminConfig.list("VirtualHost")):
if AdminConfig.showAttribute(vh, "name") == virtualHostName:
return 1
return 0
def aliasExists(virtualHostName, alias):
for vh in toList(AdminConfig.list("VirtualHost")):
if AdminConfig.showAttribute(vh, "name") == virtualHostName:
for al in toList(AdminConfig.showAttribute(vh, 'aliases')):
if AdminConfig.showAttribute(al, 'hostname') == alias[0] and AdminConfig.showAttribute(al, 'port') == alias[1]:
return 1
return 0
def toList(inStr):
outList=[]
if (len(inStr)>0 and inStr[0]=='[' and inStr[-1]==']'):
inStr = inStr[1:-1]
tmpList = inStr.split(" ")
else:
tmpList = inStr.split("\n")
for item in tmpList:
item = item.rstrip();
if (len(item)>0):
outList.append(item)
return outList
if action == 'add_alias':
if virtualHostExists(vhost)==1 and aliasExists(vhost, [alias_host, alias_port])==0:
addVirtualHostAlias(vhost, [alias_host, alias_port])
AdminConfig.save()
else:
print "vhost doesn't exist, or alias already exists"
elif action == 'remove_alias':
if aliasExists(vhost, [alias_host, alias_port]):
removeVirtualHostAlias(vhost, [alias_host, alias_port])
AdminConfig.save()
else:
print "Missing is mismatched action paramater. Action must be add_alias or remove_alias. \n Usage: virtual_hosts.py <action> <alias_host> <alias_port>"
|
apache-2.0
| 4,363,268,437,483,799,600 | 39.75 | 153 | 0.671396 | false |
p10rahulm/Dmage
|
contiguous_patterns.py
|
1
|
3961
|
import time
from collections import defaultdict
import operator
# Below are two implementations for reading the file; we go with the non-list-comprehension version
def readtext(filename):
with open(filename) as f:
txtlines = [[str(s) for s in line.rstrip("\n").split(" ")] for line in f]
return txtlines
def readtext2(filename):
data = open(filename, "r")
txtlines = list()
for line in data:
line = line.rstrip("\n")
lst = [str(s) for s in line.split(" ")]
# print(lst)
txtlines.append(lst)
return txtlines
def getdictoffreqwords(listoflines):
fullwordlist = defaultdict(int)
for line in listoflines:
for word in line:
fullwordlist[word] +=1
return fullwordlist
def getreducedwordlist(worddict,minsup):
return {k:v for k,v in worddict.items() if v >= minsup}
def getpatternsgivenline(line):
linelen = len(line)
# print(linelen)
patterns = set()
for i in range(1,linelen):
for j in range(0,linelen-i+1):
patterns.add(" ".join(line[j:j+i]))
# print(patterns)
# print(len(patterns))
return(patterns)
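# Illustrative worked example (hypothetical tokens): for line = ['a', 'b', 'c'],
# getpatternsgivenline(line) yields every contiguous sub-phrase shorter than
# the full line:
#
#   >>> sorted(getpatternsgivenline(['a', 'b', 'c']))
#   ['a', 'a b', 'b', 'b c', 'c']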
def getpatternsforeachline(alltext):
listoflinesets = []
i = 0
for line in alltext:
listoflinesets.append(getpatternsgivenline(line))
# print(i)
i += 1
return listoflinesets
def getphrasefreq(listoflinesets):
# print(listoflinesets)
phrasedict = defaultdict(int)
for lineset in listoflinesets:
# print(lineset)
if lineset is not None:
# print("inside")
for element in lineset:
phrasedict[element] +=1
return phrasedict
def filterbyfrequency(phrasefrequencydict,minsup):
return {k:v for k,v in phrasefrequencydict.items() if v >= minsup}
def filterbywordlength(phrasefrequencydict,minlength):
return {k: v for k, v in phrasefrequencydict.items() if len(k.split(" ")) >= minlength}
def printreturnfile(inputdict,outputfile):
# inputlist.sort(key=lambda x: -x[1])
inputlist = [(k,v) for k,v in inputdict.items()]
# print(inputlist)
inputlist.sort(key=operator.itemgetter(1),reverse=True)
with open(outputfile, 'w') as the_file:
for element in inputlist:
the_file.write(str(element[1]) + ":" + element[0].replace(" ",";") + '\n')
if __name__ == "__main__":
#testing time for reading
times = time.time()
txtlines = readtext2("rawdata/yelp_reviews.txt")
# print("timetaken by longer code = ",time.time() - times)
# time taken by the list comprehension is 0.18secs
# times = time.time()
# txtlines = readtext("rawdata/yelp_reviews.txt")
# print("timetaken by shorter code = ", time.time() - times)
# time taken by normal loop is 0.15secs
# going with normal code
# print(txtlines)
worddict = getdictoffreqwords(txtlines)
# print("worddict is ",worddict )
# print("len of worddict is ", len(worddict))
# worddict = getreducedwordlist(worddict,100)
# print("reduced worddict is ", worddict)
# print("len of reduced worddict is ", len(worddict))
# Test whether single line comprehension works
# getpatternsgivenline(txtlines[0])
# Get list of sets for each line
# times = time.time()
listoflinesets = getpatternsforeachline(txtlines)
# print("Got list of line phrases in ", time.time() - times, "seconds")
# Get list of all phrases
# times = time.time()
phrasesfreq = getphrasefreq(listoflinesets)
print("number of all phrases checked:",len(phrasesfreq))
frequentphrases = filterbyfrequency(phrasesfreq,100)
# print(frequentphrases)
# print(len(frequentphrases))
frequentphrases = filterbywordlength(frequentphrases, 2)
# print(frequentphrases)
# print(len(frequentphrases))
print("Ran Algo for yelp in ", time.time() - times, "seconds")
printreturnfile(frequentphrases, "output/yelpcontiguouspatterns.txt")
|
mit
| 3,165,986,164,382,605,300 | 31.467213 | 96 | 0.65741 | false |
OpenSTC-Eleger/stc-achats-cyril
|
__openerp__.py
|
1
|
1589
|
# -*- coding: utf-8 -*-
##############################################################################
# Copyright (C) 2012 SICLIC http://siclic.fr
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
#############################################################################
{
"name": "openstc_achat_stock",
"version": "0.1",
"depends": ["openstc_achat_stock"],
"author": "SICLIC",
"category": "SICLIC",
"description": """
    This module synchronizes with Civil-Net-Finances (created by CIRIL).
    It updates Partners from a CIRIL-exported CSV file and pushes .txt files
    to a remote CIRIL server to create "engages" on Civil-Net-Finances.
""",
"data": [
"wizard/import_partners_wizard_view.xml"
],
"init_xml":[],
"demo": [],
"test": [],
"installable": True,
"active": False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
| 817,183,899,976,228,600 | 36.833333 | 93 | 0.599119 | false |
klis87/django-cloudinary-storage
|
cloudinary_storage/app_settings.py
|
1
|
4326
|
import importlib
import os
import sys
from operator import itemgetter
import cloudinary
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.dispatch import receiver
from django.test.signals import setting_changed
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
user_settings = getattr(settings, 'CLOUDINARY_STORAGE', {})
def set_credentials(user_settings):
try:
credentials = itemgetter('CLOUD_NAME', 'API_KEY', 'API_SECRET')(user_settings)
except KeyError:
if os.environ.get('CLOUDINARY_URL'):
return
if (os.environ.get('CLOUDINARY_CLOUD_NAME') and os.environ.get('CLOUDINARY_API_KEY') and
os.environ.get('CLOUDINARY_API_SECRET')):
return
else:
raise ImproperlyConfigured('In order to use cloudinary storage, you need to provide '
'CLOUDINARY_STORAGE dictionary with CLOUD_NAME, API_SECRET '
'and API_KEY in the settings or set CLOUDINARY_URL variable '
'(or CLOUDINARY_CLOUD_NAME, CLOUDINARY_API_KEY, CLOUDINARY_API_SECRET '
'variables).')
else:
cloudinary.config(
cloud_name=credentials[0],
api_key=credentials[1],
api_secret=credentials[2]
)
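# Illustrative sketch (hypothetical credentials) of the Django settings this
# module reads; every key shown is consumed somewhere in this file:
#
#   CLOUDINARY_STORAGE = {
#       'CLOUD_NAME': 'my-cloud',        # hypothetical
#       'API_KEY': '1234567890',         # hypothetical
#       'API_SECRET': 'shhh-secret',     # hypothetical
#       'SECURE': True,
#       'MEDIA_TAG': 'media',
#       'STATIC_TAG': 'static',
#   }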
set_credentials(user_settings)
cloudinary.config(
secure=user_settings.get('SECURE', True)
)
MEDIA_TAG = user_settings.get('MEDIA_TAG', 'media')
INVALID_VIDEO_ERROR_MESSAGE = user_settings.get('INVALID_VIDEO_ERROR_MESSAGE', 'Please upload a valid video file.')
EXCLUDE_DELETE_ORPHANED_MEDIA_PATHS = user_settings.get('EXCLUDE_DELETE_ORPHANED_MEDIA_PATHS', ())
STATIC_TAG = user_settings.get('STATIC_TAG', 'static')
STATICFILES_MANIFEST_ROOT = user_settings.get('STATICFILES_MANIFEST_ROOT', os.path.join(BASE_DIR, 'manifest'))
STATIC_IMAGES_EXTENSIONS = user_settings.get('STATIC_IMAGES_EXTENSIONS',
[
'jpg',
'jpe',
'jpeg',
'jpc',
'jp2',
'j2k',
'wdp',
'jxr',
'hdp',
'png',
'gif',
'webp',
'bmp',
'tif',
'tiff',
'ico'
])
STATIC_VIDEOS_EXTENSIONS = user_settings.get('STATIC_VIDEOS_EXTENSIONS',
[
'mp4',
'webm',
'flv',
'mov',
'ogv',
'3gp',
'3g2',
'wmv',
'mpeg',
'flv',
'mkv',
'avi'
])
# used only on Windows, see https://github.com/ahupp/python-magic#dependencies for your reference
MAGIC_FILE_PATH = user_settings.get('MAGIC_FILE_PATH', 'magic')
PREFIX = user_settings.get('PREFIX', settings.MEDIA_URL)
@receiver(setting_changed)
def reload_settings(*args, **kwargs):
setting_name, value = kwargs['setting'], kwargs['value']
if setting_name in ['CLOUDINARY_STORAGE', 'MEDIA_URL']:
importlib.reload(sys.modules[__name__])
|
mit
| 2,761,080,783,546,130,400 | 43.597938 | 115 | 0.4184 | false |
locaweb/netl2api
|
netl2api/server/httpd.py
|
1
|
20469
|
#!/usr/bin/python
# -*- coding: utf-8; -*-
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @author: Eduardo S. Scarpellini
# @author: Luiz Ozaki
__copyright__ = "Copyright 2012, Locaweb IDC"
import re
import os
import sys
import pwd
from multiprocessing import Process
from bottle import ServerAdapter, debug, run, route, get, put, delete, error, request, response, abort
from netl2api.server.http_cache import cached, invalidate_cache
from netl2api.server.http_utils import reply_json, validate_input, context
from netl2api.server.workers import switch_cfg_persistence
from netl2api.server.workers.switch_cfg_persistence_utils import defer_save_switch_cfg
from netl2api.lib.utils import get_switch_instance
from netl2api.lib.config import get_netl2server_cfg, setup_netl2server_logger, get_devices_cfg
cfg = get_netl2server_cfg()
logger = setup_netl2server_logger(cfg)
netl2debug = cfg.get("logger", "level").lower() == "debug"
RE_TYPE_VLAN_TAGGED = re.compile(r"^(?:True|False)$", re.IGNORECASE)
def log_request_ahead(msg=None, msg_args=None):
""" use @log_request_ahead between @authorize and @cached """
def proxy(f):
def log(*args, **kwargs):
if msg is not None:
lmsg = msg
lmsg_args = msg_args
if msg_args is not None:
lmsg = lmsg % tuple([kwargs.get(a) for a in lmsg_args])
logger.info("%s -- context: %s" % (lmsg, request["context"]))
return f(*args, **kwargs)
return log
return proxy
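# Illustrative decorator-ordering sketch (hypothetical endpoint), matching the
# hint in the docstring above and the routes below -- the logger sits outside
# the cache so that cache hits are still logged:
#
#   @get("/example/<device>")
#   @context
#   @log_request_ahead("Example request for device '%s'", ("device",))
#   @reply_json
#   @cached(ttl=60)
#   def example(device=None):
#       ...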
# Force Exception if using devices.cfg and permissions are wrong
dev_cfg = get_devices_cfg()
@get("/devices")
@context
@log_request_ahead("Listing available devices")
@reply_json
def devices_list():
# MUST return ONLY switch names -- for CLI completion purpose
#logger.info("Listing available devices -- context: %s" % request["context"])
return get_devices_cfg().keys()
@get("/info/<device>")
@context
@log_request_ahead("Showing generic information for device %s", ("device",))
@reply_json
@cached(ttl=86400)
def device_info(device=None):
#logger.info("Showing generic information for device %s -- context: %s" %\
# (device, request["context"]))
swinfo = {}
swinst = get_switch_instance(device)
swinfo["hostname"] = swinst.show_hostname()
swinfo["version"] = swinst.show_version()
swinfo["l2api"] = { "device.mgmt-api": "%s.%s" % (swinst.__class__.__module__,
swinst.__class__.__name__),
"device.mgmt-host": swinst.transport.host,
"device.vendor": swinst.__VENDOR__,
"device.hwtype": swinst.__HWTYPE__ }
return swinfo
@get("/version/<device>")
@context
@log_request_ahead("Showing version information from device %s", ("device",))
@reply_json
@cached(ttl=86400)
def show_version(device=None):
#logger.info("Showing version information from device '%s' -- context: %s" %\
# (device, request["context"]))
swinst = get_switch_instance(device)
defer_save_switch_cfg(device)
return swinst.show_version()
@get("/system/<device>")
@context
@log_request_ahead("Showing system information from device '%s'", ("device",))
@reply_json
@cached(ttl=86400)
def show_system(device=None):
#logger.info("Showing system information from device '%s' -- context: %s" %\
# (device, request["context"]))
swinst = get_switch_instance(device)
return swinst.show_system()
RE_ROUTE_INTERFACE_ACTIONS = re.compile(r"^(.+)/((?:at|de)tach_vlan|change_description|(?:dis|en)able)$")
@route(["/interfaces/<device>", "/interfaces/<device>/<remaining_path:path>"], ["get", "put"])
@context
def interfaces_route_actions(device=None, remaining_path=None):
if request.method.lower() == "get":
return show_interfaces(device=device, interface_id=remaining_path)
if request.method.lower() == "put":
m = RE_ROUTE_INTERFACE_ACTIONS.search(remaining_path)
if m is None:
abort(404, "Not Found")
route_act = m.group(2)
interface_id=m.group(1).lower()
if route_act == "attach_vlan":
return interface_attach_vlan(device=device, interface_id=interface_id)
if route_act == "detach_vlan":
return interface_detach_vlan(device=device, interface_id=interface_id)
if route_act == "change_description":
return change_interface_description(device=device, interface_id=interface_id)
if route_act == "enable":
return enable_interface(device=device, interface_id=interface_id)
if route_act == "disable":
return disable_interface(device=device, interface_id=interface_id)
abort(404, "Not Found")
abort(405, "Method Not Allowed")
@log_request_ahead("Showing interfaces informations from device '%s'", ("device",))
@reply_json
@cached(ttl=3600)
def show_interfaces(device=None, interface_id=None):
#logger.info("Showing interfaces informations from device '%s' -- context: %s" %\
# (device, request["context"]))
swinst = get_switch_instance(device)
return swinst.show_interfaces(interface_id=interface_id)
@reply_json
@validate_input(src="forms", vlan_id=int, tagged=RE_TYPE_VLAN_TAGGED)
def interface_attach_vlan(device=None, interface_id=None):
logger.info("Attaching VLAN to the interface '%s' in device '%s' -- context: %s" %\
(interface_id, device, request["context"]))
vlan_id = request.forms.get("vlan_id")
tagged = request.forms.get("tagged", "").lower() == "true"
swinst = get_switch_instance(device)
swinst.interface_attach_vlan(interface_id=interface_id, vlan_id=vlan_id, tagged=tagged)
defer_save_switch_cfg(device)
invalidate_cache("/vlans/%s" % device)
@validate_input(src="forms", vlan_id=int, tagged=RE_TYPE_VLAN_TAGGED)
def interface_detach_vlan(device=None, interface_id=None):
logger.info("Detaching VLAN from the interface '%s' in device '%s' -- context: %s" %\
(device, interface_id, request["context"]))
vlan_id = request.forms.get("vlan_id")
tagged = request.forms.get("tagged", "").lower() == "true"
swinst = get_switch_instance(device)
swinst.interface_detach_vlan(interface_id=interface_id, vlan_id=vlan_id, tagged=tagged)
defer_save_switch_cfg(device)
invalidate_cache("/vlans/%s" % device)
@validate_input(src="forms", interface_description=str)
def change_interface_description(device=None, interface_id=None):
logger.info("Changing interface '%s' description in device '%s' -- context: %s" %\
(interface_id, device, request["context"]))
interface_description = request.forms.get("interface_description")
swinst = get_switch_instance(device)
swinst.change_interface_description(interface_id=interface_id,
interface_description=interface_description)
defer_save_switch_cfg(device)
invalidate_cache("/interfaces/%s" % device)
@reply_json
def enable_interface(device=None, interface_id=None):
logger.info("Enabling interface '%s' in device '%s' -- context: %s" %\
(interface_id, device, request["context"]))
swinst = get_switch_instance(device)
swinst.enable_interface(interface_id=interface_id)
defer_save_switch_cfg(device)
invalidate_cache("/interfaces/%s" % device)
@reply_json
def disable_interface(device=None, interface_id=None):
logger.info("Disabling interface '%s' in device '%s' -- context: %s" %\
(interface_id, device, request["context"]))
swinst = get_switch_instance(device)
swinst.disable_interface(interface_id=interface_id)
defer_save_switch_cfg(device)
invalidate_cache("/interfaces/%s" % device)
@put("/vlans/<device>/<vlan_id>")
@context
@reply_json
def create_vlan(device=None, vlan_id=None):
logger.info("Creating new VLAN with id '%s' in device '%s' -- context: %s" %\
(vlan_id, device, request["context"]))
vlan_description = request.forms.get("vlan_description")
swinst = get_switch_instance(device)
swinst.create_vlan(vlan_id=vlan_id, vlan_description=vlan_description)
defer_save_switch_cfg(device)
invalidate_cache("/vlans/%s" % device)
response.status = 201
@put("/vlans/<device>/<vlan_id>/change_description")
@context
@reply_json
@validate_input(src="forms", vlan_description=str)
def change_vlan_description(device=None, vlan_id=None):
logger.info("Changing VLAN '%s' description in device '%s' -- context: %s" %\
(vlan_id, device, request["context"]))
vlan_description = request.forms.get("vlan_description")
swinst = get_switch_instance(device)
swinst.change_vlan_description(vlan_id=vlan_id,
vlan_description=vlan_description)
defer_save_switch_cfg(device)
invalidate_cache("/vlans/%s" % device)
@delete("/vlans/<device>/<vlan_id>")
@context
@reply_json
def destroy_vlan(device=None, vlan_id=None):
logger.info("Removing VLAN '%s' from device '%s' -- context: %s" %\
(vlan_id, device, request["context"]))
swinst = get_switch_instance(device)
swinst.destroy_vlan(vlan_id=vlan_id)
defer_save_switch_cfg(device)
invalidate_cache("/vlans/%s" % device)
response.status = 204
@get(["/vlans/<device>", "/vlans/<device>/<vlan_id>"])
@context
@log_request_ahead("Showing VLAN information from device %s", ("device",))
@reply_json
@cached(ttl=3600)
def show_vlans(device=None, vlan_id=None):
#logger.info("Showing VLAN information from device '%s' -- context: %s" %\
# (device, request["context"]))
swinst = get_switch_instance(device)
return swinst.show_vlans(vlan_id=vlan_id)
@put("/vlans/<device>/<vlan_id>/enable")
@context
@reply_json
def enable_vlan(device=None, vlan_id=None):
logger.info("Enabling VLAN '%s' in device '%s' -- context: %s" %\
(vlan_id, device, request["context"]))
swinst = get_switch_instance(device)
swinst.enable_vlan(vlan_id=vlan_id)
defer_save_switch_cfg(device)
invalidate_cache("/vlans/%s" % device)
@put("/vlans/<device>/<vlan_id>/disable")
@context
@reply_json
def disable_vlan(device=None, vlan_id=None):
logger.info("Disabling VLAN '%s' in device '%s' -- context: %s" %\
(vlan_id, device, request["context"]))
swinst = get_switch_instance(device)
swinst.disable_vlan(vlan_id=vlan_id)
defer_save_switch_cfg(device)
invalidate_cache("/vlans/%s" % device)
@put("/lags/<device>/<lag_id>")
@context
@reply_json
def create_lag(device=None, lag_id=None):
logger.info("Creating new LAG with id '%s' in device '%s' -- context: %s" %\
(lag_id, device, request["context"]))
lag_description = request.forms.get("lag_description")
swinst = get_switch_instance(device)
swinst.create_lag(lag_id=lag_id, lag_description=lag_description)
defer_save_switch_cfg(device)
invalidate_cache("/lags/%s" % device)
response.status = 201
@put("/lags/<device>/<lag_id>/change_description")
@context
@reply_json
@validate_input(src="forms", lag_description=str)
def change_lag_description(device=None, lag_id=None):
logger.info("Changing LAG '%s' description in device '%s' -- context: %s" %\
(lag_id, device, request["context"]))
lag_description = request.forms.get("lag_description")
swinst = get_switch_instance(device)
swinst.change_lag_description(lag_id=lag_id,
lag_description=lag_description)
defer_save_switch_cfg(device)
invalidate_cache("/lags/%s" % device)
@delete("/lags/<device>/<lag_id>")
@context
@reply_json
def destroy_lag(device=None, lag_id=None):
logger.info("Removing LAG '%s' from device '%s' -- context: %s" %\
                (lag_id, device, request["context"]))
swinst = get_switch_instance(device)
swinst.destroy_lag(lag_id=lag_id)
defer_save_switch_cfg(device)
invalidate_cache("/lags/%s" % device)
response.status = 204
@get(["/lags/<device>", "/lags/<device>/<lag_id>"])
@context
@log_request_ahead("Showing LAG information from device %s", ("device",))
@reply_json
@cached(ttl=3600)
def show_lags(device=None, lag_id=None):
#logger.info("Showing LAG information from device '%s' -- context: %s" %\
# (device, request["context"]))
swinst = get_switch_instance(device)
return swinst.show_lags(lag_id=lag_id)
@put("/lags/<device>/<lag_id>/enable")
@context
@reply_json
def enable_lag(device=None, lag_id=None):
logger.info("Enabling LAG '%s' in device '%s' -- context: %s" %\
(lag_id, device, request["context"]))
swinst = get_switch_instance(device)
swinst.enable_lag(lag_id=lag_id)
defer_save_switch_cfg(device)
invalidate_cache("/lags/%s" % device)
@put("/lags/<device>/<lag_id>/disable")
@context
@reply_json
def disable_lag(device=None, lag_id=None):
logger.info("Disabling LAG '%s' in device '%s' -- context: %s" %\
(lag_id, device, request["context"]))
swinst = get_switch_instance(device)
swinst.disable_lag(lag_id=lag_id)
defer_save_switch_cfg(device)
invalidate_cache("/lags/%s" % device)
@put("/lags/<device>/<lag_id>/attach_interface")
@context
@validate_input(src="forms", interface_id=str)
@reply_json
def lag_attach_interface(device=None, lag_id=None):
logger.info("Attaching a new interface to LAG '%s' in device '%s' -- context: %s" %\
(lag_id, device, request["context"]))
interface_id = request.forms.get("interface_id")
swinst = get_switch_instance(device)
swinst.lag_attach_interface(lag_id=lag_id, interface_id=interface_id)
defer_save_switch_cfg(device)
invalidate_cache("/lags/%s" % device)
@put("/lags/<device>/<lag_id>/detach_interface")
@context
@validate_input(src="forms", interface_id=str)
@reply_json
def lag_detach_interface(device=None, lag_id=None):
logger.info("Detaching an interface from LAG '%s' in device '%s' -- context: %s" %\
(lag_id, device, request["context"]))
interface_id = request.forms.get("interface_id")
swinst = get_switch_instance(device)
swinst.lag_detach_interface(lag_id=lag_id, interface_id=interface_id)
defer_save_switch_cfg(device)
invalidate_cache("/lags/%s" % device)
@put("/lags/<device>/<lag_id>/attach_vlan")
@context
@validate_input(src="forms", vlan_id=int, tagged=RE_TYPE_VLAN_TAGGED)
@reply_json
def lag_attach_vlan(device=None, lag_id=None):
logger.info("Attaching a new VLAN to LAG '%s' in device '%s' -- context: %s" %\
(lag_id, device, request["context"]))
vlan_id = request.forms.get("vlan_id")
tagged = request.forms.get("tagged", "").lower() == "true"
swinst = get_switch_instance(device)
swinst.lag_attach_vlan(lag_id=lag_id, vlan_id=vlan_id, tagged=tagged)
defer_save_switch_cfg(device)
invalidate_cache("/vlans/%s" % device)
@put("/lags/<device>/<lag_id>/detach_vlan")
@context
@validate_input(src="forms", vlan_id=int, tagged=RE_TYPE_VLAN_TAGGED)
@reply_json
def lag_detach_vlan(device=None, lag_id=None):
logger.info("Detaching a VLAN from LAG '%s' in device '%s' -- context: %s" %\
(lag_id, device, request["context"]))
vlan_id = request.forms.get("vlan_id")
tagged = request.forms.get("tagged", "").lower() == "true"
swinst = get_switch_instance(device)
swinst.lag_detach_vlan(lag_id=lag_id, vlan_id=vlan_id, tagged=tagged)
defer_save_switch_cfg(device)
invalidate_cache("/vlans/%s" % device)
#@get(["/networkpath/<from_device>", "/networkpath/<from_device>/<to_device>"])
#@context
#@log_request_ahead("Tracing network-path from device '%s' to '%s'", ("from_device", "to_device"))
#@reply_json
#@cached(ttl=86400)
#def trace_network_path(from_device=None, to_device=None):
# #logger.info("Tracing network-path from device '%s' to '%s' -- context: %s" %\
# # (from_device, to_device, request["context"]))
# network_paths = find_network_paths(graph_repr(from_device=from_device),
# from_device=from_device, to_device=to_device)
# #logger.debug("Path from device '%s' to device '%s': %s" % (from_device, to_device, network_paths))
# return network_paths
@error(400)
@reply_json
def error400(err):
return {"server.status": err.status, "server.message": err.output}
@error(403)
@reply_json
def error403(err):
return {"server.status": err.status, "server.message": err.output}
@error(404)
@reply_json
def error404(err):
return {"server.status": err.status, "server.message": err.output}
@error(405)
@reply_json
def error405(err):
return {"server.status": err.status, "server.message": err.output}
@error(500)
@reply_json
def error500(err):
err_type = repr(err.exception).split("(")[0]
err_msg = err.exception.message
err_info = { "server.status": err.status,
"app.error.type": err_type,
"app.error.message": err_msg }
#if isinstance(err.exception, L2Exception):
if str(type(err.exception)).find("netl2api.l2api") > -1:
err_info["server.message"] = "L2API Error"
else:
err_info["server.message"] = "Internal Server Error"
return err_info
class PasteServerAdapter(ServerAdapter):
def run(self, handler): # pragma: no cover
from paste import httpserver
if not self.quiet:
from paste.translogger import TransLogger
handler = TransLogger(handler)
httpserver.serve(handler, host=self.host, port=str(self.port), protocol_version="HTTP/1.1",
daemon_threads=True, socket_timeout=600,
use_threadpool=cfg.get("httpd", "use_threadpool").lower() == "true",
threadpool_workers=cfg.getint("httpd", "threadpool_workers"),
threadpool_options={ "spawn_if_under": cfg.getint("httpd", "threadpool_workers")/2,
"hung_check_period": 60,
"kill_thread_limit": 900 },
**self.options)
def start_workers():
if cfg.get("job.switch_cfg_persistence", "enabled") == "true":
p_switch_cfg_persistence = Process(target=switch_cfg_persistence.daemon,
name="netl2api [netl2server:http-daemon/job/switch-cfg-persistence]")
p_switch_cfg_persistence.start()
else:
logger.info("Persistence-control job is disabled")
def start():
debug(netl2debug)
ps_owner = cfg.get("httpd", "user")
if ps_owner:
os.setuid(pwd.getpwnam(ps_owner)[2])
try:
from setproctitle import setproctitle
except ImportError:
pass
else:
setproctitle("netl2api [netl2server:http-daemon]")
logger.info("Starting netl2server...")
start_workers()
run(server=PasteServerAdapter, host=cfg.get("httpd", "host"), port=cfg.getint("httpd", "port"))
def main(action="foreground"):
from supay import Daemon
daemon = Daemon(name="netl2server", catch_all_log=cfg.get("httpd", "logfile"))
if action == "start":
daemon.start()
start()
elif action == "foreground":
start()
elif action == "stop":
daemon.stop()
elif action == "status":
daemon.status()
else:
cli_help()
def cli_help():
print "Usage: %s <start|stop|status|foreground>" % sys.argv[0]
sys.exit(1)
def cli():
if len(sys.argv) < 2:
cli_help()
main(action=sys.argv[1])
if __name__ == '__main__':
cli()
|
apache-2.0
| -7,323,187,569,724,626,000 | 36.489011 | 112 | 0.637305 | false |
MaterialsDiscovery/PyChemia
|
tests/test_1_doctest_utils.py
|
1
|
1891
|
import doctest
import unittest
from .doctest_2to3 import doctest_suite
def broken_function():
raise Exception('This is broken')
class MyTestCase(unittest.TestCase):
def test(self):
"""
DocTests (pychemia.utils) [exceptions] :
"""
from pychemia.utils.periodic import atomic_number
with self.assertRaises(Exception) as context:
atomic_number(['H', u'A'])
# self.assertTrue(u'Atomic symbol not found' == context.exception)
from pychemia.utils.computing import read_file
with self.assertRaises(Exception) as context:
read_file('/dev/abc')
# self.assertTrue('Could not open file: /dev/abc' in context.exception)
from pychemia.utils.computing import get_float
with self.assertRaises(Exception) as context:
get_float('3i')
# self.assertTrue("Could not convert '3i' into a float number" in context.exception)
def test_periodic():
"""
DocTests (pychemia.utils.periodic) :
"""
import pychemia.utils.periodic
dt = doctest.testmod(pychemia.utils.periodic, verbose=True)
assert dt.failed == 0
def test_mathematics():
"""
DocTests (pychemia.utils.mathematics) :
"""
import pychemia.utils.mathematics
dt = doctest.testmod(pychemia.utils.mathematics, verbose=True)
assert dt.failed == 0
def test_computing():
"""
DocTests (pychemia.utils.computing) :
"""
import pychemia.utils.computing
suite = unittest.TestSuite()
suite.addTest(doctest_suite(pychemia.utils.computing))
runner = unittest.TextTestRunner(verbosity=1)
result = runner.run(suite)
assert result.wasSuccessful()
if __name__ == "__main__":
unittest.main(defaultTest='test_computing')
unittest.main()
|
mit
| 3,513,044,519,845,336,000 | 29.015873 | 96 | 0.626653 | false |
gunan/tensorflow
|
tensorflow/python/keras/activations.py
|
1
|
14697
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Built-in activation functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.util.tf_export import keras_export
# b/123041942
# In TF 2.x, if the `tf.nn.softmax` is used as an activation function in Keras
# layers, it gets serialized as 'softmax_v2' instead of 'softmax' as the
# internal method name is returned in serialization. This results in errors in
# model exporting and loading as Keras can't find any activation function with
# the name of `softmax_v2`.
# This dict maps the activation function name from its v2 version to its
# canonical name.
_TF_ACTIVATIONS_V2 = {
'softmax_v2': 'softmax',
}
@keras_export('keras.activations.softmax')
def softmax(x, axis=-1):
"""Softmax converts a real vector to a vector of categorical probabilities.
The elements of the output vector are in range (0, 1) and sum to 1.
Each vector is handled independently. The `axis` argument sets which axis
of the input the function is applied along.
Softmax is often used as the activation for the last
layer of a classification network because the result could be interpreted as
a probability distribution.
The softmax of each vector x is computed as
`exp(x) / tf.reduce_sum(exp(x))`.
  The input values are the log-odds of the resulting probability.
Arguments:
x : Input tensor.
axis: Integer, axis along which the softmax normalization is applied.
Returns:
Tensor, output of softmax transformation (all values are non-negative
and sum to 1).
Raises:
ValueError: In case `dim(x) == 1`.
"""
ndim = K.ndim(x)
if ndim == 2:
return nn.softmax(x)
elif ndim > 2:
e = math_ops.exp(x - math_ops.reduce_max(x, axis=axis, keepdims=True))
s = math_ops.reduce_sum(e, axis=axis, keepdims=True)
return e / s
else:
raise ValueError('Cannot apply softmax to a tensor that is 1D. '
'Received input: %s' % (x,))
@keras_export('keras.activations.elu')
def elu(x, alpha=1.0):
"""Exponential linear unit.
Arguments:
x: Input tensor.
alpha: A scalar, slope of negative section.
Returns:
The exponential linear activation: `x` if `x > 0` and
`alpha * (exp(x)-1)` if `x < 0`.
Reference:
- [Clevert et al. 2016](https://arxiv.org/abs/1511.07289)
"""
return K.elu(x, alpha)
@keras_export('keras.activations.selu')
def selu(x):
"""Scaled Exponential Linear Unit (SELU).
The Scaled Exponential Linear Unit (SELU) activation function is defined as:
- `if x > 0: return scale * x`
- `if x < 0: return scale * alpha * (exp(x) - 1)`
where `alpha` and `scale` are pre-defined constants
(`alpha=1.67326324` and `scale=1.05070098`).
Basically, the SELU activation function multiplies `scale` (> 1) with the
output of the `tf.keras.activations.elu` function to ensure a slope larger
than one for positive inputs.
The values of `alpha` and `scale` are
chosen so that the mean and variance of the inputs are preserved
between two consecutive layers as long as the weights are initialized
correctly (see `tf.keras.initializers.LecunNormal` initializer)
and the number of input units is "large enough"
(see reference paper for more information).
Example Usage:
>>> num_classes = 10 # 10-class problem
>>> model = tf.keras.Sequential()
>>> model.add(tf.keras.layers.Dense(64, kernel_initializer='lecun_normal',
... activation='selu'))
>>> model.add(tf.keras.layers.Dense(32, kernel_initializer='lecun_normal',
... activation='selu'))
>>> model.add(tf.keras.layers.Dense(16, kernel_initializer='lecun_normal',
... activation='selu'))
>>> model.add(tf.keras.layers.Dense(num_classes, activation='softmax'))
Arguments:
x: A tensor or variable to compute the activation function for.
Returns:
The scaled exponential unit activation: `scale * elu(x, alpha)`.
Notes:
- To be used together with the
`tf.keras.initializers.LecunNormal` initializer.
- To be used together with the dropout variant
`tf.keras.layers.AlphaDropout` (not regular dropout).
References:
- [Klambauer et al., 2017](https://arxiv.org/abs/1706.02515)
"""
return nn.selu(x)
@keras_export('keras.activations.softplus')
def softplus(x):
"""Softplus activation function.
Example Usage:
>>> a = tf.constant([-20, -1.0, 0.0, 1.0, 20], dtype = tf.float32)
>>> b = tf.keras.activations.softplus(a)
>>> b.numpy()
array([2.0611537e-09, 3.1326166e-01, 6.9314718e-01, 1.3132616e+00,
2.0000000e+01], dtype=float32)
Arguments:
x: Input tensor.
Returns:
The softplus activation: `log(exp(x) + 1)`.
"""
return nn.softplus(x)
@keras_export('keras.activations.softsign')
def softsign(x):
"""Softsign activation function.
Example Usage:
>>> a = tf.constant([-1.0, 0.0, 1.0], dtype = tf.float32)
>>> b = tf.keras.activations.softsign(a)
>>> b.numpy()
array([-0.5, 0. , 0.5], dtype=float32)
Arguments:
x: Input tensor.
Returns:
The softsign activation: `x / (abs(x) + 1)`.
"""
return nn.softsign(x)
@keras_export('keras.activations.swish')
def swish(x):
"""Swish activation function.
Swish activation function which returns `x*sigmoid(x)`.
It is a smooth, non-monotonic function that consistently matches
or outperforms ReLU on deep networks, it is unbounded above and
bounded below.
Example Usage:
>>> a = tf.constant([-20, -1.0, 0.0, 1.0, 20], dtype = tf.float32)
>>> b = tf.keras.activations.swish(a)
>>> b.numpy()
array([-4.1223075e-08, -2.6894143e-01, 0.0000000e+00, 7.3105860e-01,
2.0000000e+01], dtype=float32)
Arguments:
x: Input tensor.
Returns:
The swish activation applied to `x` (see reference paper for details).
Reference:
- [Ramachandran et al., 2017](https://arxiv.org/abs/1710.05941)
"""
return nn.swish(x)
@keras_export('keras.activations.relu')
def relu(x, alpha=0., max_value=None, threshold=0):
"""Applies the rectified linear unit activation function.
With default values, this returns the standard ReLU activation:
`max(x, 0)`, the element-wise maximum of 0 and the input tensor.
Modifying default parameters allows you to use non-zero thresholds,
change the max value of the activation,
and to use a non-zero multiple of the input for values below the threshold.
For example:
>>> foo = tf.constant([-10, -5, 0.0, 5, 10], dtype = tf.float32)
>>> tf.keras.activations.relu(foo).numpy()
array([ 0., 0., 0., 5., 10.], dtype=float32)
>>> tf.keras.activations.relu(foo, alpha=0.5).numpy()
array([-5. , -2.5, 0. , 5. , 10. ], dtype=float32)
>>> tf.keras.activations.relu(foo, max_value=5).numpy()
array([0., 0., 0., 5., 5.], dtype=float32)
>>> tf.keras.activations.relu(foo, threshold=5).numpy()
array([-0., -0., 0., 0., 10.], dtype=float32)
Arguments:
x: Input `tensor` or `variable`.
alpha: A `float` that governs the slope for values lower than the
threshold.
max_value: A `float` that sets the saturation threshold (the largest value
the function will return).
threshold: A `float` giving the threshold value of the activation function
below which values will be damped or set to zero.
Returns:
A `Tensor` representing the input tensor,
transformed by the relu activation function.
Tensor will be of the same shape and dtype of input `x`.
"""
return K.relu(x, alpha=alpha, max_value=max_value, threshold=threshold)
@keras_export('keras.activations.tanh')
def tanh(x):
"""Hyperbolic tangent activation function.
For example:
>>> a = tf.constant([-3.0,-1.0, 0.0,1.0,3.0], dtype = tf.float32)
>>> b = tf.keras.activations.tanh(a)
>>> b.numpy()
array([-0.9950547, -0.7615942, 0., 0.7615942, 0.9950547], dtype=float32)
Arguments:
x: Input tensor.
Returns:
Tensor of same shape and dtype of input `x`, with tanh activation:
`tanh(x) = sinh(x)/cosh(x) = ((exp(x) - exp(-x))/(exp(x) + exp(-x)))`.
"""
return nn.tanh(x)
@keras_export('keras.activations.sigmoid')
def sigmoid(x):
"""Sigmoid activation function, `sigmoid(x) = 1 / (1 + exp(-x))`.
Applies the sigmoid activation function. For small values (<-5),
`sigmoid` returns a value close to zero, and for large values (>5)
the result of the function gets close to 1.
Sigmoid is equivalent to a 2-element Softmax, where the second element is
assumed to be zero. The sigmoid function always returns a value between
0 and 1.
For example:
>>> a = tf.constant([-20, -1.0, 0.0, 1.0, 20], dtype = tf.float32)
>>> b = tf.keras.activations.sigmoid(a)
>>> b.numpy()
array([2.0611537e-09, 2.6894143e-01, 5.0000000e-01, 7.3105860e-01,
1.0000000e+00], dtype=float32)
Arguments:
x: Input tensor.
Returns:
Tensor with the sigmoid activation: `1 / (1 + exp(-x))`.
"""
return nn.sigmoid(x)
@keras_export('keras.activations.exponential')
def exponential(x):
"""Exponential activation function.
For example:
>>> a = tf.constant([-3.0,-1.0, 0.0,1.0,3.0], dtype = tf.float32)
>>> b = tf.keras.activations.exponential(a)
>>> b.numpy()
array([0.04978707, 0.36787945, 1., 2.7182817 , 20.085537], dtype=float32)
Arguments:
x: Input tensor.
Returns:
Tensor with exponential activation: `exp(x)`.
"""
return math_ops.exp(x)
@keras_export('keras.activations.hard_sigmoid')
def hard_sigmoid(x):
"""Hard sigmoid activation function.
A faster approximation of the sigmoid activation.
For example:
>>> a = tf.constant([-3.0,-1.0, 0.0,1.0,3.0], dtype = tf.float32)
>>> b = tf.keras.activations.hard_sigmoid(a)
>>> b.numpy()
array([0. , 0.3, 0.5, 0.7, 1. ], dtype=float32)
Arguments:
x: Input tensor.
Returns:
The hard sigmoid activation, defined as:
- `if x < -2.5: return 0`
- `if x > 2.5: return 1`
- `if -2.5 <= x <= 2.5: return 0.2 * x + 0.5`
"""
return K.hard_sigmoid(x)
@keras_export('keras.activations.linear')
def linear(x):
"""Linear activation function (pass-through).
For example:
>>> a = tf.constant([-3.0,-1.0, 0.0,1.0,3.0], dtype = tf.float32)
>>> b = tf.keras.activations.linear(a)
>>> b.numpy()
array([-3., -1., 0., 1., 3.], dtype=float32)
Arguments:
x: Input tensor.
Returns:
The input, unmodified.
"""
return x
@keras_export('keras.activations.serialize')
def serialize(activation):
"""Returns the string identifier of an activation function.
Arguments:
activation : Function object.
Returns:
String denoting the name attribute of the input function
For example:
>>> tf.keras.activations.serialize(tf.keras.activations.tanh)
'tanh'
>>> tf.keras.activations.serialize(tf.keras.activations.sigmoid)
'sigmoid'
>>> tf.keras.activations.serialize('abcd')
Traceback (most recent call last):
...
ValueError: ('Cannot serialize', 'abcd')
Raises:
ValueError: The input function is not a valid one.
"""
if (hasattr(activation, '__name__') and
activation.__name__ in _TF_ACTIVATIONS_V2):
return _TF_ACTIVATIONS_V2[activation.__name__]
return serialize_keras_object(activation)
@keras_export('keras.activations.deserialize')
def deserialize(name, custom_objects=None):
"""Returns activation function given a string identifier.
Arguments:
      name: String identifier.
Returns:
Corresponding activation function.
For example:
>>> tf.keras.activations.deserialize('linear')
<function linear at 0x1239596a8>
>>> tf.keras.activations.deserialize('sigmoid')
<function sigmoid at 0x123959510>
>>> tf.keras.activations.deserialize('abcd')
Traceback (most recent call last):
...
ValueError: Unknown activation function:abcd
Args:
name: The name of the activation function.
custom_objects: Optional `{function_name: function_obj}`
dictionary listing user-provided activation functions.
Raises:
ValueError: `Unknown activation function` if the input string does not
denote any defined Tensorflow activation function.
"""
return deserialize_keras_object(
name,
module_objects=globals(),
custom_objects=custom_objects,
printable_module_name='activation function')
@keras_export('keras.activations.get')
def get(identifier):
"""Returns function.
Arguments:
identifier: Function or string
Returns:
Function corresponding to the input string or input function.
For example:
>>> tf.keras.activations.get('softmax')
<function softmax at 0x1222a3d90>
>>> tf.keras.activations.get(tf.keras.activations.softmax)
<function softmax at 0x1222a3d90>
>>> tf.keras.activations.get(None)
<function linear at 0x1239596a8>
>>> tf.keras.activations.get(abs)
<built-in function abs>
>>> tf.keras.activations.get('abcd')
Traceback (most recent call last):
...
ValueError: Unknown activation function:abcd
Raises:
ValueError: Input is an unknown function or string, i.e., the input does
not denote any defined function.
"""
if identifier is None:
return linear
if isinstance(identifier, six.string_types):
identifier = str(identifier)
return deserialize(identifier)
elif isinstance(identifier, dict):
return deserialize(identifier)
elif callable(identifier):
return identifier
else:
raise TypeError(
'Could not interpret activation function identifier: {}'.format(
repr(identifier)))
|
apache-2.0
| -2,648,682,694,046,936,000 | 28.93279 | 80 | 0.668027 | false |
freevo/kaa-candy
|
src/widgets/video.py
|
1
|
8318
|
# -*- coding: iso-8859-1 -*-
# -----------------------------------------------------------------------------
# video.py - video widget
# -----------------------------------------------------------------------------
# kaa-candy - Fourth generation Canvas System using Clutter as backend
# Copyright (C) 2012 Dirk Meyer
#
# First Version: Dirk Meyer <https://github.com/Dischi>
# Maintainer: Dirk Meyer <https://github.com/Dischi>
#
# Based on various previous attempts to create a canvas system for
# Freevo by Dirk Meyer and Jason Tackaberry. Please see the file
# AUTHORS for a complete list of authors.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MER-
# CHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# -----------------------------------------------------------------------------
__all__ = [ 'Video', 'Audio', 'SEEK_RELATIVE', 'SEEK_ABSOLUTE', 'SEEK_PERCENTAGE',
'STATE_IDLE', 'STATE_PLAYING', 'STATE_PAUSED', 'NEXT', 'POSSIBLE_PLAYER' ]
# python imports
import logging
# kaa imports
import kaa
import kaa.metadata
# kaa.candy imports
from widget import Widget
from .. import config
# get logging object
log = logging.getLogger('kaa.candy')
SEEK_RELATIVE = 'SEEK_RELATIVE'
SEEK_ABSOLUTE = 'SEEK_ABSOLUTE'
SEEK_PERCENTAGE = 'SEEK_PERCENTAGE'
STATE_IDLE = 'STATE_IDLE'
STATE_PLAYING = 'STATE_PLAYING'
STATE_PAUSED = 'STATE_PAUSED'
ASPECT_ORIGINAL = 'ASPECT_ORIGINAL'
ASPECT_16_9 = 'ASPECT_16_9'
ASPECT_4_3 = 'ASPECT_4_3'
ASPECT_ZOOM = 'ASPECT_ZOOM'
ASPECTS = [ ASPECT_ORIGINAL, ASPECT_16_9, ASPECT_4_3, ASPECT_ZOOM ]
NEXT = 'NEXT'
# filled with values from the backend later
POSSIBLE_PLAYER = []
class Video(Widget):
"""
Video widget
"""
candyxml_name = 'video'
candy_backend = 'candy.Video'
attributes = [ 'uri', 'config', 'audio_only', 'player' ]
audio_only = False
__player = None
def __init__(self, pos=None, size=None, uri=None, player='gstreamer', context=None):
"""
Create the video widget. The widget supports gstreamer
        (default) and mplayer, but only gstreamer can be used as a real
widget for now. When choosing mplayer it will always open a
full screen window to play the video.
The playback can be configured using the config member
        dictionary. Please note that gstreamer tries to figure out
        most settings itself, and AC3/DTS passthrough only works when
        pulseaudio is used and configured correctly (pavucontrol).
        Future versions of kaa.candy may have more or changed options.
"""
super(Video, self).__init__(pos, size, context)
self.uri = uri
self.signals = kaa.Signals('finished', 'progress', 'streaminfo')
self.state = STATE_IDLE
# player configuration
self.config = {
'mplayer.vdpau': False,
'mplayer.passthrough': False,
'fresh-rate': None
}
# current streaminfo / audio / subtitle values
self.streaminfo = {
'audio': {},
'subtitle': {},
'is_menu': False,
}
self.aspect = ASPECT_ORIGINAL
self.player = player or 'gstreamer'
@property
def player(self):
return self.__player
@player.setter
def player(self, value):
if self.state != STATE_IDLE:
raise RuntimeError('player already running')
self.__player = value
@property
def uri(self):
return self.__uri
@uri.setter
def uri(self, value):
if value and isinstance(value, (str, unicode)) and value.startswith('$'):
# variable from the context, e.g. $varname
value = self.context.get(value) or ''
if value and not value.find('://') > 0:
value = 'file://' + value
if value:
self.metadata = kaa.metadata.parse(value)
else:
self.metadata = None
self.__uri = value
@classmethod
def candyxml_parse(cls, element):
"""
Parse the candyxml element for parameter to create the widget.
"""
return super(Video, cls).candyxml_parse(element).update(
uri=element.uri or element.filename, player=element.player)
#
# public API to control the player
#
def play(self):
"""
Start the playback
"""
if self.state != STATE_IDLE:
raise RuntimeError('player already running')
if self.player not in POSSIBLE_PLAYER:
raise RuntimeError('unknown player %s' % self.player)
self.state = STATE_PLAYING
self.backend.do_play()
def stop(self):
"""
Stop the playback
"""
if self.state != STATE_IDLE:
self.backend.do_stop()
def pause(self):
"""
Pause playback
"""
if self.state == STATE_PLAYING:
self.backend.do_pause()
self.state = STATE_PAUSED
def resume(self):
"""
Resume a paused playback
"""
if self.state == STATE_PAUSED:
self.backend.do_resume()
self.state = STATE_PLAYING
def seek(self, value, type=SEEK_RELATIVE):
"""
Seek to the given position. Type is either SEEK_RELATIVE
(default), SEEK_ABSOLUTE or SEEK_PERCENTAGE.
"""
self.backend.do_seek(value, type)
def set_audio(self, idx):
"""
Set the audio channel to stream number idx
"""
self.backend.do_set_audio(idx)
return idx
def set_subtitle(self, idx):
"""
Set the subtitle sream idx. Use -1 to turn subtitles off.
"""
self.backend.do_set_subtitle(idx)
return idx
def set_deinterlace(self, value):
"""
Turn on/off deinterlacing
"""
self.backend.do_set_deinterlace(value)
def set_aspect(self, aspect):
"""
Set the aspect ratio
"""
if aspect == NEXT:
aspect = ASPECTS[(ASPECTS.index(self.aspect) + 1) % len(ASPECTS)]
self.backend.do_set_aspect(aspect)
self.aspect = aspect
def nav_command(self, cmd):
"""
Send DVD navigation command
"""
self.backend.do_nav_command(cmd)
#
# backend callbacks
#
def event_progress(self, pos):
"""
Callback from the backend: new progress information
"""
self.signals['progress'].emit(pos)
def event_finished(self):
"""
Callback from the backend: playback finished
"""
self.state = STATE_IDLE
self.signals['finished'].emit()
def event_streaminfo(self, streaminfo):
"""
Callback from the backend: streaminfo
"""
del streaminfo['sync']
self.signals['streaminfo'].emit(streaminfo)
self.streaminfo = streaminfo
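# Illustrative lifecycle sketch (hypothetical file path and callback; not part
# of the original module) for the Video widget defined above:
#
#   video = Video(pos=(0, 0), size=(1280, 720), uri='/tmp/movie.mkv')
#   video.signals['finished'].connect(on_finished)   # hypothetical callback
#   video.play()
#   video.seek(60, SEEK_ABSOLUTE)   # jump to one minute
#   video.stop()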
class Audio(Video):
"""
Hidden video widget for audio only
"""
candyxml_name = 'audio'
candy_backend = 'candy.Audio'
audio_only = True
attributes = Video.attributes + [ 'visualisation' ]
def __init__(self, pos=None, size=None, uri=None, player='gstreamer', visualisation=None,
context=None):
"""
Create the audio widget. If visualisation is None it is invisible.
"""
super(Audio, self).__init__(pos, size, uri, player, context)
self.visualisation = visualisation
@classmethod
def candyxml_parse(cls, element):
"""
Parse the candyxml element for parameter to create the widget.
"""
return super(Audio, cls).candyxml_parse(element).update(
visualisation=element.visualisation)
|
gpl-2.0
| 1,214,040,656,855,306,000 | 29.028881 | 93 | 0.590406 | false |
qkitgroup/qkit
|
qkit/core/s_init/S85_init_measurement.py
|
1
|
2691
|
import qkit
# Check if we are using the new data structure and if we have set user and RunID
if 'new_data_structure' in qkit.cfg:
raise ValueError(__name__+": Please use qkit.cfg['datafolder_structure'] = 1 instead of qkit.cfg['new_data_structure'] in your config.")
if qkit.cfg.get('datafolder_structure', 1) == 2:
# noinspection SpellCheckingInspection
try:
import ipywidgets as widgets
from IPython.display import display
b = widgets.Button(
description='Please Check!',
disabled=False,
button_style='info', # 'success', 'info', 'warning', 'danger' or ''
)
b.f1 = widgets.Text(
value=str(qkit.cfg.get('run_id', '')),
placeholder='***RUN_ID IS EMPTY***',
description='Please check: Run ID',
disabled=False,
style={'description_width': 'initial'}
)
b.f2 = widgets.Text(
value=str(qkit.cfg.get('user', '')),
placeholder='***USER IS EMPTY***',
description='user name',
disabled=False,
style={'description_width': 'initial'}
)
if not qkit.cfg.get('run_id', False):
b.f1.border_color = 'red'
b.button_style = 'danger'
if not qkit.cfg.get('user', False):
b.f2.border_color = 'red'
b.button_style = 'danger'
def clickfunc(btn):
if not b.f1.value:
raise ValueError("RUN_ID is still empty!")
if not b.f2.value:
raise ValueError("USER is still empty!")
qkit.cfg['run_id'] = b.f1.value
qkit.cfg['user'] = b.f2.value
btn.f1.disabled = True # close()
btn.f1.border_color = '#cccccc'
btn.f2.border_color = '#cccccc'
btn.f2.disabled = True # close()
btn.disabled = True # ()
btn.button_style = 'success'
btn.description = 'Done.'
b.on_click(clickfunc)
display(widgets.HBox([b.f1, b.f2, b]))
except ImportError:
import logging
if 'run_id' not in qkit.cfg:
logging.error(
'You are using the new data structure, but you did not specify a run ID. Please set qkit.cfg["run_id"] NOW to avoid searching your data.')
if 'user' not in qkit.cfg:
logging.error(
'You are using the new data structure, but you did not specify a username. Please set qkit.cfg["user"] NOW to avoid searching your data.')
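# --- Hedged usage sketch (not part of the original script) ---
# Shows the config keys this init script checks for; the values are made-up
# examples and the helper is never called here.
def _example_prepare_cfg():
    qkit.cfg['datafolder_structure'] = 2  # enable the new data structure
    qkit.cfg['user'] = 'jdoe'             # user name pre-filled in the widget
    qkit.cfg['run_id'] = 'RUN_2018_01'    # run ID pre-filled in the widget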
|
gpl-2.0
| -3,821,623,076,404,946,400 | 38.573529 | 158 | 0.522482 | false |
mitodl/open-discussions
|
open_discussions/permissions.py
|
1
|
7578
|
"""Custom permissions"""
from django.http import Http404
from prawcore.exceptions import Forbidden as PrawForbidden, Redirect as PrawRedirect
from rest_framework import permissions
from channels.models import Channel
from open_discussions import features
def channel_exists(view):
"""
Return True if a Channel object exists for a channel_name in the view, or there is no channel name.
Raises 404 if the Channel does not exist.
Args:
view (rest_framework.views.APIView): django DRF view
Returns:
bool: True if Channel exists (or there is no channel name)
"""
channel_name = view.kwargs.get("channel_name", None)
if not channel_name or Channel.objects.filter(name=channel_name).exists():
return True
raise Http404()
def is_staff_user(request):
"""
Args:
request (HTTPRequest): django request object
Returns:
bool: True if user is staff
"""
return request.user is not None and request.user.is_staff
def is_moderator(request, view):
"""
Helper function to check if a user is a moderator
Args:
request (HTTPRequest): django request object
view (APIView): a DRF view object
Returns:
bool: True if user is moderator on the channel
"""
user_api = request.channel_api
channel_name = view.kwargs.get("channel_name", None)
try:
return (
channel_name
and not request.user.is_anonymous
and user_api.is_moderator(channel_name, request.user.username)
)
except PrawForbidden:
# User was forbidden to list moderators so they are most certainly not one
return False
except PrawRedirect:
# if a redirect occurred, that means the user doesn't have any permissions
# for the subreddit and most definitely is not a moderator
return False
def channel_is_mod_editable(view):
"""
Helper function to check that a channel can be edited by a moderator on discussions.
Args:
view (APIView): a DRF view object
Returns:
bool:
True if the channel can be edited by a moderator. False if the channel does not exist or can only
be edited by a staff user from another server.
"""
channel_name = view.kwargs.get("channel_name")
managed = (
Channel.objects.filter(name=channel_name)
.values_list("membership_is_managed", flat=True)
.first()
)
# None means the channel does not exist, True means it does but we shouldn't edit it via REST API
return managed is False
def is_readonly(request):
"""
Returns True if the request uses a readonly verb
Args:
request (HTTPRequest): A request
Returns:
bool: True if the request method is readonly
"""
return request.method in permissions.SAFE_METHODS
class IsStaffPermission(permissions.BasePermission):
"""Checks the user for the staff permission"""
def has_permission(self, request, view):
"""Returns True if the user has the staff role"""
return is_staff_user(request)
class IsStaffOrReadonlyPermission(permissions.BasePermission):
"""Checks the user for the staff permission"""
def has_permission(self, request, view):
"""Returns True if the user has the staff role or if the request is readonly"""
return is_readonly(request) or is_staff_user(request)
class IsStaffOrModeratorPermission(permissions.BasePermission):
"""Checks that the user is either staff or a moderator"""
def has_permission(self, request, view):
"""Returns True if the user has the staff role or is a moderator"""
return channel_exists(view) and (
is_staff_user(request) or is_moderator(request, view)
)
class IsStaffModeratorOrReadonlyPermission(permissions.BasePermission):
"""Checks that the user is either staff, a moderator, or performing a readonly operation"""
def has_permission(self, request, view):
"""Returns True if the user has the staff role, is a moderator, or the request is readonly"""
return channel_exists(view) and (
is_readonly(request)
or is_staff_user(request)
or is_moderator(request, view)
)
class IsOwnSubscriptionOrAdminPermission(permissions.BasePermission):
"""
Checks that the user is (1) staff/moderator, (2) editing their own subscription, or (3) making
a readonly request
"""
@staticmethod
def is_own_resource_request(request, view):
"""Returns True if the request is on the user's own behalf"""
resource_owner_username = view.kwargs.get(
"subscriber_name", None
) or request.data.get("subscriber_name", None)
return resource_owner_username == request.user.username
def has_permission(self, request, view):
"""
Returns True if user is (1) staff/moderator, (2) editing their own subscription, or (3) making
a readonly request
"""
return (
is_readonly(request)
or self.is_own_resource_request(request, view)
or is_staff_user(request)
or is_moderator(request, view)
)
class ContributorPermissions(permissions.BasePermission):
"""
Only staff and moderators should be able to see and edit the list of contributors
"""
def has_permission(self, request, view):
if not channel_exists(view):
return False
# Allow self-delete
if (
request.method == "DELETE"
and view.kwargs.get("contributor_name", None) == request.user.username
):
return True
return is_staff_user(request) or (
(channel_is_mod_editable(view) or is_readonly(request))
and is_moderator(request, view)
)
class ModeratorPermissions(permissions.BasePermission):
"""
All users should be able to see a list of moderators. Only staff and moderators should be able to edit it.
"""
def has_permission(self, request, view):
return channel_exists(view) and (
is_readonly(request)
or is_staff_user(request)
or (channel_is_mod_editable(view) and is_moderator(request, view))
)
class AnonymousAccessReadonlyPermission(permissions.BasePermission):
"""Checks that the user is authenticated or is allowed anonymous access"""
def has_permission(self, request, view):
"""Is the user authenticated or allowed anonymous access?"""
if request.user.is_anonymous and not is_readonly(request):
return False
return True
class ReadOnly(permissions.BasePermission):
"""Allows read-only requests through for any user"""
def has_permission(self, request, view):
"""Return true if the request is read-only"""
return request.method in permissions.SAFE_METHODS
class ObjectOnlyPermissions(permissions.DjangoObjectPermissions):
"""Validates only object-level permissions"""
# NOTE: this is because DjangoObjectPermissions subclasses DjangoModelPermissions, which also checks permissions on models
def has_permission(self, request, view):
"""Ignores model-level permissions"""
return True
class PodcastFeatureFlag(permissions.BasePermission):
"""Forbids access if the podcast feature flag is not enabled"""
def has_permission(self, request, view):
"""Check that the feature flag is enabled"""
return features.is_enabled(features.PODCAST_APIS)
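# --- Hedged usage sketch (not part of the original module) ---
# Shows how one of the permission classes above is typically wired into a
# DRF view; "ChannelNamesView" and its payload are hypothetical, only
# AnonymousAccessReadonlyPermission and Channel come from this codebase.
from rest_framework.response import Response
from rest_framework.views import APIView
class ChannelNamesView(APIView):
    """Read-only listing that anonymous users are allowed to fetch."""
    permission_classes = (AnonymousAccessReadonlyPermission,)
    def get(self, request):
        # GET is a safe method, so the permission above lets anonymous users in.
        names = list(Channel.objects.values_list("name", flat=True))
        return Response({"channels": names})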
|
bsd-3-clause
| -8,327,566,343,972,762,000 | 31.523605 | 126 | 0.665215 | false |
LCAS/spqrel_tools
|
actions/soundtrack.py
|
1
|
2339
|
import qi
import argparse
import sys
import time
import threading
import math
import functools
import action_base
from action_base import *
import conditions
from conditions import get_condition
actionName = "soundtrack"
# typical values: distance = 1.0 m, confidence = 0.5
def actionThread_exec (params):
t = threading.currentThread()
memory_service = getattr(t, "mem_serv", None)
motion_service = getattr(t, "session", None).service("ALMotion")
session = getattr(t, "session", None)
print "Action "+actionName+" started with params "+params
values = params.split('_')
confidence_threshold = float(values[0])/100.0
distance_to_people = float(values[1])
time_to_rotate = int(values[2])
print "Confidence: " , confidence_threshold
print "Distance: " , distance_to_people
print "Time: " , time_to_rotate
# action init
tracker_service = session.service("ALTracker")
tracker_service.setMode("WholeBody")
tracker_service.registerTarget("Sound",[distance_to_people,confidence_threshold])
tracker_service.track("Sound")
# action init
val = False
while (getattr(t, "do_run", True) and (not val)):
#print "Action "+actionName+" "+params+" exec..."
# action exec
try:
sound_value = memory_service.getData("ALSoundLocalization/SoundLocated")
if len(sound_value)> 1 :
#print "confidence: ", sound_value[1][2]
confidence = sound_value[1][2]
if (confidence > confidence_threshold):
val = True
break
except:
pass
# action exec
time.sleep(0.25)
count = time_to_rotate * 10
while (getattr(t, "do_run", True) and val and count > 0):
time.sleep(.1)
count -= 1
# action end
tracker_service.stopTracker()
tracker_service.unregisterAllTargets()
action_success(actionName,params)
def init(session):
print actionName+" init"
action_base.init(session, actionName, actionThread_exec)
def quit():
print actionName+" quit"
actionThread_exec.do_run = False
if __name__ == "__main__":
app = action_base.initApp(actionName)
init(app.session)
#Program stays at this point until we stop it
app.run()
quit()
|
mit
| 5,987,243,401,159,304,000 | 23.621053 | 85 | 0.627619 | false |
davidam/python-examples
|
qt/filedialog.py
|
1
|
2594
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2018 David Arroyo Menéndez
# Author: David Arroyo Menéndez <[email protected]>
# Maintainer: David Arroyo Menéndez <[email protected]>
# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with GNU Emacs; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301 USA,
import sys
from PyQt5.QtWidgets import QApplication, QWidget, QInputDialog, QLineEdit, QFileDialog
from PyQt5.QtGui import QIcon
class App(QWidget):
def __init__(self):
super().__init__()
self.title = 'PyQt5 file dialogs - pythonspot.com'
self.left = 10
self.top = 10
self.width = 640
self.height = 480
self.initUI()
def initUI(self):
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height)
self.openFileNameDialog()
self.openFileNamesDialog()
self.saveFileDialog()
self.show()
def openFileNameDialog(self):
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
fileName, _ = QFileDialog.getOpenFileName(self,"QFileDialog.getOpenFileName()", "","All Files (*);;Python Files (*.py)", options=options)
if fileName:
print(fileName)
def openFileNamesDialog(self):
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
files, _ = QFileDialog.getOpenFileNames(self,"QFileDialog.getOpenFileNames()", "","All Files (*);;Python Files (*.py)", options=options)
if files:
print(files)
def saveFileDialog(self):
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
fileName, _ = QFileDialog.getSaveFileName(self,"QFileDialog.getSaveFileName()","","All Files (*);;Text Files (*.txt)", options=options)
if fileName:
print(fileName)
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = App()
sys.exit(app.exec_())
|
gpl-3.0
| -3,177,637,239,197,869,600 | 34.013514 | 145 | 0.671555 | false |
y4n9squared/HEtest
|
hetest/python/circuit_generation/stealth/stealth_circuit_object_test.py
|
1
|
3519
|
# *****************************************************************
# Copyright (c) 2013 Massachusetts Institute of Technology
#
# Developed exclusively at US Government expense under US Air Force contract
# FA8721-05-C-002. The rights of the United States Government to use, modify,
# reproduce, release, perform, display or disclose this computer software and
# computer software documentation in whole or in part, in any manner and for
# any purpose whatsoever, and to have or authorize others to do so, are
# Unrestricted and Unlimited.
#
# Licensed for use under the BSD License as described in the BSD-LICENSE.txt
# file in the root directory of this release.
#
# Project: SPAR
# Authors: SY
# Description: Stealth TA2 circuit object superclass test
#
# Modifications:
# Date Name Modification
# ---- ---- ------------
# 17 Oct 2012 SY Original version
# *****************************************************************
import stealth_circuit_object as sco
import unittest
class TestCircuitObject(unittest.TestCase):
def test_bad_init(self):
"""
Tests that initializing a circuit object with a None, an integer,
or an empty string as a displayname throws an error.
"""
self.assertRaises(TypeError,
sco.StealthCircuitObject, None)
self.assertRaises(TypeError,
sco.StealthCircuitObject, 1)
self.assertRaises(AssertionError,
sco.StealthCircuitObject, "")
def test_get_name(self):
"""
Tests that get_name returns the name of the object correctly.
"""
co1_name = "object1"
co1 = sco.StealthCircuitObject(co1_name)
self.assertEqual(co1.get_name(), co1_name)
def test_get_short_display_string(self):
"""
Tests that get_short_display_string returns the appropriate
representation string.
self.co1.get_short_display_string(x) where x can be interpreted as False
        (i.e. x = False, x = 0, etc) shoud give "object1",
self.co1.get_short_display_string(x) where x can be interpreted as True
(i.e. x = True, x = 1, etc) should give "N(object1)",
and self.co1.get_short_display_string(x) where x is anything that is not
equal to True or False should result in an AssertionError.
"""
co1_name = "object1"
co1 = sco.StealthCircuitObject(co1_name)
self.assertEqual(co1.get_short_display_string(False),
"object1")
self.assertEqual(co1.get_short_display_string(0),
"object1")
self.assertEqual(co1.get_short_display_string(True),
"N(object1)")
self.assertEqual(co1.get_short_display_string(1),
"N(object1)")
self.assertRaises(AssertionError,
co1.get_short_display_string, None)
self.assertRaises(AssertionError,
co1.get_short_display_string, "this_is_a_string")
def test_evaluate(self):
"""
Tests to see that calling evaluate from a CircuitObject (and not from
a subclass thereof) causes an AssertionError.
"""
co1_name = "object1"
co1 = sco.StealthCircuitObject(co1_name)
self.assertRaises(AssertionError, co1.evaluate)
if __name__ == '__main__':
    unittest.main()
|
bsd-2-clause
| -5,205,232,918,386,953,000 | 40.4 | 80 | 0.588804 | false |
internetimagery/timetrack
|
db.py
|
1
|
2889
|
# Persist data!
from __future__ import print_function
import collections
import contextlib
import sqlite3
import os.path
import uuid
import timestamp
UUID = str(uuid.uuid4())
class DB(object):
""" Access and store records in a DB. Manage updates. """
def __init__(s, path):
s.path = path
s.struct = collections.OrderedDict()
s.struct["id"] = "INTEGER PRIMARY KEY" # Entry ID
s.struct["checkin"] = "NUMBER" # Time entry was logged
s.struct["session"] = "TEXT" # ID for software session
s.struct["period"] = "NUMBER" # Period of time this chunk covers
s.struct["user"] = "TEXT" # Username
s.struct["software"] = "TEXT" # Software running
s.struct["file"] = "TEXT" # File loaded in software
s.struct["status"] = "TEXT" # Status of user (ie active/idle/etc)
s.struct["note"] = "TEXT" # Additional information
def __enter__(s):
""" Start context manager """
exist = os.path.isfile(s.path)
s.db = db = sqlite3.connect(s.path)
s.cursor = db.cursor()
if not exist:
s.cursor.execute("CREATE TABLE timesheet ({})".format(",".join("{} {}".format(a, s.struct[a]) for a in s.struct)))
def __exit__(s, exc_type, exc_val, exc_tb):
""" Close DB connection """
if not exc_type:
s.db.commit()
s.db.close()
def write(s, *values):
""" Write into DB stuff """
num = len(s.struct)
if len(values) != num:
raise RuntimeError("Not enough values provided.")
s.cursor.execute("INSERT INTO timesheet VALUES ({})".format(",".join("?" for _ in range(num))), values)
return s.cursor.lastrowid
def read(s, query, *values):
""" Read query and return formatted response """
return ({k: v for k, v in zip(s.struct, r)} for r in s.cursor.execute("SELECT * FROM timesheet WHERE ({}) ORDER BY checkin".format(query), values))
def poll(s, period, user, software, file, status, note=""):
""" Poll the database to show activity """
with s:
return s.write(None, timestamp.now(), UUID, period, user, software, file, status, note)
def read_all(s):
""" Quick way to grab all data from the database """
with s:
for row in s.read("id != 0"):
yield row
if __name__ == '__main__':
import test
import os
with test.temp(".db") as f:
os.unlink(f)
db = DB(f)
assert list(db.read_all()) == []
# Add entries
db.poll(1, "me", "python", "path/to/file", "active", "first entry")
db.poll(1, "you", "python", "path/to/file", "idle", "second entry")
db.poll(1, "us", "python", "path/to/file", "active", "last entry")
res = list(db.read_all())
assert len(res) == 3
assert len(res[0]) == len(db.struct)
|
gpl-3.0
| 2,935,258,421,061,812,700 | 36.519481 | 155 | 0.564555 | false |
ovnicraft/server-tools
|
keychain/tests/test_keychain.py
|
1
|
8467
|
# -*- coding: utf-8 -*-
# © 2016 Akretion Raphaël REVERDY
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo.tests.common import TransactionCase
from odoo.tools.config import config
from odoo.exceptions import ValidationError, UserError
import logging
_logger = logging.getLogger(__name__)
try:
from cryptography.fernet import Fernet
except ImportError as err:
_logger.debug(err)
class TestKeychain(TransactionCase):
def setUp(self):
super(TestKeychain, self).setUp()
self.keychain = self.env['keychain.account']
config['keychain_key'] = Fernet.generate_key()
self.old_running_env = config.get('running_env', '')
config['running_env'] = None
def _init_data(self):
return {
"c": True,
"a": "b",
"d": "",
}
def _validate_data(self, data):
return 'c' in data
keychain_clss = self.keychain.__class__
keychain_clss._keychain_test_init_data = _init_data
keychain_clss._keychain_test_validate_data = _validate_data
self.keychain._fields['namespace'].selection.append(
('keychain_test', 'test')
)
def tearDown(self):
config['running_env'] = self.old_running_env
return super(TestKeychain, self).tearDown()
def _create_account(self):
vals = {
"name": "test",
"namespace": "keychain_test",
"login": "test",
"technical_name": "keychain.test"
}
return self.keychain.create(vals)
def test_password(self):
"""It should encrypt passwords."""
account = self._create_account()
passwords = ('', '12345', 'djkqfljfqm', u"""&é"'(§è!ç""")
for password in passwords:
account.clear_password = password
account._inverse_set_password()
self.assertTrue(account.clear_password != account.password)
self.assertEqual(account._get_password(), password)
def test_wrong_key(self):
"""It should raise an exception when encoded key != decoded."""
account = self._create_account()
password = 'urieapocq'
account.clear_password = password
account._inverse_set_password()
config['keychain_key'] = Fernet.generate_key()
try:
account._get_password()
self.fail('It should not work with another key')
except UserError as err:
self.assertTrue(True, 'It should raise a UserError')
self.assertTrue(
'has been encrypted with a diff' in str(err),
'It should display the right msg')
else:
self.fail('It should raise a UserError')
def test_no_key(self):
"""It should raise an exception when no key is set."""
account = self._create_account()
del config.options['keychain_key']
with self.assertRaises(UserError) as err:
account.clear_password = 'aiuepr'
account._inverse_set_password()
self.fail('It should not work without key')
self.assertTrue(
'Use a key similar to' in str(err.exception),
'It should display the right msg')
def test_badly_formatted_key(self):
"""It should raise an exception when key is not acceptable format."""
account = self._create_account()
config['keychain_key'] = ""
with self.assertRaises(UserError):
account.clear_password = 'aiuepr'
account._inverse_set_password()
            self.fail('It should not work with a badly formatted key')
        self.assertTrue(True, 'It should raise a ValueError')
def test_retrieve_env(self):
"""Retrieve env should always return False at the end"""
config['running_env'] = False
self.assertListEqual(self.keychain._retrieve_env(), [False])
config['running_env'] = 'dev'
self.assertListEqual(self.keychain._retrieve_env(), ['dev', False])
config['running_env'] = 'prod'
self.assertListEqual(self.keychain._retrieve_env(), ['prod', False])
def test_multienv(self):
"""Encrypt with dev, decrypt with dev."""
account = self._create_account()
config['keychain_key_dev'] = Fernet.generate_key()
config['keychain_key_prod'] = Fernet.generate_key()
config['running_env'] = 'dev'
account.clear_password = 'abc'
account._inverse_set_password()
self.assertEqual(
account._get_password(),
'abc', 'Should work with dev')
config['running_env'] = 'prod'
with self.assertRaises(UserError):
self.assertEqual(
account._get_password(),
'abc', 'Should not work with prod key')
def test_multienv_blank(self):
"""Encrypt with blank, decrypt for all."""
account = self._create_account()
config['keychain_key'] = Fernet.generate_key()
config['keychain_key_dev'] = Fernet.generate_key()
config['keychain_key_prod'] = Fernet.generate_key()
config['running_env'] = ''
account.clear_password = 'abc'
account._inverse_set_password()
self.assertEqual(
account._get_password(),
'abc', 'Should work with dev')
config['running_env'] = 'prod'
self.assertEqual(
account._get_password(),
'abc', 'Should work with prod')
def test_multienv_force(self):
"""Set the env on the record"""
account = self._create_account()
account.environment = 'prod'
config['keychain_key'] = Fernet.generate_key()
config['keychain_key_dev'] = Fernet.generate_key()
config['keychain_key_prod'] = Fernet.generate_key()
config['running_env'] = ''
account.clear_password = 'abc'
account._inverse_set_password()
with self.assertRaises(UserError):
self.assertEqual(
account._get_password(),
'abc', 'Should not work with dev')
config['running_env'] = 'prod'
self.assertEqual(
account._get_password(),
'abc', 'Should work with prod')
def test_wrong_json(self):
"""It should raise an exception when data is not valid json."""
account = self._create_account()
wrong_jsons = ("{'hi':'o'}", "{'oq", '[>}')
for json in wrong_jsons:
with self.assertRaises(ValidationError) as err:
account.write({"data": json})
                self.fail('Should not validate badly formatted json')
self.assertTrue(
'Data should be a valid JSON' in str(err.exception),
'It should raise a ValidationError')
def test_invalid_json(self):
"""It should raise an exception when data don't pass _validate_data."""
account = self._create_account()
invalid_jsons = ('{}', '{"hi": 1}')
for json in invalid_jsons:
with self.assertRaises(ValidationError) as err:
account.write({"data": json})
self.assertTrue(
'Data not valid' in str(err.exception),
'It should raise a ValidationError')
def test_valid_json(self):
"""It should work with valid data."""
account = self._create_account()
valid_jsons = ('{"c": true}', '{"c": 1}', '{"a": "o", "c": "b"}')
for json in valid_jsons:
try:
account.write({"data": json})
self.assertTrue(True, 'Should validate json')
except:
self.fail('It should validate a good json')
def test_default_init_and_valid(self):
"""."""
self.keychain._fields['namespace'].selection.append(
('keychain_test_default', 'test')
)
account = self.keychain.create({
"name": "test",
"namespace": "keychain_test_default",
"login": "test",
"technical_name": "keychain.test"
})
try:
account.write({"login": "test default"})
except ValidationError:
self.fail('It should validate any json in default')
self.assertEqual(
account.data, account._serialize_data(
account._default_init_data()),
'Data should be default value')
|
agpl-3.0
| -4,873,580,351,542,346,000 | 33.96281 | 79 | 0.568373 | false |
Talvalin/server-client-python
|
tableauserverclient/models/connection_item.py
|
1
|
1810
|
import xml.etree.ElementTree as ET
from .. import NAMESPACE
class ConnectionItem(object):
def __init__(self):
self._datasource_id = None
self._datasource_name = None
self._id = None
self._connection_type = None
self.embed_password = None
self.password = None
self.server_address = None
self.server_port = None
self.username = None
@property
def datasource_id(self):
return self._datasource_id
@property
def datasource_name(self):
return self._datasource_name
@property
def id(self):
return self._id
@property
def connection_type(self):
return self._connection_type
@classmethod
def from_response(cls, resp):
all_connection_items = list()
parsed_response = ET.fromstring(resp)
all_connection_xml = parsed_response.findall('.//t:connection', namespaces=NAMESPACE)
for connection_xml in all_connection_xml:
connection_item = cls()
connection_item._id = connection_xml.get('id', None)
connection_item._connection_type = connection_xml.get('type', None)
connection_item.server_address = connection_xml.get('serverAddress', None)
connection_item.server_port = connection_xml.get('serverPort', None)
connection_item.username = connection_xml.get('userName', None)
datasource_elem = connection_xml.find('.//t:datasource', namespaces=NAMESPACE)
if datasource_elem is not None:
connection_item._datasource_id = datasource_elem.get('id', None)
connection_item._datasource_name = datasource_elem.get('name', None)
all_connection_items.append(connection_item)
return all_connection_items
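# --- Hedged usage sketch (not part of the original module) ---
# A minimal, hand-written response body for from_response(). It assumes the
# 't' prefix in NAMESPACE maps to the Tableau REST namespace used below;
# all ids, names and addresses are made-up values.
_SAMPLE_RESPONSE = b"""<?xml version='1.0' encoding='UTF-8'?>
<tsResponse xmlns="http://tableau.com/api">
  <connections>
    <connection id="conn-1" type="postgres" serverAddress="db.example.com"
                serverPort="5432" userName="report_user">
      <datasource id="ds-1" name="Sales"/>
    </connection>
  </connections>
</tsResponse>"""
# connections = ConnectionItem.from_response(_SAMPLE_RESPONSE)
# connections[0].server_address  # -> 'db.example.com'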
|
mit
| 7,449,999,205,610,892,000 | 35.2 | 93 | 0.630387 | false |
grow/grow-ext-build-server
|
grow_build_server/locale_redirect_middleware.py
|
1
|
3879
|
import os
class LocaleRedirectMiddleware(object):
def __init__(self, app, root, locales=None, default_locale=None):
self.app = app
self.root = root
self.default_locale = default_locale
if self.default_locale:
self.default_locale = self.default_locale.lower()
self.locales = locales or []
self.locales = [locale.lower() for locale in self.locales]
self.territories_to_identifiers = {}
for locale in self.locales:
territory = locale.split('_')[-1]
territory = territory.lower()
self.territories_to_identifiers[territory] = locale
def redirect(self, locale_start_response, url):
if url.endswith('/index.html'):
url = url[:-11]
url = '/{}'.format(url)
status = '302 Found'
response_headers = [('Location', url)]
locale_start_response(status, response_headers)
return []
def __call__(self, environ, start_response):
# Extract territory from URL. If the URL is localized, return.
# If it's not localized, check if a cookie is set.
# If a cookie is set already, don't do anything and serve the app.
# If no cookie, determine if there's a file on disk that matches
# the locale, set the cookie, and redirect.
url_path = environ['PATH_INFO'].lstrip('/')
locale_part = url_path.split('/', 1)[0]
locale_from_url = None
territory_from_url = None
# Do nothing if requesting a localized URL.
if locale_part in self.locales:
locale_from_url = locale_part
territory_from_url = locale_from_url.split('_')[-1]
def matched_locale_start_response(status, headers, exc_info=None):
headers.append(('Grow-Build-Server-Locale', locale_part))
return start_response(status, headers, exc_info)
return self.app(environ, matched_locale_start_response)
territory_from_header = environ.get('HTTP_X_APPENGINE_COUNTRY', '')
territory_from_header = territory_from_header.lower()
locale_from_header = \
self.territories_to_identifiers.get(territory_from_header, '')
locale_from_header = locale_from_header.lower()
def locale_start_response(status, headers, exc_info=None):
headers.append(('Grow-Build-Server-Locale', locale_from_header))
headers.append(('Grow-Build-Server-Territory', territory_from_header))
return start_response(status, headers, exc_info)
if not url_path:
url_path = 'index.html'
if url_path.endswith('/'):
url_path += '/index.html'
root_path_on_disk = os.path.join(self.root, url_path)
localized_path_on_disk = None
if locale_from_header:
localized_path_on_disk = os.path.join(
self.root, locale_from_header, url_path)
# Redirect the user if we have a localized file.
if locale_from_header and os.path.exists(localized_path_on_disk):
url = os.path.join(locale_from_header, url_path)
return self.redirect(locale_start_response, url)
# If no file is found at the current location, and if we have a file at
# a path corresponding to the default locale, redirect.
if self.default_locale:
default_localized_path_on_disk = os.path.join(
self.root, self.default_locale, url_path)
if not os.path.exists(root_path_on_disk) \
and os.path.exists(default_localized_path_on_disk):
url = os.path.join(self.default_locale, url_path)
return self.redirect(locale_start_response, url)
# Do nothing if user is in a country we don't have.
return self.app(environ, locale_start_response)
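# --- Hedged usage sketch (not part of the original module) ---
# Wraps a stand-in WSGI app with the middleware defined above; the build
# root, locale list and default locale are made-up values.
def _example_serve():
    from wsgiref.simple_server import make_server
    def static_app(environ, start_response):
        # Placeholder for the real static file server behind the middleware.
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'ok\n']
    app = LocaleRedirectMiddleware(static_app, '/var/www/site/build',
                                   locales=['de_de', 'fr_fr', 'ja_jp'],
                                   default_locale='en_us')
    make_server('', 8080, app).serve_forever()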
|
mit
| 661,384,384,769,322,600 | 44.104651 | 82 | 0.608404 | false |
des-testbed/des_chan
|
util.py
|
1
|
9343
|
#!/usr/bin/python
"""
DES-CHAN: A Framework for Channel Assignment Algorithms for Testbeds
This module holds utility functions that may be needed by different parts of the
framework (and for the algorithms build with the framework).
The module uses PgSQL - A PyDB-SIG 2.0 compliant module to access the PostgreSQL
database. The latter is included in the Debian package python-pgsql.
Authors: Matthias Philipp <[email protected]>,
Felix Juraschek <[email protected]>
Copyright 2008-2013, Freie Universitaet Berlin (FUB). All rights reserved.
These sources were developed at the Freie Universitaet Berlin,
Computer Systems and Telematics / Distributed, embedded Systems (DES) group
(http://cst.mi.fu-berlin.de, http://www.des-testbed.net)
-------------------------------------------------------------------------------
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program. If not, see http://www.gnu.org/licenses/ .
--------------------------------------------------------------------------------
For further information and questions please use the web site
http://www.des-testbed.net
"""
import sys
import time
import socket
import subprocess
import netifaces
from pythonwifi import iwlibs
from des_chan.error import *
def resolve_node_name(ip_address):
"""Resolves the name of the node to which the given IP address belongs.
"""
if ip_address == "localhost" or ip_address == "127.0.0.1":
return socket.gethostname()
try:
host_name = socket.gethostbyaddr(ip_address)[0]
except socket.herror:
raise CHANError("Unable to resolve node name (host name for IP %s unknown)" % ip_address)
node_name = host_name.split("-ch")
if node_name:
return node_name[0]
else:
raise CHANError("Unable to resolve node name (%s does not comply with DES naming conventions)" % host_name)
def get_node_ip(node_name, channel):
"""Returns the IP address of the wireless interface that belongs to the
given node and is tuned to the given channel.
"""
host_name = "%s-ch%s" % (node_name, channel)
try:
ip = socket.gethostbyname(host_name)
except socket.gaierror:
raise CHANError("Unable to get node IP (%s can not be resolved)" % host_name)
else:
return ip
def is_available(host):
"""Ping the supplied host and return if the host replied.
"""
retval = subprocess.call("ping -c5 %s" % host, shell=True)
return retval == 0
def is_interface_up(if_name):
"""Return if the supplied network interface is set up already.
"""
# is the interface name valid?
if if_name not in netifaces.interfaces():
raise CHANError("Unable to check if interface is up (invalid interface name %s)" % if_name)
# see if the interface name is listed in ifconfig
return subprocess.call("ifconfig | grep -q %s" % if_name, shell=True) == 0
def get_if_name(channel):
"""Returns the name of the interface that is tuned to the given channel.
"""
for if_name in iwlibs.getWNICnames():
try:
tuned_channel = iwlibs.Wireless(if_name).getChannel()
except IOError:
continue
if tuned_channel == channel and is_interface_up(if_name):
return if_name
raise CHANError("No interface tuned to channel %s and set up" % channel)
def get_free_if_name(if_names=None):
"""Check (and return) a so far unused network interface
"""
# if no interfaces are specified, use all wireless interfaces
if not if_names:
if_names = iwlibs.getWNICnames()
for if_name in if_names:
print if_name
if not is_interface_up(if_name):
return if_name
raise CHANError("No free interfaces left")
def channel(iface):
"""Returns the default channel per network interface.
"""
return {
'wlan0': 14,
'wlan1': 36,
'wlan2': 40
}.get(iface, 14)
def cell_id(iface):
"""Returns the default cell id per network interface.
"""
return {
'wlan0': '16:EB:FF:18:C8:6F',
'wlan1': '46:44:4B:28:57:41',
'wlan2': '8A:BF:D2:99:8B:45'
}.get(iface, 'aa:aa:aa:aa:aa:aa')
def set_up_interface(if_name):
"""Sets up an interface with ifconfig / iwconfig commands. This way, we are
independent of the settings in /etc/network/interfaces, which may change over time.
"""
chan = channel(if_name)
essid = cell_id(if_name)
# put iface down (just in case)
subprocess.call("ifconfig " + if_name + " down", shell=True)
subprocess.call("ifdown " + if_name, shell=True)
subprocess.call("iwconfig " + if_name + " mode ad-hoc", shell=True)
subprocess.call("iwconfig " + if_name + " essid des-mesh" + str(chan), shell=True)
subprocess.call("iwconfig " + if_name + " channel " + str(chan), shell=True)
subprocess.call("iwconfig " + if_name + " ap " + essid, shell=True)
subprocess.call("iwconfig " + if_name + " txpower auto", shell=True)
subprocess.call("iwconfig " + if_name + " rate 6M", shell=True)
subprocess.call("ifconfig " + if_name + " $(calc_ip " + if_name[-1] + ") netmask 255.255.0.0", shell=True)
# double check it the interface is up now
#if retval != 0 or not is_interface_up(if_name):
# raise CHANError("Unable to set up interface %s" % if_name)
def shut_down_interface(if_name):
"""Shuts down the given interface.
"""
subprocess.call("ifdown %s" % if_name, shell=True)
subprocess.call("ifconfig %s down" % if_name, shell=True)
if is_interface_up(if_name):
raise CHANError("Unable to shut down interface %s" % if_name)
def shut_down_interfaces(if_names=None):
"""Wrapper to shut down more than one interface.
"""
# if no interfaces are specified, use all wireless interfaces
if not if_names:
if_names = iwlibs.getWNICnames()
for if_name in if_names:
shut_down_interface(if_name)
def get_channel(if_name):
"""Returns the channel the given interface is currently operating on.
"""
if if_name not in iwlibs.getWNICnames():
raise CHANError("Unable to set channel (invalid wireless interface: %s)" % if_name)
interface = iwlibs.Wireless(if_name)
return interface.getChannel()
def set_channel(if_name, channel, set_ip=True):
"""Set the channel for the interface. This is a bit more complicated since we also have
to set the ESSID, Cell ID, and the IP according to /etc/hosts.
"""
if if_name not in iwlibs.getWNICnames():
raise CHANError("Unable to set channel (invalid wireless interface: %s)" % if_name)
interface = iwlibs.Wireless(if_name)
try:
print if_name, channel
interface.setChannel(channel)
    except (ValueError, IOError):
raise CHANError("Unable to set channel (invalid channel: %s)" % channel)
print "channel: %s" % interface.getChannel()
# essid
essid = "des-mesh-ch%d" % channel
interface.setEssid(essid)
print "essid: %s" % interface.getEssid()
if interface.getEssid() != essid:
raise CHANError("Unable to set channel (ESSID %s cannot be set)" % essid)
# ip address
if set_ip:
host_name = "%s-ch%d" % (resolve_node_name("localhost"), channel)
print host_name
ip = socket.gethostbyname(host_name)
print "ip: %s" % ip
retval = subprocess.call("ifconfig %s %s netmask 255.255.255.128" %
(if_name, ip), shell=True)
if retval != 0:
raise CHANError("Unable to set channel (IP %s cannot be assigned)"
% ip)
ap_addr = "02:00:00:00:00:%02X" % channel
interface.setAPaddr(ap_addr)
actual_ap_addr = interface.getAPaddr()
print "ap: %s" % actual_ap_addr
# double check
# if not (actual_ap_addr == ap_addr or actual_ap_addr == "00:00:00:00:00:00"):
# raise CHANError("Unable to set channel (AP address should be %s, but is %s)" % (ap_addr, actual_ap_addr))
def red(str):
"""Colors the given string red.
"""
return "\033[31m%s\033[0m" % str
def green(str):
"""Colors the given string green.
"""
return "\033[32m%s\033[0m" % str
def blue(str):
"""Colors the given string blue.
"""
return "\033[34m%s\033[0m" % str
def bold(str):
"""Colors the given string bold.
"""
return "\033[1m%s\033[0m" % str
def run(cmd):
"""Runs the given shell command and returns its output.
"""
return subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).communicate()[0]
def busy_wait(seconds):
"""Waits the specified number of seconds and prints nice dots to indicate
activity.
"""
for i in range(seconds):
print ".",
sys.stdout.flush()
time.sleep(1)
print
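# --- Hedged usage sketch (not part of the original module) ---
# Brings up a free wireless interface and retunes it using only the helpers
# defined above; channel 40 is an arbitrary example value.
def _example_retune():
    if_name = get_free_if_name()  # raises CHANError if nothing is left
    set_up_interface(if_name)
    set_channel(if_name, 40)
    busy_wait(3)  # give the driver a moment to settle
    print "%s is now on channel %d" % (if_name, get_channel(if_name))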
|
gpl-3.0
| -2,041,239,628,742,820,400 | 31.897887 | 115 | 0.640158 | false |
stornado/djwechat
|
djwechat/wallpaper/models.py
|
1
|
1331
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
# Create your models here.
@python_2_unicode_compatible
class Tag(models.Model):
name = models.CharField(verbose_name=_('Name'), max_length=25, db_index=True)
def __str__(self):
return self.name
class Meta:
verbose_name = _('Tag')
verbose_name_plural = _('Tags')
@python_2_unicode_compatible
class Image(models.Model):
title = models.CharField(verbose_name=_('title'), max_length=25, db_index=True)
url = models.URLField(verbose_name=_('URL'), unique=True)
tags = models.ManyToManyField(Tag, verbose_name=_('Tags'))
publishedAt = models.DateField(verbose_name=_('Published Date'),
auto_now_add=True)
uuid = models.CharField(verbose_name=_('UUID'), max_length=32, unique=True)
show = models.BooleanField(verbose_name=_('Show'),
help_text=_('Whether to show this image'),
default=True)
def __str__(self):
return self.title
class Meta:
ordering = ['title']
verbose_name = _('Image')
verbose_name_plural = _('Images')
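# --- Hedged usage sketch (not part of the original module) ---
# Shows how the two models above relate; the title, URL, UUID and tag name
# are made-up values and this assumes a configured Django environment
# (e.g. inside "python manage.py shell").
def _example_create_image():
    nature = Tag.objects.create(name='nature')
    image = Image.objects.create(
        title='Misty forest',
        url='https://example.com/wallpapers/misty-forest.jpg',
        uuid='0f8fad5bd9cb469fa165708677289da0')
    image.tags.add(nature)
    return image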
|
apache-2.0
| 7,220,403,460,134,093,000 | 32.275 | 83 | 0.616829 | false |
RCOS-Grading-Server/HWserver
|
sample_files/sample_CSV/verify.py
|
2
|
2622
|
def parse_assigned_zones():
allowed_str = 'ABCDEFGHJKLMNPUZ'
assigned_zone_dict = {}
with open('exam1_seating.txt', 'r') as assigned:
for line in assigned:
line = line.strip()
line_list = line.split(' ')
line_list = [ line_.strip() for line_ in line_list ]
line_list = [ line_ for line_ in line_list if len(line_) > 0 ]
if len(line_list) == 3:
assigned_zone = 'U'
elif len(line_list) == 6:
assigned_zone = line_list[-1]
else:
assigned_zone = line_list[-2]
if assigned_zone == 'UNASSIGNED':
assigned_zone = None
assert assigned_zone is None or assigned_zone in allowed_str
student_rcs = line_list[2]
assigned_zone_dict[student_rcs] = assigned_zone
return assigned_zone_dict
def get_actual_zone_dict():
actual_dict = {}
showed_dict = {}
assigned_zone_dict = parse_assigned_zones()
direct_list = ['CSCI_1100_Exam_1']
for direct in direct_list:
with open('%s/9_Zone_Assignment.csv' % (direct, ), 'r') as zones:
# Get header row contents
header_str = zones.readline()
header_list = header_str.strip().split(',')[6: -3]
line_list = zones.readlines()
# Trim last three rows
line_list = line_list[:-3]
for index, line in enumerate(line_list):
line = line.strip()
if len(line) > 0:
record = line.split(',')
student_name = record[1]
student_rcs = record[2]
assigned_zone = assigned_zone_dict[student_rcs]
actual_list = record[6: -3]
actual_index = actual_list.index('true')
actual_zone = header_list[actual_index]
actual_dict[student_rcs] = actual_zone
if assigned_zone == actual_zone:
if assigned_zone not in showed_dict:
showed_dict[assigned_zone] = 0
showed_dict[assigned_zone] += 1
else:
print('%s (%s)' % (student_name, student_rcs, ))
print('\tAssigned: %s' % (assigned_zone, ))
print('\tActual: %s' % (actual_zone, ))
for key in sorted(showed_dict.keys()):
print('Zone % 2s: %d' % (key, showed_dict[key]))
return actual_dict
if __name__ == '__main__':
get_actual_zone_dict()
|
bsd-3-clause
| -8,098,178,319,314,803,000 | 34.432432 | 74 | 0.493516 | false |
TudorRosca/enklave
|
server/backend/migrations/0039_raider.py
|
1
|
1181
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('backend', '0038_resetpasswordtoken'),
]
operations = [
# migrations.CreateModel(
# name='Raider',
# fields=[
# ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
# ('latitude', models.FloatField()),
# ('longitude', models.FloatField()),
# ('bearing', models.SmallIntegerField(default=100)),
# ('level', models.IntegerField(default=0)),
# ('energy', models.IntegerField(default=0)),
# ('status', models.SmallIntegerField(default=0)),
# ('created_at', models.DateTimeField(auto_now_add=True)),
# ('updated_at', models.DateTimeField(auto_now=True)),
# ('deleted_at', models.DateTimeField(default=None, null=True, blank=True)),
# ('enklave', models.ForeignKey(to='backend.Enklave', db_constraint=False)),
# ],
# ),
]
|
agpl-3.0
| -2,974,276,659,569,629,700 | 38.366667 | 116 | 0.551228 | false |
Diiaablo95/friendsNet
|
test/services_api_test_user_tags.py
|
1
|
6207
|
import unittest
import json
import flask
import friendsNet.resources as resources
import friendsNet.database as database
DB_PATH = 'db/friendsNet_test.db'
ENGINE = database.Engine(DB_PATH)
COLLECTION_JSON = "application/vnd.collection+json"
STATUS_PROFILE = "/profiles/status-profile"
#Tell Flask that I am running it in testing mode.
resources.app.config['TESTING'] = True
#Necessary for correct translation in url_for
resources.app.config['SERVER_NAME'] = 'localhost:5000'
#Database Engine utilized in our testing
resources.app.config.update({'Engine': ENGINE})
class ResourcesAPITestCase(unittest.TestCase):
#INITIATION AND TEARDOWN METHODS
@classmethod
def setUpClass(cls):
''' Creates the database structure. Removes first any preexisting database file.'''
print "Testing ", cls.__name__
ENGINE.remove_database()
ENGINE.create_tables()
@classmethod
def tearDownClass(cls):
'''Remove the testing database.'''
print "Testing ENDED for ", cls.__name__
ENGINE.remove_database()
def setUp(self):
'''Populates the database.'''
#This method loads the initial values from friendsNet_data_db.sql
ENGINE.populate_tables()
#Activate app_context for using url_for
self.app_context = resources.app.app_context()
self.app_context.push()
#Create a test client
self.client = resources.app.test_client()
def tearDown(self):
'''
Remove all records from database.
'''
ENGINE.clear()
self.app_context.pop()
class UserTagsTestCase (ResourcesAPITestCase):
resp_get = {
"collection" : {
"version" : "1.0",
"href" : "/friendsNet/api/users/4/tags/",
"links" : [
{"href" : "/friendsNet/api/users/4/profile/", "rel" : "tag", "prompt" : "User profile"}
],
"items" : [
{
"href" : "/friendsNet/api/statuses/1/",
"data" : [
{"name" : "id", "value" : 1, "prompt" : "Status id"},
{"name" : "user_id", "value" : 1, "prompt" : "Creator id"},
{"name" : "content", "value" : "Good morning!", "prompt" : "Status content"},
{"name" : "creation_time", "value" : 50, "prompt" : "Status creation time"}
],
"links" : [
{"href" : "/friendsNet/api/statuses/1/comments/", "rel" : "status comments", "prompt" : "Status comments"},
{"href" : "/friendsNet/api/statuses/1/rates/", "rel" : "status rates", "prompt" : "Status rates"},
{"href" : "/friendsNet/api/statuses/1/tags/", "rel" : "status tags", "prompt" : "Status tags"},
{"href" : "/friendsNet/api/statuses/1/media/", "rel" : "media list", "prompt" : "Status media items"}
]
}
]
}
}
resp_get_empty = {
"collection" : {
"version" : "1.0",
"href" : "/friendsNet/api/users/6/tags/",
"links" : [
{"href" : "/friendsNet/api/users/6/profile/", "rel" : "tag", "prompt" : "User profile"}
],
"items" : []
}
}
def setUp(self):
super(UserTagsTestCase, self).setUp()
self.url = resources.api.url_for(resources.User_tags, user_id = 4, _external = False)
self.url_empty = resources.api.url_for(resources.User_tags, user_id = 6, _external = False)
self.url_wrong = resources.api.url_for(resources.User_tags, user_id = 999, _external = False)
def test_url(self):
#Checks that the URL points to the right resource
_url = '/friendsNet/api/users/6/tags/'
print '('+self.test_url.__name__+')', self.test_url.__doc__
with resources.app.test_request_context(_url):
rule = flask.request.url_rule
view_point = resources.app.view_functions[rule.endpoint].view_class
self.assertEquals(view_point, resources.User_tags)
def test_wrong_url(self):
resp = self.client.get(self.url_wrong, headers = {"Accept" : COLLECTION_JSON})
self.assertEquals(resp.status_code, 404)
data = json.loads(resp.data)["collection"]
version = data["version"] #test VERSION
self.assertEquals(version, self.resp_get["collection"]["version"])
href = data["href"] #test HREF
self.assertEquals(href, self.url_wrong)
error = data["error"]
self.assertEquals(error["code"], 404)
#TEST GET
#200 + MIMETYPE & PROFILE
def test_get_tags(self):
print '('+self.test_get_tags.__name__+')', self.test_get_tags.__doc__
with resources.app.test_client() as client:
resp = client.get(self.url, headers = {"Accept" : COLLECTION_JSON})
self.assertEquals(resp.status_code, 200)
data = json.loads(resp.data)
self.assertEquals(self.resp_get, data)
self.assertEqual(resp.headers.get("Content-Type", None), COLLECTION_JSON + ";profile=" + STATUS_PROFILE)
#EMPTY ITEMS
def test_get_empty_tags(self):
print '('+self.test_get_empty_tags.__name__+')', self.test_get_empty_tags.__doc__
with resources.app.test_client() as client:
resp = client.get(self.url_empty, headers = {"Accept" : COLLECTION_JSON})
self.assertEquals(resp.status_code, 200)
data = json.loads(resp.data)
self.assertEquals(self.resp_get_empty, data)
self.assertEqual(resp.headers.get("Content-Type", None), COLLECTION_JSON + ";profile=" + STATUS_PROFILE)
#404
def test_get_not_existing_user(self):
print '('+self.test_get_not_existing_user.__name__+')', self.test_get_not_existing_user.__doc__
with resources.app.test_client() as client:
resp = client.get(self.url_wrong, headers = {"Accept" : COLLECTION_JSON})
self.assertEquals(resp.status_code, 404)
if __name__ == '__main__':
print 'Start running tests'
unittest.main()
|
gpl-3.0
| -3,934,080,413,662,253,000 | 39.051613 | 131 | 0.571774 | false |
vargheseg5/CultRegDesk
|
regdesk/regdesk/urls.py
|
1
|
1157
|
"""regdesk URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from login.views import default, lo
urlpatterns = [
url(r'^$', default, name='default'),
url(r'^admin/', admin.site.urls),
url(r'^login/', include('login.urls', namespace="login")),
url(r'^add_event/', include('add_event.urls', namespace="add_event")),
url(r'^logout/', lo, name='logout'),
url(r'^register/', include('register.urls', namespace="register")),
url(r'^home/', include('home.urls', namespace="home")),
]
|
lgpl-3.0
| 8,424,410,044,104,002,000 | 40.321429 | 79 | 0.678479 | false |
JacekPierzchlewski/RxCS
|
examples/reconstruction/L1recon_ex1.py
|
1
|
10853
|
"""
This script is an example of how to use the L1 optimization reconstruction
module (regularized regression scheme). |br|
Firstly, a multitone signal is generated. The signal model is as follows.
There are two random tones randomly distributed in the spectrum
from 11kHz to 20kHz and two random tones randomly distributed
in the spectrum from 31kHz to 40kHz.
o
o |
o | |
| | o |
| | | |
| | | |
----11kHz<----------->20kHz------\\--------31kHz<----------->40kHz-->
f [kHz]
The signal is nonuniformly sampled and reconstructed with the L1 reconstruction
module from the RxCS toolbox. The module performs the regularized regression
optimization scheme and uses the external 'cvxopt' toolbox. |br|
After the signal generation and sampling, the original signal, the observed
samples, and the reconstructed signal are plotted in both the time domain and
the frequency domain |br|
*Author*:
Jacek Pierzchlewski, Aalborg University, Denmark. <[email protected]>
*Version*:
1.0 | 18-SEP-2014 : * Version 1.0 released. |br|
2.0 | 25-AUG-2015 : * Adjusted to the version 2.0 of the L1 solver |br|
2.1 | 25-AUG-2015 : * New file name and improvements in header |br|
*License*:
BSD 2-Clause
"""
from __future__ import division
import numpy as np
import rxcs
import matplotlib.pyplot as plt
def _L1_recon_ex1():
# ---------------------------------------------------------------------
# Settings for the example
# ---------------------------------------------------------------------
# General settings:
TIME = 1e-3 # time of the signal is 1 ms
FSMP = 2e6 # signal representation sampling frequency 2MHz
# Signals:
FDELTA = 1e3 # tone separation in both signals is 1kHz
FMIN1 = 11e3 # spectrum #1: 11kHz <-> 20kHz
FMAX1 = 20e3 # ^
    NTNS1 = 2 # the number of tones is 2 in the spectrum (11kHz <-> 20kHz)
FMIN2 = 31e3 # spectrum #2: 31kHz <-> 40kHz
FMAX2 = 40e3 # ^
NTNS2 = 2 # the number of tones is 2 in the spectrum (31kHz <-> 40kHz)
POWER = 1 # Power of the signal is 1 W
# Sampler:
GRIDT = 1e-6 # sampling grid period is 1 us
FSAMP = 25e3 # the average sampling frequency is 25 kHz
# Things on the board:
gen1 = rxcs.sig.randMult() # Signal generator #1 - for the 1st part of the spectrum
gen2 = rxcs.sig.randMult() # Signal generator #2 - for the 2nd part of the spectrum
samp = rxcs.acq.nonuniANGIE() # Sampler
IDFT = rxcs.cs.dict.IDFT() # IDFT dictionary generator
makeTheta = rxcs.cs.makeTheta() # Theta matrix generator
L1recon = rxcs.cs.cvxoptL1() # L1 reconstruction
analysisSNR = rxcs.ana.SNR() # SNR analysis
# ---------------------------------------------------------------------
# Generate the original signals
# ---------------------------------------------------------------------
# Settings for the generator #1
gen1.tS = TIME # time
gen1.fR = FSMP # sig. representation sampling frequency
gen1.fRes = FDELTA # tone separation
gen1.fMin = FMIN1 # Spectrum #1
gen1.fMax = FMAX1 # ^
gen1.nTones = NTNS1 # ^
gen1.iP = POWER/2 # power
# Settings for the generator #2
gen2.tS = TIME # time
gen2.fR = FSMP # sig. representation sampling frequency
gen2.fRes = FDELTA # tone separation
    gen2.fMin = FMIN2 # Spectrum #2
gen2.fMax = FMAX2 # ^
gen2.nTones = NTNS2 # ^
gen2.iP = POWER/2 # power
# ------------------------
gen1.run() # run the generators
gen2.run() # ^
mSig = gen1.mSig + gen2.mSig # the original signal is a sum of the two generated signals
# ---------------------------------------------------------------------
# Sample the original signals
# ---------------------------------------------------------------------
# Settings for the sampler
samp.tS = TIME # time of the signal
samp.fR = FSMP # the signal representation sampling freuqnecy
samp.Tg = GRIDT # the sampling grid period
samp.fSamp = FSAMP # the average sampling frequency
samp.tMin = 5e-6 # minimum time between sampling points
# ------------------------
samp.mSig = mSig # connect the original signal to the sampler
samp.run() # run the sampler
# -----------------------------------------------------------------
# Reconstruct the signal
# -----------------------------------------------------------------
# Generate the IDFT dictionaries:
# dictionary #1
IDFT.tS = TIME # time of the dictionaries
IDFT.fR = FSMP # representation sampling frequency
IDFT.fDelta = FDELTA # the frequency separation between tones
IDFT.fFirst = FMIN1 # minimum frequency in the dictionary
IDFT.nTones = int((FMAX1 - FMIN1) / FDELTA) # the number of tones
IDFT.run()
mDict1 = IDFT.mDict.copy() # Take the dictionary matrix for the 1st part
# of the spectrum
# dictionary #2
IDFT.fFirst = FMIN2 # minimum frequency in the dictionary
IDFT.nTones = int((FMAX2 - FMIN2) / FDELTA) # the number of tones
IDFT.run()
mDict2 = IDFT.mDict.copy() # Take the dictionary matrix for the 2nd part
# of the spectrum
# Concatenate the dictionary matrices
(nRows, _) = mDict1.shape # Cut down the dictionary matrix #1
mDict1 = mDict1[np.arange(int(nRows/2)), :] # ^
(nRows, _) = mDict2.shape # Cut down the dictionary matrix #2
mDict2 = mDict2[np.arange(int(nRows/2)), :] # ^
mDict = np.vstack((mDict1, mDict2))
# Compute the Theta matrix
makeTheta.lPhi = samp.lPhi # # Add the observation matrix
makeTheta.lDict = [mDict.T] # Add the dictionary
makeTheta.run()
# Run the L1 minimization - generate signal coefficients
vObSig = samp.mObSig[0, :] # the observed signal
L1recon.lTheta = makeTheta.lTheta # add the Theta matrix
L1recon.lObserved = [vObSig] # add the observed signals
L1recon.iK = 0.1 # add the 'k' parameter
L1recon.bComplex = 1 # add info that the problem contains complex
# numbers
L1recon.run() # Run the reconstruction
# Reconstruct the signal using the found signal coefficients and the dictionary
vCoeff = L1recon.lCoeff[0]
vSigRecon = np.dot(mDict.T, vCoeff)
vSigRecon = vSigRecon.real
# -----------------------------------------------------------------
# Measure the SNR of the reconstruction
# -----------------------------------------------------------------
mSigNN = gen1.mSigNN + gen2.mSigNN # the original reference (unnoisy signal)
    analysisSNR.mSigRef = mSigNN # non-noisy signal from the generator is the reference signal
analysisSNR.mSig = vSigRecon # reconstructed signal is a signal under test
analysisSNR.run() # run the reconstruction
# ---------------------------------------------------------------------
plot(gen1, gen2, mSig, samp, vSigRecon)
plt.show(block=True)
def plot(gen1, gen2, mSig, samp, vSigRecon):
"""
This function takes care of plotting the results.
"""
# ---------------------------------------------------------------------
# Plot the original signal, reconstructed signal and signal samples
# in the time domain
# ---------------------------------------------------------------------
vSig = mSig[0, :] # Get the original signal
vT = gen1.vTSig # Get the signal time vector
vObSig = samp.mObSig[0, :] # The observed signal
mPhi = samp.lPhi[0] # Take the observation matrix from the sampler
# Plot the figure
hFig1 = plt.figure(1)
hSubPlot1 = hFig1.add_subplot(111)
hSubPlot1.plot(vT, vSig, 'g-', label="original sig")
hSubPlot1.plot(vT, vSigRecon, 'b--', label="reconstructed sig")
hSubPlot1.plot(np.dot(mPhi,vT), vObSig,
'*r', label="observed samps", markersize=10)
hSubPlot1.set_xlabel('time')
hSubPlot1.set_title('Time domain')
hSubPlot1.grid(True)
hSubPlot1.legend(loc="best")
# ---------------------------------------------------------------------
# Plot the original signal, reconstructed signal in the frequency domain
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
# Original signal
vFFT = np.fft.fft(vSig) # Analyze the spectrum of the original signal
iS = vFFT.size # Get the size of the spectrum
vFFTa = 2 * np.abs(vFFT[np.arange(iS // 2)]) / iS # Get the amps
# Reconstructed signal
vFFTR = np.fft.fft(vSigRecon) # Analyze the spectrum of the reconstructed signal
iS = vFFTR.size # Get the size of the spectrum
vFFTRa = 2 * np.abs(vFFTR[np.arange(iS // 2)]) / iS # Get the amps
# Create a vector with frequencies of the signal spectrum
fFFTR = gen1.fFFTR # Signal FFT frequency resolution
vF = fFFTR * np.arange(iS // 2)
# ---------------------------------------------------------------------
# Plot half of the spectrum - original signal
hFig2 = plt.figure(2)
hSubPlot2 = hFig2.add_subplot(111)
# Original signal
(mo, so, _) = hSubPlot2.stem(vF, vFFTa, markerfmt='o', basefmt='g-', label="original signal")
plt.setp(so, color='g', linewidth=2.0)
plt.setp(mo, color='g', markersize=10.0)
# Reconstructed signal
(mr, sr, _) = hSubPlot2.stem(vF, vFFTRa, markerfmt='x', basefmt='b-', label="reconstructed signal")
plt.setp(sr, color='b', linewidth=2.0)
plt.setp(mr, color='b', markersize=10.0)
hSubPlot2.grid(True)
hSubPlot2.set_xlabel('Frequency [Hz]')
hSubPlot2.set_xlim(-1*1e3, 51*1e3)
hSubPlot2.set_ylim(-0.1, 3.1)
hSubPlot2.set_title('Frequency domain')
hSubPlot2.legend(loc="best")
# =====================================================================
# Trigger when start as a script
# =====================================================================
if __name__ == '__main__':
_L1_recon_ex1()
|
bsd-2-clause
| -4,270,055,851,621,958,000 | 39.954717 | 103 | 0.518658 | false |
tensorflow/model-analysis
|
tensorflow_model_analysis/evaluators/analysis_table_evaluator.py
|
1
|
5069
|
# Lint as: python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Public API for creating analysis table."""
from __future__ import absolute_import
from __future__ import division
# Standard __future__ imports
from __future__ import print_function
from typing import Any, Dict, Iterable, Optional, Text, Union
import apache_beam as beam
from tensorflow_model_analysis import constants
from tensorflow_model_analysis import types
from tensorflow_model_analysis.evaluators import evaluator
from tensorflow_model_analysis.extractors import extractor
def AnalysisTableEvaluator( # pylint: disable=invalid-name
key: Text = constants.ANALYSIS_KEY,
run_after: Text = extractor.LAST_EXTRACTOR_STAGE_NAME,
include: Optional[Union[Iterable[Text], Dict[Text, Any]]] = None,
exclude: Optional[Union[Iterable[Text],
Dict[Text, Any]]] = None) -> evaluator.Evaluator:
"""Creates an Evaluator for returning Extracts data for analysis.
If both include and exclude are None then tfma.INPUT_KEY extracts will be
excluded by default.
Args:
key: Name to use for key in Evaluation output.
run_after: Extractor to run after (None means before any extractors).
include: List or map of keys to include in output. Keys starting with '_'
are automatically filtered out at write time. If a map of keys is passed
then the keys and sub-keys that exist in the map will be included in the
output. An empty dict behaves as a wildcard matching all keys or the value
itself. Since matching on feature values is not currently supported, an
empty dict must be used to represent the leaf nodes.
For example: {'key1': {'key1-subkey': {}}, 'key2': {}}.
exclude: List or map of keys to exclude from output. If a map of keys is
passed then the keys and sub-keys that exist in the map will be excluded
from the output. An empty dict behaves as a wildcard matching all keys or
the value itself. Since matching on feature values is not currently
supported, an empty dict must be used to represent the leaf nodes.
For example: {'key1': {'key1-subkey': {}}, 'key2': {}}.
Returns:
Evaluator for collecting analysis data. The output is stored under the key
'analysis'.
Raises:
ValueError: If both include and exclude are used.
"""
# pylint: disable=no-value-for-parameter
return evaluator.Evaluator(
stage_name='EvaluateExtracts',
run_after=run_after,
ptransform=EvaluateExtracts(key=key, include=include, exclude=exclude))
# pylint: enable=no-value-for-parameter
@beam.ptransform_fn
@beam.typehints.with_input_types(types.Extracts)
@beam.typehints.with_output_types(Any)
def EvaluateExtracts( # pylint: disable=invalid-name
extracts: beam.pvalue.PCollection,
key: Text = constants.ANALYSIS_KEY,
include: Optional[Union[Iterable[Text], Dict[Text, Any]]] = None,
exclude: Optional[Union[Iterable[Text],
Dict[Text, Any]]] = None) -> evaluator.Evaluation:
"""Creates Evaluation output for extracts.
If both include and exclude are None then tfma.INPUT_KEY extracts will be
excluded by default.
Args:
extracts: PCollection of Extracts.
key: Name to use for key in Evaluation output.
include: List or map of keys to include in output. Keys starting with '_'
are automatically filtered out at write time. If a map of keys is passed
then the keys and sub-keys that exist in the map will be included in the
output. An empty dict behaves as a wildcard matching all keys or the value
itself. Since matching on feature values is not currently supported, an
empty dict must be used to represent the leaf nodes.
For example: {'key1': {'key1-subkey': {}}, 'key2': {}}.
exclude: List or map of keys to exclude from output. If a map of keys is
passed then the keys and sub-keys that exist in the map will be excluded
from the output. An empty dict behaves as a wildcard matching all keys or
the value itself. Since matching on feature values is not currently
supported, an empty dict must be used to represent the leaf nodes.
For example: {'key1': {'key1-subkey': {}}, 'key2': {}}.
Returns:
Evaluation containing PCollection of Extracts.
"""
if include is None and exclude is None:
exclude = [constants.INPUT_KEY]
filtered = extracts
if include or exclude:
filtered = extracts | extractor.Filter(include=include, exclude=exclude)
return {key: filtered}
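# --- Illustrative sketch (editor addition, not part of the upstream TFMA code) ---
# Shows how the include map described in the docstrings above is typically
# shaped. The extract key names ('features', 'labels') are hypothetical and
# depend on which extractors ran earlier in the pipeline.
def _example_analysis_evaluator():  # pragma: no cover
  return AnalysisTableEvaluator(
      key=constants.ANALYSIS_KEY,
      include={'features': {}, 'labels': {}})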
|
apache-2.0
| 8,333,739,888,524,670,000 | 43.858407 | 80 | 0.717696 | false |
kk47/Python
|
django/td2.0/settings.py
|
1
|
2483
|
import os,sys
DEBUG = True
TEMPLATE_DEBUG = DEBUG
SITE_ROOT = os.path.realpath(os.path.dirname(__file__))
HERE=os.path.dirname(os.path.dirname(__file__))
ADMINS = (
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'monit',
'USER': 'monit',
'PASSWORD': '123456',
'HOST': '',
'PORT': '',
}
}
ALLOWED_HOSTS = []
TIME_ZONE = 'Asia/Shanghai'
LANGUAGE_CODE = 'zh-CN'
SITE_ID = 1
USE_I18N = True
USE_L10N = True
USE_TZ = True
MEDIA_ROOT = ''
MEDIA_URL = ''
STATIC_ROOT = os.path.join(os.path.dirname(__file__), 'static').replace('\\','/')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(HERE,'app/static/').replace('\\','/'),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
SECRET_KEY = 'hyc*qbz@t!$)ki-6d3+dj5!0(=@gk3q14u9&som!v_*ocpi8yi'
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
SESSION_COOKIE_AGE=60*300
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'urls'
SITE_ROOT = os.path.realpath(os.path.dirname(__file__))
TEMPLATE_DIRS = (
os.path.join(SITE_ROOT, 'templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'app',
'south',
)
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
lgpl-3.0
| 207,309,780,206,392,060 | 20.591304 | 81 | 0.595248 | false |
inercia/evy
|
evy/patched/dns.py
|
1
|
2469
|
#!/usr/bin/env python
# Portions of this code taken from the gogreen project:
# http://github.com/slideinc/gogreen
#
# Copyright (c) 2005-2010 Slide, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the author nor the names of other
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Non-blocking DNS support for Evy
"""
from evy import patcher
from evy.patched import _socket_nodns
from evy.patched import time
from evy.patched import select
dns = patcher.import_patched('dns',
socket = _socket_nodns,
time = time,
select = select)
for pkg in ('dns.query', 'dns.exception', 'dns.inet', 'dns.message',
'dns.rdatatype', 'dns.resolver', 'dns.reversename'):
setattr(dns, pkg.split('.')[1], patcher.import_patched(pkg,
socket = _socket_nodns,
time = time,
select = select))
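# --- Illustrative sketch (editor addition, not part of the Evy source) ---
# After the patching above the module is used like the regular dnspython API,
# but the sockets it opens are green-thread friendly; 'example.com' is just a
# placeholder name.
def _example_lookup():
    return dns.resolver.query('example.com', 'A')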
|
mit
| -8,872,464,855,560,028,000 | 43.089286 | 82 | 0.664642 | false |
mfalesni/cfme_tests
|
cfme/storage/manager.py
|
1
|
7210
|
# -*- coding: utf-8 -*-
import attr
from navmazing import NavigateToAttribute, NavigateToSibling
from widgetastic.utils import VersionPick, Version
from widgetastic.widget import View, NoSuchElementException, Text
from widgetastic_patternfly import (
Button,
Dropdown
)
from cfme.base.ui import BaseLoggedInPage
from cfme.common import TagPageView, Taggable, PolicyProfileAssignable
from cfme.exceptions import ItemNotFound
from cfme.modeling.base import BaseCollection, BaseEntity
from cfme.utils.appliance.implementations.ui import CFMENavigateStep, navigator, navigate_to
from widgetastic_manageiq import (
Accordion,
BreadCrumb,
ManageIQTree,
PaginationPane,
SummaryTable,
Table
)
class StorageManagerToolbar(View):
"""The toolbar on the Storage Manager or Provider page"""
configuration = Dropdown('Configuration')
policy = Dropdown('Policy')
class StorageManagerDetailsToolbar(View):
"""The toolbar on the Storage Manager or Provider detail page"""
reload = Button(title=VersionPick({Version.lowest(): 'Reload current display',
'5.9': 'Refresh this page'}))
configuration = Dropdown('Configuration')
policy = Dropdown('Policy')
monitoring = Dropdown('Monitoring')
download = Button(title='Download summary in PDF format')
class StorageManagerEntities(View):
"""The entities on the main list Storage Manager or Provider page"""
table = Table(".//div[@id='list_grid' or @class='miq-data-table']/table")
class StorageManagerDetailsEntities(View):
"""The entities on the Storage Manager or Provider details page"""
breadcrumb = BreadCrumb()
properties = SummaryTable('Properties')
relationships = SummaryTable('Relationships')
smart_management = SummaryTable('Smart Management')
status = SummaryTable('Status')
class StorageManagerDetailsAccordion(View):
"""The accordion on the Storage Manager or Provider details page"""
@View.nested
class properties(Accordion): # noqa
tree = ManageIQTree()
@View.nested
class relationships(Accordion): # noqa
tree = ManageIQTree()
class StorageManagerView(BaseLoggedInPage):
"""A base view for all the Storage Manager or Provider pages"""
title = Text('.//div[@id="center_div" or @id="main-content"]//h1')
@property
def in_manager(self):
navigation_path = self.context['object'].navigation_path
return(
self.logged_in_as_current_user and
self.navigation.currently_selected == navigation_path)
class StorageManagerAllView(StorageManagerView):
"""The all Storage Manager or Provider page"""
@property
def is_displayed(self):
return (
self.in_manager and
self.title.text in ('Storage Managers', self.context['object'].manager_type))
toolbar = View.nested(StorageManagerToolbar)
entities = View.nested(StorageManagerEntities)
paginator = PaginationPane()
class ProviderStorageManagerAllView(StorageManagerAllView):
@property
def is_displayed(self):
return (
self.logged_in_as_current_user and
self.navigation.currently_selected == ['Compute', 'Clouds', 'Providers'] and
self.title.text == '{} (All Storage Managers)'.format(self.context['object'].name)
)
class StorageManagerDetailsView(StorageManagerView):
"""The details page for Storage Manager or Provider"""
@property
def is_displayed(self):
expected_title = '{} (Summary)'.format(self.context['object'].name)
return(
self.title.text == expected_title and
self.entities.breadcrumb.active_location == expected_title)
toolbar = View.nested(StorageManagerDetailsToolbar)
sidebar = View.nested(StorageManagerDetailsAccordion)
entities = View.nested(StorageManagerDetailsEntities)
@attr.s
class StorageManager(BaseEntity, Taggable, PolicyProfileAssignable):
""" Model of an storage manager in cfme
Args:
collection: Instance of collection
name: Name of the object manager.
provider: Provider
"""
name = attr.ib()
provider = attr.ib()
storage_title = 'Storage Manager'
@property
def navigation_path(self):
return self.parent.navigation_path
@property
def manager_type(self):
return self.parent.manager_type
def refresh(self, cancel=False):
"""Refresh storage manager"""
view = navigate_to(self, 'Details')
view.toolbar.configuration.item_select('Refresh Relationships and Power States',
handle_alert=not cancel)
if not cancel:
msg = "Refresh Provider initiated for 1 {} from the CFME Database".format(
self.storage_title)
view.flash.assert_success_message(msg)
def delete(self):
"""Delete storage manager"""
view = navigate_to(self, 'Details')
view.toolbar.configuration.item_select(
'Remove this {} from Inventory'.format(self.storage_title), handle_alert=True)
view = self.create_view(StorageManagerDetailsView)
view.flash.assert_no_error()
@property
def exists(self):
try:
navigate_to(self, 'Details')
return True
except ItemNotFound:
return False
@attr.s
class BlockManagerCollection(BaseCollection):
"""Collection object [block manager] for the :py:class:'cfme.storage.manager'"""
ENTITY = StorageManager
manager_type = 'Block Storage Managers'
navigation_path = ['Storage', 'Block Storage', 'Managers']
@attr.s
class ObjectManagerCollection(BaseCollection):
"""Collection object [object manager] for the :py:class:'cfme.storage.manager'"""
ENTITY = StorageManager
manager_type = 'Object Storage Managers'
navigation_path = ['Storage', 'Object Storage', 'Managers']
@navigator.register(BlockManagerCollection, 'All')
@navigator.register(ObjectManagerCollection, 'All')
class StorageManagerAll(CFMENavigateStep):
VIEW = StorageManagerAllView
prerequisite = NavigateToAttribute('appliance.server', 'LoggedIn')
def step(self, *args, **kwargs):
self.prerequisite_view.navigation.select(*self.obj.navigation_path)
@navigator.register(StorageManager, 'Details')
class StorageManagerDetails(CFMENavigateStep):
VIEW = StorageManagerDetailsView
prerequisite = NavigateToAttribute('parent', 'All')
def step(self, *args, **kwargs):
try:
row = self.prerequisite_view.paginator.find_row_on_pages(
self.prerequisite_view.entities.table, Name=self.obj.name)
row.click()
except NoSuchElementException:
raise ItemNotFound('Could not locate {}'.format(self.obj.name))
@navigator.register(StorageManager, 'EditTagsFromDetails')
class StorageManagerDetailEditTag(CFMENavigateStep):
""" This navigation destination help to WidgetasticTaggable"""
VIEW = TagPageView
prerequisite = NavigateToSibling('Details')
def step(self, *args, **kwargs):
self.prerequisite_view.toolbar.policy.item_select('Edit Tags')
|
gpl-2.0
| 316,978,537,150,088,500 | 32.37963 | 94 | 0.686685 | false |
flass/agrogenom
|
scripts/chromosomal_translocations.py
|
1
|
4877
|
#!/usr/bin/python
# -*- coding: utf8 -*-
import sys
import os
import copy
import tree2
import rec_to_db
cwd = os.getcwd()
def setToStr(s):
l = list(s)
l.sort()
return "|".join(l)
nfreftree = sys.argv[1]
topoccurclade = sys.argv[2]
dirout = sys.argv[3]
dbcon, dbcur = rec_to_db.dbconnect(dbclue='phylariane', dbpwd='********')
reftree = tree2.ReferenceTree(fic=nfreftree)
reflabs = reftree.sort(reftree[topoccurclade].get_children_labels(), labels=True)
dtaxid_code = {}
for node in reftree:
code = node.label()
dtaxid_code[rec_to_db.get_taxid(code, dbcur)] = code
nsubfam = 0
# subfam-to-species matrix of replicon location
fmatout = open('%s/subfam_location_profile.mat'%dirout, 'w')
# species-to-location subfamily trees
ortlocout = '%s/mapping_subfam_locations'%dirout
if not os.access(ortlocout, os.F_OK):
os.mkdir(ortlocout)
# table of translocation events
ftranslocout = open('%s/subfam_translocations.tab'%dirout, 'w')
dbfields = ["subfam_id", "name_txt", "hogenom_gene_id", "name", "locus_tag", "new_locus_tag", "genomic_beg", "genomic_end", "description"]
headerfields = ["subfamily", "organism", "hogenom_gene_id", "gene_name", "locus_tag", "new_locus_tag", "begin", "end", "description"]
translocfields = ["ancestor.code", "ancestor.location", "descendent.code", "descendent.location"]
ftranslocout.write('\t'.join(headerfields+translocfields)+'\n')
# sum over the genome of species-to-location trees
stateloc = copy.deepcopy(reftree)
transloc = copy.deepcopy(reftree)
tmptabsubfam = 'subfam_%s'%topoccurclade
tsubfams = rec_to_db.getSubfamFromPhyloPattern(dbcur, specificity=(tuple(reftree[topoccurclade].get_children_labels()),True), tempTable=tmptabsubfam)
# fetch annotations for all genes in db
dgene_annot = rec_to_db.get_all_gene_annotation(dbcur, cols=dbfields, join_clause=[('genome.gene2subfam', 'hogenom_gene_id'), (tmptabsubfam, 'subfam_id')])
#~ testfam = '49RHIZOB_5155.1'
#~ tsubfams = (testfam,)
for subfam in tsubfams:
#~ print subfam
ortloc = copy.deepcopy(reftree)
ortloc.resetPresence(state='absent')
ortloc.cleanEvents()
profile = rec_to_db.getSubfamOccurence(dbcur, subfam, occurence='count', returnDict=True)
#~ print subfam, profile
# filters subfamilies with multiple occurences (might be families without trees and possibly very large gene counts/genome)
if max(profile.values())>1: continue
ttloc = rec_to_db.getRepliconLoc(dbcur, subfam=subfam)
for tloc in ttloc:
hogenomid, taxid, fam, subfam, replicon = tloc
code = dtaxid_code.setdefault(taxid, rec_to_db.get_code(taxid, dbcur))
if replicon==None: replicon='?'
ortloc[code].presenceAtNode(state=replicon)
absents = []
for node in profile:
if profile[node]==0: absents.append(ortloc[node])
ortloc.FitchPars(excludedNodes=absents)
before = ortloc.newick(comment='presence', ignoreBS=True)
try:
ortloc.refineFitchPars(excludedNodes=absents, silent=True)
except ValueError, e:
print subfam
print before
raise ValueError, e
#~ if subfam==testfam:
#~ print ortloc[topoccurclade].newick(comment='presence', ignoreBS=True)
#~ break
ortloc.writePhyloProfileMatrix(fmatout, fam=subfam, reflabs=reflabs, leavesOnly=False, header=(nsubfam==0), counts=False)
# find translocation events
for node in ortloc[topoccurclade]:
f = node.go_father()
if f:
fstates = set(f.state().split('|'))
nstates = set(node.state().split('|'))
if (fstates <= set(['?', '-']) or (nstates <= set(['?', '-']))): continue
difstates = nstates - fstates
if (difstates and not (difstates <= set(['?', '-']))):
                # replicon location of the subfamily changed between the father and the node, with an informative change
floc = setToStr(fstates)
nloc = setToStr(nstates)
# sum the replicon translocation counts over the whole genome
dtrans = transloc[node.label()].misc()
dtrans[(floc, nloc)] = dtrans.get((floc, nloc), 0) + 1
# get the genes of the subfamily under the father
lspe = f.get_leaf_labels()
supannots = [f.label(), floc, node.label(), nloc]
for tloc in ttloc:
hogenomid = tloc[0]
if hogenomid.split('_')[0] in lspe:
rec_to_db.write_gene_annot(ftranslocout, hogenomid, dgene_annot, fields=dbfields, supvalues=supannots)
# transform annotation
ortloc.factorStateToDict(alternativeAsFraction=True)
# save the location tree
tree2.dump_pickle(ortloc, '%s/%s.ortloc.pickle'%(ortlocout,subfam))
# sum the replicon location counts over the whole genome
stateloc += ortloc
nsubfam += 1
sys.stdout.write('\r%d\t\t'%nsubfam)
fmatout.close()
stateloc += transloc
tree2.dump_pickle(stateloc, '%s/genome_synthesis.replicon_location.pickle'%(dirout))
stateloc.write_newick('%s/genome_synthesis.replicon_location.nwk'%(dirout), comment='locationcount')
stateloc.write_newick('%s/genome_synthesis.replicon_translocation.nwk'%(dirout), comment='translocationcount')
|
gpl-3.0
| -3,396,170,161,535,987,700 | 37.101563 | 156 | 0.717654 | false |
decarlin/indra
|
indra/reach/processor.py
|
1
|
3741
|
import re
import warnings
import objectpath
from indra.statements import *
residue_names = {
'S': 'Serine',
'T': 'Threonine',
'Y': 'Tyrosine',
'SER': 'Serine',
'THR': 'Threonine',
'TYR': 'Tyrosine',
'SERINE': 'Serine',
'THREONINE': 'Threonine',
'TYROSINE': 'Tyrosine'
}
class ReachProcessor:
def __init__(self, json_dict):
self.tree = objectpath.Tree(json_dict)
self.statements = []
def get_phosphorylation(self):
citation = self.tree.execute("$.frames.object_meta.doc_id")
qstr = "$.frames[(@.type is 'protein-modification') " + \
"and (@.subtype is 'phosphorylation')]"
res = self.tree.execute(qstr)
for r in res:
frame_id = r['frame_id']
args = r['arguments']
site = None
theme = None
controller = None
for a in args:
if a['argument_label'] == 'theme':
theme = a['text']
elif a['argument_label'] == 'site':
site = a['text']
qstr = "$.frames[(@.type is 'regulation') and " + \
"(@.arguments[0].arg is '%s')]" % frame_id
reg_res = self.tree.execute(qstr)
controller = None
for reg in reg_res:
for a in reg['arguments']:
if a['argument_label'] == 'controller':
controller = a['text']
if controller is None:
warnings.warn('Skipping phosphorylation with missing controller.')
continue
controller_agent = Agent(controller)
theme_agent = Agent(theme)
mod = 'Phosphorylation'
if site is not None:
residue, pos = self._parse_site_text(site)
else:
residue = ''
pos = ''
mod = mod + residue
sentence = r['verbose-text']
evidence = sentence
# TODO: read $.object-meta.doc-id as citation
# but dashes don't work with objectpath!
citation = ''
annotations = None
self.statements.append(Phosphorylation(controller_agent,
theme_agent, mod, pos, sentence,
citation, evidence, annotations))
def get_complexes(self):
citation = self.tree.execute("$.frames.object_meta.doc_id")
qstr = "$.frames[@.type is 'complex-assembly']"
res = self.tree.execute(qstr)
for r in res:
frame_id = r['frame_id']
args = r['arguments']
members = []
for a in args:
agent = Agent(a['text'])
members.append(agent)
self.statements.append(Complex(members))
def _parse_site_text(self, s):
m = re.match(r'([TYS])[-]?([0-9]+)', s)
if m is not None:
residue = residue_names[m.groups()[0]]
site = m.groups()[1]
return residue, site
m = re.match(r'(THR|TYR|SER)[- ]?([0-9]+)', s.upper())
if m is not None:
residue = residue_names[m.groups()[0]]
site = m.groups()[1]
return residue, site
m = re.match(r'(THREONINE|TYROSINE|SERINE)[^0-9]*([0-9]+)', s.upper())
if m is not None:
residue = residue_names[m.groups()[0]]
site = m.groups()[1]
return residue, site
m = re.match(r'.*(THREONINE|TYROSINE|SERINE).*', s.upper())
if m is not None:
residue = residue_names[m.groups()[0]]
site = None
return residue, site
return '', None
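# --- Illustrative sketch (editor addition, not part of the INDRA source) ---
# _parse_site_text() normalizes free-text site descriptions into a
# (residue, position) pair; the calls below show the intended mapping for a
# ReachProcessor instance 'rp' built from any REACH JSON output.
def _demo_parse_site_text(rp):
    assert rp._parse_site_text('S222') == ('Serine', '222')
    assert rp._parse_site_text('Ser 222') == ('Serine', '222')
    assert rp._parse_site_text('threonine 185') == ('Threonine', '185')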
|
bsd-2-clause
| 8,315,694,529,296,643,000 | 32.702703 | 82 | 0.483828 | false |
fooelisa/pyiosxr
|
pyIOSXR/exceptions.py
|
1
|
2670
|
#!/usr/bin/env python
# coding=utf-8
"""Exceptions for pyiosxr, a module to interact with Cisco devices running IOS-XR."""
# Copyright 2015 Netflix. All rights reserved.
# Copyright 2016 BigWaveIT. All rights reserved.
#
# The contents of this file are licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
class IOSXRException(Exception):
def __init__(self, msg=None, dev=None):
super(IOSXRException, self).__init__(msg)
if dev:
self._xr = dev
# release the XML agent
if self._xr._xml_agent_locker.locked():
self._xr._xml_agent_locker.release()
class ConnectError(IOSXRException):
"""Exception while openning the connection."""
def __init__(self, msg=None, dev=None):
super(ConnectError, self).__init__(msg=msg, dev=dev)
if dev:
self._xr = dev
self._xr._xml_agent_alive = False
class CommitError(IOSXRException):
"""Raised when unable to commit. Mostly due to ERROR 0x41866c00"""
pass
class LockError(IOSXRException):
"""Throw this exception when unable to lock the config DB."""
pass
class UnlockError(IOSXRException):
"""Throw this exception when unable to unlock the config DB."""
pass
class CompareConfigError(IOSXRException):
"""Throw this exception when unable to compare config."""
pass
class UnknownError(IOSXRException):
"""UnknownError Exception."""
pass
class InvalidInputError(IOSXRException):
"""InvalidInputError Exception."""
pass
class XMLCLIError(IOSXRException):
"""XMLCLIError Exception."""
pass
class InvalidXMLResponse(IOSXRException):
"""Raised when unable to process properly the XML reply from the device."""
pass
class TimeoutError(IOSXRException):
"""TimeoutError Exception."""
def __init__(self, msg=None, dev=None):
super(TimeoutError, self).__init__(msg=msg, dev=dev)
if dev:
self._xr = dev
self._xr._xml_agent_alive = False
class EOFError(IOSXRException):
"""EOFError Exception."""
pass
class IteratorIDError(IOSXRException):
"""IteratorIDError Exception."""
pass
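# --- Illustrative sketch (editor addition, not part of the pyIOSXR source) ---
# Callers usually catch the specific subclass first and let the shared base
# class propagate anything unexpected; 'device' is a hypothetical
# pyIOSXR.IOSXR handle.
def _example_safe_lock(device):
    try:
        device.lock()
    except LockError:
        return False
    except IOSXRException:
        raise
    return True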
|
apache-2.0
| 247,740,485,529,006,500 | 22.839286 | 85 | 0.675655 | false |
James-Ye/JsonConvertor
|
source/JsonConvertor.py
|
1
|
26617
|
from collections import OrderedDict
import xlrd
import json
import os
from os.path import join
#########################################################################
def getPurKey(key):
index_str = 1
while key.find("[") != -1:
idx = key.find("[")
idx2 = key.find("]")
if idx != -1 and idx2 != -1:
key1 = key[0:idx]
index_str = key[idx+1:idx2]
key2 = key[idx2+1:len(key)]
key = key1 + key2
return (key,int(index_str))
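#########################################################################
# --- Illustrative sketch (editor addition, not part of the original converter) ---
# getPurKey() strips bracketed indices out of a dotted key and returns the
# bare key together with its 1-based index, for example:
def _demo_getPurKey():
    assert getPurKey("A.B[3].C") == ("A.B.C", 3)
    assert getPurKey("A.B.C") == ("A.B.C", 1) # no brackets: index defaults to 1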
#########################################################################
def copyItem(objsrc):
objdes = None
if str(type(objsrc)) == "<class 'collections.OrderedDict'>":
objdes = OrderedDict()
items = list(objsrc.items())
for i in range(0, len(objsrc)):
objkey = items[i][0]
objvalue = items[i][1]
objdes.setdefault(objkey,copyItem(objvalue))
elif str(type(objsrc)) == "<class 'dict'>":
objdes = {}
items = list(objsrc.items())
for i in range(0, len(objsrc)):
objkey = items[i][0]
objvalue = items[i][1]
objdes.setdefault(objkey,copyItem(objvalue))
elif str(type(objsrc)) == "<class 'list'>":
objdes = []
for i in range(0, len(objsrc)):
objdes.append(copyItem(objsrc[i]))
else:
objdes = objsrc
return objdes
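#########################################################################
# --- Illustrative sketch (editor addition) ---
# copyItem() is a hand-rolled deep copy that keeps OrderedDict ordering and
# recurses through nested dicts and lists; scalar values are returned as-is.
def _demo_copyItem():
    src = OrderedDict([('a', [1, 2]), ('b', OrderedDict([('c', 3)]))])
    dup = copyItem(src)
    assert dup == src and dup is not src and dup['a'] is not src['a']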
###########################################################################################
def delete_file_folder(src):
if os.path.isfile(src):
try:
os.remove(src)
except:
pass
elif os.path.isdir(src):
for item in os.listdir(src):
itemsrc=os.path.join(src,item)
delete_file_folder(itemsrc)
try:
os.rmdir(src)
except:
pass
###########################################################################################
def getDictionary(fileName, dict_element, list_list):
bk = xlrd.open_workbook(fileName)
shxrange = range(bk.nsheets)
sh = bk.sheet_by_name("dictionary")
    # get the number of rows
    nrows = sh.nrows
    # get the number of columns
ncols = sh.ncols
statuscol = 1
for j in range(1,ncols):
cell_value = sh.cell_value(0,j)
if cell_value == "Status":
statuscol = j
break
row_list = []
last_value = ""
for i in range(1,nrows):
for j in range(0,ncols):
cell_value = sh.cell_value(i,j)
if str(type(cell_value)) == "<class 'str'>":
if j == statuscol:
if cell_value == "◆":
key = sh.cell_value(i,j+1)
if key != '':
value = ""
for strRow in row_list:
value += strRow
value += "."
value += last_value
list_list.append((key, value))
elif cell_value == "●":
key = sh.cell_value(i,j+1)
if key != '':
value = ""
for strRow in row_list:
value += strRow
value += "."
value += last_value
dict_element.setdefault(key, value)
break
elif j < statuscol:
if cell_value != "":
level = len(row_list)
if j == level:
last_value = cell_value
elif j > level:
row_list.append(last_value)
last_value = cell_value
elif j < level:
for n in range(j,level):
del row_list[-1]
last_value = cell_value
return
###########################################################################################
######################################################################################
def delvoiddict(dictionary):
count = len(dictionary)
while count > 0:
list_keys = list(dictionary.keys())
curKey = list_keys[count -1]
value = dictionary[curKey]
if str(type(value)) == "<class 'collections.OrderedDict'>":
delvoiddict(value)
if len(value) == 0:
del dictionary[curKey]
elif str(type(value)) == "<class 'list'>":
list_child = list(value)
list_count = len(list_child)
while list_count > 0:
list_value = list_child[list_count -1]
if str(type(list_value)) == "<class 'collections.OrderedDict'>":
delvoiddict(list_value)
if len(list_value) == 0:
del list_child[list_count -1]
list_count -= 1
else:
continue
if len(value) == 0:
del dictionary[curKey]
count -= 1
return
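######################################################################################
# --- Illustrative sketch (editor addition) ---
# delvoiddict() prunes empty nested OrderedDicts in place, which is how unused
# branches of the JSON template disappear before the output is written.
def _demo_delvoiddict():
    d = OrderedDict([('keep', 'x'), ('drop', OrderedDict()),
                     ('nested', OrderedDict([('inner', OrderedDict())]))])
    delvoiddict(d)
    assert list(d.keys()) == ['keep']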
######################################################################################
######################################################################################
def readTemplate(fileName):
bk = xlrd.open_workbook(fileName)
shxrange = range(bk.nsheets)
sh = bk.sheet_by_name("dictionary")
    # get the number of rows
    nrows = sh.nrows
    # get the number of columns
    ncols = sh.ncols
    # get the value in the first data row and first column
cell_value = sh.cell_value(1,1)
statuscol = 1
for j in range(1,ncols):
cell_value = sh.cell_value(0,j)
if cell_value == "Status":
statuscol = j
break
dict_main = OrderedDict()
cell_value = ""
curlevel = 0
preDict = dict_main
preKey = ''
savedKey = ''
isValue = True
current_obj_list = []
isDictBegin = False
for i in range(1,nrows):
for j in range(0,ncols):
cell_value = sh.cell_value(i,j)
if str(type(cell_value)) != "<class 'str'>":
continue
if j == statuscol:
if cell_value == "◆":
d = OrderedDict()
d.setdefault('XXX','')
preDict[preKey] = [d]
preDict = d
preKey = 'XXX'
current_obj_list.append(preDict)
elif cell_value == "○":
del preDict[preKey]
preKey = savedKey
savedKey = ''
break
elif j < statuscol:
if cell_value != '':
if j == 0:
preDict = dict_main
preDict.setdefault(cell_value,'')
preKey = cell_value
savedKey = ''
current_obj_list.clear()
current_obj_list.append(preDict)
elif j == curlevel:
preDict.setdefault(cell_value,'')
savedKey = preKey
preKey = cell_value
elif j > curlevel:
if preKey == 'XXX':
del preDict[preKey]
preDict.setdefault(cell_value,'')
preKey = cell_value
else:
d = OrderedDict()
d.setdefault(cell_value,'')
preDict[preKey] = d
preDict = d
savedKey = ''
preKey = cell_value
current_obj_list.append(preDict)
else:
list_count = len(current_obj_list)
if j < list_count:
preDict = current_obj_list[j]
for n in range(j+1,list_count):
del current_obj_list[-1]
preDict.setdefault(cell_value,'')
savedKey = ''
preKey = cell_value
curlevel = j
else:
break
delvoiddict(dict_main)
return dict_main
######################################################################################
def getheadlist(list_list):
headlist = []
for i in range(0,len(list_list)):
headlist.append(list_list[i][0])
return headlist
#########################################################################
def processDictionary(dictionary, key, list_list, oldJsonList, json_list_list, list_index):
RPHList = ["TransactionInput.PricingInput.Pnr.Segments",
"TransactionInput.PricingInput.Pnr.Passenger"]
headlist = getheadlist(list_list);
count = len(dictionary)
isInList = False
for i in range(0, count):
list_keys = list(dictionary.keys())
itemKey = list_keys[i]
value = dictionary[itemKey]
if key == "":
curKey = key + itemKey
else:
curKey = key + "." + itemKey
if curKey in headlist:
isInList = True
list_count = 1
if str(type(value)) == "<class 'dict'>":
dict_child = dict(value)
processDictionary(dict_child, curKey, list_list, oldJsonList, json_list_list, list_index)
elif str(type(value)) == "<class 'list'>":
list_child = list(value)
list_count = len(list_child)
for j in range(0, list_count):
list_value = list_child[j]
if str(type(list_value)) == "<class 'dict'>":
if isInList:
list_index = j + 1
dict_child = dict(list_value)
processDictionary(dict_child, curKey, list_list, oldJsonList, json_list_list, list_index)
else:
if list_index > 1:
curKey = key + "[%d]"%(list_index) + "." + itemKey
if key in RPHList:
indexKey = ""
if list_index > 1:
indexKey = key + "[%d]"%(list_index) + "." + "Index"
elif list_index == 1:
indexKey = key + "." + "Index"
oldJsonList.append((indexKey,"%d"%(list_index)))
tup = (curKey, list_child)
oldJsonList.append(tup)
break
            list_index = 0
else:
if list_index > 1:
curKey = key + "[%d]"%(list_index) + "." + itemKey
if key in RPHList:
indexKey = ""
if list_index > 1:
indexKey = key + "[%d]"%(list_index) + "." + "Index"
elif list_index == 1:
indexKey = key + "." + "Index"
oldJsonList.append((indexKey,"%d"%(list_index)))
tup = (curKey, value)
oldJsonList.append(tup)
if curKey in headlist:
idx = headlist.index(curKey)
listtup = (list_list[idx], list_count)
json_list_list.append(listtup)
return
#########################################################################
def getAgentContext(keyName):
context = ""
if keyName == "TransactionInput.PricingInput.Agent.IataNum":
context = "IATANumber"
elif keyName == "TransactionInput.PricingInput.Agent.CRS":
context = "CRSCode"
elif keyName == "TransactionInput.PricingInput.Agent.DeptCode":
context = "DepartmentCode"
return context
def getDiagnosticContext(keyName):
context = ""
if keyName == "TransactionInput.PricingInput.Options.Diagnostic.DiagnosticType.SliceAndDice":
context = "SliceAndDice"
elif keyName == "TransactionInput.PricingInput.Options.Diagnostic.DiagnosticType.Category":
context = "RuleValidation"
elif keyName == "TransactionInput.PricingInput.Options.Diagnostic.DiagnosticType.FareRetrieve":
context = "FareRetrieval"
elif keyName == "TransactionInput.PricingInput.Options.Diagnostic.DiagnosticType.YQYR":
context = "YQYR"
return context
#########################################################################
def ConverToTrueFalse(strYN):
context = False
if strYN == "Y":
context = True
return context
#########################################################################
def converYNToTrueFalse(oldJsonList):
YN_list = ["TransactionInput.PricingInput.Agent.IsAgency",
"TransactionInput.PricingInput.Pnr.Segments.IsForceStopover",
"TransactionInput.PricingInput.Pnr.Segments.IsForceConnection",
"TransactionInput.PricingInput.Options.AllEndosAppl",
"TransactionInput.PricingInput.Options.IsEtkt",
"TransactionInput.PricingInput.Options.FbcInHfcAppl",
"TransactionInput.PricingInput.Options.FboxCurOverride",
"TransactionInput.PricingInput.Options.InterlineOverride",
"TransactionInput.PricingInput.Options.NetFareAppl",
"TransactionInput.PricingInput.Options.IsBestbuy",
"TransactionInput.PricingInput.Options.TaxDetailAppl",
"TransactionInput.PricingInput.Options.FilterPtcAppl",
"TransactionInput.PricingInput.Options.TaxSummaryAppl",
"TransactionInput.PricingInput.Options.PrivateNegoFaresAppl",
"TransactionInput.PricingInput.Pnr.Segments.IsOpen",
"TransactionInput.PricingInput.Options.YqyrOnly",
"TransactionInput.PricingInput.Options.TaxOnly"]
useIsForceStopover = []
modifyList = []
removeList = []
for i in range(0, len(oldJsonList)):
tup = oldJsonList[i]
tupkey = getPurKey(tup[0])
oldkey = tupkey[0]
index = tupkey[1]
if oldkey in YN_list:
newkey = tup[0]
strYN = tup[1]
if "TransactionInput.PricingInput.Pnr.Segments.IsForceStopover" == oldkey:
useIsForceStopover.append(index)
elif "TransactionInput.PricingInput.Pnr.Segments.IsForceConnection" == oldkey:
if index in useIsForceStopover:
removeList.append(tup)
continue
else:
newkey = "TransactionInput.PricingInput.Pnr.Segments[%d].IsForceStopover"%(index)
modifyList.append((i,(newkey,ConverToTrueFalse(strYN))))
#modify
for i in range(0, len(modifyList)):
oldJsonList[modifyList[i][0]] = modifyList[i][1]
#remove
for i in range(0, len(removeList)):
oldJsonList.remove(removeList[i])
return
#########################################################################
def processDiagnostic(oldJsonList, json_list_list):
mlist = []
listName = "TransactionInput.PricingInput.Options.Diagnostic"
typeName = "TransactionInput.PricingInput.Options.Diagnostic.DiagnosticType"
for i in range(0, len(oldJsonList)):
tup = oldJsonList[i]
oldkey = tup[0]
if typeName in oldkey:
if getDiagnosticContext(oldkey) != "":
mlist.append(tup)
for i in range(0, len(json_list_list)):
if listName == json_list_list[i][0][0]:
tup = (json_list_list[i][0],len(mlist))
json_list_list.remove(json_list_list[i])
json_list_list.append(tup)
for i in range(0, len(mlist)):
if i == 0:
tup = ("TransactionInput.PricingInput.Options.Diagnostic.DiagnosticType", getDiagnosticContext(mlist[i][0]))
oldJsonList.append(tup)
tup = ("TransactionInput.PricingInput.Options.Diagnostic.DiagnosticInclude", ConverToTrueFalse(mlist[i][1]))
oldJsonList.append(tup)
else:
tup = ("TransactionInput.PricingInput.Options.Diagnostic[%d].DiagnosticType"%(i+1), getDiagnosticContext(mlist[i][0]))
oldJsonList.append(tup)
tup = ("TransactionInput.PricingInput.Options.Diagnostic[%d].DiagnosticInclude"%(i+1), ConverToTrueFalse(mlist[i][1]))
oldJsonList.append(tup)
oldJsonList.remove(mlist[i])
return
#########################################################################
def processSource(oldJsonList, json_list_list):
agent_list = []
mlist = []
listName = "TransactionInput.PricingInput.Agent"
for i in range(0, len(oldJsonList)):
tup = oldJsonList[i]
oldkey = tup[0]
if listName in oldkey:
if getAgentContext(oldkey) != "":
mlist.append(tup)
else:
agent_list.append(tup)
for i in range(0, len(json_list_list)):
if listName == json_list_list[i][0][0]:
tup = (json_list_list[i][0],len(mlist))
json_list_list.remove(json_list_list[i])
json_list_list.append(tup)
for i in range(0, len(mlist)):
if i == 0:
tup = ("TransactionInput.PricingInput.Agent.Request.ID", mlist[i][1])
oldJsonList.append(tup)
tup = ("TransactionInput.PricingInput.Agent.Request.ID_Context", getAgentContext(mlist[i][0]))
oldJsonList.append(tup)
else:
tup = ("TransactionInput.PricingInput.Agent[%d].Request.ID"%(i+1), mlist[i][1])
oldJsonList.append(tup)
tup = ("TransactionInput.PricingInput.Agent[%d].Request.ID_Context"%(i+1), getAgentContext(mlist[i][0]))
oldJsonList.append(tup)
for j in range(0, len(agent_list)):
keyold = agent_list[j][0]
keyName = listName + "[%d]"%((i+1)) + keyold[len(listName):len(keyold)]
tup = (keyName, agent_list[j][1])
oldJsonList.append(tup)
oldJsonList.remove(mlist[i])
return
#########################################################################
def procDateTime(oldJsonList, key_old, tup, dicDateTime, index, prex, key1, key2):
if key_old == prex + key1:
dicDateTime.setdefault("Date",tup)
if "Time" in list(dicDateTime.keys()) and len(dicDateTime["Time"]) != 0:
if index > 1:
key_old = prex +"[%d]"%(index) + key1 + "Time"
else:
key_old = prex + key1 + "Time"
value = dicDateTime["Date"][1] + 'T' + dicDateTime["Time"][1]
oldJsonList.append((key_old,value))
dicDateTime.clear()
else:
return True
elif key_old == prex + key2:
dicDateTime.setdefault("Time",tup)
if "Date" in list(dicDateTime.keys()) and len(dicDateTime["Date"]) != 0:
if index > 1:
key_old = prex +"[%d]"%(index) + key1 + "Time"
else:
key_old = prex + key1 + "Time"
value = dicDateTime["Date"][1] + 'T' + dicDateTime["Time"][1]
oldJsonList.append((key_old,value))
dicDateTime.clear()
else:
return True
return False
#########################################################################
def processDateTime(oldJsonList):
dicDateTimeDep = {}
DepDateTime = 0
dicDateTimeArr = {}
ArrDateTime = 0
dicDateTimeRes = {}
ResDateTime = 0
prex = "TransactionInput.PricingInput.Pnr.Segments"
depkey1 = ".DepDate"
depkey2 = ".DepTime"
arrkey1 = ".ArrDate"
arrkey2 = ".ArrTime"
reskey1 = ".ResDate"
reskey2 = ".ResTime"
for i in range(0, len(oldJsonList)):
tup = oldJsonList[i]
tupkey = getPurKey(tup[0])
key_old = tupkey[0]
index = tupkey[1]
value = tup[1]
if procDateTime(oldJsonList, key_old, tup, dicDateTimeDep, index, prex, depkey1, depkey2):
continue
if procDateTime(oldJsonList, key_old, tup, dicDateTimeArr, index, prex, arrkey1, arrkey2):
continue
if procDateTime(oldJsonList, key_old, tup, dicDateTimeRes, index, prex, reskey1, reskey2):
continue
return
#########################################################################
def readOldJson(fileName, list_list):
fp = open(fileName, 'r')
dict_json = json.loads(fp.read())
# json.dump(dict_json, open('../Temp/rawjson.json', 'w'))
key = ""
oldJsonList = []
json_list_list = []
processDictionary(dict_json, key, list_list, oldJsonList, json_list_list, 0)
processSource(oldJsonList, json_list_list)
processDiagnostic(oldJsonList, json_list_list)
processDateTime(oldJsonList)
converYNToTrueFalse(oldJsonList)
fp.close()
return (oldJsonList,json_list_list)
#########################################################################
#########################################################################
def setValue(dict_json, keys, value, list_keys, list_index):
idx = keys.find(".")
key = ''
subkeys = ''
if idx == -1:
key = keys
else:
key = keys[0:idx]
subkeys = keys[idx+1:len(keys)]
idx_list = list_keys.find(".")
listkey = ''
sublistkeys = ''
if idx_list == -1:
listkey = list_keys
else:
listkey = list_keys[0:idx_list]
sublistkeys = list_keys[idx_list+1:len(list_keys)]
index = 0
if key == listkey:
if sublistkeys == '':
index = list_index
else:
sublistkeys = ''
if key in list(dict_json.keys()):
obj = dict_json[key]
dic = OrderedDict()
if str(type(obj)) == "<class 'collections.OrderedDict'>":
dic = obj
elif str(type(obj)) == "<class 'list'>":
dic = obj[index]
else:
dict_json[key] = value
if len(dic) != 0 and subkeys!= '':
setValue(dic, subkeys, value, sublistkeys, list_index)
return
#########################################################################
#########################################################################
def setList(dict_json, keys, count):
index = keys.find(".")
key = ''
subkeys = ''
if index != -1:
key = keys[0:index]
subkeys = keys[index+1:len(keys)]
else:
key = keys
if key in list(dict_json.keys()):
obj = dict_json[key]
dic = OrderedDict()
if str(type(obj)) == "<class 'collections.OrderedDict'>":
dic = obj
elif str(type(obj)) == "<class 'list'>":
if subkeys == '':
if len(obj) == count:
return
else:
item = dict_json[key][0]
for i in range (1, count):
dict_json[key].append(copyItem(item))
else:
dic = obj[0]
else:
return
if len(dic) != 0 and subkeys!= '':
setList(dic, subkeys, count)
return
#########################################################################
def processTemplate(new_Json_Template, json_list_list):
new_Json = new_Json_Template
for i in range(0,len(json_list_list)):
count = json_list_list[i][1]
if count > 1:
keys = json_list_list[i][0][1]
setList(new_Json, keys, count)
return new_Json
#########################################################################
def convert(fileName , dict_element, list_list, new_Json_Template):
ret = readOldJson(fileName, list_list)
oldJsonList = ret[0]
json_list_list = ret[1]
# json.dump(oldJsonList, open('../Temp/oldjsonfile.json', 'w'))
# json.dump(json_list_list, open('../Temp/oldjsonListfile.json', 'w'))
new_Json = processTemplate(new_Json_Template, json_list_list)
# json.dump(new_Json, open('../Temp/newjsontemplate.json', 'w'))
# json.dump(list(dict_element.keys()), open('../Temp/dict_element_key_list.json', 'w'))
list_element_keys = list(dict_element.keys())
for i in range(0, len(oldJsonList)):
tup = oldJsonList[i]
key_old_raw = tup[0]
value = tup[1]
list_key = ""
list_index = 0
key_old = key_old_raw
idx = key_old_raw.find("[")
if idx != -1:
idx2 = key_old_raw.find("]")
list_index_str = key_old_raw[idx+1:idx2]
if list_index_str.isnumeric():
list_index = int(list_index_str)
list_key = key_old_raw[0:idx]
key_old = list_key + key_old_raw[idx2+1:len(key_old_raw)]
new_list_key = ''
if list_index > 1:
for i in range(0, len(json_list_list)):
if list_key == json_list_list[i][0][0]:
new_list_key = json_list_list[i][0][1]
break
if key_old in list_element_keys:
key_new = dict_element[key_old]
setValue(new_Json, key_new, value, new_list_key, list_index-1)
return new_Json
#########################################################################
def convertFiles(source, dest, new_Json_Template, dict_element, list_list):
os.mkdir(dest)
for root, dirs, files in os.walk( source ):
for OneFileName in files :
inputFileName = root + "/" + OneFileName
outputFileName = dest + root[8:len(root)] + "/" + OneFileName
new_Json_Temp = copyItem(new_Json_Template)
new_Json = convert(inputFileName, dict_element, list_list, new_Json_Temp)
json.dump(new_Json, open(outputFileName, 'w'))
print ("%s is OK" %(OneFileName))
if len(dirs) != 0:
for dir in dirs:
os.mkdir(dest + "/" + root[8:len(root)] + "/" + dir)
return
#########################################################################
tempdir = "../Temp"
delete_file_folder(tempdir)
os.mkdir(tempdir)
fileName = "../data/dictionary.xls"
dict_element = OrderedDict()
list_list = []
getDictionary(fileName, dict_element, list_list)
#json.dump(list_list, open('../Temp/dictionarylistfile.json', 'w'))
#json.dump(dict_element, open('../Temp/dictionaryelementfile.json', 'w'))
new_Json_Template = readTemplate(fileName)
#json.dump(new_Json_Template, open('../Temp/jsontemplate.json', 'w'))
source = "../input"
dest = "../output"
delete_file_folder(dest)
convertFiles(source, dest, new_Json_Template, dict_element, list_list)
print ("All finished!")
|
gpl-3.0
| 1,715,549,357,373,807,000 | 34.887838 | 130 | 0.484957 | false |
tilacog/rows
|
tests/tests_plugin_csv.py
|
1
|
2638
|
# coding: utf-8
# Copyright 2014-2015 Álvaro Justen <https://github.com/turicas/rows/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import tempfile
import unittest
import rows
import rows.plugins.csv
import utils
class PluginCsvTestCase(utils.RowsTestMixIn, unittest.TestCase):
filename = 'tests/data/all-field-types.csv'
encoding = 'utf-8'
def test_imports(self):
self.assertIs(rows.import_from_csv, rows.plugins.csv.import_from_csv)
self.assertIs(rows.export_to_csv, rows.plugins.csv.export_to_csv)
def test_import_from_csv_filename(self):
table = rows.import_from_csv(self.filename, encoding=self.encoding)
self.assert_table_equal(table, utils.table)
expected_meta = {'imported_from': 'csv', 'filename': self.filename,}
self.assertEqual(table.meta, expected_meta)
def test_import_from_csv_fobj(self):
# TODO: may test with codecs.open passing an encoding
with open(self.filename) as fobj:
table = rows.import_from_csv(fobj, encoding=self.encoding)
self.assert_table_equal(table, utils.table)
expected_meta = {'imported_from': 'csv', 'filename': self.filename,}
self.assertEqual(table.meta, expected_meta)
def test_export_to_csv_filename(self):
# TODO: may test file contents
temp = tempfile.NamedTemporaryFile(delete=False)
self.files_to_delete.append(temp.name)
rows.export_to_csv(utils.table, temp.name)
table = rows.import_from_csv(temp.name)
self.assert_table_equal(table, utils.table)
def test_export_to_csv_fobj(self):
# TODO: may test with codecs.open passing an encoding
# TODO: may test file contents
temp = tempfile.NamedTemporaryFile(delete=False)
self.files_to_delete.append(temp.name)
rows.export_to_csv(utils.table, temp.file)
table = rows.import_from_csv(temp.name)
self.assert_table_equal(table, utils.table)
|
gpl-3.0
| -8,903,840,165,655,480,000 | 36.671429 | 77 | 0.692074 | false |
benzkji/django-layout
|
project/settings/ckeditor.py
|
1
|
1166
|
from django.urls import reverse_lazy
TEXTBLOCKS_CKEDITORJS_URL = '/static/ckeditor/ckeditor/ckeditor.js'
CKEDITOR_LINK_MODEL = '{{ project_name }}.models.Link'
CKEDITOR_LINK_IFRAME_URL = reverse_lazy('admin:{{ project_name }}_link_add')
CKEDITOR_LINK_VERIFY_URL = reverse_lazy('admin:{{ project_name }}_link_verify')
CKEDITOR_CONFIGS = {
'default': {
'djangolinkIframeURL': CKEDITOR_LINK_IFRAME_URL,
'djangolinkVerifyURL': CKEDITOR_LINK_VERIFY_URL,
'djangolinkFallbackField': 'free',
'disallowedContent': 'a[style]; pre[style]; h1[style]; h2[style]; h3[style]; p[style]; ul[style]; ol[style]; li[style]',
'extraPlugins': ','.join(
[
# your extra plugins here
'djangolink',
]),
'toolbar': 'Custom',
'toolbar_Custom': [
['Maximize'],
['Format'],
['Bold', ],
['BulletedList', ],
['DjangoLink', 'Unlink'],
['Cut', 'Copy', 'PasteText', ],
['cleanup', ],
['ShowBlocks', 'Source'],
],
'format_tags': 'h2;p',
'width': '730',
}
}
|
mit
| -7,918,390,630,464,996,000 | 31.388889 | 128 | 0.534305 | false |
benoit-pierre/mcomix
|
mcomix/process.py
|
1
|
6817
|
"""process.py - Process spawning module."""
import gc
import sys
import os
from distutils import spawn
from mcomix import log
from mcomix import i18n
try:
import subprocess32 as subprocess
_using_subprocess32 = True
except ImportError:
log.warning('subprocess32 not available! using subprocess')
import subprocess
_using_subprocess32 = False
NULL = open(os.devnull, 'r+b')
PIPE = subprocess.PIPE
STDOUT = subprocess.STDOUT
# Convert argument vector to system's file encoding where necessary
# to prevent automatic conversion when appending Unicode strings
# to byte strings later on.
def _fix_args(args):
fixed_args = []
for arg in args:
if isinstance(arg, unicode):
fixed_args.append(arg.encode(sys.getfilesystemencoding()))
else:
fixed_args.append(arg)
return fixed_args
def _get_creationflags():
if 'win32' == sys.platform:
# Do not create a console window.
return 0x08000000
else:
return 0
# Cannot spawn processes with PythonW/Win32 unless stdin
# and stderr are redirected to a pipe/devnull as well.
def call(args, stdin=NULL, stdout=NULL, stderr=NULL):
return 0 == subprocess.call(_fix_args(args), stdin=stdin,
stdout=stdout, stderr=stderr,
creationflags=_get_creationflags())
def popen(args, stdin=NULL, stdout=PIPE, stderr=NULL):
if not _using_subprocess32:
gc.disable() # Avoid Python issue #1336!
try:
return subprocess.Popen(_fix_args(args), stdin=stdin,
stdout=stdout, stderr=stderr,
creationflags=_get_creationflags())
finally:
if not _using_subprocess32:
gc.enable()
if 'win32' == sys.platform:
_exe_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
def find_executable(candidates, workdir=None, is_valid_candidate=None):
""" Find executable in path.
Return an absolute path to a valid executable or None.
    <workdir> defaults to the current working directory if not set.
<is_valid_candidate> is an optional function that must return True
if the path passed in argument is a valid candidate (to check for
version number, symlinks to an unsupported variant, etc...).
If a candidate has a directory component,
it will be checked relative to <workdir>.
On Windows:
- '.exe' will be appended to each candidate if not already
- MComix executable directory is prepended to the path on Windows
(to support embedded tools/executables in the distribution).
- <workdir> will be inserted first in the path.
On Unix:
- a valid candidate must have execution right
"""
if workdir is None:
workdir = os.getcwd()
workdir = os.path.abspath(workdir)
search_path = os.environ['PATH'].split(os.pathsep)
if 'win32' == sys.platform:
if workdir is not None:
search_path.insert(0, workdir)
search_path.insert(0, _exe_dir)
is_valid_exe = lambda exe: \
os.path.isfile(exe) and \
os.access(exe, os.R_OK|os.X_OK)
if is_valid_candidate is None:
is_valid = is_valid_exe
else:
is_valid = lambda exe: \
is_valid_exe(exe) and \
is_valid_candidate(exe)
for name in candidates:
# On Windows, must end with '.exe'
if 'win32' == sys.platform:
if not name.endswith('.exe'):
name = name + '.exe'
# Absolute path?
if os.path.isabs(name):
if is_valid(name):
return name
# Does candidate have a directory component?
elif os.path.dirname(name):
# Yes, check relative to working directory.
path = os.path.normpath(os.path.join(workdir, name))
if is_valid(path):
return path
# Look in search path.
else:
for dir in search_path:
path = os.path.abspath(os.path.join(dir, name))
if is_valid(path):
return path
return None
def Win32Popen(cmd):
""" Spawns a new process on Win32. cmd is a list of parameters.
This method's sole purpose is calling CreateProcessW, not
CreateProcessA as it is done by subprocess.Popen. """
import ctypes
# Declare common data types
DWORD = ctypes.c_uint
WORD = ctypes.c_ushort
LPTSTR = ctypes.c_wchar_p
LPBYTE = ctypes.POINTER(ctypes.c_ubyte)
HANDLE = ctypes.c_void_p
class StartupInfo(ctypes.Structure):
_fields_ = [("cb", DWORD),
("lpReserved", LPTSTR),
("lpDesktop", LPTSTR),
("lpTitle", LPTSTR),
("dwX", DWORD),
("dwY", DWORD),
("dwXSize", DWORD),
("dwYSize", DWORD),
("dwXCountChars", DWORD),
("dwYCountChars", DWORD),
("dwFillAttribute", DWORD),
("dwFlags", DWORD),
("wShowWindow", WORD),
("cbReserved2", WORD),
("lpReserved2", LPBYTE),
("hStdInput", HANDLE),
("hStdOutput", HANDLE),
("hStdError", HANDLE)]
class ProcessInformation(ctypes.Structure):
_fields_ = [("hProcess", HANDLE),
("hThread", HANDLE),
("dwProcessId", DWORD),
("dwThreadId", DWORD)]
LPSTRARTUPINFO = ctypes.POINTER(StartupInfo)
LPROCESS_INFORMATION = ctypes.POINTER(ProcessInformation)
ctypes.windll.kernel32.CreateProcessW.argtypes = [LPTSTR, LPTSTR,
ctypes.c_void_p, ctypes.c_void_p, ctypes.c_bool, DWORD,
ctypes.c_void_p, LPTSTR, LPSTRARTUPINFO, LPROCESS_INFORMATION]
ctypes.windll.kernel32.CreateProcessW.restype = ctypes.c_bool
# Convert list of arguments into a single string
cmdline = subprocess.list2cmdline(cmd)
buffer = ctypes.create_unicode_buffer(cmdline)
# Resolve executable path.
exe = find_executable((cmd[0],))
# Some required structures for the method call...
startupinfo = StartupInfo()
ctypes.memset(ctypes.addressof(startupinfo), 0, ctypes.sizeof(startupinfo))
startupinfo.cb = ctypes.sizeof(startupinfo)
processinfo = ProcessInformation()
# Spawn new process
success = ctypes.windll.kernel32.CreateProcessW(exe, buffer,
None, None, False, 0, None, None, ctypes.byref(startupinfo),
ctypes.byref(processinfo))
if success:
ctypes.windll.kernel32.CloseHandle(processinfo.hProcess)
ctypes.windll.kernel32.CloseHandle(processinfo.hThread)
return processinfo.dwProcessId
else:
raise ctypes.WinError(ctypes.GetLastError(),
i18n.to_unicode(ctypes.FormatError()))
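# --- Illustrative sketch (editor addition, not part of the MComix source) ---
# Win32Popen() is only meaningful on Windows; a typical call passes the full
# argument vector and receives the new process id. 'notepad.exe' is just a
# placeholder command.
def _example_win32_spawn():
    if 'win32' == sys.platform:
        return Win32Popen(['notepad.exe'])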
# vim: expandtab:sw=4:ts=4
|
gpl-2.0
| 4,675,806,988,680,161,000 | 30.706977 | 79 | 0.618894 | false |
goodcrypto/goodcrypto-libs
|
syr/_log.py
|
1
|
1905
|
'''
Log for syr.log and modules it uses.
The modules syr.log uses cannot use syr.log itself. Use syr.log instead
of this module if you can. This module is much less efficient and
powerful than syr.log. To debug this module use print().
Functions that are used by both log and _log are here.
Copyright 2015-2016 GoodCrypto
Last modified: 2016-05-24
This file is open source, licensed under GPLv3 <http://www.gnu.org/licenses/>.
'''
from __future__ import unicode_literals
import sys
IS_PY2 = sys.version_info[0] == 2
import os, pwd, sh, time
def log(message, filename=None, mode=None):
''' Log message that syr.log can't. '''
if filename is None:
filename = '/tmp/_log.{}.log'.format(whoami())
if mode is None:
mode = '0666'
# print(message)
sh.touch(filename)
try:
sh.chmod(mode, filename)
except sh.ErrorReturnCode_1:
# hopefully the perms are already ok
pass
with open(filename, 'a') as logfile:
try:
logfile.write('{} {}\n'.format(timestamp(), message))
except UnicodeDecodeError:
from syr.python import is_string
logfile.write('unable to write message because it is a type: {}'.format(type(message)))
if not is_string(message):
logfile.write('{} {}\n'.format(timestamp(), message.decode(errors='replace')))
def whoami():
''' Get user '''
# without using syr.user.whoami()
return pwd.getpwuid(os.geteuid()).pw_name
def timestamp():
''' Timestamp as a string. Duplicated in this module to avoid recursive
imports. '''
ct = time.time()
if IS_PY2:
milliseconds = int((ct - long(ct)) * 1000)
else:
milliseconds = int((ct - int(ct)) * 1000)
t = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
return '{},{:03}'.format(t, milliseconds)
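# Minimal usage sketch, assuming the default /tmp log location is acceptable;
# the file name and messages below are purely illustrative.
if __name__ == '__main__':
    log('started {} as {}'.format(__file__, whoami()))
    log('example warning', filename='/tmp/_log.example.log', mode='0644')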
|
gpl-3.0
| 2,743,117,299,901,773,300 | 29.238095 | 99 | 0.612073 | false |
eswartz/panda3d-stuff
|
programs/dynamic-geometry/draw_path_tunnel.py
|
1
|
17639
|
'''
Draw a tunnel with mouse movement, create it and its collision geometry, and walk through it.
Created on Feb 27, 2015
Released Feb 4, 2016
@author: ejs
'''
from panda3d.core import loadPrcFileData # @UnusedImport
#loadPrcFile("./myconfig.prc")
# loadPrcFileData("", "load-display p3tinydisplay\nbasic-shaders-only #t\nhardware-animated-vertices #f")
# loadPrcFileData("", "notify-level-collide debug")
loadPrcFileData("", "sync-video 1")
from direct.gui.OnscreenText import OnscreenText
from direct.showbase.ShowBase import ShowBase
from panda3d.core import TextNode, GeomNode, LVecBase4i, GeomVertexFormat, Geom,\
GeomVertexWriter, GeomTristrips, GeomVertexData, Vec3, CollisionNode, \
CollisionTraverser, CollisionSphere,\
CollisionFloorMesh, GeomVertexReader, Point3, CollisionHandlerFloor
import sys
import fpscontroller
class MyApp(ShowBase):
def __init__(self):
ShowBase.__init__(self)
self.seeNode = self.render.attachNewNode('see')
self.cam.reparentTo(self.seeNode)
self.cam.setPos(0, 0, 5)
self.fpscamera = fpscontroller.FpsController(self, self.seeNode)
self.fpscamera.setFlyMode(True)
self.fpscamera.setMouseLook(True)
self.prevPos = self.fpscamera.getPos()
self.prevInto = None
self.makeInstructions()
self.info = self.genLabelText("Position: <unknown>", 2)
self.initCollisions()
self.leftColor = LVecBase4i(224, 224, 64, 255)
self.rightColor = LVecBase4i(64, 224, 224, 255)
self.isDrawing = False
self.toggleDrawing()
self.accept("escape", sys.exit) #Escape quits
self.accept("enter", self.toggleDrawing)
def initCollisions(self):
# Initialize the collision traverser.
self.cTrav = CollisionTraverser()
self.cTrav.showCollisions(self.render)
# Initialize the Pusher collision handler.
self.pusher = CollisionHandlerFloor()
### player
# Create a collsion node for this object.
playerNode = CollisionNode('player')
playerNode.addSolid(CollisionSphere(0, 0, 0, 1))
# Attach the collision node to the object's model.
self.playerC = self.fpscamera.player.attachNewNode(playerNode)
# Set the object's collision node to render as visible.
self.playerC.show()
def toggleDrawing(self):
self.isDrawing = not self.isDrawing
if self.isDrawing:
self.instructionText.setText('Enter: Generate Tunnel from Movement')
self.fpscamera.setFlyMode(True)
self.prevPos = None
            # self.cTrav.removeCollider(self.playerC)
self.removeTask('updatePhysics')
self.addTask(self.drawHere, 'drawHere')
self.geomNode = GeomNode('geomNode')
self.geomNodePath = self.render.attachNewNode(self.geomNode)
self.geomNodePath.setTwoSided(True)
# apparently p3tinydisplay needs this
self.geomNodePath.setColorOff()
# Create a collision node for this object.
self.floorCollNode = CollisionNode('geom')
# Attach the collision node to the object's model.
floorC = self.geomNodePath.attachNewNode(self.floorCollNode)
# Set the object's collision node to render as visible.
floorC.show()
self.newVertexData()
self.newGeom()
else:
self.instructionText.setText('Enter: Record Movement for Tunnel')
self.removeTask('drawHere')
if self.prevPos:
#self.completePath()
self.completeTunnelPath()
self.fpscamera.setFlyMode(True)
self.drive.setPos(self.fpscamera.getPos())
self.cTrav.addCollider(self.playerC, self.pusher)
self.pusher.addCollider(self.playerC, self.fpscamera.player)
self.taskMgr.add(self.updatePhysics, 'updatePhysics')
def newVertexData(self):
fmt = GeomVertexFormat.getV3c4()
# fmt = GeomVertexFormat.getV3n3c4()
self.vertexData = GeomVertexData("path", fmt, Geom.UHStatic)
self.vertexWriter = GeomVertexWriter(self.vertexData, 'vertex')
# self.normalWriter = GeomVertexWriter(self.vertexData, 'normal')
self.colorWriter = GeomVertexWriter(self.vertexData, 'color')
def newGeom(self):
self.triStrips = GeomTristrips(Geom.UHDynamic)
self.geom = Geom(self.vertexData)
self.geom.addPrimitive(self.triStrips)
def makeInstructions(self):
OnscreenText(text="Draw Path by Walking (WSAD/space/mouselook)",
style=1, fg=(1,1,0,1),
pos=(0.5,-0.95), scale = .07)
self.genLabelText("ESC: Quit", 0)
self.instructionText = self.genLabelText("", 1)
def genLabelText(self, text, i):
return OnscreenText(text = text, pos = (-1.3, .95-.05*i), fg=(1,1,0,1),
align = TextNode.ALeft, scale = .05)
def drawHere(self, task):
pos = self.fpscamera.getPos()
self.info.setText("Position: {0}, {1}, {2} at {3} by {4}".format(int(pos.x*100)/100., int(pos.y*100)/100., int(pos.z)/100.,
self.fpscamera.getHeading(), self.fpscamera.getLookAngle()))
prevPos = self.prevPos
if not prevPos:
self.prevPos = pos
elif (pos - prevPos).length() >= 1:
# self.extendPathQuad(prevPos, pos, 2)
self.extendPathTunnel(prevPos, pos, 3)
self.leftColor[1] += 63
self.rightColor[2] += 37
self.prevPos = pos
return task.cont
def extendPathQuad(self, prevPos, pos, width):
self.drawQuadTo(prevPos, pos, width)
row = self.vertexWriter.getWriteRow()
numPrims = self.triStrips.getNumPrimitives()
if numPrims == 0:
primVerts = row
else:
primVerts = row - self.triStrips.getPrimitiveEnd(numPrims-1)
if primVerts >= 4:
self.triStrips.closePrimitive()
if row >= 256:
print "Packing and starting anew"
newGeom = True
self.geom.unifyInPlace(row, False)
else:
newGeom = False
self.completeQuadPath()
if newGeom:
self.newVertexData()
self.newGeom()
if newGeom:
self.drawQuadTo(prevPos, pos, width)
else:
self.triStrips.addConsecutiveVertices(row - 2, 2)
def extendPathTunnel(self, prevPos, pos, width):
self.drawTunnelTo(prevPos, pos, width)
def drawLineTo(self, pos, color):
self.vertexWriter.addData3f(pos.x, pos.y, pos.z)
# self.normalWriter.addData3f(0, 0, 1)
self.colorWriter.addData4i(color)
self.triStrips.addNextVertices(1)
return 1
def drawQuadTo(self, a, b, width):
""" a (to) b are vectors defining a line bisecting a new quad. """
into = (b - a)
if abs(into.x) + abs(into.y) < 1:
# ensure that if we jump in place, we don't get a thin segment
if not self.prevInto:
return
into = self.prevInto
else:
into.normalize()
# the perpendicular of (a,b) is (-b,a); we want the path to be "flat" in Z=space
if self.vertexWriter.getWriteRow() == 0:
self.drawQuadRow(a, into, width)
verts = self.drawQuadRow(b, into, width)
self.prevInto = into
return verts
def drawQuadRow(self, a, into, width):
""" a defines a point, with 'into' being the normalized direction. """
# the perpendicular of (a,b) is (-b,a); we want the path to be "flat" in Z=space
aLeft = Vec3(a.x - into.y * width, a.y + into.x * width, a.z)
aRight = Vec3(a.x + into.y * width, a.y - into.x * width, a.z)
row = self.vertexWriter.getWriteRow()
self.vertexWriter.addData3f(aLeft)
self.vertexWriter.addData3f(aRight)
# self.normalWriter.addData3f(Vec3(0, 0, 1))
# self.normalWriter.addData3f(Vec3(0, 0, 1))
self.colorWriter.addData4i(self.leftColor)
self.colorWriter.addData4i(self.rightColor)
self.triStrips.addConsecutiveVertices(row, 2)
return 2
def drawTunnelTo(self, a, b, width):
""" a (to) b are vectors defining a line bisecting a new tunnel segment. """
into = (b - a)
if abs(into.x) + abs(into.y) < 1:
# ensure that if we jump in place, we don't get a thin segment
if not self.prevInto:
return
into = self.prevInto
else:
into.normalize()
# the perpendicular of (a,b) is (-b,a); we want the path to be "flat" in Z=space
if self.vertexWriter.getWriteRow() == 0:
self.drawTunnelBoundary(a, into, width)
row = self.vertexWriter.getWriteRow()
verts = self.drawTunnelBoundary(b, into, width)
totalVerts = self.drawTunnelRow(row, verts)
self.prevInto = into
return totalVerts
def drawTunnelBoundary(self, a, into, width):
""" a defines a point, with 'into' being the normalized direction. """
aLowLeft = Vec3(a.x - into.y * width, a.y + into.x * width, a.z)
aLowRight = Vec3(a.x + into.y * width, a.y - into.x * width, a.z)
aHighRight = Vec3(a.x + into.y * width, a.y - into.x * width, a.z + width * 3)
aHighLeft = Vec3(a.x - into.y * width, a.y + into.x * width, a.z + width * 3)
self.vertexWriter.addData3f(aLowLeft)
self.vertexWriter.addData3f(aLowRight)
self.vertexWriter.addData3f(aHighRight)
self.vertexWriter.addData3f(aHighLeft)
self.colorWriter.addData4i(self.leftColor)
self.colorWriter.addData4i(self.rightColor)
self.colorWriter.addData4i(self.leftColor)
self.colorWriter.addData4i(self.rightColor)
return 4
def drawTunnelRowX(self, row, verts):
# BOTTOM: bottom-left, new-bottom-left, bottom-right, new-bottom-right
self.triStrips.addConsecutiveVertices(row - verts + 0, 1)
self.triStrips.addConsecutiveVertices(row + 0, 1)
self.triStrips.addConsecutiveVertices(row - verts + 1, 1)
self.triStrips.addConsecutiveVertices(row + 1, 1)
self.triStrips.closePrimitive()
# RIGHT: (new-bottom-right) bottom-right, new-top-right, top-right
self.triStrips.addConsecutiveVertices(row + 1, 1)
self.triStrips.addConsecutiveVertices(row - verts + 1, 1)
self.triStrips.addConsecutiveVertices(row + 2, 1)
self.triStrips.addConsecutiveVertices(row - verts + 2, 1)
self.triStrips.closePrimitive()
# TOP: top-left, new top-right, new top-left
self.triStrips.addConsecutiveVertices(row - verts + 2, 1)
self.triStrips.addConsecutiveVertices(row - verts + 3, 1)
self.triStrips.addConsecutiveVertices(row + 2, 1)
self.triStrips.addConsecutiveVertices(row + 3, 1)
self.triStrips.closePrimitive()
# LEFT: (new top-left) new bottom-left, top-left, bottom-left, new-bottom-left
self.triStrips.addConsecutiveVertices(row + 3, 1)
self.triStrips.addConsecutiveVertices(row + 0, 1)
self.triStrips.addConsecutiveVertices(row - verts + 3, 1)
self.triStrips.addConsecutiveVertices(row - verts + 0, 1)
self.triStrips.closePrimitive()
return verts * 4
def drawTunnelRow(self, row, verts):
# # clockwise for the inside of the tunnel
# # TOP: new-top-left, top-left, new-top-right, top-right
# self.triStrips.addConsecutiveVertices(row + 3, 1)
# self.triStrips.addConsecutiveVertices(row - verts + 3, 1)
# self.triStrips.addConsecutiveVertices(row + 2, 1)
# self.triStrips.addConsecutiveVertices(row - verts + 2, 1)
# # RIGHT: new-bottom-right, bottom-right
# self.triStrips.addConsecutiveVertices(row + 1, 1)
# self.triStrips.addConsecutiveVertices(row - verts + 1, 1)
# # BOTTOM: new-bottom-left, bottom-left
# self.triStrips.addConsecutiveVertices(row, 1)
# self.triStrips.addConsecutiveVertices(row - verts, 1)
# # LEFT: new top-left, top-left
# self.triStrips.addConsecutiveVertices(row + 3, 1)
# self.triStrips.addConsecutiveVertices(row - verts + 3, 1)
# TOP: new-top-left, top-left, new-top-right, top-right
self.triStrips.addConsecutiveVertices(row - verts + 3, 1)
self.triStrips.addConsecutiveVertices(row + 3, 1)
self.triStrips.addConsecutiveVertices(row - verts + 2, 1)
self.triStrips.addConsecutiveVertices(row + 2, 1)
# RIGHT: new-bottom-right, bottom-right
self.triStrips.addConsecutiveVertices(row - verts + 1, 1)
self.triStrips.addConsecutiveVertices(row + 1, 1)
# BOTTOM: new-bottom-left, bottom-left
self.triStrips.addConsecutiveVertices(row - verts, 1)
self.triStrips.addConsecutiveVertices(row, 1)
# LEFT: new top-left, top-left
self.triStrips.addConsecutiveVertices(row - verts + 3, 1)
self.triStrips.addConsecutiveVertices(row + 3, 1)
self.triStrips.closePrimitive()
return verts * 4
def completeQuadPath(self):
self.geomNode.addGeom(self.geom)
if self.triStrips.getNumPrimitives() == 0:
return
floorMesh = CollisionFloorMesh()
vertexReader = GeomVertexReader(self.vertexData, 'vertex')
tris = self.triStrips.decompose()
print "Decomposed prims:",tris.getNumPrimitives()
p = 0
for i in range(tris.getNumPrimitives()):
v0 = tris.getPrimitiveStart(i)
ve = tris.getPrimitiveEnd(i)
if v0 < ve:
vertexReader.setRow(tris.getVertex(v0))
floorMesh.addVertex(Point3(vertexReader.getData3f()))
vertexReader.setRow(tris.getVertex(v0+1))
floorMesh.addVertex(Point3(vertexReader.getData3f()))
vertexReader.setRow(tris.getVertex(v0+2))
floorMesh.addVertex(Point3(vertexReader.getData3f()))
floorMesh.addTriangle(p, p+1, p+2)
p += 3
self.floorCollNode.addSolid(floorMesh)
def completeTunnelPath(self):
self.geomNode.addGeom(self.geom)
if self.triStrips.getNumPrimitives() == 0:
return
floorMesh = CollisionFloorMesh()
vertexReader = GeomVertexReader(self.vertexData, 'vertex')
print "Original prims:",self.triStrips.getNumPrimitives()
p = 0
for i in range(self.triStrips.getNumPrimitives()):
v0 = self.triStrips.getPrimitiveStart(i)
ve = self.triStrips.getPrimitiveEnd(i)
j = v0 + 4
# add the bottom triangles
vertexReader.setRow(self.triStrips.getVertex(j))
floorMesh.addVertex(Point3(vertexReader.getData3f()))
vertexReader.setRow(self.triStrips.getVertex(j+1))
floorMesh.addVertex(Point3(vertexReader.getData3f()))
vertexReader.setRow(self.triStrips.getVertex(j+2))
floorMesh.addVertex(Point3(vertexReader.getData3f()))
floorMesh.addTriangle(p, p+1, p+2)
vertexReader.setRow(self.triStrips.getVertex(j+3))
floorMesh.addVertex(Point3(vertexReader.getData3f()))
floorMesh.addTriangle(p+1, p+3, p+2)
p += 4
# this adds every triangle, but is not appropriate for a closed path
# tris = self.triStrips.decompose()
# print "Decomposed prims:",tris.getNumPrimitives()
# p = 0
# for i in range(tris.getNumPrimitives()):
# v0 = tris.getPrimitiveStart(i)
# ve = tris.getPrimitiveEnd(i)
# if v0 < ve:
# vertexReader.setRow(tris.getVertex(v0))
# floorMesh.addVertex(Point3(vertexReader.getData3f()))
# vertexReader.setRow(tris.getVertex(v0+1))
# floorMesh.addVertex(Point3(vertexReader.getData3f()))
# vertexReader.setRow(tris.getVertex(v0+2))
# floorMesh.addVertex(Point3(vertexReader.getData3f()))
# floorMesh.addTriangle(p, p+1, p+2)
# p += 3
self.floorCollNode.addSolid(floorMesh)
def updatePhysics(self, task):
pos = self.fpscamera.getPos()
self.info.setText("Position: {0}, {1}, {2}".format(int(pos.x*100)/100., int(pos.y*100)/100., int(pos.z)/100.))
return task.cont
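# Standalone sketch of the offset math described in drawQuadRow() and
# drawTunnelBoundary(): for a normalized direction (ix, iy), the perpendicular
# is (-iy, ix), so the left/right edges of a path segment are found by stepping
# sideways along it.  Plain tuples are used here instead of panda3d's Vec3.
def _path_edges_example(point, into, width):
    x, y, z = point
    ix, iy, _ = into
    left = (x - iy * width, y + ix * width, z)
    right = (x + iy * width, y - ix * width, z)
    return left, right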
app = MyApp()
app.run()
|
mit
| -1,020,509,209,721,139,600 | 37.015086 | 132 | 0.586541 | false |
vnsofthe/odoo-dev
|
vnsoft/all2xml.py
|
1
|
5860
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#from lxml import etree
import pymongo
try:
import xml.etree.cElementTree as etree
except ImportError:
import xml.etree.ElementTree as etree
import os,sys
import base64
import Image
import re
def str2trans(s):
if not s:return ""
if isinstance(s,(long,int,float)):s=str(s)
s = s.replace(u"{", "\\{").replace(u"}", "\\}").replace("%","{\%}").replace("\n","\n\n").replace(u"Ⅰ","\\RNum{1}").replace(u"Ⅱ","\\RNum{2}").replace(u"Ⅲ","\\RNum{3}").replace(u"Ⅳ","\\RNum{4}").replace(u"Ⅴ","\\RNum{5}").replace(u"Ⅵ","\\RNum{6}").replace(u"Ⅶ","\\RNum{7}").replace(u"Ⅷ","\\RNum{8}").replace(u"Ⅸ","\\RNum{9}").replace(u"Ⅹ","\\RNum{10}").replace(u"Ⅺ","\\RNum{11}").replace(u"Ⅻ","\\RNum{12}").replace(u"ⅩⅢ","\\RNum{13}").replace(u"α", "\\textalpha ").replace(u"β", "\\textbeta ").replace(u"γ", "\\textgamma ").replace(u"μ", "\\textmu ").replace(u"δ", "\\textdelta ").replace(u"κ", "\\textkappa ").replace(u"$", "\\$").replace(u"≥","$\\geq$").replace(u"≤", "$\\leq$").replace(u"~", "\\textasciitilde ").replace(u"_", "\\_").replace(u"#", "\\#").replace(u"/", "{/}")
rgx = re.findall("\^(\D+)", s)
for rg in rgx:
s = s.replace("^", "\\^{}")
rgx = re.findall("\^(\d+)", s)
for rg in rgx:
ori = "^" + rg
nwi = "$^{" + rg + "}$"
# print ori
# print nwi
s = s.replace(ori, nwi)
s = s.replace(u"【",u"\\vskip.6\\baselineskip\\noindent {\\bfseries\\wuhao 【")
s = s.replace(u"】",u"】}\\smallskip")
s = s.replace(u"腘", u"\mbox{\\scalebox{0.5}[1]{月 }\\kern-.15em\\scalebox{0.75}[1]{国}}")
return s
def image_resize(f):
img = Image.open(f)
width,height = img.size
if width>250 or height>250:
if width>height:
newbox=(250, 250 * height / width)
else:
newbox=(250*width/height, 250)
targetImg = img.resize(
newbox,
Image.ANTIALIAS
)
else:
targetImg = img.resize((width,height),Image.ANTIALIAS)
os.remove(f)
new=".".join(f.split(".")[:-1])+".jpg"
if f.split(".")[-1]=='gif':targetImg =targetImg.convert("RGB")
targetImg.save(new, "jpeg")
return new
def dict2file(pd,pm,lang):
if not pd.has_key("_id"):return
opt = etree.Element("opt")
etree.SubElement(opt, "id").text=pd.get("_id")
etree.SubElement(opt, "oriid").text=pd.get("oriid")
lang_element = etree.SubElement(opt, lang)
check_dir(os.path.join(sys.argv[5],pd["category"]))
    l_path = check_dir(os.path.join(os.path.join(sys.argv[5],pd["category"]),lang)) # make sure the directory exists
# print pd.get("_id")
for l in pm:
for k,v in l.items():
if k=="sex":
etree.SubElement(lang_element,k).text=str2trans(pd[k])
elif k=="pic":
pic_path= os.path.join(l_path,"pic")
check_dir(pic_path)
if pd.get(lang).get("pic",{}).get("base64"):
imgname= pic_path+"/section_"+pd.get("oriid").replace("'","").replace("`","").replace(" ","")+"."+(pd.get(lang).get("pic").get("mimetype").split("/")[1])
fimg = open(imgname,"wb")
pic_base64 = pd.get(lang).get("pic").get("base64")
fimg.write(pic_base64.decode('base64','strict'))
fimg.close()
etree.SubElement(lang_element,"pic").text=image_resize(imgname).split('/')[-1]
else:
etree.SubElement(lang_element,"pic")
elif isinstance(v,(type(u""),)):
etree.SubElement(lang_element,k).text=str2trans(pd[lang].get(k, ""))
elif isinstance(v,(list,)):
ele = etree.SubElement(lang_element,k)
for e in v:
if e.has_key("node"):continue
etree.SubElement(ele,e.keys()[0]).text = str2trans(pd.get(lang,{}).get(k,{}).get(e.keys()[0],""))
if pd.get(lang,{}).has_key("responses"):
responses = etree.SubElement(lang_element,"responses")
for i in pd.get(lang,{}).get("responses",[]):
response = etree.SubElement(responses,"response")
for k,v in i.items():
etree.SubElement(response,k).text=v
etree.SubElement(opt, "orititle").text=pd.get(lang).get("title")
xml_path= os.path.join(l_path,"section")
check_dir(xml_path)
f=open(xml_path+"/section_"+pd.get("oriid").replace("'","").replace("`","").replace(" ","")+".xml","w")
f.write(etree.tostring(opt, encoding="utf-8", method="xml"))
f.close()
def check_dir(path_name):
if not os.path.exists(path_name):
os.makedirs(path_name)
return path_name
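# Small sketch of the superscript handling performed in str2trans(): numeric
# exponents such as "m^2" become LaTeX superscripts, while "^" followed by
# non-digits is escaped.  The sample string below is illustrative only.
def _superscript_example():
    sample = u"area 5 m^2"
    for rg in re.findall("\^(\d+)", sample):
        sample = sample.replace("^" + rg, "$^{" + rg + "}$")
    return sample  # u"area 5 m$^{2}$"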
if __name__=="__main__":
if(len(sys.argv)!=6):
print "参数不正确。\n格式:命令 客户 语言 套系 套餐 输出目录"
sys.exit(-1)
conn = pymongo.Connection("10.0.0.8",27021)
db = conn.susceptibility
content = db.products.find({"belongsto":sys.argv[1].decode("utf-8")})
for i in content:
if not i.get(sys.argv[2].decode("utf-8")):
            continue # skip if the language does not match
if(i[sys.argv[2].decode("utf-8")]["name"]!=sys.argv[3].decode("utf-8")):
            continue # skip if the series name does not match
for k,v in i[sys.argv[2].decode("utf-8")]["sets"].items():
if(v["name"]!=sys.argv[4].decode("utf-8")):
                continue # skip if the package does not match
for k1,v1 in v["list"].items():
pd = db.prodata.find_one({"_id":v1})
pagemode = pd.get("pagemode")
pm = db.pagemodes.find_one({"_id":pagemode})
dict2file(pd,pm.get("itms"),sys.argv[2].decode("utf-8"))
|
agpl-3.0
| -6,811,745,771,448,243,000 | 42.282443 | 779 | 0.529101 | false |
mancellin/capytaine
|
docs/user_manual/examples/finite_depth_cylinder.py
|
1
|
1678
|
#!/usr/bin/env python
import logging
import numpy as np
import capytaine as cpt
# Set up logging
logging.basicConfig(level=logging.INFO,
format="%(levelname)s:\t%(message)s")
# Initialize floating body by generating a geometric mesh
cylinder = cpt.HorizontalCylinder(
length=10.0, radius=1.0, # Dimensions
center=(0, 0, -2), # Position
nr=5, nx=40, ntheta=20, # Fineness of the mesh
)
# Define a degree of freedom. The keyword "Heave"
# is recognized by the code and the vertical translation
# motion is automatically defined.
cylinder.add_translation_dof(name="Heave")
# Define the range of water depth
depth_range = list(range(5, 25, 2)) + [np.infty]
# Set up the problems: we will solve a radiation problem for each
# water depth:
problems = [
cpt.RadiationProblem(body=cylinder, sea_bottom=-depth, omega=2.0)
for depth in depth_range
]
# Water density, gravity and radiating dof have not been specified.
# Default values are used. (For the radiating dof, the default value
# is usually the first one that has been defined. Here only one has
# been defined.)
# Solve all radiation problems
solver = cpt.BEMSolver(engine=cpt.HierarchicalToeplitzMatrixEngine())
results = [solver.solve(pb) for pb in sorted(problems)]
# Gather the computed added mass into a labelled array.
data = cpt.assemble_dataset(results)
# Plot the added mass of each dofs as a function of the water depth.
import matplotlib.pyplot as plt
plt.figure()
plt.plot(
depth_range,
data['added_mass'].sel(omega=2.0, radiating_dof="Heave", influenced_dof="Heave"),
marker="s",
)
plt.xlabel('water depth')
plt.ylabel('added mass')
plt.show()
|
gpl-3.0
| 1,316,753,893,773,796,900 | 30.074074 | 85 | 0.721692 | false |
kpj/youtubeAnalyzer
|
youtubeCommentAnalyzer.py
|
1
|
3344
|
import sys
import re
import urllib
import os, os.path
import json
from bs4 import BeautifulSoup
from pprint import pprint
import filters
class YoutubeChannel(object):
def __init__(self, name):
self.id = name
self.chunk_size = 50
self.videos = None
def get_all_vids(self, index=1):
com_data_url = "http://gdata.youtube.com/feeds/api/users/%s/uploads?max-results=%i&start-index=%i&alt=json" % (self.id, self.chunk_size, index)
cont = urllib.urlopen(com_data_url).read()
data = json.loads(cont)
vids = data["feed"]["entry"]
more_vids = list()
if len(vids) == self.chunk_size:
# grab 'em all
more_vids = self.get_all_vids(index + self.chunk_size)
return vids + more_vids
def parse_videos(self):
raw_videos = self.get_all_vids()
res = []
for v in raw_videos:
cur = dict()
cur["title"] = v["title"]["$t"]
cur["rating"] = v["gd$rating"]["average"]
cur["views"] = v["yt$statistics"]["viewCount"]
cur["published"] = v["published"]["$t"]
res.append(cur)
return res
def apply_filter(self, filter, *args):
if self.videos == None:
self.videos = self.parse_videos()
return filter.apply(self.videos, *args)
class YoutubeVideo(object):
def __init__(self, id):
self.id = id
self.comments = None
self.chunk_size = 50
def get_all_coms(self, index=1):
com_data_url = "https://gdata.youtube.com/feeds/api/videos/%s/comments?orderby=published&alt=json&max-results=%i&start-index=%i" % (self.id, self.chunk_size, index)
cont = urllib.urlopen(com_data_url).read()
data = json.loads(cont)
coms = data["feed"]["entry"]
more_coms = list()
if len(coms) == self.chunk_size:
# grab 'em all
more_coms = self.get_all_coms(index + self.chunk_size)
return coms + more_coms
def parse_comments(self):
raw_comments = self.get_all_coms()
res = []
for c in raw_comments:
cur = dict()
cur["text"] = c["content"]["$t"]
cur["author"] = c["author"][0]["name"]["$t"]
res.append(cur)
return res
def applyFilter(self, filter, *args):
if self.comments == None:
self.comments = self.parse_comments()
return filter.apply(self.comments, *args)
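# Sketch of the paging pattern used by get_all_vids()/get_all_coms(): the GData
# feed is fetched in chunks of chunk_size, and another request is issued only
# while a full chunk keeps coming back.  The URL template below is illustrative.
def _fetch_all_example(url_template, chunk_size=50):
    items, index = [], 1
    while True:
        data = json.loads(urllib.urlopen(url_template % (chunk_size, index)).read())
        chunk = data["feed"]["entry"]
        items.extend(chunk)
        if len(chunk) < chunk_size:
            return items
        index += chunk_size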
if __name__ == "__main__":
if len(sys.argv) != 2:
print "Usage: %s <video id/channel name>" % sys.argv[0]
sys.exit(1)
query = re.findall(r'www.youtube.com/watch\?v=(.*)', sys.argv[1])
if len(query) == 1:
vid = YoutubeVideo(query[0])
pprint(vid.applyFilter(filters.all_caps))
print vid.applyFilter(filters.average_comment_length)
pprint(vid.applyFilter(filters.scan_for_regexp, "[Mm]inecraft"))
#pprint(vid.applyFilter(filters.highest_vote))
#pprint(vid.applyFilter(filters.show_downvoted))
pprint(vid.applyFilter(filters.scan_wordlist, os.path.join("filters", "data", "smileys.txt"), True))
elif len(query) == 0:
chan = YoutubeChannel(sys.argv[1])
pprint(chan.apply_filter(filters.gameone))
else:
print "Weird input!"
|
apache-2.0
| 1,062,261,788,670,946,200 | 28.59292 | 172 | 0.570275 | false |
edinburghlivinglab/dds-notebooks
|
setup.py
|
1
|
3837
|
from setuptools import setup, find_packages # Always prefer setuptools over distutils
from codecs import open # To use a consistent encoding
from os import path
from pip.req import parse_requirements
here = path.abspath(path.dirname(__file__))
install_reqs = parse_requirements(here + '/requirements.txt', session=False)
reqs = [str(ir.req) for ir in install_reqs]
# Get the long description from the relevant file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='dds-notebooks',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/development.html#single-sourcing-the-version
version='1.0',
description='interactive ipython notebooks + server for the DDS course',
long_description=long_description,
# The project's main homepage.
url='https://github.com/edinburghlivinglab/dds-notebooks.git',
# Author details
author='Ewan Klein, Gavin Gray, Francisco Vargas',
author_email='[email protected]',
# Choose your license
license='CC0 1.0 Universal',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
        'Intended Audience :: University Students, possibly with non-numerical abilities',
'Topic :: Data, Design and Society',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: CC0 1.0 Universal',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3.4',
],
# What does your project relate to?
    keywords='data visualization, edinburghlivinglab, edinburgh council, data science, data processing',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=[ 'test']),
# List run-time dependencies here. These will be installed by pip when your
# project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/technical.html#install-requires-vs-requirements-files
install_requires=reqs,
# List additional groups of dependencies here (e.g. development dependencies).
# You can install these using the following syntax, for example:
# $ pip install -e .[dev,test]
extras_require = {
'dev': ['check-manifest']
},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
# package_data={
# 'sample': ['package_data.dat'],
# },
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages.
# see http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
# entry_points={
# 'console_scripts': [
# 'sample=sample:main',
# ],
# },
)
|
cc0-1.0
| -2,276,991,637,151,383,800 | 38.979167 | 103 | 0.681261 | false |
hackhowtofaq/xxdiff
|
lib/python/xxdiff/scripts/match.py
|
1
|
2735
|
# This file is part of the xxdiff package. See xxdiff for license and details.
"""xx-match [<options>] <file> <file> <file> <file> ...
Wrapper script for xxdiff that implements an invocation syntax like this:
xx-match dir1/*.c dir2/*.c
The way we implement the heuristic to match up files is simple: we match up the
basenames of the files. If there are exactly two or three files in the list, we
invoke an xxdiff on these files. The files are always invoked in the order that
they show up on the command-line.
Another way to achieving similar behaviour is to use a more generic file
matching script (see 'match-files' by the same author) along with xargs, e.g.::
match-files dir1/*.c dir2/*.c | xargs -n2 xxdiff
Using xx-match has the advantage that it works with matches of 2 or 3 files,
and also spawns xxdiff on single files (with an empty side).
Note: many users do not seem to understand why such a script is necessary. One
must realize a few things: 1) globbing pattern expansion is done by the shell,
before invocation of xxdiff, thus xxdiff cannot be aware of the globbing
patterns (unless it itself would support the heuristic mentioned here, along
with the user quoting the globbing patterns to avoid shell expansion); 2) the
solution to this problem using expanded file lists is not trivial, the input
(and its ordering) can encompass a lot more possibilities than immediately
appears. Nevertheless, we built this functionality/heuristic in this script,
because it may be useful to some.
See 'match-files' script by the same author for a more generic implementation of
this file matching heuristic.
"""
__author__ = "Martin Blais <[email protected]>"
__depends__ = ['xxdiff', 'Python-2.4']
# stdlib imports.
import sys, os
# xxdiff imports.
import xxdiff.scripts
import xxdiff.invoke
def match_main():
"""
Main program for match script.
"""
# Get options
opts, files = xxdiff.scripts.passthruopts(sys.argv)
# Make sure that we display the commands.
class Opts:
xxdiff_verbose = True
# Build map of basenames
bnmap = {}
for fn in files:
dn, bn = os.path.split(fn)
bnmap.setdefault(bn, []).append(fn)
# Invoke xxdiff's on alphabetical order of the basenames
bnkeys = bnmap.keys()
bnkeys.sort()
for bn in bnkeys:
filenames = bnmap[bn]
if len(filenames) == 2 or len(filenames) == 3:
extra = []
elif len(filenames) == 1:
extra = ['--single']
else:
# ignore the files.
continue
xxdiff.invoke.xxdiff_display(Opts, *(extra + filenames))
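def _group_by_basename_example(filenames):
    """Illustrative sketch of the matching heuristic described in the module
    docstring: group an expanded file list by basename, preserving the
    command-line order within each group.  Not used by the script itself."""
    groups = {}
    for fn in filenames:
        groups.setdefault(os.path.basename(fn), []).append(fn)
    return groups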
def main():
xxdiff.scripts.interruptible_main(match_main)
if __name__ == '__main__':
main()
|
gpl-2.0
| -7,376,531,578,382,240,000 | 30.802326 | 80 | 0.693967 | false |
khchine5/lino-welfare
|
lino_welfare/modlib/client_vouchers/models.py
|
1
|
4009
|
# -*- coding: UTF-8 -*-
# Copyright 2012-2015 Luc Saffre
# This file is part of Lino Welfare.
#
# Lino Welfare is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Lino Welfare is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with Lino Welfare. If not, see
# <http://www.gnu.org/licenses/>.
"""Database models for `lino_welfare.modlib.client_vouchers`.
See also :ref:`welfare.specs.ledger`.
"""
import logging
logger = logging.getLogger(__name__)
from decimal import Decimal
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import string_concat
from lino.api import dd
from lino_xl.lib.ledger.mixins import (
PartnerRelated, ProjectRelated, AccountVoucherItem, Matching)
from lino_xl.lib.ledger.models import Voucher
from lino_xl.lib.ledger.roles import LedgerUser
class ClientVoucher(Voucher, ProjectRelated):
class Meta:
app_label = 'client_vouchers'
verbose_name = _("Client voucher")
verbose_name_plural = _("Client vouchers")
amount = dd.PriceField(_("Amount"), blank=True, null=True)
def compute_totals(self):
if self.pk is None:
return
base = Decimal()
for i in self.items.all():
if i.amount is not None:
base += i.amount
self.amount = base
def get_vat_sums(self):
sums_dict = dict()
def book(account, amount):
if account in sums_dict:
sums_dict[account] += amount
else:
sums_dict[account] = amount
tt = self.get_trade_type()
for i in self.items.order_by('seqno'):
if i.amount:
b = i.get_base_account(tt)
if b is None:
raise Exception(
"No base account for %s (amount is %r)" % (
i, i.amount))
book(b, i.amount)
return sums_dict
def get_wanted_movements(self):
sums_dict = self.get_vat_sums()
#~ logger.info("20120901 get_wanted_movements %s",sums_dict)
sum = Decimal()
for acc, m in sums_dict.items():
if m:
yield self.create_movement(
None, acc, not self.journal.dc, m)
sum += m
acc = self.get_trade_type().get_main_account()
if acc is not None:
yield self.create_movement(
None, acc, self.journal.dc, sum,
partner=self.partner,
project=self.project,
match=self.match)
def full_clean(self, *args, **kw):
self.compute_totals()
super(ClientVoucher, self).full_clean(*args, **kw)
def before_state_change(self, ar, old, new):
if new.name == 'registered':
self.compute_totals()
elif new.name == 'draft':
pass
super(ClientVoucher, self).before_state_change(ar, old, new)
class VoucherItem(Matching, PartnerRelated, AccountVoucherItem):
"""An item of an :class:`ClientVoucher`."""
class Meta:
app_label = 'client_vouchers'
verbose_name = _("Client voucher item")
verbose_name_plural = _("Client voucher items")
voucher = dd.ForeignKey(
'client_vouchers.ClientVoucher', related_name='items')
amount = dd.PriceField(_("Amount"), blank=True, null=True)
@dd.chooser()
def match_choices(cls, voucher, partner):
return cls.get_match_choices(voucher.journal, partner)
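# Pure-Python sketch of the bookkeeping idea behind get_vat_sums() and
# get_wanted_movements(): item amounts are accumulated per account, and one
# balancing movement for the grand total goes to the trade type's main account.
# The account names and amounts below are illustrative.
def _booking_example():
    from collections import defaultdict
    sums = defaultdict(Decimal)
    for account, amount in (("expenses", Decimal("10.00")),
                            ("expenses", Decimal("2.50")),
                            ("fees", Decimal("1.00"))):
        sums[account] += amount
    total = sum(sums.values(), Decimal())
    return dict(sums), total  # ({'expenses': Decimal('12.50'), 'fees': Decimal('1.00')}, Decimal('13.50'))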
from .ui import *
|
agpl-3.0
| -7,629,520,192,703,154,000 | 30.320313 | 70 | 0.613619 | false |
amoskong/scylla-cluster-tests
|
sdcm/microbenchmarking.py
|
1
|
24765
|
#!/usr/bin/env python2
import os
import sys
import logging
import datetime
import json
import argparse
import socket
import tempfile
from collections import defaultdict
# disable InsecureRequestWarning
import urllib3
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from sdcm.results_analyze import BaseResultsAnalyzer # pylint: disable=wrong-import-position
from sdcm.utils.log import setup_stdout_logger # pylint: disable=wrong-import-position
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
setup_stdout_logger()
LOGGER = logging.getLogger("microbenchmarking")
LOGGER.setLevel(logging.DEBUG)
class LargeNumberOfDatasetsException(Exception):
def __init__(self, msg, *args, **kwargs):
super(LargeNumberOfDatasetsException, self).__init__(*args, **kwargs)
self.message = msg
def __str__(self):
return "MBM: {0.message}".format(self)
class EmptyResultFolder(Exception):
def __init__(self, msg, *args, **kwargs):
super(EmptyResultFolder, self).__init__(*args, **kwargs)
self.message = msg
def __str__(self):
return "MBM: {0.message}".format(self)
class MicroBenchmarkingResultsAnalyzer(BaseResultsAnalyzer):
allowed_stats = ('Current', 'Stats', 'Last, commit, date', 'Diff last [%]', 'Best, commit, date', 'Diff best [%]')
higher_better = ('frag/s',)
lower_better = ('avg aio',)
submetrics = {'frag/s': ['mad f/s', 'max f/s', 'min f/s']}
def __init__(self, email_recipients, db_version=None):
super(MicroBenchmarkingResultsAnalyzer, self).__init__(
es_index="microbenchmarking",
es_doc_type="microbenchmark",
send_email=True,
email_recipients=email_recipients,
email_template_fp="results_microbenchmark.html",
query_limit=10000,
logger=LOGGER
)
self.hostname = socket.gethostname()
self._run_date_pattern = "%Y-%m-%d_%H:%M:%S"
self.test_run_date = datetime.datetime.now().strftime(self._run_date_pattern)
self.db_version = db_version
self.build_url = os.getenv('BUILD_URL', "")
self.cur_version_info = None
self.metrics = self.higher_better + self.lower_better
def check_regression(self, current_results): # pylint: disable=arguments-differ
# pylint: disable=too-many-locals
if not current_results:
return {}
start_date = datetime.datetime.strptime("2019-01-01", "%Y-%m-%d")
filter_path = (
"hits.hits._id", # '2018-04-02_18:36:47_large-partition-skips_[64-32.1)'
"hits.hits._source.test_args", # [64-32.1)
"hits.hits.test_group_properties.name", # large-partition-skips
"hits.hits._source.hostname", # 'godzilla.cloudius-systems.com'
"hits.hits._source.test_run_date",
"hits.hits._source.test_group_properties.name", # large-partition-skips
"hits.hits._source.results.stats.aio",
"hits.hits._source.results.stats.avg aio",
"hits.hits._source.results.stats.cpu",
"hits.hits._source.results.stats.time (s)",
"hits.hits._source.results.stats.frag/s",
"hits.hits._source.versions",
"hits.hits._source.excluded"
)
self.db_version = self.cur_version_info["version"]
tests_filtered = self._es.search(index=self._es_index, filter_path=filter_path, size=self._limit, # pylint: disable=unexpected-keyword-arg
q="hostname:'%s' \
AND versions.scylla-server.version:%s* \
AND ((-_exists_:excluded) OR (excluded:false))" % (self.hostname, # pylint: disable=unexpected-keyword-arg
self.db_version[:3]))
assert tests_filtered, "No results from DB"
results = []
for doc in tests_filtered['hits']['hits']:
doc_date = datetime.datetime.strptime(
doc['_source']['versions']['scylla-server']['run_date_time'], "%Y-%m-%d %H:%M:%S")
if doc_date > start_date:
results.append(doc)
sorted_by_type = defaultdict(list)
for res in results:
test_type = "%s_%s" % (res["_source"]["test_group_properties"]["name"],
res["_source"]["test_args"])
sorted_by_type[test_type].append(res)
report_results = defaultdict(dict)
# report_results = {
# "large-partition-skips_1-0.1": {
# "aio":{
# "Current":
# "Last":
# "Diff last [%]":
# "Best":
# "Diff best [%]":
# },
# "frag/s":{
# "Current":
# "Stats": { submetrica: }
# "Last":
# "Diff last [%]":
# "Best":
# "Diff best [%]":
# },
# }
def set_results_for(current_result, metrica):
list_of_results_from_db.sort(key=lambda x: datetime.datetime.strptime(x["_source"]["test_run_date"],
self._run_date_pattern))
def get_metrica_val(val):
metrica_val = val["_source"]["results"]["stats"].get(metrica, None)
return float(metrica_val) if metrica_val else None
def get_commit_id(val):
return val["_source"]['versions']['scylla-server']['commit_id']
def get_commit_date(val):
return datetime.datetime.strptime(val["_source"]['versions']['scylla-server']['date'],
"%Y%m%d").date()
def get_best_result_for_metrica():
# build new list with results where analyzing metrica is not None
# metrica s with result 0, will be included
list_for_searching = [el for el in list_of_results_from_db if get_metrica_val(el) is not None]
# if list is empty ( which could be happened for new metric),
# then return first element in list, because result will be None
if not list_for_searching:
return list_of_results_from_db[0]
if metrica in self.higher_better:
return max(list_for_searching, key=get_metrica_val)
elif metrica in self.lower_better:
return min(list_for_searching, key=get_metrica_val)
else:
return list_of_results_from_db[0]
def count_diff(cur_val, dif_val):
try:
cur_val = float(cur_val) if cur_val else None
except ValueError:
cur_val = None
if not cur_val:
return None
if dif_val is None:
return None
ret_dif = ((cur_val - dif_val) / dif_val) * 100 if dif_val > 0 else cur_val * 100
if metrica in self.higher_better:
ret_dif = -ret_dif
ret_dif = -ret_dif if ret_dif != 0 else 0
return ret_dif
def get_diffs(cur_val, best_result_val, last_val):
# if last result doesn't contain the metric
# assign 0 to last value to count formula of changes
diff_best = count_diff(cur_val, best_result_val)
diff_last = count_diff(cur_val, last_val)
return (diff_last, diff_best)
if len(list_of_results_from_db) > 1 and get_commit_id(list_of_results_from_db[-1]) == self.cur_version_info["commit_id"]:
last_idx = -2
else: # when current results are on disk but db is not updated
last_idx = -1
cur_val = current_result["results"]["stats"].get(metrica, None)
if cur_val:
cur_val = float(cur_val)
last_val = get_metrica_val(list_of_results_from_db[last_idx])
last_commit = get_commit_id(list_of_results_from_db[last_idx])
last_commit_date = get_commit_date(list_of_results_from_db[last_idx])
best_result = get_best_result_for_metrica()
best_result_val = get_metrica_val(best_result)
best_result_commit = get_commit_id(best_result)
best_commit_date = get_commit_date(best_result)
diff_last, diff_best = get_diffs(cur_val, best_result_val, last_val)
stats = {
"Current": cur_val,
"Last, commit, date": (last_val, last_commit, last_commit_date),
"Best, commit, date": (best_result_val, best_result_commit, best_commit_date),
"Diff last [%]": diff_last, # diff in percents
"Diff best [%]": diff_best,
"has_regression": False,
"has_improvement": False,
}
if ((diff_last and diff_last < -5) or (diff_best and diff_best < -5)):
report_results[test_type]["has_diff"] = True
stats["has_regression"] = True
if (diff_last > 50 or diff_best > 50):
report_results[test_type]['has_improve'] = True
stats['has_improvement'] = True
report_results[test_type]["dataset_name"] = current_result['dataset_name']
report_results[test_type][metrica] = stats
def set_results_for_sub(current_result, metrica):
report_results[test_type][metrica].update({'Stats': {}})
for submetrica in self.submetrics.get(metrica):
submetrica_cur_val = float(current_result["results"]["stats"][submetrica])
report_results[test_type][metrica]['Stats'].update({submetrica: submetrica_cur_val})
for test_type, current_result in current_results.iteritems():
list_of_results_from_db = sorted_by_type[test_type]
if not list_of_results_from_db:
self.log.warning("No results for '%s' in DB. Skipping", test_type)
continue
for metrica in self.metrics:
self.log.info("Analyzing {test_type}:{metrica}".format(**locals()))
set_results_for(current_result, metrica)
if metrica in self.submetrics.keys():
set_results_for_sub(current_result, metrica)
return report_results
def send_html_report(self, report_results, html_report_path=None, send=True):
subject = "Microbenchmarks - Performance Regression - %s" % self.test_run_date
dashboard_path = "app/kibana#/dashboard/aee9b370-09db-11e9-a976-2fe0f5890cd0?_g=(filters%3A!())"
for_render = {
"subject": subject,
"testrun_id": self.test_run_date,
"results": report_results,
"stats_names": self.allowed_stats,
"metrics": self.metrics,
"kibana_url": self.gen_kibana_dashboard_url(dashboard_path),
"build_url": self.build_url,
"full_report": True,
"hostname": self.hostname,
"test_version": self.cur_version_info
}
if html_report_path:
html_file_path = html_report_path
else:
html_file_path = tempfile.mkstemp(suffix=".html", prefix="microbenchmarking-")[1]
self.render_to_html(for_render, html_file_path=html_file_path)
for_render["full_report"] = False
summary_html = self.render_to_html(for_render)
if send:
return self.send_email(subject, summary_html, files=(html_file_path,))
else:
return html_file_path, summary_html
def get_results(self, results_path, update_db):
# pylint: disable=too-many-locals
bad_chars = " "
os.chdir(os.path.join(results_path, "perf_fast_forward_output"))
results = {}
for (fullpath, subdirs, files) in os.walk(os.getcwd()):
self.log.info(fullpath)
if (os.path.dirname(fullpath).endswith('perf_fast_forward_output') and
len(subdirs) > 1):
raise LargeNumberOfDatasetsException('Test set {} has more than one datasets: {}'.format(
os.path.basename(fullpath),
subdirs))
if not subdirs:
dataset_name = os.path.basename(fullpath)
self.log.info('Dataset name: {}'.format(dataset_name))
dirname = os.path.basename(os.path.dirname(fullpath))
self.log.info("Test set: {}".format(dirname))
for filename in files:
if filename.startswith('.'):
continue
new_filename = "".join(c for c in filename if c not in bad_chars)
test_args = os.path.splitext(new_filename)[0]
test_type = dirname + "_" + test_args
json_path = os.path.join(dirname, dataset_name, filename)
with open(json_path, 'r') as json_file:
self.log.info("Reading: %s", json_path)
datastore = json.load(json_file)
datastore.update({'hostname': self.hostname,
'test_args': test_args,
'test_run_date': self.test_run_date,
'dataset_name': dataset_name,
'excluded': False
})
if update_db:
self._es.create_doc(index=self._es_index, doc_type=self._es_doc_type,
doc_id="%s_%s" % (self.test_run_date, test_type), body=datastore)
results[test_type] = datastore
if not results:
raise EmptyResultFolder("perf_fast_forward_output folder is empty")
self.cur_version_info = results[results.keys()[0]]['versions']['scylla-server']
return results
def exclude_test_run(self, testrun_id=''):
"""Exclude test results by testrun id
        Filter test results by hostname, scylla version and the test_run_date field
        and mark all found results with the flag excluded: True
Keyword Arguments:
testrun_id {str} -- testrun id as value of field test_run_date (default: {''})
"""
if not testrun_id or not self.db_version:
self.log.info("Nothing to exclude")
return
self.log.info('Exclude testrun {} from results'.format(testrun_id))
filter_path = (
"hits.hits._id", # '2018-04-02_18:36:47_large-partition-skips_[64-32.1)'
"hits.hits._source.hostname", # 'godzilla.cloudius-systems.com'
"hits.hits._source.test_run_date",
)
testrun_results = self._es.search(index=self._es_index, filter_path=filter_path, size=self._limit, # pylint: disable=unexpected-keyword-arg
q="hostname:'%s' AND versions.scylla-server.version:%s* AND test_run_date:\"%s\"" % (self.hostname,
self.db_version[:3],
testrun_id))
if not testrun_results:
self.log.info("Nothing to exclude")
return
for res in testrun_results['hits']['hits']:
self.log.info(res['_id'])
self.log.info(res['_source']['test_run_date'])
self._es.update_doc(index=self._es_index,
doc_type=self._es_doc_type,
doc_id=res['_id'],
body={'excluded': True})
def exclude_by_test_id(self, test_id=''):
"""Exclude test result by id
Filter test result by id (ex. 2018-10-29_18:58:51_large-partition-single-key-slice_begin_incl_0-500000_end_incl.1)
and mark the test result with flag excluded: True
Keyword Arguments:
test_id {str} -- test id from field _id (default: {''})
"""
if not test_id or not self.db_version:
self.log.info("Nothing to exclude")
return
self.log.info('Exclude test id {} from results'.format(test_id))
doc = self._es.get_doc(index=self._es_index, doc_id=test_id)
if doc:
self._es.update_doc(index=self._es_index,
doc_type=self._es_doc_type,
doc_id=doc['_id'],
body={'excluded': True})
else:
self.log.info("Nothing to exclude")
return
def exclude_before_date(self, date=''):
"""Exclude all test results before date
        Query test results by hostname and scylla version,
        convert the string to a date object,
        filter all test results with versions.scylla-server.run_date_time before
date, and mark them with flag excluded: True
Keyword Arguments:
date {str} -- date in format YYYY-MM-DD or YYYY-MM-DD hh:mm:ss (default: {''})
"""
if not date and not self.db_version:
self.log.info("Nothing to exclude")
return
format_pattern = "%Y-%m-%d %H:%M:%S"
try:
date = datetime.datetime.strptime(date, '%Y-%m-%d')
except ValueError:
try:
date = datetime.datetime.strptime(date, format_pattern)
except ValueError:
self.log.error("Wrong format of parameter --before-date. Should be \"YYYY-MM-DD\" or \"YYYY-MM-DD hh:mm:ss\"")
return
filter_path = (
"hits.hits._id",
"hits.hits._source.hostname",
"hits.hits._source.versions.scylla-server.run_date_time"
)
self.log.info('Exclude tests before date {}'.format(date))
results = self._es.search(index=self._es_index, filter_path=filter_path, size=self._limit, # pylint: disable=unexpected-keyword-arg
q="hostname:'%s' AND versions.scylla-server.version:%s*" %
(self.hostname, self.db_version[:3]))
if not results:
self.log.info('Nothing to exclude')
return
before_date_results = []
for doc in results['hits']['hits']:
doc_date = datetime.datetime.strptime(
doc['_source']['versions']['scylla-server']['run_date_time'], format_pattern)
if doc_date < date:
before_date_results.append(doc)
for res in before_date_results:
self._es.update_doc(index=self._es_index,
doc_type=self._es_doc_type,
doc_id=res['_id'],
body={'excluded': True})
def exclude_testrun_by_commit_id(self, commit_id=None):
if not commit_id and not self.db_version:
self.log.info('Nothing to exclude')
return
filter_path = (
"hits.hits._id",
"hits.hits._source.hostname",
"hits.hits._source.versions.scylla-server.commit_id",
"hits.hits._source.test_run_date"
)
self.log.info('Exclude tests by commit id #{}'.format(commit_id))
results = self._es.search(index=self._es_index, filter_path=filter_path, size=self._limit, # pylint: disable=unexpected-keyword-arg
q="hostname:'{}' \
AND versions.scylla-server.version:{}*\
AND versions.scylla-server.commit_id:'{}'".format(self.hostname, self.db_version[:3], commit_id))
if not results:
            self.log.info('There are no testrun results for commit id #{}'.format(commit_id))
return
for doc in results['hits']['hits']:
self.log.info("Exlcude test: {}\nCommit: #{}\nRun Date time: {}\n".format(doc['_id'],
doc['_source']['versions']['scylla-server']['commit_id'],
doc['_source']['test_run_date']))
self._es.update_doc(index=self._es_index,
doc_type=self._es_doc_type,
doc_id=doc['_id'],
body={'excluded': True})
def main(args):
if args.mode == 'exclude':
mbra = MicroBenchmarkingResultsAnalyzer(email_recipients=None, db_version=args.db_version)
if args.testrun_id:
mbra.exclude_test_run(args.testrun_id)
if args.test_id:
mbra.exclude_by_test_id(args.test_id)
if args.before_date:
mbra.exclude_before_date(args.before_date)
if args.commit_id:
mbra.exclude_testrun_by_commit_id(args.commit_id)
if args.mode == 'check':
mbra = MicroBenchmarkingResultsAnalyzer(email_recipients=args.email_recipients.split(","))
results = mbra.get_results(results_path=args.results_path, update_db=args.update_db)
if results:
if args.hostname:
mbra.hostname = args.hostname
report_results = mbra.check_regression(results)
mbra.send_html_report(report_results, html_report_path=args.report_path)
else:
LOGGER.warning('Perf_fast_forward testrun is failed or not build results in json format')
sys.exit(1)
def parse_args():
parser = argparse.ArgumentParser(description="Microbencmarking stats utility \
for upload and analyze or exclude test result from future analyze")
subparser = parser.add_subparsers(dest='mode',
                                      title='Microbenchmarking utility modes',
                                      description='Microbenchmarking can be run in two modes defined by subcommand',
metavar='Modes',
help='To see subcommand help use microbenchmarking.py subcommand -h')
exclude = subparser.add_parser('exclude', help='Exclude results by testrun, testid or date')
exclude_group = exclude.add_mutually_exclusive_group(required=True)
exclude_group.add_argument('--testrun-id', action='store', default='',
help='Exclude test results for testrun id')
exclude_group.add_argument('--test-id', action='store', default='',
help='Exclude test result by id')
exclude_group.add_argument('--before-date', action='store', default='',
help='Exclude all test results before date of run stored in field versions.scylla-server.run_date_time.\
Value in format YYYY-MM-DD or YYYY-MM-DD hh:mm:ss')
exclude_group.add_argument('--commit-id', action='store', default='',
help='Exclude test run for specific commit id')
exclude.add_argument('--db-version', action='store', default='',
help='Exclude test results for scylla version',
required=True)
check = subparser.add_parser('check', help='Upload and analyze test result')
check.add_argument("--update-db", action="store_true", default=False,
help="Upload current microbenchmarking stats to ElasticSearch")
check.add_argument("--results-path", action="store", default=".",
help="Path where to search for test results")
check.add_argument("--email-recipients", action="store", default="[email protected]",
help="Comma separated email addresses list that will get the report")
check.add_argument("--report-path", action="store", default="",
help="Save HTML generated results report to the file path before sending by email")
check.add_argument("--hostname", action="store", default="",
help="Run check regression for host with hostname")
return parser.parse_args()
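# Simplified sketch of the percentage change computed in count_diff(); the real
# code additionally flips the sign depending on whether the metric is listed in
# higher_better or lower_better.  The fallback for a zero previous value mirrors
# the original formula.
def _percent_change_example(cur_val, prev_val):
    if prev_val > 0:
        return (float(cur_val) - prev_val) / prev_val * 100
    return float(cur_val) * 100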
if __name__ == '__main__':
ARGS = parse_args()
main(ARGS)
|
agpl-3.0
| 2,676,605,412,864,349,700 | 45.203358 | 151 | 0.5362 | false |
hashemd/Advanced-Virtual-Digest
|
libs/dg/digest.py
|
1
|
8888
|
"""
Released under the MIT License
Copyright (c) 2013 Hashem Al-Dujaili
"""
import re
# from google.appengine.ext import ndb
class Digest():
"""
    Takes as input an ndb database (with entity fields name, frontsite and
    backsite), a sequence (FASTA accepted) and the molecule shape.
"""
def __init__(self, database, sequence, shape):
self.database = database
self.shape = shape
self.sequence = sequence
self.sequence = str(self.sequence).lower()
self.sequence = re.sub(r'[\W\d\s\n]+', '', self.sequence)
def multi_digest(self, enzymes):
bases = []
frags = []
tmp = self.sequence
if isinstance(enzymes, basestring):
enzymes = [enzymes]
for enzyme in enzymes:
enz = self.database.query(self.database.name == enzyme).get()
frontsite = enz.frontsite.lower()
frontsite = re.sub(r'[\W\d\s\n]+', '', frontsite)
backsite = enz.backsite.lower()
backsite = re.sub(r'[\W\d\s\n]+', '', backsite)
res_site = frontsite + backsite
res_site = re.sub('r', '[ag]', res_site)
res_site = re.sub('y', '[ct]', res_site)
res_site = re.sub('k', '[gt]', res_site)
res_site = re.sub('m', '[ac]', res_site)
res_site = re.sub('n', '[gtac]', res_site)
for match in re.finditer(res_site, tmp):
bases.append([match.start(), len(frontsite)])
if len(bases) == 0:
return -1 # Does not cut
bases = sorted(bases, key=lambda x: x[0])
n = 0
for base in bases:
frags.append([tmp[n:base[0] + base[1]],
str(len(tmp[n:base[0] + base[1]]))])
n = base[0] + base[1]
frags.append([tmp[n:], str(len(tmp[n:]))])
if frags[0][0] == '':
frags.pop(0)
if self.shape == 'Circular':
frags[0][1] = str(int(frags[0][1]) + int(frags[-1][1]))
frags[0][0] = (frags[-1][0] + frags[0][0])
frags.pop()
return frags # return [(fragment,length)]
def linearize(self):
linenz = []
qry = self.database.query()
listenz = qry.fetch()
for n in range(0, len(listenz), 1):
subseq = listenz[n].frontsite.lower() + listenz[n].backsite.lower()
if len(re.findall(subseq, self.sequence)) == 1:
linenz.append((listenz[n].name, str(self.sequence.find(subseq)
+ len(listenz[n].frontsite))))
else:
continue
return linenz
# Returns a list of tuples in the format (Enzyme name, Site of cut)
def insert(self, first_site, second_site, vector,
vector_database, sites_database=None):
query = vector_database.query(vector_database.name == vector).get()
vec_seq = query.sequence
vec_seq = vec_seq.lower()
vec_seq = re.sub('\n', '', vec_seq)
ins_seq = self.sequence
query = self.database.query(self.database.name == first_site).get()
if query is None:
if sites_database is None:
return -2
query = sites_database.query(sites_database.name
== first_site).get()
if query is None:
return -2
else:
fs = query.sequence.lower()
else:
fs = query.frontsite.lower() + query.backsite.lower()
query = self.database.query(self.database.name == second_site).get()
if query is None:
if sites_database is None:
return -2
            query = sites_database.query(sites_database.name
                                         == second_site).get()
if query is None:
return -2
else:
ss = query.sequence.lower()
else:
ss = query.frontsite.lower() + query.backsite.lower()
x = vec_seq.find(fs)
y = vec_seq.find(ss)
inserted = vec_seq[:x] + ins_seq + vec_seq[y + len(ss):]
return inserted
def rem_insert(self, first_site, second_site, vector, vector_database,
sites_database=None, min_distance=300):
#Sites database is optional, use only if non enzyme sites are included.
self.shape = 'linear'
query = vector_database.query(vector_database.name == vector).get()
vec_seq = query.sequence
vec_seq = vec_seq.lower()
vec_seq = re.sub('\n', '', vec_seq)
ins_seq = self.sequence
first_enz = []
second_enz = []
both_enz = []
query = self.database.query(self.database.name == first_site).get()
if query is None:
if sites_database is None:
return -2
query = sites_database.query(sites_database.name
== first_site).get()
if query is None:
return -2
else:
fs = query.sequence.lower()
else:
fs = query.frontsite.lower() + query.backsite.lower()
query = self.database.query(self.database.name == second_site).get()
if query is None:
if sites_database is None:
return -2
            query = sites_database.query(sites_database.name ==
                                         second_site).get()
if query is None:
return -2
else:
ss = query.sequence.lower()
else:
ss = query.frontsite.lower() + query.backsite.lower()
x = vec_seq.find(fs)
y = vec_seq.find(ss)
if (x == -1 or y == -1):
return -1
five_seq = vec_seq[:x]
three_seq = vec_seq[y + len(ss):]
qry = self.database.query()
listenz = qry.fetch(projection=[self.database.name])
for n in range(0, len(listenz), 1):
self.sequence = ins_seq
if self.multi_digest(listenz[n].name) == -1:
self.sequence = five_seq
x = self.multi_digest(listenz[n].name)
if not x == -1:
self.sequence = three_seq
y = self.multi_digest(listenz[n].name)
if not y == -1:
too_short = False
if isinstance(x[0][1], basestring):
x[0][1] = [x[0][1]]
if isinstance(y[0][1], basestring):
y[0][1] = [y[0][1]]
adj_frag = [x[0][1].pop() + y[0][1].pop(0)]
frags = x[0][1] + y[0][1]
if len(frags) < 1:
continue
frags[0] = frags[-1] + frags[0]
frags.pop()
side_product = []
for frag in frags:
if (-min_distance < (frag - len(ins_seq))
< min_distance):
too_short = True
break
side_product.append(frag)
if too_short is False:
both_enz.append((listenz[n].name, side_product,
adj_frag))
continue
first_enz.append(listenz[n].name)
else:
self.sequence = three_seq
x = self.multi_digest(listenz[n].name)
                if x != -1:
second_enz.append(listenz[n].name)
        # Ordered by shortest (insert + adjacent fragments) length
both_enz = sorted(both_enz, key=lambda x: (x[2][0]))
first_enz = sorted(first_enz, key=lambda x: (x[1][0]))
second_enz = sorted(second_enz, key=lambda x: (x[1][0]))
return (both_enz, first_enz, second_enz)
def enzyme_map(self):
qry = self.database.query()
strenz = []
listenz = qry.fetch(projection=[self.database.name])
for n in range(0, len(listenz), 1):
x = self.multi_digest([listenz[n].name])
if not (x == -1):
tmp = []
for lengths in x:
tmp.append(int(lengths[1]))
order = sorted(tmp, reverse=True)
strenz.append([listenz[n].name, order])
continue
strenz = sorted(strenz, key=lambda x: len(x[1]), reverse=True)
return strenz # Returns a list of list[enzyme and frag lengths]
# sorted by reverse number of frags.
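    # Illustrative shape of enzyme_map()'s result (the numbers are made up):
    #   [['HindIII', [4123, 890, 77]], ['EcoRI', [5090]]]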
def main():
print "This module cannot be executed directly."
if __name__ == '__main__':
main()
|
mit
| -6,917,409,222,109,605,000 | 31.676471 | 79 | 0.474572 | false |
vanbroup/octodns
|
octodns/provider/googlecloud.py
|
1
|
12215
|
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
import shlex
import time
from logging import getLogger
from uuid import uuid4
import re
from google.cloud import dns
from .base import BaseProvider
from ..record import Record
class GoogleCloudProvider(BaseProvider):
"""
Google Cloud DNS provider
google_cloud:
class: octodns.provider.googlecloud.GoogleCloudProvider
# Credentials file for a service_account or other account can be
# specified with the GOOGLE_APPLICATION_CREDENTIALS environment
# variable. (https://console.cloud.google.com/apis/credentials)
#
# The project to work on (not required)
# project: foobar
#
# The File with the google credentials (not required). If used, the
# "project" parameter needs to be set, else it will fall back to the
# "default credentials"
# credentials_file: ~/google_cloud_credentials_file.json
#
"""
SUPPORTS = set(('A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NAPTR',
'NS', 'PTR', 'SPF', 'SRV', 'TXT'))
SUPPORTS_GEO = False
CHANGE_LOOP_WAIT = 5
def __init__(self, id, project=None, credentials_file=None,
*args, **kwargs):
if credentials_file:
self.gcloud_client = dns.Client.from_service_account_json(
credentials_file, project=project)
else:
self.gcloud_client = dns.Client(project=project)
# Logger
self.log = getLogger('GoogleCloudProvider[{}]'.format(id))
self.id = id
self._gcloud_zones = {}
super(GoogleCloudProvider, self).__init__(id, *args, **kwargs)
def _apply(self, plan):
"""Required function of manager.py to actually apply a record change.
:param plan: Contains the zones and changes to be made
:type plan: octodns.provider.base.Plan
:type return: void
"""
desired = plan.desired
changes = plan.changes
self.log.debug('_apply: zone=%s, len(changes)=%d', desired.name,
len(changes))
# Get gcloud zone, or create one if none existed before.
if desired.name not in self.gcloud_zones:
gcloud_zone = self._create_gcloud_zone(desired.name)
else:
gcloud_zone = self.gcloud_zones.get(desired.name)
gcloud_changes = gcloud_zone.changes()
for change in changes:
class_name = change.__class__.__name__
_rrset_func = getattr(
self, '_rrset_for_{}'.format(change.record._type))
if class_name == 'Create':
gcloud_changes.add_record_set(
_rrset_func(gcloud_zone, change.record))
elif class_name == 'Delete':
gcloud_changes.delete_record_set(
_rrset_func(gcloud_zone, change.record))
elif class_name == 'Update':
gcloud_changes.delete_record_set(
_rrset_func(gcloud_zone, change.existing))
gcloud_changes.add_record_set(
_rrset_func(gcloud_zone, change.new))
else:
raise RuntimeError('Change type "{}" for change "{!s}" '
'is none of "Create", "Delete" or "Update'
.format(class_name, change))
gcloud_changes.create()
for i in range(120):
gcloud_changes.reload()
# https://cloud.google.com/dns/api/v1/changes#resource
# status can be one of either "pending" or "done"
if gcloud_changes.status != 'pending':
break
self.log.debug("Waiting for changes to complete")
time.sleep(self.CHANGE_LOOP_WAIT)
if gcloud_changes.status != 'done':
raise RuntimeError("Timeout reached after {} seconds".format(
i * self.CHANGE_LOOP_WAIT))
def _create_gcloud_zone(self, dns_name):
"""Creates a google cloud ManagedZone with dns_name, and zone named
derived from it. calls .create() method and returns it.
:param dns_name: fqdn of zone to create
:type dns_name: str
:type return: new google.cloud.dns.ManagedZone
"""
# Zone name must begin with a letter, end with a letter or digit,
# and only contain lowercase letters, digits or dashes,
# and be 63 characters or less
zone_name = 'zone-{}-{}'.format(
dns_name.replace('.', '-'), uuid4().hex)[:63]
gcloud_zone = self.gcloud_client.zone(
name=zone_name,
dns_name=dns_name
)
gcloud_zone.create(client=self.gcloud_client)
# add this new zone to the list of zones.
self._gcloud_zones[gcloud_zone.dns_name] = gcloud_zone
self.log.info("Created zone {}. Fqdn {}.".format(zone_name, dns_name))
return gcloud_zone
def _get_gcloud_records(self, gcloud_zone, page_token=None):
""" Generator function which yields ResourceRecordSet for the managed
gcloud zone, until there are no more records to pull.
:param gcloud_zone: zone to pull records from
:type gcloud_zone: google.cloud.dns.ManagedZone
:param page_token: page token for the page to get
:return: a resource record set
:type return: google.cloud.dns.ResourceRecordSet
"""
gcloud_iterator = gcloud_zone.list_resource_record_sets(
page_token=page_token)
for gcloud_record in gcloud_iterator:
yield gcloud_record
        # This is to get results which may be on a later page
        # (if there are more than max_results entries).
if gcloud_iterator.next_page_token:
for gcloud_record in self._get_gcloud_records(
gcloud_zone, gcloud_iterator.next_page_token):
# yield from is in python 3 only.
yield gcloud_record
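        # Under Python 3 the recursive pagination above could be written as:
        #   yield from self._get_gcloud_records(gcloud_zone,
        #                                       gcloud_iterator.next_page_token)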
def _get_cloud_zones(self, page_token=None):
"""Load all ManagedZones into the self._gcloud_zones dict which is
mapped with the dns_name as key.
:return: void
"""
gcloud_zones = self.gcloud_client.list_zones(page_token=page_token)
for gcloud_zone in gcloud_zones:
self._gcloud_zones[gcloud_zone.dns_name] = gcloud_zone
if gcloud_zones.next_page_token:
self._get_cloud_zones(gcloud_zones.next_page_token)
@property
def gcloud_zones(self):
if not self._gcloud_zones:
self._get_cloud_zones()
return self._gcloud_zones
def populate(self, zone, target=False, lenient=False):
"""Required function of manager.py to collect records from zone.
:param zone: A dns zone
:type zone: octodns.zone.Zone
:param target: Unused.
:type target: bool
:param lenient: Unused. Check octodns.manager for usage.
:type lenient: bool
:type return: void
"""
self.log.debug('populate: name=%s, target=%s, lenient=%s', zone.name,
target, lenient)
exists = False
before = len(zone.records)
gcloud_zone = self.gcloud_zones.get(zone.name)
if gcloud_zone:
exists = True
for gcloud_record in self._get_gcloud_records(gcloud_zone):
if gcloud_record.record_type.upper() not in self.SUPPORTS:
continue
record_name = gcloud_record.name
if record_name.endswith(zone.name):
                    # Google Cloud always returns the fqdn. Make the record
                    # relative here. "root" records will then get the ''
                    # record_name, which is also the way octodns likes it.
record_name = record_name[:-(len(zone.name) + 1)]
typ = gcloud_record.record_type.upper()
data = getattr(self, '_data_for_{}'.format(typ))
data = data(gcloud_record)
data['type'] = typ
data['ttl'] = gcloud_record.ttl
self.log.debug('populate: adding record {} records: {!s}'
.format(record_name, data))
record = Record.new(zone, record_name, data, source=self)
zone.add_record(record)
self.log.info('populate: found %s records, exists=%s',
len(zone.records) - before, exists)
return exists
def _data_for_A(self, gcloud_record):
return {
'values': gcloud_record.rrdatas
}
_data_for_AAAA = _data_for_A
def _data_for_CAA(self, gcloud_record):
return {
'values': [{
'flags': v[0],
'tag': v[1],
'value': v[2]}
for v in [shlex.split(g) for g in gcloud_record.rrdatas]]}
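    # e.g. a CAA rrdata string of '0 issue "ca.example.net"' is split by shlex
    # into ['0', 'issue', 'ca.example.net'] and mapped to
    # {'flags': '0', 'tag': 'issue', 'value': 'ca.example.net'}.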
def _data_for_CNAME(self, gcloud_record):
return {
'value': gcloud_record.rrdatas[0]
}
def _data_for_MX(self, gcloud_record):
return {'values': [{
"preference": v[0],
"exchange": v[1]}
for v in [shlex.split(g) for g in gcloud_record.rrdatas]]}
def _data_for_NAPTR(self, gcloud_record):
return {'values': [{
'order': v[0],
'preference': v[1],
'flags': v[2],
'service': v[3],
'regexp': v[4],
'replacement': v[5]}
for v in [shlex.split(g) for g in gcloud_record.rrdatas]]}
_data_for_NS = _data_for_A
_data_for_PTR = _data_for_CNAME
_fix_semicolons = re.compile(r'(?<!\\);')
def _data_for_SPF(self, gcloud_record):
if len(gcloud_record.rrdatas) > 1:
return {
'values': [self._fix_semicolons.sub('\;', rr)
for rr in gcloud_record.rrdatas]}
return {
'value': self._fix_semicolons.sub('\;', gcloud_record.rrdatas[0])}
def _data_for_SRV(self, gcloud_record):
return {'values': [{
'priority': v[0],
'weight': v[1],
'port': v[2],
'target': v[3]}
for v in [shlex.split(g) for g in gcloud_record.rrdatas]]}
_data_for_TXT = _data_for_SPF
def _rrset_for_A(self, gcloud_zone, record):
return gcloud_zone.resource_record_set(
record.fqdn, record._type, record.ttl, record.values)
_rrset_for_AAAA = _rrset_for_A
def _rrset_for_CAA(self, gcloud_zone, record):
return gcloud_zone.resource_record_set(
record.fqdn, record._type, record.ttl, [
'{} {} {}'.format(v.flags, v.tag, v.value)
for v in record.values])
def _rrset_for_CNAME(self, gcloud_zone, record):
return gcloud_zone.resource_record_set(
record.fqdn, record._type, record.ttl, [record.value])
def _rrset_for_MX(self, gcloud_zone, record):
return gcloud_zone.resource_record_set(
record.fqdn, record._type, record.ttl, [
'{} {}'.format(v.preference, v.exchange)
for v in record.values])
def _rrset_for_NAPTR(self, gcloud_zone, record):
return gcloud_zone.resource_record_set(
record.fqdn, record._type, record.ttl, [
'{} {} "{}" "{}" "{}" {}'.format(
v.order, v.preference, v.flags, v.service,
v.regexp, v.replacement) for v in record.values])
_rrset_for_NS = _rrset_for_A
_rrset_for_PTR = _rrset_for_CNAME
def _rrset_for_SPF(self, gcloud_zone, record):
return gcloud_zone.resource_record_set(
record.fqdn, record._type, record.ttl, record.chunked_values)
def _rrset_for_SRV(self, gcloud_zone, record):
return gcloud_zone.resource_record_set(
record.fqdn, record._type, record.ttl, [
'{} {} {} {}'
.format(v.priority, v.weight, v.port, v.target)
for v in record.values])
_rrset_for_TXT = _rrset_for_SPF
|
mit
| 4,137,766,884,276,775,000 | 34.612245 | 78 | 0.556447 | false |
roberzguerra/rover
|
mezzanine_file_collections/migrations/0003_auto_20151130_1513.py
|
1
|
4011
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('sites', '0001_initial'),
('mezzanine_file_collections', '0002_auto_20150928_1038'),
]
operations = [
migrations.AddField(
model_name='mediafile',
name='_meta_title',
field=models.CharField(help_text='Optional title to be used in the HTML title tag. If left blank, the main title field will be used.', max_length=500, null=True, verbose_name='Title', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='mediafile',
name='created',
field=models.DateTimeField(null=True, editable=False),
preserve_default=True,
),
migrations.AddField(
model_name='mediafile',
name='expiry_date',
field=models.DateTimeField(help_text="With Published chosen, won't be shown after this time", null=True, verbose_name='Expires on', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='mediafile',
name='gen_description',
field=models.BooleanField(default=True, help_text='If checked, the description will be automatically generated from content. Uncheck if you want to manually set a custom description.', verbose_name='Generate description'),
preserve_default=True,
),
migrations.AddField(
model_name='mediafile',
name='in_sitemap',
field=models.BooleanField(default=True, verbose_name='Show in sitemap'),
preserve_default=True,
),
migrations.AddField(
model_name='mediafile',
name='keywords_string',
field=models.CharField(max_length=500, editable=False, blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='mediafile',
name='publish_date',
field=models.DateTimeField(help_text="With Published chosen, won't be shown until this time", null=True, verbose_name='Published from', db_index=True, blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='mediafile',
name='short_url',
field=models.URLField(null=True, blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='mediafile',
name='site',
field=models.ForeignKey(default=1, editable=False, to='sites.Site'),
preserve_default=False,
),
migrations.AddField(
model_name='mediafile',
name='slug',
field=models.CharField(help_text='Leave blank to have the URL auto-generated from the title.', max_length=2000, null=True, verbose_name='URL', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='mediafile',
name='status',
field=models.IntegerField(default=2, help_text='With Draft chosen, will only be shown for admin users on the site.', verbose_name='Status', choices=[(1, 'Draft'), (2, 'Published')]),
preserve_default=True,
),
migrations.AddField(
model_name='mediafile',
name='updated',
field=models.DateTimeField(null=True, editable=False),
preserve_default=True,
),
migrations.AlterField(
model_name='mediafile',
name='description',
field=models.TextField(verbose_name='Description', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='mediafile',
name='title',
field=models.CharField(max_length=500, verbose_name='Title'),
preserve_default=True,
),
]
|
bsd-3-clause
| 3,245,482,364,551,560,000 | 39.515152 | 234 | 0.585639 | false |
rdo-management/tuskar-ui
|
tuskar_ui/infrastructure/parameters/urls.py
|
1
|
1083
|
# -*- coding: utf8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf import urls
from tuskar_ui.infrastructure.parameters import views
urlpatterns = urls.patterns(
'',
urls.url(r'^$', views.IndexView.as_view(), name='index'),
urls.url(r'^simple-service-config$',
views.SimpleServiceConfigView.as_view(),
name='simple_service_configuration'),
urls.url(r'^advanced-service-config$',
views.AdvancedServiceConfigView.as_view(),
name='advanced_service_configuration'),
)
|
apache-2.0
| -3,259,586,383,016,573,400 | 36.344828 | 78 | 0.687904 | false |
necula01/bond
|
pybond/bond/bond.py
|
1
|
37496
|
from __future__ import print_function
from functools import wraps
import inspect
import copy
import os
import string
import json
from json import encoder
# Special result from spy when no agent matches, or no agent provides a result
AGENT_RESULT_NONE = '_bond_agent_result_none'
# Special result from spy when an agent specifically wants the spy point
# to continue. This is useful for spy points that require an agent result
AGENT_RESULT_CONTINUE = '_bond_agent_result_continue'
# We export some function to module-level for more convenient use
def start_test(current_python_test,
test_name=None,
observation_directory=None,
reconcile=None,
spy_groups=None,
decimal_precision=None):
"""
This function should be called in a ``unittest.TestCase`` before any
of the other Bond functions can be used. This will initialize the Bond
module for the current test, and will ensure proper cleanup of Bond
state when the test ends, including the comparison with the
reference observations. For example,
.. code::
def test_something(self):
bond.start_test(self)
...
:param current_python_test: the instance of ``unittest.TestCase`` that is running. This is the
only mandatory parameter. Bond uses this parameter to obtain good values for
the other optional parameters, and also to know when the test ends,
to activate the observation comparison.
:param test_name: (optional) the name of the test. By default, it is ``TestCase.testName``.
:param observation_directory: (optional) the directory where the observation files are stored.
By default this is the ``test_observations`` subdirectory in the
directory containing the test file. The directory will be created if not present.
You should plan to commit the
test observations to your repository, as reference for future test runs.
:param reconcile: (optional) the method used to reconcile the current observations with the
saved reference observations. By default the value of the
environment variable ``BOND_RECONCILE`` is used, or if missing, the
default is ``abort``.
* ``abort`` (aborts the test when there are differences)
* ``accept`` (accepts the differences as the new reference)
* ``console`` (show ``diff`` results and prompt at the console
whether to accept them or not, or possibly start visual merging tools)
* ``kdiff3`` (use kdiff3, if installed, to merge observations)
:param spy_groups: (optional) the list, or tuple, of spy point groups that are enabled. By default,
enable all spy points that do not have an ``enable_for_groups``
attribute.
:param decimal_precision: (optional) the precision (number of decimal places) to use when
serializing float values. Defaults to 4.
"""
Bond.instance().start_test(current_python_test, test_name=test_name,
observation_directory=observation_directory,
reconcile=reconcile, spy_groups=spy_groups,
decimal_precision=decimal_precision)
def settings(observation_directory=None,
reconcile=None,
spy_groups=None,
decimal_precision=None):
"""
    Override settings that were set in :py:func:`start_test`. The overrides apply only for
    the duration of a test, so this should be called after :py:func:`start_test`. This
is useful if you set general test parameters with :py:func:`start_test` in a ``setUp()`` block,
but want to override them for some specific tests.
:param observation_directory: (optional) the directory where the observation files are stored.
By default this is the ``test_observations`` subdirectory in the
directory containing the test file. The directory will be created if not present.
You should plan to commit the
test observations to your repository, as reference for future test runs.
:param reconcile: (optional) the method used to reconcile the current observations with the
saved reference observations. By default the value of the
environment variable ``BOND_RECONCILE`` is used, or if missing, the
default is ``abort``.
* ``abort`` (aborts the test when there are differences)
* ``accept`` (accepts the differences as the new reference)
* ``console`` (show ``diff`` results and prompt at the console
whether to accept them or not, or possibly start visual merging tools)
* ``kdiff3`` (use kdiff3, if installed, to merge observations)
:param spy_groups: (optional) the list, or tuple, of spy point groups that are enabled. By default,
enable all spy points that do not have an ``enable_for_groups``
attribute.
:param decimal_precision: (optional) the precision (number of decimal places) to use when
serializing float values. Defaults to 4.
"""
Bond.instance().settings(observation_directory=observation_directory,
reconcile=reconcile,
spy_groups=spy_groups,
decimal_precision=decimal_precision)
def active():
"""
This function can be called to find out if a ``bond.start_test`` is currently active.
For example,
.. code::
if bond.active():
..do something..
"""
return Bond.instance().active()
def spy(spy_point_name=None, skip_save_observation=False, **kwargs):
"""
This is the most frequently used Bond function. It will collect the key-value pairs passed
in the argument list and will emit them to the spy observation log.
    If you are not testing (:py:func:`start_test` has not been called) then
    this function does nothing.
If there is an agent deployed for the current spy point (see :py:func:`deploy_agent`),
it will process the agent.
.. code::
bond.spy(file_name=file_name, content=data)
bond.spy(spy_point_name="other spy", args=args, output=output)
The values are formatted to JSON using the json module, with sorted keys, and indentation, with
one value per line, to streamline the observation comparison.
For user-defined classes, the method ``to_json`` is called on the instance before it is formatted.
This method should return a JSON-serializable data structure.
If you have deployed agents (see :py:func:`deploy_agent`) that are applicable to this spy point,
the agents can specify a
``formatter`` that can intervene to modify the observation dictionary before it is
serialized to JSON.
:param spy_point_name: (optional) the spy point name, useful to distinguish among different observations, and to
select the agents that are applicable to this spy point. There is no need for this value to
be unique in your test. You only need to have this value if you want to :py:func:`deploy_agent` for
this spy point later in your test. If you do use this parameter, then it will be observed
with the key ``__spy_point__`` to ensure that it appears first in the sorted observation.
:param skip_save_observation: (optional) If True (defaults to False), don't actually save the
observation, just process any relevant agents. This is used internally to enable mocking-only
spy points. This will be overriden if a value of skip_save_observation is specified on an agent
that is active for this spy point, allowing this parameter to be used as an overridable default.
:param kwargs: key-value pairs to be observed. This forms the observation dictionary that is
serialized as the current observation.
:return: the result from the agent, if any (see :py:func:`deploy_agent`), or ``bond.AGENT_RESULT_NONE``.
"""
return Bond.instance().spy(spy_point_name=spy_point_name,
skip_save_observation=skip_save_observation,
**kwargs)
def deploy_agent(spy_point_name, **kwargs):
"""
Create and deploy a new agent for the named spy point. When a spy point is encountered, the agents are searched
in reverse order of their deployment, and the first agent that matches is used.
.. code::
bond.deploy_agent("my file", file_name__contains='passwd',
result="mock result")
:param spy_point_name: (mandatory) the spy point where the agent is deployed.
:param kwargs: (optional) key-value pairs that control whether the agent is active and what it does.
The following keys are recognized:
* Keys that restrict for which invocations of bond.spy this agent is active. All of these conditions
must be true for the agent to be the active one:
* key=val : only when the observation dictionary contains the 'key' with the given value
* key__contains=substr : only when the observation dictionary contains the 'key' with a string value
that contains the given substr.
* key__startswith=substr : only when the observation dictionary contains the 'key' with a
string value that starts with the given substr.
* key__endswith=substr : only when the observation dictionary contains the 'key' with a string value
that ends with the given substr.
* filter=func : only when the given func returns true when passed observation dictionary.
The function should not make changes to the observation dictionary.
Uses the observation before formatting.
* Keys that control what the observer does when processed:
* do=func : executes the given function with the observation dictionary.
func can also be a list of functions, executed in order.
The function should not make changes to the observation dictionary.
Uses the observation before formatting.
* Keys that control what the corresponding spy returns (by default ``AGENT_RESULT_NONE``):
* exception=x : the call to bond.spy throws the given exception. If 'x' is a function
it is invoked on the observation dictionary to compute the exception to throw.
The function should not make changes to the observation dictionary.
Uses the observation before formatting.
* result=x : the call to bond.spy returns the given value. If 'x' is a function
it is invoked on the observe argument dictionary to compute the value to return.
If the function throws an exception then the spied function thrown an exception.
The function should not make changes to the observation dictionary.
Uses the observation before formatting.
* Keys that control how the observation is saved. This is processed after all the above functions.
* formatter : if specified, a function that is given the observation and can update it in place.
The formatted observation is what gets serialized and saved.
* skip_save_observation : if specified and True, this causes no observation to be saved as a result
of the call to spy for which this agent is active. This can be useful to conditionally save
calls to e.g. certain functions whose call order may not be relevant. This will override any
value of ``mock_only`` specified on a :py:func:`spy_point` or value of ``skip_save_observation``
specified on a call to :py:func:`spy`, meaning you can also specify a value of False to override.
:return: nothing
"""
Bond.instance().deploy_agent(spy_point_name, **kwargs)
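# A slightly fuller, hypothetical sketch of deploying an agent (the spy point
# name, keys and values below are examples only, not part of this module):
#
#   bond.deploy_agent('db.query',
#                     sql__startswith='SELECT',
#                     filter=lambda obs: 'users' in obs['sql'],
#                     do=lambda obs: print('spied:', obs['sql']),
#                     result=[])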
def spy_point(spy_point_name=None,
enabled_for_groups=None,
mock_only=False,
require_agent_result=False,
excluded_keys=('self',),
spy_result=False):
"""
Function and method decorator for spying arguments and results of methods. This decorator is safe
to use on production code. It will have effects only if the function :py:func:`start_test` has
been called to initialize the Bond module.
Must be applied directly to a method or a function, not to another decorator.
.. code::
@staticmethod
@bond.spy_point()
def my_sneaky_function(arg1='', arg2=None):
# does something
:param spy_point_name: (optional) A name to use for this spy point. Default is obtained from the name
of the decorated function: for module methods, `module.method_name`. For other
methods, `ClassName.method_name`.
:param enabled_for_groups: (optional) A list or tuple of spy point groups to which this spy point belongs.
If missing then it is enabled for all groups. These names are arbitrary labels
that :py:func:`start_test` can use to turn off groups of spy points.
If you are writing a library that others are using, you should use a distinctive
spy group for your spy points, to avoid your library starting to spy if embedded
in some other test using Bond.
:param mock_only: (optional) If True (defaults to False), then don't record calls to this spy
point as an observation. This allows you to use the spy point as a mock only
without also recording the sequence of calls. If skip_save_observation is
specified on an agent that is active for this spy point, that value will override
this parameter (allowing mock_only to be used as an overridable default).
:param require_agent_result: (optional) if True, and if this spy point is enabled, then there must be an
agent that provides a result, or else the invocation of the function aborts.
The agent may still provide ``AGENT_RESULT_CONTINUE`` to tell the spy point
to continue the invocation of the underlying function. This parameter is
used to mark functions that should not be invoked normally during testing, e.g.,
invoking shell commands, or requesting user input.
:param excluded_keys: (optional) a tuple or list of parameter key names to skip when saving the observations.
Further manipulation of what gets observed can be done from agents.
:param spy_result: (optional) if True, then the result value is spied also, using a spy_point name of
`spy_point_name.result`. If there is an agent providing a result for
this spy point, then the agent result is saved as the observation.
"""
# TODO: Should we also have an excluded_from_groups parameter?
# TODO right now excluding 'self' using excludedKeys, should attempt to find a better way?
def wrap(fn):
# We have as little code here as possible, because this runs in production code
# ^ not if we use the try/except on import idiom. But good to still work if bond is imported
if not inspect.isfunction(fn):
            raise TypeError('The spy_point decorator may only be applied to functions/methods!')
# Convert enabled_for_groups into a tuple
if enabled_for_groups is None:
enabled_for_groups_local = None
elif isinstance(enabled_for_groups, basestring):
enabled_for_groups_local = (enabled_for_groups,)
else:
assert isinstance(enabled_for_groups, (list, tuple))
enabled_for_groups_local = enabled_for_groups
@wraps(fn)
def fn_wrapper(*args, **kwargs):
# Bypass spying if we are not TESTING
if not active():
return fn(*args, **kwargs)
the_bond = Bond.instance()
if enabled_for_groups_local is not None:
for grp in enabled_for_groups_local:
if grp in the_bond.spy_groups:
break
else:
                    # We are only enabled for some groups, and none of them is active
return fn(*args, **kwargs)
arginfo = inspect.getargspec(fn)
callargs = inspect.getcallargs(fn, *args, **kwargs)
if spy_point_name is None:
# We recognize instance methods by the first argument 'self'
# TODO: there must be a better way to do this
spy_point_name_local = None
if arginfo and arginfo[0]:
if arginfo[0][0] == 'self':
spy_point_name_local = args[0].__class__.__name__ + '.' + fn.__name__
elif arginfo[0][0] == 'cls':
# A class method
spy_point_name_local = args[0].__name__ + '.' + fn.__name__
if spy_point_name_local is None:
# TODO We get here both for staticmethod and for module-level functions
# If we had the spy_point wrapper outside the @staticmethod we could tell
# more easily what kind of method this was !!
module_name = getattr(fn, '__module__')
if module_name == '__main__': # Get the original module name from the filename
module_name = os.path.splitext(os.path.basename(inspect.getmodule(fn).__file__))[0]
# Keep only the last component of the name
module_name = module_name.split('.')[-1]
spy_point_name_local = module_name + '.' + fn.__name__
else:
spy_point_name_local = spy_point_name
observation_dictionary = {}
varargs_name = arginfo.varargs
for idx in range(0, min(len(args), len(arginfo.args))):
observation_dictionary[arginfo.args[idx]] = args[idx]
if varargs_name is not None and len(callargs[varargs_name]) != 0:
observation_dictionary[varargs_name] = callargs[varargs_name]
for key, val in kwargs.iteritems():
observation_dictionary[key] = val
observation_dictionary = {key: val for (key, val) in observation_dictionary.iteritems()
if key not in excluded_keys}
response = the_bond.spy(spy_point_name=spy_point_name_local,
skip_save_observation=mock_only,
**observation_dictionary)
if require_agent_result:
assert response is not AGENT_RESULT_NONE, \
'You MUST mock out spy_point {}: {}'.format(spy_point_name_local,
repr(observation_dictionary))
if response is AGENT_RESULT_NONE or response is AGENT_RESULT_CONTINUE:
return_val = fn(*args, **kwargs)
else:
return_val = response
if spy_result:
the_bond.spy(spy_point_name_local + '.result', result=return_val)
return return_val
return fn_wrapper
return wrap
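# A hedged end-to-end sketch tying spy_point, start_test and deploy_agent
# together (fetch_url and MyTest are hypothetical names, not part of Bond):
#
#   @spy_point(spy_point_name='net.fetch_url', require_agent_result=True)
#   def fetch_url(url):
#       ...  # the real call; only reached if an agent returns AGENT_RESULT_CONTINUE
#
#   class MyTest(unittest.TestCase):
#       def test_fetch(self):
#           bond.start_test(self)
#           bond.deploy_agent('net.fetch_url', url__contains='example.com',
#                             result='mock body')
#           assert fetch_url('http://example.com') == 'mock body'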
class Bond:
DEFAULT_OBSERVATION_DIRECTORY = '/tmp/bond_observations'
_instance = None
@staticmethod
def instance():
if Bond._instance is None:
Bond._instance = Bond()
return Bond._instance
def __init__(self):
self._settings = {}
self.test_framework_bridge = None
self.start_count_failures = None
self.start_count_errors = None
self.test_name = None
self.spy_groups = None # Map indexed on enabled spy groups
self.observations = [] # Here we will collect the observations
self.spy_agents = {} # Map from spy_point_name to SpyAgents
def settings(self, **kwargs):
"""
Set the settings for Bond.
See documentation for top-level settings function
:param kwargs:
:return:
"""
# Get the not-None keys
for k, v in kwargs.iteritems():
self._settings[k] = v
if 'spy_groups' in self._settings:
self._set_spy_groups(self._settings['spy_groups'])
def start_test(self,
current_python_test,
**kwargs):
"""
Signal the starting of a new test.
See documentation for top-level start_test function
:param kwargs:
:return:
"""
self.observations = []
self.spy_agents = {}
self.spy_groups = {}
self.test_framework_bridge = TestFrameworkBridge.make_bridge(current_python_test)
self._settings = {} # Clear settings before each test
self.settings(**kwargs)
self.test_name = (self._settings.get('test_name') or
self.test_framework_bridge.full_test_name())
if self._settings['decimal_precision'] is None:
self._settings['decimal_precision'] = 4
# Register us on test exit
self.test_framework_bridge.on_finish_test(self._finish_test)
if 'observation_directory' not in self._settings:
print('WARNING: you should set the settings(observation_directory). Observations saved to {}'.format(
Bond.DEFAULT_OBSERVATION_DIRECTORY
))
def active(self):
return (self.test_framework_bridge is not None)
def spy(self, spy_point_name=None, skip_save_observation=False, **kwargs):
if not self.test_framework_bridge:
# Don't do anything if we are not testing
return None
if spy_point_name is not None:
assert isinstance(spy_point_name, basestring), "spy_point_name must be a string"
# Find the agent to apply. We process the agents in order, because they are deployed at the start of the list
active_agent = None
for agent in self.spy_agents.get(spy_point_name, []):
if not agent.filter(kwargs):
continue
active_agent = agent
break
else:
active_agent = None
observation = copy.deepcopy(kwargs)
if spy_point_name is not None:
observation['__spy_point__'] = spy_point_name # Use a key that should come first alphabetically
def save_observation():
# We postpone applying the formatter until we have run the "doer" and the "result"
formatted = self._format_observation(observation,
active_agent=active_agent)
# print("Observing: " + formatted + "\n")
# TODO ETK
self.observations.append(formatted)
do_save_observation = not skip_save_observation
if active_agent is not None and active_agent.skip_save_observation is not None:
do_save_observation = not active_agent.skip_save_observation
# Apply the doer if present
try:
res = AGENT_RESULT_NONE
if active_agent is not None:
active_agent.do(observation)
res = active_agent.result(observation) # This may throw an exception
finally:
if do_save_observation:
save_observation()
if res != AGENT_RESULT_NONE:
# print(" Result " + repr(res))
return res
return AGENT_RESULT_NONE
def deploy_agent(self, spy_point_name, **kwargs):
"""
Deploy an agent for a spy point.
See documentation for the top-level deploy_agent function.
:param spy_point_name:
:param kwargs:
:return:
"""
assert self.test_framework_bridge, "Should not call deploy_agent unless you have called start_test first"
assert isinstance(spy_point_name, basestring), "spy_point_name must be a string"
agent = SpyAgent(spy_point_name, **kwargs)
spy_agent_list = self.spy_agents.get(spy_point_name)
if spy_agent_list is None:
spy_agent_list = []
self.spy_agents[spy_point_name] = spy_agent_list
# add the agent at the start of the list
spy_agent_list.insert(0, agent)
def _set_spy_groups(self, spy_groups):
self.spy_groups = {}
if spy_groups is not None:
if isinstance(spy_groups, basestring):
self.spy_groups = {spy_groups: True}
else:
assert isinstance(spy_groups, (list, tuple))
for sg in spy_groups:
assert isinstance(sg, basestring)
self.spy_groups[sg] = True
def _format_observation(self,
observation,
active_agent=None):
# TODO: I do not quite like how formatters work. See issue #1
if active_agent:
active_agent.formatter(observation)
original_float_repr = encoder.FLOAT_REPR
format_string = '.{}f'.format(self._settings['decimal_precision'])
encoder.FLOAT_REPR = lambda o: format(o, format_string)
ret = json.dumps(observation,
sort_keys=True,
indent=4,
default=self._custom_json_serializer)
encoder.FLOAT_REPR = original_float_repr
return ret
def _custom_json_serializer(self, obj):
# TODO: figure out how to do this. Must be customizable from settings
if 'to_json' in obj.__class__.__dict__:
return obj.__class__.to_json(obj)
if hasattr(obj, 'to_json'):
return obj.to_json()
if type(obj) == type(lambda : 0):
return "\"<lambda>\""
def _finish_test(self):
"""
Called internally when a test ends
:return:
"""
try:
# Were there failures and errors in this test?
test_failed = self.test_framework_bridge.test_failed()
# Save the observations
if test_failed:
# Show the failures and errors now
print(test_failed)
no_save = test_failed
else:
no_save = None
fname = self._observation_file_name()
fdir = os.path.dirname(fname)
if not os.path.isdir(fdir):
os.makedirs(fdir)
reference_file = fname + '.json'
current_lines = self._get_observations()
# We have to reconcile them
reconcile_res = self._reconcile_observations(reference_file, current_lines, no_save=no_save)
if not test_failed:
# If the test did not fail already, but it failed reconcile, fail the test
assert reconcile_res, 'Reconciling observations for {}'.format(self.test_name)
finally:
# Mark that we are outside of a test
self.test_framework_bridge = None
pass
def _observation_file_name(self):
fname = os.path.join(*[self._observation_directory()] +
self.test_name.split('.'))
return fname
def _observation_directory(self):
obs_dir = self._settings.get('observation_directory')
if obs_dir is not None:
return obs_dir
# We build the observation directory based on the path of the current test file
test_file = self.test_framework_bridge.test_file_name()
if test_file:
return os.path.join(os.path.dirname(test_file),
'test_observations')
print("WARNING: Using temporary directory for Bond test observations. "
"Use observation_directory parameter to start_test or settings")
return Bond.DEFAULT_OBSERVATION_DIRECTORY
def _get_observations(self):
"""
Return all of the observations as a list of lines that would be
printed out
"""
if len(self.observations) == 0:
return ['[\n', ']\n']
else:
return ['[\n'] + [line + '\n' for line in string.split(',\n'.join(self.observations), '\n')] + [']\n']
def _reconcile_observations(self,
reference_file,
current_lines,
no_save=None):
settings = dict(reconcile=self._settings.get('reconcile'))
return bond_reconcile.reconcile_observations(settings,
test_name=self.test_name,
reference_file=reference_file,
current_lines=current_lines,
no_save=no_save)
class SpyAgent:
"""
A spy agent applies to a particular spy_point_name, has
some optional filters to select only certain observations,
and has optional mocking parameters.
See documentation for the deploy_agent top-level function.
"""
def __init__(self, spy_point_name, **kwargs):
self.spy_point_name = spy_point_name
self.result_spec = AGENT_RESULT_NONE
self.exception_spec = None
self.formatter_spec = None
self.doers = [] # A list of things to do
self.point_filter = None # The filter for pointName, if present
self.filters = [] # The generic filters
self.skip_save_observation = None
for k in kwargs:
if k == 'result':
self.result_spec = kwargs[k]
elif k == 'exception':
self.exception_spec = kwargs[k]
elif k == 'formatter':
self.formatter_spec = kwargs[k]
elif k == 'do':
doers = kwargs[k]
if isinstance(doers, list):
self.doers += doers
else:
self.doers.append(doers)
elif k == 'skip_save_observation':
self.skip_save_observation = kwargs[k]
else:
# Must be a filter
fo = SpyAgentFilter(k, kwargs[k])
self.filters.append(fo)
def filter(self, observation):
"""
        Run the filter on an observation to see if the SpyAgent applies.
        :param observation: the observation dictionary
        :return: True if this agent applies to the observation
"""
for f in self.filters:
if not f.filter(observation):
return False
return True
def formatter(self, observation):
"""Apply the formatter to modify the observation in place"""
if self.formatter_spec is not None:
self.formatter_spec(observation)
def do(self, observation):
for d in self.doers:
d(observation)
def result(self, observation):
"""Compute the result"""
es = self.exception_spec
if es is not None:
if hasattr(es, '__call__'):
raise es(observation)
else:
raise es
r = self.result_spec
if r is not AGENT_RESULT_NONE and hasattr(r, '__call__'):
return r(observation)
else:
return r
class SpyAgentFilter:
"""
Each SpyAgent can have multiple filters.
See documentation for deploy_agent function.
"""
def __init__(self, filter_key, filter_value):
self.field_name = None # The observation field name the filter applies to
self.filter_func = None # A filter function (applies to the field value)
if filter_key == 'filter':
assert isinstance(filter_value, type(lambda: 0))
self.field_name = None
self.filter_func = filter_value
return
parts = filter_key.split("__")
if len(parts) == 1:
self.field_name = parts[0]
self.filter_func = (lambda f: f == filter_value)
elif len(parts) == 2:
self.field_name = parts[0]
cmp_spec = parts[1]
if cmp_spec == 'exact':
self.filter_func = (lambda f: f == filter_value)
elif cmp_spec == 'eq':
self.filter_func = (lambda f: f == filter_value)
elif cmp_spec == 'startswith':
self.filter_func = (lambda f: f.find(filter_value) == 0)
elif cmp_spec == 'endswith':
self.filter_func = (lambda f: f.rfind(filter_value) == len(f) - len(filter_value))
elif cmp_spec == 'contains':
self.filter_func = (lambda f: filter_value in f)
else:
assert False, "Unknown operator: " + cmp_spec
else:
assert False
def filter(self, observation):
if self.field_name:
return self.field_name in observation and self.filter_func(observation[self.field_name])
else:
return self.filter_func(observation)
class TestFrameworkBridge:
"""
A class to abstract the interface to the host test framework
"""
def __init__(self,
current_python_test):
self.current_python_test = current_python_test
@staticmethod
def make_bridge(current_python_test):
"""
Make the proper bridge for the current python test
:param current_python_test:
:return:
"""
# We test for the presence of fields
if hasattr(current_python_test, '_resultForDoCleanups'):
resultForDoCleanups = current_python_test._resultForDoCleanups
if (hasattr(resultForDoCleanups, 'failures') and
hasattr(resultForDoCleanups, 'errors')):
return TestFrameworkBridgeUnittest(current_python_test)
if hasattr(resultForDoCleanups, '_fixtureinfo'):
return TestFrameworkBridgePyTest(current_python_test)
assert False, "Can't recognize the test framework"
def full_test_name(self):
"""
The full name of the test: Class.test
"""
return self.current_python_test.__class__.__name__ + "." + self.current_python_test._testMethodName
def test_file_name(self):
"""
The name of the .py file where the test is defined
:return:
"""
return inspect.getfile(self.current_python_test.__class__)
def on_finish_test(self, _callback):
"""
Register a callback to be called on test end
"""
self.current_python_test.addCleanup(_callback)
def test_failed(self):
"""
Return an error message if the test has failed
:return:
"""
assert False, "Must override"
class TestFrameworkBridgeUnittest(TestFrameworkBridge):
"""
A bridge for the standard unitest
"""
def __init__(self,
current_python_test):
TestFrameworkBridge.__init__(self, current_python_test)
# TODO: the rest is specific to unittest. We need to factor it out to allow other frameworks. See issue #2
# (the use of current_python_test._testMethodName above is unittest specific as well)
# We remember the start counter for failures and errors
# This is the best way I know how to tell that a test has failed
self.start_count_failures = len(self.current_python_test._resultForDoCleanups.failures)
self.start_count_errors = len(self.current_python_test._resultForDoCleanups.errors)
def test_failed(self):
"""
        Return an error message if the test has failed, otherwise None
:return:
"""
failures_and_errors = self._get_failures_and_errors()
if failures_and_errors:
return "\n".join(failures_and_errors)
else:
return None
def _get_failures_and_errors(self):
"""
Return a list of failures and errors so far
"""
res = []
for fmsg in self.current_python_test._resultForDoCleanups.failures[self.start_count_failures:]:
res.append(fmsg[1])
for emsg in self.current_python_test._resultForDoCleanups.errors[self.start_count_errors:]:
res.append(emsg[1])
return res
class TestFrameworkBridgePyTest(TestFrameworkBridge):
"""
A bridge for py.test
"""
def __init__(self,
current_python_test):
TestFrameworkBridge.__init__(self, current_python_test)
self.start_count_excinfo = self._count_excinfo()
def test_failed(self):
"""
Return true if the test has failed
:return:
"""
return self._count_excinfo() > self.start_count_excinfo
def _count_excinfo(self):
return len(self.current_python_test._resultForDoCleanups._excinfo) \
if self.current_python_test._resultForDoCleanups._excinfo else 0
# Import bond_reconcile at the end, because we import bond from bond_reconcile, and
# we need at least the spy_point to be defined
import bond_reconcile
|
bsd-2-clause
| -200,315,467,010,092,260 | 42.098851 | 121 | 0.600704 | false |
onyb/cpp
|
DSATP/LinkedList/test_CH3_S7_Double_Linked_Lists.py
|
1
|
1519
|
import unittest
from LinkedList.CH3_S7_Double_Linked_Lists import LinkedList
from LinkedList.CH3_S7_Double_Linked_Lists import Node
class TestDoubleLinkedList(unittest.TestCase):
def test_node(self):
node_a = Node()
node_a.set_data('foo')
self.assertEqual(node_a.get_data(), 'foo')
node_b = Node()
node_b.set_data('baz')
node_a.set_next(node_b)
node_b.set_prev(node_a)
self.assertEqual(node_a.get_next(), node_b)
self.assertEqual(node_b.get_prev(), node_a)
self.assertEqual(node_a.has_next, True)
self.assertEqual(node_b.has_next, False)
self.assertEqual(node_a.has_prev, False)
self.assertEqual(node_b.has_prev, True)
def test_linked_list(self):
ll = LinkedList()
node = Node()
node.set_data('foo')
ll.set_head(node)
self.assertEqual(ll.head, node)
self.assertEqual(ll.length, 1)
# Insert at beginning
ll.insert_at_pos('bar', 0)
self.assertEqual(ll.head.get_data(), 'bar')
self.assertEqual(ll.head.get_next().get_data(), 'foo')
self.assertEqual(ll.length, 2)
# Insert at end
ll.insert_at_pos('baz', 2)
self.assertEqual(ll.head.get_next().get_next().get_data(), 'baz')
self.assertEqual(ll.length, 3)
# Insert at position
ll.insert_at_pos('ani', 1)
self.assertEqual(ll.head.get_next().get_next().get_data(), 'ani')
self.assertEqual(ll.length, 4)
|
mit
| -3,485,091,991,765,255,000 | 30.645833 | 73 | 0.601053 | false |
sassoftware/mint
|
mint/django_rest/rbuilder/repos/manager.py
|
1
|
7155
|
#!/usr/bin/python
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from StringIO import StringIO
from django.db import connection
from django.core.exceptions import ObjectDoesNotExist
from conary import changelog
from conary.conaryclient import filetypes
from conary.repository import errors as reposerrors
from mint.rest import errors as resterrors
from mint import userlevels
from mint.db import repository as reposdbmgr
from mint.lib import unixutils
from mint.django_rest.rbuilder.manager import basemanager
from mint.django_rest.rbuilder.manager.basemanager import exposed
from mint.django_rest.rbuilder.repos import models
from mint.django_rest.rbuilder.projects import models as projectmodels
_cachedCfg = None
class ReposManager(basemanager.BaseManager, reposdbmgr.RepomanMixin):
def __init__(self, *args, **kwargs):
basemanager.BaseManager.__init__(self, *args, **kwargs)
bypass = kwargs.pop('bypass', False)
self._repoInit(bypass=bypass)
@property
def db(self):
return connection
@exposed
def createRepositoryForProject(self, project, createMaps=True):
repos = self.getRepositoryForProject(project)
if repos.hasDatabase:
# Create the repository infrastructure (db, dirs, etc.).
repos.create()
# Create users and roles
self.populateUsers(repos)
def populateUsers(self, repos):
# here we automatically create the USER and DEVELOPER levels
# This avoids the chance of paying a high price for adding
# them later - instead we amortize the cost over every commit
netServer = repos.getNetServer()
self._getRoleForLevel(netServer, userlevels.USER)
self._getRoleForLevel(netServer, userlevels.ADMIN)
if not repos.isExternal:
self._getRoleForLevel(netServer, userlevels.DEVELOPER)
self._getRoleForLevel(netServer, userlevels.OWNER)
def _getRoleForLevel(self, reposServer, level):
"""
Gets the role name for the given level, creating the role on
the fly if necessary
"""
roleName, canWrite, canAdmin = reposdbmgr.ROLE_PERMS[level]
try:
reposServer.auth.addRole(roleName)
except reposerrors.RoleAlreadyExists:
# assume that everything is good.
return roleName
else:
reposServer.auth.addAcl(roleName, trovePattern=None, label=None,
write=canWrite, remove=canAdmin)
reposServer.auth.setMirror(roleName, canAdmin)
reposServer.auth.setAdmin(roleName, canAdmin)
return roleName
def addUserByMd5(self, repos, username, salt, password, level):
reposServer = repos.getShimServer()
role = self._getRoleForLevel(reposServer, level)
try:
reposServer.auth.addUserByMD5(username, salt.decode('hex'), password)
except reposerrors.UserAlreadyExists:
reposServer.auth.deleteUserByName(username, deleteRole=False)
reposServer.auth.addUserByMD5(username, salt.decode('hex'), password)
reposServer.auth.setUserRoles(username, [role])
def addUser(self, repos, username, password, level):
reposServer = repos.getShimServer()
role = self._getRoleForLevel(reposServer, level)
try:
reposServer.auth.addUser(username, password)
except reposerrors.UserAlreadyExists:
reposServer.auth.deleteUserByName(username, deleteRole=False)
reposServer.auth.addUser(username, password)
reposServer.auth.setUserRoles(username, [role])
def editUser(self, repos, username, level):
reposServer = repos.getShimServer()
role = self._getRoleForLevel(reposServer, level)
reposServer.auth.setUserRoles(username, [role])
def deleteUser(self, repos, username):
reposServer = repos.getShimServer()
reposServer.auth.deleteUserByName(username, deleteRole=False)
def changePassword(self, repos, username, password):
reposServer = repos.getShimServer()
reposServer.auth.changePassword(username, password)
@exposed
def getRepositoryForProject(self, project):
projectInfo = {}
projectInfo["projectId"] = project.pk
projectInfo["shortname"] = str(project.short_name)
projectInfo["fqdn"] = str(project.repository_hostname)
projectInfo["external"] = project.external
projectInfo["hidden"] = project.hidden
projectInfo["commitEmail"] = project.commit_email and str(project.commit_email)
projectInfo["database"] = project.database and str(project.database)
try:
label = models.Label.objects.get(project=project)
projectInfo["localMirror"] = bool(len(
project.inbound_mirrors.all()))
projectInfo["url"] = label.url and str(label.url)
projectInfo["authType"] = str(label.auth_type)
projectInfo["username"] = label.user_name and str(label.user_name)
projectInfo["entitlement"] = label.entitlement and str(label.entitlement)
projectInfo["password"] = label.password and str(label.password)
except models.Label.DoesNotExist:
projectInfo["localMirror"] = None
projectInfo["url"] = None
projectInfo["authType"] = None
projectInfo["username"] = None
projectInfo["entitlement"] = None
projectInfo["password"] = None
return reposdbmgr.RepositoryHandle(self, projectInfo)
def getRepositoryFromFQDN(self, fqdn):
projects = self.iterRepositories(repository_hostname=fqdn)
try:
return projects.next()
except StopIteration:
raise resterrors.ProductNotFound(fqdn)
def iterRepositories(self, **conditions):
for project in projectmodels.Project.objects.filter(**conditions
).order_by('project_id'):
yield self.getRepositoryForProject(project)
@exposed
def createSourceTrove(self, *args, **kwargs):
# Overriden only to make it exposed
kwargs.update(auth=self.auth)
return reposdbmgr.RepomanMixin.createSourceTrove(self, *args, **kwargs)
@exposed
def getAdminClient(self, write=False):
# Overriden only to make it exposed
return reposdbmgr.RepomanMixin.getAdminClient(self, write=write)
def getUserClient(self, auth=None):
if auth is None:
auth = self.auth
return reposdbmgr.RepomanMixin.getUserClient(self, auth=auth)
|
apache-2.0
| 292,895,482,985,695,040 | 38.313187 | 87 | 0.680084 | false |
Parisson/TimeSide
|
tests/test_graphers_render_analyzers.py
|
1
|
1932
|
#! /usr/bin/env python
from __future__ import division
import unittest
from unit_timeside import TestRunner
import timeside
from timeside.core.tools.test_samples import samples
from tempfile import NamedTemporaryFile
import os
PLOT = False
class Test_graphers_analyzers(unittest.TestCase):
""" test Graphers from analyzers"""
def setUp(self):
source = samples["C4_scale.wav"]
decoder_cls = timeside.core.get_processor('file_decoder')
self.decoder = decoder_cls(uri=source)
def _perform_test(self, grapher_cls):
"""Internal function that test grapher for a given analyzer"""
grapher = grapher_cls()
pipe = (self.decoder | grapher)
pipe.run()
if PLOT:
grapher.render().show()
else:
self.temp_file = NamedTemporaryFile(suffix='.png',
delete=False)
grapher.render(self.temp_file.name)
def tearDown(self):
# Clean-up : delete temp file
if not PLOT:
os.unlink(self.temp_file.name)
def _tests_factory(grapher_analyzers):
for grapher in grapher_analyzers:
def _test_func_factory(grapher):
test_func = lambda self: self._perform_test(grapher)
test_func.__doc__ = 'Test Graphers: %s' % grapher.name()
return test_func
test_func_name = "test_%s" % grapher.name()
test_func = _test_func_factory(grapher)
setattr(Test_graphers_analyzers, test_func_name, test_func)
list_graphers = timeside.core.processor.processors(timeside.core.api.IGrapher)
from timeside.core.grapher import DisplayAnalyzer
grapher_analyzers = [grapher for grapher in list_graphers
if grapher.__base__ == DisplayAnalyzer]
_tests_factory(grapher_analyzers)
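# Each grapher discovered above contributes one generated test method on
# Test_graphers_analyzers named 'test_<grapher.name()>'; the exact set depends
# on which grapher plugins are installed.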
if __name__ == '__main__':
unittest.main(testRunner=TestRunner())
|
agpl-3.0
| -2,400,278,975,507,643,000 | 30.16129 | 78 | 0.623706 | false |
WosunOO/nca_xianshu
|
nca47/objects/dns/dns_gmap.py
|
1
|
1914
|
from nca47.db import api as db_api
from nca47.db.sqlalchemy.models import GMapInfo as GMapModel
from nca47.objects import base
from nca47.objects import fields as object_fields
from nca47.api.controllers.v1 import tools
class DnsGMap(base.Nca47Object):
VERSION = '1.0'
fields = {
'gmap_id': object_fields.StringField(),
'tenant_id': object_fields.StringField(),
'name': object_fields.StringField(),
'enable': object_fields.StringField(),
'algorithm': object_fields.StringField(),
'last_resort_pool': object_fields.StringField(),
'gpool_list': object_fields.ListOfStringsField(),
}
def __init__(self, context=None, **kwargs):
self.db_api = db_api.get_instance()
super(DnsGMap, self).__init__(context=None, **kwargs)
@staticmethod
def __from_db_object(dns_gmap, db_dns_gmap):
"""
        :param dns_gmap:
        :param db_dns_gmap:
:return:
"""
for field in dns_gmap.fields:
            dns_gmap[field] = db_dns_gmap[field]
dns_gmap.obj_reset_changes()
return dns_gmap
def create(self, context, values):
gmap = self.db_api.create(GMapModel, values)
return gmap
def update(self, context, id, values):
gmap = self.db_api.update_object(GMapModel, id, values)
return gmap
def get_object(self, context, **values):
gmap = self.db_api.get_object(GMapModel, **values)
return gmap
# def get_objects(self, context, **values):
# gmap = self.db_api.get_objects(GMapModel, **values)
# return gmap
def delete(self, context, id):
gmap = self.db_api.delete_object(GMapModel, id)
return gmap
def get_objects(self, context, str_sql):
gmap = self.db_api.get_all_object(GMapModel, str_sql)
return gmap
|
apache-2.0
| -8,303,890,767,679,279,000 | 31 | 63 | 0.602403 | false |
johdah/Podr_Python_Web
|
podcast/urls.py
|
1
|
1088
|
from django.conf.urls import patterns, url
from podcast import views
urlpatterns = patterns('',
# ex: /podcast/
url(r'^$', views.index, name='index'),
# ex: /podcast/all/
url(r'^all/$', views.all, name='all'),
# ex: /podcast/category/5/
url(r'^category/(?P<category_id>\d+)/$', views.category, name='category'),
# ex: /podcast/top/
url(r'^top/$', views.top, name='top'),
# ex: /podcast/5/
url(r'^(?P<podcast_id>\d+)/$', views.details, name='details'),
# ex: /podcast/5/follow
url(r'^(?P<podcast_id>\d+)/follow/$', views.follow, name='follow'),
# ex: /podcast/5/thumb_down
url(r'^(?P<podcast_id>\d+)/thumb_down', views.thumb_down, name='thumb_down'),
# ex: /podcast/5/thumb_up
url(r'^(?P<podcast_id>\d+)/thumb_up', views.thumb_up, name='thumb_up'),
# ex: /podcast/5/unfollow
url(r'^(?P<podcast_id>\d+)/unfollow/$', views.unfollow, name='unfollow'),
# ex: /podcast/5/update
url(r'^(?P<podcast_id>\d+)/update/$', views.update, name='update'),
# ex: /podcast/add/
url(r'^add$', views.add, name='add'),
)
|
gpl-3.0
| -7,188,716,312,099,011,000 | 39.333333 | 81 | 0.587316 | false |
nextcloud/appstore
|
nextcloudappstore/settings/base.py
|
1
|
9387
|
"""
Django settings for nextcloudappstore project.
Generated by 'django-admin startproject' using Django 1.9.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
from os.path import dirname, abspath, join, pardir, realpath
from django.conf.global_settings import LANGUAGES
BASE_DIR = realpath(join(dirname(dirname(abspath(__file__))), pardir))
INSTALLED_APPS = [
'nextcloudappstore.core.apps.CoreConfig',
'nextcloudappstore.api.apps.ApiConfig',
'nextcloudappstore.user.apps.UserConfig',
'nextcloudappstore.certificate.apps.CertificateConfig',
'nextcloudappstore.scaffolding.apps.ScaffoldingConfig',
'parler',
'rest_framework',
'rest_framework.authtoken',
'corsheaders',
'snowpenguin.django.recaptcha2',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.github',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.sites',
'django.contrib.staticfiles',
'django.contrib.humanize',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'csp.middleware.CSPMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.locale.LocaleMiddleware',
]
ROOT_URLCONF = 'nextcloudappstore.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'nextcloudappstore.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': join(BASE_DIR, 'db.sqlite3'),
'TEST': {
'NAME': join(BASE_DIR, 'test.sqlite3'),
}
}
}
AUTHENTICATION_BACKENDS = (
# Needed to login by username in Django admin, regardless of `allauth`
'django.contrib.auth.backends.ModelBackend',
# `allauth` specific authentication methods, such as login by e-mail
'allauth.account.auth_backends.AuthenticationBackend',
)
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation'
'.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation'
'.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation'
'.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation'
'.NumericPasswordValidator',
},
]
REST_FRAMEWORK = {
'DEFAULT_RENDERER_CLASSES': (
'djangorestframework_camel_case.render.CamelCaseJSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
),
'DEFAULT_PARSER_CLASSES': (
'djangorestframework_camel_case.parser.CamelCaseJSONParser',
),
'DEFAULT_THROTTLE_RATES': {
'app_upload': '100/day',
'app_register': '100/day',
}
}
SITE_ID = 1
# Allauth configuration
# http://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
ACCOUNT_LOGOUT_ON_GET = True
ACCOUNT_LOGOUT_REDIRECT_URL = 'home'
ACCOUNT_SESSION_REMEMBER = True
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_SIGNUP_FORM_CLASS = \
'nextcloudappstore.user.forms.SignupFormRecaptcha'
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_LOGOUT_ON_PASSWORD_CHANGE = True
PASSWORD_RESET_TIMEOUT_DAYS = 1
ACCOUNT_FORMS = {
'login': 'allauth.account.forms.LoginForm',
'add_email': 'allauth.account.forms.AddEmailForm',
'change_password': 'allauth.account.forms.ChangePasswordForm',
'set_password': 'allauth.account.forms.SetPasswordForm',
'reset_password': 'nextcloudappstore.user.forms.CustomResetPasswordForm',
'reset_password_from_key': 'allauth.account.forms.ResetPasswordKeyForm',
'disconnect': 'allauth.socialaccount.forms.DisconnectForm',
}
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
EXCLUDED_LANGS = ('en-au', 'en-gb')
LANGUAGES = [lang for lang in LANGUAGES if lang[0] not in EXCLUDED_LANGS]
PARLER_LANGUAGES = {
1: [{'code': code} for code, trans in LANGUAGES],
'default': {
'fallbacks': ['en'],
'hide_untranslated': False,
}
}
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
MEDIA_ROOT = join(BASE_DIR, 'media')
STATIC_ROOT = join(BASE_DIR, 'static')
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
# if None will use the /tmp for downloading app release
RELEASE_DOWNLOAD_ROOT = None
# Default security settings
SECURE_BROWSER_XSS_FILTER = True
SECURE_CONTENT_TYPE_NOSNIFF = True
CORS_ORIGIN_ALLOW_ALL = True
CORS_URLS_REGEX = r'^/api/.*$'
CORS_ALLOW_HEADERS = (
'x-requested-with',
'content-type',
'accept',
'origin',
'authorization',
'if-none-match',
)
CORS_EXPOSE_HEADERS = (
'etag',
'x-content-type-options',
'content-type',
)
CSP_DEFAULT_SRC = ('\'none\'',)
CSP_IMG_SRC = ('*',)
CSP_FONT_SRC = ('\'self\'',)
CSP_SCRIPT_SRC = ('\'self\'',)
CSP_CONNECT_SRC = ('\'self\'',)
CSP_STYLE_SRC = ('\'self\'',)
CSP_FORM_ACTION = ('\'self\'',)
CSP_SIGNUP = {
'SCRIPT_SRC': ['https://google.com/recaptcha/',
'https://www.gstatic.com/recaptcha/'],
'FRAME_SRC': ['https://www.google.com/recaptcha/'],
'STYLE_SRC': '\'unsafe-inline\'',
}
# use modern no Captcha reCaptcha
NOCAPTCHA = True
LOGIN_REDIRECT_URL = 'home'
LOGIN_URL = 'account_login'
LOG_LEVEL = 'WARNING'
LOG_FILE = join(BASE_DIR, 'appstore.log')
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'file': {
'level': LOG_LEVEL,
'class': 'logging.FileHandler',
'filename': LOG_FILE,
},
},
'loggers': {
'django': {
'handlers': ['file'],
'level': LOG_LEVEL,
'propagate': True,
},
},
}
LOCALE_PATHS = (
join(BASE_DIR, 'locale/'),
)
# Disable in order for cooldown periods to work properly
ACCOUNT_EMAIL_CONFIRMATION_HMAC = False
# App Store specific configs
# minimum number of comments to calculate a rating
RATING_THRESHOLD = 5
# number of days to include from today in the recent ratings calculation
RATING_RECENT_DAY_RANGE = 90
# for testing app uploads without cert validation set to false
VALIDATE_CERTIFICATES = True
# certification hash algorithm
CERTIFICATE_DIGEST = 'sha512'
# app archive downloader configuration
MAX_DOWNLOAD_FILE_SIZE = 1024 ** 2 # bytes
MAX_DOWNLOAD_TIMEOUT = 60 # seconds
MAX_DOWNLOAD_REDIRECTS = 10
MAX_DOWNLOAD_SIZE = 30 * (1024 ** 2) # bytes
ARCHIVE_FOLDER_BLACKLIST = {
'No .git directories': r'\.git$'
}
# certificate location configuration
NEXTCLOUD_CERTIFICATE_LOCATION = join(
BASE_DIR, 'nextcloudappstore/certificate/nextcloud.crt')
NEXTCLOUD_CRL_LOCATION = join(
BASE_DIR, 'nextcloudappstore/certificate/nextcloud.crl')
# whitelist for serializing markdown
MARKDOWN_ALLOWED_TAGS = [
'audio', 'video', 'source', 'dt', 'dd', 'dl', 'table', 'caption', 'tr',
'th', 'td', 'tbody', 'thead', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'strong',
'em', 'code', 'pre', 'blockquote', 'p', 'ul', 'li', 'ol', 'br', 'del', 'a',
'img', 'figure', 'figcaption', 'cite', 'time', 'abbr', 'iframe', 'q', ]
MARKDOWN_ALLOWED_ATTRIBUTES = {
'audio': ['controls', 'src'],
'video': ['poster', 'controls', 'height', 'width', 'src'],
'source': ['src', 'type'],
'a': ['href'],
'img': ['src', 'title', 'alt'],
'time': ['datetime'],
'abbr': ['title'],
'iframe': ['width', 'height', 'frameborder', 'src', 'allowfullscreen'],
'q': ['cite'],
}
# Discourse configuration for creating categories automatically for newly
# registered apps
DISCOURSE_URL = 'https://help.nextcloud.com'
DISCOURSE_USER = None
DISCOURSE_TOKEN = None
DISCOURSE_PARENT_CATEGORY_ID = 26
# list of values that will override or add user defined values for usage in
# templates; first key is the Nextcloud version for which the app is generated
APP_SCAFFOLDING_PROFILES = {}
# GitHub api configuration
GITHUB_API_BASE_URL = 'https://api.github.com'
GITHUB_API_TOKEN = None
# Nextcloud Email
NEXTCLOUD_FROM_EMAIL = '[email protected]'
NEXTCLOUD_INTEGRATIONS_APPROVAL_EMAILS = ['[email protected]']
|
agpl-3.0
| -94,212,529,529,638,290 | 28.990415 | 79 | 0.664536 | false |
r4fek/dsmcache
|
tests/test_client.py
|
1
|
2967
|
from __future__ import unicode_literals
from mock import patch, Mock
from unittest import TestCase
from dsmcache.client import Client
from dsmcache.exceptions import InvalidKeyError
class ClientTestCase(TestCase):
def setUp(self):
self.client = Client('0.0.0.0')
def tearDown(self):
self.client.disconnect()
@patch('dsmcache.client.ConnectionPool')
@patch('dsmcache.client.Host')
def test_init(self, host_mock, connection_pool_mock):
host = '127.0.0.1:11211'
pool_size = 10
socket_timeout = 3
connection_pool_mock.return_value = Mock()
host_mock.return_value = Mock()
client = Client(host, pool_size=pool_size,
socket_timeout=socket_timeout)
connection_pool_mock.assert_called_once_with(
host_mock.return_value, pool_size=pool_size, timeout=socket_timeout
)
host_mock.assert_called_once_with(host)
self.assertEqual(client._pool, connection_pool_mock.return_value)
@patch('dsmcache.client.Client._send_cmd')
@patch('dsmcache.client.Client._check_key')
def test_get(self, check_key_mock, send_mock):
check_key_mock.return_value = 'key'
self.client.get('key')
check_key_mock.assert_called_once_with('key')
send_mock.assert_called_once_with(
'get', key=check_key_mock.return_value)
@patch('dsmcache.client.Client._send_cmd')
@patch('dsmcache.client.Client._check_key')
def test_get_invalid_key(self, check_key_mock, send_mock):
check_key_mock.side_effect = InvalidKeyError('invalid key')
with self.assertRaises(InvalidKeyError) as cm:
self.client.get('key')
self.assertEqual(cm.exception, check_key_mock.side_effect)
self.assertFalse(send_mock.called)
@patch('dsmcache.client.Client._send_cmd')
@patch('dsmcache.client.Client._check_key')
def test_set(self, check_key_mock, send_mock):
check_key_mock.return_value = 'key'
self.client.set('key', 'value', time=0, flags=0)
check_key_mock.assert_called_once_with('key')
send_mock.assert_called_once_with(
'set', key=check_key_mock.return_value, value='value', flags=0,
time=0, size=len('value'))
@patch('dsmcache.client.Client._send_cmd')
@patch('dsmcache.client.Client._check_key')
def test_set_invalid_key(self, check_key_mock, send_mock):
check_key_mock.side_effect = InvalidKeyError('invalid key')
with self.assertRaises(InvalidKeyError) as cm:
self.client.set('key', 'v')
self.assertEqual(cm.exception, check_key_mock.side_effect)
self.assertFalse(send_mock.called)
def test_send_cmd(self):
pass
def test_parse_cmd(self):
pass
def test_check_key_valid(self):
pass
def test_check_key_invalid(self):
pass
def test_disconnect(self):
pass
|
mit
| -9,185,704,240,002,299,000 | 31.25 | 79 | 0.639029 | false |
geosim/QAD
|
qad_dimstyle_new_ui.py
|
1
|
4287
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'qad_dimstyle_new.ui'
#
# Created: Wed Oct 12 11:24:06 2016
# by: PyQt4 UI code generator 4.10.2
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_DimStyle_New_Dialog(object):
def setupUi(self, DimStyle_New_Dialog):
DimStyle_New_Dialog.setObjectName(_fromUtf8("DimStyle_New_Dialog"))
DimStyle_New_Dialog.resize(372, 142)
DimStyle_New_Dialog.setMinimumSize(QtCore.QSize(372, 142))
DimStyle_New_Dialog.setMaximumSize(QtCore.QSize(372, 142))
self.label = QtGui.QLabel(DimStyle_New_Dialog)
self.label.setGeometry(QtCore.QRect(10, 10, 221, 16))
self.label.setObjectName(_fromUtf8("label"))
self.newDimStyleName = QtGui.QLineEdit(DimStyle_New_Dialog)
self.newDimStyleName.setGeometry(QtCore.QRect(10, 30, 221, 20))
self.newDimStyleName.setObjectName(_fromUtf8("newDimStyleName"))
self.label_2 = QtGui.QLabel(DimStyle_New_Dialog)
self.label_2.setGeometry(QtCore.QRect(10, 90, 221, 16))
self.label_2.setObjectName(_fromUtf8("label_2"))
self.DimStyleNameFrom = QtGui.QComboBox(DimStyle_New_Dialog)
self.DimStyleNameFrom.setGeometry(QtCore.QRect(10, 110, 221, 22))
self.DimStyleNameFrom.setObjectName(_fromUtf8("DimStyleNameFrom"))
self.continueButton = QtGui.QPushButton(DimStyle_New_Dialog)
self.continueButton.setGeometry(QtCore.QRect(284, 50, 81, 23))
self.continueButton.setObjectName(_fromUtf8("continueButton"))
self.cancelButton = QtGui.QPushButton(DimStyle_New_Dialog)
self.cancelButton.setGeometry(QtCore.QRect(284, 80, 81, 23))
self.cancelButton.setObjectName(_fromUtf8("cancelButton"))
self.helpButton = QtGui.QPushButton(DimStyle_New_Dialog)
self.helpButton.setGeometry(QtCore.QRect(284, 110, 81, 23))
self.helpButton.setObjectName(_fromUtf8("helpButton"))
self.label_3 = QtGui.QLabel(DimStyle_New_Dialog)
self.label_3.setGeometry(QtCore.QRect(10, 50, 221, 16))
self.label_3.setObjectName(_fromUtf8("label_3"))
self.newDimStyleDescr = QtGui.QLineEdit(DimStyle_New_Dialog)
self.newDimStyleDescr.setGeometry(QtCore.QRect(10, 70, 221, 20))
self.newDimStyleDescr.setObjectName(_fromUtf8("newDimStyleDescr"))
self.retranslateUi(DimStyle_New_Dialog)
QtCore.QObject.connect(self.DimStyleNameFrom, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), DimStyle_New_Dialog.DimStyleNameFromChanged)
QtCore.QObject.connect(self.newDimStyleName, QtCore.SIGNAL(_fromUtf8("textEdited(QString)")), DimStyle_New_Dialog.newStyleNameChanged)
QtCore.QObject.connect(self.cancelButton, QtCore.SIGNAL(_fromUtf8("clicked()")), DimStyle_New_Dialog.reject)
QtCore.QObject.connect(self.helpButton, QtCore.SIGNAL(_fromUtf8("clicked()")), DimStyle_New_Dialog.ButtonHELP_Pressed)
QtCore.QObject.connect(self.continueButton, QtCore.SIGNAL(_fromUtf8("clicked()")), DimStyle_New_Dialog.ButtonBOX_continue)
QtCore.QMetaObject.connectSlotsByName(DimStyle_New_Dialog)
def retranslateUi(self, DimStyle_New_Dialog):
DimStyle_New_Dialog.setWindowTitle(_translate("DimStyle_New_Dialog", "QAD - Create new dimension style", None))
self.label.setText(_translate("DimStyle_New_Dialog", "New style name:", None))
self.label_2.setText(_translate("DimStyle_New_Dialog", "Start with:", None))
self.continueButton.setText(_translate("DimStyle_New_Dialog", "Continue...", None))
self.cancelButton.setText(_translate("DimStyle_New_Dialog", "Cancel", None))
self.helpButton.setText(_translate("DimStyle_New_Dialog", "?", None))
self.label_3.setText(_translate("DimStyle_New_Dialog", "Description:", None))
|
gpl-3.0
| 4,707,340,619,298,467,000 | 55.407895 | 152 | 0.712386 | false |
rdespoiu/QTitan
|
QTitan/QTSurvey/TestCases/TestSurveyField.py
|
1
|
1631
|
from django.test import TestCase
from ..models import Survey, SurveyField, User
class SurveyFieldTestCase(TestCase):
def setUp(self):
self.TEST_USER = User.objects.create(username = 'TestUser',
password = 'TestPassword',
first_name = 'TestFirstName',
last_name = 'TestLastName',
email = '[email protected]')
self.TEST_SURVEY = Survey.objects.create(ownerID = self.TEST_USER,
title = 'SurveyTestTitle',
description = 'SurveyTestDescription',
distribution = True,
consentneeded = True)
SurveyField.objects.create(surveyID = self.TEST_SURVEY,
value = 'TestValue1')
SurveyField.objects.create(surveyID = self.TEST_SURVEY,
value = 'TestValue2')
SurveyField.objects.create(surveyID = self.TEST_SURVEY,
value = 'TestValue3')
def testSurveyFieldObject(self):
TEST_SURVEY_FIELDS = sorted(list(SurveyField.objects.filter(surveyID = self.TEST_SURVEY)), key = lambda field: field.value)
for i in range(len(TEST_SURVEY_FIELDS)):
self.assertEqual(TEST_SURVEY_FIELDS[i].surveyID, self.TEST_SURVEY)
self.assertEqual(TEST_SURVEY_FIELDS[i].value, 'TestValue{}'.format(i + 1))
|
gpl-3.0
| -8,657,652,827,709,091,000 | 49.96875 | 131 | 0.499693 | false |
Hermlon/fishcode
|
src/fishcodeServer/entity.py
|
1
|
1598
|
#!/usr/bin/env/ python3
from fishcodeServer.location import Location
from fishcodeServer.texture import Texture
from fishcodeServer.serializable_mixin import SerializableMixin
import math
class Entity(SerializableMixin):
def __init__(self, size):
self.myMap = None
self.location = Location()
self.size = size
self.setVelocity(0)
#self.texture = Texture(self.getSize())
def update(self):
pass
def setLocation(self, location):
self.location = location
def getLocation(self):
return self.location
#Defined in units per tick, unit is defined by the size of the area and tick is always one, no matter how much real time passed
def setVelocity(self, velocity):
self.velocity = velocity
def getVelocity(self):
return self.velocity
def setMap(self, myMap):
self.myMap = myMap
def getMap(self):
return self.myMap
def getSize(self):
return self.size
def setSize(self, size):
self.size = size
def getTexture(self):
return self.texture
def setTexture(self, texture):
self.texture = texture
def hitsEntity(self, otherEntity):
radiusSum = self.getSize() + otherEntity.getSize()
deltax = abs(self.getLocation().getX() - otherEntity.getLocation().getX())
deltay = abs(self.getLocation().getY() - otherEntity.getLocation().getY())
distance = math.sqrt(deltax ** 2 + deltay ** 2)
return distance <= radiusSum
def toSerializible(self):
#return {"location":self.location.toSerializible(), "size":self.size, "texture":self.texture.toSerializible()}
return {"location":self.location.toSerializible(), "size":self.size, "velocity":self.velocity}
|
gpl-3.0
| -6,647,272,593,683,634,000 | 26.551724 | 128 | 0.734043 | false |
Hoikas/korman
|
korman/exporter/explosions.py
|
1
|
2481
|
# This file is part of Korman.
#
# Korman is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Korman is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Korman. If not, see <http://www.gnu.org/licenses/>.
class ExportError(Exception):
def __init__(self, value="Undefined Export Error"):
super(Exception, self).__init__(value)
class BlenderOptionNotSupportedError(ExportError):
def __init__(self, opt):
super(ExportError, self).__init__("Unsupported Blender Option: '{}'".format(opt))
class GLLoadError(ExportError):
def __init__(self, image):
super(ExportError, self).__init__("Failed to load '{}' into OpenGL".format(image.name))
class TooManyUVChannelsError(ExportError):
def __init__(self, obj, mat):
msg = "There are too many UV Textures on the material '{}' associated with object '{}'.".format(
mat.name, obj.name)
super(ExportError, self).__init__(msg)
class TooManyVerticesError(ExportError):
def __init__(self, mesh, matname, vertcount):
msg = "There are too many vertices ({}) on the mesh data '{}' associated with material '{}'".format(
vertcount, mesh, matname
)
super(ExportError, self).__init__(msg)
class UndefinedPageError(ExportError):
mistakes = {}
def __init__(self):
super(ExportError, self).__init__("You have objects in pages that do not exist!")
def add(self, page, obj):
if page not in self.mistakes:
self.mistakes[page] = [obj,]
else:
self.mistakes[page].append(obj)
def raise_if_error(self):
if self.mistakes:
# Better give them some idea of what happened...
print(repr(self.mistakes))
raise self
class UnsupportedTextureError(ExportError):
def __init__(self, texture, material):
super(ExportError, self).__init__("Cannot export texture '{}' on material '{}' -- unsupported type '{}'".format(texture.name, material.name, texture.type))
|
gpl-3.0
| -8,326,621,301,006,622,000 | 36.029851 | 163 | 0.652156 | false |
inkah-trace/inkah-python
|
inkah/flask/__init__.py
|
1
|
2163
|
from __future__ import absolute_import
import wrapt
from flask import request, g
from inkah import Span, TRACE_ID_HEADER, SPAN_ID_HEADER, PARENT_SPAN_ID_HEADER
from inkah.utils import is_installed, generate_id
class Inkah(object):
def __init__(self, app=None, **kwargs):
self._options = kwargs
if app is not None:
self.init_app(app, **kwargs)
def init_app(self, app, **kwargs):
self.monkey_patch_requests()
app.before_request(self.before_request)
app.after_request(self.after_request)
def before_request(self):
trace_id = request.headers.get(TRACE_ID_HEADER)
span_id = request.headers.get(SPAN_ID_HEADER)
parent_span_id = request.headers.get(PARENT_SPAN_ID_HEADER)
span = Span(trace_id, span_id, parent_span_id)
g.inkah_span = span
def after_request(self, response):
g.inkah_span.complete()
return response
def requests_header_injector(self, wrapped, instance, args, kwargs):
headers = kwargs.pop('headers', None) or {}
span_id = generate_id()
headers.update({
TRACE_ID_HEADER: g.inkah_span.trace_id,
SPAN_ID_HEADER: span_id,
PARENT_SPAN_ID_HEADER: g.inkah_span.span_id,
})
g.inkah_span.begin_request(span_id)
resp = wrapped(*args, headers=headers, **kwargs)
g.inkah_span.complete_request(span_id)
return resp
def monkey_patch_requests(self):
if is_installed('requests'):
wrapt.wrap_function_wrapper('requests', 'get', self.requests_header_injector)
wrapt.wrap_function_wrapper('requests', 'head', self.requests_header_injector)
wrapt.wrap_function_wrapper('requests', 'post', self.requests_header_injector)
wrapt.wrap_function_wrapper('requests', 'patch', self.requests_header_injector)
wrapt.wrap_function_wrapper('requests', 'put', self.requests_header_injector)
wrapt.wrap_function_wrapper('requests', 'delete', self.requests_header_injector)
wrapt.wrap_function_wrapper('requests', 'options', self.requests_header_injector)
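# Illustrative usage sketch (editor addition; `app` is an assumed Flask instance):
#   inkah = Inkah(app)
# init_app() monkey-patches the `requests` verbs and registers before/after-request
# hooks, so trace/span id headers read from the incoming request are propagated on
# outgoing `requests` calls made while handling it.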
|
mit
| -7,070,654,243,881,186,000 | 39.055556 | 93 | 0.6454 | false |
DanielJDufour/organization-extractor
|
organization_extractor/__init__.py
|
1
|
1899
|
from os import listdir
from os.path import dirname, realpath
from re import compile as re_compile
from re import findall, IGNORECASE, MULTILINE, UNICODE
from re import finditer
import pickle
flags = MULTILINE|UNICODE
directory_of_this_file = dirname(realpath(__file__))
# load patterns
directory_of_patterns = directory_of_this_file + "/prep/patterns"
language_pattern = {}
for filename in listdir(directory_of_patterns):
language = filename.split(".")[0]
with open(directory_of_patterns + "/" + language + ".txt") as f:
pattern_as_string = (f.read().decode("utf-8").strip())
pattern = re_compile(pattern_as_string, flags=flags)
language_pattern[language] = pattern
def flatten(lst):
result = []
for element in lst:
if hasattr(element, '__iter__'):
result.extend(flatten(element))
else:
result.append(element)
return result
def extract_organizations(text, language=None):
if isinstance(text, str):
text = text.decode("utf-8")
organizations = set()
if language:
organizations.update(flatten(findall(language_pattern[language], text)))
else:
for pattern in language_pattern.values():
organizations.update(flatten(findall(pattern, text)))
organizations = [org for org in list(organizations) if org and org.count(" ") > 0]
return organizations
def extract_organization(text):
return extract_organizations(text)[0]
def get_keywords():
directory_of_keywords = directory_of_this_file + "/prep/keywords"
language_keywords = {}
for filename in listdir(directory_of_keywords):
language = filename.split(".")[0]
with open(directory_of_keywords + "/" + language + ".txt") as f:
language_keywords[language] = f.read().decode("utf-8").strip().splitlines()
return language_keywords
eo=extract_organizations
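# Illustrative usage sketch (editor addition; the exact result depends on the
# bundled per-language patterns, which are assumed here):
#   extract_organizations("She joined the United Nations Development Programme.")
# would return something like ["United Nations Development Programme"]; candidates
# without a space are dropped by the final filter.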
|
apache-2.0
| -4,107,803,774,588,541,000 | 29.629032 | 87 | 0.670353 | false |
xxtea/xxtea-python
|
xxtea/__init__.py
|
1
|
1919
|
# encoding: utf-8
from cffi import FFI
import sys
from os.path import join, dirname
__PATH = dirname(__file__)
__SOURCES = [join(__PATH, 'xxtea.c')]
ffi = FFI()
ffi.cdef('''
void * xxtea_encrypt(const void * data, size_t len, const void * key, size_t * out_len);
void * xxtea_decrypt(const void * data, size_t len, const void * key, size_t * out_len);
void free(void * ptr);
''')
lib = ffi.verify('#include <xxtea.h>', sources = __SOURCES, include_dirs=[__PATH])
if sys.version_info < (3, 0):
def __tobytes(v):
if isinstance(v, unicode):
return v.encode('utf-8')
else:
return v
else:
def __tobytes(v):
if isinstance(v, str):
return v.encode('utf-8')
else:
return v
def encrypt(data, key):
'''encrypt the data with the key'''
data = __tobytes(data)
data_len = len(data)
data = ffi.from_buffer(data)
key = ffi.from_buffer(__tobytes(key))
out_len = ffi.new('size_t *')
result = lib.xxtea_encrypt(data, data_len, key, out_len)
ret = ffi.buffer(result, out_len[0])[:]
lib.free(result)
return ret
def decrypt(data, key):
'''decrypt the data with the key'''
data_len = len(data)
data = ffi.from_buffer(data)
key = ffi.from_buffer(__tobytes(key))
out_len = ffi.new('size_t *')
result = lib.xxtea_decrypt(data, data_len, key, out_len)
ret = ffi.buffer(result, out_len[0])[:]
lib.free(result)
return ret
def decrypt_utf8(data, key):
'''decrypt the data with the key to string'''
return decrypt(data, key).decode('utf-8')
if __name__ == "__main__":
text = "Hello World! \0你好,中国!"
key = "1234567890"
encrypt_data = encrypt(text, key)
if sys.version_info < (3, 0):
decrypt_data = decrypt(encrypt_data, key)
else:
decrypt_data = decrypt_utf8(encrypt_data, key)
assert(text == decrypt_data)
|
mit
| -5,807,230,747,741,009,000 | 28.338462 | 92 | 0.592029 | false |
sbnlp/mTOR-evaluation
|
cd2sbml.py
|
1
|
24193
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from xml.dom import minidom
import cgi
import libsbml
import sys
import os
CELLDESIGNER_TYPE_REFERENCE = {
'GENE': 'geneReference',
'RNA': 'rnaReference',
'PROTEIN': 'proteinReference',
'ANTISENSE_RNA': 'antisensernaReference'}
STANDOFF_ENTITY_TO_SBO_MAPPING = {
    'gene': 'SBO:0000354',             # informational molecule segment
    'complex': 'SBO:0000253',          # non-covalent complex
    'protein': 'SBO:0000252',          # polypeptide chain
    'dna': 'SBO:0000251',              # deoxyribonucleic acid
    'dnaregion': 'SBO:0000251',        # deoxyribonucleic acid
    'rna': 'SBO:0000250',              # ribonucleic acid
    'rnaregion': 'SBO:0000250',        # ribonucleic acid
    'smallmolecule': 'SBO:0000247',    # simple chemical
    'simple_molecule': 'SBO:0000247',  # simple chemical
    'ion': 'SBO:0000247',              # simple chemical
    'drug': 'SBO:0000247',             # simple chemical
    'phenotype': 'SBO:0000358'}
STANDOFF_EVENT_TO_SBO_MAPPING = {
'acetylation': 'SBO:0000215',
'activation': 'SBO:0000170',
'association': 'SBO:0000297',
'binding': 'SBO:0000297',
'catabolism': 'GO:0009056',
'catalysis': 'SBO:0000172',
'conversion': 'SBO:0000182',
'deacetylation': 'GO:0006476',
'degradation': 'SBO:0000179',
'demethylation': 'GO:0006482',
'dephosphorylation': 'SBO:0000330',
'deubiquitination': 'GO:0016579',
'dissociation': 'SBO:0000180',
'gene_expression': 'SBO:0000205',
'inactivation': 'SBO:0000169',
'localization': 'GO:0051179',
'methylation': 'SBO:0000214',
'negative_regulation': 'SBO:0000169',
'pathway': 'SBO:0000375',
'phosphorylation': 'SBO:0000216',
'positive_regulation': 'SBO:0000170',
'protein_catabolism': 'SBO:0000179',
'regulation': 'SBO:0000168',
'transcription': 'SBO:0000183',
'translation': 'SBO:0000184',
'transport': 'SBO:0000185',
'ubiquitination': 'SBO:0000224',
'STATE_TRANSITION': 'SBO:0000182',
'KNOWN_TRANSITION_OMITTED': 'SBO:0000182',
'UNKNOWN_TRANSITION': 'SBO:0000182',
'CATALYSIS': 'SBO:0000172',
'UNKNOWN_CATALYSIS': 'SBO:0000172',
'INHIBITION': 'SBO:0000169',
'UNKNOWN_INHIBITION': 'SBO:0000169',
'TRANSPORT': 'SBO:0000185',
'HETERODIMER_ASSOCIATION': 'SBO:0000297',
'DISSOCIATION': 'SBO:0000180',
'TRUNCATION': 'SBO:0000180',
'TRANSCRIPTIONAL_ACTIVATION': 'SBO:0000170',
'TRANSCRIPTIONAL_INHIBITION': 'SBO:0000169',
'TRANSLATIONAL_ACTIVATION': 'SBO:0000170',
'TRANSLATIONAL_INHIBITION': 'SBO:0000169',
'TRANSCRIPTION': 'SBO:0000183',
'TRANSLATION': 'SBO:0000184'}
STANDOFF_EVENT_TO_SBO_MAPPING = dict((k.lower(), v) for (k, v) in STANDOFF_EVENT_TO_SBO_MAPPING.iteritems())
# mapping of general reaction components to sbo term
GENERIC_REACTION_SBO_MAPPING = {
'reactant': 'SBO:0000010',
'product': 'SBO:0000011',
'modifier': 'SBO:0000019',
'activator': 'SBO:0000021',
'inhibitor': 'SBO:0000020'}
def add_cvterm(term):
controlled_vocab = libsbml.CVTerm()
controlled_vocab.setQualifierType(libsbml.BIOLOGICAL_QUALIFIER)
controlled_vocab.setBiologicalQualifierType(libsbml.BQB_IS)
controlled_vocab.addResource(term)
return controlled_vocab
def add_note(note, species):
""" Adds a note to species (wraps the note in <p></p> and escapes the text) """
species.appendNotes('<p xmlns="http://www.w3.org/1999/xhtml">{0}</p>'.format(cgi.escape(note)))
return species
def add_annotation_complex(model, model_id, participants):
species = model.getSpecies(model_id)
xmlns = libsbml.XMLNamespaces()
xmlns.add('http://www.w3.org/1999/02/22-rdf-syntax-ns#', 'rdf')
rdf_triple = libsbml.XMLTriple( 'RDF',
'http://www.w3.org/1999/02/22-rdf-syntax-ns#',
'rdf')
rdf_token = libsbml.XMLToken( rdf_triple,
libsbml.XMLAttributes(),
xmlns)
annotation = libsbml.XMLNode(rdf_token)
if species:
participants_xml_triple = libsbml.XMLTriple( 'Participants',
'http://www.w3.org/1999/02/22-rdf-syntax-ns#',
'rdf')
participants_xml_token = \
libsbml.XMLToken(participants_xml_triple,
libsbml.XMLAttributes())
participants_xml_node = libsbml.XMLNode(participants_xml_token)
participant_xml_triple = libsbml.XMLTriple('Participant',
'http://www.w3.org/1999/02/22-rdf-syntax-ns#',
'rdf')
for iii in participants:
resource_att = libsbml.XMLAttributes()
resource_att.add('participant', str(iii))
subject_token = libsbml.XMLToken(participant_xml_triple,
resource_att)
subject_token.setEnd()
participants_xml_node.addChild(libsbml.XMLNode(subject_token))
annotation.addChild(participants_xml_node)
species.appendAnnotation(annotation)
def correct_species_name(species):
if bool(species['modifications']):
new_name = species['name']
for modification in species['modifications']:
new_name = species['modifications'][modification] + ' ' \
+ new_name
species['newname'] = new_name
return species
def get_complex_to_species_link( species, celldesigner_complex_alias):
for i in celldesigner_complex_alias:
if celldesigner_complex_alias[i]['species'] == species:
return i
def correct_species_alias( species_alias, speciesIdentity, cell_designer_species_alias):
if species_alias in cell_designer_species_alias.keys() \
and cell_designer_species_alias[species_alias]['species'] \
in speciesIdentity:
if cell_designer_species_alias[species_alias]['activity'] \
== 'active':
if 'newname' \
in correct_species_name(speciesIdentity[cell_designer_species_alias[species_alias]['species']]).keys():
return 'activated ' \
+ correct_species_name(speciesIdentity[cell_designer_species_alias[species_alias]['species']])['newname']
else:
return 'activated ' \
+ correct_species_name(speciesIdentity[cell_designer_species_alias[species_alias]['species']])['name']
else:
if 'newname' \
in correct_species_name(speciesIdentity[cell_designer_species_alias[species_alias]['species']]).keys():
return correct_species_name(speciesIdentity[cell_designer_species_alias[species_alias]['species']])['newname']
else:
return correct_species_name(speciesIdentity[cell_designer_species_alias[species_alias]['species']])['name']
def add_compartment( compartment_id, model, compartmentIdentity):
compartment_ref = model.getCompartment(str(compartment_id))
if compartment_ref == None:
compartment_ref = model.createCompartment()
compartment_ref.setId(str(compartment_id))
compartment_ref.setName(str(compartmentIdentity[compartment_id]['name']))
compartment_ref.setConstant(True)
compartment_ref.setSize(1)
compartment_ref.setUnits('volume')
return compartment_ref
def export_pure_sbml( input_file = 'mTORPathway-celldesigner.xml', output_file = 'mTORPathway-celldesigner_out.xml'):
with open( input_file) as cellDesignerFile:
cd_xml = cellDesignerFile.read()
cd_xml_parsed = minidom.parseString(cd_xml)
reactions = cd_xml_parsed.getElementsByTagName('reaction')
species = cd_xml_parsed.getElementsByTagName('species')
cell_designer_species = \
cd_xml_parsed.getElementsByTagName('celldesigner:listOfIncludedSpecies')[0].getElementsByTagName('celldesigner:species')
speciesAlias = \
cd_xml_parsed.getElementsByTagName('celldesigner:speciesAlias')
compartments = cd_xml_parsed.getElementsByTagName('listOfCompartments')[0].getElementsByTagName('compartment')
complexAlias = \
cd_xml_parsed.getElementsByTagName('celldesigner:complexSpeciesAlias')
compartmentIdentity = {}
for compartment in compartments:
compartmentIdentity[compartment.attributes['id'].value] = {}
if compartment.hasAttribute('name'):
compartmentIdentity[compartment.attributes['id'].value]['name'] = compartment.attributes['name'].value
else:
compartmentIdentity[compartment.attributes['id'].value]['name'] = compartment.attributes['id'].value
speciesIdentity = {}
for specie in species:
speciesIdentity[specie.attributes['id'].value] = {}
# speciesIdentity[specie.attributes["id"].value]["metaid"] = specie.attributes["metaid"].value
speciesIdentity[specie.attributes['id'].value]['name'] = \
specie.attributes['name'].value
speciesIdentity[specie.attributes['id'].value]['compartment'] = \
specie.attributes['compartment'].value
speciesIdentity[specie.attributes['id'].value]['Class'] = \
specie.getElementsByTagName('celldesigner:speciesIdentity'
)[0].getElementsByTagName('celldesigner:class'
)[0].childNodes[0].data
if speciesIdentity[specie.attributes['id'].value]['Class'] \
in CELLDESIGNER_TYPE_REFERENCE.keys():
speciesIdentity[specie.attributes['id'].value]['reference'] = \
specie.getElementsByTagName('celldesigner:speciesIdentity'
)[0].getElementsByTagName('celldesigner:'
+ CELLDESIGNER_TYPE_REFERENCE[speciesIdentity[specie.attributes['id'
].value]['Class']])[0].childNodes[0].data
speciesIdentity[specie.attributes['id'].value]['modifications'] = {}
if specie.getElementsByTagName('celldesigner:listOfModifications'):
if specie.getElementsByTagName('celldesigner:listOfModifications'
)[0].getElementsByTagName('celldesigner:modification'):
modifications = \
specie.getElementsByTagName('celldesigner:listOfModifications'
)[0].getElementsByTagName('celldesigner:modification'
)
for modification in modifications:
speciesIdentity[specie.attributes['id'
].value]['modifications'
][modification.attributes['residue'].value] = \
modification.attributes['state'].value
for specie in cell_designer_species:
if specie.attributes['id'].value not in speciesIdentity:
speciesIdentity[specie.attributes['id'].value] = {}
speciesIdentity[specie.attributes['id'].value]['name'] = \
specie.attributes['name'].value
speciesIdentity[specie.attributes['id'].value]['Class'] = \
specie.getElementsByTagName('celldesigner:speciesIdentity')[0].getElementsByTagName('celldesigner:class')[0].childNodes[0].data
if speciesIdentity[specie.attributes['id'].value]['Class'] \
in CELLDESIGNER_TYPE_REFERENCE.keys():
speciesIdentity[specie.attributes['id'].value]['reference'] = \
specie.getElementsByTagName('celldesigner:speciesIdentity')[0].getElementsByTagName('celldesigner:' + CELLDESIGNER_TYPE_REFERENCE[speciesIdentity[specie.attributes['id'].value]['Class']])[0].childNodes[0].data
speciesIdentity[specie.attributes['id'].value]['modifications'] = {}
if specie.getElementsByTagName('celldesigner:listOfModifications'):
if specie.getElementsByTagName('celldesigner:listOfModifications')[0].getElementsByTagName('celldesigner:modification'):
modifications = specie.getElementsByTagName('celldesigner:listOfModifications')[0].getElementsByTagName('celldesigner:modification')
for modification in modifications:
speciesIdentity[specie.attributes['id'
].value]['modifications'
][modification.attributes['residue'].value] = \
modification.attributes['state'].value
# ....else:
# ........print specie.attributes["id"].value
cell_designer_species_alias = {}
for specie in speciesAlias:
cell_designer_species_alias[specie.attributes['id'].value] = {}
cell_designer_species_alias[specie.attributes['id'].value]['species'] = specie.attributes['species'].value
cell_designer_species_alias[specie.attributes['id'].value]['activity'] = \
specie.getElementsByTagName( 'celldesigner:activity')[0].childNodes[0].data
celldesigner_complex_alias = {}
for specie in complexAlias:
celldesigner_complex_alias[specie.attributes['id'].value] = {}
celldesigner_complex_alias[specie.attributes['id'].value]['players'] = []
celldesigner_complex_alias[specie.attributes['id'].value]['species'] = specie.attributes['species'].value
celldesigner_complex_alias[specie.attributes['id'].value]['activity'] = specie.getElementsByTagName('celldesigner:activity')[0].childNodes[0].data
for specie in speciesAlias:
if 'complexSpeciesAlias' in [item[0] for item in specie.attributes.items()]:
celldesigner_complex_alias[specie.attributes['complexSpeciesAlias'].value]['players'].append(specie.attributes['id'].value)
for specie in complexAlias:
if 'complexSpeciesAlias' in [item[0] for item in specie.attributes.items()]:
celldesigner_complex_alias[specie.attributes['complexSpeciesAlias'].value]['players'].append(specie.attributes['id'].value)
reactionIdentity = {}
for reaction in reactions:
reactionIdentity[reaction.attributes['id'].value] = {}
# reactionIdentity[reaction.attributes["id"].value]["metaid"] = reaction.attributes["metaid"].value
reactionIdentity[reaction.attributes['id'].value]['reversible'] = \
reaction.attributes['reversible'].value
reactionIdentity[reaction.attributes['id'].value]['reactionType'] = \
reaction.getElementsByTagName('celldesigner:reactionType')[0].childNodes[0].data
reactants = {}
products = {}
modifiers = {}
if reaction.getElementsByTagName('listOfReactants'):
for reactant in reaction.getElementsByTagName('listOfReactants')[0].getElementsByTagName('speciesReference'):
reactants[reactant.attributes['species'].value] = ''
if reaction.getElementsByTagName('listOfProducts'):
for product in reaction.getElementsByTagName('listOfProducts')[0].getElementsByTagName('speciesReference'):
products[product.attributes['species'].value] = ''
if reaction.getElementsByTagName('listOfModifiers'):
for modifier in reaction.getElementsByTagName('listOfModifiers')[0].getElementsByTagName('modifierSpeciesReference'):
modifiers[modifier.attributes['species'].value] = ''
if reaction.getElementsByTagName('celldesigner:listOfModification'):
listOfModifications = \
reaction.getElementsByTagName('celldesigner:listOfModification')[0].getElementsByTagName('celldesigner:modification')
for modification in listOfModifications:
modifiers[modification.attributes['modifiers'].value] = \
[modification.attributes['type'].value,
modification.attributes['aliases'].value]
if reaction.getElementsByTagName('celldesigner:baseReactants'):
listOfModifications = \
reaction.getElementsByTagName('celldesigner:baseReactants')[0].getElementsByTagName('celldesigner:baseReactant')
for modification in listOfModifications:
reactants[modification.attributes['species'].value] = \
modification.attributes['alias'].value
if reaction.getElementsByTagName('celldesigner:baseProducts'):
listOfModifications = \
reaction.getElementsByTagName('celldesigner:baseProducts')[0].getElementsByTagName('celldesigner:baseProduct')
for modification in listOfModifications:
products[modification.attributes['species'].value] = \
modification.attributes['alias'].value
reactionIdentity[reaction.attributes['id'].value]['reactants'] = \
reactants
reactionIdentity[reaction.attributes['id'].value]['products'] = \
products
reactionIdentity[reaction.attributes['id'].value]['modifiers'] = \
modifiers
document = libsbml.SBMLDocument(2, 4)
model = document.createModel()
for i in compartmentIdentity:
add_compartment(i, model, compartmentIdentity)
listofcell_designer_species = []
for i in cell_designer_species_alias:
if cell_designer_species_alias[i]['species'] in speciesIdentity \
and speciesIdentity[cell_designer_species_alias[i]['species']]['Class'].lower() \
in STANDOFF_ENTITY_TO_SBO_MAPPING:
species = model.createSpecies()
species.setId(str(i))
species.setMetaId('metaid_0000' + str(i))
listofcell_designer_species.append(str(i))
species.appendNotes('<p xmlns="http://www.w3.org/1999/xhtml">{0}</p>'.format(cgi.escape(cell_designer_species_alias[i]['activity'])))
for j in \
speciesIdentity[cell_designer_species_alias[i]['species']]['modifications']:
species.appendNotes('<p xmlns="http://www.w3.org/1999/xhtml">{0}</p>'.format(cgi.escape(speciesIdentity[cell_designer_species_alias[i]['species']]['modifications'][j])))
species.setName( str( correct_species_alias( i, speciesIdentity, cell_designer_species_alias)))
species.setSBOTerm(str(STANDOFF_ENTITY_TO_SBO_MAPPING[speciesIdentity[cell_designer_species_alias[i]['species']]['Class'].lower()]))
if 'compartment' \
in speciesIdentity[cell_designer_species_alias[i]['species']]:
species.setCompartment(str(speciesIdentity[cell_designer_species_alias[i]['species']]['compartment']))
else:
species.setCompartment('default')
for i in celldesigner_complex_alias:
if celldesigner_complex_alias[i]['species'] in speciesIdentity \
and speciesIdentity[celldesigner_complex_alias[i]['species']]['Class'].lower() \
in STANDOFF_ENTITY_TO_SBO_MAPPING:
species = model.createSpecies()
species.setId(str(i))
species.setMetaId('metaid_0000' + str(i))
listofcell_designer_species.append(str(i))
add_annotation_complex( model, str(i), celldesigner_complex_alias[i]['players'])
for j in speciesIdentity[celldesigner_complex_alias[i]['species']]['modifications']:
species.appendNotes('<p xmlns="http://www.w3.org/1999/xhtml">{0}</p>'.format(cgi.escape(speciesIdentity[celldesigner_complex_alias[i]['species']]['modifications'][j])))
species.setName(str(speciesIdentity[celldesigner_complex_alias[i]['species']]['name']))
if 'compartment' \
in speciesIdentity[celldesigner_complex_alias[i]['species']]:
species.setCompartment(str(speciesIdentity[celldesigner_complex_alias[i]['species']]['compartment']))
else:
species.setCompartment('default')
species.setSBOTerm(str(STANDOFF_ENTITY_TO_SBO_MAPPING[speciesIdentity[celldesigner_complex_alias[i]['species']]['Class'].lower()]))
for i in reactionIdentity:
reaction = model.createReaction()
reaction.setName(str(i))
reaction.setId(str(i))
if reactionIdentity[i]['reversible'].upper() == 'TRUE':
reaction.setReversible(True)
else:
reaction.setReversible(False)
if STANDOFF_EVENT_TO_SBO_MAPPING.get(reactionIdentity[i]['reactionType'].lower()) \
and (STANDOFF_EVENT_TO_SBO_MAPPING[reactionIdentity[i]['reactionType'].lower()])[0:3] == 'SBO':
reaction.setSBOTerm(STANDOFF_EVENT_TO_SBO_MAPPING[reactionIdentity[i]['reactionType'].lower()])
reaction.appendNotes('<p xmlns="http://www.w3.org/1999/xhtml">{0}</p>'.format(cgi.escape(str(reactionIdentity[i]['reactionType']))))
for j in reactionIdentity[i]['reactants']:
if model.getSpecies(str(reactionIdentity[i]['reactants'][j])):
reactant_ref = reaction.createReactant()
reactant_ref.setSpecies(str(reactionIdentity[i]['reactants'][j]))
elif model.getSpecies( str( get_complex_to_species_link( str(j), celldesigner_complex_alias))):
reactant_ref = reaction.createReactant()
reactant_ref.setSpecies(str(get_complex_to_species_link(str(j), celldesigner_complex_alias)))
for j in reactionIdentity[i]['products']:
if str(reactionIdentity[i]['products'][j]) \
in listofcell_designer_species:
product_ref = reaction.createProduct()
product_ref.setSpecies(str(reactionIdentity[i]['products'][j]))
elif str(get_complex_to_species_link( str(j), celldesigner_complex_alias)) \
in listofcell_designer_species:
product_ref = reaction.createProduct()
product_ref.setSpecies(str(get_complex_to_species_link(str(j),celldesigner_complex_alias)))
for j in reactionIdentity[i]['modifiers']:
if reactionIdentity[i]['modifiers'][j]:
if model.getSpecies(str(list(reactionIdentity[i]['modifiers'][j])[1])):
modifier_ref = reaction.createModifier()
modifier_ref.setSpecies(str(list(reactionIdentity[i]['modifiers'][j])[1]))
modifier_ref.appendNotes('<p xmlns="http://www.w3.org/1999/xhtml">{0}</p>'.format(cgi.escape(str(list(reactionIdentity[i]['modifiers'][j])[0]))))
modifier_ref.appendNotes('<p xmlns="http://www.w3.org/1999/xhtml">{0}</p>'.format(cgi.escape(str(STANDOFF_EVENT_TO_SBO_MAPPING[reactionIdentity[i]['modifiers'][j][0].lower()]))))
elif model.getSpecies(str(get_complex_to_species_link(str(j),celldesigner_complex_alias))):
modifier_ref = reaction.createModifier()
modifier_ref.setSpecies(str(get_complex_to_species_link(str(j),celldesigner_complex_alias)))
libsbml.writeSBMLToFile(document, output_file)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument( '--input',
action = "store",
dest = "input",
default = None,
help = "CellDesigner SBML input file name.")
parser.add_argument( '--output',
action = "store",
dest = "output",
default = None,
help = "Pure SBML output file name.")
cmd = parser.parse_args()
if cmd.input is None:
print( "Error. Please specify input.")
sys.exit(-1)
elif not os.path.isfile( cmd.input):
print( "ERROR: input file %s does not exist.\n", cmd.input)
sys.exit( 1)
elif cmd.output is None:
print( "Error. Please specify output.")
sys.exit(-1)
else:
export_pure_sbml( cmd.input, cmd.output)
|
apache-2.0
| 228,414,930,774,359,260 | 52.054825 | 229 | 0.624023 | false |
lbryio/lbry
|
torba/torba/rpc/util.py
|
1
|
3231
|
# Copyright (c) 2018, Neil Booth
#
# All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__all__ = ()
import asyncio
from collections import namedtuple
import inspect
# other_names: None means cannot be called with keyword arguments only;
# any means any name is good
SignatureInfo = namedtuple('SignatureInfo', 'min_args max_args '
'required_names other_names')
def signature_info(func):
params = inspect.signature(func).parameters
min_args = max_args = 0
required_names = []
other_names = []
no_names = False
for p in params.values():
if p.kind == p.POSITIONAL_OR_KEYWORD:
max_args += 1
if p.default is p.empty:
min_args += 1
required_names.append(p.name)
else:
other_names.append(p.name)
elif p.kind == p.KEYWORD_ONLY:
other_names.append(p.name)
elif p.kind == p.VAR_POSITIONAL:
max_args = None
elif p.kind == p.VAR_KEYWORD:
other_names = any
elif p.kind == p.POSITIONAL_ONLY:
max_args += 1
if p.default is p.empty:
min_args += 1
no_names = True
if no_names:
other_names = None
return SignatureInfo(min_args, max_args, required_names, other_names)
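# Illustrative sketch (editor addition): for
#     def handler(a, b=1, *args, **kwargs): ...
# signature_info(handler) is
#     SignatureInfo(min_args=1, max_args=None, required_names=['a'], other_names=any)
# because *args removes the positional upper bound and **kwargs accepts any keyword name.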
class Concurrency(object):
def __init__(self, max_concurrent):
self._require_non_negative(max_concurrent)
self._max_concurrent = max_concurrent
self.semaphore = asyncio.Semaphore(max_concurrent)
def _require_non_negative(self, value):
if not isinstance(value, int) or value < 0:
raise RuntimeError('concurrency must be a natural number')
@property
def max_concurrent(self):
return self._max_concurrent
async def set_max_concurrent(self, value):
self._require_non_negative(value)
diff = value - self._max_concurrent
self._max_concurrent = value
if diff >= 0:
for _ in range(diff):
self.semaphore.release()
else:
for _ in range(-diff):
await self.semaphore.acquire()
|
mit
| -3,008,939,187,682,637,000 | 33.010526 | 73 | 0.646859 | false |
nowsecure/datagrid-gtk3
|
datagrid_gtk3/tests/utils/test_transformations.py
|
2
|
1682
|
"""Data transformation utilities test cases."""
import unittest
from datagrid_gtk3.utils.transformations import degree_decimal_str_transform
class DegreeDecimalStrTransformTest(unittest.TestCase):
"""Degree decimal string transformation test case."""
def test_no_basestring(self):
"""AssertionError raised when no basestring value is passed."""
self.assertRaises(AssertionError, degree_decimal_str_transform, 0)
self.assertRaises(AssertionError, degree_decimal_str_transform, 1.23)
self.assertRaises(AssertionError, degree_decimal_str_transform, True)
def test_no_digit(self):
"""AssertionError raised when other characters than digits."""
self.assertRaises(AssertionError, degree_decimal_str_transform, '.')
self.assertRaises(AssertionError, degree_decimal_str_transform, '+')
self.assertRaises(AssertionError, degree_decimal_str_transform, '-')
def test_length(self):
"""AssertionError when more characters than expected passed."""
self.assertRaises(
AssertionError, degree_decimal_str_transform, '123456789')
def test_point_insertion(self):
"""Decimal point is inserted in the expected location."""
self.assertEqual(
degree_decimal_str_transform('12345678'),
'12.345678',
)
self.assertEqual(
degree_decimal_str_transform('1234567'),
'1.234567',
)
self.assertEqual(
degree_decimal_str_transform('123456'),
'0.123456',
)
self.assertEqual(
degree_decimal_str_transform('12345'),
'0.012345',
)
|
mit
| -1,096,812,674,693,392,800 | 35.565217 | 77 | 0.653389 | false |
mercel92/monitoring-agent
|
modules/cpanel.py
|
1
|
3901
|
import os
import sqlite3
import subprocess
import re
import time,datetime,calendar
class Cpanel:
######
##
    ##  Scans the domains configured in cPanel
    ##  and fetches data for each domain found
##
#######
def getCpanelInfo(self):
file = '/etc/trueuserdomains'
if (os.path.exists(file) == True):
fileObj = open(file, 'r')
sites = fileObj.readlines()
return self.getDomainInfo(sites)
return False
######
##
    ##  Returns quota and traffic information
    ##  for the domains
##
#######
def getDomainInfo(self,sites):
siteList = []
for index, site in enumerate(sites):
site = site.replace(' ', '').replace('\n', '')
pos = site.find(':')
username = site[pos:].replace(':', '')
bandwidth = self.getBandwithFromDomain(username)
quota = self.getQuotaInfoFromDomain(username)
if (bandwidth == False or bandwidth == None):
bandwidth = -1;
if (quota == False):
quota = {'space': '0', 'limit': '0'}
siteList.append({'domain': site[:pos], 'username': username, 'bandwidth': bandwidth, 'disc': quota})
return siteList
######
##
    ##  Returns the traffic (bandwidth) information
    ##  of the given cPanel user
##
#######
def getBandwithFromDomain(self,user):
try:
now = datetime.datetime.now()
monthRanges = calendar.monthrange(now.year, now.month)
s1 = '1 /' + str(now.month) + '/' + str(now.year)
timestamp = time.mktime(datetime.datetime.strptime(s1, "%d/%m/%Y").timetuple())
bandwidth = 0
file = '/var/cpanel/bandwidth/' + user + '.sqlite'
if (os.path.exists(file) == False):
return False
conn = sqlite3.connect(file)
c = conn.cursor()
sqlQuery = 'SELECT SUM(bytes) AS sum FROM \'bandwidth_5min\' WHERE unixtime >' + str(timestamp)
c.execute(sqlQuery)
bandwidth = c.fetchone()[0]
conn.close()
return bandwidth;
except:
return False
######
##
    ##  Returns the quota information
    ##  of the given cPanel user
##
#######
def getQuotaInfoFromDomain(self,user):
data = self.getQuotaInfo(user)
if(data == False):
return False
else:
obj = { 'quota': {}, 'inode': {}}
obj['quota']['used'] = data[0]
obj['quota']['soft'] = data[1]
obj['quota']['hard'] = data[2]
obj['inode']['used'] = data[3]
obj['inode']['soft'] = data[4]
obj['inode']['hard'] = data[5]
return obj
def getQuotaInfo(self,user):
try:
out = subprocess.check_output(['quota', '-u', user])
lines = out.splitlines()
data = lines[3].split()
return data
except:
return False
######
##
    ##  Number of mails sent by each account,
    ##  kept in username : count format
##
#######
def getMailCount(self):
logList = []
file = '/var/log/exim_mainlog'
if (os.path.exists(file) == True):
logs = subprocess.Popen(
"grep 'cwd=/home' /var/log/exim_mainlog | awk ' {print $3} ' | cut -d / -f 3 | sort -bg | uniq -c | sort -bg",
shell=True, stdout=subprocess.PIPE).stdout.readlines()
for index, log in enumerate(logs):
log = log.strip().replace('\n', '')
regexObj = re.search('\d+ {1}[A-Za-z0-9]{1}', log)
pos = regexObj.end() - 1
logList.append({'username': log[pos:], 'count': int(log[:pos])})
return logList
return False
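# Illustrative usage sketch (editor addition; assumes this runs on a cPanel host
# where /etc/trueuserdomains and /var/log/exim_mainlog are readable):
#   sites = Cpanel().getCpanelInfo()   # list of {'domain', 'username', 'bandwidth', 'disc'}
#   mails = Cpanel().getMailCount()    # per-account send counts parsed from exim_mainlog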
|
gpl-3.0
| -8,129,534,031,758,002,000 | 26.478873 | 126 | 0.497308 | false |
garcia/simfile
|
simfile/timing/__init__.py
|
1
|
6486
|
"""
Timing data classes, plus submodules that operate on timing data.
"""
from decimal import Decimal
from fractions import Fraction
from numbers import Rational
from typing import Optional, Type, NamedTuple
from ._private.sscproxy import ssc_proxy
from simfile._private.generic import ListWithRepr
from simfile.types import Simfile, Chart
__all__ = ['Beat', 'BeatValue', 'BeatValues', 'TimingData']
MEASURE_SUBDIVISION = 192
BEAT_SUBDIVISION = MEASURE_SUBDIVISION // 4
class Beat(Fraction):
"""
A fractional beat value, denoting vertical position in a simfile.
    The constructor takes the same arguments as Python's :code:`Fraction`. If
the input doesn't have an explicit denominator (either as a
`denominator` argument or a lone rational argument), the resulting
fraction will be rounded to the nearest :meth:`tick`.
"""
def __new__(cls, numerator=0, denominator=None):
self = super().__new__(cls, numerator, denominator)
if denominator or isinstance(numerator, Rational):
return self
else:
return self.round_to_tick()
@classmethod
def tick(cls) -> 'Beat':
"""
1/48 of a beat (1/192 of a measure).
"""
return cls(1, BEAT_SUBDIVISION)
@classmethod
def from_str(cls, beat_str) -> 'Beat':
"""
Convert a decimal string to a beat, rounding to the nearest tick.
"""
return Beat(beat_str).round_to_tick()
def round_to_tick(self) -> 'Beat':
"""
Round the beat to the nearest tick.
"""
return Beat(int(round(self * BEAT_SUBDIVISION)), BEAT_SUBDIVISION)
def __str__(self) -> str:
"""
Convert the beat to its usual MSD representation (3 decimal digits).
"""
return f'{float(self):.3f}'
def __repr__(self) -> str:
"""
Pretty repr() for beats.
If the beat falls on a tick, the printed value is a decimal
representation with at most 3 decimal digits and no trailing
zeros. Otherwise, the printed value is the numerator and
denominator.
"""
if BEAT_SUBDIVISION % self.denominator == 0:
return f"Beat({str(self).rstrip('0').rstrip('.')})"
else:
return super().__repr__()
# Preserve type for methods inherited from Fraction
def __abs__(self): return Beat(super().__abs__())
def __add__(self, other): return Beat(super().__add__(other))
def __divmod__(self, other):
quotient, remainder = super().__divmod__(other)
return (quotient, Beat(remainder))
def __mod__(self, other): return Beat(super().__mod__(other))
def __mul__(self, other): return Beat(super().__mul__(other))
def __neg__(self): return Beat(super().__neg__())
def __pos__(self): return Beat(super().__pos__())
def __pow__(self, other): return Beat(super().__pow__(other))
def __radd__(self, other): return Beat(super().__radd__(other))
def __rdivmod__(self, other):
quotient, remainder = super().__rdivmod__(other)
return (quotient, Beat(remainder))
def __rmod__(self, other): return Beat(super().__rmod__(other))
def __rmul__(self, other): return Beat(super().__rmul__(other))
def __rpow__(self, other): return Beat(super().__rpow__(other))
def __rsub__(self, other): return Beat(super().__rsub__(other))
def __rtruediv__(self, other): return Beat(super().__rtruediv__(other))
def __sub__(self, other): return Beat(super().__sub__(other))
def __truediv__(self, other): return Beat(super().__truediv__(other))
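# Editor's sketch (illustrative addition, not part of the original module):
# how Beat construction and tick rounding behave, using only the definitions
# above.
#
#   >>> Beat(1, 4)               # explicit denominator: kept exact
#   Beat(0.25)
#   >>> Beat.from_str('1.333')   # decimal string, rounded to the nearest 1/48
#   Beat(1.333)
#   >>> str(Beat(1, 2))          # MSD representation uses 3 decimal digits
#   '0.500'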
class BeatValue(NamedTuple):
"""
An event that occurs on a particular beat, e.g. a BPM change or stop.
The decimal value's semantics vary based on the type of event:
* BPMS: the new BPM value
* STOPS, DELAYS: number of seconds to pause
* WARPS: number of beats to skip
"""
beat: Beat
value: Decimal
class BeatValues(ListWithRepr[BeatValue]):
"""
A list of :class:`BeatValue` instances.
"""
@classmethod
def from_str(cls: Type['BeatValues'], string: str) -> 'BeatValues':
"""
Parse the MSD value component of a timing data list.
Specifically, `BPMS`, `STOPS`, `DELAYS`, and `WARPS` are
the timing data lists whose values can be parsed by this
method.
"""
instance = cls()
if string.strip():
for row in string.split(','):
beat, value = row.strip().split('=')
instance.append(BeatValue(Beat.from_str(beat), Decimal(value)))
return instance
def __str__(self) -> str:
"""
Convert the beat-value pairs to their MSD value representation.
"""
return ',\n'.join(f'{event.beat}={event.value}' for event in self)
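# Editor's sketch (illustrative addition, not part of the original module):
# round-tripping an MSD-style BPMS value through BeatValues.
#
#   >>> bpms = BeatValues.from_str('0.000=120.000,\n16.000=150.000')
#   >>> bpms[1].value
#   Decimal('150.000')
#   >>> print(bpms)
#   0.000=120.000,
#   16.000=150.000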
class TimingData(NamedTuple):
"""
Timing data for a simfile, possibly enriched with SSC chart timing.
"""
bpms: BeatValues
stops: BeatValues
delays: BeatValues
warps: BeatValues
offset: Decimal
@classmethod
def from_simfile(
cls: Type['TimingData'],
simfile: Simfile,
chart: Optional[Chart] = None
) -> 'TimingData':
"""
Obtain timing data from a simfile and optionally an SSC chart.
If both an :class:`.SSCSimfile` (version 0.7 or higher) and an
:class:`.SSCChart` are provided, any "split timing" defined in
the chart will take precedence over the simfile's timing data.
This is true regardless of the property's value; for example, a
blank `STOPS` value in the chart overrides a non-blank value
from the simfile.
Per StepMania's behavior, the offset defaults to zero if the
simfile (and/or SSC chart) doesn't specify one. (However,
unlike StepMania, the BPM does not default to 60 when omitted;
the default BPM doesn't appear to be used deliberately in any
existing simfiles, whereas the default offset does get used
intentionally from time to time.)
"""
properties = ssc_proxy(simfile, chart)
return TimingData(
bpms=BeatValues.from_str(properties['BPMS']),
stops=BeatValues.from_str(properties['STOPS']),
delays=BeatValues.from_str(properties['DELAYS']),
warps=BeatValues.from_str(properties['WARPS']),
offset=Decimal(properties['OFFSET'] or 0),
)
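# Editor's sketch (illustrative addition, not part of the original module):
# typical use of TimingData.from_simfile. Here `sim` and `chart` stand for an
# already-loaded Simfile and SSCChart; obtaining them is outside this module.
#
#   >>> timing = TimingData.from_simfile(sim)         # song-level timing
#   >>> timing = TimingData.from_simfile(sim, chart)  # prefer SSC split timing
#   >>> timing.offset, timing.bpms[0].value           # e.g. offset & first BPM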
|
mit
| 2,295,419,533,741,087,700 | 34.064865 | 79 | 0.609775 | false |
igsr/igsr_analysis
|
VCF/VCFfilter/MLclassifier.py
|
1
|
12655
|
'''
Created on 27 Feb 2017
@author: ernesto
'''
import pandas as pd
import numpy as np
import pdb
import pickle
import gc
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import Imputer
from sklearn.feature_selection import RFE
class MLclassifier:
"""
Class to filter a VCF using a supervised machine learning binary classifier. This class
relies on a truth set (for example the GIAB consensus call set) that can be used to train
the model
"""
def __init__(self, fitted_model=None, bcftools_folder=None):
"""
Constructor
Parameters
----------
fitted_model : str, optional
Path to file containing the serialized fitted model.
bcftools_folder : str, optional
Path to folder containing the bcftools binary.
"""
self.fitted_model = fitted_model
self.bcftools_folder = bcftools_folder
def __process_df(self, tp_annotations, fp_annotations):
"""
Private function that performs three types of operations on the tp_annotations
and fp_annotations:
1) Read-in the data
2) Impute the missing values
3) Normalize the different features
Parameters
----------
tp_annotations : str
Path to file with the variant annotations derived from the call
set with the True positives.
fp_annotations : str
Path to file with the variant annotations derived from the call
set with the False positives.
Return
------
        aDF_std : A normalized dataframe
"""
#
## Read-in the data
#
# check if tp_annotations and fp_annotations have the same columns and get columns names
DF_TP_columns = pd.read_csv(tp_annotations, sep="\t", na_values=['.'], nrows=1).columns
DF_FP_columns = pd.read_csv(fp_annotations, sep="\t", na_values=['.'], nrows=1).columns
if DF_TP_columns.equals((DF_FP_columns)) is False:
            raise Exception("Columns in the passed dataframes are not equal")
DF_TP = None
DF_FP = None
if DF_TP_columns[2] == '[3](null)' or DF_FP_columns[2] == '[3](null)':
# all INFO columns are in dataframe. Parse the DF with different function
DF_TP = self.__process_dfINFO(tp_annotations)
DF_FP = self.__process_dfINFO(fp_annotations)
else:
# create 2 dataframes from tsv files skipping the 2 first columns,
# as it is assumed that the 1st is 'chr' and 2nd is 'pos'
DF_TP = pd.read_csv(tp_annotations, sep="\t", na_values=['.'],
usecols=[i for i in range(2, len(DF_TP_columns))])
DF_FP = pd.read_csv(fp_annotations, sep="\t", na_values=['.'],
usecols=[i for i in range(2, len(DF_FP_columns))])
#assign outcome=1 if TP and 0 if FP
DF_TP = DF_TP.assign(is_valid=1)
DF_FP = DF_FP.assign(is_valid=0)
#now, we combine the 2 dataframes
frames = [DF_TP, DF_FP]
DF = pd.concat(frames)
#
## Impute missing values
#
#we have several columns with NA values, we will impute the missing values with the median
imputer = Imputer(strategy="median")
imputer.fit(DF)
X = imputer.transform(DF)
#transforming back to a DF
DF_tr = pd.DataFrame(X, columns=DF.columns)
#
## Normalization of different features
#
feature_names = DF_tr.columns.drop(['is_valid'])
std_scale = preprocessing.StandardScaler().fit(DF_tr[feature_names])
std_array = std_scale.transform(DF_tr[feature_names])
aDF_std = pd.DataFrame(data=std_array, columns=feature_names)
aDF_std.insert(loc=0, column='is_valid', value=DF_tr['is_valid'].values)
return aDF_std
def __get_ids(self, x):
ids = []
for i in x:
# sometimes, the value is None
if i is None:
continue
elms = i.split('=')
ids.append(elms[0])
new_ids = list(set(ids))
return new_ids[0]
def __get_values(self, x):
values = []
for i in x:
if i is None:
values.append(0)
continue
elms = i.split('=')
if len(elms)==1:
# value is of FLAG type
values.append(1)
else:
try:
float(elms[1])
except:
values.append(0)
else:
values.append(elms[1])
return values
def __process_dfINFO(self, annotations):
"""
Function to parse the annotations file when these are obtained
by using bcftools query -f '%INFO', i.e. all INFO fields are fetched
Parameters
----------
annotations : str
Path to file with variant annotations obtained using
'bcftools query'
Returns
-------
new_DF : dataframe
"""
DF_columns = pd.read_csv(annotations, sep="\t", na_values=['.'], nrows=1).columns
DF = pd.read_csv(annotations, sep="\t", na_values=['.'],usecols=[i for i in range(2, len(DF_columns))])
DF.rename(columns={"[3](null)":"INFO"},inplace=True)
DF = DF.INFO.str.split(";",expand=True,)
ids=DF.apply(self.__get_ids)
DF.columns=ids
new_DF=DF.apply(self.__get_values)
return new_DF
def train(self, tp_annotations, fp_annotations, outprefix, test_size=0.25):
"""
        Function to train the binary classifier using a gold standard call set
Parameters
----------
tp_annotations : str
Path to file with the variant annotations derived from the
call set with the True positives.
fp_annotations : str
Path to file with the variant annotations derived from the
call set with the False positives.
outprefix : str
String used as the prefix for the fitted model.
test_size : float, default=0.25
Fraction of the initial call set that will be
used for assessing the model.
Returns
-------
outfile : str
Path to serialized fitted model.
"""
aDF_std = self.__process_df(tp_annotations, fp_annotations)
feature_names = aDF_std.columns.drop(['is_valid'])
#
## Fitting the ML model
#
predictors = aDF_std[feature_names]
outcome = aDF_std[['is_valid']]
        x_train, x_test, y_train, y_test = train_test_split(predictors, outcome, test_size=test_size)
logisticRegr = LogisticRegression(verbose=1)
logisticRegr.fit(x_train, y_train)
#
## Assess the performance of the fitted model
#
score = logisticRegr.score(x_test, y_test)
print("Score for the logistic regression fitted model is: {0}".format(score))
self.score = score
#
## Model persistence
#
outfile = outprefix+".sav"
pickle.dump(logisticRegr, open(outfile, 'wb'))
self.fitted_model = outfile
outfscore = outprefix+".score"
f = open(outfscore, 'w')
f.write("Score for the logistic regression fitted model is: {0}\n".format(score))
        f.close()
return outfile
def predict(self, outprefix, annotation_f, filter_label='MLFILT', cutoff=0.8):
"""
Function to apply a serialized logistic regression model on a file
containing the annotations for each site and to predict if the variant is real
Note. Sites with missing annotations will be imputed with the median of that annotation
Parameters
----------
outprefix: str
String used as the prefix for the fitted model.
annotation_f: filename
Path to file with the sites and annotations that will be classified
filter_label: str, default='MLFILT'
String with the label used for FILTER that can be used with
'bcftools annotate' in order to annotate the VCF file.
cutoff : float, default=0.8
Cutoff value used for deciding if a variant is a TP.
Sites with a prob_1<cutoff will be considered as FP and
the FILTER column will be the string in the 'filter_label' option.
If prob_1>=cutoff then the FILTER column will be PASS.
Returns
-------
outfile : str
Path to table file with predictions.
"""
imputer = Imputer(strategy="median")
# load the serialized model
loaded_model = pickle.load(open(self.fitted_model, 'rb'))
outfile = '{0}.tsv'.format(outprefix)
DF_all = pd.read_csv(annotation_f, sep="\t", na_values=['.'], index_col=False)
DF_all_num = DF_all.drop("# [1]CHROM", axis=1)
# we will impute the missing values by the median
imputer.fit(DF_all_num)
X = imputer.transform(DF_all_num)
DF_all_imp = pd.DataFrame(X, columns=DF_all.columns.drop(['# [1]CHROM']))
# normalization
feature_names = DF_all_imp.columns.drop(['[2]POS'])
std_scale = preprocessing.StandardScaler().fit(DF_all_imp[feature_names])
std_array = std_scale.transform(DF_all_imp[feature_names])
# Now, we can calculate the probabilities for each of the
# categories of the dependent variable:
predictions_probs = loaded_model.predict_proba(std_array)
#decide if it is a TP or a FP
filter_outcome = []
for i in predictions_probs[:, 1]:
if i >= cutoff:
filter_outcome.append('PASS')
else:
filter_outcome.append(filter_label)
final_df = pd.DataFrame({
'#CHR': DF_all['# [1]CHROM'],
'POS': DF_all['[2]POS'].astype(int),
'FILTER': filter_outcome,
'prob_TP': [round(elem, 4) for elem in predictions_probs[:, 1]]})
# change order of columns
final_df = final_df[['#CHR', 'POS', 'FILTER', 'prob_TP']]
final_df.to_csv(outfile, sep='\t', header=True, index=False)
return outfile
def rfe(self, tp_annotations, fp_annotations, n_features, outreport):
"""
Function to select the variant annotations that are more relevant for
predicting if a variant is real. This is achieved by running sklearn.feature_selection.RFE
method to perform Recursive Feature Elimination, which works by recursively considering
smaller and smaller sets of features
Parameters
----------
tp_annotations : str
Path to file with the variant annotations derived from the call set with
the True positives.
fp_annotations : str
Path to file with the variant annotations derived from the call set with
the False positives.
n_features : int
Number of features to select by RFE.
outreport : str
Filename used to write the report to.
Returns
-------
outreport : str
Containing report on selected features.
"""
aDF_std = self.__process_df(tp_annotations, fp_annotations)
feature_names = aDF_std.columns.drop(['is_valid'])
array = aDF_std.values
total_feats = array.shape[1]
X = array[:, 1:total_feats]
Y = array[:, 0]
model = LogisticRegression()
rfe = RFE(model, int(n_features))
fit = rfe.fit(X, Y)
# write selected features report to file
f = open(outreport, 'w')
f.write("Number of features: {0}\n".format(fit.n_features_))
f.write("Selected Features: {0}\n".format(fit.support_))
f.write("Feature Ranking: {0}\n".format(fit.ranking_))
f.write("The selected features are:{0}\n".format(feature_names[fit.support_]))
f.write("All features are:{0}\n".format(feature_names))
        f.close()
return outreport
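# Editor's sketch (illustrative addition, not part of the original module):
# the intended workflow based on the method signatures above; all file names
# are hypothetical placeholders.
#
#   ml = MLclassifier(bcftools_folder='/path/to/bcftools')
#   ml.train('tp_annotations.tsv', 'fp_annotations.tsv', outprefix='model')
#   ml.rfe('tp_annotations.tsv', 'fp_annotations.tsv', n_features=5,
#          outreport='rfe_report.txt')
#   ml.predict(outprefix='calls', annotation_f='sites_annotations.tsv', cutoff=0.8)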
|
apache-2.0
| 5,320,168,096,877,009,000 | 34.448179 | 111 | 0.564836 | false |
ijmarshall/robotreviewer3
|
robotreviewer/ml_worker.py
|
1
|
12098
|
"""
RobotReviewer ML worker
called by `celery -A ml_worker worker --loglevel=info`
"""
# Authors: Iain Marshall <[email protected]>
# Joel Kuiper <[email protected]>
# Byron Wallace <[email protected]>
from celery import Celery, current_task
from celery.contrib import rdb
from celery.signals import worker_init
import json
import logging, os
import sqlite3
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
DEBUG_MODE = str2bool(os.environ.get("DEBUG", "true"))
LOCAL_PATH = "robotreviewer/uploads"
LOG_LEVEL = (logging.DEBUG if DEBUG_MODE else logging.INFO)
# determined empirically by Edward; covers 90% of abstracts
# (crudely and unscientifically adjusted for grobid)
NUM_WORDS_IN_ABSTRACT = 450
import robotreviewer
from robotreviewer import config
logging.basicConfig(level=LOG_LEVEL, format='[%(levelname)s] %(name)s %(asctime)s: %(message)s', filename=robotreviewer.get_data(config.LOG))
log = logging.getLogger(__name__)
log.info("RobotReviewer machine learning tasks starting")
from robotreviewer.textprocessing.pdfreader import PdfReader
pdf_reader = PdfReader() # launch Grobid process before anything else
from robotreviewer.textprocessing.tokenizer import nlp
''' robots! '''
# from robotreviewer.robots.bias_robot import BiasRobot
from robotreviewer.robots.rationale_robot import BiasRobot
from robotreviewer.robots.pico_robot import PICORobot
from robotreviewer.robots.rct_robot import RCTRobot
from robotreviewer.robots.pubmed_robot import PubmedRobot
from robotreviewer.robots.pico_span_robot import PICOSpanRobot
from robotreviewer.robots.bias_ab_robot import BiasAbRobot
from robotreviewer.robots.human_robot import HumanRobot
# from robotreviewer.robots.mendeley_robot import MendeleyRobot
# from robotreviewer.robots.ictrp_robot import ICTRPRobot
# from robotreviewer.robots import pico_viz_robot
# from robotreviewer.robots.pico_viz_robot import PICOVizRobot
from robotreviewer.robots.punchlines_robot import PunchlinesBot
from robotreviewer.robots.sample_size_robot import SampleSizeBot
from robotreviewer.data_structures import MultiDict
import robotreviewer
######
## default annotation pipeline defined here
######
'''
log.info("Loading the robots...")
bots = {"bias_bot": BiasRobot(top_k=3),
"pico_bot": PICORobot(),
"pubmed_bot": PubmedRobot(),
# "ictrp_bot": ICTRPRobot(),
"rct_bot": RCTRobot(),
#"pico_viz_bot": PICOVizRobot(),
"sample_size_bot":SampleSizeBot()}
log.info("Robots loaded successfully! Ready...")
'''
# lastly wait until Grobid is connected
pdf_reader.connect()
# start up Celery service
app = Celery('ml_worker', backend='amqp://', broker='amqp://')
#####
## connect to and set up database
#####
rr_sql_conn = sqlite3.connect(robotreviewer.get_data('uploaded_pdfs/uploaded_pdfs.sqlite'), detect_types=sqlite3.PARSE_DECLTYPES, check_same_thread=False)
c = rr_sql_conn.cursor()
c.execute('CREATE TABLE IF NOT EXISTS doc_queue(id INTEGER PRIMARY KEY, report_uuid TEXT, pdf_uuid TEXT, pdf_hash TEXT, pdf_filename TEXT, pdf_file BLOB, timestamp TIMESTAMP)')
c.execute('CREATE TABLE IF NOT EXISTS api_queue(id INTEGER PRIMARY KEY, report_uuid TEXT, uploaded_data TEXT, timestamp TIMESTAMP)')
c.execute('CREATE TABLE IF NOT EXISTS api_done(id INTEGER PRIMARY KEY, report_uuid TEXT, annotations TEXT, timestamp TIMESTAMP)')
c.execute('CREATE TABLE IF NOT EXISTS article(id INTEGER PRIMARY KEY, report_uuid TEXT, pdf_uuid TEXT, pdf_hash TEXT, pdf_file BLOB, annotations TEXT, timestamp TIMESTAMP, dont_delete INTEGER)')
c.close()
rr_sql_conn.commit()
@worker_init.connect
def on_worker_init(**_):
global bots
global friendly_bots
log.info("Loading the robots...")
# pico span bot must be loaded first i have *no* idea why...
print("LOADING ROBOTS")
bots = {"pico_span_bot": PICOSpanRobot(),
"bias_bot": BiasRobot(top_k=3),
"pico_bot": PICORobot(),
"pubmed_bot": PubmedRobot(),
# "ictrp_bot": ICTRPRobot(),
"rct_bot": RCTRobot(),
#"pico_viz_bot": PICOVizRobot(),
"punchline_bot":PunchlinesBot(),
"sample_size_bot":SampleSizeBot(),
"bias_ab_bot": BiasAbRobot(),
"human_bot": HumanRobot()}
friendly_bots = {"pico_span_bot": "Extracting PICO text from title/abstract",
"bias_bot": "Assessing risks of bias",
"pico_bot": "Extracting PICO information from full text",
"rct_bot": "Assessing study design (is it an RCT?)",
"sample_size_bot": "Extracting sample size",
"punchline_bot": "Extracting main conclusions",
"pubmed_bot": "Looking up meta-data in PubMed",
"bias_ab_bot": "Assessing bias from abstract"}
print("ROBOTS ALL LOADED")
log.info("Robots loaded successfully! Ready...")
@app.task
def pdf_annotate(report_uuid):
"""
takes a report uuid as input
searches for pdfs using that id,
then saves annotations in database
"""
pdf_uuids, pdf_hashes, filenames, blobs, timestamps = [], [], [], [], []
c = rr_sql_conn.cursor()
# load in the PDF data from the queue table
for pdf_uuid, pdf_hash, filename, pdf_file, timestamp in c.execute("SELECT pdf_uuid, pdf_hash, pdf_filename, pdf_file, timestamp FROM doc_queue WHERE report_uuid=?", (report_uuid, )):
pdf_uuids.append(pdf_uuid)
pdf_hashes.append(pdf_hash)
filenames.append(filename)
blobs.append(pdf_file)
timestamps.append(timestamp)
c.close()
current_task.update_state(state='PROGRESS', meta={'process_percentage': 25, 'task': 'reading PDFs'})
articles = pdf_reader.convert_batch(blobs)
parsed_articles = []
current_task.update_state(state='PROGRESS', meta={'process_percentage': 50, 'task': 'parsing text'})
# tokenize full texts here
for doc in nlp.pipe((d.get('text', u'') for d in articles), batch_size=1, n_threads=config.SPACY_THREADS):
parsed_articles.append(doc)
# adjust the tag, parse, and entity values if these are needed later
for article, parsed_text in zip(articles, parsed_articles):
article._spacy['parsed_text'] = parsed_text
current_task.update_state(state='PROGRESS',meta={'process_percentage': 75, 'task': 'doing machine learning'})
for pdf_uuid, pdf_hash, filename, blob, data, timestamp in zip(pdf_uuids, pdf_hashes, filenames, blobs, articles, timestamps):
# DEBUG
current_task.update_state(state='PROGRESS',meta={'process_percentage': 76, 'task': 'processing PDF {}'.format(filename)})
# "punchline_bot",
data = pdf_annotate_study(data, bot_names=["rct_bot", "pubmed_bot", "bias_bot", "pico_bot", "pico_span_bot", "punchline_bot", "sample_size_bot"])
data.gold['pdf_uuid'] = pdf_uuid
data.gold['filename'] = filename
c = rr_sql_conn.cursor()
c.execute("INSERT INTO article (report_uuid, pdf_uuid, pdf_hash, pdf_file, annotations, timestamp, dont_delete) VALUES(?, ?, ?, ?, ?, ?, ?)", (report_uuid, pdf_uuid, pdf_hash, sqlite3.Binary(blob), data.to_json(), timestamp, config.DONT_DELETE))
rr_sql_conn.commit()
c.close()
# finally delete the PDFs from the queue
c = rr_sql_conn.cursor()
c.execute("DELETE FROM doc_queue WHERE report_uuid=?", (report_uuid, ))
rr_sql_conn.commit()
c.close()
current_task.update_state(state='SUCCESS', meta={'process_percentage': 100, 'task': 'done!'})
return {"process_percentage": 100, "task": "completed"}
@app.task
def api_annotate(report_uuid):
"""
Handles annotation tasks sent from the API
Strict in datatype handling
"""
current_task.update_state(state='PROGRESS', meta={
'status': "in process",
'position': "received request, fetching data"}
)
c = rr_sql_conn.cursor()
c.execute("SELECT uploaded_data, timestamp FROM api_queue WHERE report_uuid=?", (report_uuid, ))
result = c.fetchone()
uploaded_data_s, timestamp = result
uploaded_data = json.loads(uploaded_data_s)
articles = uploaded_data["articles"]
target_robots = uploaded_data["robots"]
filter_rcts = uploaded_data.get("filter_rcts", "is_rct_balanced")
# now do the ML
if filter_rcts != 'none':
current_task.update_state(state='PROGRESS', meta={
'status': "in process",
'position': "rct_robot classification"}
)
# do rct_bot first
results = bots['rct_bot'].api_annotate(articles)
for a, r in zip(articles, results):
if r[filter_rcts]:
a['skip_annotation'] = False
else:
a['skip_annotation'] = True
a['rct_bot'] = r
# and remove from the task list if present so don't duplicate
target_robots = [tr for tr in target_robots if tr != "rct_bot"]
current_task.update_state(state='PROGRESS', meta={
'status': "in process",
'position': "tokenizing data"}
)
for k in ["ti", "ab", "fullText"]:
parsed = nlp.pipe((a.get(k, "") for a in articles if a.get('skip_annotation', False)==False))
articles_gen = (a for a in articles)
while True:
try:
current_doc = articles_gen.__next__()
except StopIteration:
break
if current_doc.get("skip_annotation"):
continue
else:
current_doc['parsed_{}'.format(k)] = parsed.__next__()
for bot_name in target_robots:
current_task.update_state(state='PROGRESS', meta={
'status': "in process",
'position': "{} classification".format(bot_name)}
)
results = bots[bot_name].api_annotate(articles)
for a, r in zip(articles, results):
            if not a.get('skip_annotation', False):
a[bot_name] = r
# delete the parsed text
for article in articles:
for k in ["ti", "ab", "fullText"]:
article.pop('parsed_{}'.format(k), None)
c = rr_sql_conn.cursor()
current_task.update_state(state='PROGRESS', meta={
'status': "in process",
'position': "writing the predictions to database"}
)
c.execute("INSERT INTO api_done (report_uuid, annotations, timestamp) VALUES(?, ?, ?)", (report_uuid, json.dumps(articles), timestamp))
rr_sql_conn.commit()
c.close()
# finally delete the data from the queue
c = rr_sql_conn.cursor()
c.execute("DELETE FROM api_queue WHERE report_uuid=?", (report_uuid, ))
rr_sql_conn.commit()
c.close()
current_task.update_state(state='done')
return {"status": 100, "task": "completed"}
def pdf_annotate_study(data, bot_names=["bias_bot"]):
#
# ANNOTATION TAKES PLACE HERE
# change the line below if you wish to customise or
# add a new annotator
#
log.info("REQUESTING ANNOTATIONS FROM SET OF PDFs (annotate_study)")
annotations = pdf_annotation_pipeline(bot_names, data)
return annotations
def pdf_annotation_pipeline(bot_names, data):
# makes it here!
log.info("STARTING PIPELINE (made it to annotation_pipeline)")
# DEBUG
current_task.update_state(state='PROGRESS',meta={'process_percentage': 78, 'task': 'starting annotation pipeline'})
for bot_name in bot_names:
log.info("STARTING {} BOT (annotation_pipeline)".format(bot_name))
log.debug("Sending doc to {} for annotation...".format(bots[bot_name].__class__.__name__))
current_task.update_state(state='PROGRESS',meta={'process_percentage': 79, 'task': friendly_bots[bot_name]})
data = bots[bot_name].pdf_annotate(data)
log.debug("{} done!".format(bots[bot_name].__class__.__name__))
log.info("COMPLETED {} BOT (annotation_pipeline)".format(bot_name))
# current_task.update_state(state='PROGRESS',meta={'process_percentage': 79, 'task': 'Bot {} complete!'.format(bot_name)})
return data
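# Editor's sketch (illustrative addition, not part of the original module):
# a front-end process would enqueue work for this worker through Celery's
# standard task API, e.g. (module name per the docstring at the top of this
# file):
#
#   from ml_worker import pdf_annotate, api_annotate
#   result = pdf_annotate.delay(report_uuid)   # report_uuid keys the doc_queue rows
#   result.state                               # 'PROGRESS' ... then 'SUCCESS'
#   api_annotate.delay(report_uuid)            # same pattern for the API queue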
|
gpl-3.0
| 5,669,721,844,746,491,000 | 34.271137 | 253 | 0.650025 | false |
IronLanguages/ironpython3
|
Tests/test_syntax.py
|
1
|
31258
|
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
import sys
import unittest
from iptest import IronPythonTestCase, is_cli, is_cpython, run_test, skipUnlessIronPython, stderr_trapper
def run_compile_test(self, code, msg, lineno):
if sys.version_info >= (3,8): msg = msg.replace("can't", "cannot")
filename = "the file name"
with self.assertRaises(SyntaxError) as cm:
compile(code, filename, "exec")
e = cm.exception
self.assertEqual(e.msg, msg)
self.assertEqual(e.lineno, lineno)
self.assertEqual(e.filename, filename)
def test_compile(self):
c = compile("0o71 + 1", "Error", "eval")
self.assertRaises(SyntaxError, compile, "0o88 + 1", "Error", "eval")
self.assertRaises(SyntaxError, compile, "0o99 + 1", "Error", "eval")
self.assertRaises(SyntaxError, compile, """
try:
pass
""", "Error", "single")
self.assertRaises(SyntaxError, compile, "x=10\ny=x.", "Error", "exec")
compile_tests = [
("for x notin []:\n pass", "invalid syntax", 1),
("global 1", "invalid syntax", 1),
("x=10\nyield x\n", "'yield' outside function", 2),
("return\n", "'return' outside function", 1),
("def f(x=10, y):\n pass", "non-default argument follows default argument", 1),
("def f(for):\n pass", "invalid syntax", 1),
("f(3 = )", 'expression cannot contain assignment, perhaps you meant "=="?' if sys.version_info >= (3,9) else "expected name" if is_cli else "invalid syntax", 1),
("dict(a=1,a=2)", "keyword argument repeated: a" if sys.version_info >= (3,9) else "keyword argument repeated", 1),
("def f(a,a): pass", "duplicate argument 'a' in function definition", 1),
("def f((a,b),(c,b)): pass", "invalid syntax", 1),
("x = 10\nx = x[]", "invalid syntax", 2),
("break", "'break' outside loop", 1),
("if 1:\n\tbreak", "'break' outside loop", 2),
("if 1:\n\tx+y=22", "can't assign to operator", 2),
("if 1:\n\tdel f()", "can't delete function call", 2),
("if 1:\nfoo()\n", "expected an indented block", 2),
("'abc'.1", "invalid syntax", 1),
("'abc'.1L", "invalid syntax", 1),
("'abc'.1j", "invalid syntax", 1),
("'abc'.0xFFFF", "invalid syntax", 1),
("'abc' 1L", "invalid syntax", 1),
("'abc' 1.0", "invalid syntax", 1),
("'abc' 0j", "invalid syntax", 1),
("x = 'abc'\nx.1", "invalid syntax", 2),
("x = 'abc'\nx 1L", "invalid syntax", 2),
("x = 'abc'\nx 1.0", "invalid syntax", 2),
("x = 'abc'\nx 0j", "invalid syntax", 2),
('def f():\n del (yield 5)\n', "can't delete yield expression", 2),
('a,b,c += 1,2,3', "'tuple' is an illegal expression for augmented assignment" if sys.version_info >= (3,9) else "illegal expression for augmented assignment", 1),
('def f():\n a = yield 3 = yield 4', "can't assign to yield expression" if is_cli else "assignment to yield expression not possible", 2),
('((yield a), 2,3) = (2,3,4)', "can't assign to yield expression", 1),
('(2,3) = (3,4)', "can't assign to literal", 1),
("def e():\n break", "'break' outside loop", 2),
("def g():\n for x in range(10):\n print(x)\n break\n", "'break' outside loop", 4),
("def g():\n for x in range(10):\n print(x)\n if True:\n break\n", "'break' outside loop", 5),
("def z():\n if True:\n break\n", "'break' outside loop", 3),
('from import abc', "invalid syntax", 1),
("'abc'.", "unexpected EOF while parsing" if is_cli else "invalid syntax", 1),
("None = 2", "cannot assign to None" if sys.version_info >= (3,8) else "can't assign to keyword", 1),
]
if sys.version_info < (3,6):
compile_tests.append(('() = 1', "can't assign to ()", 1))
if sys.version_info < (3,8):
compile_tests.append(("""for x in range(100):\n"""
""" try:\n"""
""" [1,2][3]\n"""
""" except IndexError:\n"""
""" pass\n"""
""" finally:\n"""
""" continue\n""", "'continue' not supported inside 'finally' clause", 7))
# different error messages, ok
for test in compile_tests:
run_compile_test(self, *test)
self.assertEqual(float(repr(2.5)), 2.5)
self.assertEqual(eval("1, 2, 3,"), (1, 2, 3))
# eval validates end of input
self.assertRaises(SyntaxError, compile, "1+2 1", "Error", "eval")
# empty test list in for expression
self.assertRaises(SyntaxError, compile, "for x in : print(x)", "Error", "exec")
self.assertRaises(SyntaxError, compile, "for x in : print(x)", "Error", "eval")
self.assertRaises(SyntaxError, compile, "for x in : print(x)", "Error", "single")
# empty backquote
self.assertRaises(SyntaxError, compile, "``", "Error", "exec")
self.assertRaises(SyntaxError, compile, "``", "Error", "eval")
self.assertRaises(SyntaxError, compile, "``", "Error", "single")
# empty assignment expressions
self.assertRaises(SyntaxError, compile, "x = ", "Error", "exec")
self.assertRaises(SyntaxError, compile, "x = ", "Error", "eval")
self.assertRaises(SyntaxError, compile, "x = ", "Error", "single")
self.assertRaises(SyntaxError, compile, "x = y = ", "Error", "exec")
self.assertRaises(SyntaxError, compile, "x = y = ", "Error", "eval")
self.assertRaises(SyntaxError, compile, "x = y = ", "Error", "single")
self.assertRaises(SyntaxError, compile, " = ", "Error", "exec")
self.assertRaises(SyntaxError, compile, " = ", "Error", "eval")
self.assertRaises(SyntaxError, compile, " = ", "Error", "single")
self.assertRaises(SyntaxError, compile, " = 4", "Error", "exec")
self.assertRaises(SyntaxError, compile, " = 4", "Error", "eval")
self.assertRaises(SyntaxError, compile, " = 4", "Error", "single")
self.assertRaises(SyntaxError, compile, "x <= ", "Error", "exec")
self.assertRaises(SyntaxError, compile, "x <= ", "Error", "eval")
self.assertRaises(SyntaxError, compile, "x <= ", "Error", "single")
#indentation errors - BUG 864
self.assertRaises(IndentationError if is_cli or sys.version_info >= (3,9) else SyntaxError, compile, "class C:\nx=2\n", "Error", "exec")
self.assertRaises(IndentationError if sys.version_info >= (3,9) else SyntaxError, compile, "class C:\n\n", "Error", "single")
#allow \f
compile('\f\f\f\f\fclass C:\f\f\f pass', 'ok', 'exec')
compile('\f\f\f\f\fclass C:\n\f\f\f print("hello")\n\f\f\f\f\f\f\f\f\f\f print("goodbye")', 'ok', 'exec')
compile('class C:\n\f\f\f print("hello")\n\f\f\f\f\f\f\f\f\f\f print("goodbye")', 'ok', 'exec')
compile('class \f\f\f\fC:\n\f print("hello")\n\f\f\f\f\f\f\f\f\f\f print("goodbye")', 'ok', 'exec')
# multiline expression passed to exec (positive test)
s = """
title = "The Cat"
self.assertTrue(title.istitle())
x = 2 + 5
self.assertEqual(x, 7)
"""
exec(s)
if is_cpython:
# this seems to be a CPython bug, Guido says:
# I usually append some extra newlines before passing a string to compile(). That's the usual work-around.
# There's probably a subtle bug in the tokenizer when reading from a string -- if you find it,
# please upload a patch to the tracker!
# http://mail.python.org/pipermail/python-dev/2009-May/089793.html
self.assertRaises(SyntaxError, compile, "def f(a):\n\treturn a\n\t", "", "single")
self.assertRaises(SyntaxError, compile, "def f(a):\n\treturn a\n\t", "", "single", 0x200)
# should work
s = "def f():\n\treturn 3"
compile(s, "<string>", "single")
self.assertRaises(SyntaxError, compile, s, "<string>", "single", 0x200)
# Assignment to None and constant
def NoneAssign():
exec('None = 2')
def LiteralAssign():
exec("'2' = '3'")
self.assertRaises(SyntaxError, NoneAssign)
self.assertRaises(SyntaxError, LiteralAssign)
# beginning of the file handling
c = compile(" # some comment here \nprint(10)", "", "exec")
c = compile(" \n# some comment\n \nprint(10)", "", "exec")
self.assertRaises(SyntaxError, compile, " x = 10\n\n", "", "exec")
self.assertRaises(SyntaxError, compile, " \n #comment\n x = 10\n\n", "", "exec")
c = compile(u"\u0391 = 10\nif \u0391 != 10: 1/0", "", "exec")
exec(c)
# from __future__ tests
self.assertRaises(SyntaxError, compile, "def f():\n from __future__ import division", "", "exec")
self.assertRaises(SyntaxError, compile, "'doc'\n'doc2'\nfrom __future__ import division", "", "exec")
# del x - used to fail with Python 2
compile("def a(x):\n def b():\n print(x)\n del x", "ok", "exec")
compile("def f():\n del x\n def g():\n return x\n", "ok", "exec")
compile("def f():\n def g():\n return x\n del x\n", "ok", "exec")
compile("def f():\n class g:\n def h(self):\n print(x)\n pass\n del x\n", "ok", "exec")
# add global to the picture
c = compile("def f():\n x=10\n del x\n def g():\n global x\n return x\n return g\nf()()\n", "", "exec")
self.assertRaises(NameError, eval, c)
c = compile("def f():\n global x\n x=10\n del x\n def g():\n return x\n return g\nf()()\n", "", "exec")
self.assertRaises(NameError, eval, c)
# global following definition test
# affected by bug# 1145
c = compile("def f():\n global a\n global a\n a = 1\n", "", "exec")
# unqualified exec in nested function - used to fail with Python 2
compile("def f():\n x = 1\n def g():\n exec('pass')\n print(x)", "", "exec")
# correct case - qualified exec in nested function
c = compile("def f():\n x = 10\n def g():\n exec('pass') in {}\n print(x)\n", "", "exec")
def test_expr_support_impl(self):
x = 10
self.assertEqual(x, 10)
del x
try: y = x
except NameError: pass
else: self.fail("x not deleted")
(x) = 20
self.assertEqual((x), 20)
del (x)
try: y = x
except NameError: pass
else: self.fail("x not deleted")
# this is comment \
a=10
self.assertEqual(a, 10)
x = "Th\
\
e \
qu\
ick\
br\
ow\
\
n \
fo\
\
x\
ju\
mp\
s \
ove\
\
r \
th\
e l\
az\
\
y d\
og\
.\
\
\
\
12\
34\
567\
89\
0"
y="\
The\
q\
ui\
\
c\
k b\
\
r\
o\
w\
n\
\
fo\
x\
\
jum\
ps\
ov\
er \
t\
he\
la\
\
\
zy\
\
\
d\
og\
. 1\
2\
\
3\
\
\
\
\
4\
567\
\
8\
\
90\
"
self.assertEqual(x, y)
self.assertEqual("\101", "A")
x=b'\a\b\c\d\e\f\g\h\i\j\k\l\m\n\o\p\q\r\s\t\u\v\w\y\z'.decode("ascii")
y=u'\u0007\u0008\\\u0063\\\u0064\\\u0065\u000C\\\u0067\\\u0068\\\u0069\\\u006a\\\u006b\\\u006c\\\u006d\u000A\\\u006f\\\u0070\\\u0071\u000D\\\u0073\u0009\\\u0075\u000B\\\u0077\\\u0079\\\u007a'
self.assertTrue(x == y)
self.assertEqual(x, y)
for a,b in zip(x,y):
self.assertEqual(a,b)
self.assertTrue((10==20)==(20==10))
self.assertEqual(10==20, 20==10)
self.assertEqual(4e4-4, 4e4 - 4)
def test_private_names(self):
class C:
__x = 10
class ___:
__y = 20
class D:
__z = 30
self.assertEqual(C._C__x, 10)
self.assertEqual(C.___.__y, 20)
self.assertEqual(C.D._D__z, 30)
class B(object):
def method(self, __a):
return __a
self.assertEqual(B().method("__a passed in"), "__a passed in")
class B(object):
def method(self, X):
(__a, ) = X
return __a
self.assertEqual(B().method(("__a passed in", )), "__a passed in")
class B(object):
def __f(self):
pass
self.assertTrue('_B__f' in dir(B))
class B(object):
class __C(object): pass
self.assertTrue('_B__C' in dir(B))
class B(object):
x = lambda self, __a : __a
self.assertEqual(B.x(B(), _B__a='value'), 'value')
#Hit negative case of 'sublist' in http://www.python.org/doc/2.5.1/ref/grammar.txt.
self.assertRaises(SyntaxError, compile, "def f((1)): pass", "", "exec")
#
# Make sure that augmented assignment also binds in the given scope
#
augassign_code = """
x = 10
def f():
x %s 10
f()
"""
def test_augassign_binding():
for op in ["+=", "-=", "**=", "*=", "//=", "/=", "%=", "<<=", ">>=", "&=", "|=", "^="]:
code = augassign_code % op
try:
exec(code, {}, {})
except:
pass
else:
                self.fail("augassign binding test didn't raise exception")
return True
self.assertTrue(test_augassign_binding())
class SyntaxTest(IronPythonTestCase):
def test_compile_method(self):
test_compile(self)
def test_expr_support_method(self):
test_expr_support_impl(self)
def test_private_names_method(self):
test_private_names(self)
def test_date_check(self):
year = 2005
month = 3
day = 16
hour = 14
minute = 53
second = 24
if 1900 < year < 2100 and 1 <= month <= 12 \
and 1 <= day <= 31 and 0 <= hour < 24 \
and 0 <= minute < 60 and 0 <= second < 60: # Looks like a valid date
pass
def test_multiline_compound_stmts(self):
class MyException(Exception): pass
tests = [
"if False: print('In IF')\nelse: x = 2; raise MyException('expected')",
"if False: print('In IF')\nelif True: x = 2;raise MyException('expected')\nelse: print('In ELSE')",
"for i in (1,2): x = i\nelse: x = 5; raise MyException('expected')",
"while 5 in (1,2): print(i)\nelse:x = 2;raise MyException('expected')",
"try: x = 2\nexcept: print('In EXCEPT')\nelse: x=20;raise MyException('expected')",
]
for test in tests:
try:
c = compile(test,"","exec")
exec(c)
except MyException:
pass
else:
self.fail("multiline_compound stmt test did not raise exception. test = " + test)
# Generators couldn't have return statements with values in them in Python 2. Verify that these now work.
def test_generator_with_nonempty_return(self):
tests = [
"def f():\n return 42\n yield 3",
"def f():\n yield 42\n return 3",
"def f():\n yield 42\n return None",
"def f():\n if True:\n return 42\n yield 42",
"def f():\n try:\n return 42\n finally:\n yield 23"
]
for test in tests:
compile(test, "", "exec")
#Verify that when there is no return value error is not thrown.
def f():
yield 42
return
def test_return_from_finally(self):
# compile function which returns from finally, but does not yield from finally.
c = compile("def f():\n try:\n pass\n finally:\n return 1", "", "exec")
def ret_from_finally():
try:
pass
finally:
return 1
return 2
self.assertEqual(ret_from_finally(), 1)
def ret_from_finally2(x):
if x:
try:
pass
finally:
return 1
else:
return 2
self.assertEqual(ret_from_finally2(True), 1)
self.assertEqual(ret_from_finally2(False), 2)
def ret_from_finally_x(x):
try:
1/0
finally:
return x
self.assertEqual(ret_from_finally_x("Hi"), "Hi")
def ret_from_finally_x2():
try:
1/0
finally:
raise AssertionError("This one")
try:
ret_from_finally_x2()
except AssertionError as e:
self.assertEqual(e.args[0], "This one")
else:
            self.fail("Expected AssertionError, got none")
try:
pass
finally:
pass
# The try block can only have one default except clause, and it must be last
try_syntax_error_tests = [
"""
try:
pass
except:
pass
except Exception, e:
pass
""",
"""
try:
pass
except Exception, e:
pass
except:
pass
except:
pass
""",
"""
try:
pass
except:
pass
except:
pass
"""
]
for code in try_syntax_error_tests:
self.assertRaises(SyntaxError, compile, code, "code", "exec")
def test_break_in_else_clause(self):
def f():
exec('''
while i >= 0:
pass
else:
break''')
self.assertRaises(SyntaxError, f)
def test_no_throw(self):
#Just make sure these don't throw
print("^L")
temp = 7
print(temp)
print("No ^L's...")
def test_syntaxerror_text(self):
method_missing_colon = (" def MethodTwo(self)\n", """
class HasASyntaxException:
def MethodOne(self):
print('hello')
print('world')
print('again')
def MethodTwo(self)
print('world')""")
if is_cpython: #http://ironpython.codeplex.com/workitem/28380
function_missing_colon1 = ("def f()\n", "def f()")
else:
function_missing_colon1 = ("def f()", "def f()")
function_missing_colon2 = ("def f()\n", "def f()\n")
if is_cpython: #http://ironpython.codeplex.com/workitem/28380
function_missing_colon3 = ("def f()\n", "def f()\r\n")
function_missing_colon4 = ("def f()\n", "def f()\r")
else:
function_missing_colon3 = ("def f()\r\n", "def f()\r\n")
function_missing_colon4 = ("def f()\r", "def f()\r")
function_missing_colon2a = ("def f()\n", "print(1)\ndef f()\nprint(3)")
if is_cpython: #http://ironpython.codeplex.com/workitem/28380
function_missing_colon3a = ("def f()\n", "print(1)\ndef f()\r\nprint(3)")
function_missing_colon4a = ("def f()\n", "print(1)\ndef f()\rprint(3)")
else:
function_missing_colon3a = ("def f()\r\n", "print(1)\ndef f()\r\nprint(3)")
function_missing_colon4a = ("def f()\rprint(3)", "print(1)\ndef f()\rprint(3)")
tests = (
method_missing_colon,
#function_missing_body,
function_missing_colon1,
function_missing_colon2,
function_missing_colon3,
function_missing_colon4,
function_missing_colon2a,
function_missing_colon3a,
function_missing_colon4a,
)
for expectedText, testCase in tests:
try:
exec(testCase)
except SyntaxError as e:
self.assertEqual(e.text, expectedText)
def test_error_parameters(self):
tests = [("if 1:", 0x200, ('expected an indented block' if sys.version_info >= (3,9) else 'unexpected EOF while parsing', ('dummy', 1, 6 if is_cli else 5, 'if 1:')) ),
("if 1:\n", 0x200, ('expected an indented block' if sys.version_info >= (3,9) else 'unexpected EOF while parsing', ('dummy', 1, 6, 'if 1:\n')) ),
("if 1:", 0x000, ('expected an indented block' if sys.version_info >= (3,9) else 'unexpected EOF while parsing', ('dummy', 1, 6 if is_cli else 5, 'if 1:')) ),
("if 1:\n", 0x000, ('expected an indented block' if sys.version_info >= (3,9) else 'unexpected EOF while parsing', ('dummy', 1, 6, 'if 1:\n')) ),
("if 1:\n\n", 0x200, ('expected an indented block' if sys.version_info >= (3,9) else 'unexpected EOF while parsing', ('dummy', 1, 6, 'if 1:\n') if is_cli else ('dummy', 2, 1, '\n')) ),
("if 1:\n\n", 0x000, ('expected an indented block' if sys.version_info >= (3,9) else 'unexpected EOF while parsing', ('dummy', 1, 6, 'if 1:\n') if is_cli else ('dummy', 2, 1, '\n')) ),
("if 1:\n if 1:", 0x200, ('expected an indented block' if sys.version_info >= (3,9) else 'unexpected EOF while parsing', ('dummy', 2, 8 if is_cli else 7, ' if 1:')) ),
("if 1:\n if 1:\n", 0x200, ('expected an indented block' if is_cli or sys.version_info >= (3,9) else 'unexpected EOF while parsing', ('dummy', 2, 8, ' if 1:\n')) ),
("if 1:\n if 1:", 0x000, ('expected an indented block' if is_cli or sys.version_info >= (3,9) else 'unexpected EOF while parsing', ('dummy', 2, 8 if is_cli else 7, ' if 1:')) ),
("if 1:\n if 1:\n", 0x000, ('expected an indented block' if is_cli or sys.version_info >= (3,9) else 'unexpected EOF while parsing', ('dummy', 2, 8, ' if 1:\n')) ),
("if 1:\n if 1:\n\n", 0x200, ('expected an indented block' if is_cli or sys.version_info >= (3,9) else 'unexpected EOF while parsing', ('dummy', 2, 8, ' if 1:\n') if is_cli else ('dummy', 3, 1, '\n')) ),
("if 1:\n if 1:\n\n", 0x000, ('expected an indented block' if is_cli or sys.version_info >= (3,9) else 'unexpected EOF while parsing', ('dummy', 2, 8, ' if 1:\n') if is_cli else ('dummy', 3, 1, '\n')) ),
("class MyClass(object):\n\tabc = 42\n\tdef __new__(cls):\n", 0x200, ('expected an indented block' if is_cli or sys.version_info >= (3,9) else 'unexpected EOF while parsing', ('dummy', 3, 19, '\tdef __new__(cls):\n')) ),
("class MyClass(object):\n\tabc = 42\n\tdef __new__(cls):\n", 0x000, ('expected an indented block' if is_cli or sys.version_info >= (3,9) else 'unexpected EOF while parsing', ('dummy', 3, 19, '\tdef __new__(cls):\n')) ),
("def Foo():\n\n # comment\n\n Something = -1\n\n\n\n ", 0x000, ('unindent does not match any outer indentation level', ('dummy', 9, 2, ' '))),
("def Foo():\n\n # comment\n\n Something = -1\n\n\n\n ", 0x200, ('unindent does not match any outer indentation level', ('dummy', 9, 2, ' '))),
("def Foo():\n\n # comment\n\n Something = -1\n\n\n\n ", 0x000, ('unindent does not match any outer indentation level', ('dummy', 9, 3, ' '))),
("def Foo():\n\n # comment\n\n Something = -1\n\n\n\n ", 0x200, ('unindent does not match any outer indentation level', ('dummy', 9, 3, ' '))),
]
for input, flags, res in tests:
#print(repr(input), flags)
with self.assertRaises(SyntaxError) as cm:
compile(input, "dummy", "single", flags, 1)
self.assertEqual(cm.exception.args, res)
with self.assertRaises(IndentationError) as cm:
exec("""
def f():
x = 3
y = 5""")
self.assertEqual(cm.exception.lineno, 2)
@skipUnlessIronPython()
def test_parser_recovery(self):
# bunch of test infrastructure...
import clr
clr.AddReference('IronPython')
clr.AddReference('Microsoft.Scripting')
clr.AddReference('Microsoft.Dynamic')
from Microsoft.Scripting import (
TextContentProvider, SourceCodeKind, SourceUnit, ErrorSink,
SourceCodeReader
)
from Microsoft.Scripting.Runtime import CompilerContext
from IronPython.Runtime import PythonOptions
from IronPython.Compiler import Parser, Tokenizer, PythonCompilerOptions, Ast
from System.IO import StringReader
from System.Text import Encoding
class MyErrorSink(ErrorSink):
def __init__(self):
self.Errors = []
def Add(self, *args):
if type(args[0]) is str:
self.AddWithPath(*args)
else:
self.AddWithSourceUnit(*args)
def AddWithPath(self, message, path, code, line, span, error, severity):
err = (
message,
path,
span,
error
)
self.Errors.append(err)
def AddWithSourceUnit(self, source, message, span, errorCode, severity):
err = (
message,
source.Path,
span,
errorCode
)
self.Errors.append(err)
class MyTextContentProvider(TextContentProvider):
def __init__(self, text):
self.text = text
def GetReader(self):
return SourceCodeReader(StringReader(self.text), Encoding.GetEncoding(0))
def parse_text(text):
errorSink = MyErrorSink()
sourceUnit = SourceUnit(
clr.GetCurrentRuntime().GetLanguageByName('python'),
MyTextContentProvider(text),
'foo',
SourceCodeKind.File
)
parser = Parser.CreateParser(
CompilerContext(sourceUnit, PythonCompilerOptions(), errorSink),
PythonOptions()
)
parser.ParseFile(True)
return errorSink
def TestErrors(text, errors):
res = parse_text(text)
self.assertEqual(len(res.Errors), len(errors))
for curErr, expectedMsg in zip(res.Errors, errors):
self.assertEqual(curErr[0], expectedMsg)
def PrintErrors(text):
"""helper for creating new tests"""
errors = parse_text(text)
print()
for err in errors.Errors:
print(err)
TestErrors("""class
def x(self):
pass""", ["invalid syntax"])
TestErrors("""class x
def x(self):
pass
""", ["invalid syntax"])
TestErrors("""class x(
def x(self):
pass""", ["invalid syntax", "invalid syntax", "invalid syntax"]) # https://github.com/IronLanguages/ironpython3/issues/1034
TestErrors("""class X:
if x:
def x(): pass""", ['expected an indented block'])
TestErrors("""class X:
if x is None:
x =
def x(self): pass""", ["invalid syntax"])
TestErrors("""class X:
def f(
def g(self): pass""", ["invalid syntax"])
TestErrors("""class X:
def f(*
def g(self): pass""", ["invalid syntax"])
TestErrors("""class X:
def f(**
def g(self): pass""", ["invalid syntax"])
TestErrors("""class X:
def f(*a, **
def g(self): pass""", ["invalid syntax"])
TestErrors("""f() += 1""", ["can't assign to function call"])
def test_syntax_warnings(self):
if is_cli or sys.version_info >= (3,6):
with self.assertRaisesRegex(SyntaxError, "name 'a' is assigned to before global declaration") as cm:
compile("def f():\n a = 1\n global a\n", "", "exec")
self.assertEqual(cm.exception.lineno, 3)
with self.assertRaisesRegex(SyntaxError, "name 'a' is assigned to before global declaration") as cm:
compile("def f():\n def a(): pass\n global a\n", "", "exec")
self.assertEqual(cm.exception.lineno, 3)
with self.assertRaisesRegex(SyntaxError, "name 'a' is assigned to before global declaration") as cm:
compile("def f():\n for a in []: pass\n global a\n", "", "exec")
self.assertEqual(cm.exception.lineno, 3)
with self.assertRaisesRegex(SyntaxError, "name 'a' is used prior to global declaration" if is_cli else "name 'a' is assigned to before global declaration") as cm:
compile("def f():\n global a\n a = 1\n global a\n", "", "exec")
self.assertEqual(cm.exception.lineno, 4)
with self.assertRaisesRegex(SyntaxError, "name 'a' is used prior to global declaration") as cm:
compile("def f():\n print(a)\n global a\n", "", "exec")
self.assertEqual(cm.exception.lineno, 3)
with self.assertRaisesRegex(SyntaxError, "name 'a' is assigned to before global declaration") as cm:
compile("def f():\n a = 1\n global a\n global a\n a = 1", "", "exec")
self.assertEqual(cm.exception.lineno, 3)
with self.assertRaisesRegex(SyntaxError, "name 'x' is assigned to before global declaration") as cm:
compile("x = 10\nglobal x\n", "", "exec")
self.assertEqual(cm.exception.lineno, 2)
else:
# syntax error warnings are outputted using warnings.showwarning. our own warning trapper therefore
# doesn't see them. So we trap stderr here instead. We could use CPython's warning trapper if we
# checked for the presence of the stdlib.
with stderr_trapper() as trapper:
compile("def f():\n a = 1\n global a\n", "", "exec")
self.assertEqual(trapper.messages, [":3: SyntaxWarning: name 'a' is assigned to before global declaration"])
with stderr_trapper() as trapper:
compile("def f():\n def a(): pass\n global a\n", "", "exec")
self.assertEqual(trapper.messages, [":3: SyntaxWarning: name 'a' is assigned to before global declaration"])
with stderr_trapper() as trapper:
compile("def f():\n for a in []: pass\n global a\n", "", "exec")
self.assertEqual(trapper.messages, [":3: SyntaxWarning: name 'a' is assigned to before global declaration"])
with stderr_trapper() as trapper:
compile("def f():\n global a\n a = 1\n global a\n", "", "exec")
self.assertEqual(trapper.messages, [":4: SyntaxWarning: name 'a' is assigned to before global declaration"])
with stderr_trapper() as trapper:
compile("def f():\n print(a)\n global a\n", "", "exec")
self.assertEqual(trapper.messages, [":3: SyntaxWarning: name 'a' is used prior to global declaration"])
with stderr_trapper() as trapper:
compile("def f():\n a = 1\n global a\n global a\n a = 1", "", "exec")
self.assertEqual(trapper.messages,
[":3: SyntaxWarning: name 'a' is assigned to before global declaration",
":4: SyntaxWarning: name 'a' is assigned to before global declaration"])
with stderr_trapper() as trapper:
compile("x = 10\nglobal x\n", "", "exec")
self.assertEqual(trapper.messages, [":2: SyntaxWarning: name 'x' is assigned to before global declaration"])
run_test(__name__)
|
apache-2.0
| 3,578,807,755,610,094,000 | 36.256257 | 236 | 0.539958 | false |
sassoftware/mirrorball
|
scripts/gem_updater.py
|
1
|
9089
|
#!/usr/bin/python
#
# Copyright (c) SAS Institute, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import time
mirrorballDir = os.path.abspath('../')
sys.path.insert(0, mirrorballDir)
from conary.lib import cfg
import os
import requests
import json
from urlparse import urljoin
from conary.lib import util
sys.excepthook = util.genExcepthook()
from updatebot import bot
from updatebot import config
from updatebot import log
from updatebot import conaryhelper
from conary.lib import util
from updatebot.errors import NoManifestFoundError
class Helper(conaryhelper.ConaryHelper):
def __init__(self, cfg):
conaryhelper.ConaryHelper.__init__(self, cfg)
def getManifestCfg(self, pkgname, manifest, version=None):
"""
Get the manifest file from the source component for a
given package.
@param pkgname: name of the package to retrieve
@type pkgname: string
@param version optional source version to checkout.
@type version conary.versions.Version
@return manifest for pkgname
"""
print('retrieving manifest for %s' % pkgname)
recipeDir = self._edit(pkgname, version=version)
manifestFileName = util.joinPaths(recipeDir, 'manifest')
if not os.path.exists(manifestFileName):
raise NoManifestFoundError(pkgname=pkgname, dir=recipeDir)
manifest.read(manifestFileName)
return manifest
def setManifestCfg(self, pkgname, manifest, version=None):
"""
Create/Update a manifest file from config.
@param pkgname: name of the package
@type pkgname: string
@param manifest: list of files to go in the manifest file
@type manifest: list(string, string, ...)
"""
print('setting manifest for %s' % pkgname)
recipeDir = self._edit(pkgname, version=version)
# Update manifest file.
manifestFileName = util.joinPaths(recipeDir, 'manifest')
manifest.writeToFile(manifestFileName)
# Make sure manifest file has been added.
self._addFile(recipeDir, 'manifest')
class GemManifest(cfg.ConfigFile):
name = cfg.CfgString
version = cfg.CfgString
gem_uri = cfg.CfgString
api = (cfg.CfgString, 'https://rubygems.org/api/v1/gems/')
build_requires = cfg.CfgLineList(cfg.CfgString)
gem_requires = cfg.CfgLineList(cfg.CfgString)
environment = cfg.CfgDict(cfg.CfgString)
require_exceptions = cfg.CfgQuotedLineList(cfg.CfgString)
class GemInfo(object):
def __init__(self, gemname, api=None):
self.name = gemname
self.api = api or 'https://rubygems.org/api/v1/gems/'
self.uri = urljoin(self.api,self.name)
def jsonToInfo(self, str):
def j2o(load):
if isinstance(load, dict):
return type('info', (), dict([(k,j2o(v)) for k,v in load.iteritems()]) )
else:
if isinstance(load, unicode):
return load.encode()
return load
return j2o(json.loads(str))
def getInformation(self):
r = requests.get(self.uri)
if not r.ok:
            r.raise_for_status()
return self.jsonToInfo(r.text)
class GemUpdater(object):
def __init__(self, gemname, info, ccfg, pkgname=None, version=None, prefix=None):
self.name = gemname
self.pkgname = pkgname or gemname
self.info = info
self.helper = Helper(ccfg)
self.helper._newPkgFactory = ccfg.gemPackageFactory
self.version = version
if not self.version:
self.version = self.info.version
self.prefix = prefix
if not self.prefix:
self.prefix = ''
self.msg = "Gem Updater Auto Commit"
def getManifest(self, mcfg, version=None):
return self.helper.getManifestCfg(self.pkgname, mcfg, version)
def setManifest(self, mcfg, version=None):
return self.helper.setManifestCfg(self.pkgname, mcfg, version)
def readManifest(self):
manifest = GemManifest()
return self.getManifest(manifest)
def _newRequires(self, requires=[]):
requires = [ x['name'].encode() for x in self.info.dependencies.runtime
if x not in requires ]
reqmap = [ (self.prefix + x, None, None) for x in requires ]
reqtroves = self.helper.findTroves(reqmap)
#import epdb;epdb.st()
reqs = [ x[0] for x in reqtroves ]
missing = [ x for x in reqs if x[0].replace(self.prefix, '') not in requires ]
if missing:
print "Missing reqs : %s" % missing
return reqs
def check(self, manifest, info):
flag = False
# FIXME Just in case we passed in a version...
if not self.version:
self.version = info.version
if manifest.name != self.name:
print "[ERROR] Names do not match!!!"
raise
if manifest.gem_uri != info.gem_uri:
print "[WARN] gem_uri do not match!"
flag = True
if manifest.version > self.version:
print "[WARN] version goes backwards"
flag = True
if manifest.version < self.version:
print "[WARN] found newer version : %s " % self.version
flag = True
if self._newRequires(manifest.build_requires):
print "[WARN] New build requires found!"
flag = True
return flag
def _update(self, manifest):
requires = self._newRequires(manifest.build_requires)
# FIXME Just in case we passed in a version...
manifest.gem_uri = self.info.gem_uri
# TODO should look up components before adding
# should flag missing build requires so
# we can delay building
#import epdb;epdb.st()
# Skipping for now
#manifest.build_requires.extend(requires)
manifest.version = self.version
return manifest
def _commit(self, manifest, version=None):
self.setManifest(manifest, version)
if version:
self.helper.setVersion(self.pkgname, version)
return self.helper.commit(self.pkgname, version, self.msg)
def create(self):
manifest = GemManifest()
manifest.name = self.name
manifest = self._update(manifest)
return self._commit(manifest)
def update(self):
manifest = self.readManifest()
manifest = self._update(manifest)
return self._commit(manifest, self.version)
logfile = '%s_%s.log' % (sys.argv[0][:-3], time.strftime('%Y-%m-%d_%H%M%S'))
log.addRootLogger(logfile)
_cfg = config.UpdateBotConfig()
_cfg.read(mirrorballDir + '/config/%s/updatebotrc' % sys.argv[1])
obj = bot.Bot(_cfg)
helper = conaryhelper.ConaryHelper(_cfg)
toBuild = []
gemNames = _cfg.gemPackage
prefix = _cfg.gemPrefix
recreate = _cfg.recreate
# Update all of the unique sources.
fail = set()
toBuild = set()
preBuiltPackages = set()
total = len(gemNames)
current = 1
verCache = helper.getLatestVersions()
for gemName in gemNames:
pkgName = prefix + gemName
info = GemInfo(gemName).getInformation()
try:
# Only import packages that haven't been imported before
version = verCache.get('%s:source' % pkgName)
if not version or recreate:
print('attempting to import %s (%s/%s)'
% (pkgName, current, total))
print "IMPORTING!!! %s" % pkgName
gu = GemUpdater(gemName, info, _cfg, pkgName)
version = gu.create()
if version.trailingRevision().version != info.version:
print "UPDATING!!! %s" % pkgName
gu = GemUpdater(gemName, info, _cfg, pkgName)
version = gu.update()
if (not verCache.get(pkgName) or
verCache.get(pkgName).getSourceVersion() != version
or recreate):
toBuild.add((pkgName, version, None))
else:
print('not building %s' % pkgName)
preBuiltPackages.add((pkgName, version, None))
except Exception, e:
print('failed to import %s: %s' % (pkgName, e))
fail.add((pkgName, e))
current += 1
if toBuild:
from updatebot import build
from updatebot.cmdline import display
from updatebot.cmdline import UserInterface
ui = UserInterface()
builder = build.Builder(_cfg, ui, rmakeCfgFn='rmakerc')
TrvMap = builder.build(toBuild)
if TrvMap:
print 'Built the following troves:'
print display.displayTroveMap(TrvMap)
|
apache-2.0
| 7,188,269,726,848,830,000 | 29.706081 | 88 | 0.629992 | false |
kevincwright/quagmire
|
atomic/Li6.py
|
1
|
2539
|
# -*- coding: utf-8 -*-
"""
@author: Kevin
"""
from pybec.atomic.alkalimetal import AlkaliMetal, AlkaliMetalLevel
from Li6Data import Li6PhysicalData, Li6LevelDataList, Li6TransitionDataList
#------------------------------------------------------------------------------
# Provide necessary physical constants
#------------------------------------------------------------------------------
import scipy.constants as spc
mu_0 = spc.physical_constants['Bohr magneton'][0] # J / T
m_e = spc.physical_constants['electron mass'][0] # kg
gS = spc.physical_constants['electron g factor'][0] # dimensionless
#------------------------------------------------------------------------------
class Li6(AlkaliMetal):
"""
Representation of a Lithium 6 Atom
"""
# define class constants - Li6 atomic data
SPECIES = 'Li6'
PHYSICALDATA = Li6PhysicalData
LEVELDATALIST = Li6LevelDataList
TRANSITIONDATALIST = Li6TransitionDataList
def __init__(self, kwargs={}):
# initialize the levels
self.levels = []
I = self.Inuc
for leveldata in self.LEVELDATALIST:
leveldata['gL']= self.gL
leveldata['mass']= self.mass
n,L,S,J = leveldata['qnums']
self.levels.append(AlkaliMetalLevel(n, L, S, J, I, leveldata))
@property
def mass(self):
return self.PHYSICALDATA['mass']
@property
def Inuc(self):
return self.PHYSICALDATA['Inuc']
@property
def gI(self):
return self.PHYSICALDATA['gI']
@property
def gL(self): #"orbital g-factor, corrected for the reduced mass"
return ( 1 - m_e / self.PHYSICALDATA['mass'] )
def __repr__(self):
""" create a label for the alkali metal atomic model
indicating the atomic species and the levels included in the model
"""
lvlstr = ''.join(
[' ' * 4 + level.term_symbol + ' Level\n' for level in self.levels])
return '%s (Alkali Metal Atom)\n%s' % (self.SPECIES, lvlstr)
if __name__ == "__main__":
# instantiate a Li6 atom
At = Li6()
# Extract the ground, first and second excited states
GND, D1E, D2E = At.levels[0:3]
# Verify calculation of the hyperfine energy shifts of the F levels
print(GND._E_hfs(F=3/2)-GND._E_hfs(F=1/2))
print(D1E._E_hfs(F=3/2)-D1E._E_hfs(F=1/2))
print(D2E._E_hfs(F=5/2)-D2E._E_hfs(F=3/2))
|
gpl-3.0
| 1,265,146,134,606,209,800 | 34.3 | 81 | 0.531705 | false |
buckbaskin/Insight
|
flaskserver/tests/performance/test_endpoints.py
|
1
|
1199
|
'''
Test that each of the expected code paths functions without error. This will get
more complicated as time goes on. For tests about response time/latency
performance see test/test_app_sla.
'''
import os
from app import server as flask_app
from nose.tools import ok_, assert_equal
from nose.tools import timed
app_client = None
def setup_module():
global flask_app
flask_app.config['TESTING'] = True
if 'STATUS' not in os.environ:
os.environ['STATUS'] = 'TESTING'
global app_client
app_client = flask_app.test_client()
app_client.set_cookie('127.0.0.1', 'user_id', str(42))
def teardown_module():
pass
def test_jsload_happy():
res = app_client.get('/performance/jsload?page=/&m=jane&resp=10&load=20')
if (not res.status_code == 200):
print(str(res.data))
print(str(res.status_code))
print(str(res.headers.items()))
assert_equal(200, res.status_code)
def test_jsload_missing():
res = app_client.get('/performance/jsload?page=/&m=jane')
if (not res.status_code == 400):
print(str(res.data))
print(str(res.status_code))
print(str(res.headers.items()))
assert_equal(400, res.status_code)
|
apache-2.0
| 2,884,008,129,006,198,000 | 29.74359 | 80 | 0.667223 | false |
deter-project/magi
|
magi/messaging/buffers.py
|
1
|
3078
|
from UserList import UserList
class ListBuffer(object):
""" Make a buffer out of a list of buffers to avoid any appending during collection, in memory only """
def __init__(self, buf = None):
self.data = list()
self.length = 0
if buf is not None:
self.add(buf)
def add(self, buf):
self.data.append(buf)
self.length += len(buf)
def chunk(self, suggestsize):
return self.data[0]
    def release(self, size):
        # Drop `size` bytes from the front, splitting the first chunk when the
        # boundary falls inside it; track how many bytes were actually removed.
        released = 0
        while size > 0 and len(self.data) > 0:
            p = self.data.pop(0)
            if len(p) <= size:
                size -= len(p)
                released += len(p)
                continue
            # else size covers only part of p, keep the remainder
            self.data.insert(0, p[size:])
            released += size
            break
        self.length -= released
def __len__(self):
return self.length
class FileBuffer(object):
""" Implements a characters indexed object like a string that is stored in file(s) """
def __init__(self, initlist=None):
self.data = []
if initlist is not None:
# XXX should this accept an arbitrary sequence?
if type(initlist) == type(self.data):
self.data[:] = initlist
elif isinstance(initlist, UserList):
self.data[:] = initlist.data[:]
else:
self.data = list(initlist)
# repr, lt, le, eq, ne, gt, ge, cast, cmp, mul, imul
def __contains__(self, item): return item in self.data
def __len__(self): return len(self.data)
def __getitem__(self, i): return self.data[i]
def __setitem__(self, i, item): self.data[i] = item
def __delitem__(self, i): del self.data[i]
def __getslice__(self, i, j):
i = max(i, 0); j = max(j, 0)
return self.__class__(self.data[i:j])
def __setslice__(self, i, j, other):
i = max(i, 0); j = max(j, 0)
if isinstance(other, UserList):
self.data[i:j] = other.data
elif isinstance(other, type(self.data)):
self.data[i:j] = other
else:
self.data[i:j] = list(other)
def __delslice__(self, i, j):
i = max(i, 0); j = max(j, 0)
del self.data[i:j]
def __add__(self, other):
if isinstance(other, UserList):
return self.__class__(self.data + other.data)
elif isinstance(other, type(self.data)):
return self.__class__(self.data + other)
else:
return self.__class__(self.data + list(other))
def __radd__(self, other):
if isinstance(other, UserList):
return self.__class__(other.data + self.data)
elif isinstance(other, type(self.data)):
return self.__class__(other + self.data)
else:
return self.__class__(list(other) + self.data)
def __iadd__(self, other):
if isinstance(other, UserList):
self.data += other.data
elif isinstance(other, type(self.data)):
self.data += other
else:
self.data += list(other)
return self
def append(self, item): self.data.append(item)
def insert(self, i, item): self.data.insert(i, item)
def pop(self, i=-1): return self.data.pop(i)
def remove(self, item): self.data.remove(item)
def count(self, item): return self.data.count(item)
def index(self, item, *args): return self.data.index(item, *args)
def reverse(self): self.data.reverse()
def sort(self, *args, **kwds): self.data.sort(*args, **kwds)
def extend(self, other):
if isinstance(other, UserList):
self.data.extend(other.data)
else:
self.data.extend(other)
|
gpl-2.0
| 6,015,252,095,808,577,000 | 28.883495 | 104 | 0.643275 | false |
terborg/kml
|
cpuinfo.py
|
1
|
23921
|
#!/usr/bin/env python
"""
cpuinfo
Copyright 2002 Pearu Peterson all rights reserved,
Pearu Peterson <[email protected]>
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
a. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
b. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
c. Neither the name of the Enthought nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
$Revision: 1.6 $
$Date: 2004/10/04 09:30:54 $
Pearu Peterson
"""
__version__ = "$Id: cpuinfo.py,v 1.6 2004/10/04 09:30:54 pearu Exp $"
__all__ = ['cpu']
import sys,string,re,types
class cpuinfo_base:
"""Holds CPU information and provides methods for requiring
the availability of various CPU features.
"""
def _try_call(self,func):
try:
return func()
except:
pass
def __getattr__(self,name):
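        # Expose each _is_*/_has_* helper as a public is_*/has_* method whose
        # exceptions are swallowed; unknown feature queries simply return None.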
if name[0]!='_':
if hasattr(self,'_'+name):
attr = getattr(self,'_'+name)
if type(attr) is types.MethodType:
return lambda func=self._try_call,attr=attr : func(attr)
else:
return lambda : None
raise AttributeError,name
def _getNCPUs(self):
return 1
def _is_32bit(self):
return not self.is_64bit()
class linux_cpuinfo(cpuinfo_base):
info = None
def __init__(self):
if self.info is not None:
return
info = []
try:
for line in open('/proc/cpuinfo').readlines():
name_value = map(string.strip,string.split(line,':',1))
if len(name_value)!=2:
continue
name,value = name_value
if not info or info[-1].has_key(name): # next processor
info.append({})
info[-1][name] = value
import commands
status,output = commands.getstatusoutput('uname -m')
if not status:
if not info: info.append({})
info[-1]['uname_m'] = string.strip(output)
except:
print sys.exc_value,'(ignoring)'
self.__class__.info = info
def _not_impl(self): pass
# Athlon
def _is_AMD(self):
return self.info[0]['vendor_id']=='AuthenticAMD'
def _is_AthlonK6_2(self):
return self._is_AMD() and self.info[0]['model'] == '2'
def _is_AthlonK6_3(self):
return self._is_AMD() and self.info[0]['model'] == '3'
def _is_AthlonK6(self):
return re.match(r'.*?AMD-K6',self.info[0]['model name']) is not None
def _is_AthlonK7(self):
return re.match(r'.*?AMD-K7',self.info[0]['model name']) is not None
def _is_AthlonMP(self):
return re.match(r'.*?Athlon\(tm\) MP\b',
self.info[0]['model name']) is not None
def _is_Athlon64(self):
return re.match(r'.*?Athlon\(tm\) 64\b',
self.info[0]['model name']) is not None
def _is_AthlonHX(self):
return re.match(r'.*?Athlon HX\b',
self.info[0]['model name']) is not None
def _is_Opteron(self):
return re.match(r'.*?Opteron\b',
self.info[0]['model name']) is not None
def _is_Hammer(self):
return re.match(r'.*?Hammer\b',
self.info[0]['model name']) is not None
# Alpha
def _is_Alpha(self):
return self.info[0]['cpu']=='Alpha'
def _is_EV4(self):
return self.is_Alpha() and self.info[0]['cpu model'] == 'EV4'
def _is_EV5(self):
return self.is_Alpha() and self.info[0]['cpu model'] == 'EV5'
def _is_EV56(self):
return self.is_Alpha() and self.info[0]['cpu model'] == 'EV56'
def _is_PCA56(self):
return self.is_Alpha() and self.info[0]['cpu model'] == 'PCA56'
# Intel
#XXX
_is_i386 = _not_impl
def _is_Intel(self):
return self.info[0]['vendor_id']=='GenuineIntel'
def _is_i486(self):
return self.info[0]['cpu']=='i486'
def _is_i586(self):
return self.is_Intel() and self.info[0]['cpu family'] == '5'
def _is_i686(self):
return self.is_Intel() and self.info[0]['cpu family'] == '6'
def _is_Celeron(self):
return re.match(r'.*?Celeron',
self.info[0]['model name']) is not None
def _is_Pentium(self):
return re.match(r'.*?Pentium',
self.info[0]['model name']) is not None
def _is_PentiumII(self):
return re.match(r'.*?Pentium.*?II\b',
self.info[0]['model name']) is not None
def _is_PentiumPro(self):
return re.match(r'.*?PentiumPro\b',
self.info[0]['model name']) is not None
def _is_PentiumMMX(self):
return re.match(r'.*?Pentium.*?MMX\b',
self.info[0]['model name']) is not None
def _is_PentiumIII(self):
return re.match(r'.*?Pentium.*?III\b',
self.info[0]['model name']) is not None
def _is_PentiumIV(self):
return re.match(r'.*?Pentium.*?(IV|4)\b',
self.info[0]['model name']) is not None
def _is_Itanium(self):
return re.match(r'.*?Itanium\b',
self.info[0]['model name']) is not None
def _is_XEON(self):
return re.match(r'.*?XEON\b',
self.info[0]['model name']) is not None
# Varia
def _is_singleCPU(self):
return len(self.info) == 1
def _getNCPUs(self):
return len(self.info)
def _has_fdiv_bug(self):
return self.info[0]['fdiv_bug']=='yes'
def _has_f00f_bug(self):
return self.info[0]['f00f_bug']=='yes'
def _has_mmx(self):
return re.match(r'.*?\bmmx\b',self.info[0]['flags']) is not None
def _has_sse(self):
return re.match(r'.*?\bsse\b',self.info[0]['flags']) is not None
def _has_sse2(self):
return re.match(r'.*?\bsse2\b',self.info[0]['flags']) is not None
def _has_sse3(self):
return re.match(r'.*?\bsse3\b',self.info[0]['flags']) is not None
def _has_3dnow(self):
return re.match(r'.*?\b3dnow\b',self.info[0]['flags']) is not None
def _has_3dnowext(self):
return re.match(r'.*?\b3dnowext\b',self.info[0]['flags']) is not None
def _is_64bit(self):
if self.is_Alpha():
return 1
if self.info[0].get('clflush size','')=='64':
return 1
if self.info[0]['uname_m']=='x86_64':
return 1
return 0
class irix_cpuinfo(cpuinfo_base):
info = None
def __init__(self):
if self.info is not None:
return
info = []
try:
import commands
status,output = commands.getstatusoutput('sysconf')
if status not in [0,256]:
return
for line in output.split('\n'):
name_value = map(string.strip,string.split(line,' ',1))
if len(name_value)!=2:
continue
name,value = name_value
if not info:
info.append({})
info[-1][name] = value
except:
print sys.exc_value,'(ignoring)'
self.__class__.info = info
#print info
def _not_impl(self): pass
def _is_singleCPU(self):
return self.info[0].get('NUM_PROCESSORS') == '1'
def _getNCPUs(self):
return int(self.info[0].get('NUM_PROCESSORS'))
def __cputype(self,n):
return self.info[0].get('PROCESSORS').split()[0].lower() == 'r%s' % (n)
def _is_r2000(self): return self.__cputype(2000)
def _is_r3000(self): return self.__cputype(3000)
def _is_r3900(self): return self.__cputype(3900)
def _is_r4000(self): return self.__cputype(4000)
def _is_r4100(self): return self.__cputype(4100)
def _is_r4300(self): return self.__cputype(4300)
def _is_r4400(self): return self.__cputype(4400)
def _is_r4600(self): return self.__cputype(4600)
def _is_r4650(self): return self.__cputype(4650)
def _is_r5000(self): return self.__cputype(5000)
def _is_r6000(self): return self.__cputype(6000)
def _is_r8000(self): return self.__cputype(8000)
def _is_r10000(self): return self.__cputype(10000)
def _is_r12000(self): return self.__cputype(12000)
def _is_rorion(self): return self.__cputype('orion')
def get_ip(self):
try: return self.info[0].get('MACHINE')
except: pass
def __machine(self,n):
return self.info[0].get('MACHINE').lower() == 'ip%s' % (n)
def _is_IP19(self): return self.__machine(19)
def _is_IP20(self): return self.__machine(20)
def _is_IP21(self): return self.__machine(21)
def _is_IP22(self): return self.__machine(22)
def _is_IP22_4k(self): return self.__machine(22) and self._is_r4000()
def _is_IP22_5k(self): return self.__machine(22) and self._is_r5000()
def _is_IP24(self): return self.__machine(24)
def _is_IP25(self): return self.__machine(25)
def _is_IP26(self): return self.__machine(26)
def _is_IP27(self): return self.__machine(27)
def _is_IP28(self): return self.__machine(28)
def _is_IP30(self): return self.__machine(30)
def _is_IP32(self): return self.__machine(32)
def _is_IP32_5k(self): return self.__machine(32) and self._is_r5000()
def _is_IP32_10k(self): return self.__machine(32) and self._is_r10000()
class darwin_cpuinfo(cpuinfo_base):
info = None
def __init__(self):
if self.info is not None:
return
info = []
try:
import commands
status,output = commands.getstatusoutput('arch')
if not status:
if not info: info.append({})
info[-1]['arch'] = string.strip(output)
status,output = commands.getstatusoutput('machine')
if not status:
if not info: info.append({})
info[-1]['machine'] = string.strip(output)
status,output = commands.getstatusoutput('sysctl hw')
if not status:
if not info: info.append({})
d = {}
for l in string.split(output,'\n'):
l = map(string.strip,string.split(l, '='))
if len(l)==2:
d[l[0]]=l[1]
info[-1]['sysctl_hw'] = d
except:
print sys.exc_value,'(ignoring)'
self.__class__.info = info
def _not_impl(self): pass
def _getNCPUs(self):
try: return int(self.info[0]['sysctl_hw']['hw.ncpu'])
except: return 1
def _is_Power_Macintosh(self):
return self.info[0]['sysctl_hw']['hw.machine']=='Power Macintosh'
def _is_i386(self):
return self.info[0]['arch']=='i386'
def _is_ppc(self):
return self.info[0]['arch']=='ppc'
def __machine(self,n):
return self.info[0]['machine'] == 'ppc%s'%n
def _is_ppc601(self): return self.__machine(601)
def _is_ppc602(self): return self.__machine(602)
def _is_ppc603(self): return self.__machine(603)
def _is_ppc603e(self): return self.__machine('603e')
def _is_ppc604(self): return self.__machine(604)
def _is_ppc604e(self): return self.__machine('604e')
def _is_ppc620(self): return self.__machine(620)
def _is_ppc630(self): return self.__machine(630)
def _is_ppc740(self): return self.__machine(740)
def _is_ppc7400(self): return self.__machine(7400)
def _is_ppc7450(self): return self.__machine(7450)
def _is_ppc750(self): return self.__machine(750)
def _is_ppc403(self): return self.__machine(403)
def _is_ppc505(self): return self.__machine(505)
def _is_ppc801(self): return self.__machine(801)
def _is_ppc821(self): return self.__machine(821)
def _is_ppc823(self): return self.__machine(823)
def _is_ppc860(self): return self.__machine(860)
class sunos_cpuinfo(cpuinfo_base):
info = None
def __init__(self):
if self.info is not None:
return
info = []
try:
import commands
status,output = commands.getstatusoutput('arch')
if not status:
if not info: info.append({})
info[-1]['arch'] = string.strip(output)
status,output = commands.getstatusoutput('mach')
if not status:
if not info: info.append({})
info[-1]['mach'] = string.strip(output)
status,output = commands.getstatusoutput('uname -i')
if not status:
if not info: info.append({})
info[-1]['uname_i'] = string.strip(output)
status,output = commands.getstatusoutput('uname -X')
if not status:
if not info: info.append({})
d = {}
for l in string.split(output,'\n'):
l = map(string.strip,string.split(l, '='))
if len(l)==2:
d[l[0]]=l[1]
info[-1]['uname_X'] = d
status,output = commands.getstatusoutput('isainfo -b')
if not status:
if not info: info.append({})
info[-1]['isainfo_b'] = string.strip(output)
status,output = commands.getstatusoutput('isainfo -n')
if not status:
if not info: info.append({})
info[-1]['isainfo_n'] = string.strip(output)
status,output = commands.getstatusoutput('psrinfo -v 0')
if not status:
if not info: info.append({})
for l in string.split(output,'\n'):
m = re.match(r'\s*The (?P<p>[\w\d]+) processor operates at',l)
if m:
info[-1]['processor'] = m.group('p')
break
except:
print sys.exc_value,'(ignoring)'
self.__class__.info = info
def _not_impl(self): pass
def _is_32bit(self):
return self.info[0]['isainfo_b']=='32'
def _is_64bit(self):
return self.info[0]['isainfo_b']=='64'
def _is_i386(self):
return self.info[0]['isainfo_n']=='i386'
def _is_sparc(self):
return self.info[0]['isainfo_n']=='sparc'
def _is_sparcv9(self):
return self.info[0]['isainfo_n']=='sparcv9'
def _getNCPUs(self):
try: return int(self.info[0]['uname_X']['NumCPU'])
except: return 1
def _is_sun4(self):
return self.info[0]['arch']=='sun4'
def _is_SUNW(self):
return re.match(r'SUNW',self.info[0]['uname_i']) is not None
def _is_sparcstation5(self):
return re.match(r'.*SPARCstation-5',self.info[0]['uname_i']) is not None
def _is_ultra1(self):
return re.match(r'.*Ultra-1',self.info[0]['uname_i']) is not None
def _is_ultra250(self):
return re.match(r'.*Ultra-250',self.info[0]['uname_i']) is not None
def _is_ultra2(self):
return re.match(r'.*Ultra-2',self.info[0]['uname_i']) is not None
def _is_ultra30(self):
return re.match(r'.*Ultra-30',self.info[0]['uname_i']) is not None
def _is_ultra4(self):
return re.match(r'.*Ultra-4',self.info[0]['uname_i']) is not None
def _is_ultra5_10(self):
return re.match(r'.*Ultra-5_10',self.info[0]['uname_i']) is not None
def _is_ultra5(self):
return re.match(r'.*Ultra-5',self.info[0]['uname_i']) is not None
def _is_ultra60(self):
return re.match(r'.*Ultra-60',self.info[0]['uname_i']) is not None
def _is_ultra80(self):
return re.match(r'.*Ultra-80',self.info[0]['uname_i']) is not None
def _is_ultraenterprice(self):
return re.match(r'.*Ultra-Enterprise',self.info[0]['uname_i']) is not None
def _is_ultraenterprice10k(self):
return re.match(r'.*Ultra-Enterprise-10000',self.info[0]['uname_i']) is not None
def _is_sunfire(self):
return re.match(r'.*Sun-Fire',self.info[0]['uname_i']) is not None
def _is_ultra(self):
return re.match(r'.*Ultra',self.info[0]['uname_i']) is not None
def _is_cpusparcv7(self):
return self.info[0]['processor']=='sparcv7'
def _is_cpusparcv8(self):
return self.info[0]['processor']=='sparcv8'
def _is_cpusparcv9(self):
return self.info[0]['processor']=='sparcv9'
class win32_cpuinfo(cpuinfo_base):
info = None
pkey = "HARDWARE\\DESCRIPTION\\System\\CentralProcessor"
# XXX: what does the value of
# HKEY_LOCAL_MACHINE\HARDWARE\DESCRIPTION\System\CentralProcessor\0
# mean?
def __init__(self):
if self.info is not None:
return
info = []
try:
#XXX: Bad style to use so long `try:...except:...`. Fix it!
import _winreg
pkey = "HARDWARE\\DESCRIPTION\\System\\CentralProcessor"
prgx = re.compile(r"family\s+(?P<FML>\d+)\s+model\s+(?P<MDL>\d+)"\
"\s+stepping\s+(?P<STP>\d+)",re.IGNORECASE)
chnd=_winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,pkey)
pnum=0
while 1:
try:
proc=_winreg.EnumKey(chnd,pnum)
except _winreg.error:
break
else:
pnum+=1
print proc
info.append({"Processor":proc})
phnd=_winreg.OpenKey(chnd,proc)
pidx=0
while True:
try:
name,value,vtpe=_winreg.EnumValue(phnd,pidx)
except _winreg.error:
break
else:
pidx=pidx+1
info[-1][name]=value
if name=="Identifier":
srch=prgx.search(value)
if srch:
info[-1]["Family"]=int(srch.group("FML"))
info[-1]["Model"]=int(srch.group("MDL"))
info[-1]["Stepping"]=int(srch.group("STP"))
except:
print sys.exc_value,'(ignoring)'
self.__class__.info = info
def _not_impl(self): pass
# Athlon
def _is_AMD(self):
return self.info[0]['VendorIdentifier']=='AuthenticAMD'
def _is_Am486(self):
return self.is_AMD() and self.info[0]['Family']==4
def _is_Am5x86(self):
return self.is_AMD() and self.info[0]['Family']==4
def _is_AMDK5(self):
return self.is_AMD() and self.info[0]['Family']==5 \
and self.info[0]['Model'] in [0,1,2,3]
def _is_AMDK6(self):
return self.is_AMD() and self.info[0]['Family']==5 \
and self.info[0]['Model'] in [6,7]
def _is_AMDK6_2(self):
return self.is_AMD() and self.info[0]['Family']==5 \
and self.info[0]['Model']==8
def _is_AMDK6_3(self):
return self.is_AMD() and self.info[0]['Family']==5 \
and self.info[0]['Model']==9
def _is_Athlon(self):
return self.is_AMD() and self.info[0]['Family']==6
def _is_Athlon64(self):
return self.is_AMD() and self.info[0]['Family']==15 \
and self.info[0]['Model']==4
def _is_Opteron(self):
return self.is_AMD() and self.info[0]['Family']==15 \
and self.info[0]['Model']==5
# Intel
def _is_Intel(self):
return self.info[0]['VendorIdentifier']=='GenuineIntel'
def _is_i386(self):
return self.info[0]['Family']==3
def _is_i486(self):
return self.info[0]['Family']==4
def _is_i586(self):
return self.is_Intel() and self.info[0]['Family']==5
def _is_i686(self):
return self.is_Intel() and self.info[0]['Family']==6
def _is_Pentium(self):
return self.is_Intel() and self.info[0]['Family']==5
def _is_PentiumMMX(self):
return self.is_Intel() and self.info[0]['Family']==5 \
and self.info[0]['Model']==4
def _is_PentiumPro(self):
return self.is_Intel() and self.info[0]['Family']==6 \
and self.info[0]['Model']==1
def _is_PentiumII(self):
return self.is_Intel() and self.info[0]['Family']==6 \
and self.info[0]['Model'] in [3,5,6]
def _is_PentiumIII(self):
return self.is_Intel() and self.info[0]['Family']==6 \
and self.info[0]['Model'] in [7,8,9,10,11]
def _is_PentiumIV(self):
return self.is_Intel() and self.info[0]['Family']==15
# Varia
def _is_singleCPU(self):
return len(self.info) == 1
def _getNCPUs(self):
return len(self.info)
def _has_mmx(self):
if self.is_Intel():
return (self.info[0]['Family']==5 and self.info[0]['Model']==4) \
or (self.info[0]['Family'] in [6,15])
elif self.is_AMD():
return self.info[0]['Family'] in [5,6,15]
def _has_sse(self):
if self.is_Intel():
return (self.info[0]['Family']==6 and \
self.info[0]['Model'] in [7,8,9,10,11]) \
or self.info[0]['Family']==15
elif self.is_AMD():
return (self.info[0]['Family']==6 and \
self.info[0]['Model'] in [6,7,8,10]) \
or self.info[0]['Family']==15
def _has_sse2(self):
return self.info[0]['Family']==15
def _has_3dnow(self):
# XXX: does only AMD have 3dnow??
return self.is_AMD() and self.info[0]['Family'] in [5,6,15]
def _has_3dnowext(self):
return self.is_AMD() and self.info[0]['Family'] in [6,15]
if sys.platform[:5] == 'linux': # variations: linux2,linux-i386 (any others?)
cpuinfo = linux_cpuinfo
elif sys.platform[:4] == 'irix':
cpuinfo = irix_cpuinfo
elif sys.platform == 'darwin':
cpuinfo = darwin_cpuinfo
elif sys.platform[:5] == 'sunos':
cpuinfo = sunos_cpuinfo
elif sys.platform[:5] == 'win32':
cpuinfo = win32_cpuinfo
elif sys.platform[:6] == 'cygwin':
cpuinfo = linux_cpuinfo
#XXX: other OS's. Eg. use _winreg on Win32. Or os.uname on unices.
else:
cpuinfo = cpuinfo_base
cpu = cpuinfo()
if __name__ == "__main__":
cpu.is_blaa()
cpu.is_Intel()
cpu.is_Alpha()
print 'CPU information:',
for name in dir(cpuinfo):
if name[0]=='_' and name[1]!='_':
r = getattr(cpu,name[1:])()
if r:
if r!=1:
print '%s=%s' %(name[1:],r),
else:
print name[1:],
print
|
lgpl-2.1
| 4,084,521,724,906,250,000 | 33.567919 | 88 | 0.544793 | false |
MahjongRepository/mahjong
|
doc/test.py
|
1
|
2941
|
from mahjong.hand_calculating.hand import HandCalculator
from mahjong.hand_calculating.hand_config import HandConfig, OptionalRules
from mahjong.locale.text_reporter import TextReporter
from mahjong.tile import TilesConverter
calculator = HandCalculator()
# useful helper
def print_hand_result(hand_result, locale='Chinese'):
reporter = TextReporter(locale=locale)
str_dict = reporter.report(hand_result)
if hand_result.error:
print(str_dict['error'])
else:
print(str_dict['fu_details'])
print(str_dict['yaku'])
print(str_dict['cost'])
print('')
####################################################################
# Tanyao hand by ron #
####################################################################
# we had to use all 14 tiles in that array
tiles = TilesConverter.string_to_136_array(man='112233', pin='667788', sou='44')
win_tile = TilesConverter.string_to_136_array(sou='4')[0]
config = HandConfig()
# config.is_dealer = True
config.is_tsumo = True
result = calculator.estimate_hand_value(tiles, win_tile, config=config)
print_hand_result(result, locale='Chinese')
####################################################################
# Bug: Yakuman and Non-yakuman Yakus should not add together #
####################################################################
config = HandConfig(is_renhou=True)
# renhou as a yakuman - old style
config.yaku.renhou.han_closed = 13
# if you directly change this, it would lead to 32 total han, but it should be 13
dora_indicators = [
TilesConverter.string_to_136_array(man='1')[0],
TilesConverter.string_to_136_array(man='1')[0],
TilesConverter.string_to_136_array(man='1')[0],
TilesConverter.string_to_136_array(man='1')[0],
]
tiles = TilesConverter.string_to_136_array(man='22334466557788')
win_tile = TilesConverter.string_to_136_array(man='4')[0]
result = calculator.estimate_hand_value(tiles, win_tile, dora_indicators=dora_indicators, config=config)
print_hand_result(result)
####################################################################
# Bug: Yakuman and Non-yakuman Yakus should not add together #
####################################################################
config = HandConfig(is_renhou=True, options=OptionalRules(renhou_as_yakuman=True))
# renhou as a yakuman - old style
# This should be the correct way to count Renhou as Yakuman
dora_indicators = [
TilesConverter.string_to_136_array(man='1')[0],
TilesConverter.string_to_136_array(man='1')[0],
TilesConverter.string_to_136_array(man='1')[0],
TilesConverter.string_to_136_array(man='1')[0]
]
tiles = TilesConverter.string_to_136_array(man='22334466557788')
win_tile = TilesConverter.string_to_136_array(man='4')[0]
result = calculator.estimate_hand_value(tiles, win_tile, dora_indicators=dora_indicators, config=config)
print_hand_result(result)
|
mit
| 4,933,016,763,453,475,000 | 35.7625 | 104 | 0.613737 | false |
jlinn/pylastica
|
tests/query/test_ids.py
|
1
|
2081
|
__author__ = 'Joe Linn'
import unittest
import pylastica
from tests.base import Base
class IdsTest(unittest.TestCase, Base):
def setUp(self):
index = self._create_index('test')
doc_type1 = index.get_doc_type('helloworld1')
doc_type2 = index.get_doc_type('helloworld2')
doc_type1.add_document(pylastica.Document(1, {'name': 'hello world'}))
doc_type1.add_document(pylastica.Document(2, {'name': 'Joe Linn'}))
doc_type1.add_document(pylastica.Document(3, {'name': 'Linn'}))
doc_type2.add_document(pylastica.Document(4, {'name': 'hello world again'}))
index.refresh()
self._doc_type = doc_type1
self._index = index
def tearDown(self):
self._index.delete()
def test_set_ids_search_single(self):
query = pylastica.query.Ids()
query.set_ids(['1'])
result_set = self._doc_type.search(query)
self.assertEqual(1, len(result_set))
def test_set_ids_search_list(self):
query = pylastica.query.Ids()
query.set_ids(['1', '2'])
result_set = self._doc_type.search(query)
self.assertEqual(2, len(result_set))
def test_add_ids_search_single(self):
query = pylastica.query.Ids()
query.add_id('3')
result_set = self._doc_type.search(query)
self.assertEqual(1, len(result_set))
def test_combo_ids_search_list(self):
query = pylastica.query.Ids()
query.set_ids(['1', '2'])
query.add_id('3')
result_set = self._doc_type.search(query)
self.assertEqual(3, len(result_set))
def test_set_type_single_search_single(self):
query = pylastica.query.Ids('helloworld1', ['1'])
result_set = self._doc_type.search(query)
self.assertEqual(1, len(result_set))
    def test_set_type_single_search_list_doc_in_other_type(self):
query = pylastica.query.Ids('helloworld1', ['1', '4'])
result_set = self._doc_type.search(query)
self.assertEqual(1, len(result_set))
if __name__ == '__main__':
unittest.main()
|
apache-2.0
| -5,237,796,223,068,207,000 | 33.683333 | 84 | 0.608842 | false |
dc3-plaso/dfvfs
|
dfvfs/resolver/__init__.py
|
1
|
1656
|
# -*- coding: utf-8 -*-
"""Imports for the path specification resolver."""
try:
from dfvfs.resolver import bde_resolver_helper
except ImportError:
pass
from dfvfs.resolver import compressed_stream_resolver_helper
from dfvfs.resolver import cpio_resolver_helper
from dfvfs.resolver import data_range_resolver_helper
from dfvfs.resolver import encoded_stream_resolver_helper
from dfvfs.resolver import encrypted_stream_resolver_helper
try:
from dfvfs.resolver import ewf_resolver_helper
except ImportError:
pass
from dfvfs.resolver import fake_resolver_helper
try:
from dfvfs.resolver import fvde_resolver_helper
except ImportError:
pass
from dfvfs.resolver import gzip_resolver_helper
try:
from dfvfs.resolver import lvm_resolver_helper
except ImportError:
pass
try:
from dfvfs.resolver import ntfs_resolver_helper
except ImportError:
pass
from dfvfs.resolver import os_resolver_helper
try:
from dfvfs.resolver import qcow_resolver_helper
except ImportError:
pass
try:
from dfvfs.resolver import raw_resolver_helper
except ImportError:
pass
from dfvfs.resolver import sqlite_blob_resolver_helper
from dfvfs.resolver import tar_resolver_helper
try:
from dfvfs.resolver import tsk_partition_resolver_helper
except ImportError:
pass
try:
from dfvfs.resolver import tsk_resolver_helper
except ImportError:
pass
try:
from dfvfs.resolver import vhdi_resolver_helper
except ImportError:
pass
try:
from dfvfs.resolver import vmdk_resolver_helper
except ImportError:
pass
try:
from dfvfs.resolver import vshadow_resolver_helper
except ImportError:
pass
from dfvfs.resolver import zip_resolver_helper
|
apache-2.0
| 2,267,401,551,693,512,000 | 19.962025 | 60 | 0.799517 | false |
wtolson/planetaryimage
|
tests/test_pds3file.py
|
1
|
4100
|
# -*- coding: utf-8 -*-
import pytest
import os
import numpy
from numpy.testing import assert_almost_equal
from planetaryimage.pds3image import PDS3Image, Pointer
from pvl import Units
DATA_DIR = os.path.join(os.path.dirname(__file__), 'data/')
filename = os.path.join(DATA_DIR, 'pds3_1band.IMG')
gzipped_filename = os.path.join(DATA_DIR, 'pds3_1band.IMG.gz')
bz2_filename = os.path.join(DATA_DIR, 'pds3_1band.IMG.bz2')
@pytest.fixture
def expected():
return numpy.loadtxt(os.path.join(DATA_DIR, 'pds3_1band.txt')).reshape(
(1, 10, 10)
)
def test_pds3_1band_labels(expected):
image = PDS3Image.open(filename)
assert image.filename == filename
assert image.bands == 1
assert image.lines == 10
assert image.samples == 10
assert image.format == 'BAND_SEQUENTIAL'
assert image.dtype == numpy.dtype('>i2')
assert image.start_byte == 640
assert image.shape == (1, 10, 10)
    # FIXME: Double-check that consolidating pixel_type and byte order
    # is actually OK for PDS images.  I think here at the object level
    # it's OK even though in the PDS labels the information is separate.
assert image.size == 100
assert image.compression is None
# Testing .label
assert image.label['FILE_RECORDS'] == 42
assert image.label['IMAGE']['SAMPLE_TYPE'] == 'MSB_INTEGER'
# Testing .data
assert image.data.shape == (1, 10, 10)
assert image.data.dtype == numpy.dtype('>i2')
assert_almost_equal(image.data, expected)
def test_gz_pds3_1band_labels(expected):
image = PDS3Image.open(gzipped_filename)
assert image.filename == gzipped_filename
assert image.bands == 1
assert image.lines == 10
assert image.samples == 10
assert image.format == 'BAND_SEQUENTIAL'
assert image.dtype == numpy.dtype('>i2')
assert image.start_byte == 640
assert image.shape == (1, 10, 10)
assert image.size == 100
assert image.compression == 'gz'
# Testing .label
assert image.label['FILE_RECORDS'] == 42
assert image.label['IMAGE']['SAMPLE_TYPE'] == 'MSB_INTEGER'
# Testing .data
assert image.data.shape == (1, 10, 10)
assert image.data.dtype == numpy.dtype('>i2')
assert_almost_equal(image.data, expected)
def test_bz2_pds3_1band_labels(expected):
image = PDS3Image.open(bz2_filename)
assert image.filename == bz2_filename
assert image.bands == 1
assert image.lines == 10
assert image.samples == 10
assert image.format == 'BAND_SEQUENTIAL'
assert image.dtype == numpy.dtype('>i2')
assert image.start_byte == 640
assert image.shape == (1, 10, 10)
assert image.size == 100
assert image.compression == 'bz2'
# Testing .label
assert image.label['FILE_RECORDS'] == 42
assert image.label['IMAGE']['SAMPLE_TYPE'] == 'MSB_INTEGER'
# Testing .data
assert image.data.shape == (1, 10, 10)
assert image.data.dtype == numpy.dtype('>i2')
assert_almost_equal(image.data, expected)
def test_parse_pointer():
# ^PTR = nnn
# Example tests/mission_data/1p432690858esfc847p2111l2m1.img
assert Pointer.parse(56, 640) == Pointer(None, 35200)
# ^PTR = nnn <BYTES>
assert Pointer.parse(Units(101337, 'BYTES'), 0) == Pointer(None, 101337)
# ^PTR = "filename"
assert Pointer.parse('W1782844276_1.IMG', 1024) == Pointer('W1782844276_1.IMG', 0)
# ^PTR = ("filename")
assert Pointer.parse(['W1782844276_1.IMG'], 1024) == Pointer('W1782844276_1.IMG', 0)
# ^PTR = ("filename", nnn)
# Example tests/mission_data/W1782844276_1.LBL
assert Pointer.parse(['W1782844276_1.IMG', 5], 1024) == Pointer('W1782844276_1.IMG', 4096)
# ^PTR = ("filename", nnn <BYTES>)
assert Pointer.parse(['W1782844276_1.IMG', Units(101337, 'BYTES')], 1024) == Pointer('W1782844276_1.IMG', 101337)
# Test bad type
with pytest.raises(ValueError):
Pointer.parse(None, 64)
# Test wrong sized arrays
with pytest.raises(ValueError):
Pointer.parse([], 64)
with pytest.raises(ValueError):
Pointer.parse(['W1782844276_1.IMG', 5, 6], 64)
|
bsd-3-clause
| -8,439,754,940,793,868,000 | 31.539683 | 117 | 0.656585 | false |
bellowsj/aiopogo
|
aiopogo/pogoprotos/networking/requests/messages/evolve_pokemon_message_pb2.py
|
1
|
3240
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pogoprotos/networking/requests/messages/evolve_pokemon_message.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from pogoprotos.inventory.item import item_id_pb2 as pogoprotos_dot_inventory_dot_item_dot_item__id__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='pogoprotos/networking/requests/messages/evolve_pokemon_message.proto',
package='pogoprotos.networking.requests.messages',
syntax='proto3',
serialized_pb=_b('\nDpogoprotos/networking/requests/messages/evolve_pokemon_message.proto\x12\'pogoprotos.networking.requests.messages\x1a\'pogoprotos/inventory/item/item_id.proto\"q\n\x14\x45volvePokemonMessage\x12\x12\n\npokemon_id\x18\x01 \x01(\x06\x12\x45\n\x1a\x65volution_item_requirement\x18\x02 \x01(\x0e\x32!.pogoprotos.inventory.item.ItemIdb\x06proto3')
,
dependencies=[pogoprotos_dot_inventory_dot_item_dot_item__id__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_EVOLVEPOKEMONMESSAGE = _descriptor.Descriptor(
name='EvolvePokemonMessage',
full_name='pogoprotos.networking.requests.messages.EvolvePokemonMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='pokemon_id', full_name='pogoprotos.networking.requests.messages.EvolvePokemonMessage.pokemon_id', index=0,
number=1, type=6, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='evolution_item_requirement', full_name='pogoprotos.networking.requests.messages.EvolvePokemonMessage.evolution_item_requirement', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=154,
serialized_end=267,
)
_EVOLVEPOKEMONMESSAGE.fields_by_name['evolution_item_requirement'].enum_type = pogoprotos_dot_inventory_dot_item_dot_item__id__pb2._ITEMID
DESCRIPTOR.message_types_by_name['EvolvePokemonMessage'] = _EVOLVEPOKEMONMESSAGE
EvolvePokemonMessage = _reflection.GeneratedProtocolMessageType('EvolvePokemonMessage', (_message.Message,), dict(
DESCRIPTOR = _EVOLVEPOKEMONMESSAGE,
__module__ = 'pogoprotos.networking.requests.messages.evolve_pokemon_message_pb2'
# @@protoc_insertion_point(class_scope:pogoprotos.networking.requests.messages.EvolvePokemonMessage)
))
_sym_db.RegisterMessage(EvolvePokemonMessage)
# @@protoc_insertion_point(module_scope)
|
mit
| -8,126,262,847,528,678,000 | 40.012658 | 365 | 0.764506 | false |
pathmann/pyTSon
|
tools/bundle.py
|
1
|
16294
|
#!/usr/bin/env python3
import sys
import os
import zipfile
import shutil
import tempfile
import subprocess
from argparse import ArgumentParser
# [("path to file on disk", "path (without file) in zip")]
FILES = [("ressources/octicons", "plugins/pyTSon/ressources/octicons"),
("ressources/python.png", "plugins/pyTSon/python.png"),
("ressources/python/pytsonui", "plugins/pyTSon/include/pytsonui"),
("ressources/python/pytson.py", "plugins/pyTSon/include/pytson.py"),
("ressources/python/devtools.py",
"plugins/pyTSon/include/devtools.py"),
("ressources/python/ts3client.py",
"plugins/pyTSon/include/ts3client.py"),
("ressources/ui/pyTSon-configdialog.ui",
"plugins/pyTSon/ressources/pyTSon-configdialog.ui"),
("ressources/ui/repository.ui",
"plugins/pyTSon/ressources/repository.ui"),
("ressources/ui/installer.ui",
"plugins/pyTSon/ressources/installer.ui"),
("ressources/repositorymaster.json",
"plugins/pyTSon/ressources/repositorymaster.json"),
("ressources/python/ts3plugin.py",
"plugins/pyTSon/scripts/ts3plugin.py"),
("ressources/python/pluginhost.py",
"plugins/pyTSon/scripts/pluginhost.py"),
("generated/pregen/ts3defines.py",
"plugins/pyTSon/include/ts3defines.py"),
("generated/pregen/api.pdf", "plugins/pyTSon/docs/pyTSon.pdf"),
("generated/pregen/ts3help.py", "plugins/pyTSon/include/ts3help.py"),
("ressources/python/ts3widgets", "plugins/pyTSon/include/ts3widgets"),
("ressources/loading.gif", "plugins/pyTSon/ressources/loading.gif"),
("ressources/python/signalslot.py",
"plugins/pyTSon/include/signalslot.py"),
("generated/pregen/pyTSon-de_DE.qm",
"plugins/pyTSon/ressources/i18n/pyTSon-de_DE.qm"),
("Changelog.html", "plugins/pyTSon/Changelog.html"),
("tools/pylupdate.py", "plugins/pyTSon/include/pylupdate.py"),
("LICENSE", "plugins/pyTSon/LICENSE.txt"),
("VERSION", "plugins/pyTSon/VERSION"),
("ressources/python/ts3lib.py", "plugins/pyTSon/include/ts3lib.py")]
ARCHFILES = {'win32': [("build/pyTSon.dll", "plugins/pyTSon_win32.dll"),
("build/python.exe", "plugins/pyTSon/python.exe")],
'win64': [("build/pyTSon.dll", "plugins/pyTSon_win64.dll"),
("build/python.exe", "plugins/pyTSon/python.exe")],
'linux_x86': [("build/libpyTSon.so.1.0.0",
"plugins/libpyTSon_linux_x86.so"),
("build/python", "plugins/pyTSon/python")],
'linux_amd64': [("build/libpyTSon.so.1.0.0",
"plugins/libpyTSon_linux_amd64.so"),
("build/python", "plugins/pyTSon/python")],
'mac': [("build/libpyTSon.1.0.0.dylib",
"plugins/libpyTSon_mac.dylib"),
("build/python", "plugins/pyTSon/python")]}
PYTHONFILES = {'win32': [("python35.dll", "plugins/pyTSon/python35.dll"),
("Lib", "plugins/pyTSon/lib"),
("DLLs/_bz2.pyd",
"plugins/pyTSon/lib/lib-dynload/_bz2.pyd"),
("DLLs/_ctypes.pyd",
"plugins/pyTSon/lib/lib-dynload/_ctypes.pyd"),
("DLLs/_decimal.pyd",
"plugins/pyTSon/lib/lib-dynload/_decimal.pyd"),
("DLLs/_elementtree.pyd",
"plugins/pyTSon/lib/lib-dynload/_elementtree.pyd"),
("DLLs/_hashlib.pyd",
"plugins/pyTSon/lib/lib-dynload/_hashlib.pyd"),
("DLLs/_lzma.pyd",
"plugins/pyTSon/lib/lib-dynload/_lzma.pyd"),
("DLLs/_msi.pyd",
"plugins/pyTSon/lib/lib-dynload/_msi.pyd"),
("DLLs/_multiprocessing.pyd",
"plugins/pyTSon/lib/lib-dynload/_multiprocessing.pyd"),
("DLLs/_overlapped.pyd",
"plugins/pyTSon/lib/lib-dynload/_overlapped.pyd"),
("DLLs/_socket.pyd",
"plugins/pyTSon/lib/lib-dynload/_socket.pyd"),
("DLLs/_ssl.pyd",
"plugins/pyTSon/lib/lib-dynload/_ssl.pyd"),
("DLLs/pyexpat.pyd",
"plugins/pyTSon/lib/lib-dynload/pyexpat.pyd"),
("DLLs/select.pyd",
"plugins/pyTSon/lib/lib-dynload/select.pyd"),
("DLLs/unicodedata.pyd",
"plugins/pyTSon/lib/lib-dynload/unicodedata.pyd"),
("DLLs/winsound.pyd",
"plugins/pyTSon/lib/lib-dynload/winsound.pyd"),
("DLLs/sqlite3.dll",
"plugins/pyTSon/lib/lib-dynload/sqlite3.dll"),
("DLLs/tcl86t.dll",
"plugins/pyTSon/lib/lib-dynload/tcl86t.dll"),
("DLLs/tk86t.dll",
"plugins/pyTSon/lib/lib-dynload/tk86t.dll"),
("include", "plugins/pyTSon/include/python3.5m"), ],
'win64': [("python35.dll", "plugins/pyTSon/python35.dll"),
("Lib", "plugins/pyTSon/lib"),
("DLLs/_bz2.pyd",
"plugins/pyTSon/lib/lib-dynload/_bz2.pyd"),
("DLLs/_ctypes.pyd",
"plugins/pyTSon/lib/lib-dynload/_ctypes.pyd"),
("DLLs/_decimal.pyd",
"plugins/pyTSon/lib/lib-dynload/_decimal.pyd"),
("DLLs/_elementtree.pyd",
"plugins/pyTSon/lib/lib-dynload/_elementtree.pyd"),
("DLLs/_hashlib.pyd",
"plugins/pyTSon/lib/lib-dynload/_hashlib.pyd"),
("DLLs/_lzma.pyd",
"plugins/pyTSon/lib/lib-dynload/_lzma.pyd"),
("DLLs/_msi.pyd",
"plugins/pyTSon/lib/lib-dynload/_msi.pyd"),
("DLLs/_multiprocessing.pyd",
"plugins/pyTSon/lib/lib-dynload/_multiprocessing.pyd"),
("DLLs/_overlapped.pyd",
"plugins/pyTSon/lib/lib-dynload/_overlapped.pyd"),
("DLLs/_socket.pyd",
"plugins/pyTSon/lib/lib-dynload/_socket.pyd"),
("DLLs/_sqlite3.pyd",
"plugins/pyTSon/lib/lib-dynload/_sqlite3.pyd"),
("DLLs/_ssl.pyd",
"plugins/pyTSon/lib/lib-dynload/_ssl.pyd"),
("DLLs/pyexpat.pyd",
"plugins/pyTSon/lib/lib-dynload/pyexpat.pyd"),
("DLLs/select.pyd",
"plugins/pyTSon/lib/lib-dynload/select.pyd"),
("DLLs/unicodedata.pyd",
"plugins/pyTSon/lib/lib-dynload/unicodedata.pyd"),
("DLLs/winsound.pyd",
"plugins/pyTSon/lib/lib-dynload/winsound.pyd"),
("DLLs/sqlite3.dll",
"plugins/pyTSon/lib/lib-dynload/sqlite3.dll"),
("DLLs/tcl86t.dll",
"plugins/pyTSon/lib/lib-dynload/tcl86t.dll"),
("DLLs/tk86t.dll",
"plugins/pyTSon/lib/lib-dynload/tk86t.dll"),
("include", "plugins/pyTSon/include/python3.5m"), ],
'linux_x86': [("lib/libpython3.5m.so.1.0",
"plugins/pyTSon/libpython3.5m_32.so"),
("lib/python3.5",
"plugins/pyTSon/lib/python3.5"),
("include", "plugins/pyTSon/include"), ],
'linux_amd64': [("lib/libpython3.5m.so.1.0",
"plugins/pyTSon/libpython3.5m_64.so"),
("lib/python3.5",
"plugins/pyTSon/lib/python3.5"),
("include", "plugins/pyTSon/include"), ],
'mac': [("lib/libpython3.5m.dylib",
"plugins/pyTSon/libpython3.5m.dylib"),
("lib/python3.5", "plugins/pyTSon/lib/python3.5"),
("include", "plugins/pyTSon/include"), ]}
INIBASE = """
Name = pyTSon
Type = Plugin
Author = Thomas \"PLuS\" Pathmann
Version = %s
Platforms = %s
Description = "pyTSon - A python plugin to enhance the TS3 client with python\
scripts"
"""
def copyFiles(root, files, tempdir):
for loc, inzip in files:
locpath = os.path.join(root, loc)
if os.path.isfile(locpath):
filepath = os.path.join(tempdir, inzip)
filedir = os.path.dirname(filepath)
if not os.path.isdir(filedir):
os.makedirs(filedir)
shutil.copy(locpath, filepath)
else:
for base, dirs, files in os.walk(locpath):
for f in files:
fn = os.path.join(base, f)
filepath = os.path.join(tempdir, inzip +
fn[len(locpath):])
filedir = os.path.dirname(filepath)
if not os.path.isdir(filedir):
os.makedirs(filedir)
shutil.copy(fn, filepath)
def writeZip(tempdir, tozip):
for base, dirs, files in os.walk(tempdir):
if not os.path.basename(base) == "__pycache__":
for f in files:
fn = os.path.join(base, f)
tozip.write(fn, fn[len(tempdir):])
def main(root, pythondir, outdir, arches, buildbase, update):
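    # For each requested architecture: stage the bundle files in a temp dir,
    # optionally provision the bundled python (pip upgrade + requirements),
    # and pack everything into a .base or .ts3_plugin archive.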
verpath = os.path.join(root, "VERSION")
if not os.path.isfile(verpath):
print("Could not find VERSION file in rootdir")
sys.exit(1)
with open(verpath, "r") as f:
ver = f.readline()
for a in arches:
tempdir = tempfile.mkdtemp()
if update:
copyFiles(root, FILES, tempdir)
elif buildbase:
copyFiles(root, ARCHFILES[a], tempdir)
copyFiles(pythondir, PYTHONFILES[a], tempdir)
else:
copyFiles(root, FILES + ARCHFILES[a], tempdir)
copyFiles(pythondir, PYTHONFILES[a], tempdir)
if not update:
stddir = os.path.join(tempdir, "plugins", "pyTSon", "lib")
if not os.path.isdir(stddir):
print("The python standard library coult not be found, check "
"%s for the directory structure" % tempdir)
if a in ["win32", "win64"]:
intpath = os.path.join(tempdir, "plugins", "pyTSon",
"python.exe")
else:
intpath = os.path.join(tempdir, "plugins", "pyTSon", "python")
if not os.path.isfile(intpath):
print("The python interpreter could not be found, check %s "
"for the directory structure" % tempdir)
sys.exit(1)
if a in ["win32", "win64"]:
sitedir = os.path.join(stddir, "site-packages")
else:
sitedir = os.path.join(stddir, "python3.5", "site-packages")
if not os.path.isdir(sitedir):
print("The site directory could not be found, check "
"%s for the directory structure" % tempdir)
sys.exit(1)
            # upgrade pip in place: python -m pip install --no-cache-dir --upgrade pip
p = subprocess.Popen([intpath, "-m", "pip", "install",
"--no-cache-dir", "--upgrade", "pip"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
if p.returncode != 0:
print("Error updating pip:")
print(err)
if out:
print(out)
sys.exit(1)
if err:
print("Warnings generated updating pip:")
print(err)
if out:
print(out)
reqfile = os.path.join(root, "requirements.txt")
if os.path.isfile(reqfile):
p = subprocess.Popen([intpath, "-m", "pip", "install",
"--no-cache-dir", "-r", reqfile],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
if err:
print("Error installing requirements:")
print(err)
if out:
print(out)
sys.exit(1)
elif out:
print(out)
else:
print("Warning: No requirements.txt found")
# move the lib dir to lib_new
shutil.move(stddir, os.path.join(tempdir, "plugins", "pyTSon",
"lib_new"))
if update:
shutil.copyfile(os.path.join(outdir, "pyTSon_%s.base" % a),
os.path.join(outdir, "pyTSon_%s.ts3_plugin" % a))
zipout = zipfile.ZipFile(os.path.join(outdir,
"pyTSon_%s.ts3_plugin" % a),
"a", zipfile.ZIP_DEFLATED)
elif buildbase:
zipout = zipfile.ZipFile(os.path.join(outdir,
"pyTSon_%s.base" % a),
"w", zipfile.ZIP_DEFLATED)
else:
zipout = zipfile.ZipFile(os.path.join(outdir,
"pyTSon_%s.ts3_plugin" % a),
"w", zipfile.ZIP_DEFLATED)
writeZip(tempdir, zipout)
if not buildbase:
zipout.writestr("package.ini", INIBASE % (ver, a))
zipout.close()
shutil.rmtree(tempdir)
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument('rootdir', help='The root directory of pyTSon')
parser.add_argument('pythondir',
help='The directory, python is installed in')
parser.add_argument('outputdir',
help='The directory, where output files should be \
placed in')
parser.add_argument('arch', nargs='+', help='architecture to bundle')
parser.add_argument('-u', '--update', dest='update', action='store_true',
help='Create a bundle by merging a base arch bundle \
with the non-arch-dependent files, argument pythondir \
will be ignored')
parser.add_argument('-b', '--base', dest='base', action='store_true',
help='Create a base arch bundle')
args = parser.parse_args()
if args.update and args.base:
print("update and base are mutual exclusive")
sys.exit(1)
if not args.update and len(args.arch) > 1:
print("Only updates are supported on foreign platforms")
sys.exit(1)
for key in args.arch:
if key not in ARCHFILES:
print("Unrecognized architecture, possible values are: \
%s" % ", ".join(ARCHFILES.keys()))
sys.exit(1)
elif args.update:
path = os.path.join(args.outputdir, "pyTSon_%s.base" % key)
if not os.path.isfile(path):
print("Could not find base arch bundle in outputdir for \
architecture %s" % key)
sys.exit(1)
main(args.rootdir, args.pythondir, args.outputdir, args.arch, args.base,
args.update)
|
gpl-3.0
| -7,942,131,341,461,669,000 | 45.289773 | 81 | 0.486437 | false |
davidlhoumaud/LeeWee
|
http/httpserver.py
|
1
|
9213
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2011 <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import BaseHTTPServer, CGIHTTPServer
import sys, os, urllib, select, optparse,random, time
php_path = None
possible_php_paths = [ '/usr/lib/cgi-bin/php5',
'PROGRAM_PATH/lib/php.py' ]
cgi_directories = ['/','/cgi-bin', '/htbin','/python','/perl']
def setup_php(program_path):
global php_path
for p in possible_php_paths:
p = p.replace('PROGRAM_PATH', program_path)
if os.path.exists(p):
php_path = p
return
raise Exception("No php binary found - not even php.py (program_path=%s) !"%program_path)
class PHPHTTPRequestHandler(CGIHTTPServer.CGIHTTPRequestHandler):
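    # CGI handler that additionally routes *.php requests (and .sh/.cgi/.py
    # scripts) through run_cgi, using the php binary located by setup_php().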
def is_cgi(self):
if os.path.split(self.path)[1] == '':
index_php = os.path.join(self.path, 'index.php')
if os.path.exists(self.translate_path(index_php)):
self.path = index_php
if self.path.find('.php') != -1:
self.cgi_info = os.path.split(self.path)
return True
for p in self.cgi_directories:
p = os.path.join(p,'')
if self.path.startswith(p):
print p
self.cgi_info = os.path.split(self.path)
return True
elif self.path.find('.sh') != -1 or self.path.find('.cgi') != -1 or self.path.find('.py') != -1:
self.cgi_info = os.path.split(self.path)
return True
return False
def run_cgi(self):
"""Execute a CGI script."""
dir, rest = self.cgi_info
i = rest.rfind('?')
if i >= 0:
rest, query = rest[:i], rest[i+1:]
else:
query = ''
i = rest.find('/')
if i >= 0:
script, rest = rest[:i], rest[i:]
else:
script, rest = rest, ''
scriptname = dir + '/' + script
is_php = script.endswith('.php')
# print "#### cgi_info=%s,dir=%s,rest=%s,script=%s,scriptname=%s,is_php=%s"%(self.cgi_info,dir,rest,script,scriptname,is_php)
if self.path.find('.sh') != -1 or self.path.find('.cgi') != -1 or self.path.find('.py') != -1:
ispy = True
else:
ispy = self.is_python(scriptname)
if is_php:
if not php_path: raise Exception('php_path not set')
scriptfile = php_path
sourcefile = self.translate_path(scriptname)
else:
scriptfile = self.translate_path(scriptname)
if not os.path.exists(scriptfile):
self.send_error(404, "No such CGI script (%s)" % `scriptname`)
return
if not os.path.isfile(scriptfile):
self.send_error(403, "CGI script is not a plain file (%s)" %
`scriptname`)
return
if not ispy:
if not (self.have_fork or self.have_popen2 or self.have_popen3):
self.send_error(403, "CGI script is not a Python script (%s)" %
`scriptname`)
return
if not self.is_executable(scriptfile):
self.send_error(403, "CGI script is not executable (%s)" %
`scriptname`)
return
# Reference: http://hoohoo.ncsa.uiuc.edu/cgi/env.html
# XXX Much of the following could be prepared ahead of time!
env = {}
env['DOCUMENT_ROOT'] = os.getcwd()
env['SERVER_SOFTWARE'] = self.version_string()
env['SERVER_NAME'] = self.server.server_name
env['GATEWAY_INTERFACE'] = 'CGI/1.1'
env['SERVER_PROTOCOL'] = self.protocol_version
env['SERVER_PORT'] = str(self.server.server_port)
env['REQUEST_METHOD'] = self.command
uqrest = urllib.unquote(self.cgi_info[1])
env['REQUEST_URI'] = self.path
# env['PATH_INFO'] = uqrest
# env['PATH_TRANSLATED'] = self.translate_path(uqrest)
env['SCRIPT_NAME'] = scriptname
env['SCRIPT_FILENAME'] = self.translate_path(scriptname)
if query:
env['QUERY_STRING'] = query
host = self.address_string()
if host != self.client_address[0]:
env['REMOTE_HOST'] = host
env['REMOTE_ADDR'] = self.client_address[0]
env['REDIRECT_STATUS'] = '1' # for php
# XXX AUTH_TYPE
# XXX REMOTE_USER
# XXX REMOTE_IDENT
if self.headers.typeheader is None:
env['CONTENT_TYPE'] = self.headers.type
else:
env['CONTENT_TYPE'] = self.headers.typeheader
length = self.headers.getheader('content-length')
if length:
env['CONTENT_LENGTH'] = length
accept = []
for line in self.headers.getallmatchingheaders('accept'):
if line[:1] in "\t\n\r ":
accept.append(line.strip())
else:
accept = accept + line[7:].split(',')
env['HTTP_ACCEPT'] = ','.join(accept)
ua = self.headers.getheader('user-agent')
if ua:
env['HTTP_USER_AGENT'] = ua
co = filter(None, self.headers.getheaders('cookie'))
if co:
env['HTTP_COOKIE'] = ', '.join(co)
# XXX Other HTTP_* headers
if not self.have_fork:
# Since we're setting the env in the parent, provide empty
# values to override previously set values
for k in ('QUERY_STRING', 'REMOTE_HOST', 'CONTENT_LENGTH',
'HTTP_USER_AGENT', 'HTTP_COOKIE'):
env.setdefault(k, "")
os.environ.update(env)
self.send_response(200, "Script output follows")
decoded_query = query.replace('+', ' ')
if self.have_fork:
# Unix -- fork as we should
if is_php:
args = [php_path, sourcefile]
else:
args = [script]
if '=' not in decoded_query:
args.append(decoded_query)
self.wfile.flush() # Always flush before forking
pid = os.fork()
if pid != 0:
# Parent
pid, sts = os.waitpid(pid, 0)
# throw away additional data [see bug #427345]
while select.select([self.rfile], [], [], 0)[0]:
try:
if not self.rfile.read(1):
break
except:
break
if sts:
self.log_error("CGI script exit status %#x", sts)
return
# Child
try:
if 0:
time.sleep(.1)
fn = '/tmp/a%d'%random.randint(1000,10000)
f = open(fn, 'w')
s = ''
while select.select([self.rfile], [], [], 0)[0]:
try:
c = self.rfile.read(1)
if not c:
break
s += c
except:
break
print '### input:', repr(s)
print >>f, s
f.close()
self.rfile = open(fn, 'r')
os.dup2(self.rfile.fileno(), 0)
os.dup2(self.wfile.fileno(), 1)
os.chdir(self.translate_path(dir)) # KC
os.execve(scriptfile, args, os.environ)
except:
self.server.handle_error(self.request, self.client_address)
os._exit(127)
else:
raise SystemExit('need fork()')
def serve(bind, port, path, handler=PHPHTTPRequestHandler):
os.chdir(path)
httpd = BaseHTTPServer.HTTPServer((bind,port), handler)
httpd.serve_forever()
if __name__ == '__main__':
parser = optparse.OptionParser()
parser.add_option("-H", "--host", default="", help="\t\tdefault localhost")
parser.add_option("-p", "--port", type="int", default=8000, help="\t\tdefault 8000")
parser.add_option("-P", "--path", default=os.path.realpath(os.path.dirname(sys.argv[0]))+"/www", help="\t\tdefault "+os.path.realpath(os.path.dirname(sys.argv[0]))+"/www")
options, remaining_args = parser.parse_args(sys.argv)
setup_php(os.path.realpath(os.path.dirname(sys.argv[0])))
serve(options.host,options.port,options.path)
|
gpl-2.0
| -838,983,163,491,900,800 | 39.231441 | 175 | 0.524693 | false |