repo_name stringlengths 5-100 | path stringlengths 4-231 | language stringclasses (1 value) | license stringclasses (15 values) | size int64 6-947k | score float64 0-0.34 | prefix stringlengths 0-8.16k | middle stringlengths 3-512 | suffix stringlengths 0-8.17k |
---|---|---|---|---|---|---|---|---|
xfaxca/pymlkit | pymlkit/preproc/eda.py | Python | gpl-3.0 | 2,894 | 0.003455 |
# explore.py
"""
Module containing functionality for exploratory data analysis and visualization.
"""
import seaborn as sns
import matplotlib.pyplot as plt
__all__ = [
'class_proportions',
'see_nulls',
'distplots',
'pairplots'
]
# ====== Data Statistics
def class_proportions(y):
"""
Function to calculate the proportion of classes for a given set of class labels. Returns a dictionary of class
proportions where the keys are the labels and the values are the percentage of the total number of samples that
occur for each class label.
    :param y: (list) Class labels (typically ints in classification problems, but strings are accepted)
    :return: (dict) Mapping of each class label to its percentage of the total sample count
"""
if not isinstance(y, list):
y = list(y)
counts_dict = {i: y.count(i) for i in y}
prop_dict = {}
    for key, val in counts_dict.items():
print('Class: %10s | counts: %i (%0.2f%%)' % (key, val, (100 * val / len(y))))
prop_dict[key] = (100 * val / len(y))
print('Total number of samples:', len(y))
return prop_dict
# ====== Visualization
def see_nulls(df):
"""
Function to visualize columns with null values for features in a pandas DataFrame
:param df: pandas DataFrame with feature data
:return:
"""
plt.figure(figsize=(14, 9))
sns.heatmap(df.isnull(), cmap='viridis', yticklabels=False, xticklabels=True, cbar=True)
plt.title("Visualization of Null Values in Data")
plt.xticks(rotation=30)
plt.show()
return None
def distplots(df, features):
"""
Function to show the distribution of a selected feature(s)
:param df: Dataframe containing features
:param features: (str/list): Feature(s) to be plotted in a distribution plot
:return:
"""
if not isinstance(features, list):
        title_str = features
        features = [features]
else:
title_str = ", ".join(features)
ax_label = ""
for feature in features:
ax_label += ('| %s |' % feature)
sns.distplot(df[feature].values, label=feature, norm_hist=True)
    plt.xlabel(ax_label)
plt.legend(fontsize=12)
plt.title('Distribution of %s' % title_str)
    plt.show()
def pairplots(df, features, kind='reg', diag_kind='kde'):
"""
Function to make a quick pairplot of selected features
:param df: DataFrame containing the feature matrix
:param features: (str/list) Features selected for inclusion in pairplot.
:param kind: (str) Kind of plot for the non-identity relationships ('scatter', 'reg').
:param diag_kind: (str) Kind of plot for the diagonal subplots ('hist', 'kde').
:return:
"""
if not isinstance(features, list):
features = [features]
data = df[features]
sns.pairplot(data=data, vars=features, kind=kind,
diag_kind=diag_kind, dropna=True)
plt.show()
|
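A quick illustration of how the EDA helpers above might be called; this is a sketch, not part of the original module. The DataFrame and column names are invented, and the import path simply follows the file's location in the repo:

import pandas as pd
from pymlkit.preproc.eda import class_proportions, distplots

df = pd.DataFrame({
    'age': [22, 35, 58, 41, 29, 63],
    'income': [28000, 52000, 71000, 60000, 33000, 80000],
    'target': [0, 1, 1, 0, 0, 1],
})
props = class_proportions(df['target'])   # prints per-class counts, returns {label: percent}
distplots(df, ['age', 'income'])          # overlaid normalized distribution plots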
timwaizenegger/osecm-sdos | mcm/__init__.py | Python | mit | 1,307 | 0.012242 |
#!/usr/bin/python
# coding=utf-8
"""
Project MCM - Micro Content Management
SDOS - Secure Delete Object Store
Copyright (C) <2016> Tim Waizenegger, <University of Stuttgart>
This software may be modified and distributed under the terms
of the MIT license. See the LICENSE file for details.
"""
import logging, coloredlogs, sys
from mcm.sdos import configuration
log_format = '%(asctime)s %(module)s %(name)s[%(process)d][%(thread)d] %(levelname)s %(message)s'
field_styles = {'module': {'color': 'magenta'}, 'hostname': {'color': 'magenta'}, 'programname': {'color': 'cyan'},
'name': {'color': 'blue'}, 'levelname': {'color': 'black', 'bold': True}, 'asctime': {'color': 'green'}}
coloredlogs.install(level=configuration.log_level, fmt=log_format, field_styles=field_styles)
#logging.getLogger("werkzeug").setLevel(level=logging.WARNING)
#logging.getLogger("swiftclient").setLevel(level=logging.WARNING)
"""
logging.basicConfig(level=configuration.log_level, format=configuration.log_format)
"""
logging.error("###############################################################################")
logging.error("SDOS service running")
logging.error("Python {}".format(sys.version))
logging.error("###############################################################################")
|
mozillazg/mzgblog | model.py | Python | mit | 30,298 | 0.018222 |
# -*- coding: utf-8 -*-
import os,logging
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.ext.db import Model as DBModel
from google.appengine.api import memcache
from google.appengine.api import mail
from google.appengine.api import urlfetch
from google.appengine.api import datastore
from datetime import datetime
import urllib, hashlib,urlparse
import zipfile,re,pickle,uuid
#from base import *
logging.info('module base reloaded')
rootpath=os.path.dirname(__file__)
def vcache(key="",time=3600):
def _decorate(method):
def _wrapper(*args, **kwargs):
if not g_blog.enable_memcache:
return method(*args, **kwargs)
result=method(*args, **kwargs)
memcache.set(key,result,time)
return result
return _wrapper
return _decorate
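One thing worth flagging: as written, vcache stores each computed result in memcache but never reads it back, so the cache is populated without ever being consulted. A read-through variant, as a sketch (the name is illustrative; g_blog and memcache are the module's own):

def vcache_read_through(key="", time=3600):
    def _decorate(method):
        def _wrapper(*args, **kwargs):
            if not g_blog.enable_memcache:
                return method(*args, **kwargs)
            cached = memcache.get(key)       # consult the cache first
            if cached is not None:
                return cached
            result = method(*args, **kwargs)
            memcache.set(key, result, time)  # populate on miss
            return result
        return _wrapper
    return _decorate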
class Theme:
def __init__(self, name='default'):
self.name = name
self.mapping_cache = {}
self.dir = '/themes/%s' % name
self.viewdir=os.path.join(rootpath, 'view')
self.server_dir = os.path.join(rootpath, 'themes',self.name)
if os.path.exists(self.server_dir):
self.isZip=False
else:
self.isZip=True
self.server_dir =self.server_dir+".zip"
#self.server_dir=os.path.join(self.server_dir,"templates")
        logging.debug('server_dir:%s' % self.server_dir)
    def __getattr__(self, name):
if self.mapping_cache.has_key(name):
return self.mapping_cache[name]
else:
path ="/".join((self.name,'templates', name + '.html'))
logging.debug('path:%s'%path)
## if not os.path.exists(path):
## path = os.path.join(rootpath, 'themes', 'default', 'templates', name + '.html')
## if not os.path.exists(path):
## path = None
self.mapping_cache[name]=path
return path
class ThemeIterator:
def __init__(self, theme_path='themes'):
self.iterating = False
self.theme_path = theme_path
self.list = []
def __iter__(self):
return self
def next(self):
if not self.iterating:
self.iterating = True
self.list = os.listdir(self.theme_path)
self.cursor = 0
if self.cursor >= len(self.list):
self.iterating = False
raise StopIteration
else:
value = self.list[self.cursor]
self.cursor += 1
if value.endswith('.zip'):
value=value[:-4]
return value
#return (str(value), unicode(value))
class LangIterator:
def __init__(self,path='locale'):
self.iterating = False
self.path = path
self.list = []
for value in os.listdir(self.path):
if os.path.isdir(os.path.join(self.path,value)):
if os.path.exists(os.path.join(self.path,value,'LC_MESSAGES')):
try:
lang=open(os.path.join(self.path,value,'language')).readline()
self.list.append({'code':value,'lang':lang})
except:
self.list.append( {'code':value,'lang':value})
def __iter__(self):
return self
def next(self):
if not self.iterating:
self.iterating = True
self.cursor = 0
if self.cursor >= len(self.list):
self.iterating = False
raise StopIteration
else:
value = self.list[self.cursor]
self.cursor += 1
return value
def getlang(self,language):
from django.utils.translation import to_locale
for item in self.list:
if item['code']==language or item['code']==to_locale(language):
return item
return {'code':'en_US','lang':'English'}
class BaseModel(db.Model):
def __init__(self, parent=None, key_name=None, _app=None, **kwds):
self.__isdirty = False
DBModel.__init__(self, parent=None, key_name=None, _app=None, **kwds)
def __setattr__(self,attrname,value):
"""
DataStore api stores all prop values say "email" is stored in "_email" so
we intercept the set attribute, see if it has changed, then check for an
onchanged method for that property to call
"""
if (attrname.find('_') != 0):
if hasattr(self,'_' + attrname):
curval = getattr(self,'_' + attrname)
if curval != value:
self.__isdirty = True
if hasattr(self,attrname + '_onchange'):
getattr(self,attrname + '_onchange')(curval,value)
DBModel.__setattr__(self,attrname,value)
class Cache(db.Model):
cachekey = db.StringProperty(multiline=False)
content = db.TextProperty()
class Blog(db.Model):
owner = db.UserProperty()
author=db.StringProperty(default='admin')
rpcuser=db.StringProperty(default='admin')
rpcpassword=db.StringProperty(default='')
description = db.TextProperty()
baseurl = db.StringProperty(multiline=False,default=None)
urlpath = db.StringProperty(multiline=False)
title = db.StringProperty(multiline=False,default='Micolog')
subtitle = db.StringProperty(multiline=False,default='This is a micro blog.')
entrycount = db.IntegerProperty(default=0)
posts_per_page= db.IntegerProperty(default=10)
feedurl = db.StringProperty(multiline=False,default='/feed')
blogversion = db.StringProperty(multiline=False,default='0.30')
theme_name = db.StringProperty(multiline=False,default='default')
enable_memcache = db.BooleanProperty(default = False)
link_format=db.StringProperty(multiline=False,default='%(year)s/%(month)s/%(day)s/%(postname)s.html')
comment_notify_mail=db.BooleanProperty(default=True)
    # comment display order
comments_order=db.IntegerProperty(default=0)
    # comments per page
comments_per_page=db.IntegerProperty(default=20)
    # comment check type: 0 - none, 1 - arithmetic, 2 - captcha, 3 - client-side computation
comment_check_type=db.IntegerProperty(default=1)
#0 default 1 identicon
avatar_style=db.IntegerProperty(default=0)
blognotice=db.TextProperty(default='')
domain=db.StringProperty()
show_excerpt=db.BooleanProperty(default=True)
version=0.736
timedelta=db.FloatProperty(default=8.0)# hours
language=db.StringProperty(default="en-us")
sitemap_entries=db.IntegerProperty(default=30)
sitemap_include_category=db.BooleanProperty(default=False)
sitemap_include_tag=db.BooleanProperty(default=False)
sitemap_ping=db.BooleanProperty(default=False)
default_link_format=db.StringProperty(multiline=False,default='?p=%(post_id)s')
default_theme=Theme("default")
allow_pingback=db.BooleanProperty(default=False)
allow_trackback=db.BooleanProperty(default=False)
theme=None
langs=None
application=None
def __init__(self,
parent=None,
key_name=None,
_app=None,
_from_entity=False,
**kwds):
from micolog_plugin import Plugins
self.plugins=Plugins(self)
db.Model.__init__(self,parent,key_name,_app,_from_entity,**kwds)
def tigger_filter(self,name,content,*arg1,**arg2):
return self.plugins.tigger_filter(name,content,blog=self,*arg1,**arg2)
def tigger_action(self,name,*arg1,**arg2):
return self.plugins.tigger_action(name,blog=self,*arg1,**arg2)
def tigger_urlmap(self,url,*arg1,**arg2):
return self.plugins.tigger_urlmap(url,blog=self,*arg1,**arg2)
def get_ziplist(self):
return self.plugins.get_ziplist();
def save(self):
self.put()
def initialsetup(self):
self.title = 'Your Blog Title'
self.subtitle = 'Your Blog Subtitle'
def get_theme(self):
|
andrebellafronte/stoq | stoq/gui/test/test_purchase.py | Python | gpl-2.0 | 11,536 | 0.00156 |
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2012 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <[email protected]>
##
import datetime
import mock
import gtk
from stoqlib.api import api
from stoq.gui.purchase import PurchaseApp
from stoq.gui.test.baseguitest import BaseGUITest
from stoqlib.domain.purchase import PurchaseItem, PurchaseOrder, PurchaseOrderView
from stoqlib.domain.receiving import (ReceivingOrderItem, ReceivingOrder,
PurchaseReceivingMap)
from stoqlib.gui.dialogs.purchasedetails import PurchaseDetailsDialog
from stoqlib.gui.search.searchresultview import SearchResultListView
from stoqlib.gui.wizards.consignmentwizard import ConsignmentWizard
from stoqlib.gui.wizards.productwizard import ProductCreateWizard
from stoqlib.gui.wizards.purchasefinishwizard import PurchaseFinishWizard
from stoqlib.gui.wizards.purchasequotewizard import QuotePurchaseWizard
from stoqlib.gui.wizards.purchasewizard import PurchaseWizard
from stoqlib.reporting.purchase import PurchaseReport
class TestPurchase(BaseGUITest):
def create_app(self, *args, **kwargs):
app = BaseGUITest.create_app(self, *args, **kwargs)
app.branch_filter.combo.select_item_by_data(None)
return app
def test_initial(self):
app = self.create_app(PurchaseApp, u'purchase')
for purchase in app.results:
purchase.open_date = datetime.datetime(2012, 1, 1)
self.check_app(app, u'purchase')
def test_select(self):
self.create_purchase_order()
app = self.create_app(PurchaseApp, u'purchase')
results = app.results
results.select(results[0])
@mock.patch('stoq.gui.purchase.PurchaseApp.run_dialog')
def test_edit_quote_order(self, run_dialog):
api.sysparam.set_bool(self.store, 'SMART_LIST_LOADING', False)
purchase = self.create_purchase_order()
        app = self.create_app(PurchaseApp, u'purchase')
for purchase in app.results:
purchase.open_date = datetime.datetime(2012, 1, 1)
olist = app.results
olist.select(olist[0])
        with mock.patch('stoq.gui.purchase.api', new=self.fake.api):
self.fake.set_retval(purchase)
self.activate(app.NewQuote)
self.assertEquals(run_dialog.call_count, 1)
args, kwargs = run_dialog.call_args
wizard, store, edit_mode = args
self.assertEquals(wizard, QuotePurchaseWizard)
self.assertTrue(store is not None)
self.assertEquals(edit_mode, None)
@mock.patch('stoq.gui.purchase.PurchaseApp.print_report')
def test_print_report(self, print_report):
api.sysparam.set_bool(self.store, 'SMART_LIST_LOADING', False)
app = self.create_app(PurchaseApp, u'purchase')
self.activate(app.window.Print)
self.assertEquals(print_report.call_count, 1)
args, kwargs = print_report.call_args
report, results, views = args
self.assertEquals(report, PurchaseReport)
self.assertTrue(isinstance(results, SearchResultListView))
for view in views:
self.assertTrue(isinstance(view, PurchaseOrderView))
@mock.patch('stoq.gui.purchase.PurchaseApp.select_result')
@mock.patch('stoq.gui.purchase.PurchaseApp.run_dialog')
@mock.patch('stoq.gui.purchase.api.new_store')
def test_new_quote_order(self, new_store, run_dialog, select_result):
new_store.return_value = self.store
self.clean_domain([ReceivingOrderItem, PurchaseReceivingMap,
ReceivingOrder, PurchaseItem, PurchaseOrder])
quotation = self.create_quotation()
quotation.purchase.add_item(self.create_sellable(), 2)
quotation.purchase.status = PurchaseOrder.ORDER_PENDING
api.sysparam.set_bool(self.store, 'SMART_LIST_LOADING', False)
app = self.create_app(PurchaseApp, u'purchase')
olist = app.results
olist.select(olist[0])
self.store.retval = olist[0]
with mock.patch.object(self.store, 'close'):
with mock.patch.object(self.store, 'commit'):
self.activate(app.Edit)
run_dialog.assert_called_once_with(PurchaseWizard,
self.store,
quotation.purchase, False)
select_result.assert_called_once_with(olist[0])
@mock.patch('stoq.gui.purchase.PurchaseApp.run_dialog')
def test_details_dialog(self, run_dialog):
self.clean_domain([ReceivingOrderItem, PurchaseReceivingMap,
ReceivingOrder, PurchaseItem, PurchaseOrder])
purchase = self.create_purchase_order()
purchase.add_item(self.create_sellable(), 2)
api.sysparam.set_bool(self.store, 'SMART_LIST_LOADING', False)
app = self.create_app(PurchaseApp, u'purchase')
olist = app.results
olist.select(olist[0])
olist.double_click(0)
self.assertEquals(run_dialog.call_count, 1)
args, kwargs = run_dialog.call_args
dialog, store = args
self.assertEquals(dialog, PurchaseDetailsDialog)
self.assertTrue(store is not None)
self.assertEquals(kwargs[u'model'], purchase)
@mock.patch('stoq.gui.purchase.yesno')
@mock.patch('stoq.gui.purchase.api.new_store')
def test_confirm_order(self, new_store, yesno):
new_store.return_value = self.store
yesno.return_value = True
self.clean_domain([ReceivingOrderItem, PurchaseReceivingMap,
ReceivingOrder, PurchaseItem, PurchaseOrder])
purchase = self.create_purchase_order()
purchase.add_item(self.create_sellable(), 2)
purchase.status = PurchaseOrder.ORDER_PENDING
api.sysparam.set_bool(self.store, 'SMART_LIST_LOADING', False)
app = self.create_app(PurchaseApp, u'purchase')
olist = app.results
olist.select(olist[0])
with mock.patch.object(self.store, 'close'):
with mock.patch.object(self.store, 'commit'):
self.activate(app.Confirm)
yesno.assert_called_once_with(u'The selected order will be '
u'marked as sent.',
gtk.RESPONSE_YES,
u"Confirm order", u"Don't confirm")
self.assertEquals(purchase.status, PurchaseOrder.ORDER_CONFIRMED)
@mock.patch('stoq.gui.purchase.PurchaseApp.run_dialog')
@mock.patch('stoq.gui.purchase.api.new_store')
def test_finish_order(self, new_store, run_dialog):
new_store.return_value = self.store
self.clean_domain([ReceivingOrderItem, PurchaseReceivingMap,
ReceivingOrder, PurchaseItem, PurchaseOrder])
purchase = self.create_purchase_order()
purchase.add_item(self.create_sellable(), 2)
purchase.get_items()[0].quantity_received = 2
purchase.status = PurchaseOrder.ORDER_CONFIRMED
purchase.received_quantity = 2
api.sysparam.set_bool(self.store, 'SMART_LIST_LOADING', False)
app = self.create_app(PurchaseApp, u'purchase')
olist = app.results
olist.select(olist[0])
with mock.patch.object(self.store, 'close'):
with mock.patch.
|
ZanyLeonic/LeonicBinaryTool | ConUpdate.py | Python | gpl-3.0 | 5,039 | 0.009724 |
import argparse, requests, sys, configparser, zipfile, os, shutil
from urllib.parse import urlparse, parse_qs
appname="ConverterUpdater"
author="Leo Durrant (2017)"
builddate="05/10/17"
version="0.1a"
release="alpha"
filesdelete=['ConUpdate.py', 'Converter.py', 'LBT.py', 'ConverterGUI.py', 'LBTGUI.py']
directoriesdelete=['convlib\\', 'LBTLIB\\', "data\\images\\", "data\\text\\"]
def readvaluefromconfig(filename, section, valuename):
try:
config = configparser.ConfigParser()
config.read(filename)
try:
val = config[section][valuename]
return val
except Exception as e:
print("Cannot find value %s in %s. Check %s.\n Exception: %s" % (valuename, section, filename, str(e)))
return None
except Exception as e:
print("Cannot read %s.\n Exception: %s" % (filename, str(e)))
return None
parser = argparse.ArgumentParser(description='Updater for Converter')
parser.add_argument('-cfg', '--config', nargs="?", help="The path to the configuration file. (Usually generated by Converter.)")
args= parser.parse_args()
parameterfile=args.config
if parameterfile == None:
parameterfile="updater.ini"
else:
parameterfile=str(parameterfile)
executeafterupdate=True
updatedownloadurl=urlparse(readvaluefromconfig(parameterfile, "updater", "downloadurl"))
appinstall=readvaluefromconfig(parameterfile, "updater", "appinstall")
executablefile=readvaluefromconfig(parameterfile, "updater", "executablefn")
keepconfig=readvaluefromconfig(parameterfile, "updater", "keepconfig")
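For reference, an updater.ini shaped to match the keys read above would look roughly like this; the section and key names come from the code, while the values are invented examples:

; illustrative updater.ini (values are examples only)
[updater]
downloadurl = https://example.com/converter/latest.zip
appinstall = C:\Programs\Converter
executablefn = Converter.py
keepconfig = 1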
if os.path.exists(appinstall):
if os.path.isdir(appinstall):
print("Directory found!")
else:
print("Path is not a directory.")
sys.exit(1)
else:
print("Path doesn't exist.")
sys.exit(1)
if not os.path.exists("{}\\{}".format(appinstall, executablefile)):
executeafterupdate=False
temporaryfile="download.tmp"
# print(str(args.config))
def downloadfile():
try:
with open(temporaryfile, "wb") as f:
print("Connecting...", end="")
response = requests.get(updatedownloadurl.geturl(), stream=True)
print("\rConnected! ")
total_length = response.headers.get('content-length')
if not total_length is None:
print("Downloading %s to %s (%s B)" % (str(updatedownloadurl.geturl()), temporaryfile, total_length))
else:
print("Downloading %s..." % (temporaryfile))
if total_length is None:
f.write(response.content)
else:
total_length=int(total_length)
for data in response.iter_content(chunk_size=4096):
# done = int(50 * dl / total_length)
# print("\r%s/%sB" % (done, total_length))
# dl += len(data)
f.write(data)
cleanfiles()
#print("\r%s/%sB" % (done, total_length))
except Exception as e:
print("\n\nFailed to connect to %s. Check the update parameters or try again later.\nException: %s" % (str(updatedownloadurl.geturl()), str(e)))
def cleanfiles():
for file in filesdelete:
fullpath="{}\\{}".format(appinstall, file)
if not os.path.exists(fullpath):
print("%s does not exist." % (fullpath))
else:
try:
os.remove(fullpath)
print("Deleted %s!" % (fullpath))
except Exception as e:
print("\n\nFailed to delete %s!\nException: %s" % (fullpath, str(e)))
for dirs in directoriesdelete:
fullpath="{}\\{}".format(appinstall, dirs)
if not os.path.exists(fullpath):
print("%s does not exist." % (fullpath))
else:
try:
shutil.rmtree(fullpath)
print("Deleted %s!" % (fullpath))
except Exception as e:
print("\n\nFailed to delete %s!\nException: %s" % (fullpath, str(e)))
extractfile(temporaryfile)
def extractfile(file):
print("Extracting %s to %s. Please wait!" % (str(file), appinstall))
try:
with zipfile.ZipFile(file, "r") as zip_r:
zip_r.extractall(appinstall)
except zipfile.BadZipfile as e:
print("\n\nAttempted to extract a bad zip file '%s'!\nException: %s" % (file, str(e)))
except Exception as e:
print("\n\nAn error occurred while trying to extract '%s'.\nException %s" % (file, str(e)))
print("Cleaning temporary files...")
try:
os.remove(file)
except Exception as e:
print("\n\nAn erro occurred while trying to delete temporary files.\n Exception: %s" % (str(e)))
runapp()
def runapp():
try:
pythonlocation=sys.executable
executablefullpath="{}\\{}".format(appinstall, executablefile)
print("Attempting to run app...")
os.system('{} {}'.format(pythonlocation, executablefullpath))
except Exception as e:
raise e
downloadfile()
|
hycis/Pynet | hps/models/Laura_Two_Layers.py | Python | apache-2.0 | 2,815 | 0.005329 |
from jobman import DD, expand, flatten
import pynet.layer as layer
from pynet.model import *
from pynet.layer import *
from pynet.datasets.mnist import Mnist, Mnist_Blocks
import pynet.datasets.spec as spec
import pynet.datasets.mnist as mnist
import pynet.datasets.transfactor as tf
import pynet.datasets.mapping as mapping
import pynet.learning_method as learning_methods
from pynet.learning_rule import LearningRule
from pynet.log import Log
from pynet.train_object import TrainObject
from pynet.cost import Cost
import pynet.datasets.preprocessor as preproc
import pynet.datasets.dataset_noise as noisy
import pynet.layer_noise as layer_noise
import cPickle
import os
from hps.models.model import AE
import theano
from theano.sandbox.cuda.var import CudaNdarraySharedVariable
floatX = theano.config.floatX
class Laura_Two_Layers(AE):
def __init__(self, state):
self.state = state
def build_model(self, input_dim):
with open(os.environ['PYNET_SAVE_PATH'] + '/'
+ self.state.hidden1.model + '/model.pkl') as f1:
model1 = cPickle.load(f1)
with open(os.environ['PYNET_SAVE_PATH'] + '/'
+ self.state.hidden2.model + '/model.pkl') as f2:
model2 = cPickle.load(f2)
model = AutoEncoder(input_dim=input_dim)
while len(model1.encode_layers) > 0:
model.add_encode_layer(model1.pop_encode_layer())
while len(model2.encode_layers) > 0:
model.add_encode_layer(model2.pop_encode_layer())
while len(model2.decode_layers) > 0:
model.add_decode_layer(model2.pop_decode_layer())
while len(model1.decode_layers) > 0:
model.add_decode_layer(model1.pop_decode_layer())
return model
def run(self):
dataset = self.build_dataset()
learning_rule = self.build_learning_rule()
learn_method = self.build_learning_method()
model = self.build_model(dataset.feature_size())
model.layers[0].dropout_below = self.state.hidden1.dropout_below
if self.state.log.save_to_database_name:
database = self.build_database(dataset, learning_rule, learn_method, model)
            database['records']['h1_model'] = self.state.hidden1.model
            database['records']['h2_model'] = self.state.hidden2.model
log = self.build_log(database)
log.info("Fine Tuning")
for layer in model.layers:
layer.dropout_below = None
layer.noise = None
train_obj = TrainObject(log = log,
dataset = dataset,
learning_rule = learning_rule,
learning_method = learn_method,
model = model)
train_obj.run()
|
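Note how build_model above nests the second autoencoder inside the first: model1's encoder layers, then model2's encoder and decoder, then model1's decoder. A toy sketch of the resulting grouping, using plain lists in place of the pynet models (illustrative only):

m1_enc, m1_dec = ['enc1'], ['dec1']
m2_enc, m2_dec = ['enc2'], ['dec2']
stacked = m1_enc + m2_enc + m2_dec + m1_dec
print(stacked)  # ['enc1', 'enc2', 'dec2', 'dec1'] -- model2 nested inside model1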
rwl/muntjac | muntjac/demo/sampler/features/menubar/MenuBarItemStylesExample.py | Python | apache-2.0 | 2,645 | 0.000378 |
from muntjac.ui.vertical_layout import VerticalLayout
from muntjac.ui.menu_bar import MenuBar, ICommand
from muntjac.terminal.external_resource import ExternalResource
class MenuBarItemStylesExample(VerticalLayout):
def __init__(self):
super(MenuBarItemStylesExample, self).__init__()
self._menubar = MenuBar()
menuCommand = MenuCommand(self)
# Save reference to individual items so we can add sub-menu items to
# them
f = self._menubar.addItem('File', None)
newItem = f.addItem('New', None)
f.addItem('Open f...', menuCommand)
f.addSeparator()
# Add a style name for a menu item, then use CSS to alter the visuals
f.setStyleName('file')
newItem.addItem('File', menuCommand)
newItem.addItem('Folder', menuCommand)
        newItem.addItem('Project...', menuCommand)
f.addItem('Close', menuCommand)
f.addItem('Close All', menuCommand).setStyleName('close-all')
f.addSeparator()
f.addItem('Save', menuCommand)
f.addItem('Save As...', menuCommand)
f.addItem('Save All', menuCommand)
edit = self._menubar.addItem('Edit', None)
        edit.addItem('Undo', menuCommand)
edit.addItem('Redo', menuCommand).setEnabled(False)
edit.addSeparator()
edit.addItem('Cut', menuCommand)
edit.addItem('Copy', menuCommand)
edit.addItem('Paste', menuCommand)
edit.addSeparator()
find = edit.addItem('Find/Replace', menuCommand)
# Actions can be added inline as well, of course
find.addItem('Google Search', SearchCommand(self))
find.addSeparator()
find.addItem('Find/Replace...', menuCommand)
find.addItem('Find Next', menuCommand)
find.addItem('Find Previous', menuCommand)
view = self._menubar.addItem('View', None)
view.addItem('Show/Hide Status Bar', menuCommand)
view.addItem('Customize Toolbar...', menuCommand)
view.addSeparator()
view.addItem('Actual Size', menuCommand)
view.addItem('Zoom In', menuCommand)
view.addItem('Zoom Out', menuCommand)
self.addComponent(self._menubar)
class SearchCommand(ICommand):
def __init__(self, c):
self._c = c
def menuSelected(self, selectedItem):
er = ExternalResource('http://www.google.com')
self._c.getWindow().open(er)
class MenuCommand(ICommand):
def __init__(self, c):
self._c = c
def menuSelected(self, selectedItem):
self._c.getWindow().showNotification('Action '
+ selectedItem.getText())
|
krafczyk/spack | var/spack/repos/builtin/packages/picard/package.py | Python | lgpl-2.1 | 4,400 | 0.001136 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import glob
import os.path
import re
class Picard(Package):
"""Picard is a set of command line tools for manipulating high-throughput
sequencing (HTS) data and formats such as SAM/BAM/CRAM and VCF.
"""
homepage = "http://broadinstitute.github.io/picard/"
url = "https://github.com/broadinstitute/picard/releases/download/2.9.2/picard.jar"
_urlfmt = "https://github.com/broadinstitute/picard/releases/download/{0}/picard.jar"
_oldurlfmt = 'https://github.com/broadinstitute/picard/releases/download/{0}/picard-tools-{0}.zip'
# They started distributing a single jar file at v2.6.0, prior to
# that it was a .zip file with multiple .jar and .so files
version('2.18.3', '181b1b0731fd35f0d8bd44677d8787e9', expand=False)
version('2.18.0', '20045ff141e4a67512365f0b6bbd8229', expand=False)
version('2.17.0', '72cc527f1e4ca6a799ae0117af60b54e', expand=False)
version('2.16.0', 'fed8928b03bb36e355656f349e579083', expand=False)
version('2.15.0', '3f5751630b1a3449edda47a0712a64e4', expand=False)
    version('2.13.2', '3d7b33fd1f43ad2129e6ec7883af56f5', expand=False)
version('2.10.0', '96f3c11b1c9be9fc8088bc1b7b9f7538', expand=False)
version('2.9.4', '5ce72af4d5efd02fba7084dcfbb3c7b3', expand=False)
version('2.9.3', '3a33c231bcf3a61870c3d44b3b183924', expand=False)
version('2.9.2', '0449279a6a89830917e8bcef3a976ef7', expand=False)
version('2.9.0', 'b711d492f16dfe0084d33e684dca2202', expand=False)
version('2.8.3', '4a181f55d378cd61d0b127a40dfd5016', expand=False)
version('2.6.0', '91f35f22977d9692ce2718270077dc50', expand=False)
version('1.140', '308f95516d94c1f3273a4e7e2b315ec2')
depends_on('java@8:', type='run')
def install(self, spec, prefix):
mkdirp(prefix.bin)
# The list of files to install varies with release...
# ... but skip the spack-{build.env}.out files.
files = [x for x in glob.glob("*") if not re.match("^spack-", x)]
for f in files:
install(f, prefix.bin)
# Set up a helper script to call java on the jar file,
# explicitly codes the path for java and the jar file.
script_sh = join_path(os.path.dirname(__file__), "picard.sh")
script = prefix.bin.picard
install(script_sh, script)
set_executable(script)
# Munge the helper script to explicitly point to java and the
# jar file.
java = self.spec['java'].prefix.bin.java
kwargs = {'ignore_absent': False, 'backup': False, 'string': False}
filter_file('^java', java, script, **kwargs)
filter_file('picard.jar', join_path(prefix.bin, 'picard.jar'),
script, **kwargs)
def setup_environment(self, spack_env, run_env):
"""The Picard docs suggest setting this as a convenience."""
run_env.prepend_path('PICARD',
join_path(self.prefix, 'bin', 'picard.jar'))
def url_for_version(self, version):
if version < Version('2.6.0'):
return self._oldurlfmt.format(version)
else:
return self._urlfmt.format(version)
|
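To make the version-dependent URL scheme concrete, here is what url_for_version resolves to for a new and an old release, using the two format strings defined in the class:

new_fmt = "https://github.com/broadinstitute/picard/releases/download/{0}/picard.jar"
old_fmt = "https://github.com/broadinstitute/picard/releases/download/{0}/picard-tools-{0}.zip"
print(new_fmt.format("2.9.2"))   # single .jar layout, used for 2.6.0 and later
print(old_fmt.format("1.140"))   # multi-file .zip layout, used before 2.6.0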
calpaterson/dircast | dircast/feed.py | Python | gpl-3.0 | 848 | 0.005896 |
from feedgen.feed import FeedGenerator
def format_itunes_duration(td):
return "{hours:02d}:{minutes:02d}:{seconds:02d}".format(
hours=td.seconds//3600,
minutes=(td.seconds//60)%60,
seconds=int(td.seconds%60)
)
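Note that td.seconds excludes the days component of a timedelta, so a duration of 24 hours or more would wrap around here. A day-safe variant, as a sketch (the name is illustrative):

def format_itunes_duration_safe(td):
    total = int(td.total_seconds())   # includes days, unlike td.seconds
    return "{:02d}:{:02d}:{:02d}".format(total // 3600, (total // 60) % 60, total % 60)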
def add_entry(fg, md):
fe = fg.add_entry()
fe.id(md.id)
fe.title(md.title)
    fe.enclosure(md.link, str(md.length), "audio/mpeg")
if md.duration is not None:
fe.podcast.itunes_duration(format_itunes_duration(md.duration))
def generate_feed(channel_dict, file_metadatas):
fg = FeedGenerator()
fg.load_extension("podcast")
fg.link(href=channel_dict["url"], rel="self")
    fg.title(channel_dict["title"])
fg.description(channel_dict["description"])
for file_metadata in file_metadatas:
add_entry(fg, file_metadata)
return fg.rss_str(pretty=True)
|
BhallaLab/moose | moose-examples/izhikevich/Izhikevich.py | Python | gpl-3.0 | 23,767 | 0.009593 |
# Izhikevich.py ---
#
# Filename: Izhikevich.py
# Description:
# Author: Subhasis Ray
# Maintainer:
# Created: Fri May 28 14:42:33 2010 (+0530)
# Version:
# Last-Updated: Tue Sep 11 14:27:18 2012 (+0530)
# By: subha
# Update #: 1212
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
# threshold variability to be checked.
# Bistability not working.
# DAP working with increased parameter value 'a'
# inhibition induced spiking kind of working but not matching with the paper figure
# inhibition induced bursting kind of working but not matching with the paper figure
# Accommodation cannot work with the current implementation: because the equation for u is not what is mentioned in the paper
# it is: u = u + tau*a*(b*(V+65)); [It is nowhere in the paper and you face it only if you look at the matlab code for figure 1].
# It is not possible to tune a, b, c, d in any way to produce this from: u = u + tau*a*(b*V - u)
#
# Change log:
#
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
#
# Code:
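For reference, the two-variable model this script implements (Izhikevich 2003), in the paper's mV/ms units, as a minimal standalone Euler sketch independent of the MOOSE implementation below. The parameter order matches the tuples in IzhikevichDemo.parameters (a, b, c, d, injection current I, initial Vm, duration):

def izhikevich_sketch(a, b, c, d, I, v0, t_stop, dt=0.25):
    # v' = 0.04*v**2 + 5*v + 140 - u + I ;  u' = a*(b*v - u)
    # spike/reset: when v >= 30 mV, v <- c and u <- u + d
    v, u, trace = v0, b * v0, []
    t = 0.0
    while t < t_stop:
        v += dt * (0.04 * v * v + 5.0 * v + 140.0 - u + I)
        u += dt * a * (b * v - u)
        if v >= 30.0:
            trace.append(30.0)   # clip the spike peak for plotting
            v, u = c, u + d
        else:
            trace.append(v)
        t += dt
    return trace

# e.g. tonic spiking (row 'A' below): izhikevich_sketch(0.02, 0.2, -65.0, 6.0, 14.0, -70.0, 100.0)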
import time
from numpy import *
import os
import sys
import moose
class IzhikevichDemo:
"""Class to setup and simulate the various kind of neuronal behaviour using Izhikevich model.
Fields:
"""
# Paramteres for different kinds of behaviour described by Izhikevich
# (1. IEEE TRANSACTIONS ON NEURAL NETWORKS, VOL. 14, NO. 6, NOVEMBER 2003
# and 2. IEEE TRANSACTIONS ON NEURAL NETWORKS, VOL. 15, NO. 5, SEPTEMBER
# 2004)
# Modified and enhanced using: http://www.izhikevich.org/publications/figure1.m
# The entries in the tuple are as follows:
# fig. no. in paper (2), parameter a, parameter b, parameter c (reset value of v in mV), parameter d (after-spike reset value of u), injection current I (uA), initial value of Vm, duration of simulation (ms)
#
# They are all in whatever unit they were in the paper. Just before use we convert them to SI.
parameters = {
"tonic_spiking": ['A', 0.02 , 0.2 , -65.0, 6.0 , 14.0, -70.0, 100.0], # Fig. 1.A
"phasic_spiking": ['B', 0.02 , 0.25 , -65.0, 6.0 , 0.5, -64.0, 200.0], # Fig. 1.B
"tonic_bursting": ['C', 0.02 , 0.2 , -50.0, 2.0 , 15.0, -70.0, 220.0], # Fig. 1.C
"phasic_bursting": ['D', 0.02 , 0.25 , -55.0, 0.05 , 0.6, -64.0, 200.0], # Fig. 1.D
"mixed_mode": ['E', 0.02 , 0.2 , -55.0, 4.0 ,
|
10.0, -70.0, 160.0], # Fig. 1.E
"
|
spike_freq_adapt": ['F', 0.01 , 0.2 , -65.0, 8.0 , 30.0, -70.0, 85.0 ], # Fig. 1.F # spike frequency adaptation
"Class_1": ['G', 0.02 , -0.1 , -55.0, 6.0 , 0, -60.0, 300.0], # Fig. 1.G # Spikining Frequency increases with input strength
"Class_2": ['H', 0.2 , 0.26 , -65.0, 0.0 , 0, -64.0, 300.0], # Fig. 1.H # Produces high frequency spikes
"spike_latency": ['I', 0.02 , 0.2 , -65.0, 6.0 , 7.0, -70.0, 100.0], # Fig. 1.I
"subthresh_osc": ['J', 0.05 , 0.26 , -60.0, 0.0 , 0, -62.0, 200.0], # Fig. 1.J # subthreshold oscillations
"resonator": ['K', 0.1 , 0.26 , -60.0, -1.0 , 0, -62.0, 400.0], # Fig. 1.K
"integrator": ['L', 0.02 , -0.1 , -55.0, 6.0 , 0, -60.0, 100.0], # Fig. 1.L
"rebound_spike": ['M', 0.03 , 0.25 , -60.0, 4.0 , -15, -64.0, 200.0], # Fig. 1.M
"rebound_burst": ['N', 0.03 , 0.25 , -52.0, 0.0 , -15, -64.0, 200.0], # Fig. 1.N
"thresh_var": ['O', 0.03 , 0.25 , -60.0, 4.0 , 0, -64.0, 100.0], # Fig. 1.O # threshold variability
"bistable": ['P', 0.1 , 0.26 , -60.0, 0.0 , 1.24, -61.0, 300.0], # Fig. 1.P
"DAP": ['Q', 1.15 , 0.2 , -60.0, -21.0 , 20, -70.0, 50.0], # Fig. 1.Q # Depolarizing after-potential - a had to be increased in order to reproduce the figure
"accommodation": ['R', 0.02 , 1.0 , -55.0, 4.0 , 0, -65.0, 400.0], # Fig. 1.R
"iispike": ['S', -0.02 , -1.0 , -60.0, 8.0 , 75.0, -63.8, 350.0], # Fig. 1.S # inhibition-induced spiking
"iiburst": ['T', -0.026, -1.0 , -45.0, 0.0 , 75.0, -63.8, 350.0] # Fig. 1.T # inhibition-induced bursting
}
documentation = {
"tonic_spiking": """
Neuron is normally silent but spikes when stimulated with a current injection.""",
"phasic_spiking": """
Neuron fires a single spike only at the start of a current pulse.""",
"tonic_bursting": """
Neuron is normally silent but produces bursts of spikes when
stimulated with current injection.""",
"phasic_bursting": """
Neuron is normally silent but produces a burst of spikes at the
beginning of an input current pulse.""",
"mixed_mode": """
Neuron fires a burst at the beginning of input current pulse, but then
switches to tonic spiking.""",
"spike_freq_adapt": """
Neuron fires spikes when a current injection is applied, but at a
gradually reducing rate.""",
"Class_1": """
Neuron fires low frequency spikes with weak input current injection.""",
"Class_2": """
Neuron fires high frequency (40-200 Hz) spikes when stimulated with
current injection.""",
"spike_latency": """
The spike starts after a delay from the onset of current
injection. The delay is dependent on strength of input.""",
"subthresh_osc": """
Even at subthreshold inputs a neuron exhibits oscillatory membrane potential.""",
"resonator": """
Neuron fires spike only when an input pulsetrain of a frequency
similar to that of the neuron's subthreshold oscillatory frequency is
applied.""",
"integrator": """
The chances of the neuron firing increases with increase in the frequency
of input pulse train.""",
"rebound_spike": """
When the neuron is released from an inhibitory input, it fires a spike.""",
"rebound_burst": """
When the neuron is released from an inhibitory input, it fires a burst
of action potentials.""",
"thresh_var": """
Depending on the previous input, the firing threshold of a neuron may
change. In this example, the first input pulse does not produce
spike, but when the same input is applied after an inhibitory input,
it fires.""",
"bistable": """
These neurons switch between two stable modes (resting and tonic spiking).
The switch happens via an excitatory or inhibitory input.""",
"DAP": """
After firing a spike, the membrane potential shows a prolonged depolarized
after-potential.""",
"accommodation": """
These neurons do not respond to slowly rising input, but a sharp increase
in input may cause firing.""",
"iispike": """
These neurons fire in response to inhibitory input.""",
"iiburst": """
These neurons show bursting in response to inhibitory input."""
}
def __init__(self):
"""Initialize the object."""
self.model_container = moose.Neutral('/model')
|
ivmech/iviny-scope | lib/xlsxwriter/test/vml/test_write_idmap.py | Python | gpl-3.0 | 748 | 0 |
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013, John McNamara, [email protected]
#
import unittest
from ...compatibility import StringIO
from ...vml import Vml
class TestWriteOidmap(unittest.TestCase):
"""
Test the Vml _write_idmap() method.
"""
def setUp(self):
self.fh = StringIO()
self.vml = Vml()
self.vml._set_filehandle(self.fh)
    def test_write_idmap(self):
"""Test the _write_idmap() method"""
self.vml._write_idmap(1)
exp = """<o:idmap v:ext="edit" data="1"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
if __name__ == '__main__':
unittest.main()
|
DBrianKimmel/PyHouse | Project/src/Modules/House/Family/Insteon/_test/test_insteon_light.py | Python | mit | 6,285 | 0.002546 |
"""
@name: PyHouse/src/Modules/Families/Insteon/_test/test_Insteon_HVAC.py
@author: D. Brian Kimmel
@contact: [email protected]
@copyright: (c) 2014-2020 by D. Brian Kimmel
@license: MIT License
@note: Created on Dec 6, 2014
@Summary:
Passed all 2 tests - DBK - 2015-07-29
"""
__updated__ = '2020-02-17'
# Import system type stuff
from twisted.trial import unittest
# Import PyMh files
from _test.testing_mixin import SetupPyHouseObj
from Modules.House.Lighting.Controllers.controllers import Api as controllerApi
from Modules.House.Lighting.Lights.lights import Api as lightingApi
from Modules.Core.Utilities.debug_tools import PrettyFormatAny
from Modules.House.Family.Insteon.insteon_utils import Decode as utilDecode
from Modules.House.Family.Insteon import insteon_decoder
from Modules.House.Family.Insteon.insteon_light import DecodeResponses as Decode_Light
# 16.C9.D0 =
# 1B.47.81 =
MSG_50_A = bytearray(b'\x02\x50\x16\x62\x2d\x1b\x47\x81\x27\x09\x00')
MSG_50_B = bytearray(b'\x02\x50\x21\x34\x1F\x1b\x47\x81\x27\x6e\x4f')
class DummyApi:
def MqttPublish(self, p_topic, p_msg):
return
class SetupMixin(object):
"""
"""
def setUp(self):
self.m_pyhouse_obj = SetupPyHouseObj().BuildPyHouseObj()
self.m_xml = SetupPyHouseObj().BuildXml()
self.m_cntl_api = controllerApi()
self.m_light_api = lightingApi()
class A0(unittest.TestCase):
def test_00_Print(self):
_x = PrettyFormatAny.form('_test', 'title', 190) # so it is defined when printing is cleaned up.
print('Id: test_Insteon_Light')
class A1_Prep(SetupMixin, unittest.TestCase):
""" This section tests the setup
"""
def setUp(self):
SetupMixin.setUp(self)
self.m_device = None
def test_01_PyHouse(self):
""" Did we get everything set up for the rest of the tests of this class.
"""
self.assertIsInstance(self.m_pyhouse_obj.House, HouseInformation)
def test_02_FindXml(self):
""" Did we get everything set up for the rest of the tests of this class.
"""
self.assertEqual(self.m_xml.root.tag, TESTING_PYHOUSE)
self.assertEqual(self.m_xml.house_div.tag, 'HouseDivision')
self.assertEqual(self.m_xml.lighting_sect.tag, 'LightingSection')
self.assertEqual(self.m_xml.light_sect.tag, 'LightSection')
self.assertEqual(self.m_xml.button_sect.tag, 'ButtonSection')
self.assertEqual(self.m_xml.controller_sect.tag, 'ControllerSection')
def test_03_House(self):
""" Did we get everything set up for the rest of the tests of this class.
"""
pass
def test_04_Objs(self):
""" Did we get everything set up for the rest of the tests of this class.
"""
pass
def test_05_XML(self):
""" Did we get everything set up for the rest of the tests of this class.
"""
pass
def test_06_Device(self):
""" Be sure that the XML contains the right stuff.
"""
class B1_Util(SetupMixin, unittest.TestCase):
"""This tests the utility section of decoding
"""
def setUp(self):
SetupMixin.setUp(self)
self.m_ctrlr = ControllerInformation()
def test_01_GetObjFromMsg(self):
self.m_ctrlr._Message = MSG_50_A
self.m_controllers = self.m_cntl_api.read_all_controllers_xml(self.m_pyhouse_obj)
self.m_pyhouse_obj.House.Lighting.Controllers = self.m_controllers
print(PrettyFormatAny.form(self.m_pyhouse_obj.House.Lighting, 'B1-01-A Lighting'))
l_ctlr = self.m_pyhouse_obj.House.Lighting.Controllers[0]
print(PrettyFormatAny.form(l_ctlr, 'B1-01-B Controller'))
self.assertEqual(l_ctlr.Name, TESTING_CONTROLLER_NAME_0)
def test_02_NextMsg(self):
self.m_ctrlr._Message = MSG_50_A
# l_msg = Util().get_next_message(self.m_ctrlr)
# print(PrintBytes(l_msg))
# self.assertEqual(l_msg[1], 0x50)
# self.m_ctrlr._Message = bytearray()
# l_msg = self.m_util.get_next_message(self.m_ctrlr)
# self.assertEqual(l_msg, None)
# self.m_ctrlr._Message = MSG_62 + MSG_50
# l_msg = self.m_util.get_next_message(self.m_ctrlr)
# print('Msg {}'.format(FormatBytes(l_msg)))
# print('remaning: {}'.format(FormatBytes(self.m_ctrlr._Message)))
# self.assertEqual(l_msg[1], 0x62)
self.assertEqual(self.m_ctrlr._Message[1], 0x50)
class B2_Decode(SetupMixin, unittest.TestCase):
"""This tests the utility section of decoding
"""
def setUp(self):
SetupMixin.setUp(self)
self.m_ctrlr = ControllerInformation()
        self.m_decode = insteon_decoder.DecodeResponses(self.m_pyhouse_obj, self.m_ctrlr)
def test_01_GetObjFromMsg(self):
self.m_ctrlr._Message = MSG_50_A
l_ctlr = self.m_decode.decode_message(self.m_ctrlr)
print(l_ctlr, 'B2-01-A Controller')
class C1_Light(SetupMixin, unittest.TestCase):
def setUp(self):
SetupMixin.setUp(self)
self.m_pyhouse_obj.House.Lighting.Controllers = self.m_cntl_api.read_all_controllers_xml(self.m_pyhouse_obj)
self.m_pyhouse_obj.House.Lighting.Lights = self.m_light_api.read_all_lights_xml(self.m_pyhouse_obj)
self.m_ctrlr = self.m_pyhouse_obj.House.Lighting.Controllers[0]
# print(PrettyFormatAny.form(self.m_ctrlr, "C1-0Controlelrs"))
self.m_pyhouse_obj.Core.MqttApi = DummyApi()
def test_01_x(self):
self.m_ctrlr._Message = MSG_50_A
l_device_obj = utilDecode().get_obj_from_message(self.m_pyhouse_obj, self.m_ctrlr._Message[2:5])
l_decode = Decode_Light().decode_0x50(self.m_pyhouse_obj, self.m_ctrlr, l_device_obj)
print(PrettyFormatAny.form(l_device_obj, "C1-01-A - Decode"))
self.assertEqual(len(self.m_ctrlr._Message), 0)
def test_02_x(self):
self.m_ctrlr._Message = MSG_50_B
l_device_obj = utilDecode().get_obj_from_message(self.m_pyhouse_obj, self.m_ctrlr._Message[2:5])
l_decode = Decode_Light().decode_0x50(self.m_pyhouse_obj, self.m_ctrlr, l_device_obj)
print(PrettyFormatAny.form(l_device_obj, "C1-02-A - Decode"))
self.assertEqual(len(self.m_ctrlr._Message), 0)
# ## END DBK
|
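A sketch of the byte layout the MSG_50 fixtures above appear to follow (an Insteon "standard message received", type 0x50): a two-byte header, three-byte from- and to-addresses, a flag byte, and cmd1/cmd2. The [2:5] slice the tests pass to get_obj_from_message is the from-address; the field names here are illustrative:

msg = bytearray(b'\x02\x50\x16\x62\x2d\x1b\x47\x81\x27\x09\x00')  # MSG_50_A
start_byte, msg_type = msg[0], msg[1]                 # 0x02, 0x50
from_addr = '.'.join('%02X' % b for b in msg[2:5])    # '16.62.2D'
to_addr = '.'.join('%02X' % b for b in msg[5:8])      # '1B.47.81'
flags, cmd1, cmd2 = msg[8], msg[9], msg[10]
print(from_addr, to_addr, hex(cmd1), hex(cmd2))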
bingweichen/GOKU | backend/server/utility/__init__.py | Python | apache-2.0 | 61 | 0.016393 |
#
from server.utility.service_utility import count_total_page
|
KonichiwaKen/band-dashboard | authentication/views.py | Python | mit | 4,259 | 0.000704 |
import json
from django.contrib.auth import authenticate
from django.contrib.auth import login
from django.contrib.auth import logout
from django.contrib.auth import update_session_auth_hash
from rest_framework import status
from rest_framework import views
from rest_framework import viewsets
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from authentication.models import Account
from authentication.permissions import CanCreateAccount
from authentication.permissions import IsAccountAdminOrAccountOwner
from authentication.serializers import AccountSerializer
from attendance.models import Band
from emails.tasks import send_unsent_emails
from members.models import BandMember
class AccountViewSet(viewsets.ModelViewSet):
queryset = Account.objects.all()
serializer_class = AccountSerializer
permission_classes = (
IsAccountAdminOrAccountOwner,
IsAuthenticated,
)
def create(self, request):
        serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.validated_data, status=status.HTTP_201_CREATED)
return Response({
'status': 'Bad request',
'message': 'Account could not be created with received data.',
}, status=status.HTTP_400_BAD_REQUEST)
def partial_update(self, request, pk=None):
data = json.loads(request.body)
if 'password' in data and request.user.id != int(pk):
return Response({
'status': "Forbidden",
'message': "Don't have permission to update password",
}, status=status.HTTP_403_FORBIDDEN)
return super(AccountViewSet, self).partial_update(request, pk=pk)
class LoginView(views.APIView):
def post(self, request, format=None):
data = json.loads(request.body)
email = data.get('email', None)
password = data.get('password', None)
account = authenticate(email=email, password=password)
if account is not None:
if account.is_active:
login(request, account)
serialized = AccountSerializer(account)
return Response(serialized.data)
else:
return Response({
'status': 'Unauthorized',
'message': 'This account has been disabled.'
}, status=status.HTTP_401_UNAUTHORIZED)
else:
return Response({
'status': 'Unauthorized',
'message': 'Email/password combination invalid.'
}, status=status.HTTP_401_UNAUTHORIZED)
class LogoutView(views.APIView):
permission_classes = (IsAuthenticated,)
def post(self, request, format=None):
logout(request)
return Response({}, status=status.HTTP_204_NO_CONTENT)
class CreateAccountsView(views.APIView):
permission_classes = (CanCreateAccount, IsAuthenticated,)
def post(self, request, format=None):
data = json.loads(request.body)
for account_data in data['accounts']:
section = account_data.pop('section')
account = Account.objects.create_user(**account_data)
band_member = BandMember.objects.create(section=section, account=account)
for band in Band.objects.all():
band.unassigned_members.add(band_member)
band.save()
return Response({}, status=status.HTTP_201_CREATED)
class CreatePasswordView(views.APIView):
def post(self, request, format=None):
data = json.loads(request.body)
email = request.user.email
password = data.get('password')
if not email or not password:
return Response({}, status=status.HTTP_400_BAD_REQUEST)
try:
account = Account.objects.get(email=email)
account.is_registered = True
account.set_password(password)
account.save()
update_session_auth_hash(request, account)
return Response({}, status=status.HTTP_204_NO_CONTENT)
except Account.DoesNotExist:
return Response({}, status=status.HTTP_400_BAD_REQUEST)
|
MaxTyutyunnikov/lino | lino/utils/jscompressor.py | Python | gpl-3.0 | 5,152 | 0.011258 |
## {{{ http://code.activestate.com/recipes/496882/ (r8)
'''
http://code.activestate.com/recipes/496882/
Author: Michael Palmer 13 Jul 2006
a regex-based JavaScript code compression kludge
'''
import re
class JSCompressor(object):
def __init__(self, compressionLevel=2, measureCompression=False):
'''
compressionLevel:
0 - no compression, script returned unchanged. For debugging only -
try if you suspect that compression compromises your script
1 - Strip comments and empty lines, don't change line breaks and indentation (code remains readable)
2 - Additionally strip insignificant whitespace (code will become quite unreadable)
measureCompression: append a comment stating the extent of compression
'''
self.compressionLevel = compressionLevel
self.measureCompression = measureCompression
# a bunch of regexes used in compression
# first, exempt string and regex literals from compression by transient substitution
findLiterals = re.compile(r'''
(\'.*?(?<=[^\\])\') | # single-quoted strings
(\".*?(?<=[^\\])\") | # double-quoted strings
((?<![\*\/])\/(?![\/\*]).*?(?<![\\])\/) # JS regexes, trying hard not to be tripped up by comments
''', re.VERBOSE)
# literals are temporarily replaced by numbered placeholders
literalMarker = '@_@%d@_@' # temporary replacement
backSubst = re.compile('@_@(\d+)@_@') # put the string literals back in
mlc1 = re.compile(r'(\/\*.*?\*\/)') # /* ... */ comments on single line
mlc = re.compile(r'(\/\*.*?\*\/)', re.DOTALL) # real multiline comments
slc = re.compile('\/\/.*') # remove single line comments
collapseWs = re.compile('(?<=\S)[ \t]+') # collapse successive non-leading white space characters into one
squeeze = re.compile('''
\s+(?=[\}\]\)\:\&\|\=\;\,\.\+]) | # remove whitespace preceding control characters
(?<=[\{\[\(\:\&\|\=\;\,\.\+])\s+ | # ... or following such
[ \t]+(?=\W) | # remove spaces or tabs preceding non-word characters
(?<=\W)[ \t]+ # ... or following such
'''
, re.VERBOSE | re.DOTALL)
def compress(self, script):
'''
perform compression and return compressed script
'''
if self.compressionLevel == 0:
return script
lengthBefore = len(script)
# first, substitute string literals by placeholders to prevent the regexes messing with them
literals = []
def insertMarker(mo):
l = mo.group()
literals.append(l)
return self.literalMarker % (len(literals) - 1)
script = self.findLiterals.sub(insertMarker, script)
# now, to the literal-stripped carcass, apply some kludgy regexes for deflation...
script = self.slc.sub('', script) # strip single line comments
script = self.mlc1.sub(' ', script) # replace /* .. */ comments on single lines by space
script = self.mlc.sub('\n', script) # replace real multiline comments by newlines
# remove empty lines and trailing whitespace
script = '\n'.join([l.rstrip() for l in script.splitlines() if l.strip()])
if self.compressionLevel == 2: # squeeze out any dispensible whitespace
script = self.squeeze.sub('', script)
elif self.compressionLevel == 1: # only collapse multiple whitespace characters
script = self.collapseWs.sub(' ', script)
# now back-substitute the string and regex literals
def backsub(mo):
return literals[int(mo.group(1))]
        script = self.backSubst.sub(backsub, script)
if self.measureCompression:
lengthAfter = float(len(script))
squeezedBy = int(100*(1-lengthAfter/lengthBefore))
script += '\n// squeezed out %s%%\n' % squeezedBy
return script
if __name__ == '__main__':
script = '''
/* this is a totally useless multiline comment, containing a silly "quoted string",
surrounded by several superfluous line breaks
*/
// and this is an equally important single line comment
sth = "this string contains 'quotes', a /regex/ and a // comment yet it will survive compression";
function wurst(){ // this is a great function
var hans = 33;
}
sthelse = 'and another useless string';
function hans(){ // another function
var bill = 66; // successive spaces will be collapsed into one;
var bob = 77 // this line break will be preserved b/c of lacking semicolon
var george = 88;
}
'''
for x in range(1,3):
print '\ncompression level', x, ':\n--------------'
c = JSCompressor(compressionLevel=x, measureCompression=True)
cpr = c.compress(script)
print cpr
print 'length', len(cpr)
## end of http://code.activestate.com/recipes/496882/ }}}
|
tschaume/pymatgen | pymatgen/analysis/chemenv/coordination_environments/tests/test_coordination_geometries.py | Python | mit | 19,397 | 0.005104 |
#!/usr/bin/env python
__author__ = 'waroquiers'
import unittest
import numpy as np
from pymatgen.util.testing import PymatgenTest
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import ExplicitPermutationsAlgorithm
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import SeparationPlane
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import AllCoordinationGeometries
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import CoordinationGeometry
allcg = AllCoordinationGeometries()
class FakeSite:
def __init__(self, coords):
self.coords = coords
class CoordinationGeometriesTest(PymatgenTest):
def test_algorithms(self):
expl_algo = ExplicitPermutationsAlgorithm(permutations=[[0, 1, 2], [1, 2, 3]])
expl_algo2 = ExplicitPermutationsAlgorithm.from_dict(expl_algo.as_dict)
self.assertEqual(expl_algo.permutations, expl_algo2.permutations)
sepplane_algos_oct = allcg['O:6'].algorithms
self.assertEqual(len(sepplane_algos_oct[0].safe_separation_permutations()), 24)
self.assertEqual(len(sepplane_algos_oct[1].safe_separation_permutations()), 36)
sepplane_algos_oct_0 = SeparationPlane.from_dict(sepplane_algos_oct[0].as_dict)
self.assertEqual(sepplane_algos_oct[0].plane_points, sepplane_algos_oct_0.plane_points)
self.assertEqual(sepplane_algos_oct[0].mirror_plane, sepplane_algos_oct_0.mirror_plane)
self.assertEqual(sepplane_algos_oct[0].ordered_plane, sepplane_algos_oct_0.ordered_plane)
self.assertEqual(sepplane_algos_oct[0].point_groups, sepplane_algos_oct_0.point_groups)
self.assertEqual(sepplane_algos_oct[0].ordered_point_groups, sepplane_algos_oct_0.ordered_point_groups)
self.assertTrue(all([np.array_equal(perm, sepplane_algos_oct_0.explicit_optimized_permutations[iperm])
for iperm, perm in enumerate(sepplane_algos_oct[0].explicit_optimized_permutations)]))
self.assertEqual(sepplane_algos_oct[0].__str__(),
'Separation plane algorithm with the following reference separation :\n'
'[[4]] | [[0, 2, 1, 3]] | [[5]]')
def test_hints(self):
hints = CoordinationGeometry.NeighborsSetsHints(hints_type='single_cap',
options={'cap_index': 2, 'csm_max': 8})
myhints = hints.hints({'csm': 12.0})
self.assertEqual(myhints, [])
hints2 = CoordinationGeometry.NeighborsSetsHints.from_dict(hints.as_dict())
self.assertEqual(hints.hints_type, hints2.hints_type)
self.assertEqual(hints.options, hints2.options)
def test_coordination_geometry(self):
cg_oct = allcg['O:6']
cg_oct2 = CoordinationGeometry.from_dict(cg_oct.as_dict())
self.assertArrayAlmostEqual(cg_oct.central_site, cg_oct2.central_site)
self.assertArrayAlmostEqual(cg_oct.points, cg_oct2.points)
self.assertEqual(cg_oct.__str__(), 'Coordination geometry type : Octahedron (IUPAC: OC-6 || IUCr: [6o])\n'
'\n'
' - coordination number : 6\n'
' - list of points :\n'
' - [0.0, 0.0, 1.0]\n'
' - [0.0, 0.0, -1.0]\n'
' - [1.0, 0.0, 0.0]\n'
' - [-1.0, 0.0, 0.0]\n'
' - [0.0, 1.0, 0.0]\n'
' - [0.0, -1.0, 0.0]\n'
'------------------------------------------------------------\n')
self.assertEqual(cg_oct.__len__(), 6)
self.assertEqual(cg_oct.ce_symbol, cg_oct.mp_symbol)
self.assertTrue(cg_oct.is_implemented())
self.assertEqual(cg_oct.get_name(), 'Octahedron')
self.assertEqual(cg_oct.IUPAC_symbol, 'OC-6')
self.assertEqual(cg_oct.IUPAC_symbol_str, 'OC-6')
self.assertEqual(cg_oct.IUCr_symbol, '[6o]')
self.assertEqual(cg_oct.IUCr_symbol_str, '[6o]')
cg_oct.permutations_safe_override = True
        self.assertEqual(cg_oct.number_of_permutations, 720.0)
self.assertEqual(cg_oct.ref_permutation([0, 3, 2, 4, 5, 1]), (0, 3, 1, 5, 2, 4))
sites = [FakeSite(coords=pp) for pp in cg_oct.points]
faces = [[[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [0.0, -1.0, 0.0]],
[[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [0.0, 0.0, -1.0]],
[[0.0, 0.0, 1.0], [0.0, 1.0, 0.0], [0.0, -1.0, 0.0]],
[[0.0, 0.0, 1.0], [0.0, 1.0, 0.0], [0.0, 0.0, -1.0]],
[[-1.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, -1.0, 0.0]],
[[-1.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, -1.0]],
[[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, -1.0, 0.0]],
[[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, -1.0]]]
self.assertArrayAlmostEqual(cg_oct.faces(sites=sites, permutation=[0, 3, 2, 4, 5, 1]), faces)
faces = [[[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
[[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [0.0, -1.0, 0.0]],
[[0.0, 0.0, 1.0], [-1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
[[0.0, 0.0, 1.0], [-1.0, 0.0, 0.0], [0.0, -1.0, 0.0]],
[[0.0, 0.0, -1.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
[[0.0, 0.0, -1.0], [1.0, 0.0, 0.0], [0.0, -1.0, 0.0]],
[[0.0, 0.0, -1.0], [-1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
[[0.0, 0.0, -1.0], [-1.0, 0.0, 0.0], [0.0, -1.0, 0.0]]]
self.assertArrayAlmostEqual(cg_oct.faces(sites=sites), faces)
edges = [[[0.0, 0.0, 1.0], [1.0, 0.0, 0.0]],
[[0.0, 0.0, 1.0], [0.0, 1.0, 0.0]],
[[0.0, 0.0, 1.0], [0.0, -1.0, 0.0]],
[[0.0, 0.0, 1.0], [0.0, 0.0, -1.0]],
[[-1.0, 0.0, 0.0], [1.0, 0.0, 0.0]],
[[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
[[-1.0, 0.0, 0.0], [0.0, -1.0, 0.0]],
[[-1.0, 0.0, 0.0], [0.0, 0.0, -1.0]],
[[1.0, 0.0, 0.0], [0.0, -1.0, 0.0]],
[[1.0, 0.0, 0.0], [0.0, 0.0, -1.0]],
[[0.0, 1.0, 0.0], [0.0, -1.0, 0.0]],
[[0.0, 1.0, 0.0], [0.0, 0.0, -1.0]]]
self.assertArrayAlmostEqual(cg_oct.edges(sites=sites, permutation=[0, 3, 2, 4, 5, 1]), edges)
edges = [[[0.0, 0.0, 1.0], [1.0, 0.0, 0.0]],
[[0.0, 0.0, 1.0], [-1.0, 0.0, 0.0]],
[[0.0, 0.0, 1.0], [0.0, 1.0, 0.0]],
[[0.0, 0.0, 1.0], [0.0, -1.0, 0.0]],
[[0.0, 0.0, -1.0], [1.0, 0.0, 0.0]],
[[0.0, 0.0, -1.0], [-1.0, 0.0, 0.0]],
[[0.0, 0.0, -1.0], [0.0, 1.0, 0.0]],
[[0.0, 0.0, -1.0], [0.0, -1.0, 0.0]],
[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
[[1.0, 0.0, 0.0], [0.0, -1.0, 0.0]],
[[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
[[-1.0, 0.0, 0.0], [0.0, -1.0, 0.0]]]
self.assertArrayAlmostEqual(cg_oct.edges(sites=sites), edges)
self.assertArrayAlmostEqual(cg_oct.solid_angles(),
[2.0943951, 2.0943951, 2.0943951, 2.0943951, 2.0943951, 2.0943951])
pmeshes = cg_oct.get_pmeshes(sites=sites)
self.assertEqual(pmeshes[0]['pmesh_string'],
'14\n 0.00000000 0.00000000 1.00000000\n'
' 0.00000000 0.00000000 -1.00000000\n'
' 1.00000000 0.00000000 0.00000000\n'
' -1.00000000 0.00000000 0.00000000\n'
' 0.00000000 1.00000000 0.00000000\n'
' 0.00000000 -1.00000000 0.00000000\n'
' 0.33333333 0.33333333 0.3333
|
denim2x/Vintageous
|
ex/parser/nodes.py
|
Python
|
mit
| 6,391 | 0.001095 |
from Vintageous.ex.ex_error import ERR_NO_RANGE_ALLOWED
from Vintageous.ex.ex_error import VimError
from Vintageous.ex.parser.tokens import TokenDigits
from Vintageous.ex.parser.tokens import TokenDollar
from Vintageous.ex.parser.tokens import TokenDot
from Vintageous.ex.parser.tokens import TokenMark
from Vintageous.ex.parser.tokens import TokenOffset
from Vintageous.ex.parser.tokens import TokenOfSearch
from Vintageous.ex.parser.tokens import TokenPercent
from Vintageous.ex.parser.tokens import TokenSearchBackward
from Vintageous.ex.parser.tokens import TokenSearchForward
from Vintageous.vi.search import reverse_search_by_pt
from Vintageous.vi.utils import first_sel
from Vintageous.vi.utils import R
from Vintageous.vi.utils import row_at
class Node(object):
pass
class RangeNode(Node):
'''
Represents a Vim line range.
'''
def __init__(self, start=None, end=None, separator=None):
self.start = start or []
self.end = end or []
self.separator = separator
def __str__(self):
return '{0}{1}{2}'.format(
''.join(str(x) for x in self.start),
str(self.separator) if self.separator else '',
''.join(str(x) for x in self.end),
)
    def __repr__(self):
        return ('RangeNode<{0}(start:{1}, end:{2}, separator:{3})>'
.format(self.__class__.__name__, self.start, self.end, self.separator))
def __eq__(self, other):
if not isinstance(other, RangeNode):
return False
return (self.start == other.start and
self.end == other.end and
self.separator == other.separator)
@property
def is_empty(self):
'''
Indicates whether this range has ever been defined. For example, in
interactive mode, if `true`, it means that the user hasn't provided
any line range on the command line.
'''
return not any((self.start, self.end, self.separator))
def resolve_notation(self, view, token, current):
'''
Returns a line number.
'''
if isinstance(token, TokenDot):
pt = view.text_point(current, 0)
return row_at(view, pt)
if isinstance(token, TokenDigits):
return max(int(str(token)) - 1, -1)
if isinstance(token, TokenPercent):
return row_at(view, view.size())
if isinstance(token, TokenDollar):
return row_at(view, view.size())
if isinstance(token, TokenOffset):
return current + sum(token.content)
if isinstance(token, TokenSearchForward):
start_pt = view.text_point(current, 0)
match = view.find(str(token)[1:-1], start_pt)
if not match:
# TODO: Convert this to a VimError or something like that.
raise ValueError('pattern not found')
return row_at(view, match.a)
if isinstance(token, TokenSearchBackward):
start_pt = view.text_point(current, 0)
match = reverse_search_by_pt(view, str(token)[1:-1], 0, start_pt)
if not match:
# TODO: Convert this to a VimError or something like that.
raise ValueError('pattern not found')
return row_at(view, match.a)
if isinstance(token, TokenMark):
return self.resolve_mark(view, token)
raise NotImplementedError()
def resolve_mark(self, view, token):
if token.content == '<':
sel = list(view.sel())[0]
view.sel().clear()
view.sel().add(sel)
if sel.a < sel.b:
return row_at(view, sel.a)
else:
return row_at(view, sel.a - 1)
if token.content == '>':
sel = list(view.sel())[0]
view.sel().clear()
view.sel().add(sel)
            if sel.a < sel.b:
return row_at(view, sel.b - 1)
else:
return row_at(view, sel.b)
raise NotImplementedError()
def resolve_line_reference(self, view, line_reference, current=0):
'''
Calculates the line offset determined by @line_reference.
@view
The view where the calculation is made.
@line_reference
The sequence of tokens defining the line range to be calculated.
@current
Line number where we are now.
'''
last_token = None
# XXX: what happens if there is no selection in the view?
current = row_at(view, first_sel(view).b)
for token in line_reference:
# Make sure a search forward doesn't overlap with a match obtained
# right before this search.
if isinstance(last_token, TokenOfSearch) and isinstance(token, TokenOfSearch):
if isinstance(token, TokenSearchForward):
current += 1
current = self.resolve_notation(view, token, current)
last_token = token
return current
def resolve(self, view):
'''
Returns a Sublime Text range representing the Vim line range that the
ex command should operate on.
'''
start = self.resolve_line_reference(view, self.start or [TokenDot()])
if not self.separator:
if start == -1:
return R(-1, -1)
if len(self.start) == 1 and isinstance(self.start[0], TokenPercent):
return R(0, view.size())
return view.full_line(view.text_point(start, 0))
new_start = start if self.separator == ';' else 0
end = self.resolve_line_reference(view, self.end or [TokenDot()], current=new_start)
return view.full_line(R(view.text_point(start, 0), view.text_point(end, 0)))
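# A usage sketch for RangeNode (illustrative only: the token classes come from
# the ex parser and resolve() assumes a live Sublime Text view):
#
#   rn = RangeNode(start=[TokenDigits('1')], end=[TokenDollar()], separator=',')
#   str(rn)           # -> '1,$', i.e. the Vim "whole buffer" range
#   rn.resolve(view)  # -> a region spanning those buffer lines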
class CommandLineNode(Node):
def __init__(self, line_range, command):
# A RangeNode
self.line_range = line_range
# A TokenOfCommand
self.command = command
def __str__(self):
return '{0}, {1}'.format(str(self.line_range), str(self.command))
def validate(self):
'''
Raises an error for known conditions.
'''
if not (self.command and self.line_range):
return
if not self.command.addressable and not self.line_range.is_empty:
raise VimError(ERR_NO_RANGE_ALLOWED)
|
torkelsson/meta-package-manager
|
meta_package_manager/base.py
|
Python
|
gpl-2.0
| 11,025 | 0 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016-2018 Kevin Deldycke <[email protected]>
# and contributors.
# All Rights Reserved.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals
)
import os
from boltons.cacheutils import cachedproperty
from boltons.strutils import indent, strip_ansi
from packaging.specifiers import SpecifierSet
from packaging.version import parse as parse_version
from . import logger
from .bitbar import run
from .platform import current_os
try:
from shutil import which
except ImportError:
from backports.shutil_which import which
# Rendering format of CLI in JSON fields.
CLI_FORMATS = frozenset(['plain', 'fragments', 'bitbar'])
class CLIError(Exception):
""" An error occured when running package manager CLI. """
def __init__(self, code, output, error):
""" The exception internally keeps the result of CLI execution. """
super(CLIError, self).__init__()
self.code = code
self.output = output
self.error = error
def __str__(self):
""" Human-readable error. """
margin = ' ' * 2
return indent((
"\nReturn code: {}\n"
"Output:\n{}\n"
"Error:\n{}").format(
self.code,
indent(str(self.output), margin),
indent(str(self.error), margin)), margin)
class PackageManager(object):
""" Base class from which all package manager definitions should inherits.
"""
# Systematic options passed to package manager CLI. Might be of use to
# force silencing or high verbosity for instance.
cli_args = []
# List of platforms supported by the manager.
platforms = frozenset()
# Version requirement specifier.
requirement = None
def __init__(self):
# Tell the manager either to raise or continue on errors.
self.raise_on_cli_error = False
# Some managers have the ability to report or ignore packages
        # possessing their own auto-update mechanism.
self.ignore_auto_updates = True
# Log of all encountered CLI errors.
self.cli_errors = []
@cachedproperty
def cli_name(self):
""" Package manager's CLI name.
Is derived by default from the manager's ID.
"""
return self.id
@cachedproperty
def cli_path(self):
""" Fully qualified path to the package manager CLI.
        Automatically searches for the location of the CLI on the system.
Returns `None` if CLI is not found or is not a file.
"""
cli_path = which(self.cli_name, mode=os.F_OK)
logger.debug(
"CLI found at {}".format(cli_path) if cli_path
else "{} CLI not found.".format(self.cli_name))
return cli_path
def get_version(self):
""" Invoke the manager and extract its own reported version. """
raise NotImplementedError
@cachedproperty
def version_string(self):
""" Raw but cleaned string of the package manager version.
Returns `None` if the manager had an issue extracting its version.
"""
if self.executable:
version = self.get_version()
if version:
return version.strip()
@cachedproperty
def version(self):
""" Parsed and normalized package manager's own version.
Returns an instance of ``packaging.Version`` or None.
"""
if self.version_string:
return parse_version(self.version_string)
@cachedproperty
def id(self):
""" Return package manager's ID. Defaults based on class name.
This ID must be unique among all package manager definitions and
lower-case as they're used as feature flags for the :command:`mpm` CLI.
"""
return self.__class__.__name__.lower()
@cachedproperty
def name(self):
""" Return package manager's common name. Defaults based on class name.
"""
return self.__class__.__name__
@cachedproperty
def supported(self):
""" Is the package manager supported on that platform? """
return current_os()[0] in self.platforms
@cachedproperty
def executable(self):
""" Is the package manager CLI can be executed by the current user? """
if not self.cli_path:
return False
if not os.access(self.cli_path, os.X_OK):
logger.debug("{} not executable.".format(self.cli_path))
return False
return True
@cachedproperty
def fresh(self):
""" Does the package manager match the version requirement? """
# Version is mandatory.
if not self.version:
return False
if self.requirement:
if self.version not in SpecifierSet(self.requirement):
logger.debug(
"{} {} doesn't fit the '{}' version requirement.".format(
self.id, self.version, self.requirement))
return False
return True
@cachedproperty
def available(self):
""" Is the package manager available and ready-to-use on the system?
Returns True only if the main CLI:
1 - is supported on the current platform,
2 - was found on the system,
3 - is executable, and
        4 - matches the version requirement.
"""
return bool(
self.supported and
self.cli_path and
self.executable and
self.fresh)
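    # A hypothetical subclass sketch (names and values below are illustrative,
    # not a real manager definition shipped with this module):
    #
    #   class Pip(PackageManager):
    #       platforms = frozenset([LINUX, MACOS])   # platform flags assumed
    #       requirement = '>= 10.0.0'
    #       def get_version(self):
    #           return self.run([self.cli_path, '--version'])
    #
    #   Pip().available  # True only when all four conditions above hold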
def run(self, args, dry_run=False):
""" Run a shell command, return the output and keep error message.
        Removes ANSI escape codes, and returns ready-to-use strings.
"""
assert isinstance(args, list)
logger.debug("Running `{}`...".format(' '.join(args)))
code = 0
output = None
error = None
if not dry_run:
code, output, error = run(*args)
else:
logger.warning("Dry-run mode active: skip execution of command.")
# Normalize messages.
if error:
error = strip_ansi(error)
error = error if error else None
if output:
output = strip_ansi(output)
output = output if output else None
if code and error:
exception = CLIError(code, output, error)
if self.raise_on_cli_error:
raise exception
else:
logger.error(error)
self.cli_errors.append(exception)
logger.debug(output)
return output
@property
def sync(self):
""" Refresh local manager metadata from remote repository. """
logger.info('Sync {} package info...'.format(self.id))
@property
def installed(self):
""" List packages currently installed on the system.
Returns a dict indexed by package IDs. Each item is a dict with
package ID, name and version.
"""
raise NotImplementedError
@staticmethod
def exact_match(query, result):
""" Compare search query and matching result.
        Returns `True` if the matching result exactly matches the search query.
        Still applies a light normalization and tokenization of strings before
        comparison, so that "exactness" is judged in the human sense rather
        than the strict machine sense.
|
tseaver/google-cloud-python
|
logging/google/cloud/logging/_helpers.py
|
Python
|
apache-2.0
| 3,909 | 0 |
# Copyright 2016 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common logging helpers."""
import logging
import requests
from google.cloud.logging.entries import LogEntry
from google.cloud.logging.entries import ProtobufEntry
from google.cloud.logging.entries import StructEntry
from google.cloud.logging.entries import TextEntry
try:
from google.cloud.logging_v2.gapic.enums import LogSeverity
except ImportError: # pragma: NO COVER
class LogSeverity(object):
"""Map severities for non-GAPIC usage."""
DEFAULT = 0
DEBUG = 100
INFO = 200
NOTICE = 300
WARNING = 400
ERROR = 500
CRITICAL = 600
ALERT = 700
EMERGENCY = 800
_NORMALIZED_SEVERITIES = {
logging.CRITICAL: LogSeverity.CRITICAL,
logging.ERROR: LogSeverity.ERROR,
logging.WARNING: LogSeverity.WARNING,
logging.INFO: LogSeverity.INFO,
logging.DEBUG: LogSeverity.DEBUG,
logging.NOTSET: LogSeverity.DEFAULT,
}
METADATA_URL = "http://metadata.google.internal./computeMetadata/v1/"
METADATA_HEADERS = {"Metadata-Flavor": "Google"}
def entry_from_resource(resource, client, loggers):
"""Detect correct entry type from resource and ins
|
tantiate.
:type resource: dict
:param resource: One entry resource from API response.
:type client: :class:`~google.cloud.logging.client.Client`
    :param client: Client that owns the log entry.
:type loggers: dict
:param loggers:
A mapping of logger fullnames -> loggers. If the logger
that owns the entry is not in ``loggers``, the entry
will have a newly-created logger.
:rtype: :class:`~google.cloud.logging.entries._BaseEntry`
:returns: The entry instance, constructed via the resource
"""
if "textPayload" in resource:
return TextEntry.from_api_repr(resource, client, loggers)
if "jsonPayload" in resource:
return StructEntry.from_api_repr(resource, client, loggers)
if "protoPayload" in resource:
return ProtobufEntry.from_api_repr(resource, client, loggers)
return LogEntry.from_api_repr(resource, client, loggers)
def retrieve_metadata_server(metadata_key):
"""Retrieve the metadata key in the metadata server.
See: https://cloud.google.com/compute/docs/storing-retrieving-metadata
:type metadata_key: str
:param metadata_key: Key of the metadata which will form the url. You can
also supply query parameters after the metadata key.
e.g. "tags?alt=json"
:rtype: str
:returns: The value of the metadata key returned by the metadata server.
"""
url = METADATA_URL + metadata_key
try:
response = requests.get(url, headers=METADATA_HEADERS)
if response.status_code == requests.codes.ok:
return response.text
except requests.exceptions.RequestException:
# Ignore the exception, connection failed means the attribute does not
# exist in the metadata server.
pass
return None
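# Usage sketch (only resolves on GCE/GKE instances where the metadata server
# is reachable; returns None elsewhere; the key is illustrative):
#
#   zone = retrieve_metadata_server('instance/zone')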
def _normalize_severity(stdlib_level):
"""Normalize a Python stdlib severity to LogSeverity enum.
:type stdlib_level: int
:param stdlib_level: 'levelno' from a :class:`logging.LogRecord`
:rtype: int
:returns: Corresponding Stackdriver severity.
"""
return _NORMALIZED_SEVERITIES.get(stdlib_level, stdlib_level)
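# Behaviour sketch for _normalize_severity, following the mapping above:
#
#   _normalize_severity(logging.WARNING)  # -> 400 (LogSeverity.WARNING)
#   _normalize_severity(35)               # -> 35 (custom levels pass through)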
|
mswart/openvpn2dns
|
tests/test_parser.py
|
Python
|
mit
| 1,061 | 0.00377 |
# -*- coding: UTF-8 -*-
from openvpnzone import extract_zones_from_status_file
from IPy import IP
def test_empty_server():
assert extract_zones_from_status_file('tests/samples/empty.ovpn-status-v1') \
== {}
def test_one_client_on_server():
assert extract_zones_from_status_file('tests/samples/one.ovpn-status-v1') \
== {'one.vpn.example.org': [IP('198.51.100.8')]}
def test_multiple_client_on_server():
assert extract_zones_from_status_file('tests/samples/multiple.ovpn-status-v1') \
== {
'one.vpn.example.org': [IP('198.51.100.8')],
'two.vpn.example.org': [IP('198.51.100.12')],
'three.vpn.example.org': [IP('198.51.100.16')]
}
def test_subnet_for_client():
assert extract_zones_from_status_file('tests/samples/subnet.ovpn-status-v1') \
== {'one.vpn.example.org': [IP('198.51.100.8')]}
def test_cached_route():
assert extract_zones_from_status_file('tests/samples/cached-route.ovpn-status-v1') \
        == {'one.vpn.example.org': [IP('198.51.100.8')]}
|
baile/infojoiner
|
infojoiner/datacenter/functional_test.py
|
Python
|
mit
| 2,451 | 0.001224 |
import unittest
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
class NewVisitorTest(unittest.TestCase):
def setUp(self):
self.browser = webdriver.Firefox()
def tearDown(self):
self.browser.quit()
def test_can_show_main_menu_and_go_to_each_section(self):
# Jon has heard about a cool new online data joining app. He goes
# to check out its homepage
self.browser.get('http://localhost:8000')
# The page title and header mention data center
self.assertIn('InfoJoiner DataCenter', self.browser.title)
header_text = self.browser.find_element_by_tag_name('h1').text
self.assertIn('InfoJoiner DataCenter', header_text)
# There is a HTML5 nav menu
# Iterate all menu items "sources","views","tags"
# Foreach menu item enter in page and check title is
# "Menu Title - IJDC"
"""
# He is invited to enter a to-do item straight away
inputbox = self.browser.find_element_by_id('id_new_item')
self.assertEqual(
inputbox.get_attribute('placeholder'),
'Enter a to-do item'
)
# He types "Buy peacock feathers" into a text box (Edith's hobby
# is tying fly-fishing lures)
inputbox.send_keys('Buy peacock feathers')
# When He hits enter, the page updates, and now the page lists
# "1: Buy peacock feathers" as an item in a to-do list table
inputbox.send_keys(Keys.ENTER)
table = self.browser.find_element_by_id('id_list_table')
rows = table.find_elements_by_tag_name('tr')
self.assertTrue(
any('1: Buy peacock feathers' in row.text for row in rows),
"New to-do item did not appear in table"
)
# There is still a text box inviting her to add another item. He
# enters "Use peacock feathers to make a fly" (Edith is very
# methodical)
self.fail('Finish the test!')
# The page updates again, and now shows both items on her list
# Edith wonders whether the site will remember her list. Then He sees
        # that the site has generated a unique URL for her -- there is some
# explanatory text to that effect.
# He visits that URL - her to-do list is still there.
# Satisfied, He goes back to sleep
"""
if __name__ == '__main__':
unittest.main()
|
atelier-cartographique/static-sectioner
|
sectioner/template.py
|
Python
|
agpl-3.0
| 1,286 | 0.002333 |
# Copyright (C) 2016 Pierre Marchand <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from pathlib import Path
from .parser import TemplateParser
parser = TemplateParser()
def load_template (dirpath, name, required=True):
home = Path(dirpath)
template_path = home.joinpath(name + '.html')
try:
with template_path.open() as template_file:
template = template_file.read()
    except Exception as exc:
if required:
raise exc
else:
return ''
return template
def apply_template (template, data):
    data_local = dict(data)
    return parser.apply_template(template, data_local)
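# Usage sketch (directory and data are illustrative):
#
#   template = load_template('templates', 'page')       # reads templates/page.html
#   html = apply_template(template, {'title': 'Home'})  # operates on a copy of the dict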
|
aurora-pro/apex-sigma
|
sigma/plugins/moderation/other/custom_command_detection.py
|
Python
|
gpl-3.0
| 805 | 0.003727 |
from config import Prefix
from sigma.core.blacklist import check_black
async def custom_command_detection(ev, message, args):
if message.guild:
if message.content.startswith(Prefix):
cmd = message.content[len(Prefix):].lower()
if cmd not in ev.bot.plugin_manager.commands:
if not check_black(ev.db, message):
try:
custom_commands = ev.db.get_settings(message.guild.id, 'CustomCommands')
except:
ev.db.set_settings(message.guild.id, 'CustomCommands', {})
custom_commands = {}
if cmd in custom_commands:
response = custom_commands[cmd]
await message.channel.send(response)
|
CodeReclaimers/btce-api
|
btceapi/public.py
|
Python
|
mit
| 6,783 | 0.001622 |
# Copyright (c) 2013-2017 CodeReclaimers, LLC
# Public API v3 description: https://btc-e.com/api/3/documentation
from collections import namedtuple
from . import common, scraping
PairInfoBase = namedtuple("PairInfoBase",
["decimal_places", "min_price", "max_price", "min_amount", "hidden", "fee"])
class PairInfo(PairInfoBase):
def format_currency(self, value):
return common.formatCurrencyDigits(value, self.decimal_places)
def truncate_amount(self, value):
return common.truncateAmountDigits(value, self.decimal_places)
def validate_order(self, trade_type, rate, amount):
if trade_type not in ("buy", "sell"):
raise common.InvalidTradeTypeException("Unrecognized trade type: %r" % trade_type)
if rate < self.min_price or rate > self.max_price:
raise common.InvalidTradePriceException(
"Allowed price range is from %f to %f" % (self.min_price, self.max_price))
formatted_min_amount = self.format_currency(self.min_amount)
if amount < self.min_amount:
msg = "Trade amount %r too small; should be >= %s" % \
(amount, formatted_min_amount)
raise common.InvalidTradeAmountException(msg)
class APIInfo(object):
def __init__(self, connection):
self.connection = connection
self.currencies = None
self.pair_names = None
self.pairs = None
self.server_time = None
self._scrape_pair_index = 0
self.update()
def update(self):
info = self.connection.makeJSONRequest("/api/3/info")
if type(info) is not dict:
raise TypeError("The response is not a dict.")
self.server_time = info.get(u"server_time")
pairs = info.get(u"pairs")
if type(pairs) is not dict:
raise TypeError("The pairs item is not a dict.")
self.pairs = {}
currencies = set()
for name, data in pairs.items():
self.pairs[name] = PairInfo(**data)
            a, b = name.split(u"_")
currencies.add(a)
currencies.add(b)
self.currencies = list(currencies)
self.currencies.sort()
self.pair_names = list(self.pairs.keys())
self.pair_names.sort()
def validate_pair(self, pair):
if pair not in self.pair_names:
if "_" in pair:
a, b = pair.split("_", 1)
swapped_pair = "%s_%s" % (b, a)
                if swapped_pair in self.pair_names:
msg = "Unrecognized pair: %r (did you mean %s?)"
msg = msg % (pair, swapped_pair)
raise common.InvalidTradePairException(msg)
raise common.InvalidTradePairException("Unrecognized pair: %r" % pair)
def get_pair_info(self, pair):
self.validate_pair(pair)
return self.pairs[pair]
def validate_order(self, pair, trade_type, rate, amount):
self.validate_pair(pair)
pair_info = self.pairs[pair]
pair_info.validate_order(trade_type, rate, amount)
def format_currency(self, pair, amount):
self.validate_pair(pair)
pair_info = self.pairs[pair]
return pair_info.format_currency(amount)
def scrapeMainPage(self):
parser = scraping.BTCEScraper()
# Rotate through the currency pairs between chat requests so that the
# chat pane contents will update more often than every few minutes.
self._scrape_pair_index = (self._scrape_pair_index + 1) % len(self.pair_names)
current_pair = self.pair_names[self._scrape_pair_index]
response = self.connection.makeRequest('/exchange/%s' % current_pair, with_cookie=True)
parser.feed(parser.unescape(response.decode('utf-8')))
parser.close()
r = scraping.ScraperResults()
r.messages = parser.messages
r.devOnline = parser.devOnline
r.supportOnline = parser.supportOnline
r.adminOnline = parser.adminOnline
return r
Ticker = namedtuple("Ticker",
["high", "low", "avg", "vol", "vol_cur", "last", "buy", "sell", "updated"])
def getTicker(pair, connection=None, info=None):
"""Retrieve the ticker for the given pair. Returns a Ticker instance."""
if info is not None:
info.validate_pair(pair)
if connection is None:
connection = common.BTCEConnection()
response = connection.makeJSONRequest("/api/3/ticker/%s" % pair)
if type(response) is not dict:
raise TypeError("The response is a %r, not a dict." % type(response))
elif u'error' in response:
print("There is a error \"%s\" while obtaining ticker %s" % (response['error'], pair))
ticker = None
else:
ticker = Ticker(**response[pair])
return ticker
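# Usage sketch (requires network access to the BTC-e API; the pair name is
# illustrative):
#
#   connection = common.BTCEConnection()
#   info = APIInfo(connection)
#   ticker = getTicker('btc_usd', connection, info)
#   print(ticker.last, ticker.buy, ticker.sell)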
def getDepth(pair, connection=None, info=None):
"""Retrieve the depth for the given pair. Returns a tuple (asks, bids);
each of these is a list of (price, volume) tuples."""
if info is not None:
info.validate_pair(pair)
if connection is None:
connection = common.BTCEConnection()
response = connection.makeJSONRequest("/api/3/depth/%s" % pair)
if type(response) is not dict:
raise TypeError("The response is not a dict.")
depth = response.get(pair)
if type(depth) is not dict:
raise TypeError("The pair depth is not a dict.")
asks = depth.get(u'asks')
if type(asks) is not list:
raise TypeError("The response does not contain an asks list.")
bids = depth.get(u'bids')
if type(bids) is not list:
raise TypeError("The response does not contain a bids list.")
return asks, bids
Trade = namedtuple("Trade", ['pair', 'type', 'price', 'tid', 'amount', 'timestamp'])
def getTradeHistory(pair, connection=None, info=None, count=None):
"""Retrieve the trade history for the given pair. Returns a list of
Trade instances. If count is not None, it should be an integer, and
specifies the number of items from the trade history that will be
processed and returned."""
if info is not None:
info.validate_pair(pair)
if connection is None:
connection = common.BTCEConnection()
response = connection.makeJSONRequest("/api/3/trades/%s" % pair)
if type(response) is not dict:
raise TypeError("The response is not a dict.")
history = response.get(pair)
if type(history) is not list:
raise TypeError("The response is a %r, not a list." % type(history))
result = []
# Limit the number of items returned if requested.
if count is not None:
history = history[:count]
for h in history:
h["pair"] = pair
t = Trade(**h)
result.append(t)
return result
|
noemis-fr/old-custom
|
e3z_add_delivery_method/sale_order.py
|
Python
|
agpl-3.0
| 2,622 | 0.002288 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012-2013 Elanz (<http://www.openelanz.fr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class sale_order(osv.osv):
_inherit = 'sale.order'
def action_button_confirm(self, cr, uid, ids, context=None):
# fetch the partner's id and subscribe the partner to the sale order
        assert len(ids) == 1
order = self.browse(cr, uid, ids[0], context=context)
add_delivery_method = True
only_service = True
delivery_method = self.pool.get('delivery.carrier').search(cr, uid, [('default_in_sales', '=', True)])
if delivery_method:
delivery_method = self.pool.get('delivery.carrier').browse(cr, uid, delivery_method[0])
if order.amount_untaxed < delivery_method.min_amount and not order.carrier_id:
if order.partner_id.without_delivery:
add_delivery_method = False
else:
for order_line in order.order_line:
if order_line.product_id:
if order_line.product_id.without_delivery:
add_delivery_method = False
break
elif order_line.product_id.type != 'service':
only_service = False
if only_service:
add_delivery_method = False
if add_delivery_method:
delivery_method = delivery_method.id
self.write(cr, uid, ids[0], {'carrier_id': delivery_method})
return super(sale_order, self).action_button_confirm(cr, uid, ids, context=context)
|
dgaston/ddbio-variantstore
|
Misc_and_Old/create_sample_coverage_reports.py
|
Python
|
mit
| 3,582 | 0.005025 |
#!/usr/bin/env python
import argparse
import getpass
import sys
import csv
from cassandra.auth import PlainTextAuthProvider
from cassandra.cqlengine import connection
from ddb import configuration
import utils
from coveragestore import SampleCoverage
from collections import defaultdict
def get_target_amplicons(filename):
amplicons_list = list()
sys.stdout.write("Opening file {} to retrieve reporting amplicons\n".format(filename))
with open(filename, "r") as bedfile:
reader = csv.reader(bedfile, dialect='excel-tab')
for row in reader:
amplicons_list.append(row[3])
return amplicons_list
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--samples_file', help="Input configuration file for samples")
parser.add_argument('-c', '--configuration', help="Configuration file for various settings")
parser.add_argument('-r', '--report', help="Root name for reports (per sample)")
parser.add_argument('-a', '--address', help="IP Address for Cassandra connection", default='127.0.0.1')
parser.add_argument('-u', '--username', help='Cassandra username for login', default=None)
args = parser.parse_args()
sys.stdout.write("Parsing configuration data\n")
config = configuration.configure_runtime(args.configuration)
sys.stdout.write("Parsing sample data\n")
samples = configuration.configure_samples(args.samples_file, config)
if args.username:
password = getpass.getpass()
auth_provider = PlainTextAuthProvider(username=args.username, password=password)
connection.setup([args.address], "variantstore", auth_provider=auth_provider)
else:
connection.setup([args.address], "variantst
|
ore")
sys.stdout.write("Processing samples\n")
for sample in samples:
sys.stdout.write("Processing coverage for sample {}\n".format(sample))
        report_panel_path = "/mnt/shared-data/ddb-configs/disease_panels/{}/{}".format(samples[sample]['panel'],
samples[sample]['report'])
target_amplicons = get_target_amplicons(report_panel_path)
reportable_amplicons = list()
for amplicon in target_amplicons:
coverage_data = SampleCoverage.objects.timeout(None).filter(
SampleCoverage.sample == samples[sample]['sample_name'],
SampleCoverage.amplicon == amplicon,
SampleCoverage.run_id == samples[sample]['run_id'],
SampleCoverage.library_name == samples[sample]['library_name'],
SampleCoverage.program_name == "sambamba"
)
ordered_variants = coverage_data.order_by('amplicon', 'run_id').limit(coverage_data.count() + 1000)
for variant in ordered_variants:
reportable_amplicons.append(variant)
with open("{}_{}.txt".format(sample, args.report), "w") as coverage_report:
coverage_report.write("Sample\tLibrary\tAmplicon\tNum Reads\tCoverage\n")
for amplicon in reportable_amplicons:
coverage_report.write("{}\t{}\t{}\t{}\t{}\n".format(amplicon.sample,
amplicon.library_name,
amplicon.amplicon,
amplicon.num_reads,
amplicon.mean_coverage))
|
Alex-Ian-Hamilton/sunpy
|
sunpy/wcs/__init__.py
|
Python
|
bsd-2-clause
| 2,364 | 0.000423 |
"""
The WCS package provides functions to parse World Coordinate System (WCS)
coordinates for solar images as well as convert between various solar
coordinate systems. The solar coordinates supported are
* Helioprojective-Cartesian (HPC): The most often used solar coordinate
system. Describes positions on the Sun as angles measured from the
center of the solar disk (usually in arcseconds) using cartesian
coordinates (X, Y)
* Helioprojective-Radial (HPR): Describes positions on the Sun using angles,
similar to HPC, but uses a radial coordinate (rho, psi) system centered
on solar disk where psi is measured in the counter clock wise direction.
* Heliocentric-Cartesian (HCC): The same as HPC but with positions expressed
    in true (deprojected) physical distances instead of angles on the
celestial sphere.
* Heliocentric-Radial (HCR): The same as HPR but with rho expressed in
true (deprojected) physical distances instead of angles on the celestial
sphere.
* Stonyhurst-Heliographic (HG): Expresses positions on the Sun using
    longitude and latitude on the solar sphere, but with the origin at the
    intersection of the solar equator and the central meridian as
seen from Earth. This means that the coordinate system remains fixed
with respect to Earth while the Sun rotates underneath it.
* Carrington-Heliographic (HG): Carrington longitude is offset
from Stonyhurst longitude by a time-dependent scalar value, L0. At the
start of each Carrington rotation, L0 = 360, and steadily decreases
until it reaches L0 = 0, at which point the next Carrington rotation
starts.
Some definitions
* b0: Tilt of the solar North rotational axis toward the observer
(helio- graphic latitude of the observer). Note that SOLAR_B0,
HGLT_OBS, and CRLT_OBS are all synonyms.
* l0: Carrington longitude of central meridian as seen from Earth.
* dsun_meters: Distance between observer and the Sun. Default is 1 AU.
* rsun_meters: Radius of the Sun in meters. Default is 6.955e8 meters. This value is stored
locally in this module and can be modified if necessary.
References
----------
| Thompson (2006), A&A, 449, 791 <http://dx.doi.org/10.1051/0004-6361:20054262>
| PDF <http://fits.gsfc.nasa.gov/wcs/coordinates.pdf>
"""
from __future__ import absolute_import
from sunpy.wcs.wcs import *
|
LTKills/languages
|
python/17.py
|
Python
|
gpl-3.0
| 66 | 0.030303 |
# Convert to lower (lol)
string = input()
print(string.lower())
|
lnmds/jose
|
ext/nsfw.py
|
Python
|
mit
| 7,881 | 0 |
import logging
import random
import urllib.parse
import collections
import aiohttp
import discord
import motor.motor_asyncio
from discord.ext import commands
from .common import Cog
log = logging.getLogger(__name__)
class BooruError(Exception):
pass
class BooruProvider:
url = ''
@classmethod
def transform_file_url(cls, url):
return url
@classmethod
def get_author(cls, post):
return post['author']
@classmethod
async def get_posts(cls, bot, tags, *, limit=15):
headers = {
'User-Agent': 'Yiffmobile v2 (José, https://github.com/lnmds/jose)'
}
tags = urllib.parse.quote(' '.join(tags), safe='')
async with bot.session.get(
f'{cls.url}&limit={limit}&tags={tags}',
headers=headers) as resp:
results = await resp.json()
if not results:
return []
try:
# e621 sets this to false
# when the request fails
if not results.get('success', True):
raise BooruError(results.get('reason'))
except AttributeError:
# when the thing actually worked and
# its a list of posts and not a fucking
# dictionary
# where am I gonna see good porn APIs?
pass
# transform file url
for post in results:
post['file_url'] = cls.transform_file_url(post['file_url'])
return results
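# Adding a provider is a matter of subclassing BooruProvider, as the classes
# below do. A hypothetical sketch (the endpoint is illustrative):
#
#   class ExampleBooru(BooruProvider):
#       url = 'https://example.org/post/index.json?'
#       url_post = 'https://example.org/post/show/{0}'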
class E621Booru(BooruProvider):
url = 'https://e621.net/post/index.json?'
url_post = 'https://e621.net/post/show/{0}'
class HypnohubBooru(BooruProvider):
url = 'http://hypnohub.net/post/index.json?'
url_post = 'https://hypnohub.net/post/show/{0}'
@classmethod
def transform_file_url(cls, url):
return 'https:' + url.replace('.net//', '.net/')
class GelBooru(BooruProvider):
url = 'https://gelbooru.com/index.php?page=dapi&s=post&json=1&q=index'
url_post = 'https://gelbooru.com/index.php?page=post&s=view&id={0}'
@classmethod
def get_author(cls, post):
return post['owner']
class NSFW(Cog, requires=['config']):
"""NSFW commands.
Fetching works on a "non-repeataibility" basis (unless
the bot restarts). This means that with each set of tags
you give for José to search, it will record the given post
and make sure it doesn't repeat again.
"""
def __init__(self, bot):
super().__init__(bot)
self.whip_coll = self.config.jose_db['whip']
self.repeat_cache = collections.defaultdict(dict)
def key(self, tags):
return ','.join(tags)
def mark_post(self, ctx, tags: list, post: dict):
"""Mark this post as seen."""
cache = self.repeat_cache[ctx.guild.id]
k = self.key(tags)
used = cache.get(k, [])
used.append(post['id'])
cache[k] = used
def filter(self, ctx, tags: list, posts):
"""Filter the posts so we get the only posts
that weren't seen."""
cache = self.repeat_cache[ctx.guild.id]
used_posts = cache.get(self.key(tags), [])
return list(filter(lambda post: post['id'] not in used_posts, posts))
async def booru(self, ctx, booru, tags: list):
if ctx.channel.topic and '[jose:no_nsfw]' in ctx.channel.topic:
return
# taxxx
await self.jcoin.pricing(ctx, self.prices['API'])
try:
# grab posts
posts = await booru.get_posts(ctx.bot, tags)
posts = self.filter(ctx, tags, posts)
if not posts:
return await ctx.send('Found nothing.\n'
'(this can be caused by an exhaustion '
f'of the tags `{ctx.prefix}help NSFW`)')
# grab random post
post = random.choice(posts)
self.mark_post(ctx, tags, post)
post_id = post.get('id')
post_author = booru.get_author(post)
log.info('%d posts from %s, chose %d', len(posts), booru.__name__,
post_id)
tags = (post['tags'].replace('_', '\\_'))[:500]
# add stuffs
embed = discord.Embed(title=f'Posted by {post_author}')
embed.set_image(url=post['file_url'])
embed.add_field(name='Tags', value=tags)
embed.add_field(name='URL', value=booru.url_post.format(post_id))
# hypnohub doesn't have this
if 'fav_count' in post and 'score' in post:
embed.add_field(
name='Votes/Favorites',
value=f"{post['score']} votes, "
f"{post['fav_count']} favorites")
# send
await ctx.send(embed=embed)
except BooruError as err:
raise self.SayException(f'Error while fetching posts: `{err!r}`')
except aiohttp.ClientError as err:
log.exception('nsfw client error')
raise self.SayException(f'Something went wrong. Sorry! `{err!r}`')
@commands.command()
@commands.is_nsfw()
async def e621(self, ctx, *tags):
"""Randomly searches e621 for posts."""
async with ctx.typing():
await self.booru(ctx, E621Booru, tags)
@commands.command(aliases=['hh'])
@commands.is_nsfw()
async def hypnohub(self, ctx, *tags):
"""Randomly searches Hypnohub for posts."""
async with ctx.typing():
await self.booru(ctx, HypnohubBooru, tags)
@commands.command()
@commands.is_nsfw()
async def gelbooru(self, ctx, *tags):
"""Randomly searches Gelbooru for posts."""
async with ctx.typing():
await self.booru(ctx, GelBooru, tags)
@commands.command()
@commands.is_nsfw()
async def penis(self, ctx):
"""get penis from e621 bb"""
await ctx.invoke(self.bot.get_command('e621'), 'penis')
@commands.command()
    @commands.cooldown(5, 1800, commands.BucketType.user)
async def whip(self, ctx, *, person: discord.User = None):
"""Whip someone.
If no arguments provided, shows how many whips you
received.
The command has a 5/1800s cooldown per-user
"""
if not person:
whip = await self.whip_coll.find_one({'user_id': ctx.author.id})
if not whip:
return await ctx.send(f'**{ctx.author}** was never whipped')
return await ctx.send(f'**{ctx.author}** was whipped'
f' {whip["whips"]} times')
if person == ctx.author:
return await ctx.send('no')
uid = person.id
whip = await self.whip_coll.find_one({'user_id': uid})
if not whip:
whip = {
'user_id': uid,
'whips': 0,
}
await self.whip_coll.insert_one(whip)
await self.whip_coll.update_one({
'user_id': uid
}, {'$inc': {
'whips': 1
}})
await ctx.send(f'**{ctx.author}** whipped **{person}** '
f'They have been whipped {whip["whips"] + 1} times.')
@commands.command()
async def whipboard(self, ctx):
"""Whip leaderboard."""
e = discord.Embed(title='Whip leaderboard')
data = []
cur = self.whip_coll.find().sort('whips',
motor.pymongo.DESCENDING).limit(15)
async for whip in cur:
u = self.bot.get_user(whip['user_id'])
u = str(u)
data.append(f'{u:30s} -> {whip["whips"]}')
joined = '\n'.join(data)
e.description = f'```\n{joined}\n```'
await ctx.send(embed=e)
def setup(bot):
bot.add_jose_cog(NSFW)
|
hephaestus9/Radio
|
radio/logger.py
|
Python
|
mit
| 2,105 | 0.0019 |
# -*- coding: utf-8 -*-
import logging
import logging.handlers
import radio
import datetime
import sys
import os
class RadioLogger():
"""Radio logger"""
def __init__(self, LOG_FILE, VERBOSE):
"""init the logger"""
# set up formatting for console and the two log files
confor = logging.Formatter('%(asctime)s :: %(levelname)s :: %(message)s', '%H:%M:%S')
warfor = logging.Formatter('%(asctime)s :: %(levelname)-8s :: %(message)s', '%b-%d %H:%M:%S')
# set up logging to STDOUT for all levels DEBUG and higher
con = logging.StreamHandler(sys.stdout)
con.setLevel(logging.DEBUG)
con.setFormatter(confor)
# set up logging to a file for all levels DEBUG and higher
war = logging.handlers.RotatingFileHandler(LOG_FILE, maxBytes=500000, backupCount=3)
war.setLevel(logging.DEBUG)
war.setFormatter(warfor)
# create Logger object
self.mylogger = logging.getLogger('MAIN')
self.mylogger.setLevel(logging.DEBUG)
if VERBOSE:
self.mylogger.addHandler(con)
self.mylogger.addHandler(war)
from radio import DEVELOPMENT
if DEVELOPMENT:
werkzeug_logger = logging.getLogger('werkzeug')
werkzeug_logger.setLevel(logging.DEBUG)
werkzeug_logger.addHandler(con)
werkzeug_logger.addHandler(war)
def log(self, toLog, logLevel):
"""wrapper for logger output"""
try:
            if logLevel == 'DEBUG':
self.mylogger.debug(toLog)
elif logLevel == 'INFO':
self.mylogger.info(toLog)
elif logLevel == 'WARNING':
self.mylogger.warning(toLog)
elif logLevel == 'ERROR':
self.mylogger.error(toLog)
elif logLevel == 'CRITICAL':
                self.mylogger.critical(toLog)
time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
radio.LOG_LIST.append({'level': logLevel, 'message': toLog, 'time': time})
except ValueError:
pass
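# Usage sketch (log file name and verbosity flag are illustrative):
#
#   logger = RadioLogger('radio.log', VERBOSE=True)
#   logger.log('server started', 'INFO')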
|
93lorenzo/software-suite-movie-market-analysis
|
testMovies.py
|
Python
|
gpl-3.0
| 6,486 | 0.015264 |
from __future__ import division
import numpy as np
from Tkinter import *
import json
import io
import unicodecsv as csv
#import csv
#file = open("moviesTest-1970.txt",'r')
act = open("invertedIndexActorsWeightedAll.txt",'r')
dir = open("invertedIndexDirectorsWeightedAll.txt", 'r')
wri = open("invertedIndexWritersWeightedAll.txt", 'r')
#line = file.readline()
lact = act.readline()
ldir = dir.readline()
lwri = wri.readline()
#gson = json.loads(line)
jact = json.loads(lact)
jdir = json.loads(ldir)
jwri = json.loads(lwri)
#file.close()
act.close()
dir.close()
wri.close()
class Test:
def calcolaMedie(self,actors,directors,writers):
mediaAct = 0
mediaDir = 0
mediaWri = 0
for elem in actors:
print elem
mediaAct += float(jact.get(elem).get("rating"))
for elem in directors:
mediaDir += float(jdir.get(elem).get("rating"))
for elem in writers:
mediaWri += float(jwri.get(elem).get("rating"))
mediaAct = float(mediaAct/len(actors))
mediaDir = float(mediaDir/len(directors))
mediaWri = float(mediaWri/len(writers))
return mediaAct,mediaDir,mediaWri
#### extract data from the json files ####
def readData(self,filename):
file = open(filename, 'r')
line = file.readline()
print line
gson = json.loads(line)
file.close()
vector = []
input = []
labels = []
titles = []
#indice = 0
for elem in gson:
#titles.append(gson.get(elem).get("title"))
actors = gson.get(elem).get("actors")
directors = gson.get(elem).get("director")
writers = gson.get(elem).get("w
|
riter")
input.append([actors,directors,writers])
#imdbRating = float(gson.get(elem).get("imdbRating"))
mediaAct, mediaDir, mediaWri = self.calcolaMedie(actors, directors, writers)
vect = [1,mediaAct, mediaDir, mediaWri]
vector.append(vect)
#labels.append(int(imdbRating)) ## CAST PER CLASSI DISCRETE ##
data = np.array(vector)
#labels = np.array(labels)
#train_data,test_data,train_labels,test_labels = train_test_split(data,labels, train_size= 0.5)
#return train_data, train_labels,test_data,test_labels
print "lettura terminata"
return data,input
def hypothesis(self,x,theta):
l_theta = []
for i in range(len(theta)):
#print theta[i]
thetaX = x.dot(theta[i])# wx
thetaX_exp = np.exp(thetaX) # exp(wx)
l_theta.append(thetaX_exp)
l_theta = np.array(l_theta)
#print np.shape(l_theta)
thetaX_exp_sum = np.sum(l_theta) # sum of exp(wx)
#print thetaX_exp_sum
p = l_theta.T / thetaX_exp_sum # 5xlen(x) predicted results
if np.isinf(p).any(): # deal with overflow in results.
inf_idx = np.isinf(p) # idx where overflow occurs
val = np.sum(p, 0) / np.sum(inf_idx, 0) * inf_idx # values to be used to substitution
p[inf_idx] = val[inf_idx] # substitute values
return p.T
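    # The method above is a softmax: p_k = exp(theta_k . x) / sum_j exp(theta_j . x),
    # plus an ad-hoc substitution step for entries where exp() overflows to inf.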
#### predict the labels for a set of observations ####
def test(self,data,theta):
pred_lab = []
correct = 0
for i in range(len(data)):
p = self.hypothesis(data[i], theta)
max = 0
ind = 0
for k, x in enumerate(p):
if x > max:
max = x
ind = k
pred_lab.append(ind+1)
'''for j in range(len(labels)):
if labels[j] == pred_lab[j]:
correct += 1
correctness = (correct * 100) / len(labels)'''
return pred_lab
#### predict the label for a single observation ####
def singleTest(self,data,theta):
max = 0
ind = 0
p = self.hypothesis(data,theta)
for k, x in enumerate(p):
if x > max:
max = x
ind = k
pred_lab = ind+1
return pred_lab
#### reads the theta from file ####
def getTheta(self):
filenameTheta = "thetas.txt"
fileTheta = open(filenameTheta, 'r')
lines = fileTheta.readlines()
theta = []
for line in lines:
line = line.replace("\n", "")
line = line.rstrip()
l = line.split(' ')
for i in range(len(l)):
l[i] = float(l[i])
theta.append(l)
theta = np.array(theta)
return theta
#### print the results on a file in the case of a batch prediction ####
def results(self,fileResult,input,pred_lab):
fileRes = open(fileResult,'w')
writer = csv.writer(fileRes,delimiter = ',')
writer.writerow(("ACTORS","DIRECTORS","WRITERS","PREDICTED"))
for i in range(len(pred_lab)):
writer.writerow((input[i][0],input[i][1],input[i][2],pred_lab[i]))
#writer.writerow(unicode(titles[i]) + unicode("\t") + unicode(labels[i]) + unicode("\t") + unicode(
#pred_lab[i]) + unicode("\n"))
fileRes.close()
#### initialization for a set of predictions ####
def init2(self,filename,fileResult):
data,input =self.readData(filename)
theta = self.getTheta()
pred_lab = self.test(data,theta)
self.results(fileResult,input,pred_lab)
#print "ACCURACY ON TEST FILE IS: " + str(correctness) + "% "
return 1
#### initialization for a single prediction ####
def init(self,actors,directors,writers):
act = [x for x in actors if x != "None"]
dir = [x for x in directors if x != "None"]
wri = [x for x in writers if x != "None"]
mediaAct,mediaDir,mediaWri = self.calcolaMedie(act,dir,wri)
data = [1,mediaAct,mediaDir,mediaWri]
data = np.array(data)
#data,labels = self.readData()
filenameTheta = "thetas.txt"
fileTheta = open(filenameTheta,'r')
lines = fileTheta.readlines()
theta = []
for line in lines:
line = line.replace("\n","")
line = line.rstrip()
l = line.split(' ')
for i in range(len(l)):
l[i] = float(l[i])
theta.append(l)
theta = np.array(theta)
label = self.singleTest(data,theta)
return label
#print " LABEL PREDICTED: "+ str(label)
|
0--key/lib
|
portfolio/2009_GoogleAppEngine/apps/0--key/models.py
|
Python
|
apache-2.0
| 651 | 0.004608 |
from google.appengine.ext import db
class Stuff (db.Model):
owner = db.UserProperty(required=True, auto_current_user=True)
pulp = db.BlobProperty()
class Greeting(db.Model):
author = db.UserProperty()
content = db.StringProperty(multiline=True)
avatar = db.BlobProperty()
date = db.DateTimeProperty(auto_now_add=True)
class Placebo(db.Model):
developer = db.StringProperty()
OID = db.StringProperty()
    concept = db.StringProperty()
category = db.StringProperty()
taxonomy = db.StringProperty()
taxonomy_version = db.StringProperty()
code = db.StringProperty()
descriptor = db.StringProperty()
|
jubalh/MAT
|
libmat/audio.py
|
Python
|
gpl-2.0
| 1,375 | 0 |
""" Care about audio fileformat
"""
try:
from mutagen.flac import FLAC
from mutagen.oggvorbis import OggVorbis
except ImportError:
pass
import parser
import mutagenstripper
class MpegAudioStripper(parser.GenericParser):
""" Represent mpeg audio file (mp3, ...)
"""
def _should_remove(self, field):
return field.name in ("id3v1", "id3v2")
class OggStripper(mutagenstripper.MutagenStripper):
""" Represent an ogg vorbis file
"""
def _create_mfile(self):
self.mfile = OggVorbis(self.filename)
class FlacStripper(mutagenstripper.MutagenStripper):
""" Represent a Flac audio file
"""
def _create_mfile(self):
self.mfile = FLAC(self.filename)
def remove_all(self):
""" Remove the "metadata" block from the file
"""
super(FlacStripper, self).remove_all()
self.mfile.clear_pictures()
self.mfile.save()
return True
def is_clean(self):
""" Check if the "metadata" block is present in the file
"""
        return super(FlacStripper, self).is_clean() and not self.mfile.pictures
def get_meta(self):
""" Return the content of the metadata block if present
"""
metadata = super(FlacStripper, self).get_meta()
if self.mfile.pictures:
            metadata['picture:'] = 'yes'
return metadata
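# Usage sketch (assumes a FLAC file on disk; the path is illustrative):
#
#   stripper = FlacStripper('song.flac')
#   if not stripper.is_clean():
#       stripper.remove_all()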
|
bigdig/vnpy
|
vnpy/gateway/ctp/__init__.py
|
Python
|
mit
| 35 | 0.028571 |
from .ctp_gateway import CtpGateway
|
neuropsychology/Neuropsydia.py
|
neuropsydia/tests/test_color.py
|
Python
|
mpl-2.0
| 210 | 0.009524 |
from unittest import TestCase
import neuropsydia as n
n.start(open_window=False)
class TestColor(TestCase):
def test_is_string(self):
c = n.color("w")
self.assertTrue(isinstance(c, tuple))
|
bowen0701/algorithms_data_structures
|
alg_tower_of_hanoi.py
|
Python
|
bsd-2-clause
| 1,320 | 0.001515 |
"""The tower of Hanoi."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
def tower_of_hanoi(height, from_pole, to_pole, with_pole, counter):
"""Tower of Hanoi.
Time complexity: T(1) = 1, T(n) = 2T(n - 1) + 1 => O(2^n).
Space complexity: O(1).
"""
if height == 1:
counter[0] += 1
print('{0} -> {1}'.format(from_pole, to_pole))
else:
tower_of_hanoi(height - 1, from_pole, with_pole, to_pole, counter)
tower_of_hanoi(1, from_pole, to_pole, with_pole, counter)
tower_of_hanoi(height - 1, with_pole, to_pole, from_pole, counter)
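# The recurrence T(n) = 2*T(n - 1) + 1 solves to T(n) = 2**n - 1, so the
# counters printed by main() below should read 1, 3 and 31 for heights 1, 2, 5.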
def main():
from_pole = 'A'
to_pole = 'B'
with_pole = 'C'
height = 1
counter = [0]
print('height: {}'.format(height))
tower_of_hanoi(height, from_pole, to_pole, with_pole, counter)
print('counter: {}'.format(counter[0]))
height = 2
counter = [0]
print('height: {}'.format(height))
tower_of_hanoi(height, from_pole, to_pole, with_pole, counter)
print('counter: {}'.format(counter[0]))
height = 5
counter = [0]
print('height: {}'.format(height))
tower_of_hanoi(height, from_pole, to_pole, with_pole, counter)
print('counter: {}'.format(counter[0]))
if __name__ == '__main__':
main()
|
wli/django-allauth
|
allauth/socialaccount/providers/shopify/provider.py
|
Python
|
mit
| 1,032 | 0 |
from allauth.socialaccount import providers
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
class ShopifyAccount(ProviderAccount):
pass
class ShopifyProvider(OAuth2Provider):
id = 'shopify'
name = 'Shopify'
account_class = ShopifyAccount
def get_auth_params(self, request, action):
ret = super(ShopifyProvider, self).get_auth_params(request, action)
shop = request.GET.get('shop', None)
if shop:
ret.update({'shop': shop})
return ret
    def get_default_scope(self):
return ['read_orders', 'read_products']
def extract_uid(self, data):
return str(data['shop']['id'])
def extract_common_fields(self, data):
# See: https://docs.shopify.com/api/shop
# User is only available with Shopify Plus, email is the only
# common field
return dict(email=data['shop']['email'])
providers.registry.register(ShopifyProvider)
|
ytsapras/robonet_site
|
scripts/tests/test_survey_data_utilities.py
|
Python
|
gpl-2.0
| 4,078 | 0.01643 |
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 18 12:04:44 2017
@author: rstreet
"""
from os import getcwd, path, remove, environ
from sys import path as systempath
cwd = getcwd()
systempath.append(path.join(cwd,'..'))
import artemis_subscriber
import log_utilities
import glob
from datetime import datetime
import pytz
import survey_data_utilities
import event_classes
def test_read_ogle_param_files():
"""Function to test whether the OGLE parameter files can be parsed
properly
"""
# Note that OGLE lenses.par files are searched for using a glob call
# which resolves the year suffices so that need not be given here
config = {
'ogle_data_local_location': '../../data/',
'ogle_time_stamp_file': 'ogle.last.changed',
'ogle_lenses_file': 'lenses.par',
        'ogle_updated_file': 'ogle.last.updated',
}
ogle_data = survey_data_utilities.read_ogle_param_files(config)
last_changed = datetime(2016, 11, 2, 1, 4, 39, 360000)
    last_changed = last_changed.replace(tzinfo=pytz.UTC)
assert ogle_data.last_changed == last_changed
last_updated = datetime(2017, 1, 23, 22, 30, 16)
last_updated= last_updated.replace(tzinfo=pytz.UTC)
assert ogle_data.last_updated == last_updated
assert len(ogle_data.lenses) == 1927
lens = event_classes.Lens()
assert type(ogle_data.lenses['OGLE-2016-BLG-0110']) == type(lens)
def test_read_moa_param_files():
"""Function to test whether the MOA parameter files can be parsed
properly
"""
config = {
'moa_data_local_location': '../../data/',
'moa_time_stamp_file': 'moa.last.changed',
'moa_lenses_file': 'moa_lenses.par',
'moa_updated_file': 'moa.last.updated',
}
moa_data = survey_data_utilities.read_moa_param_files(config)
last_changed = datetime(2016, 11, 4, 4, 0, 35)
last_changed= last_changed.replace(tzinfo=pytz.UTC)
assert moa_data.last_changed == last_changed
last_updated = datetime(2017, 1, 23, 22, 30, 19)
last_updated= last_updated.replace(tzinfo=pytz.UTC)
assert moa_data.last_updated == last_updated
assert len(moa_data.lenses) == 618
lens = event_classes.Lens()
assert type(moa_data.lenses['MOA-2016-BLG-618']) == type(lens)
def test_scrape_rtmodel():
year = 2019
event='OB190011'
output = survey_data_utilities.scrape_rtmodel(year, event)
assert len(output) == 5
assert 'http' in output[0]
assert 'http' in output[2]
assert type(output[3]) == type(True)
assert type(output[4]) == type(True)
def test_scrape_mismap():
year = 2019
event='OB190011'
output = survey_data_utilities.scrape_mismap(year, event)
assert len(output) == 4
assert 'http' in output[0]
assert 'png' in output[1]
assert type(output[2]) == type(True)
assert type(output[3]) == type(True)
def test_scrape_moa():
year = 2019
event='OB190011'
output = survey_data_utilities.scrape_moa(year, event)
assert len(output) == 4
assert 'http' in output[0]
assert 'jpg' in output[1]
assert type(output[2]) == type(True)
assert type(output[3]) == type(True)
def test_scrape_kmt():
year = 2019
event='OB190335'
output = survey_data_utilities.scrape_kmt(year, event)
assert len(output) == 4
assert 'http' in output[0]
assert 'jpg' in output[1] or 'N/A' in output[1]
assert type(output[2]) == type(True)
assert type(output[3]) == type(True)
print(output)
def test_fetch_ogle_fchart():
year = 2019
event='OB190011'
output = survey_data_utilities.fetch_ogle_fchart(year, event)
assert len(output) == 2
assert 'http' in output[0]
assert 'jpg' in output[0]
assert type(output[1]) == type(True)
if __name__ == '__main__':
#test_scrape_rtmodel()
#test_scrape_mismap()
#test_scrape_moa()
test_scrape_kmt()
#test_fetch_ogle_fchart()
|
mjames-upc/python-awips
|
dynamicserialize/dstypes/com/raytheon/uf/common/dataplugin/gfe/request/ExecuteIfpNetCDFGridRequest.py
|
Python
|
bsd-3-clause
| 6,327 | 0.000632 |
##
##
# File auto-generated against equivalent DynamicSerialize Java class
# and then modified post-generation to use AbstractGfeRequest and
# implement str(), repr()
#
# SOFTWARE HISTORY
#
# Date Ticket# Engineer Description
# ------------ ---------- ----------- --------------------------
# xx/xx/?? dgilling Initial Creation.
# 03/13/13 1759 dgilling Add software history header.
# 05/13/15 4427 dgilling Add siteIdOverride field.
#
#
from dynamicserialize.dstypes.com.raytheon.uf.common.dataplugin.gfe.request import AbstractGfeRequest
from dynamicserialize.dstypes.com.raytheon.uf.common.message import WsId
class ExecuteIfpNetCDFGridRequest(AbstractGfeRequest):
def __init__(self, outputFilename=None, parmList=[], databaseID=None,
startTime=None, endTime=None, mask=None, geoInfo=False,
                 compressFile=False, configFileName=None, compressFileFactor=0,
trim=False, krunch=False, userID=None, logFileName=None, siteIdOverride=None):
super(ExecuteIfpNetCDFGridRequest, self).__init__()
self.outputFilename = outputFilename
self.parmList = parmList
self.databaseID = databaseID
self.startTime = startTime
self.endTime = endTime
self.mask = mask
self.geoInfo = geoInfo
self.compressFile = compressFile
self.configFileName = configFileName
self.compressFileFactor = compressFileFactor
self.trim = trim
self.krunch = krunch
self.userID = userID
self.logFileName = logFileName
self.siteIdOverride = siteIdOverride
if self.userID is not None:
self.workstationID = WsId(progName='ifpnetCDF', userName=self.userID)
if self.databaseID is not None:
self.siteID = self.databaseID.getSiteId()
def __str__(self):
retVal = "ExecuteIfpNetCDFGridRequest["
retVal += "wokstationID: " + str(self.workstationID) + ", "
retVal += "siteID: " + str(self.siteID) + ", "
retVal += "outputFilename: " + str(self.outputFilename) + ", "
retVal += "parmList: " + str(self.parmList) + ", "
retVal += "databaseID: " + str(self.databaseID) + ", "
retVal += "startTime: " + str(self.startTime) + ", "
retVal += "endTime: " + str(self.endTime) + ", "
retVal += "mask: " + str(self.mask) + ", "
retVal += "geoInfo: " + str(self.geoInfo) + ", "
retVal += "compressFile: " + str(self.compressFile) + ", "
retVal += "configFileName: " + str(self.configFileName) + ", "
retVal += "compressFileFactor: " + str(self.compressFileFactor) + ", "
retVal += "trim: " + str(self.trim) + ", "
retVal += "krunch: " + str(self.krunch) + ", "
retVal += "userID: " + str(self.userID) + ", "
retVal += "logFileName: " + str(self.logFileName) + ", "
retVal += "siteIdOverride: " + str(self.siteIdOverride)
retVal += "]"
return retVal
def __repr__(self):
retVal = "ExecuteIfpNetCDFGridRequest("
retVal += "wokstationID=" + repr(self.workstationID) + ", "
retVal += "siteID=" + repr(self.siteID) + ", "
retVal += "outputFilename=" + repr(self.outputFilename) + ", "
retVal += "parmList=" + repr(self.parmList) + ", "
retVal += "databaseID=" + repr(self.databaseID) + ", "
retVal += "startTime=" + repr(self.startTime) + ", "
retVal += "endTime=" + repr(self.endTime) + ", "
retVal += "mask=" + repr(self.mask) + ", "
retVal += "geoInfo=" + repr(self.geoInfo) + ", "
retVal += "compressFile=" + repr(self.compressFile) + ", "
retVal += "configFileName=" + repr(self.configFileName) + ", "
retVal += "compressFileFactor=" + repr(self.compressFileFactor) + ", "
retVal += "trim=" + repr(self.trim) + ", "
retVal += "krunch=" + repr(self.krunch) + ", "
retVal += "userID=" + repr(self.userID) + ", "
retVal += "logFileName=" + repr(self.logFileName) + ", "
retVal += "siteIdOverride: " + str(self.siteIdOverride)
retVal += ")"
return retVal
def getOutputFilename(self):
return self.outputFilename
def setOutputFilename(self, outputFilename):
self.outputFilename = outputFilename
def getParmList(self):
return self.parmList
def setParmList(self, parmList):
self.parmList = parmList
def getDatabaseID(self):
return self.databaseID
def setDatabaseID(self, databaseID):
self.databaseID = databaseID
def getStartTime(self):
return self.startTime
def setStartTime(self, startTime):
self.startTime = startTime
def getEndTime(self):
return self.endTime
def setEndTime(self, endTime):
self.endTime = endTime
def getMask(self):
return self.mask
def setMask(self, mask):
self.mask = mask
def getGeoInfo(self):
return self.geoInfo
def setGeoInfo(self, geoInfo):
self.geoInfo = geoInfo
def getCompressFile(self):
return self.compressFile
def setCompressFile(self, compressFile):
self.compressFile = compressFile
def getConfigFileName(self):
return self.configFileName
def setConfigFileName(self, configFileName):
self.configFileName = configFileName
def getCompressFileFactor(self):
return self.compressFileFactor
def setCompressFileFactor(self, compressFileFactor):
self.compressFileFactor = compressFileFactor
def getTrim(self):
return self.trim
def setTrim(self, trim):
self.trim = trim
def getKrunch(self):
return self.krunch
def setKrunch(self, krunch):
self.krunch = krunch
def getUserID(self):
return self.userID
def setUserID(self, userID):
self.userID = userID
def getLogFileName(self):
return self.logFileName
def setLogFileName(self, logFileName):
self.logFileName = logFileName
def getSiteIdOverride(self):
return self.siteIdOverride
def setSiteIdOverride(self, siteIdOverride):
self.siteIdOverride = siteIdOverride
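# A minimal construction sketch (values are illustrative; FakeDatabaseID
# stands in for the real DatabaseID type, which must expose getSiteId()):
#
#   class FakeDatabaseID(object):
#       def getSiteId(self):
#           return 'OAX'
#
#   request = ExecuteIfpNetCDFGridRequest(
#       outputFilename='/tmp/grids.nc',
#       parmList=['T_SFC', 'Td_SFC'],
#       databaseID=FakeDatabaseID(),
#       userID='gfe')
#   print(request)   # workstationID/siteID were derived in the constructor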
|
terhorst/psmcpp
|
smcpp/observe.py
|
Python
|
gpl-3.0
| 1,330 | 0 |
from __future__ import absolute_import
from abc import ABCMeta, abstractmethod
import weakref
import functools
# Decorator to target specific messages.
def targets(target_messages, no_first=False):
if isinstance(target_messages, str):
target_messages = [target_messages]
def wrapper(f):
@functools.wraps(f)
def _(self, *args, **kwargs):
message = args[0]
if message in target_messages:
if no_first and kwargs["i"] == 0:
return
f(self, *args, **kwargs)
return _
return wrapper
class Observer(object):
__metaclass__ = ABCMeta
    @abstractmethod
def update(self, *args, **kwargs):
pass
class Observable(object):
def __init__(self):
self.observers = weakref.WeakSet()
def register(self, observer):
self.observers.add(observer)
def unregister(self, observer):
self.observers.discard(observer)
def unregister_all(self):
self.observers.clear()
def update_observers(self, *args, **kwargs):
for observer in self.observers:
observer.update(*args, **kwargs)
def __getstate__(self):
state = self.__dict__.copy()
# Do not try to pickle observers.
del state["observers"]
return state
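# Usage sketch (illustrative names): an Observable pushes messages to its
# registered Observers, and @targets filters which messages an update
# method reacts to:
#
#   class ProgressPrinter(Observer):
#       @targets('iteration', no_first=True)
#       def update(self, message, i):
#           print('iteration %d finished' % i)
#
#   model = Observable()
#   printer = ProgressPrinter()
#   model.register(printer)
#   model.update_observers('iteration', i=0)   # suppressed by no_first
#   model.update_observers('iteration', i=1)   # prints
#   model.update_observers('done', i=2)        # ignored: message not targeted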
|
lthurlow/Network-Grapher
|
proj/external/numpy-1.7.0/numpy/testing/tests/test_decorators.py
|
Python
|
mit
| 4,070 | 0.001966 |
import numpy as np
from numpy.testing import *
from numpy.testing.noseclasses import KnownFailureTest
import nose
def test_slow():
@dec.slow
def slow_func(x,y,z):
pass
assert_(slow_func.slow)
def test_setastest():
@dec.setastest()
def f_default(a):
pass
@dec.setastest(True)
def f_istest(a):
pass
@dec.setastest(False)
def f_isnottest(a):
pass
assert_(f_default.__test__)
assert_(f_istest.__test__)
assert_(not f_isnottest.__test__)
class DidntSkipException(Exception):
pass
def test_skip_functions_hardcoded():
@dec.skipif(True)
def f1(x):
raise DidntSkipException
try:
f1('a')
except DidntSkipException:
raise Exception('Failed to skip')
except nose.SkipTest:
pass
@dec.skipif(False)
def f2(x):
raise DidntSkipException
try:
f2('a')
except DidntSkipException:
pass
except nose.SkipTest:
raise Exception('Skipped when not expected to')
def test_skip_functions_callable():
def skip_tester():
return skip_flag == 'skip me!'
@dec.skipif(skip_tester)
def f1(x):
raise DidntSkipException
try:
skip_flag = 'skip me!'
f1('a')
except DidntSkipException:
raise Exception('Failed to skip')
except nose.SkipTest:
pass
@dec.skipif(skip_tester)
def f2(x):
raise DidntSkipException
try:
skip_flag = 'five is right out!'
f2('a')
except DidntSkipException:
pass
except nose.SkipTest:
raise Exception('Skipped when not expected to')
def test_skip_generators_hardcoded():
@dec.knownfailureif(True, "This test is known to fail")
def g1(x):
for i in xrange(x):
yield i
try:
for j in g1(10):
pass
except KnownFailureTest:
pass
else:
raise Exception('Failed to mark as known failure')
@dec.knownfailureif(False, "This test is NOT known to fail")
def g2(x):
for i in xrange(x):
yield i
raise DidntSkipException('FAIL')
try:
for j in g2(10):
pass
except KnownFailureTest:
        raise Exception('Marked incorrectly as known failure')
    except DidntSkipException:
pass
def test_skip_generators_callable():
def skip_tester():
return skip_flag == 'skip me!'
@dec.knownfailureif(skip_tester, "This test is known to fail")
def g1(x):
for i in xrange(x):
yield i
try:
skip_flag = 'skip me!'
for j in g1(10):
pass
except KnownFailureTest:
pass
else:
raise Exception('Failed to mark as known failure')
@dec.knownfailureif(skip_tester, "This test is NOT known to fail")
def g2(x):
for i in xrange(x):
yield i
raise DidntSkipException('FAIL')
try:
skip_flag = 'do not skip'
for j in g2(10):
pass
except KnownFailureTest:
        raise Exception('Marked incorrectly as known failure')
except DidntSkipException:
pass
def test_deprecated():
@dec.deprecated(True)
def non_deprecated_func():
pass
@dec.deprecated()
def deprecated_func():
import warnings
warnings.warn("TEST: deprecated func", DeprecationWarning)
@dec.deprecated()
def deprecated_func2():
import warnings
warnings.warn("AHHHH")
raise ValueError
@dec.deprecated()
def deprecated_func3():
import warnings
warnings.warn("AHHHH")
# marked as deprecated, but does not raise DeprecationWarning
assert_raises(AssertionError, non_deprecated_func)
# should be silent
deprecated_func()
# fails if deprecated decorator just disables test. See #1453.
assert_raises(ValueError, deprecated_func2)
# first warnings is not a DeprecationWarning
assert_raises(AssertionError, deprecated_func3)
if __name__ == '__main__':
run_module_suite()
|
idjung96/mng_files
|
mng_files/wsgi.py
|
Python
|
gpl-3.0
| 498 | 0.004016 |
"""
WSGI config for mng_files project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
import sys
path = os.path.abspath(__file__+'/../..')
if path not in sys.path:
sys.path.append(path)
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mng_files.settings")
application = get_wsgi_application()
|
repotvsupertuga/tvsupertuga.repository
|
plugin.video.youtube/resources/lib/youtube_plugin/kodion/impl/xbmc/xbmc_progress_dialog.py
|
Python
|
gpl-2.0
| 911 | 0 |
__author__ = 'bromix'
import xbmcgui
from ..abstract_progress_dialog import AbstractProgressDialog
class XbmcProgressDialog(AbstractProgressDialog):
def __init__(self, heading, text):
AbstractProgressDialog.__init__(self, 100)
self._dialog = xbmcgui.DialogProgress()
self._dialog.create(heading, text)
# simple reset because KODI won't do it :(
self._position = 1
self.update(steps=-1)
def close(self):
if self._dialog:
self._dialog.close()
            self._dialog = None
def update(self, steps=1, text=None):
self._position += steps
position = int(float(100.0 / self._total) * self._position)
if isinstance(text, basestring):
self._dialog.update(position, text)
else:
self._dialog.update(position)
def is_aborted(self):
return self._dialog.iscanceled()
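# Hypothetical call sequence (requires Kodi's xbmcgui at runtime; _total is
# assumed to come from AbstractProgressDialog, set to 100 above):
#
#   dialog = XbmcProgressDialog('Downloading', 'Fetching items...')
#   for item in range(10):
#       if dialog.is_aborted():
#           break
#       dialog.update(steps=1, text='Item %d of 10' % (item + 1))
#   dialog.close()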
|
openlabs/payment-gateway-authorize-net
|
tests/test_transaction.py
|
Python
|
bsd-3-clause
| 23,226 | 0.000043 |
# -*- coding: utf-8 -*-
"""
test_transaction.py
:copyright: (C) 2014-2015 by Openlabs Technologies & Consulting (P) Limited
:license: BSD, see LICENSE for more details.
"""
import unittest
import datetime
import random
import authorize
from dateutil.relativedelta import relativedelta
from trytond.tests.test_tryton import DB_NAME, USER, CONTEXT, POOL
import trytond.tests.test_tryton
from trytond.transaction import Transaction
from trytond.exceptions import UserError
class TestTransaction(unittest.TestCase):
"""
Test transaction
"""
def setUp(self):
"""
Set up data used in the tests.
"""
trytond.tests.test_tryton.install_module('payment_gateway')
self.Currency = POOL.get('currency.currency')
self.Company = POOL.get('company.company')
self.Party = POOL.get('party.party')
self.User = POOL.get('res.user')
self.Journal = POOL.get('account.journal')
self.PaymentGateway = POOL.get('payment_gateway.gateway')
self.PaymentTransaction = POOL.get('payment_gateway.transaction')
self.AccountMove = POOL.get('account.move')
self.PaymentProfile = POOL.get('party.payment_profile')
self.UseCardView = POOL.get('payment_gateway.transaction.use_card.view')
def _create_fiscal_year(self, date=None, company=None):
"""
Creates a fiscal year and requried sequences
"""
FiscalYear = POOL.get('account.fiscalyear')
Sequence = POOL.get('ir.sequence')
Company = POOL.get('company.company')
if date is None:
date = datetime.date.today()
if company is None:
company, = Company.search([], limit=1)
fiscal_year, = FiscalYear.create([{
'name': '%s' % date.year,
'start_date': date + relativedelta(month=1, day=1),
'end_date': date + relativedelta(month=12, day=31),
'company': company,
'post_move_sequence': Sequence.create([{
'name': '%s' % date.year,
'code': 'account.move',
'company': company,
}])[0],
}])
FiscalYear.create_period([fiscal_year])
return fiscal_year
def _create_coa_minimal(self, company):
"""Create a minimal chart of accounts
"""
AccountTemplate = POOL.get('account.account.template')
Account = POOL.get('account.account')
account_create_chart = POOL.get(
'account.create_chart', type="wizard")
account_template, = AccountTemplate.search(
[('parent', '=', None)]
)
session_id, _, _ = account_create_chart.create()
create_chart = account_create_chart(session_id)
create_chart.account.account_template = account_template
create_chart.account.company = company
create_chart.transition_create_account()
receivable, = Account.search([
('kind', '=', 'receivable'),
('company', '=', company),
])
payable, = Account.search([
('kind', '=', 'payable'),
('company', '=', company),
])
create_chart.properties.company = company
create_chart.properties.account_receivable = receivable
create_chart.properties.account_payable = payable
create_chart.transition_create_properties()
def _get_account_by_kind(self, kind, company=None, silent=True):
"""Returns an account with given spec
:param kind: receivable/payable/expense/revenue
:param silent: dont raise error if account is not found
"""
Account = POOL.get('account.account')
Company = POOL.get('company.company')
if company is None:
company, = Company.search([], limit=1)
accounts = Account.search([
('kind', '=', kind),
('company', '=', company)
], limit=1)
if not accounts and not silent:
raise Exception("Account not found")
if not accounts:
return None
account, = accounts
return account
def setup_defaults(self):
"""
Creates default data for testing
"""
currency, = self.Currency.create([{
'name': 'US Dollar',
'code': 'USD',
'symbol': '$',
}])
with Transaction().set_context(company=None):
company_party, = self.Party.create([{
'name': 'Openlabs'
}])
self.company, = self.Company.create([{
'party': company_party,
'currency': currency,
}])
self.User.write([self.User(USER)], {
'company': self.company,
'main_company': self.company,
})
CONTEXT.update(self.User.get_preferences(context_only=True))
# Create Fiscal Year
self._create_fiscal_year(company=self.company.id)
# Create Chart of Accounts
self._create_coa_minimal(company=self.company.id)
# Create Cash journal
self.cash_journal, = self.Journal.search(
[('type', '=', 'cash')], limit=1
)
self.Journal.write([self.cash_journal], {
'debit_account': self._get_account_by_kind('expense').id
})
self.auth_net_gateway = self.PaymentGateway(
name='Authorize.net',
journal=self.cash_journal,
provider='authorize_net',
method='credit_card',
            authorize_net_login='327deWY74422',
authorize_net_transaction_key='32jF65cTxja88ZA2',
test=True
)
self.auth_net_gateway.save()
# Create parties
self.party1, = self.Party.create([{
'name': 'Test party - 1',
'addresses': [('create', [{
'name': 'Test Party %s' % random.randint(1, 999),
'street': 'Test Street %s' % random.randint(1, 999),
                'city': 'Test City %s' % random.randint(1, 999),
}])],
'account_receivable': self._get_account_by_kind(
'receivable').id,
}])
self.party2, = self.Party.create([{
'name': 'Test party - 2',
'addresses': [('create', [{
'name': 'Test Party',
'street': 'Test Street',
'city': 'Test City',
}])],
'account_receivable': self._get_account_by_kind(
'receivable').id,
}])
self.party3, = self.Party.create([{
'name': 'Test party - 3',
'addresses': [('create', [{
'name': 'Test Party',
'street': 'Test Street',
'city': 'Test City',
}])],
'account_receivable': self._get_account_by_kind(
'receivable').id,
}])
self.card_data1 = self.UseCardView(
number='4111111111111111',
expiry_month='04',
expiry_year=str(random.randint(2016, 2020)),
csc=str(random.randint(100, 555)),
owner='Test User -1',
)
self.card_data2 = self.UseCardView(
number='4111111111111111',
expiry_month='08',
expiry_year=str(random.randint(2016, 2020)),
csc=str(random.randint(556, 999)),
owner='Test User -2',
)
self.invalid_card_data = self.UseCardView(
number='4111111111111111',
expiry_month='08',
expiry_year='2022',
csc=str(911),
owner='Test User -2',
)
self.payment_profile = self.PaymentProfile(
party=self.party1,
address=self.party1.addresses[0].id,
gateway=self.auth_net_gateway.id,
last_4_digits='1111',
expiry_month='01',
expiry_year='2018',
provider_reference='27527167',
authorize_profile_id='28545177',
)
self.payment_profile.save()
def test_0010_test_add_payment_profile(self):
"""
Test adding payment profile to a Party
"""
|
django-salesforce/django-salesforce
|
salesforce/backend/utils.py
|
Python
|
mit
| 17,530 | 0.003023 |
"""
CursorWrapper (like django.db.backends.utils)
"""
import decimal
import logging
import warnings
from itertools import islice
from typing import Any, Callable, Iterable, Iterator, List, Tuple, TypeVar, Union, overload
from django.db import models, NotSupportedError
from django.db.models.sql import subqueries, Query, RawQuery
from salesforce.backend import DJANGO_30_PLUS
from salesforce.dbapi.driver import (
DatabaseError, InternalError, SalesforceWarning, merge_dict,
register_conversion, arg_to_json)
from salesforce.fields import NOT_UPDATEABLE, NOT_CREATEABLE
V = TypeVar('V')
if not DJANGO_30_PLUS:
# a "do nothing" stub for Django < 3.0, where is no decorator @async_unsafe
F = TypeVar('F', bound=Callable)
F2 = TypeVar('F2', bound=Callable)
@overload
def async_unsafe(message: F) -> F:
...
@overload
def async_unsafe(message: str) -> Callable[[F2], F2]:
...
def async_unsafe(message: Union[F, str]) -> Union[F, Callable[[F2], F2]]:
def decorator(func: F2) -> F2:
return func
# If the message is actually a function, then be a no-arguments decorator.
if callable(message):
func = message
message = 'You cannot call this from an async context - use a thread or sync_to_async.'
return decorator(func)
return decorator
else:
from django.utils.asyncio import ( # type: ignore[import,no-redef] # noqa pylint:disable=unused-import,ungrouped-imports
async_unsafe
)
log = logging.getLogger(__name__)
DJANGO_DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S.%f-00:00'
MIGRATIONS_QUERY_TO_BE_IGNORED = "SELECT django_migrations.app, django_migrations.name FROM django_migrations"
def extract_values(query):
"""
Extract values from insert or update query.
Supports bulk_create
"""
if isinstance(query, subqueries.UpdateQuery):
row = query.values
return extract_values_inner(row, query)
if isinstance(query, subqueries.InsertQuery):
ret = []
for row in query.objs:
ret.append(extract_values_inner(row, query))
return ret
raise NotSupportedError
def extract_values_inner(row, query):
d = dict()
fields = query.model._meta.fields
for _, field in enumerate(fields):
sf_read_only = getattr(field, 'sf_read_only', 0)
is_date_auto = getattr(field, 'auto_now', False) or getattr(field, 'auto_now_add', False)
if field.get_internal_type() == 'AutoField':
continue
if isinstance(query, subqueries.UpdateQuery):
if (sf_read_only & NOT_UPDATEABLE) != 0 or is_date_auto:
continue
value_or_empty = [value for qfield, model, value in query.values if qfield.name == field.name]
if value_or_empty:
[value] = value_or_empty
else:
assert len(query.values) < len(fields), \
"Match name can miss only with an 'update_fields' argument."
continue
if hasattr(value, 'default'):
warnings.warn(
"The field '{}.{}' has been saved again with DEFAULTED_ON_CREATE value. "
"It is better to set a real value to it or to refresh it from the database "
"or restrict updated fields explicitly by 'update_fields='."
.format(field.model._meta.object_name, field.name),
SalesforceWarning
)
continue
elif isinstance(query, subqueries.InsertQuery):
value = getattr(row, field.attname)
if (sf_read_only & NOT_CREATEABLE) != 0 or hasattr(value, 'default'):
continue # skip not createable or DEFAULTED_ON_CREATE
else:
raise InternalError('invalid query type')
d[field.column] = arg_to_json(value)
return d
class CursorWrapper:
"""
A wrapper that emulates the behavior of a database cursor.
This is the class that is actually responsible for making connections
to the SF REST API
"""
# pylint:disable=too-many-instance-attributes,too-many-public-methods
def __init__(self, db):
"""
Connect to the Salesforce API.
"""
self.db = db
self.query = None
self.session = db.sf_session # this creates a TCP connection if doesn't exist
self.rowcount = None
self.first_row = None
self.lastrowid = None # not moved to driver because INSERT is implemented here
self.cursor = self.db.connection.cursor()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
@property
def oauth(self):
return self.session.auth.get_auth()
def execute(self, q, args=()):
"""
Send a query to the Salesforce API.
"""
# pylint:disable=too-many-branches
self.rowcount = None
response = None
if self.query is None:
self.execute_select(q, args)
else:
response = self.execute_django(q, args)
if isinstance(response, list):
return
# the encoding is detected automatically, e.g. from headers
if response and response.text:
# parse_float set to decimal.Decimal to avoid precision errors when
# converting from the json number to a float and then to a Decimal object
# on a model's DecimalField. This converts from json number directly
# to a Decimal object
data = response.json(parse_float=decimal.Decimal)
# a SELECT query
if 'totalSize' in data:
# SELECT
self.rowcount = data['totalSize']
# a successful INSERT query, return after getting PK
elif 'success' in data and 'id' in data:
self.lastrowid = data['id']
return
elif 'compositeResponse' in data:
# TODO treat error reporting for composite requests
self.lastrowid = [x['body']['id'] if x['body'] is not None else x['referenceId']
for x in data['compositeResponse']]
return
elif data['hasErrors'] is False:
# it is from Composite Batch request
# save id from bulk_create even if Django don't use it
if data['results'] and data['results'][0]['result']:
self.lastrowid = [item['result']['id'] for item in data['results']]
return
# something we don't recognize
else:
raise DatabaseError(data)
if not q.upper().startswith('SELECT COUNT() FROM'):
self.first_row = data['records'][0] if data['records'] else None
def prepare_query(self, query):
self.query = query
def execute_django(self, soql: str, args: Tuple[Any, ...] = ()):
"""
Fixed execute for queries coming from Django query compilers
"""
response = None
sqltype = soql.split(None, 1)[0].upper()
if isinstance(self.query, subqueries.InsertQuery):
response = self.execute_insert(self.query)
        elif isinstance(self.query, subqueries.UpdateQuery):
response = self.execute_update(self.query)
elif isinstance(self.query, subqueries.DeleteQuery):
            response = self.execute_delete(self.query)
elif isinstance(self.query, RawQuery):
self.execute_select(soql, args)
elif sqltype in ('SAVEPOINT', 'ROLLBACK', 'RELEASE'):
log.info("Ignored SQL command '%s'", sqltype)
return
elif isinstance(self.query, Query):
self.execute_select(soql, args)
else:
raise DatabaseError("Unsupported query: type %s: %s" % (type(self.query), self.query))
return response
def execute_select(self, soql: str, args) -> None:
if soql != MIGRATIONS_QUERY_TO_BE_IGNORED:
# normal query
query_all = self.que
|
jor-/scipy
|
scipy/fft/__init__.py
|
Python
|
bsd-3-clause
| 3,965 | 0.001261 |
"""
==============================================
Discrete Fourier transforms (:mod:`scipy.fft`)
==============================================
.. currentmodule:: scipy.fft
Fast Fourier Transforms (FFTs)
==============================
.. autosummary::
:toctree: generated/
fft - Fast (discrete) Fourier Transform (FFT)
ifft - Inverse FFT
fft2 - Two dimensional FFT
ifft2 - Two dimensional inverse FFT
fftn - n-dimensional FFT
ifftn - n-dimensional inverse FFT
rfft - FFT of strictly real-valued sequence
irfft - Inverse of rfft
rfft2 - Two dimensional FFT of real sequence
irfft2 - Inverse of rfft2
rfftn - n-dimensional FFT of real sequence
irfftn - Inverse of rfftn
hfft - FFT of a Hermitian sequence (real spectrum)
ihfft - Inverse of hfft
hfft2 - Two dimensional FFT of a Hermitian sequence
ihfft2 - Inverse of hfft2
hfftn - n-dimensional FFT of a Hermitian sequence
ihfftn - Inverse of hfftn
Discrete Sin and Cosine Transforms (DST and DCT)
================================================
.. autosummary::
:toctree: generated/
dct - Discrete cosine transform
idct - Inverse discrete cosine transform
dctn - n-dimensional Discrete cosine transform
idctn - n-dimensional Inverse discrete cosine transform
dst - Discrete sine transform
idst - Inverse discrete sine transform
dstn - n-dimensional Discrete sine transform
idstn - n-dimensional Inverse discrete sine transform
Helper functions
================
.. autosummary::
:toctree: generated/
fftshift - Shift the zero-frequency component to the center of the spectrum
ifftshift - The inverse of `fftshift`
fftfreq - Return the Discrete Fourier Transform sample frequencies
rfftfreq - DFT sample frequencies (for usage with rfft, irfft)
next_fast_len - Find the optimal length to zero-pad an FFT for speed
set_workers - Context manager to set default number of workers
get_workers - Get the current default number of workers
Backend control
===============
.. autosummary::
:toctree: generated/
set_backend - Context manager to set the backend within a fixed scope
skip_backend - Context manager to skip a backend within a fixed scope
set_global_backend - Sets the global fft backend
register_backend - Register a backend for permanent use
"""
from __future__ import division, print_function, absolute_import
from ._basic import (
fft, ifft, fft2, ifft2, fftn, ifftn,
rfft, irfft, rfft2, irfft2, rfftn, irfftn,
hfft, ihfft, hfft2, ihfft2, hfftn, ihfftn)
from ._realtransforms import dct, idct, dst, idst, dctn, idctn, dstn, idstn
from ._helper import next_fast_len
from ._backend import (set_backend, skip_backend, set_global_backend,
register_backend)
from numpy.fft import fftfreq, rfftfreq, fftshift, ifftshift
from ._pocketfft.helper import set_workers, get_workers
__all__ = [
    'fft', 'ifft', 'fft2', 'ifft2', 'fftn', 'ifftn',
'rfft', 'irfft', 'rfft2', 'irfft2', 'rfftn', 'irfftn',
'hfft', 'ihfft', 'hfft2', 'ihfft2', 'hfftn', 'ihfftn',
'fftfreq', 'rfftfreq', 'fftshift', 'ifftshift',
'next_fast_len',
'dct', 'idct', 'dst', 'idst', 'dctn', 'idctn', 'dstn', 'idstn',
'set_backend', 'skip_backend', 'set_global_backend', 'register_backend',
'get_workers', 'set_workers']
from numpy.dual import register_func
for k in ['fft', 'ifft', 'fftn', 'ifftn', 'fft2', 'ifft2']:
register_func(k, eval(k))
del k, register_func
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
# Hack to allow numpy.fft.fft to be called as scipy.fft
import sys
class _FFTModule(sys.modules[__name__].__class__):
@staticmethod
def __call__(*args, **kwargs):
import numpy as np
return np.fft.fft(*args, **kwargs)
import os
if os.environ.get('_SCIPY_BUILDING_DOC') != 'True':
sys.modules[__name__].__class__ = _FFTModule
del os
del _FFTModule
del sys
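# Round-trip sketch of the basic interface (standard usage, for orientation):
#
#   import numpy as np
#   import scipy.fft
#
#   x = np.random.rand(16)
#   X = scipy.fft.fft(x)             # forward transform
#   y = scipy.fft.ifft(X)            # inverse transform
#   assert np.allclose(x, y.real)    # the round trip recovers the signal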
|
EventBuck/EventBuck
|
shop/handlers/event/show.py
|
Python
|
mit
| 2,800 | 0.018571 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 4 juin 2013
@author: Aristote Diasonama
'''
from shop.handlers.event.base import BaseHandler
from shop.handlers.base_handler import asso_required
from shop.models.event import Event
from shop.shop_exceptions import EventNotFoundException
class ShowEventHandler(BaseHandler):
@asso_required
def get(self):
try:
self.try_to_show_the_event()
except EventNotFoundException:
self.show_all_events()
def try_to_show_the_event(self):
if not self.event:
raise EventNotFoundException
context = self.get_template_context_showing_one_event()
self.render_template('view_event.html', context=context)
def show_all_events(self):
context = self.get_template_context_showing_all_events()
self.render_template('view_event.html', context=context)
def get_template_context_showing_one_event(self):
context = dict()
context['event'] = self.event.get_event_in_dict_extended()
context['event']['image'] = self.uri_for('imageEvent', event_id = self.event_key.id())
context['isShowingAllEvents'] = False
context['sidebar_active'] = "overview"
context['url_for_editEvent'] = self.uri_for('editEvent', event_id = self.event_key.id())
context['url_to_publish_event'] = self.uri_for('rpc_publishEvent', event_id = self.event_key.id())
context['url_to_delete_event'] = self.uri_for('rpc_deleteEvent', event_id = self.event_key.id())
if self.event.type == 'paid':
context.update(self.get_template_context_paid_event())
return context
    def get_template_context_paid_event(self):
context = dict()
tickets = self.event.get_all_tickets()
if tickets is not None:
tickets_urls = map(lambda ticket: self.uri_for('editTicket',
event_id=self.event_key.id(), ticket_id=ticket.key.id()),
tickets.fetch())
context['tickets'] = zip(tickets, tickets_urls) if tickets else None
        context['url_for_createTicket'] = self.uri_for('createTicket', event_id = self.event_key.id())
        context['url_for_rpc_create_ticket'] = self.uri_for('rpc_createTicket', event_id=self.event_key.id())
        return context
def get_template_context_showing_all_events(self):
events = self.user.get_all_events()
context = dict()
context['events'] = events
context['showingAllEvents'] = True
context['sidebar_active'] = "allEvent"
return context
|
skosukhin/spack
|
lib/spack/spack/cmd/use.py
|
Python
|
lgpl-2.1
| 1,713 | 0 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import argparse
from spack.cmd.common import print_module_placeholder_help
description = "add package to environment using dotkit"
section = "environment"
level = "long"
def setup_parser(subparser):
"""Parser is only constructed so that this prints a nice help
message with -h. """
subparser.add_argument(
'spec', nargs=argparse.REMAINDER,
help='spec of package to use with dotkit')
def use(parser, args):
print_module_placeholder_help()
|
ngageoint/geoevents
|
geoevents/operations/migrations/0012_auto__add_settings.py
|
Python
|
mit
| 17,700 | 0.00791 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Settings'
db.create_table('operations_settings', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=200)),
('value', self.gf('django.db.models.fields.CharField')(max_length=200)),
))
db.send_create_signal('operations', ['Settings'])
def backwards(self, orm):
        # Deleting model 'Settings'
db.delete_table('operations_settings')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'maps.map': {
'Meta': {'object_name': 'Map'},
'center_x': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
'center_y': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '800', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'projection': ('django.db.models.fields.CharField', [], {'default': "'EPSG:4326'", 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75'}),
'zoom': ('django.db.models.fields.IntegerField', [], {})
},
'operations.agency': {
'Meta': {'ordering': "['name']", 'object_name': 'Agency'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'operations.deployment': {
'Meta': {'object_name': 'Deployment'},
'closed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deployers': ('django.db.models.fields.related.ManyToManyField', [], {'max_length': '250', 'to': "orm['auth.User']", 'null': 'True', 'symmetrical': 'False', 'blank': 'True'}),
'deployment_location': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
'description': ('tinymce.models.HTMLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['operations.Event']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.FloatField', [], {}),
'longitude': ('django.db.models.fields.FloatField', [], {}),
'point': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1', 'max_length': '1'})
},
'operations.event': {
'Meta': {'ordering': "['-last_updated']", 'object_name': 'Event'},
'agencies': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['operations.Agency']", 'null': 'True', 'blank': 'True'}),
'closed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'collaboration_link': ('django.db.models.fields.URLField', [], {'default': "'https://connect.dco.dod.mil/r3ops?launcher=false'", 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('tinymce.models.HTMLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'event_location': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'event_type': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'filedropoff_path': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'gq_job_ids': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'gq_project_ids': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('djang
|
unicamp-lbic/small_world_ca
|
analysis.py
|
Python
|
gpl-2.0
| 19,617 | 0.005251 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, os
my_site = os.path.join(os.environ["HOME"], ".local/lib/python2.7/site-packages")
sys.path.insert(0, my_site)
import h5py
import networkx as nx
import numpy as np
import pycuda.driver as cuda
import scipy.stats as st
import sys
import aux
from consts import *
def to_graph(connections):
graph = nx.DiGraph()
ca_size = connections.shape[0]
for cell in xrange(ca_size):
for neighbor in connections[cell]:
graph.add_edge(neighbor, cell)
# Count the number of rewired connection this cell has
graph.node[cell]['rew'] = (connections[cell] !=
(np.arange(cell - 3, cell + 4) % ca_size)).sum()
return graph
class AnalysisIndividual:
__cuda_module = False
def __init__(self, individual, correct, executions, ca_size,
connection_radius, ca_iterations, ca_repeat, k_history,
save_executions=0):
self.__ca_size = ca_size
self.__connection_radius = connection_radius
self.__n_connections = 2 * self.__connection_radius + 1
self.__ca_iterations = ca_iterations
self.__ca_repeat = ca_repeat
self.__k_history = k_history
self.__n_possible_history = 2 ** self.__k_history
self.__n_observations = self.__ca_repeat * \
(self.__ca_iterations - self.__k_history + 1)
self.__save_executions = save_executions
self.__individual = individual
self.__individual_number = self.__individual.number
self.__rules = self.__individual.gene_rules
self.__connections = self.__individual.connections
self.__graph = to_graph(self.__connections)
self.__executions = executions
density = np.mean(self.__executions[:, 0], axis=1)
self.__majority = np.round(density).astype(np.uint32)
# The closer the density is to .5 the harder the configuration is to
# decide
self.__difficult = 1 - np.abs(density - .5) / .5
# Checking which repetitions converged to a single state
self.__converged = np.all(self.__executions[:, -1] ==
self.__executions[:, -1, 0].reshape(-1, 1),
axis=1)
# Checking how many cells in each repetition converged to the right
# state
self.__cells_correct = np.mean(self.__executions[:, -1] ==
self.__majority.reshape(-1, 1), axis=1)
self.__correct = correct
self.__fitness = np.mean(self.__correct)
self.__gini = None
self.__limits = None
self.__entropy_rate = None
self.__base_table = None
self.__correlations = None
# Initialize the CUDA module
if not AnalysisIndividual.__cuda_module:
AnalysisIndividual.__cuda_module = True
cuda_module = aux.CudaModule('analysis.cu',
(self.__ca_size, self.__ca_iterations,
self.__ca_repeat,
self.__connection_radius,
self.__n_connections,
self.__n_observations,
self.__k_history,
self.__n_possible_history))
AnalysisIndividual.__kernel_calc_diffs = \
cuda_module.get_function("kernel_calc_diffs")
AnalysisIndividual.__kernel_probabilities = \
cuda_module.get_function("kernel_probabilities")
AnalysisIndividual.__kernel_active_storage = \
cuda_module.get_function("kernel_active_storage")
AnalysisIndividual.__kernel_entropy_rate = \
cuda_module.get_function("kernel_entropy_rate")
def __calculate_gini(self, values):
# Calculate the Gini coefficient to measure the inequality in a
# distribution of values
cum_values = np.sort(values).cumsum()
return 1 - (cum_values[0] + (cum_values[1:] + cum_values[:-1]).sum()) \
/ float(cum_values[-1] * cum_values.size)
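    # Worked example (illustrative): for values [1, 1, 1, 1] the sorted
    # cumulative sums are [1, 2, 3, 4], giving
    #     1 - (1 + (2+1) + (3+2) + (4+3)) / float(4 * 4) = 1 - 16/16 = 0.0
    # (perfect equality); for [0, 0, 0, 4] the cumulative sums are
    # [0, 0, 0, 4], giving 1 - (0 + 0 + 0 + (4+0)) / float(4 * 4) = 0.75,
    # i.e. a single cell concentrates nearly all of the "limit" events.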
def __get_limits(self):
# This function implements a heuristic to calculate how many times a
# cell has the role of "limit" of a diffusion in a simulation.
# The main idea here is that, usually, information in cellular automata
# flows in a given direction at a constant speed. If we know this
# direction and speed, we can check how many times a cell interrupts a
# flow.
sum_diffs = np.zeros(self.__ca_size, dtype=np.uint32)
try:
            self.__kernel_calc_diffs(cuda.In(self.__majority),
cuda.In(self.__executions),
cuda.InOut(sum_diffs),
block=(self.__ca_size, 1, 1), grid=(1,))
cuda.Context.synchronize()
except cuda.Error as e:
sys.exit("CUDA: Execution failed ('%s')!" % e)
# For all repetitions, calculate the ratio of total iterations each
# cell acted as a "limit"
self.__limits = sum_diffs / \
float(self.__ca_repeat * self.__ca_iterations)
def get_individual_info(self):
        if self.__gini is not None:
# If all metrics are already computed, just return them!
return self.__fitness, self.__gini, self.__prop_max_min, \
self.__individual.epoch, self.__individual_number, \
self.__clustering, self.__average_k_neigh, \
self.__average_shortest_path, self.__diameter
self.__get_limits()
self.__gini = self.__calculate_gini(self.__limits)
self.__prop_max_min = self.__limits.max() / self.__limits.min()
# As clustering coefficient is not defined for directed graphs, we
# convert the graph to its undirected version
self.__clustering = nx.average_clustering(nx.Graph(self.__graph))
self.__average_shortest_path = \
nx.average_shortest_path_length(self.__graph)
try:
self.__diameter = nx.diameter(self.__graph)
except nx.exception.NetworkXError:
self.__diameter = float('nan')
self.__convergence = np.mean(self.__converged)
table_individual = {
# Serial number
"i_num": np.array([self.__individual_number], dtype=np.int),
# Individual fitness
"fit": np.array([self.__fitness], dtype=np.float),
# Ratio of the repetitions that converged to a single state
"conv": np.array([self.__convergence], dtype=np.float),
# gini and max_min are metrics intended to measure the inequality
# in the number of times each cell is a "limit"
"gini": np.array([self.__gini], dtype=np.float),
"max_min": np.array([self.__prop_max_min], dtype=np.float),
# Epoch in the evolution
"epoch": np.array([self.__individual.epoch], dtype=np.float),
# Clustering coefficient
"clust": np.array([self.__clustering], dtype=np.float),
# Average shortests path between each pair of cells
"short": np.array([self.__average_shortest_path], dtype=np.float),
# Maximum distance between any two cells
"diam": np.array([self.__diameter], dtype=np.float)}
return table_individual
def __get_probs_entropy(self):
# Calculate information theoretical metrics to evaluate the
# computational role of each cell
        if self.__entropy_rate is not None:
# If all metrics are already computed, just return them!
return self.__entropy_rate, self.__active_storage, \
self.__cond_entropy
p_joint_table = np.zeros((self.__ca_size, self.__n_possible_history,
|
amitjamadagni/sympy
|
sympy/core/expr.py
|
Python
|
bsd-3-clause
| 102,305 | 0.000655 |
from core import C
from sympify import sympify
from basic import Basic, Atom
from singleton import S
from evalf import EvalfMixin, pure_complex
from decorators import _sympifyit, call_highest_priority
from cache import cacheit
from compatibility import reduce, as_int, default_sort_key
from sympy.mpmath.libmp import mpf_log, prec_to_dps
from collections import defaultdict
from inspect import getmro
class Expr(Basic, EvalfMixin):
__slots__ = []
@property
def _diff_wrt(self):
"""Is it allowed to take derivative wrt to this instance.
This determines if it is allowed to take derivatives wrt this object.
        Subclasses such as Symbol, Function and Derivative should return True
to enable derivatives wrt them. The implementation in Derivative
separates the Symbol and non-Symbol _diff_wrt=True variables and
temporarily converts the non-Symbol vars in Symbols when performing
the differentiation.
        Note, see the docstring of Derivative for how this should work
mathematically. In particular, note that expr.subs(yourclass, Symbol)
should be well-defined on a structural level, or this will lead to
inconsistent results.
Examples
========
>>> from sympy import Expr
>>> e = Expr()
>>> e._diff_wrt
False
>>> class MyClass(Expr):
... _diff_wrt = True
...
>>> (2*MyClass()).diff(MyClass())
2
"""
return False
@cacheit
def sort_key(self, order=None):
coeff, expr = self.as_coeff_Mul()
if expr.is_Pow:
expr, exp = expr.args
else:
expr, exp = expr, S.One
if expr.is_Atom:
args = (str(expr),)
else:
if expr.is_Add:
args = expr.as_ordered_terms(order=order)
elif expr.is_Mul:
args = expr.as_ordered_factors(order=order)
else:
args = expr.args
args = tuple(
[ default_sort_key(arg, order=order) for arg in args ])
args = (len(args), tuple(args))
exp = exp.sort_key(order=order)
return expr.class_key(), args, exp, coeff
def rcall(self, *args):
"""Apply on the argument recursively through the expression tree.
This method is used to simulate a common abuse of notation for
        operators. For instance in SymPy the following will not work:
``(x+Lambda(y, 2*y))(z) == x+2*z``,
however you can use
>>> from sympy import Lambda
>>> from sympy.abc import x,y,z
>>> (x + Lambda(y, 2*y)).rcall(z)
x + 2*z
"""
return Expr._recursive_call(self, args)
@staticmethod
def _recursive_call(expr_to_call, on_args):
def the_call_method_is_overridden(expr):
for cls in getmro(type(expr)):
if '__call__' in cls.__dict__:
return cls != Expr
if callable(expr_to_call) and the_call_method_is_overridden(expr_to_call):
if isinstance(expr_to_call, C.Symbol): # XXX When you call a Symbol it is
return expr_to_call # transformed into an UndefFunction
else:
return expr_to_call(*on_args)
elif expr_to_call.args:
args = [Expr._recursive_call(
sub, on_args) for sub in expr_to_call.args]
return type(expr_to_call)(*args)
else:
return expr_to_call
# ***************
# * Arithmetics *
# ***************
# Expr and its sublcasses use _op_priority to determine which object
# passed to a binary special method (__mul__, etc.) will handle the
# operation. In general, the 'call_highest_priority' decorator will choose
# the object with the highest _op_priority to handle the call.
# Custom subclasses that want to define their own binary special methods
# should set an _op_priority value that is higher than the default.
#
# **NOTE**:
# This is a temporary fix, and will eventually be replaced with
# something better and more powerful. See issue 2411.
_op_priority = 10.0
def __pos__(self):
return self
def __neg__(self):
return Mul(S.NegativeOne, self)
def __abs__(self):
return C.Abs(self)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__radd__')
def __add__(self, other):
return Add(self, other)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__add__')
def __radd__(self, other):
return Add(other, self)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rsub__')
def __sub__(self, other):
return Add(self, -other)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__sub__')
def __rsub__(self, other):
return Add(other, -self)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rmul__')
def __mul__(self, other):
return Mul(self, other)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__mul__')
def __rmul__(self, other):
return Mul(other, self)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rpow__')
def __pow__(self, other):
return Pow(self, other)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__pow__')
def __rpow__(self, other):
return Pow(other, self)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rdiv__')
def __div__(self, other):
return Mul(self, Pow(other, S.NegativeOne))
@_sympifyit('other', NotImplemented)
@call_highest_priority('__div__')
def __rdiv__(self, other):
return Mul(other, Pow(self, S.NegativeOne))
__truediv__ = __div__
__rtruediv__ = __rdiv__
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rmod__')
def __mod__(self, other):
return Mod(self, other)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__mod__')
def __rmod__(self, other):
return Mod(other, self)
def __int__(self):
# Although we only need to round to the units position, we'll
# get one more digit so the extra testing below can be avoided
# unless the rounded value rounded to an integer, e.g. if an
# expression were equal to 1.9 and we rounded to the unit position
# we would get a 2 and would not know if this rounded up or not
# without doing a test (as done below). But if we keep an extra
# digit we know that 1.9 is not the same as 1 and there is no
# need for further testing: our int value is correct. If the value
# were 1.99, however, this would round to 2.0 and our int value is
# off by one. So...if our round value is the same as the int value
# (regardless of how much extra work we do to calculate extra decimal
# places) we need to test whether we are off by one.
r = self.round(2)
if not r.is_Number:
raise TypeError("can't convert complex to int")
i = int(r)
if not i:
return 0
# off-by-one check
if i == r and not (self - i).equals(0):
isign = 1 if i > 0 else -1
x = C.Dummy()
# in the following (self - i).evalf(2) will not always work while
# (self - r).evalf(2) and the use of subs does; if the test that
# was added when this comment was added passes, it might be safe
# to simply use sign to compute this rather than doing this by hand:
diff_sign = 1 if (self - x).evalf(2, subs={x: i}) > 0 else -1
if diff_sign != isign:
i -= isign
return i
def __float__(self):
# Don't bother testing if it's a number; if it's not this is going
# to fail, and if it is we still need to check that it evalf'ed to
# a number.
result = self.evalf()
if result.is_Number:
return float(result)
if res
|
KirillMysnik/ArcJail
|
srcds/addons/source-python/plugins/arcjail/modules/lrs/win_reward.py
|
Python
|
gpl-3.0
| 4,013 | 0 |
# This file is part of ArcJail.
#
# ArcJail is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ArcJail is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ArcJail. If not, see <http://www.gnu.org/licenses/>.
from listeners.tick import Delay
from controlled_cvars.handlers import float_handler
from ...resource.strings import build_module_strings
from ..damage_hook import get_hook, protected_player_manager
from ..players import player_manager
from .. import build_module_config
from .base_classes.jail_game import JailGame
from . import game_event_handler, stage
strings_module = build_module_strings('lrs/win_reward')
config_manager = build_module_config('lrs/win_reward')
config_manager.controlled_cvar(
float_handler,
"duration",
default=10,
description="Duration of Win Reward"
)
config_manager.controlled_cvar(
float_handler,
"loser_speed",
default=0.5,
description="Loser's speed"
)
class WinReward(JailGame):
caption = "Win Reward"
stage_groups = {
        'winreward-start': [
"equip-damage-hooks",
"set-start-status",
"winreward-entry",
],
'winreward-timed-out': ["winreward-timed-out", ],
}
def __init__(self, players, **kwargs):
super().__init__(players, **kwargs)
self._counters = {}
self._results = {
'winner': kwargs['winner'],
'loser': kwargs['loser'],
}
@stage('basegame-entry')
def stage_basegame_entry(self):
self.set_stage_group('winreward-start')
@stage('equip-damage-hooks')
def stage_equip_damage_hooks(self):
winner, loser = self._results['winner'], self._results['loser']
def hook_hurt_for_loser(counter, info):
return info.attacker == winner.index
for player in self._players:
p_player = protected_player_manager[player.index]
counter = self._counters[player.index] = p_player.new_counter()
if player == winner:
counter.hook_hurt = get_hook('SW')
else:
counter.hook_hurt = hook_hurt_for_loser
p_player.set_protected()
@stage('undo-equip-damage-hooks')
def stage_undo_equip_damage_hooks(self):
for player in self._players_all:
p_player = protected_player_manager[player.index]
p_player.delete_counter(self._counters[player.index])
p_player.unset_protected()
@stage('winreward-entry')
def stage_winreward_entry(self):
winner, loser = self._results['winner'], self._results['loser']
loser.speed = config_manager['loser_speed']
def timeout_callback():
self.set_stage_group('winreward-timed-out')
self._delays.append(
Delay(config_manager['duration'], timeout_callback))
@stage('winreward-timed-out')
    def stage_winreward_timed_out(self):
winner, loser = self._results['winner'], self._results['loser']
loser.take_damage(loser.health, attacker_index=winner.index)
@game_event_handler('jailgame-player-death', 'player_death')
def event_jailgame_player_death(self, game_event):
player = player_manager.get_by_userid(game_event['userid'])
if player not in self._players:
return
self._players.remove(player)
winner, loser = self._results['winner'], self._results['loser']
if player == winner:
loser.take_damage(loser.health + 1, attacker_index=winner.index)
self.set_stage_group('destroy')
|
Goodmind/sunflower-fm
|
application/widgets/emblems_renderer.py
|
Python
|
gpl-3.0
| 2,375 | 0.032842 |
import gtk
import cairo
import gobject
class CellRendererEmblems(gtk.CellRenderer):
"""Cell renderer that accepts list of icon names."""
__gproperties__ = {
'emblems': (
gobject.TYPE_PYOBJECT,
'Emblem list',
'List of icon names to display',
gobject.PARAM_READWRITE
),
'is-link': (
gobject.TYPE_BOOLEAN,
'Link indicator',
'Denotes if item is a link or regular file',
False,
gobject.PARAM_READWRITE
)
}
def __init__(self):
gtk.CellRenderer.__init__(self)
self.emblems = None
self.is_link = None
self.icon_size = 16
self.spacing = 2
self.padding = 1
def do_set_property(self, prop, value):
"""Set renderer property."""
if prop.name == 'emblems':
self.emblems = value
elif prop.name == 'is-link':
self.is_link = value
else:
setattr(self, prop.name, value)
def do_get_property(self, prop):
"""Get renderer property."""
if prop.name == 'emblems':
result = self.emblems
elif prop.name == 'is-link':
result = self.is_link
else:
result = getattr(self, prop.name)
return result
def do_render(self, window, widget, background_area, cell_area, expose_area, flags):
"""Render emblems on tree view."""
if not self.is_link and (self.emblems is None or len(self.emblems) == 0):
return
# cache constants locally
icon_size = self.icon_size
spacing = self.spacing
emblems = self.emblems or []
icon_theme = gtk.icon_theme_get_default()
context = window.cairo_create()
# add symbolic link emblem if needed
if self.is_link:
emblems.insert(0, 'emblem-symbolic-link')
# position of next icon
pos_x = cell_area[0] + cell_area[2]
pos_y = cell_area[1] + ((cell_area[3] - icon_size) / 2)
# draw all the icons
for emblem in emblems:
# load icon from the theme
pixbuf = icon_theme.load_icon(emblem, 16, 0)
# move position of next icon
pos_x -= icon_size + spacing
# draw icon
context.set_source_pixbuf(pixbuf, pos_x, pos_y)
context.paint()
def do_get_size(self, widget, cell_area=None):
"""Calculate
|
size taken by emblems."""
count = 5 # optimum size, we can still render more or less emblems
width = self.icon_size * count + (self.spacing * (count - 1))
height = self.icon_size
result = (
0,
0,
width + 2 * self.padding,
height + 2 * self.padding
)
		return result
|
cathyyul/sumo-0.18
|
tools/build/pythonPropsMSVC.py
|
Python
|
gpl-3.0
| 1,517 | 0.005274 |
#!/usr/bin/env python
"""
@file pythonPropsMSVC.py
@author Michael Behrisch
@author Daniel Krajzewicz
@author Jakob Erdmann
@date 2011
@version $Id: pythonPropsMSVC.py 14425 2013-08-16 20:11:47Z behrisch $
This script rebuilds "../../build/msvc/python.props", the file which
gives information about the python includes and library.
SUMO, Simulation of Urban MObility; see http://sumo-sim.org/
Copyright (C) 2011-2013 DLR (http://www.dlr.de/) and contributors
All rights reserved
"""
import sys, distutils.sysconfig
from os.path import dirname, join
propsFile = join(dirname(__file__), '..', '..', 'build', 'msvc10', 'python.props')
print('generating %s ' % propsFile)
props = open(propsFile, 'w')
print >> props, """<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<PropertyGroup Label="UserMacros">
<PYTHON_LIB>%s\libs\python%s%s.lib</PYTHON_LIB>
</PropertyGroup>
<ItemDefinitionGroup>
<ClCompile>
<AdditionalIncludeDirectories>%s;%%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
      <PreprocessorDefinitions>HAVE_PYTHON;%%(PreprocessorDefinitions)</PreprocessorDefinitions>
</ClCompile>
</ItemDefinitionGroup>
<ItemGroup>
<BuildMacro Include="PYTHON_LIB">
<Value>$(PYTHON_LIB)</Value>
</BuildMacro>
</ItemGroup>
</Project>""" % (sys.prefix, sys.version[0], sys.version[2],
distutils.sysconfig.get_config_var('INCLUDEPY'))
props.close()
|
a25kk/stv
|
src/stv.sitecontent/stv/sitecontent/browser/contentpage.py
|
Python
|
mit
| 4,948 | 0 |
# -*- coding: utf-8 -*-
"""Module providing views for the folderish content page type"""
from Acquisition import aq_inner
from Products.Five.browser import BrowserView
from zope.component import getMultiAdapter
IMG = 'data:image/gif;base64,R0lGODlhAQABAIAAAP///wAAACwAAAAAAQABAAACAkQBADs='
class ContentPageView(BrowserView):
""" Folderish content page default view """
def has_leadimage(self):
context = aq_inner(self.context)
try:
lead_img = context.image
except AttributeError:
lead_img = None
if lead_img is not None:
return True
return False
def display_gallery(self):
context = aq_inner(self.context)
try:
display = context.displayGallery
except AttributeError:
display = None
if display is not None:
return display
return False
def rendered_gallery(self):
context = aq_inner(self.context)
template = context.restrictedTraverse('@@gallery-view')()
return template
def image_data(self):
data = {}
sizes = ['small', 'medium', 'large']
        idx = 0
        for size in sizes:
            idx += 1
img = self._get_scaled_img(size)
data[size] = '{0} {1}w'.format(img['url'], img['width'])
return data
def _get_scaled_img(self, size):
context = aq_inner(self.context)
scales = getMultiAdapter((context, self.request), name='images')
        if size == 'small':
            scale = scales.scale('image', width=300, height=300)
        elif size == 'medium':
            scale = scales.scale('image', width=600, height=600)
        else:
            scale = scales.scale('image', width=900, height=900)
item = {}
if scale is not None:
item['url'] = scale.url
item['width'] = scale.width
item['height'] = scale.height
else:
item['url'] = IMG
item['width'] = '1px'
item['height'] = '1px'
return item
class GalleryPreview(BrowserView):
"""Preview embeddable image gallery"""
def __call__(self):
self.has_assets = len(self.contained_images()) > 0
return self.render()
def render(self):
return self.index()
def rendered_gallery(self):
context = aq_inner(self.context)
template = context.restrictedTraverse('@@gallery-view')()
return template
class GalleryView(BrowserView):
"""Provide gallery of contained image content"""
def __call__(self):
self.has_assets = len(self.contained_images()) > 0
return self.render()
def render(self):
return self.index()
def has_leadimage(self):
context = aq_inner(self.context)
try:
lead_img = context.image
except AttributeError:
lead_img = None
if lead_img is not None:
return True
return False
def leadimage_tag(self):
context = aq_inner(self.context)
scales = getMultiAdapter((context, self.request), name='images')
scale = scales.scale('image', width=900, height=900)
item = {}
if scale is not None:
item['url'] = scale.url
item['width'] = scale.width
item['height'] = scale.height
else:
item['url'] = IMG
item['width'] = '1px'
            item['height'] = '1px'
return item
def contained_images(self):
context = aq_inner(self.context)
data = context.restrictedTraverse('@@folderListing')(
portal_type='Image',
sort_on='getObjPositionInParent')
return data
def image_tag(self, image):
context = image.getObject()
scales = getMultiAdapter((context, self.request), name='images')
scale = scales.scale('image', width=900, height=900)
        item = {}
if scale is not None:
item['url'] = scale.url
item['width'] = scale.width
item['height'] = scale.height
else:
item['url'] = IMG
item['width'] = '1px'
item['height'] = '1px'
return item
def _get_scaled_img(self, size):
context = aq_inner(self.context)
scales = getMultiAdapter((context, self.request), name='images')
        if size == 'small':
            scale = scales.scale('image', width=300, height=300)
        elif size == 'medium':
            scale = scales.scale('image', width=600, height=600)
        else:
            scale = scales.scale('image', width=900, height=900)
item = {}
if scale is not None:
item['url'] = scale.url
item['width'] = scale.width
item['height'] = scale.height
else:
item['url'] = IMG
item['width'] = '1px'
item['height'] = '1px'
return item
|
jeffzhengye/pylearn
|
speed/cython/scipy2013-cython-tutorial-master/exercises/hello-world/setup.py
|
Python
|
unlicense
| 299 | 0.020067 |
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
exts = [Extension("cython_hello_world",
["cython_hello_world.pyx"],
)]
setup(
cmdclass = {'build_ext': build_ext},
    ext_modules = exts,
)
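# Typical workflow for a Cython.Distutils-based setup script like this one
# (shell commands shown for reference only):
#
#     python setup.py build_ext --inplace
#     python -c "import cython_hello_world"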
|
reeshupatel/demo
|
keystone/common/serializer.py
|
Python
|
apache-2.0
| 13,041 | 0.000077 |
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Dict <--> XML de/serializer.
The identity API prefers attributes over elements, so we serialize that way
by convention, with a few hardcoded exceptions.
"""
from lxml import etree
import re
import six
from keystone.i18n import _
DOCTYPE = '<?xml version="1.0" encoding="UTF-8"?>'
XMLNS = 'http://docs.openstack.org/identity/api/v2.0'
XMLNS_LIST = [
{
'value': 'http://docs.openstack.org/identity/api/v2.0'
},
{
'prefix': 'OS-KSADM',
'value': 'http://docs.openstack.org/identity/api/ext/OS-KSADM/v1.0',
},
]
PARSER = etree.XMLParser(
resolve_entities=False,
remove_comments=True,
remove_pis=True)
# NOTE(dolph): lxml.etree.Entity() is just a callable that currently returns an
# lxml.etree._Entity instance, which doesn't appear to be part of the
# public API, so we discover the type dynamically to be safe
ENTITY_TYPE = type(etree.Entity('x'))
def from_xml(xml):
"""Deserialize XML to a dictionary."""
if xml is None:
return None
deserializer = XmlDeserializer()
return deserializer(xml)
def to_xml(d, xmlns=None):
"""Serialize a dictionary to XML."""
if d is None:
return None
serialize = XmlSerializer()
return serialize(d, xmlns)
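# A minimal usage sketch (added for orientation, not in the original
# module): attributes become dict keys, and 'enabled'/'truncated' values
# are coerced to booleans by XmlDeserializer.walk_element below.
def _example_from_xml():
    example = ('<user xmlns="http://docs.openstack.org/identity/api/v2.0" '
               'name="alice" enabled="true"/>')
    assert from_xml(example) == {'user': {'name': 'alice', 'enabled': True}}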
class XmlDeserializer(object):
def __call__(self, xml_str):
"""Returns a dictionary populated by decoding the given xml string."""
dom = etree.fromstring(xml_str.strip(), PARSER)
return self.walk_element(dom, True)
def _deserialize_links(self, links):
return dict((x.attrib['rel'], x.attrib['href']) for x in links)
@staticmethod
def _qualified_name(tag, namespace):
"""Returns a qualified tag name.
The tag name may contain the namespace prefix or not, which can
be determined by specifying the parameter namespace.
"""
m = re.search('[^}]+$', tag)
tag_name = m.string[m.start():]
if not namespace:
return tag_name
bracket = re.search('[^{]+$', tag)
ns = m.string[bracket.start():m.start() - 1]
# If the namespace is
# http://docs.openstack.org/identity/api/ext/OS-KSADM/v1.0 for the
# root element, a prefix needs to add in front of the tag name.
prefix = None
for xmlns in XMLNS_LIST:
if xmlns['value'] == ns:
prefix = xmlns.get('prefix')
break
if prefix is not None:
return '%(PREFIX)s:%(tag_name)s' % {
'PREFIX': prefix, 'tag_name': tag_name}
else:
return tag_name
def walk_element(self, element, namespace=False):
"""Populates a dictionary by walking an etree element."""
values = {}
for k, v in six.iteritems(element.attrib):
# boolean-looking attributes become booleans in JSON
if k in ['enabled', 'truncated']:
if v in ['true']:
v = True
elif v in ['false']:
v = False
values[self._qualified_name(k, namespace)] = v
text = None
if element.text is not None:
text = element.text.strip()
# current spec does not have attributes on an element with text
values = values or text or {}
decoded_tag = XmlDeserializer._qualified_name(element.tag, namespace)
list_item_tag = None
if (decoded_tag[-1] == 's' and not values and
decoded_tag != 'access'):
            # FIXME(gyee): special-case lists for now until we
# figure out how to properly handle them.
# If any key ends with an 's', we are assuming it is a list.
# List element have no attributes.
values = list(values)
if decoded_tag == 'policies':
list_item_tag = 'policy'
else:
list_item_tag = decoded_tag[:-1]
if decoded_tag == 'links':
return {'links': self._deserialize_links(element)}
links = None
truncated = False
for child in [self.walk_element(x) for x in element
if not isinstance(x, ENTITY_TYPE)]:
if list_item_tag:
# FIXME(gyee): special-case lists for now until we
# figure out how to properly handle them.
# If any key ends with an 's', we are assuming it is a list.
if list_item_tag in child:
values.append(child[list_item_tag])
else:
if 'links' in child:
links = child['links']
else:
truncated = child['truncated']
else:
values = dict(values.items() + child.items())
# set empty and none-list element to None to align with JSON
if not values:
values = ""
d = {XmlDeserializer._qualified_name(element.tag, namespace): values}
if links:
d['links'] = links
d['links'].setdefault('next')
d['links'].setdefault('previous')
if truncated:
d['truncated'] = truncated['truncated']
return d
class XmlSerializer(object):
def __call__(self, d, xmlns=None):
"""Returns an xml etree populated by the given dictionary.
Optionally, namespace the etree by specifying an ``xmlns``.
"""
links = None
truncated = False
# FIXME(dolph): skipping links for now
for key in d.keys():
if '_links' in key:
d.pop(key)
# NOTE(gyee, henry-nash): special-case links and truncation
# attribute in collections
if 'links' == key:
if links:
# we have multiple links
raise Exception('Multiple links found')
links = d.pop(key)
if 'truncated' == key:
if truncated:
# we have multiple attributes
raise Exception(_('Multiple truncation attributes found'))
truncated = d.pop(key)
assert len(d.keys()) == 1, ('Cannot encode more than one root '
'element: %s' % d.keys())
# name the root dom element
name = d.keys()[0]
m = re.search('[^:]+$', name)
root_name = m.string[m.start():]
prefix = m.string[0:m.start() - 1]
for ns in XMLNS_LIST:
if prefix == ns.get('prefix'):
xmlns = ns['value']
break
# only the root dom element gets an xlmns
root = etree.Element(root_name, xmlns=(xmlns or XMLNS))
self.populate_element(root, d[name])
# NOTE(gyee, henry-nash): special-case links and truncation attribute
if links:
self._populate_links(root, links)
if truncated:
self._populate_truncated(root, truncated)
# TODO(dolph): you can get a doctype from lxml, using ElementTrees
return '%s\n%s' % (DOCTYPE, etree.tostring(root, pretty_print=True))
def _populate_links(self, element, links_json):
links = etree.Element('links')
for k, v in six.iteritems(links_json):
if v:
link = etree.Element('link')
link.set('rel', six.text_type(k))
link.set('href', six.text_type(v))
links.append(link)
element.append(links)
def _populate_truncated(self, element, truncated_value):
truncated =
|
ChristopheVuillot/qiskit-sdk-py
|
qiskit/qasm/_node/_customunitary.py
|
Python
|
apache-2.0
| 1,893 | 0 |
# -*- coding: utf-8 -*-
# Copyright 2017 IBM RESEARCH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""
Node for an OPENQASM custom gate statement.
"""
from ._node import Node
class CustomUnitary(Node):
"""Node for an OPENQASM custom gate statement.
children[0] is an id node.
children[1] is an exp_list (if len==3) or primary_list.
children[2], if present, is a primary_list.
Has properties:
.id = id node
.name = gate name string
.arguments = None or exp_list node
.bitlist = primary_list node
"""
def __init__(self, children):
"""Create the custom gate node."""
Node.__init__(self, 'custom_unitary', children, None)
self.id = children[0]
self.name = self.id.name
if len(children) == 3:
self.arguments = children[1]
self.bitlist = children[2]
else:
            self.arguments = None
self.bitlist = children[1]
def qasm(self, prec=15):
"""Return the corresponding OPENQASM string."""
string = self.name
if self.arguments is not None:
string += "(" + self.arguments.qasm(prec) + ")"
string += " " + self.bitlist.qasm(prec) + ";"
return string
|
thomasaarholt/hyperspy
|
hyperspy/models/eelsmodel.py
|
Python
|
gpl-3.0
| 38,199 | 0.000052 |
# -*- coding: utf-8 -*-
# Copyright 2007-2021 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import copy
import logging
import warnings
from hyperspy import components1d
from hyperspy._signals.eels import EELSSpectrum
from hyperspy.components1d import EELSCLEdge, PowerLaw
from hyperspy.docstrings.model import FIT_PARAMETERS_ARG
from hyperspy.models.model1d import Model1D
_logger = logging.getLogger(__name__)
class EELSModel(Model1D):
"""Build an EELS model
Parameters
----------
spectrum : a Signal1D (or any Signal1D subclass) instance
auto_background : bool
If True, and if spectrum is an EELS instance adds automatically
a powerlaw to the model and estimate the parameters by the
two-area method.
auto_add_edges : bool
If True, and if spectrum is an EELS instance, it will
automatically add the ionization edges as defined in the
Signal1D instance. Adding a new element to the spectrum using
the components.EELSSpectrum.add_elements method automatically
add the corresponding ionisation edges to the model.
ll : {None, EELSSpectrum}
If an EELSSPectrum is provided, it will be assumed that it is
a low-loss EELS spectrum, and it will be used to simulate the
effect of multiple scattering by convolving it with the EELS
spectrum.
GOS : {'hydrogenic', 'Hartree-Slater', None}
The GOS to use when auto adding core-loss EELS edges.
If None it will use the Hartree-Slater GOS if
they are available, otherwise it will use the hydrogenic GOS.
dictionary : {dict, None}
A dictionary to be used to recreate a model. Usually generated using
:meth:`hyperspy.model.as_dictionary`
"""
def __init__(self, signal1D, auto_background=True,
auto_add_edges=True, ll=None,
GOS=None, dictionary=None):
Model1D.__init__(self, signal1D)
        # When automatically setting the fine structure energy regions,
# the fine structure of an EELS edge component is automatically
# disable if the next ionisation edge onset distance to the
# higher energy side of the fine structure region is lower that
# the value of this parameter
self._min_distance_between_edges_for_fine_structure = 0
self._preedge_safe_window_width = 2
self.signal1D = signal1D
self._suspend_auto_fine_structure_width = False
self.convolved = False
self.low_loss = ll
self.GOS = GOS
self.edges = []
self._background_components = []
if dictionary is not None:
auto_background = False
auto_add_edges = False
self._load_dictionary(dictionary)
if auto_background is True:
background = PowerLaw()
self.append(background)
if self.signal.subshells and auto_add_edges is True:
self._add_edges_from_subshells_names()
@property
def signal1D(self):
return self._signal
@signal1D.setter
def signal1D(self, value):
if isinstance(value, EELSSpectrum):
self._signal = value
else:
raise ValueError(
"This attribute can only contain an EELSSpectrum "
"but an object of type %s was provided" %
str(type(value)))
def append(self, component):
"""Append component to EELS model.
Parameters
----------
component
HyperSpy component1D object.
Raises
------
NotImplementedError
If the signal axis is a non-uniform axis.
"""
super(EELSModel, self).append(component)
if isinstance(component, EELSCLEdge):
# Test that signal axis is uniform
if not self.axes_manager[-1].is_uniform:
raise NotImplementedError("This operation is not yet implemented "
"for non-uniform energy axes")
tem = self.signal.metadata.Acquisition_instrument.TEM
component.set_microscope_parameters(
E0=tem.beam_energy,
alpha=tem.convergence_angle,
beta=tem.Detector.EELS.collection_angle,
energy_scale=self.axis.scale)
component.energy_scale = self.axis.scale
component._set_fine_structure_coeff()
self._classify_components()
append.__doc__ = Model1D.append.__doc__
def remove(self, component):
super(EELSModel, self).remove(component)
self._classify_components()
remove.__doc__ = Model1D.remove.__doc__
def _classify_components(self):
"""Classify components between background and ionization edge
components.
        This method should be called every time that components are added and
removed. An ionization edge becomes background when its onset falls to
the left of the first non-masked energy channel. The ionization edges
are stored in a list in the `edges` attribute. They are sorted by
increasing `onset_energy`. The background components are stored in
`_background_components`.
"""
self.edges = []
self._background_components = []
for component in self:
if isinstance(component, EELSCLEdge):
if component.onset_energy.value < \
self.axis.axis[self.channel_switches][0]:
component.isbackground = True
if component.isbackground is not True:
self.edges.append(component)
else:
component.fine_structure_active = False
component.fine_structure_coeff.free = False
elif (isinstance(component, PowerLaw) or
component.isbackground is True):
self._background_components.append(component)
if self.edges:
self.edges.sort(key=EELSCLEdge._onset_energy)
self.resolve_fine_structure()
if len(self._background_components) > 1:
self._backgroundtype = "mix"
elif len(self._background_components) == 1:
self._backgroundtype = \
self._background_components[0].__repr__()
bg = self._background_components[0]
if isinstance(bg, PowerLaw) and self.edges and not \
bg.A.map["is_set"].any():
self.two_area_background_estimation()
@property
def _active_edges(self):
return [edge for edge in self.edges if edge.active]
@property
def _active_background_components(self):
return [bc for bc in self._background_components if bc.active]
def _add_edges_from_subshells_names(self, e_shells=None):
"""Create the Edge instances and configure them appropiately
Parameters
----------
e_shells : list of strings
"""
if self.signal._are_microscope_parameters_missing():
raise ValueError(
"The required microscope parameters are not defined in "
"the EELS spectrum signal metadata. Use "
"``set_microscope_parameters`` to set them."
)
if e_shells is None:
e_shells = list(self.signal.subshells)
e_shells.sort()
master_edge = EELSCLEdge(e_shells.pop(), self.GOS)
# If self.GOS was None, the GOS is set by eels_cl_edge so
# we rea
|
IndigoTiger/ezzybot
|
ezzybot/limit.py
|
Python
|
gpl-3.0
| 1,649 | 0.004245 |
from .util import bucket as tokenbucket
from . import wrappers
class Limit(object):
def __init__(self, command_limiting_initial_tokens, command_limiting_message_cost, command_limiting_restore_rate, override, permissions):
"""limit(20, 4, 0.13, ["admin"], {"admin": "user!*@*"})
Limits the use of commands
Arguments:
command_limiting_initial_tokens {Integer} -- Initial tokens for tokenbucket
command_limiting_message_cost {Integer} -- Message cost for tokenbucket
command_limiting_restore_rate {Integer} -- Restore rate for token bucket
override {List} -- List of permissions to override the limit
permissions {Dict} -- All of the bots permissions.
"""
self.command_limiting_initial_tokens = command_limiting_initial_tokens
self.command_limiting_message_cost = command_limiting_message_cost
        self.command_limiting_restore_rate = command_limiting_restore_rate
self.buckets = {}
self.permissions = wrappers.permissions_class(permissions)
self.override = override
def command_limiter(self, info):
#Check if admin/whatever specified
if self.permissions.check(self.override, info.mask):
return True
if info.nick not in self.buckets:
bucket = tokenbucket.TokenBucket(self.command_limiting_initial_tokens, self.command_limiting_restore_rate)
self.buckets[info.nick] = bucket
else:
bucket = self.buckets[info.nick]
if bucket.consume(self.command_limiting_message_cost):
return True
return False
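# A hypothetical smoke test for Limit. FakeInfo stands in for the object
# ezzybot normally supplies (anything exposing .nick and .mask); the
# permission spec mirrors the docstring example and is illustrative only.
def _demo_limit():
    class FakeInfo(object):
        nick = "someuser"
        mask = "someuser!ident@example.com"

    limiter = Limit(20, 4, 0.13, ["admin"], {"admin": "admin!*@*"})
    for attempt in range(10):
        allowed = limiter.command_limiter(FakeInfo())
        print(attempt, "allowed" if allowed else "rate limited")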
|
bgschiller/winnow
|
winnow/values.py
|
Python
|
mit
| 3,473 | 0.004319 |
'''winnow/values.py
vivify and normalize each of the different field types:
- string
 - collection (values are strings, left operand is collection)
- numeric
- bool
- date
To vivify is to turn from a string representation into a
live object. So for '2014-01-21T16:34:02', we would make a
datetime object. Vivify functions should also accept their
return type. So vivify_absolute_date(datetime.datetime.now())
should just return the datetime object.
To stringify is to serialize. This would be like turning the
list [1, 2, 3] into the JSON string "[1,2,3]"
'''
from __future__ import unicode_literals
import json
from datetime import datetime
from dateutil.parser import parse as parse_date
from six import string_types
from .error import WinnowError
from .relative_dates import valid_rel_date_values
# TODO : Since we're storing filters denormalized as JSON now, we probably need
# Less of this crazy vivification stuff. For another day, perhaps.
def stringify_string(value):
return str(value)
def stringify_collection(value):
return json.dumps(value)
stringify_single_choice = json.dumps
stringify_bool = str
def stringify_numeric(value):
if isinstance(value, float):
return '{:.10f}'.format(value)
return str(value)
stringify_absolute_date = datetime.isoformat
def vivify_string(value): # request for comment -- tighter check on this?
return str(value)
def vivify_collection(value):
try:
if not isinstance(value, list):
value = json.loads(value)
assert isinstance(value, list), "collection values must be lists"
assert all(isinstance(v, (dict, string_types)) for v in value), "elements of collection must be dicts (or strings, for backwards compat)"
if value and isinstance(value[0], dict): # backwards compat check.
value = [v['id'] for v in value]
return value
except (ValueError, AssertionError) as e:
raise WinnowError(e)
def vivify_single_choice(value):
try:
if not isinstance(value, dict):
value = json.loads(value)
assert isinstance(value, dict), "single choice values must be a dict"
assert 'id' in value and 'name' in value, "Choice must have keys for 'name' and 'id'"
return value
except (ValueError, AssertionError) as e:
raise WinnowError(e)
def vivify_numeric(value):
if value == '':
return 0
if isinstance(value, (float, int)):
return value
try:
return int(value)
except ValueError:
pass # int is more restrictive -- let's not get hasty
# and reject before we see if it's a float.
try:
return float(value)
except ValueError as e:
raise WinnowError(e)
def vivify_relative_date(value):
if value.lower().replace(' ', '_') in valid_rel_date_values:
return value.lower().replace(' ', '_')
raise WinnowError("Invalid relative date value: '{}'".format(value))
stringify_relative_date = vivify_relative_date
def vivify_absolute_date(value):
try:
return parse_date(value)
except TypeError:
raise WinnowError("invalid literal for date range: '{}'".format(value))
def vivify_bool(value):
if isinstance(value, string_types) and value.lower() in ('true', 'false'):
return value.lower() == 'true'
else:
assert isinstance(value, bool), "expected boolean or string. received '{}'".format(value)
return value
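# A few illustrative round-trips (added for orientation; the expected
# values follow directly from the functions above).
def _examples():
    assert vivify_numeric("3") == 3
    assert vivify_numeric("3.5") == 3.5
    assert vivify_bool("TRUE") is True
    assert vivify_collection('[{"id": "a", "name": "A"}]') == ["a"]
    assert stringify_collection(["a", "b"]) == '["a", "b"]'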
|
antoinecarme/pyaf
|
tests/artificial/transf_Quantization/trend_MovingAverage/cycle_0/ar_/test_artificial_1024_Quantization_MovingAverage_0__20.py
|
Python
|
bsd-3-clause
| 272 | 0.084559 |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "MovingAverage", cycle_length = 0, transform = "Quantization", sigma = 0.0, exog_count = 20, ar_order = 0);
|
magellancloud/poncho
|
poncho/common/utils.py
|
Python
|
bsd-3-clause
| 1,121 | 0.00446 |
#!/usr/bin/env python
"""
poncho.common.utils : Utility Functions
"""
from datetime import datetime
def readable_datetime(dt):
"""Turn a datetime into something readable, with time since or until."""
if dt is None:
return ""
dt = dt.replace(microsecond=0)
now = datetime.now().replace(microsecond=0)
low = min(dt, now)
hi = max(dt, now)
delta = hi - low
relative_times = [
        ('year', delta.days // 365),
('month', delta.days // 30),
('week', delta.days // 7),
('day', delta.days),
('hour', delta.seconds // 60 // 60 % 24),
        ('min', delta.seconds // 60 % 60),
('sec', delta.seconds % 60),
]
modifier = "from now"
if dt < now:
modifier = "ago"
two_sizes = []
    for name, amount in relative_times:
        if len(two_sizes) == 2:
            break
        if amount > 0:
            name += "s" if amount != 1 else ""
            two_sizes.append("%s %s" % (amount, name))
if len(two_sizes):
return "%s (%s %s)" % (dt, ", ".join(two_sizes), modifier)
return "%s (right now)" % (dt)
|
trafi/djinni
|
test-suite/generated-src/python/map_record.py
|
Python
|
apache-2.0
| 1,058 | 0.007561 |
# AUTOGENERATED FILE - DO NOT MODIFY!
# This file generated by Djinni from map.djinni
from djinni.support import MultiSet # default imported in all files
from djinni.exception import CPyException # default imported in all files
from djinni.pycffi_marshal import CPyObject, CPyObjectProxy, CPyPrimitive, CPyRecord, CPyString
from dh__map_int32_t_int32_t import MapInt32TInt32THelper
from dh__map_int32_t_int32_t import MapInt32TInt32TProxy
from dh__map_string_int64_t import MapStringInt64THelper
from dh__map_string_int64_t import MapStringInt64TProxy
from PyCFFIlib_cffi import ffi, lib
from djinni import exception # this forces run of __init__.py which gives cpp option to call back into py to create exception
class MapRecord:
c_data_set = MultiSet()
@staticmethod
def check_c_data_set_empty():
assert len(MapRecord.c_data_set) == 0
MapStringInt64THelper.check_c_data_set_empty()
MapInt32TInt32THelper.check_c_data_set_empty()
def __init__(self, map, imap):
self.map = map
self.imap = imap
|
jtopjian/st2
|
st2common/st2common/util/misc.py
|
Python
|
apache-2.0
| 1,245 | 0 |
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
__all__ = [
'prefix_dict_keys'
]
def prefix_dict_keys(dictionary, prefix='_'):
"""
Prefix dictionary keys with a provided prefix.
:param dictionary: Dictionary whose keys to prefix.
:type dictionary: ``dict``
:param prefix: Key prefix.
:type prefix: ``str``
:rtype: ``dict``:
"""
result = {}
for key, value in six.iteritems(dictionary):
result['%s%s' % (prefix, key)] = value
return result
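# Example (added for illustration):
def _example_prefix_dict_keys():
    result = prefix_dict_keys({'count': 1, 'name': 'job'}, prefix='st2_')
    assert result == {'st2_count': 1, 'st2_name': 'job'}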
|
elatomczyk/dook
|
coworkok/bin/pilconvert.py
|
Python
|
gpl-3.0
| 2,354 | 0.002124 |
#!/home/ela/Python_Django/coworkok/coworkok/bin/python
#
# The Python Imaging Library.
# $Id$
#
# convert image files
#
# History:
# 0.1 96-04-20 fl Created
# 0.2 96-10-04 fl Use draft mode when converting images
# 0.3 96-12-30 fl Optimize output (PNG, JPEG)
# 0.4 97-01-18 fl Made optimize an option (PNG, JPEG)
# 0.5 98-12-30 fl Fixed -f option (from Anthony Baxter)
#
from __future__ import print_function
import getopt, string, sys
from PIL import Image
def usage():
print("PIL Convert 0.5
|
/1998-12-30 -- convert image files")
print("Usage: pilconvert [option] infile outfile")
print()
print("Options:")
print()
print(" -c <format> convert to format (default is given by extension)")
print()
print(" -g convert to greyscale")
print(" -p convert to palette image (using standard palette)")
print(" -r convert to rgb")
print()
print(" -o optimize output (trade speed for size)")
print(" -q <value> set compression quality (0-100, JPEG only)")
print()
print(" -f list supported file formats")
sys.exit(1)
if len(sys.argv) == 1:
usage()
try:
opt, argv = getopt.getopt(sys.argv[1:], "c:dfgopq:r")
except getopt.error as v:
print(v)
sys.exit(1)
format = None
convert = None
options = { }
for o, a in opt:
if o == "-f":
Image.init()
id = sorted(Image.ID)
print("Supported formats (* indicates output format):")
for i in id:
if i in Image.SAVE:
print(i+"*", end=' ')
else:
print(i, end=' ')
sys.exit(1)
elif o == "-c":
format = a
if o == "-g":
convert = "L"
elif o == "-p":
convert = "P"
elif o == "-r":
convert = "RGB"
elif o == "-o":
options["optimize"] = 1
elif o == "-q":
options["quality"] = string.atoi(a)
if len(argv) != 2:
usage()
try:
im = Image.open(argv[0])
if convert and im.mode != convert:
im.draft(convert, im.size)
im = im.convert(convert)
if format:
im.save(argv[1], format, **options)
else:
im.save(argv[1], **options)
except:
print("cannot convert image", end=' ')
print("(%s:%s)" % (sys.exc_info()[0], sys.exc_info()[1]))
|
ychen820/microblog
|
y/google-cloud-sdk/platform/google_appengine/lib/grizzled/grizzled/db/base.py
|
Python
|
bsd-3-clause
| 32,027 | 0.001186 |
# $Id: 969e4c5fd51bb174563d06c1357489c2742813ec $
"""
Base classes for enhanced DB drivers.
"""
from __future__ import absolute_import
__docformat__ = "restructuredtext en"
# ---------------------------------------------------------------------------
# Imports
# ---------------------------------------------------------------------------
import re
import time
import os
import sys
from datetime import date, datetime
from collections import namedtuple
from grizzled.exception import ExceptionWithMessage
from grizzled.decorators import abstract
# ---------------------------------------------------------------------------
# Exports
# ---------------------------------------------------------------------------
__all__ = ['DBDriver', 'DB', 'Cursor', 'DBError', 'Error', 'Warning',
'TableMetadata', 'IndexMetadata', 'RDBMSMetadata']
# ---------------------------------------------------------------------------
# Globals
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# Classes
# ---------------------------------------------------------------------------
class DBError(ExceptionWithMessage):
"""
Base class for all DB exceptions.
"""
pass
class Error(DBError):
"""Thrown to indicate an error in the ``db`` module."""
pass
class Warning(DBError):
"""Thrown to indicate an error in the ``db`` module."""
pass
TableMetadata = namedtuple('TableMetadata', ['column_name',
'type_string',
'max_char_size',
'precision',
'scale',
'nullable'])
IndexMetadata = namedtuple('IndexMetadata', ['index_name',
'index_columns',
'description'])
RDBMSMetadata = namedtuple('RDBMSMetadata', ['vendor', 'product', 'version'])
class Cursor(object):
"""
Class for DB cursors returned by the ``DB.cursor()`` method. This class
conforms to the Python DB cursor interface, including the following
attributes.
:IVariables:
description : tuple
A read-only attribute that is a sequence of 7-item tuples, one per
column, from the last query executed. The tuple values are:
*(name, typecode, displaysize, internalsize, precision, scale)*
rowcount : int
A read-only attribute that specifies the number of rows
fetched in the last query, or -1 if unknown. *Note*: It's best
not to rely on the row count, because some database drivers
(such as SQLite) don't report valid row counts.
"""
def __init__(self, cursor, driver):
"""
Create a new Cursor object, wrapping the underlying real DB API
cursor.
:Parameters:
cursor
the real DB API cursor object
driver
the driver that is creating this object
"""
self.__cursor = cursor
self.__driver = driver
self.__description = None
self.__rowcount = -1
def __get_description(self):
return self.__description
description = property(__get_description,
doc='The description field. See class docs.')
def __get_rowcount(self):
return self.__rowcount
rowcount = property(__get_rowcount,
doc='Number of rows from last query, or -1')
    def close(self):
"""
Close the cursor.
:raise Warning: Non-fatal warning
        :raise Error: Error; unable to close
"""
dbi = self.__driver.get_import()
try:
return self.__cursor.close()
except dbi.Warning, val:
raise Warning(val)
except dbi.Error, val:
raise Error(val)
def execute(self, statement, parameters=None):
"""
Execute a SQL statement string with the given parameters.
'parameters' is a sequence when the parameter style is
'format', 'numeric' or 'qmark', and a dictionary when the
style is 'pyformat' or 'named'. See ``DB.paramstyle()``.
:Parameters:
statement : str
the SQL statement to execute
parameters : list
parameters to use, if the statement is parameterized
:raise Warning: Non-fatal warning
:raise Error: Error
"""
dbi = self.__driver.get_import()
try:
if parameters:
result = self.__cursor.execute(statement, parameters)
else:
result = self.__cursor.execute(statement)
try:
self.__rowcount = self.__cursor.rowcount
except AttributeError:
self.__rowcount = -1
self.__description = self.__cursor.description
return result
except dbi.Warning, val:
raise Warning(val)
except dbi.Error, val:
raise Error(val)
except:
raise Error(sys.exc_info()[1])
def executemany(self, statement, *parameters):
"""
Execute a SQL statement once for each item in the given parameters.
:Parameters:
statement : str
the SQL statement to execute
parameters : sequence
a sequence of sequences when the parameter style
is 'format', 'numeric' or 'qmark', and a sequence
of dictionaries when the style is 'pyformat' or
'named'.
:raise Warning: Non-fatal warning
:raise Error: Error
"""
dbi = self.__driver.get_import()
try:
result = self.__cursor.executemany(statement, *parameters)
self.__rowcount = self.__cursor.rowcount
self.__description = self.__cursor.description
return result
except dbi.Warning, val:
raise Warning(val)
except dbi.Error, val:
raise Error(val)
executeMany = executemany
def fetchone(self):
"""
Returns the next result set row from the last query, as a sequence
of tuples. Raises an exception if the last statement was not a query.
:rtype: tuple
:return: Next result set row
:raise Warning: Non-fatal warning
:raise Error: Error
"""
dbi = self.__driver.get_import()
try:
return self.__cursor.fetchone()
except dbi.Warning, val:
raise Warning(val)
except dbi.Error, val:
raise Error(val)
def fetchall(self):
"""
Returns all remaining result rows from the last query, as a sequence
of tuples. Raises an exception if the last statement was not a query.
:rtype: list of tuples
:return: List of rows, each represented as a tuple
:raise Warning: Non-fatal warning
:raise Error: Error
"""
dbi = self.__driver.get_import()
try:
return self.__cursor.fetchall()
except dbi.Warning, val:
raise Warning(val)
except dbi.Error, val:
raise Error(val)
fetchAll = fetchall
def fetchmany(self, n):
"""
Returns up to n remaining result rows from the last query, as a
sequence of tuples. Raises an exception if the last statement was
not a query.
:Parameters:
n : int
maximum number of result rows to get
:rtype: list of tuples
:return: List of rows, each represented as a tuple
:raise Warning: Non-fatal warning
:raise Error: Error
"""
dbi = self.__driver.get_import()
try:
self.__cursor.fetchmany(n)
except dbi.Warning, val:
raise Warning(val)
except dbi.Error, val:
raise Error(val)
fetchMany = fetchmany
|
chippey/gaffer
|
python/GafferTest/UndoTest.py
|
Python
|
bsd-3-clause
| 5,828 | 0.053706 |
##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from __future__ import with_statement
import unittest
import IECore
import Gaffer
import GafferTest
class UndoTest( GafferTest.TestCase ) :
def testSetName( self ) :
s = Gaffer.ScriptNode()
self.assertEqual( s.undoAvailable(), False )
self.assertEqual( s.redoAvailable(), False )
self.assertRaises( Exception, s.undo )
n = Gaffer.Node()
s["a"] = n
self.assertEqual( n.getName(), "a" )
n.setName( "b" )
self.assertEqual( n.getName(), "b" )
self.assertEqual( s.undoAvailable(), False )
self.assertEqual( s.redoAvailable(), False )
self.assertRaises( Exception, s.undo )
with Gaffer.UndoContext( s ) :
n.setName( "c" )
self.assertEqual( s.undoAvailable(), True )
self.assertEqual( s.redoAvailable(), False )
s.undo()
self.assertEqual( s.undoAvailable(), False )
self.assertEqual( s.redoAvailable(), True )
self.assertEqual( n.getName(), "b" )
s.redo()
self.assertEqual( s.undoAvailable(), True )
self.assertEqual( s.redoAvailable(), False )
self.assertEqual( n.getName(), "c" )
self.assertRaises( Exception, s.redo )
def testSetInput( self ) :
s = Gaffer.ScriptNode()
n1 = GafferTest.AddNode()
n2 = GafferTest.AddNode()
s["n1"] = n1
s["n2"] = n2
with Gaffer.UndoContext( s ) :
n1["op1"].setInput( n2["sum"] )
self.assert_( n1["op1"].getInput().isSame( n2["sum"] ) )
s.undo()
self.assertEqual( n1["op1"].getInput(), None )
s.redo()
self.assert_( n1["op1"].getInput().isSame( n2["sum"] ) )
def testChildren( self ) :
s = Gaffer.ScriptNode()
n = Gaffer.Node()
self.assertEqual( n.parent(), None )
with Gaffer.UndoContext( s ) :
s["n"] = n
self.assert_( n.parent().isSame( s ) )
s.undo()
self.assertEqual( n.parent(), None )
s.redo()
self.assert_( n.parent().isSame( s ) )
def testDelete( self ) :
s = Gaffer.ScriptNode()
n1 = GafferTest.AddNode()
n2 = GafferTest.AddNode()
n3 = GafferTest.AddNode()
s.addChild( n1 )
s.addChild( n2 )
s.addChild( n3 )
n2["op1"].setInput( n1["sum"] )
n2["op2"].setInput( n1["sum"] )
n3["op1"].setInput( n2["sum"] )
n3["op2"].setInput( n2["sum"] )
self.assert_( n2["op1"].getInput().isSame( n1["sum"] ) )
self.assert_( n2["op2"].getInput().isSame( n1["sum"] ) )
self.assert_( n3["op1"].getInput().isSame( n2["sum"] ) )
self.assert_( n3["op2"].getInput().isSame(
|
n2["sum"] ) )
with Gaffer.UndoContext( s ) :
s.deleteNodes( filter = Gaffer.StandardSet( [ n2 ] ) )
self.assertEqual( n2["op1"].getInput(), None )
self.assertEqual( n2["op2"].getInput(), None )
self.assert_( n3["op1"].getInput().isSame( n1["sum"] ) )
self.assert_( n3["op2"].getInput().isSame( n1["sum"] ) )
s.undo()
self.assert_( n2["op1"].getInput().isSame( n1["sum"] ) )
self.assert_( n2["op2"].getInput().isSame( n1["sum
|
"] ) )
self.assert_( n3["op1"].getInput().isSame( n2["sum"] ) )
self.assert_( n3["op2"].getInput().isSame( n2["sum"] ) )
with Gaffer.UndoContext( s ) :
s.deleteNodes( filter = Gaffer.StandardSet( [ n2 ] ), reconnect = False )
self.assertEqual( n2["op1"].getInput(), None )
self.assertEqual( n2["op2"].getInput(), None )
self.assertEqual( n3["op1"].getInput(), None )
self.assertEqual( n3["op2"].getInput(), None )
s.undo()
self.assert_( n2["op1"].getInput().isSame( n1["sum"] ) )
self.assert_( n2["op2"].getInput().isSame( n1["sum"] ) )
self.assert_( n3["op1"].getInput().isSame( n2["sum"] ) )
self.assert_( n3["op2"].getInput().isSame( n2["sum"] ) )
def testDisable( self ) :
s = Gaffer.ScriptNode()
s["n"] = GafferTest.AddNode()
with Gaffer.UndoContext( s, Gaffer.UndoContext.State.Disabled ) :
s["n"]["op1"].setValue( 10 )
self.assertFalse( s.undoAvailable() )
with Gaffer.UndoContext( s, Gaffer.UndoContext.State.Enabled ) :
with Gaffer.UndoContext( s, Gaffer.UndoContext.State.Disabled ) :
s["n"]["op1"].setValue( 20 )
self.assertFalse( s.undoAvailable() )
if __name__ == "__main__":
unittest.main()
|
jmichel-otb/s2p
|
s2plib/sift.py
|
Python
|
agpl-3.0
| 5,706 | 0.001577 |
# Copyright (C) 2015, Carlo de Franchis <[email protected]>
# Copyright (C) 2015, Gabriele Facciolo <[email protected]>
# Copyright (C) 2015, Enric Meinhardt <[email protected]>
from __future__ import print_function
import os
import numpy as np
from s2plib import common
from s2plib import rpc_utils
from s2plib import estimation
from s2plib.config import cfg
def image_keypoints(im, x, y, w, h, max_nb=None, extra_params=''):
"""
Runs SIFT (the keypoints detection and description only, no matching).
It uses Ives Rey Otero's implementation published in IPOL:
http://www.ipol.im/pub/pre/82/
Args:
im: path to the input image
max_nb (optional): maximal number of keypoints. If more keypoints are
detected, those at smallest scales are discarded
extra_params (optional): extra parameters to be passed to the sift
binary
Returns:
path to the file containing the list of descriptors
"""
keyfile = common.tmpfile('.txt')
if max_nb:
cmd = "sift_roi %s %d %d %d %d --max-nb-pts %d %s -o %s" % (im, x, y, w,
h, max_nb,
extra_params,
keyfile)
else:
cmd = "sift_roi %s %d %d %d %d %s -o %s" % (im, x, y, w, h,
extra_params, keyfile)
common.run(cmd)
return keyfile
def keypoints_match(k1, k2, method='relative', sift_thresh=0.6, F=None,
model=None):
"""
Find matches among two lists of sift keypoints.
Args:
k1, k2: paths to text files containing the lists of sift descriptors
method (optional, default is 'relative'): flag ('relative' or
            'absolute') indicating whether to use absolute distance or relative
distance
sift_thresh (optional, default is 0.6): threshold for distance between SIFT
descriptors. These descriptors are 128-vectors, whose coefficients
range from 0 to 255, thus with absolute distance a reasonable value
for this threshold is between 200 and 300. With relative distance
(ie ratio between distance to nearest and distance to second
nearest), the commonly used value for the threshold is 0.6.
F (optional): affine fundamental matrix
model (optional, default is None): model imposed by RANSAC when
searching the set of inliers. If None all matches are considered as
inliers.
Returns:
if any, a numpy 2D array containing the list of inliers matches.
"""
# compute matches
mfile = common.tmpfile('.txt')
cmd = "matching %s %s -%s %f -o %s" % (k1, k2, method, sift_thresh, mfile)
if F is not None:
fij = ' '.join(str(x) for x in [F[0, 2], F[1, 2], F[2, 0],
F[2, 1], F[2, 2]])
cmd = "%s -f \"%s\"" % (cmd, fij)
common.run(cmd)
matches = np.loadtxt(mfile)
if matches.ndim == 2: # filter outliers with ransac
if model == 'fundamental' and len(matches) >= 7:
common.run("ransac fmn 1000 .3 7 %s < %s" % (mfile, mfile))
elif model == 'homography' and len(matches) >= 4:
common.run("ransac hom 1000 1 4 /dev/null /dev/null %s < %s" % (mfile,
mfile))
elif model == 'hom_fund' and len(matches) >= 7:
common.run("ransac hom 1000 2 4 /dev/null /dev/null %s < %s" % (mfile,
mfile))
common.run("ransac fmn 1000 .2 7 %s < %s" % (mfile, mfile))
if os.stat(mfile).st_size > 0: # return numpy array of matches
return np.loadtxt(mfile)
def matches_on_rpc_roi(im1, im2, rpc1, rpc2, x, y, w, h):
"""
Compute a list of SIFT matches between two images on a given roi.
The corresponding roi in the second image is determined using the rpc
functions.
Args:
im1, im2: paths to two large tif images
rpc1, rpc2: two instances of the rpc_model.RPCModel class
x, y, w, h: four integers defining the rectangular ROI in the first
image. (x, y) is the top-left corner, and (w, h) are the dimensions
of the rectangle.
Returns:
matches: 2D numpy array containing a list of matches. Each line
contains one pair of points, ordered as x1 y1 x2 y2.
The coordinate system is that of the full images.
"""
x2, y2, w2, h2 = rpc_utils.corresponding_roi(rpc1, rpc2, x, y, w, h)
# estimate an approximate affine fundamental matrix from the rpcs
rpc_matches = rpc_utils.matches_from_rpc(rpc1, rpc2, x, y, w, h, 5)
F = estimation.affine_fundamental_matrix(rpc_matches)
# if less than 10 matches, lower thresh_dog. An alternative would be ASIFT
thresh_dog = 0.0133
for i in range(2):
p1 = image_keypoints(im1, x, y, w, h, extra_params='--thresh-dog %f' % thresh_dog)
p2 = image_keypoints(im2, x2, y2, w2, h2, extra_params='--thresh-dog %f' % thresh_dog)
matches = keypoints_match(p1, p2, 'relative', cfg['sift_match_thresh'],
F, model='fundamental')
if matches is not None and matches.ndim == 2 and matches.shape[0] > 10:
break
thresh_dog /= 2.0
else:
print("WARNING: sift.matches_on_rpc_roi: found no matches.")
return None
return matches
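# Hypothetical driver (not in the original module): assumes the sift_roi
# and matching binaries built by s2p are on PATH and both images exist.
def _demo_sift_matching(im1='im1.tif', im2='im2.tif'):
    k1 = image_keypoints(im1, 0, 0, 512, 512, max_nb=2000)
    k2 = image_keypoints(im2, 0, 0, 512, 512, max_nb=2000)
    matches = keypoints_match(k1, k2, method='relative', sift_thresh=0.6)
    if matches is not None:
        print('found {} matches'.format(len(matches)))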
|
asnorkin/sentiment_analysis
|
site/lib/python2.7/site-packages/sklearn/neighbors/regression.py
|
Python
|
mit
| 11,000 | 0 |
"""Nearest Neighbor Regression"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from .base import _get_weights, _check_weights, NeighborsBase, KNeighborsMixin
from .base import RadiusNeighborsMixin, SupervisedFloatMixin
from ..base import RegressorMixin
from ..utils import check_array
class KNeighborsRegressor(NeighborsBase, KNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on k-nearest neighbors.
The target is predicted by local interpolation of the targets
associated of the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`k_neighbors` queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDtree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Doesn't affect :meth:`fit` method.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsRegressor
>>> neigh = KNeighborsRegressor(n_neighbors=2)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
RadiusNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
        neighbors, neighbor `k+1` and `k`, have identical distances
        but different labels, the results will depend on the ordering of the
training data.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=1,
**kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of int, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.mean(_y[neigh_ind], axis=1)
else:
y_pred = np.empty((X.shape[0], _y.shape[1]), dtype=np.float64)
denom = np.sum(weights, axis=1)
for j in range(_y.shape[1]):
num = np.sum(_y[neigh_ind, j] * weights, axis=1)
y_pred[:, j] = num / denom
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
class RadiusNeighborsRegressor(NeighborsBase, RadiusNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on neighbors within a fixed radius.
The target is predicted by local interpolation of the targets
associated of the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
radius : float, optional (default = 1.0)
Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances,
and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
        the distance metric to use for the tree. The default metric is
        minkowski, and with p=2 is equivalent to the standard Euclidean
        metric.
vlinhart/django-smsbrana | smsbrana/views.py | Python | bsd-3-clause | 1,174 | 0.001704 |
# -*- coding: utf-8 -*-
from datetime import datetime
from django.http import HttpResponse
from smsbrana import SmsConnect
from smsbrana import signals
from smsbrana.const import DELIVERY_STATUS_DELIVERED, DATETIME_FORMAT
from smsbrana.models import SentSms
def smsconnect_notification(request):
sc = SmsConnect()
result = sc.inbox()
# print result
for delivered in result['delivery_report']:
sms_id = delivered['idsms']
        if delivered['status'] != DELIVERY_STATUS_DELIVERED:
continue
try:
sms = SentSms.objects.get(sms_id=sms_id)
if sms.delivered:
continue
sms.delivered = True
            sms.delivered_date = datetime.strptime(delivered['time'], DATETIME_FORMAT)
sms.save()
except SentSms.DoesNotExist:
# logger.error('sms delivered which wasn\'t sent' + str(delivered))
pass
# delete the inbox if there are 100+ items
if len(result['delivery_report']) > 100:
sc.inbox(delete=True)
signals.smsconnect_notification_received.send(sender=None, inbox=result, request=request)
return HttpResponse('OK')
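# Editor's sketch (not part of the original file; URL pattern and import path
# are assumptions): the notification view would typically be wired up in the
# project's urls.py, e.g.
#
#     from django.conf.urls import url
#     from smsbrana.views import smsconnect_notification
#     urlpatterns = [url(r'^smsconnect/notification/$', smsconnect_notification)]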
nistormihai/superdesk-core | tests/io/feed_parsers/dpa_test.py | Python | agpl-3.0 | 2,118 | 0.000472 |
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import os
from superdesk.io.feed_parsers.dpa_iptc7901 import DPAIPTC7901FeedParser
from superdesk.tests import TestCase
def fixture(filename):
dirname = os.path.dirname(os.path.realpath(__file__))
return os.path.normpath(os.path.join(dirname, '../fixtures', filename))
class DPAIptcTestCase(TestCase):
parser = DPAIPTC7901FeedParser()
def open(self, filename):
provider = {'name': 'Test'}
return self.parser.parse(fixture(filename), provider)
def test_open_iptc7901_file(self):
with self.app.app_context():
item = self.open('IPTC7901.txt')
self.assertEqual('text', item['type'])
self.assertEqual('062', item['ingest_provider_sequence'])
self.assertEqual('i', item['anpa_category'][0]['qcode'])
self.assertEqual(211, item['word_count'])
self.assertEqual('Germany Social Democrats: Coalition talks with Merkel could fail =', item['headline'])
self.assertRegex(item['body_html'], '^<p></p><p>Negotiations')
self.assertEqual('Germany-politics', item['slugline'])
self.assertEqual(4, item['priority'])
self.assertEqual([{'qcode': 'i'}], item['anpa_category'])
self.assertTrue(item['ednote'].find('## Editorial contacts'))
self.assertEqual(item['dateline']['source'], 'dpa')
self.assertEqual(item['dateline']['located']['city'], 'Berlin')
def test_open_dpa_copyright(self):
with self.app.app_context():
            item = self.open('dpa_copyright.txt')
            self.assertEqual('text', item['type'])
self.assertEqual('rs', item['anpa_category'][0]['qcode'])
self.assertEqual('(Achtung)', item['headline'])
self.assertEqual('Impressum', item['slugline'])
maxalbert/tohu | tohu/v6/custom_generator/utils.py | Python | mit | 3,014 | 0.00365 |
import attr
import pandas as pd
import re
from ..base import TohuBaseGenerator
from ..logging import logger
__all__ = ['get_tohu_items_name', 'make_tohu_items_class']
def make_tohu_items_class(clsname, attr_names):
"""
Parameters
----------
clsname: string
Name of the class to be created
attr_names: list of strings
Names of the attributes of the class to be created
"""
item_cls = attr.make_class(clsname, {name: attr.ib() for name in attr_names}, repr=False, cmp=True, frozen=True)
def new_repr(self):
all_fields = ', '.join([f'{name}={repr(value)}' for name, value in attr.asdict(self).items()])
return f'{clsname}({all_fields})'
orig_eq = item_cls.__eq__
def new_eq(self, other):
"""
Custom __eq__() method which also allows comparisons with
tuples and dictionaries. This is mostly for convenience
during testing.
"""
if isinstance(other, self.__class__):
return orig_eq(self, other)
else:
if isinstance(other, tuple):
return attr.astuple(self) == other
elif isinstance(other, dict):
return attr.asdict(self) == other
else:
return NotImplemented
item_cls.__repr__ = new_repr
item_cls.__eq__ = new_eq
item_cls.keys = lambda self: attr_names
item_cls.__getitem__ = lambda self, key: getattr(self, key)
item_cls.as_dict = lambda self: attr.asdict(self)
item_cls.to_series = lambda self: pd.Series(attr.asdict(self))
return item_cls
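# Illustrative usage sketch (editor's addition; 'Point' and its attributes are
# invented for demonstration):
#
#     Point = make_tohu_items_class('Point', ['x', 'y'])
#     p = Point(x=1, y=2)
#     p == (1, 2)            # True -- the custom __eq__ accepts tuples
#     p == {'x': 1, 'y': 2}  # True -- and dicts
#     p.to_series()          # pandas Series indexed by ['x', 'y']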
def get_tohu_items_name(cls):
"""
Return a string which defines the name of the namedtuple class which will be used
to produce items for the custom generator.
By default this will be the first part of the class name (before '...Generator'),
for example:
FoobarGenerator -> Foobar
QuuxGenerator -> Quux
However, it can be set explicitly by the user by defining `__tohu_items_name__`
in the class definition, for example:
class Quux(CustomGenerator):
__tohu_items_name__ = 'MyQuuxItem'
"""
assert issubclass(cls, TohuBaseGenerator)
try:
tohu_items_name = cls.__dict__['__tohu_items_name__']
logger.debug(f"Using item class name '{tohu_items_name}' (derived from attribute '__tohu_items_name__')")
except KeyError:
m = re.match('^(.*)Generator$', cls.__name__)
if m is not None:
tohu_items_name = m.group(1)
logger.debug(f"Using item class name '{tohu_items_name}' (derived from custom generator name)")
else:
msg = (
"Cannot derive class name for items to be produced by custom generator. "
"Please set '__tohu_items_name__' at the top of the custom generator's "
"definition or change its name so that it ends in '...Generator'"
)
raise ValueError(msg)
return tohu_items_name
hujiajie/chromium-crosswalk | third_party/WebKit/Source/devtools/scripts/build_applications.py | Python | bsd-3-clause | 1,259 | 0.003971 |
#!/usr/bin/env python
#
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Invokes concatenate_application_code for applications specified on the command line.
"""
from os import path
import concatenate_application_code
import modular_build
import sys
try:
    import simplejson as json
except ImportError:
import json
def main(argv):
try:
input_path_flag_index = argv.index('--input_path')
input_path = argv[input_path_flag_index + 1]
output_path_flag_index = argv.index('--output_path')
        output_path = argv[output_path_flag_index + 1]
application_names = argv[1:input_path_flag_index]
debug_flag_index = argv.index('--debug')
minify = argv[debug_flag_index + 1] == '0'
except:
print('Usage: %s app_1 app_2 ... app_N --input_path <input_path> --output_path <output_path> --debug <0_or_1>' % argv[0])
raise
loader = modular_build.DescriptorLoader(input_path)
for app in application_names:
concatenate_application_code.build_application(app, loader, input_path, output_path, minify)
if __name__ == '__main__':
sys.exit(main(sys.argv))
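# Example invocation (editor's note; application names and paths are
# hypothetical):
#
#     build_applications.py inspector toolbox \
#         --input_path front_end --output_path out/front_end --debug 0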
uwoseis/anemoi | anemoi/source.py | Python | mit | 8,029 | 0.011085 |
from .meta import BaseModelDependent
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.special import i0 as bessi0
class BaseSource(BaseModelDependent):
pass
class FakeSource(BaseSource):
def __call__(self, loc):
return loc
class SimpleSource(BaseSource):
def __init__(self, systemConfig):
super(BaseSource, self).__init__(systemConfig)
if hasattr(self, 'ny'):
raise NotImplementedError('Sources not implemented for 3D case')
self._z, self._y, self._x = np.mgrid[
self.zorig : self.dz * self.nz : self.dz,
self.yorig : self.dy * self.ny : self.dy,
self.xorig : self.dx * self.nx : self.dx
]
else:
self._z, self._x = np.mgrid[
self.zorig : self.dz * self.nz : self.dz,
self.xorig : self.dx * self.nx : self.dx
]
def dist(self, loc):
nsrc = len(loc)
if hasattr(self, 'ny'):
raise NotImplementedError('Sources not implemented for 3D case')
dist = np.sqrt((self._x.reshape((1, self.nz, self.ny, self.nx)) - loc[:,0].reshape((nsrc, 1, 1, 1)))**2
+ (self._y.reshape((1, self.nz, self.ny, self.nx)) - loc[:,1].reshape((nsrc, 1, 1, 1)))**2
+ (self._z.reshape((1, self.nz, self.ny, self.nx)) - loc[:,2].reshape((nsrc, 1, 1, 1)))**2)
else:
dist = np.sqrt((self._x.reshape((1, self.nz, self.nx)) - loc[:,0].reshape((nsrc, 1, 1)))**2
+ (self._z.reshape((1, self.nz, self.nx)) - loc[:,1].reshape((nsrc, 1, 1)))**2)
return dist
def vecIndexOf(self, loc):
return self.toVecIndex(self.linIndexOf(loc))
def linIndexOf(self, loc):
nsrc = loc.shape[0]
dists = self.dist(loc).reshape((nsrc, self.nrow))
return np.argmin(dists, axis=1)
def __call__(self, loc):
nsrc = loc.shape[0]
q = np.zeros((nsrc, self.nrow), dtype=np.complex128)
for i, index in enumerate(self.linIndexOf(loc)):
q[i,index] = 1.
return q.T
class StackedSimpleSource(SimpleSource):
def __call__(self, loc):
q = super(StackedSimpleSource, self).__call__(loc)
return np.vstack([q, np.zeros(q.shape, dtype=np.complex128)])
class SparseKaiserSource(SimpleSource):
initMap = {
# Argument Required Rename as ... Store as type
'ireg': (False, '_ireg', np.int64),
'freeSurf': (False, '_freeSurf', tuple),
}
HC_KAISER = {
1: 1.24,
2: 2.94,
3: 4.53,
4: 6.31,
5: 7.91,
6: 9.42,
7: 10.95,
8: 12.53,
9: 14.09,
10: 14.18,
}
def kws(self, offset):
'''
Finds 2D source terms to approximate a band-limited point source, based on
Hicks, Graham J. (2002) Arbitrary source and receiver positioning in finite-difference
schemes using Kaiser windowed sinc functions. Geophysics (67) 1, 156-166.
KaiserWindowedSinc(ireg, offset) --> 2D ndarray of size (2*ireg+1, 2*ireg+1)
Input offset is the 2D offsets in fractional gridpoints between the source location and
the nearest node on the modelling grid.
'''
        try:
            b = self.HC_KAISER[self.ireg]
        except KeyError:
            print('Kaiser windowed sinc function not implemented for half-width of %d!'%(self.ireg,))
            raise
freg = 2*self.ireg+1
xOffset, zOffset = offset
# Grid from 0 to freg-1
Zi, Xi = np.mgrid[:freg,:freg]
# Distances from source point
dZi = (zOffset + self.ireg - Zi)
dXi = (xOffset + self.ireg - Xi)
# Taper terms for decay function
with warnings.catch_warnings():
warnings.simplefilter('ignore')
tZi = np.nan_to_num(np.sqrt(1 - (dZi / self.ireg)**2))
tXi = np.nan_to_num(np.sqrt(1 - (dXi / self.ireg)**2))
tZi[tZi == np.inf] = 0
tXi[tXi == np.inf] = 0
# Actual tapers for Kaiser window
taperZ = bessi0(b*tZi) / bessi0(b)
taperX = bessi0(b*tXi) / bessi0(b)
# Windowed sinc responses in Z and X
responseZ = np.sinc(dZi) * taperZ
responseX = np.sinc(dXi) * taperX
# Combined 2D source response
result = responseX * responseZ
return result
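    # Editor's sanity note (not in the original source): for a source exactly
    # on a grid node, offset == (0, 0), kws() collapses to a discrete delta:
    # at the centre dZi == dXi == 0 so np.sinc(0) == 1 and the taper equals
    # bessi0(b)/bessi0(b) == 1, while np.sinc(k) == 0 for every other integer
    # k, leaving a single unit entry in the (2*ireg+1) x (2*ireg+1) stencil.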
def __call__(self, sLocs):
ireg = self.ireg
freeSurf = self.freeSurf
N = sLocs.shape[0]
M = self.nz * self.nx
# Scale source based on the cellsize so that changing the grid doesn't
# change the overall source amplitude
srcScale = 1. / (self.dx * self.dz)
qI = self.linIndexOf(sLocs)
if ireg == 0:
# Closest gridpoint
q = sp.coo_matrix((srcScale, (np.arange(N), qI)), shape=(N, M))
else:
# Kaiser windowed sinc function
freg = 2*ireg+1
nnz = N * freg**2
lShift, sShift = np.mgrid[-ireg:ireg+1,-ireg:ireg+1]
shift = lShift * self.nx + sShift
entries = np.zeros((nnz,), dtype=np.complex128)
columns = np.zeros((nnz,))
rows = np.zeros((nnz,))
dptr = 0
            for i in range(N):
                Zi, Xi = (qI[i] // self.nx, np.mod(qI[i], self.nx))
offset = (sLocs[i][0] - Xi * self.dx, sLocs[i][1] - Zi * self.dz)
sourceRegion = self.kws(offset)
qshift = shift.copy()
if Zi < ireg:
index = ireg-Zi
if freeSurf[2]:
lift = np.flipud(sourceRegion[:index,:])
                    sourceRegion = sourceRegion[index:,:]
qshift = qshift[index:,:]
if freeSurf[2]:
sourceRegion[:index,:] -= lift
if Zi > self.nz-ireg-1:
index = self.nz-ireg-1 - Zi
if freeSurf[0]:
                        lift = np.flipud(sourceRegion[index:,:])
sourceRegion = sourceRegion[:index,:]
qshift = qshift[:index,:]
if freeSurf[0]:
sourceRegion[index:,:] -= lift
if Xi < ireg:
index = ireg-Xi
if freeSurf[3]:
lift = np.fliplr(sourceRegion[:,:index])
sourceRegion = sourceRegion[:,index:]
qshift = qshift[:,index:]
if freeSurf[3]:
sourceRegion[:,:index] -= lift
if Xi > self.nx-ireg-1:
index = self.nx-ireg-1 - Xi
if freeSurf[1]:
lift = np.fliplr(sourceRegion[:,index:])
sourceRegion = sourceRegion[:,:index]
qshift = qshift[:,:index]
if freeSurf[1]:
sourceRegion[:,index:] -= lift
data = srcScale * sourceRegion.ravel()
cols = qI[i] + qshift.ravel()
dlen = data.shape[0]
entries[dptr:dptr+dlen] = data
columns[dptr:dptr+dlen] = cols
rows[dptr:dptr+dlen] = i
dptr += dlen
q = sp.coo_matrix((entries[:dptr], (rows[:dptr],columns[:dptr])), shape=(N, M), dtype=np.complex128)
return q.T
@property
def ireg(self):
return getattr(self, '_ireg', 4)
class KaiserSource(SparseKaiserSource):
def __call__(self, sLocs):
q = super(KaiserSource, self).__call__(sLocs)
return q.toarray()
jhpyle/docassemble | docassemble_base/docassemble/base/core.py | Python | mit | 790 | 0.002532 |
# This module imports names for backwards compatibility and to ensure
# that pickled objects in existing sessions can be unpickled.
__all__ = ['DAObject', 'DAList', 'DADict', 'DAOrderedDict', 'DASet', 'DAFile', 'DAFileCollection', 'DAFileList', 'DAStaticFile', 'DAEmail', 'DAEmailRecipient', 'DAEmailRecipientList', 'DATemplate', 'DAEmpty', 'DALink', 'RelationshipTree', 'DAContext']
from docassemble.base.util import DAObject, DAList, DADict, DAOrderedDict, DASet, DAFile, DAFileCollection, DAFileList, DAStaticFile, DAEmail, DAEmailRecipient, DAEmailRecipientList, DATemplate, DAEmpty, DALink, RelationshipTree, DAContext, DAObjectPlusParameters, DACatchAll, RelationshipDir, RelationshipPeer, DALazyTemplate, DALazyTableTemplate, selections, DASessionLocal, DADeviceLocal
Catherine-Chu/DeepQA | chatbot_website/chatbot_website/settings.py | Python | apache-2.0 | 4,300 | 0.00093 |
"""
Django settings for chatbot_website project.
Generated by 'django-admin startproject' using Django 1.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ['CHATBOT_SECRET_KEY']
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'channels',
'chatbot_interface',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'chatbot_website.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'chatbot_website.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
CHANNEL_LAYERS = {
"default": {
"BACKEND": "asgi_redis.RedisChannelLayer",
"CONFIG": {
"hosts": [os.environ.get('REDIS_URL', 'redis://localhost:6379')],
},
"ROUTING": "chatbot_interface.routing.channel_routing",
},
}
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'file_django': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': 'logs/debug_django.log',
},
'file_chatbot': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': 'logs/debug_chatbot.log',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'stream': 'ext://sys.stdout',
},
},
'loggers': {
'django': {
'handlers': ['console', 'file_django'],
'level': 'INFO',
'propagate': True,
},
'chatbot_interface': {
'handlers': ['console', 'file_chatbot'],
'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),
},
},
}
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
IBM/differential-privacy-library | diffprivlib/models/__init__.py | Python | mit | 1,558 | 0.005777 |
# MIT License
#
# Copyright (C) IBM Corporation 2019
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Machine learning models with differential privacy
"""
from diffprivlib.models.naive_bayes import GaussianNB
from diffprivlib.models.k_means import KMeans
from diffprivlib.models.linear_regression import LinearRegression
from diffprivlib.models.logistic_regression import LogisticRegression
from diffprivlib.models.pca import PCA
from diffprivlib.models.standard_scaler import StandardScaler
from diffprivlib.models.forest import RandomForestClassifier
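# Minimal usage sketch (editor's addition): these classes mirror the
# scikit-learn fit/predict API with an extra privacy-budget parameter
# ``epsilon``; X_train/y_train/X_test are assumed to exist.
#
#     from diffprivlib.models import GaussianNB
#     clf = GaussianNB(epsilon=1.0)
#     clf.fit(X_train, y_train)
#     clf.predict(X_test)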
wcainboundary/boundary-api-cli | boundary/source_list.py | Python | apache-2.0 | 856 | 0 |
#
# Copyright 2014-2015 Boundary, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from boundary import ApiCli
class SourceList(ApiCli):
def __init__(self):
ApiCli.__init__(self)
        self.path = "v1/account/sources/"
self.method = "GET"
def getDescription(self):
return "Lists the sources in
|
a Boundary account"
mlperf/training_results_v0.7 | NVIDIA/benchmarks/ssd/implementations/pytorch/test/opt_loss_test.py | Python | apache-2.0 | 1,453 | 0.003441 |
import torch
from base_model import Loss
from train import dboxes300_coco
from opt_loss import OptLoss
# In:
# ploc : N x 8732 x 4
# plabel : N x 8732
# gloc : N x 8732 x 4
# glabel : N x 8732
data = torch.load('loss.pth')
ploc = data['ploc'].cuda()
plabel = data['plabel'].cuda()
gloc = data['gloc'].cuda()
glabel = data['glabel'].cuda()
dboxes = dboxes300_coco()
# loss = Loss(dboxes).cuda()
loss = OptLoss(dboxes).cuda()
loss = torch.jit.trace(loss, (ploc, plabel, gloc, glabel))
# print(traced_loss.graph)
# timing
timing_iterations = 1000
import time
# Dry run to eliminate JIT compile overhead
dl = torch.tensor([1.], device="cuda")
l = loss(ploc, plabel, gloc, glabel)
l.backward(dl)
# fprop
torch.cuda.synchronize()
start = time.time()
with torch.no_grad():
for _ in range(timing_iterations):
l = loss(ploc, plabel, gloc, glabel)
print('loss: {}'.format(l))
torch.cuda.synchronize()
end = time.time()
time_per_fprop = (end - start) / timing_iterations
print('took {} seconds per iteration (fprop)'.format(time_per_fprop))
# fprop + bprop
torch.cuda.synchronize()
start = time.time()
for _ in range(timing_iterations):
l = loss(ploc, plabel, gloc, glabel)
l.backward(dl)
torch.cuda.synchronize()
end = time.time()
time_per_fprop_bprop = (end - start) / timing_iterations
print('took {} seconds per iteration (fprop + bprop)'.format(time_per_fprop_bprop))
print(loss.graph_for(ploc, plabel, gloc, glabel))
hydroshare/hydroshare | hs_tracking/migrations/0001_initial.py | Python | bsd-3-clause | 1,731 | 0.002889 |
# -*- coding: utf-8 -*-
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Session',
fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('begin', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Variable',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('timestamp', models.DateTimeField(auto_now_add=True)),
('name', models.CharField(max_length=32)),
('type', models.IntegerField(choices=[('Integer', int), ('Floating Point', float), ('Text', str), ('Flag', bool)])),
('value', models.CharField(max_length=130)),
('session', models.ForeignKey(to='hs_tracking.Session')),
],
),
migrations.CreateModel(
name='Visitor',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('first_seen', models.DateTimeField(auto_now_add=True)),
('user', models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, null=True)),
],
),
migrations.AddField(
model_name='session',
name='visitor',
field=models.ForeignKey(to='hs_tracking.Visitor'),
),
]
dev-coop/plithos | src/plithos/simulations/random_mover.py | Python | mit | 388 | 0 |
import random
from ..simulator import Simulator
class RandomMover(Simulator):
ACTIONS = ('up', 'down', 'left', 'right')
def start(self):
self.init_game()
while True:
            self._check_pygame_events()
for drone in self.drones:
drone.do_move(random.choice(self.ACTIONS))
self.print_map()
self._draw()
animekita/selvbetjening | selvbetjening/core/mailcenter/models.py | Python | mit | 6,200 | 0.001774 |
import logging
import re
import markdown
from django.conf import settings
from django.db import models
from django.template import Template, Context, loader
import sys
from selvbetjening.core.mail import send_mail
logger = logging.getLogger('selvbetjening.email')
class EmailSpecification(models.Model):
BODY_FORMAT_CHOICES = (
('html', 'HTML'),
('markdown', 'Markdown')
)
CONTEXT_CHOICES = (
('user', 'User'),
('attendee', 'Attendee')
)
# template
subject = models.CharField(max_length=128)
body = models.TextField()
body_format = models.CharField(max_length=32, choices=BODY_FORMAT_CHOICES, default='markdown')
# context
template_context = models.CharField(max_length=32, choices=CONTEXT_CHOICES, default='user')
# meta
date_created = models.DateField(editable=False, auto_now_add=True)
def send_email_user(self, user, internal_sender_id):
if self.template_context == 'attendee':
raise ValueError
ok, email, err = self.render_user(user)
if not ok:
# Warn an admin and log the error silently
logger.exception('Failure rendering e-mail (template pk: %s) -- Addressed to %s', self.pk, user.email, exc_info=err, extra={
'related_user': user})
return
instance = self._send_mail(user.email, email, internal_sender_id)
logger.info('E-mail queued (%s) -- Addressed to %s', email['subject'], user.email,
extra={
'related_user': user,
'related_email': instance
})
def send_email_attendee(self, attendee, internal_sender_id):
ok, email, err = self.render_attendee(attendee)
if not ok:
# Warn an admin and log the error silently
logger.exception('Failure rendering e-mail (template pk: %s) -- Addressed to %s', self.pk, attendee.user.email, exc_info=err, extra={
'related_user': attendee.user,
'related_attendee': attendee})
return
instance = self._send_mail(attendee.user.email, email, internal_sender_id)
logger.info('E-mail queued (%s) -- Addressed to %s', email['subject'], attendee.user.email,
extra={
'related_user': attendee.user,
'related_attendee': attendee,
'related_email': instance
})
def _send_mail(self, to_address, email, internal_sender_id):
mails = send_mail(email['subject'],
email['body_plain'],
settings.DEFAULT_FROM_EMAIL,
[to_address],
body_html=email['body_html'],
internal_sender_id=internal_sender_id)
return mails[0]
def render_user(self, user):
"""
Renders the e-mail template using a user object as source.
An error is thrown if the template context is Attendee.
"""
if self.template_context == 'attendee':
raise ValueError
return self._render(self._get_context(user))
def render_attendee(self, attendee):
"""
Renders the e-mail template using a user object as source.
"""
return self._render(self._get_context(attendee.user, attendee=attendee))
def render_dummy(self):
context = {
# user context
'username': 'johndoe',
'full_name': 'John Doe',
'email': '[email protected]',
# attendee.event context
'event_title': 'Dummy Event',
'invoice_plain': 'INVOICE',
'invoice_html': 'INVOICE_HTML'
}
return self._render(context)
def _get_context(self, user, attendee=None):
# lazy import, prevent circular import in core.events
from selvbetjening.core.events.options.dynamic_selections import SCOPE, dynamic_selections
context = {
# user context
'username': user.username,
'full_name': ('%s %s' % (user.first_name, user.last_name)).strip(),
'email': user.email
}
if attendee is not None:
invoice = dynamic_selections(SCOPE.VIEW_USER_INVOICE, attendee)
invoice_html = loader.render_to_string('events/parts/invoice.html', {
'attendee': attendee,
'invoice': invoice
})
invoice_text = loader.render_to_string('events/parts/invoice_text.html', {
'attendee': attendee,
'invoice': invoice
})
context.update({
# attendee.event context
'event_title': attendee.event.title,
'attendee': attendee,
'invoice_plain': invoice_text,
'invoice_html': invoice_html,
})
for option, selection in invoice:
context['selected_%s' % option.pk] = selection is not None
return context
def _render(self, context):
context = Context(context)
try:
email = {
'subject': self.subject,
'body_plain': self._get_rendered_body_plain(context),
'body_html': self._get_rendered_body_html(context)
}
return True, email, None
except Exception:
return False, None, sys.exc_info()
def _get_rendered_body_plain(self, context):
if self.body_format == 'markdown':
body = self.body
else:
body = re.sub(r'<[^>]*?>', '', self.body)
        context['invoice'] = context.get('invoice_plain', None)
return Template(body).render(context)
def _get_rendered_body_html(self, context):
if self.body_format == 'markdown':
body = markdown.markdown(self.body)
else:
body = self.body
context['invoice'] = context.get('invoice_html', None)
return Template(body).render(context)
def __unicode__(self):
return self.subject
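# Illustrative render sketch (editor's addition; field values are invented and
# a configured Django settings module is assumed):
#
#     spec = EmailSpecification(subject='Welcome',
#                               body='Hi **{{ full_name }}**',
#                               body_format='markdown')
#     ok, email, err = spec.render_dummy()
#     # email['body_html'] -> '<p>Hi <strong>John Doe</strong></p>'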
google/graphicsfuzz | python/src/main/python/test_scripts/inspect_compute_results_test.py | Python | apache-2.0 | 15,285 | 0.004122 |
#!/usr/bin/env python3
# Copyright 2019 The GraphicsFuzz Project Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pathlib2
import pytest
import sys
from typing import List, Optional
HERE = os.path.abspath(__file__)
sys.path.insert(0, os.path.dirname(os.path.dirname(HERE)) + os.sep + "drivers")
import inspect_compute_results
def test_unknown_command_rejected(tmp_path: pathlib2.Path):
with pytest.raises(ValueError) as value_error:
inspect_compute_results.main_helper(['unknown', '1.json', '2.json'])
assert 'ValueError: Unknown command' in str(value_error)
def test_show_rejects_multiple_args(tmp_path: pathlib2.Path):
with pytest.raises(ValueError) as value_error:
inspect_compute_results.main_helper(['show', '1.json', '2.json'])
assert 'ValueError: Command "show" requires exactly 1 input; 2 provided' in str(value_error)
def test_exactdiff_rejects_one_arg(tmp_path: pathlib2.Path):
with pytest.raises(ValueError) as value_error:
inspect_compute_results.main_helper(['exactdiff', '1.json'])
assert 'ValueError: Command "exactdiff" requires exactly 2 inputs; 1 provided' in str(value_error)
def test_exactdiff_rejects_three_args(tmp_path: pathlib2.Path):
with pytest.raises(ValueError) as value_error:
inspect_compute_results.main_helper(['exactdiff', '1.json', '2.json', '3.json'])
assert 'ValueError: Command "exactdiff" requires exactly 2 inputs; 3 provided' in str(value_error)
def test_fuzzydiff_rejects_one_arg(tmp_path: pathlib2.Path):
with pytest.raises(ValueError) as value_error:
inspect_compute_results.main_helper(['fuzzydiff', '1.json'])
assert 'ValueError: Command "fuzzydiff" requires exactly 2 inputs; 1 provided' in str(value_error)
def test_fuzzydiff_rejects_three_args(tmp_path: pathlib2.Path):
with pytest.raises(ValueError) as value_error:
inspect_compute_results.main_helper(['fuzzydiff', '1.json', '2.json', '3.json'])
assert 'ValueError: Command "fuzzydiff" requires exactly 2 inputs; 3 provided' in str(value_error)
def test_show_handles_file_not_found(tmp_path: pathlib2.Path):
with pytest.raises(FileNotFoundError) as file_not_found_error:
inspect_compute_results.main_helper(['show', 'nofile.json'])
    assert 'FileNotFoundError: Input file "nofile.json" not found' in str(file_not_found_error)
def test_exactdiff_handles_first_file_not_found(tmp_path: pathlib2.Path):
onefile = tmp_path / 'something.json'
onefile.touch(exist_ok=False)
with pytest.raises(FileNotFoundError) as file_not_found_error:
inspect_compute_results.main_helper(['exactdiff', 'nofile.json', str(onefile)])
assert 'FileNotFoundError: Input file "nofile.json" not found' in str(file_not_found_error)
def test_exactdiff_handles_second_file_not_found(tmp_path: pathlib2.Path):
onefile = tmp_path / 'something.json'
onefile.touch(exist_ok=False)
with pytest.raises(FileNotFoundError) as file_not_found_error:
inspect_compute_results.main_helper(['exactdiff', str(onefile), 'nofile.json'])
assert 'FileNotFoundError: Input file "nofile.json" not found' in str(file_not_found_error)
def test_fuzzydiff_handles_first_file_not_found(tmp_path: pathlib2.Path):
onefile = tmp_path / 'something.json'
onefile.touch(exist_ok=False)
with pytest.raises(FileNotFoundError) as file_not_found_error:
inspect_compute_results.main_helper(['fuzzydiff', 'nofile.json', str(onefile)])
assert 'FileNotFoundError: Input file "nofile.json" not found' in str(file_not_found_error)
def test_fuzzydiff_handles_second_file_not_found(tmp_path: pathlib2.Path):
onefile = tmp_path / 'something.json'
onefile.touch(exist_ok=False)
with pytest.raises(FileNotFoundError) as file_not_found_error:
inspect_compute_results.main_helper(['fuzzydiff', str(onefile), 'nofile.json'])
assert 'FileNotFoundError: Input file "nofile.json" not found' in str(file_not_found_error)
def check_diff(tmp_path: pathlib2.Path, output1: str, output2: str, is_exact: bool,
extra_args: Optional[List[str]]=None) -> int:
results1_path = tmp_path / '1.info.json'
results2_path = tmp_path / '2.info.json'
with results1_path.open(mode='w') as results1_file:
results1_file.write(output1)
with results2_path.open(mode='w') as results2_file:
results2_file.write(output2)
args = ['exactdiff' if is_exact else 'fuzzydiff',
str(results1_path),
str(results2_path)]
if extra_args:
args += extra_args
return inspect_compute_results.main_helper(args)
def check_exact_diff(tmp_path: pathlib2.Path, output1: str, output2: str) -> int:
return check_diff(tmp_path, output1, output2, is_exact=True)
def check_fuzzy_diff(tmp_path: pathlib2.Path, output1: str, output2: str,
extra_args: Optional[List[str]]=None) -> int:
return check_diff(tmp_path, output1, output2, is_exact=False, extra_args=extra_args)
def test_exactdiff_pass1(tmp_path: pathlib2.Path):
assert 0 == check_exact_diff(tmp_path, (
'{"status": "SUCCESS", "log": "#### Start compute shader", "outputs": '
'{"ssbo":[[88],[28,12,14,14,18,16,18,18,28,22,24,24,28,26]]}}'), (
'{"status": "IGNORED_DURING_DIFF", "log": "#### Different stuff", "outputs": '
'{"ssbo":[[88],[28,12,14,14,18,16,18,18,28,22,24,24,28,26]]}}'))
def test_exactdiff_pass2(tmp_path: pathlib2.Path):
assert 0 == check_exact_diff(tmp_path, (
'{"status": "SUCCESS", "log": "#### Start compute shader", "outputs": '
'{"ssbo":[[2.0]]}}'), (
'{"status": "IGNORED_DURING_DIFF", "log": "#### Different stuff", "outputs": '
'{"ssbo": [ [2.0] ] } }'))
def test_exactdiff_pass3(tmp_path: pathlib2.Path):
assert 0 == check_exact_diff(tmp_path, (
'{"status": "SUCCESS", "log": "#### Start compute shader", "outputs": '
'{"ssbo":[[88.0, 12.3],[28,12,14],[1]]}}'), (
'{"status": "IGNORED_DURING_DIFF", "log": "#### Different stuff", "outputs": '
'{"ssbo":[[88.0, 12.3],[28,12,14],[1]]}}'))
def test_exactdiff_fail_first_invalid(tmp_path: pathlib2.Path):
with pytest.raises(ValueError) as value_error:
check_exact_diff(tmp_path, (
'not_json'), (
'{"status": "IGNORED_DURING_DIFF", "log": "#### Different stuff", "outputs": '
'{"ssbo": [ [2.0] ] } }'))
assert 'ValueError: First input file did not contain valid SSBO data' in str(value_error)
def test_exactdiff_fail_second_invalid(tmp_path: pathlib2.Path):
with pytest.raises(ValueError) as value_error:
check_exact_diff(tmp_path, (
'{"status": "IGNORED_DURING_DIFF", "log": "#### Different stuff", "outputs": '
'{"ssbo": [ [2.0] ] } }'), (
'not_json'))
assert 'ValueError: Second input file did not contain valid SSBO data' in str(value_error)
def test_exactdiff_fail_mismatched_number_of_fields(tmp_path: pathlib2.Path):
assert 0 != check_exact_diff(tmp_path, (
'{"status": "SUCCESS", "log": "...", "outputs": '
'{"ssbo":[[88],[28,12,14,14,18,16,18,18,28,22,24,24,28,26]]}}'), (
'{"status": "SUCCESS", "log": "...", "outputs": '
'{"ssbo":[[88]]}}'))
def test_exactdiff_fail_mismatched_field_length(tmp_path: pathlib2.Path):
assert 0 != check_exact_diff(tmp_path, (
'{"status": "SUCCESS", "log": "...", "outputs": '
'{"ssbo":[[88],[28,12,14,14,18,16,18,18,28,22,24,24,28,26]]}}'), (
'{"status": "SUCCESS", "log": "...", "outputs": '
'{"ssbo":[[88],[28,12,14,14,18,16,18,18,28,22,24,24,28]]}}'))
def test_
DiCarloLab-Delft/PycQED_py3 | pycqed/instrument_drivers/virtual_instruments/sim_control_CZ.py | Python | mit | 11,275 | 0.001508 |
from qcodes.instrument.base import Instrument
from qcodes.utils import validators as vals
from qcodes.instrument.parameter import ManualParameter
import numpy as np
class SimControlCZ(Instrument):
"""
Noise and other parameters for cz_superoperator_simulation_new
"""
def __init__(self, name, **kw):
super().__init__(name, **kw)
# Noise parameters
self.add_parameter(
"T1_q0",
unit="s",
label="T1 fluxing qubit",
docstring="T1 fluxing qubit",
parameter_class=ManualParameter,
vals=vals.Numbers(),
initial_value=0,
)
self.add_parameter(
"T1_q1",
unit="s",
label="T1 static qubit",
docstring="T1 static qubit",
parameter_class=ManualParameter,
vals=vals.Numbers(),
initial_value=0,
)
self.add_parameter(
"T2_q1",
unit="s",
label="T2 static qubit",
docstring="T2 static qubit",
parameter_class=ManualParameter,
vals=vals.Numbers(),
initial_value=0,
)
self.add_parameter(
"T2_q0_amplitude_dependent",
docstring="fitcoefficients giving T2_q0 or Tphi_q0 as a function of inverse sensitivity (in units of w_q0/Phi_0): a, b. Function is ax+b",
parameter_class=ManualParameter,
vals=vals.Arrays(),
initial_value=np.array([-1, -1]),
)
# for flux noise simulations
self.add_parameter(
"sigma_q0",
unit="flux quanta",
docstring="standard deviation of the Gaussian from which we sample the flux bias, q0",
parameter_class=ManualParameter,
vals=vals.Numbers(),
initial_value=0,
)
self.add_parameter(
"sigma_q1",
unit="flux quanta",
docstring="standard deviation of the Gaussian from which we sample the flux bias, q1",
parameter_class=ManualParameter,
vals=vals.Numbers(),
initial_value=0,
)
self.add_parameter(
"w_q1_sweetspot",
docstring="NB: different from the operating point in general",
parameter_class=ManualParameter,
vals=vals.Numbers(),
)
self.add_parameter(
"w_q0_sweetspot",
docstring="NB: different from the operating point in general",
parameter_class=ManualParameter,
vals=vals.Numbers(),
)
self.add_parameter(
"Z_rotations_length",
unit="s",
docstring="duration of the single qubit Z rotations at the end of the pulse",
parameter_class=ManualParameter,
vals=vals.Numbers(),
initial_value=0,
)
self.add_parameter(
"total_idle_time",
unit="s",
docstring="duration of the idle time",
parameter_class=ManualParameter,
vals=vals.Numbers(),
initial_value=0,
)
# Control parameters for the simulations
self.add_parameter(
"dressed_compsub",
docstring="true if we use the definition of the comp subspace that uses the dressed 00,01,10,11 states",
parameter_class=ManualParameter,
vals=vals.Bool(),
initial_value=True,
)
self.add_parameter(
"distortions",
parameter_class=ManualParameter,
vals=vals.Bool(),
initial_value=False,
)
self.add_parameter(
"voltage_scaling_factor",
unit="a.u.",
docstring="scaling factor for the voltage for a CZ pulse",
parameter_class=ManualParameter,
vals=vals.Numbers(),
initial_value=1,
)
self.add_parameter(
"n_sampling_gaussian_vec",
docstring="array. each element is a number of samples from the gaussian distribution. Std to guarantee convergence is [11]. More are used only to verify convergence",
parameter_class=ManualParameter,
vals=vals.Arrays(),
initial_value=np.array([11]),
)
self.add_parameter(
"cluster",
docstring="true if we want to use the cluster",
parameter_class=ManualParameter,
vals=vals.Bool(),
initial_value=False,
)
self.add_parameter(
"look_for_minimum",
docstring="changes cost function to optimize either research of minimum of avgatefid_pc or to get the heat map in general",
parameter_class=ManualParameter,
vals=vals.Bool(),
initial_value=False,
)
self.add_parameter(
"T2_scaling",
unit="a.u.",
docstring="scaling factor for T2_q0_amplitude_dependent",
parameter_class=ManualParameter,
vals=vals.Numbers(),
initial_value=1,
)
self.add_parameter(
"waiting_at_sweetspot",
unit="s",
docstring="time spent at sweetspot during the two halves of a netzero pulse",
parameter_class=ManualParameter,
vals=vals.Numbers(min_value=0),
            initial_value=0,
)
self.add_parameter(
"which_gate",
docstring="Direction of the CZ gate. E.g. 'NE'. Used to extract parameters from the fluxlutman ",
parameter_class=ManualParameter,
vals=vals.Strings(),
initial_value="NE",
)
self.add_parameter(
"simstep_div",
docstring="Division of the simulation time step. 4 is a good one, corresponding to a time step of 0.1 ns. For smaller values lands
|
capes can deviate significantly from experiment.",
parameter_class=ManualParameter,
vals=vals.Numbers(min_value=1),
initial_value=4,
)
self.add_parameter(
"gates_num",
docstring="Chain the same gate gates_num times.",
parameter_class=ManualParameter,
# It should be an integer but the measurement control cast to float when setting sweep points
vals=vals.Numbers(min_value=1),
initial_value=1,
)
self.add_parameter(
"gates_interval",
docstring="Time interval that separates the gates if gates_num > 1.",
parameter_class=ManualParameter,
unit='s',
vals=vals.Numbers(min_value=0),
initial_value=0,
)
self.add_parameter(
"cost_func",
docstring="Used to calculate the cost function based on the quantities of interest (qoi). Signature: cost_func(qoi). NB: qoi's that represent percentages will be in [0, 1] range. Inspect 'pycqed.simulations.cz_superoperator_simulation_new_functions.simulate_quantities_of_interest_superoperator_new??' in notebook for available qoi's.",
parameter_class=ManualParameter,
unit='a.u.',
vals=vals.Callable(),
initial_value=None,
)
self.add_parameter(
"cost_func_str",
docstring="Not loaded automatically. Convenience parameter to store the cost function string and use `exec('sim_control_CZ.cost_func(' + sim_control_CZ.cost_func_str() + ')')` to load it.",
parameter_class=ManualParameter,
vals=vals.Strings(),
initial_value="lambda qoi: np.log10((1 - qoi['avgatefid_compsubspace_pc']) * (1 - 0.5) + qoi['L1'] * 0.5)",
)
self.add_parameter(
"double_cz_pi_pulses",
docstring="If set to 'no_pi_pulses' or 'with_pi_pulses' will simulate two sequential CZs with or without Pi pulses simulated as an ideal superoperator multiplication.",
parameter_class=ManualParameter,
vals=vals.Strings(),
initial_value="", # Use empty string to evaluate to false
)
# for
okuta/chainer | chainer/functions/connection/bilinear.py | Python | mit | 9,015 | 0 |
import numpy
import chainer
from chainer import backend
from chainer import function_node
from chainer.utils import type_check
def _as_mat(x):
if x.ndim == 2:
return x
return x.reshape(len(x), -1)
def _ij_ik_il_to_jkl(a, b, c):
ab = chainer.functions.matmul(a[:, :, None], b[:, None, :]) # ijk
return chainer.functions.matmul(_as_mat(ab).T, c).reshape(
a.shape[1], b.shape[1], c.shape[1])
def _ij_ik_jkl_to_il(a, b, c):
ab = chainer.functions.matmul(a[:, :, None], b[:, None, :]) # ijk
c = c.reshape(-1, c.shape[-1]) # [jk]l
return chainer.functions.matmul(_as_mat(ab), c)
def _ij_il_jkl_to_ik(a, b, c):
return _ij_ik_jkl_to_il(a, b, chainer.functions.swapaxes(c, 1, 2))
def _ik_il_jkl_to_ij(a, b, c):
return _ij_ik_jkl_to_il(a, b, chainer.functions.rollaxis(c, 0, c.ndim))
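# Editor's note: each helper above is a matmul-based rewrite of an einsum
# contraction over the shared batch index i; for ndarrays of shapes
# (I, J), (I, K), (I, L) and (J, K, L) respectively:
#
#     _ij_ik_il_to_jkl(a, b, c)  ==  np.einsum('ij,ik,il->jkl', a, b, c)
#     _ij_ik_jkl_to_il(a, b, W)  ==  np.einsum('ij,ik,jkl->il', a, b, W)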
class BilinearFunction(function_node.FunctionNode):
def check_type_forward(self, in_types):
n_in = type_check.eval(in_types.size())
if n_in != 3 and n_in != 6:
raise type_check.InvalidType(
'{0} or {1}'.format(
in_types.size() == 3, in_types.size() == 6),
'{0} == {1}'.format(in_types.size(), n_in))
e1_type, e2_type, W_type = in_types[:3]
type_check_prod = type_check.make_variable(numpy.prod, 'prod')
type_check.expect(
e1_type.dtype == numpy.float32,
e1_type.ndim >= 2,
e2_type.dtype == numpy.float32,
e2_type.ndim >= 2,
e1_type.shape[0] == e2_type.shape[0],
W_type.dtype == numpy.float32,
W_type.ndim == 3,
type_check_prod(e1_type.shape[1:]) == W_type.shape[0],
type_check_prod(e2_type.shape[1:]) == W_type.shape[1],
)
if n_in == 6:
out_size = W_type.shape[2]
V1_type, V2_type, b_type = in_types[3:]
type_check.expect(
V1_type.dtype == numpy.float32,
V1_type.ndim == 2,
V1_type.shape[0] == W_type.shape[0],
V1_type.shape[1] == out_size,
V2_type.dtype == numpy.float32,
V2_type.ndim == 2,
V2_type.shape[0] == W_type.shape[1],
V2_type.shape[1] == out_size,
b_type.dtype == numpy.float32,
b_type.ndim == 1,
b_type.shape[0] == out_size,
)
def forward(self, inputs):
self.retain_inputs(tuple(range(len(inputs))))
e1 = _as_mat(inputs[0])
e2 = _as_mat(inputs[1])
W = inputs[2]
xp = backend.get_array_module(*inputs)
# optimize: y = xp.einsum('ij,ik,jkl->il', e1, e2, W)
y = xp.tensordot(xp.einsum('ij,ik->ijk', e1, e2), W, axes=2)
if len(inputs) == 6:
V1, V2, b = inputs[3:]
y += e1.dot(V1)
y += e2.dot(V2)
y += b
return y,
def backward(self, indexes, grad_outputs):
inputs = self.get_retained_inputs()
e1, e2, W = inputs[:3]
gy, = grad_outputs
if len(inputs) == 6:
V1, V2 = inputs[3], inputs[4]
return BilinearFunctionGrad().apply((e1, e2, W, V1, V2, gy))
return BilinearFunctionGrad().apply((e1, e2, W, gy))
class BilinearFunctionGrad(function_node.FunctionNode):
def forward(self, inputs):
self.retain_inputs(tuple(range(len(inputs))))
e1 = _as_mat(inputs[0])
e2 = _as_mat(inputs[1])
W, gy = inputs[2], inputs[-1]
xp = backend.get_array_module(*inputs)
# optimize: gW = xp.einsum('ij,ik,il->jkl', e1, e2, gy)
gW = xp.einsum('ij,ik->jki', e1, e2).dot(gy)
gy_W = xp.tensordot(gy, W, axes=(1, 2)) # 'il,jkl->ijk'
# optimize: ge1 = xp.einsum('ik,jkl,il->ij', e2, W, gy)
ge1 = xp.einsum('ik,ijk->ij', e2, gy_W)
# optimize: ge2 = xp.einsum('ij,jkl,il->ik', e1, W, gy)
ge2 = xp.einsum('ij,ijk->ik', e1, gy_W)
ret = ge1.reshape(inputs[0].shape), ge2.reshape(inputs[1].shape), gW
if len(inputs) == 6:
V1, V2 = inputs[3], inputs[4]
gV1 = e1.T.dot(gy)
gV2 = e2.T.dot(gy)
gb = gy.sum(0)
ge1 += gy.dot(V1.T)
ge2 += gy.dot(V2.T)
ret += gV1, gV2, gb
return ret
def backward(self, indexes, grad_outputs):
inputs = self.get_retained_inputs()
e1 = _as_mat(inputs[0])
e2 = _as_mat(inputs[1])
W, gy = inputs[2], inputs[-1]
gge1 = _as_mat(grad_outputs[0])
gge2 = _as_mat(grad_outputs[1])
ggW = grad_outputs[2]
dge1_de2 = _ij_il_jkl_to_ik(gge1, gy, W)
dge1_dW = _ij_ik_il_to_jkl(gge1, e2, gy)
dge1_dgy = _ij_ik_jkl_to_il(gge1, e2, W)
dge2_de1 = _ik_il_jkl_to_ij(gge2, gy, W)
dge2_dW = _ij_ik_il_to_jkl(e1, gge2, gy)
dge2_dgy = _ij_ik_jkl_to_il(e1, gge2, W)
dgW_de1 = _ik_il_jkl_to_ij(e2, gy, ggW)
dgW_de2 = _ij_il_jkl_to_ik(e1, gy, ggW)
dgW_dgy = _ij_ik_jkl_to_il(e1, e2, ggW)
ge1 = dgW_de1 + dge2_de1
ge2 = dgW_de2 + dge1_de2
gW = dge1_dW + dge2_dW
ggy = dgW_dgy + dge1_dgy + dge2_dgy
if len(inputs) == 6:
V1, V2 = inputs[3], inputs[4]
ggV1, ggV2, ggb = grad_outputs[3:]
gV1 = chainer.functions.matmul(gge1, gy, transa=True)
gV2 = chainer.functions.matmul(gge2, gy, transa=True)
ge1 += chainer.functions.matmul(gy, ggV1, transb=True)
ge2 += chainer.functions.matmul(gy, ggV2, transb=True)
ggy += chainer.functions.matmul(gge1, V1)
ggy += chainer.functions.matmul(gge2, V2)
ggy += chainer.functions.matmul(e1, ggV1)
ggy += chainer.functions.matmul(e2, ggV2)
ggy += chainer.functions.broadcast_to(ggb, ggy.shape)
ge1 = ge1.reshape(inputs[0].shape)
ge2 = ge2.reshape(inputs[1].shape)
if len(inputs) == 6:
return ge1, ge2, gW, gV1, gV2, ggy
return ge1, ge2, gW, ggy
def bilinear(e1, e2, W, V1=None, V2=None, b=None):
"""Applies a bilinear function based on given parameters.
This is a building block of Neural Tensor Network (see the reference paper
below). It takes two input variables and one or four parameters, and
outputs one variable.
To be precise, denote six input arrays mathematically by
:math:`e^1\\in \\mathbb{R}^{I\\cdot J}`,
    :math:`e^2\\in \\mathbb{R}^{I\\cdot K}`,
    :math:`W\\in \\mathbb{R}^{J \\cdot K \\cdot L}`,
:math:`V^1\\in \\mathbb{R}^{J \\cdot L}`,
:math:`V^2\\in \\mathbb{R}^{K \\cdot L}`, and
:math:`b\\in \\mathbb{R}^{L}`,
where :math:`I` is mini-batch size.
In this document, we call :math:`V^1`, :math:`V^2`, and :math:`b` linear
parameters.
The output of forward propagation is calculated as
.. math::
y_{il} = \\sum_{jk} e^1_{ij} e^2_{ik} W_{jkl} + \\
\\sum_{j} e^1_{ij} V^1_{jl} + \\sum_{k} e^2_{ik} V^2_{kl} + b_{l}.
Note that V1, V2, b are optional. If these are not given, then this
function omits the last three terms in the above equation.
.. note::
This function accepts an input variable ``e1`` or ``e2`` of a non-matrix
array. In this case, the leading dimension is treated as the batch
dimension, and the other dimensions are reduced to one dimension.
.. note::
In the original paper, :math:`J` and :math:`K`
must be equal and the author denotes :math:`[V^1 V^2]`
(concatenation of matrices) by :math:`V`.
Args:
e1 (:class:`~chainer.Variable` or :ref:`ndarray`):
Left input variable.
e2 (:class:`~chainer.Variable` or :ref:`ndarray`):
Right input variable.
W (:class:`~chainer.Variable` or :ref:`ndarray`):
Quadratic weight variable.
V1 (:class:`~chainer.Variable` or :ref:`ndarray`):
Left coefficient variable.
V2 (:class:`~chainer.Variable` or :ref:`ndarray`):
Right coefficient variable.
        b (:class:`~chainer.Variable` or :ref:`ndarray`):
            Bias variable.
janeczku/calibre-web | cps/services/Metadata.py | Python | gpl-3.0 | 3,837 | 0.000784 |
# -*- coding: utf-8 -*-
# This file is part of the Calibre-Web (https://github.com/janeczku/calibre-web)
# Copyright (C) 2021 OzzieIsaacs
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import abc
import dataclasses
import os
import re
from typing import Dict, Generator, List, Optional, Union
from cps import constants
@dataclasses.dataclass
class MetaSourceInfo:
id: str
description: str
link: str
@dataclasses.dataclass
class MetaRecord:
id: Union[str, int]
title: str
authors: List[str]
url: str
source: MetaSourceInfo
cover: str = os.path.join(constants.STATIC_DIR, 'generic_cover.jpg')
description: Optional[str] = ""
series: Optional[str] = None
series_index: Optional[Union[int, float]] = 0
identifiers: Dict[str, Union[str, int]] = dataclasses.field(default_factory=dict)
publisher: Optional[str] = None
publishedDate: Optional[str] = None
rating: Optional[int] = 0
languages: Optional[List[str]] = dataclasses.field(default_factory=list)
tags: Optional[List[str]] = dataclasses.field(default_factory=list)
class Metadata:
__name__ = "Generic"
__id__ = "generic"
def __init__(self):
self.active = True
def set_status(self, state):
self.active = state
@abc.abstractmethod
def search(
self, query: str, generic_cover: str = "", locale: str = "en"
) -> Optional[List[MetaRecord]]:
pass
    @staticmethod
def get_title_tokens(
title: str, strip_joiners: bool = True
) -> Generator[str, None, None]:
"""
Taken from calibre source code
        It's a simplified (cut out what is unnecessary) version of
        https://github.com/kovidgoyal/calibre/blob/99d85b97918625d172227c8ffb7e0c71794966c0/
        src/calibre/ebooks/metadata/sources/base.py#L363-L367
(src/calibre/ebooks/metadata/sources/base.py - lines 363-398)
"""
title_patterns = [
(re.compile(pat, re.IGNORECASE), repl)
for pat, repl in [
# Remove things like: (2010) (Omnibus) etc.
(
r"(?i)[({\[](\d{4}|omnibus|anthology|hardcover|"
r"audiobook|audio\scd|paperback|turtleback|"
r"mass\s*market|edition|ed\.)[\])}]",
"",
),
# Remove any strings that contain the substring edition inside
# parentheses
(r"(?i)[({\[].*?(edition|ed.).*?[\]})]", ""),
# Remove commas used a separators in numbers
(r"(\d+),(\d+)", r"\1\2"),
# Remove hyphens only if they have whitespace before them
(r"(\s-)", " "),
# Replace other special chars with a space
(r"""[:,;!@$%^&*(){}.`~"\s\[\]/]《》「」“”""", " "),
]
]
for pat, repl in title_patterns:
title = pat.sub(repl, title)
tokens = title.split()
for token in tokens:
token = token.strip().strip('"').strip("'")
if token and (
not strip_joiners or token.lower() not in ("a", "and", "the", "&")
):
yield token
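    # Illustrative call (editor's addition; the title is invented):
    #
    #     list(Metadata.get_title_tokens('The Martian (2014) [Hardcover]'))
    #     # -> ['Martian']  (joiners and bracketed year/edition info stripped)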
jogral/tigris-python-sdk | tigrissdk/session/tigris_session.py | Python | apache-2.0 | 5,263 | 0.00019 |
# coding: utf-8
from __future__ import unicode_literals, absolute_import
try:
import requests as r
except:
r = None
class TigrisSession(object):
"""
Base session layer for Tigris.
"""
def __init__(self,
base_url,
default_headers={}):
"""
:param base_url:
The customer endpoint docroot.
:type base_url:
`str`
:param default_headers
"""
self._base_url = base_url
self._session = r.Session()
self._default_headers = default_headers
self._timeout = 80
def _request(self, method, endpoint, headers, post_data=None, files=None):
"""
Makes an HTTP request
:param method:
            The name of the method
:type method:
`str`
:param endpoint:
The name of the endpoint
:type endpoint:
`str`
:param headers:
The name of the endpoint
:type headers:
`dict`
:param post_data:
PATCH/POST/PUT data.
:type post_data:
`dict`
:rtype:
            `tuple` of `str`, `int`, `dict`
"""
url = '{0}/{1}'.format(self._base_url, endpoint)
try:
try:
result = self._session.request(method,
url,
headers=headers,
json=post_data,
files=files,
timeout=self._timeout)
except TypeError as e:
raise TypeError(
'WARNING: We couldn\'t find a proper instance of '
'Python `requests`. You may need to update or install '
'the library, which you can do with `pip`: '
' To update `requests`: '
''
' pip install -U requests '
' To install `requests`:'
''
' pip install requests. '
'Alternatively, your POST data may be malformed. '
'Underlying error: {0}'.format(e))
content = result.json()
status_code = result.status_code
except Exception as e:
raise Exception(e)
return content, status_code, result.headers
def _delete(self, endpoint, headers={}):
"""
Executes a DELETE request
:param endpoint:
The name of the endpoint
:type endpoint:
`url`
:rtype:
`tuple`
"""
joined_headers = dict(headers, **self._default_headers)
return self._request('delete', endpoint, joined_headers)
def _get(self, endpoint, headers={}):
"""
Executes a GET request
:param endpoint:
The name of the endpoint
:type endpoint:
`url`
:rtype:
`tuple`
"""
joined_headers = dict(headers, **self._default_headers)
return self._request('get', endpoint, joined_headers)
def _head(self, endpoint, headers={}):
"""
Executes a HEAD request
:param endpoint:
The name of the endpoint
:type endpoint:
`url`
:rtype:
`tuple`
"""
joined_headers = dict(headers, **self._default_headers)
return self._request('head', endpoint, joined_headers)
def _patch(self, endpoint, data={}, headers={}):
"""
Executes a PATCH request
:param endpoint:
The name of the endpoint
:type endpoint:
`url`
:param data:
The payload data to send
:type data:
`dict`
:rtype:
`tuple`
"""
joined_headers = dict(headers, **self._default_headers)
return self._request(
'patch',
endpoint,
joined_headers,
post_data=data)
def _post(self, endpoint, data={}, headers={}, files=None):
"""
Executes a POST request
:param endpoint:
The name of the endpoint
:type endpoint:
`url`
:param data:
The payload data to send
:type data:
`dict`
:rtype:
`tuple`
"""
joined_headers = dict(headers, **self._default_headers)
return self._request(
'post',
endpoint,
joined_headers,
post_data=data,
files=files)
def _put(self, endpoint, data={}, headers={}):
"""
Executes a PATCH request
:param endpoint:
The name of the endpoint
:type endpoint:
`url`
:param data:
The payload data to send
:type data:
`dict`
:rtype:
`tuple`
"""
joined_headers = dict(headers, **self._default_headers)
return self._request(
'put',
endpoint,
joined_headers,
post_data=data)
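# Minimal usage sketch (editor's addition; base URL, headers and endpoint are
# hypothetical):
#
#     session = TigrisSession('https://api.example.com',
#                             default_headers={'Accept': 'application/json'})
#     content, status_code, headers = session._get('customers')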
irmen/Pyro4 | examples/messagebus/subscriber_manual_consume.py | Python | mit | 2,432 | 0.002467 |
"""
This is a subscriber meant for the 'weather' messages example.
It uses a custom code loop to get and process messages.
"""
from __future__ import print_function
import sys
import threading
import time
import Pyro4
from messagebus.messagebus import Subscriber
from Pyro4.util import excepthook
sys.excepthook = excepthook
if sys.version_info < (3, 0):
input = raw_input
Pyro4.config.AUTOPROXY = True
@Pyro4.expose
class Subber(Subscriber):
def consume_message(self, topic, message):
# In this case, this consume message method is called by our own code loop.
print("\nPROCESSING MESSAGE:")
print(" topic:", topic)
print(" msgid:", message.msgid)
print(" created:", message.created)
print(" data:", message.data)
def manual_message_loop(self):
print("Entering manual message processing loop (5 messages).")
processed = 0
while processed < 5:
time.sleep(0.5)
print("\nApprox. number of received messages:", self.received_messages.qsize())
topic, message = self.received_messages.get() # get a message from the queue (they are put there by the Pyro messagebus)
self.consume_message(topic, message)
processed += 1
print("\nEnd.")
hostname = input("hostname to bind on (empty=localhost): ").strip() or "localhost"
# create a messagebus subscriber that uses manual message retrieval (via explicit call)
# because we're doing the message loop ourselves, the Pyro daemon has to run in a separate thread
subber = Subber(auto_consume=False)
d = Pyro4.Daemon(host=hostname)
d.register(subber)
daemon_thread = threading.Thread(target=d.requestLoop)
daemon_thread.daemon = True
daemon_thread.start()
topics = subber.bus.topics()
print("Topics on the bus: ", topics)
print("Subscribing to weather-forecast.")
subber.bus.subscribe("weather-forecast", subber)
# note: we subscribe on the bus *after* registering the subber as a Pyro object
# this results in Pyro automatically making a proxy for the subber
print("Subscribed on weather-forecast")
# run the manual message loop
print("Entering message loop, you should see the msg count increasing.")
subber.manual_message_loop()
subber.bus.unsubscribe("weather-forecast", subber)
print("Unsubscribed from the topic.")
print("Entering message loop again, you should see the msg count decrease.")
subber.manual_message_loop()
|
Lekensteyn/buildbot
|
master/buildbot/steps/mtrlogobserver.py
|
Python
|
gpl-2.0
| 18,336 | 0.000545 |
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
from future.builtins import range
import re
import sys
from twisted.enterprise import adbapi
from twisted.internet import defer
from twisted.python import log
from buildbot.process.buildstep import LogLineObserver
from buildbot.steps.shell import Test
class EqConnectionPool(adbapi.ConnectionPool):
"""This class works the same way as
twisted.enterprise.adbapi.ConnectionPool. But it adds the ability to
compare connection pools for equality (by comparing the arguments
passed to the constructor).
This is useful when passing the ConnectionPool to a BuildStep, as
otherwise Buildbot will consider the buildstep (and hence the
containing buildfactory) to have changed every time the configuration
is reloaded.
It also sets some defaults differently from adbapi.ConnectionPool that
are more suitable for use in MTR.
"""
def __init__(self, *args, **kwargs):
self._eqKey = (args, kwargs)
adbapi.ConnectionPool.__init__(self,
cp_reconnect=True, cp_min=1, cp_max=3,
*args, **kwargs)
def __eq__(self, other):
if isinstance(other, EqConnectionPool):
return self._eqKey == other._eqKey
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
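# Added illustrative note (not part of the original file): because equality is
# keyed on the constructor arguments, two pools built the same way compare
# equal, so a master.cfg reload does not mark the step as changed. The DSN
# arguments below are hypothetical.
#
#   p1 = EqConnectionPool("MySQLdb", db="mtr_results", user="buildbot")
#   p2 = EqConnectionPool("MySQLdb", db="mtr_results", user="buildbot")
#   assert p1 == p2
#   assert p1 != EqConnectionPool("MySQLdb", db="other", user="buildbot")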
class MtrTestFailData:
def __init__(self, testname, variant, result, info, text, callback):
        self.testname = testname
        self.variant = variant
self.result = result
self.info = info
self.text = text
self.callback = callback
def add(self, line):
self.text += line
def fireCallback(self):
return self.callback(self.testname, self.variant, self.result, self.info, self.text)
class MtrLogObserver(LogLineObserver):
"""
Class implementing a log observer (can be passed to
BuildStep.addLogObserver().
It parses the output of mysql-test-run.pl as used in MySQL,
MariaDB, Drizzle, etc.
It counts number of tests run and uses it to provide more accurate
completion estimates.
It parses out test failures from the output and summarizes the results on
the Waterfall page. It also passes the information to methods that can be
overridden in a subclass to do further processing on the information."""
    _line_re = re.compile(
        r"^([-._0-9a-zA-Z]+)( '[-_ a-zA-Z]+')?\s+(w[0-9]+\s+)?\[ (fail|pass) \]\s*(.*)$")
    _line_re2 = re.compile(
        r"^[-._0-9a-zA-Z]+( '[-_ a-zA-Z]+')?\s+(w[0-9]+\s+)?\[ [-a-z]+ \]")
_line_re3 = re.compile(
r"^\*\*\*Warnings generated in error logs during shutdown after running tests: (.*)")
_line_re4 = re.compile(r"^The servers were restarted [0-9]+ times$")
_line_re5 = re.compile(r"^Only\s+[0-9]+\s+of\s+[0-9]+\s+completed.$")
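    # Added note: representative (hypothetical) mtr output lines the patterns
    # above are meant to match:
    #   _line_re:  "main.alias 'innodb' w2 [ fail ]  Found warnings"
    #   _line_re3: "***Warnings generated in error logs during shutdown after running tests: main.alias main.join"
    #   _line_re4: "The servers were restarted 4 times"
    #   _line_re5: "Only 123 of 456 completed."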
def __init__(self, textLimit=5, testNameLimit=16, testType=None):
self.textLimit = textLimit
self.testNameLimit = testNameLimit
self.testType = testType
self.numTests = 0
self.testFail = None
self.failList = []
self.warnList = []
LogLineObserver.__init__(self)
def setLog(self, loog):
LogLineObserver.setLog(self, loog)
d = loog.waitUntilFinished()
d.addCallback(lambda l: self.closeTestFail())
def outLineReceived(self, line):
stripLine = line.strip("\r\n")
m = self._line_re.search(stripLine)
if m:
testname, variant, worker, result, info = m.groups()
self.closeTestFail()
self.numTests += 1
self.step.setProgress('tests', self.numTests)
if result == "fail":
if variant is None:
variant = ""
else:
variant = variant[2:-1]
self.openTestFail(
testname, variant, result, info, stripLine + "\n")
else:
m = self._line_re3.search(stripLine)
# pylint: disable=too-many-boolean-expressions
if m:
stuff = m.group(1)
self.closeTestFail()
testList = stuff.split(" ")
self.doCollectWarningTests(testList)
elif (self._line_re2.search(stripLine) or
self._line_re4.search(stripLine) or
self._line_re5.search(stripLine) or
stripLine == "Test suite timeout! Terminating..." or
stripLine.startswith("mysql-test-run: *** ERROR: Not all tests completed") or
(stripLine.startswith("------------------------------------------------------------")
and self.testFail is not None)):
self.closeTestFail()
else:
self.addTestFailOutput(stripLine + "\n")
def openTestFail(self, testname, variant, result, info, line):
self.testFail = MtrTestFailData(
testname, variant, result, info, line, self.doCollectTestFail)
def addTestFailOutput(self, line):
if self.testFail is not None:
self.testFail.add(line)
def closeTestFail(self):
if self.testFail is not None:
self.testFail.fireCallback()
self.testFail = None
def addToText(self, src, dst):
lastOne = None
count = 0
for t in src:
if t != lastOne:
dst.append(t)
count += 1
if count >= self.textLimit:
break
def makeText(self, done):
if done:
text = ["test"]
else:
text = ["testing"]
if self.testType:
text.append(self.testType)
fails = sorted(self.failList[:])
self.addToText(fails, text)
warns = sorted(self.warnList[:])
self.addToText(warns, text)
return text
# Update waterfall status.
def updateText(self):
self.step.step_status.setText(self.makeText(False))
strip_re = re.compile(r"^[a-z]+\.")
def displayTestName(self, testname):
displayTestName = self.strip_re.sub("", testname)
if len(displayTestName) > self.testNameLimit:
displayTestName = displayTestName[
:(self.testNameLimit - 2)] + "..."
return displayTestName
def doCollectTestFail(self, testname, variant, result, info, text):
self.failList.append("F:" + self.displayTestName(testname))
self.updateText()
self.collectTestFail(testname, variant, result, info, text)
def doCollectWarningTests(self, testList):
for t in testList:
self.warnList.append("W:" + self.displayTestName(t))
self.updateText()
self.collectWarningTests(testList)
# These two methods are overridden to actually do something with the data.
def collectTestFail(self, testname, variant, result, info, text):
pass
def collectWarningTests(self, testList):
pass
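# Added illustrative sketch (not part of the original file): a subclass that
# acts on the parsed data; the logging behaviour shown here is hypothetical.
#
#   class LoggingMtrObserver(MtrLogObserver):
#       def collectTestFail(self, testname, variant, result, info, text):
#           log.msg("MTR failure: %s (%s): %s" % (testname, variant, info))
#       def collectWarningTests(self, testList):
#           log.msg("MTR warnings in: %s" % ", ".join(testList))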
class MTR(Test):
"""
Build step that runs mysql-test-run.pl, as used in MySQL, Drizzle,
MariaDB, etc.
It uses class MtrLogObserver to parse test results out from the
output of mysql-test-run.pl, providing better completion time
estimates and summarizing test failures on the waterfall page.
It also provides access to mysqld server error logs from the test
run to help debug
|
Azure/azure-sdk-for-python
|
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/locks/models.py
|
Python
|
mit
| 360 | 0 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from .v2016_09_01.models import *
|
SUSE-Cloud/glance
|
glance/tests/integration/legacy_functional/base.py
|
Python
|
apache-2.0
| 7,145 | 0 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import atexit
import os.path
import tempfile
import fixtures
from oslo.config import cfg
from glance import tests as glance_tests
import glance.common.client
from glance.common import config
import glance.db.sqlalchemy.api
import glance.db.sqlalchemy.migration
import glance.registry.client.v1.client
import glance.store
from glance.tests import utils as test_utils
TESTING_API_PASTE_CONF = """
[pipeline:glance-api]
pipeline = versionnegotiation gzip unauthenticated-context rootapp
[pipeline:glance-api-caching]
pipeline = versionnegotiation gzip unauthenticated-context cache rootapp
[pipeline:glance-api-cachemanagement]
pipeline =
versionnegotiation
gzip
unauthenticated-context
cache
cache_manage
rootapp
[pipeline:glance-api-fakeauth]
pipeline = versionnegotiation gzip fakeauth context rootapp
[pipeline:glance-api-noauth]
pipeline = versionnegotiation gzip context rootapp
[composite:rootapp]
paste.composite_factory = glance.api:root_app_factory
/: apiversions
/v1: apiv1app
/v2: apiv2app
[app:apiversions]
paste.app_factory = glance.api.versions:create_resource
[app:apiv1app]
paste.app_factory = glance.api.v1.router:API.factory
[app:apiv2app]
paste.app_factory = glance.api.v2.router:API.factory
[filter:versionnegotiation]
paste.filter_factory =
glance.api.middleware.version_negotiation:VersionNegotiationFilter.factory
[filter:gzip]
paste.filter_factory = glance.api.middleware.gzip:GzipMiddleware.factory
[filter:cache]
paste.filter_factory = glance.api.middleware.cache:CacheFilter.factory
[filter:cache_manage]
paste.filter_factory =
glance.api.middleware.cache_manage:CacheManageFilter.factory
[filter:context]
paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory
[filter:unauthenticated-context]
paste.filter_factory =
glance.api.middleware.context:UnauthenticatedContextMiddleware.factory
[filter:fakeauth]
paste.filter_factory = glance.tests.utils:FakeAuthMiddleware.factory
"""
TESTING_REGISTRY_PASTE_CONF = """
[pipeline:glance-registry]
pipeline = unauthenticated-context registryapp
[pipeline:glance-registry-fakeauth]
pipeline = fakeauth context registryapp
[app:registryapp]
paste.app_factory = glance.registry.api.v1:API.factory
[filter:context]
paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory
[filter:unauthenticated-context]
paste.filter_factory =
glance.api.middleware.context:UnauthenticatedContextMiddleware.factory
[filter:fakeauth]
paste.filter_factory = glance.tests.utils:FakeAuthMiddleware.factory
"""
CONF = cfg.CONF
CONF.import_opt('filesystem_store_datadir', 'glance.store.filesystem')
class ApiTest(test_utils.BaseTestCase):
def setUp(self):
super(ApiTest, self).setUp()
self.init()
def init(self):
self.test_dir = self.useFixture(fixtures.TempDir()).path
self._configure_logging()
self._setup_database()
self._setup_stores()
self._setup_property_protection()
self.glance_registry_app = self._load_paste_app(
'glance-registry',
flavor=getattr(self, 'registry_flavor', ''),
conf=getattr(self, 'registry_paste_conf',
TESTING_REGISTRY_PASTE_CONF),
)
self._connect_registry_client()
self.glance_api_app = self._load_paste_app(
'glance-api',
flavor=getattr(self, 'api_flavor', ''),
conf=getattr(self, 'api_paste_conf', TESTING_API_PASTE_CONF),
)
self.http = test_utils.Httplib2WsgiAdapter(self.glance_api_app)
def _setup_property_protection(self):
self._copy_data_file('property-protections.conf', self.test_dir)
self.property_file = os.path.join(self.test_dir,
'property-protections.conf')
def _configure_logging(self):
self.config(default_log_levels=[
'amqplib=WARN',
'sqlalchemy=WARN',
'boto=WARN',
'suds=INFO',
'keystone=INFO',
'eventlet.wsgi.server=DEBUG'
])
def _setup_database(self):
sql_connection = 'sqlite:////%s/tests.sqlite' % self.test_dir
self.config(sql_connection=sql_connection)
glance.db.sqlalchemy.api.clear_db_env()
glance_db_env = 'GLANCE_DB_TEST_SQLITE_FILE'
if glance_db_env in os.environ:
# use the empty db created and cached as a tempfile
# instead of spending the time creating a new one
db_location = os.environ[glance_db_env]
test_utils.execute('cp %s %s/tests.sqlite'
% (db_location, self.test_dir))
else:
glance.db.sqlalchemy.migration.db_sync()
# copy the clean db to a temp location so that it
# can be reused for future tests
(osf, db_location) = tempfile.mkstemp()
os.close(osf)
test_utils.execute('cp %s/tests.sqlite %s'
% (self.test_dir, db_location))
os.environ[glance_db_env] = db_location
# cleanup the temp file when the test suite is
# complete
def _delete_cached_db():
try:
os.remove(os.environ[glance_db_env])
except Exception:
glance_tests.logger.exception(
"Error cleaning up the file %s" %
os.environ[glance_db_env])
atexit.register(_delete_cached_db)
def _setup_stores(self):
image_dir = os.path.join(self.test_dir, "images")
self.config(filesystem_store_datadir=image_dir)
glance.store.create_stores()
def _load_paste_app(self, name, flavor, conf):
conf_file_path = os.path.join(self.test_dir, '%s-paste.ini' % name)
with open(conf_file_path, 'wb') as conf_file:
conf_file.write(conf)
conf_file.flush()
return config.load_paste_app(name, flavor=flavor,
conf_file=conf_file_path)
def _connect_registry_client(self):
def get_connection_type(self2):
def wrapped(*args, **kwargs):
return test_utils.HttplibWsgiAdapter(self.glance_registry_app)
return wrapped
self.stubs.Set(glance.common.client.BaseClient,
'get_connection_type', get_connection_type)
def tearDown(self):
glance.db.sqlalchemy.api.clear_db_env()
super(ApiTest, self).tearDown()
|
sdurrheimer/compose
|
tests/helpers.py
|
Python
|
apache-2.0
| 1,309 | 0.000764 |
from __future__ import absolute_import
from __future__ import unicode_literals
import os
from compose.config.config import ConfigDetails
from compose.config.config import ConfigFile
from compose.config.config import load
def build_config(contents, **kwargs):
return load(build_config_details(contents, **kwargs))
def build_config_details(contents, working_dir='working_dir', filename='filename.yml'):
return ConfigDetails(
working_dir,
[ConfigFile(filename, contents)],
)
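# Added illustrative usage (hypothetical service definition, not part of the
# original file):
#
#   cfg = build_config({'web': {'image': 'busybox'}})
#
# build_config_details wraps the dict in a ConfigDetails, so the compose
# loader treats it as if it had been read from filename.yml in working_dir.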
def create_host_file(client, filename):
dirname = os.path.dirname(filename)
with open(filename, 'r') as fh:
content = fh.read()
container = client.create_container(
'busybox:latest',
['sh', '-c', 'echo -n "{}" > {}'.format(content, filename)],
volumes={dirname: {}},
host_config=client.create_host_config(
binds={dirname: {'bind': dirname, 'ro': False}},
network_mode='none',
),
)
try:
client.start(container)
exitcode = client.wait(container)
if exitcode != 0:
            output = client.logs(container)
raise Exception(
"Container exited with code {}:\n{}".format(exitcode, output))
finally:
        client.remove_container(container, force=True)
|
williamHuang5468/QuicklyLearnDjango
|
mysite/mysite/settings.py
|
Python
|
mit
| 2,686 | 0 |
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.8.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'yd=6gp*c%jj@jmqug!qwb0m)ksf#2gr%_w+)a1t*4t)9yc#cr#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'polls',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
        'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
|
htzy/bigfour
|
cms/djangoapps/contentstore/management/commands/restore_asset_from_trashcan.py
|
Python
|
agpl-3.0
| 480 | 0.004167 |
from django.core.management.base import BaseCommand, CommandError
from xmodule.contentstore.utils import restore_asset_from_trashcan
class Command(BaseCommand):
    help = '''Restore a deleted asset from the trashcan back to its original course'''
def handle(self, *args, **options):
        if len(args) != 1:
raise CommandError("restore_asset_from_trashcan requires one argument: <location>")
        restore_asset_from_trashcan(args[0])
|
mvaled/sentry
|
tests/sentry/api/endpoints/test_project_rules.py
|
Python
|
bsd-3-clause
| 5,710 | 0.002627 |
from __future__ import absolute_import
from django.core.urlresolvers import reverse
from sentry.models import Environment, Rule
from sentry.testutils import APITestCase
class ProjectRuleListTest(APITestCase):
def test_simple(self):
self.login_as(user=self.user)
team = self.create_team()
project1 = self.create_project(teams=[team], name="foo")
self.create_project(teams=[team], name="bar")
url = reverse(
"sentry-api-0-project-rules",
kwargs={"organization_slug": project1.organization.slug, "project_slug": project1.slug},
)
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
rule_count = Rule.objects.filter(project=project1).count()
assert len(response.data) == rule_count
class CreateProjectRuleTest(APITestCase):
def test_simple(self):
self.login_as(user=self.user)
project = self.create_project()
        conditions = [
            {
"id": "sentry.rules.conditions.first_seen_event.FirstSeenEventCondition",
"key": "foo",
"match": "eq",
"value": "bar",
}
]
actions = [{"id": "sentry.rules.actions.notify_event.NotifyEventAction"}]
url = reverse(
"sentry-api
|
-0-project-rules",
kwargs={"organization_slug": project.organization.slug, "project_slug": project.slug},
)
response = self.client.post(
url,
data={
"name": "hello world",
"actionMatch": "any",
"actions": actions,
"conditions": conditions,
"frequency": 30,
},
format="json",
)
assert response.status_code == 200, response.content
assert response.data["id"]
rule = Rule.objects.get(id=response.data["id"])
assert rule.label == "hello world"
assert rule.data["action_match"] == "any"
assert rule.data["actions"] == actions
assert rule.data["conditions"] == conditions
assert rule.data["frequency"] == 30
def test_with_environment(self):
self.login_as(user=self.user)
project = self.create_project()
Environment.get_or_create(project, "production")
conditions = [
{
"id": "sentry.rules.conditions.first_seen_event.FirstSeenEventCondition",
"key": "foo",
"match": "eq",
"value": "bar",
}
]
actions = [{"id": "sentry.rules.actions.notify_event.NotifyEventAction"}]
url = reverse(
"sentry-api-0-project-rules",
kwargs={"organization_slug": project.organization.slug, "project_slug": project.slug},
)
response = self.client.post(
url,
data={
"name": "hello world",
"environment": "production",
"conditions": conditions,
"actions": actions,
"actionMatch": "any",
"frequency": 30,
},
format="json",
)
assert response.status_code == 200, response.content
assert response.data["id"]
assert response.data["environment"] == "production"
rule = Rule.objects.get(id=response.data["id"])
assert rule.label == "hello world"
assert rule.environment_id == Environment.get_or_create(rule.project, "production").id
def test_with_null_environment(self):
self.login_as(user=self.user)
project = self.create_project()
conditions = [
{
"id": "sentry.rules.conditions.first_seen_event.FirstSeenEventCondition",
"key": "foo",
"match": "eq",
"value": "bar",
}
]
actions = [{"id": "sentry.rules.actions.notify_event.NotifyEventAction"}]
url = reverse(
"sentry-api-0-project-rules",
kwargs={"organization_slug": project.organization.slug, "project_slug": project.slug},
)
response = self.client.post(
url,
data={
"name": "hello world",
"environment": None,
"conditions": conditions,
"actions": actions,
"actionMatch": "any",
"frequency": 30,
},
format="json",
)
assert response.status_code == 200, response.content
assert response.data["id"]
assert response.data["environment"] is None
rule = Rule.objects.get(id=response.data["id"])
assert rule.label == "hello world"
assert rule.environment_id is None
def test_missing_name(self):
self.login_as(user=self.user)
project = self.create_project()
conditions = [
{
"id": "sentry.rules.conditions.first_seen_event.FirstSeenEventCondition",
"key": "foo",
"match": "eq",
"value": "bar",
}
]
actions = [{"id": "sentry.rules.actions.notify_event.NotifyEventAction"}]
url = reverse(
"sentry-api-0-project-rules",
kwargs={"organization_slug": project.organization.slug, "project_slug": project.slug},
)
response = self.client.post(
url,
data={"actionMatch": "any", "actions": actions, "conditions": conditions},
format="json",
)
assert response.status_code == 400, response.content
|
jasonzzz/ansible
|
lib/ansible/module_utils/junos.py
|
Python
|
gpl-3.0
| 9,209 | 0.000869 |
#
# (c) 2015 Peter Sprygada, <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import re
import shlex
from distutils.version import LooseVersion
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.network import NetworkError, NetworkModule
from ansible.module_utils.network import register_transport, to_list
from ansible.module_utils.shell import CliBase
from ansible.module_utils.six import string_types
# temporary fix until modules are updated; to be removed before 2.2 final
from ansible.module_utils.network import get_module
try:
    from jnpr.junos import Device
from jnpr.junos.utils.config import Config
from jnpr.junos.version import VERSION
    from jnpr.junos.exception import RpcError, ConnectError, ConfigLoadError, CommitError
    from jnpr.junos.exception import LockError, UnlockError
if LooseVersion(VERSION) < LooseVersion('1.2.2'):
HAS_PYEZ = False
else:
HAS_PYEZ = True
except ImportError:
HAS_PYEZ = False
try:
import jxmlease
HAS_JXMLEASE = True
except ImportError:
HAS_JXMLEASE = False
try:
from lxml import etree
except ImportError:
import xml.etree.ElementTree as etree
SUPPORTED_CONFIG_FORMATS = ['text', 'set', 'json', 'xml']
def xml_to_json(val):
if isinstance(val, string_types):
return jxmlease.parse(val)
else:
return jxmlease.parse_etree(val)
def xml_to_string(val):
return etree.tostring(val)
class Netconf(object):
def __init__(self):
self.device = None
self.config = None
self._locked = False
self._connected = False
self.default_output = 'xml'
def raise_exc(self, msg):
if self.device:
if self._locked:
self.config.unlock()
self.disconnect()
raise NetworkError(msg)
def connect(self, params, **kwargs):
host = params['host']
port = params.get('port') or 830
user = params['username']
passwd = params['password']
try:
self.device = Device(host, user=user, passwd=passwd, port=port,
gather_facts=False)
self.device.open()
except ConnectError:
exc = get_exception()
self.raise_exc('unable to connect to %s: %s' % (host, str(exc)))
self.config = Config(self.device)
self._connected = True
def disconnect(self):
try:
self.device.close()
except AttributeError:
pass
self._connected = False
### Command methods ###
def run_commands(self, commands):
responses = list()
for cmd in commands:
meth = getattr(self, cmd.args.get('command_type'))
responses.append(meth(str(cmd), output=cmd.output))
for index, cmd in enumerate(commands):
if cmd.output == 'xml':
responses[index] = etree.tostring(responses[index])
elif cmd.args.get('command_type') == 'rpc':
responses[index] = str(responses[index].text).strip()
return responses
def cli(self, commands, output='xml'):
'''Send commands to the device.'''
try:
return self.device.cli(commands, format=output, warning=False)
except (ValueError, RpcError):
exc = get_exception()
self.raise_exc('Unable to get cli output: %s' % str(exc))
def rpc(self, command, output='xml'):
name, kwargs = rpc_args(command)
meth = getattr(self.device.rpc, name)
reply = meth({'format': output}, **kwargs)
return reply
### Config methods ###
def get_config(self, config_format="text"):
if config_format not in SUPPORTED_CONFIG_FORMATS:
self.raise_exc(msg='invalid config format. Valid options are '
'%s' % ', '.join(SUPPORTED_CONFIG_FORMATS))
ele = self.rpc('get_configuration', output=config_format)
if config_format in ['text', 'set']:
return str(ele.text).strip()
else:
return ele
def load_config(self, candidate, update='merge', comment=None,
confirm=None, format='text', commit=True):
merge = update == 'merge'
overwrite = update == 'overwrite'
self.lock_config()
try:
candidate = '\n'.join(candidate)
self.config.load(candidate, format=format, merge=merge,
overwrite=overwrite)
except ConfigLoadError:
exc = get_exception()
self.raise_exc('Unable to load config: %s' % str(exc))
diff = self.config.diff()
self.check_config()
if all((commit, diff)):
self.commit_config(comment=comment, confirm=confirm)
self.unlock_config()
return diff
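    # Added illustrative call (hypothetical config line, not part of the
    # original file):
    #   diff = netconf.load_config(['set system host-name lab-r1'],
    #                              update='merge', format='set',
    #                              comment='updated by ansible')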
def save_config(self):
raise NotImplementedError
### end of Config ###
def get_facts(self, refresh=True):
if refresh:
self.device.facts_refresh()
return self.device.facts
def unlock_config(self):
try:
self.config.unlock()
self._locked = False
except UnlockError:
exc = get_exception()
raise NetworkError('unable to unlock config: %s' % str(exc))
def lock_config(self):
try:
self.config.lock()
self._locked = True
except LockError:
exc = get_exception()
raise NetworkError('unable to lock config: %s' % str(exc))
def check_config(self):
if not self.config.commit_check():
self.raise_exc(msg='Commit check failed')
def commit_config(self, comment=None, confirm=None):
try:
kwargs = dict(comment=comment)
if confirm and confirm > 0:
kwargs['confirm'] = confirm
return self.config.commit(**kwargs)
except CommitError:
exc = get_exception()
raise NetworkError('unable to commit config: %s' % str(exc))
def rollback_config(self, identifier, commit=True, comment=None):
self.lock_config()
try:
self.config.rollback(identifier)
except ValueError:
exc = get_exception()
            self.raise_exc('Unable to rollback config: %s' % str(exc))
diff = self.config.diff()
if commit:
self.commit_config(comment=comment)
self.unlock_config()
return diff
Netconf = register_transport('netconf')(Netconf)
class Cli(CliBase):
CLI_PROMPTS_RE = [
re.compile(r"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"),
re.compile(r"\[\w+\@[\w\-\.]+(?: [^\]])\] ?[>#\$] ?$")
]
CLI_ERRORS_RE = [
re.compile(r"% ?Error"),
re.compile(r"% ?Bad secret"),
re.compile(r"invalid input", re.I),
re.compile(r"(?:incomplete|ambiguous) command", re.I),
re.compile(r"connection timed out", re.I),
re.compile(r"[^\r\n]+ not found", re.I),
re.compile(r"'[^']' +returned error code: ?\d+"),
]
def connect(self, params, **kwargs):
super(Cli, self).connect(params, **kwargs)
if self.shell._matched_prompt.strip().endswith('%'):
self.execute('cli')
self.execute('set cli screen-length 0')
def configure(self, commands, **kwargs):
cmds = ['configure']
cmds.extend(to_list(commands))
if kwargs.get('comment'):
cmds.append('c
|
RocketRedNeck/PythonPlayground
|
pid_dot.py
|
Python
|
mit
| 2,665 | 0.01651 |
# -*- coding: utf-8 -*-
"""
pid - example of PID control of a simple process with a time constant
Copyright (c) 2016 - RocketRedNeck.com RocketRedNeck.net
RocketRedNeck and MIT Licenses
RocketRedNeck hereby grants license for others to copy and modify this source code for
whatever purpose others deem worthy as long as RocketRedNeck is given credit
where credit is due and you leave RocketRedNeck out of it for all other nefarious purposes.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import matplotlib.pyplot as plot
import numpy as np
import math
tmax = 3.0
dt = 0.01
ts = np.arange(0.0, tmax, dt)
pvs = np.zeros(len(ts))
sps = np.zeros(len(ts))
mvs = np.zeros(len(ts))
mps = np.zeros(len(ts))
kf = 0.0
kp = 20.0 #10.0
ki = 0.0
kd = 2.0 #1.0
dt = ts[1] - ts[0]
Gp = 1.0
delay = 1 * dt
tau = 1000 * dt
sp_period = 1.0
err = 0.0
intErr = 0.0
lastErr = 0.0
lastT = ts[0]
lastG = 0.0
i = 0
d = 0
exp = -np.exp(-1/tau)
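# Added note: the process update in the loop below is a discretized
# first-order lag,
#   G[n] = (1 - a) * Gp * mp[n] + a * G[n-1],  where a = exp(-1/tau)
# with tau acting as a time constant measured in update steps. The variable
# `exp` holds -a, which is why the update is written as "... - (lastG * exp)".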
mp = 0
for t in ts:
if (t > 0):
sps[i] = math.sin(sp_period*t)
sps[i] = sps[i] / abs(sps[i]) # Square wave
else:
sps[i] = 0
derr = err - lastErr
intErr = intErr + err
mv = kf*sps[i] + (kp * err) + (ki * intErr) + (kd * (derr/dt))
mvs[i] = mv
mp = mp + (mv * dt)
mps[i] = mp
G = 0.0
if (t >= delay):
G = mp * Gp * (1.0 + exp) - (lastG * exp)
else:
d += 1
pvs[i] = G
lastG = G
i += 1
lastErr = err
err = 0.0
if (t >= delay):
err = sps[i-d] - pvs[i-d]
# err += np.random.randn(1)*0.09
plot.figure(1)
plot.cla()
plot.grid()
plot.plot(ts,sps,ts,pvs)
|
smn/garelay
|
manage.py
|
Python
|
bsd-2-clause
| 259 | 0 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault(
"DJANGO_SETTINGS_MODULE"
|
, "garelay.settings")
    from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
Akuli/porcupine
|
porcupine/plugins/python_tools.py
|
Python
|
mit
| 2,238 | 0.002234 |
"""
Format the current file with black or isort.
Available in Tools/Python/Black and Tools/Python/Isort.
"""
from __future__ import annotations
import logging
import subprocess
import traceback
from functools import partial
from pathlib import Path
from tkinter import messagebox
from porcupine import menubar, tabs, textutils, utils
from porcupine.plugins import python_venv
log = logging.getLogger(__name__)
def run_tool(tool: str, code: str, path: Path | None) -> str:
python = python_venv.find_python(None if path is None else utils.find_project_root(path))
if python is None:
messagebox.showerror(
"Can't find a Python installation", f"You need to install Python to run {tool}."
)
return code
fail_str = f"Running {tool} failed"
try:
# run in subprocess just to make sure that it can't crash porcupine
# set cwd so that black/isort finds its config in pyproject.toml
#
# FIXME: file must not be named black.py or similar
result = subprocess.run(
[str(python), "-m", tool, "-"],
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=(Path.home() if path is None else path.parent),
input=code.encode("utf-8"),
)
return result.stdout.decode("utf-8")
except subprocess.CalledProcessError as e:
messagebox.showerror(
fail_str,
utils.tkinter_safe_string(e.stderr.decode("utf-8"), hide_unsupported_chars=True),
)
except Exception:
        log.exception(f"running {tool} failed")
messagebox.showerror(fail_str, traceback.format_exc())
return code
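# Added note: the subprocess call above is roughly equivalent to piping the
# buffer through the tool on the command line, e.g.
#   echo "$CODE" | python -m black -
# since black and isort both treat "-" as "read source from stdin, write the
# formatted result to stdout".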
def format_code_in_textwidget(tool: str, tab: tabs.FileTab) -> None:
before = tab.textwidget.get("1.0", "end - 1 char")
after = run_tool(tool, before, tab.path)
if before != after:
with textutils.change_batch(tab.textwidget):
tab.textwidget.replace("1.0", "end - 1 char", after)
def setup() -> None:
menubar.add_filetab_command("Tools/Python/Black", partial(format_code_in_textwidget, "black"))
menubar.add_filetab_command("Tools/Python/Isort", partial(format_code_in_textwidget, "isort"))
|