__id__
int64 3.09k
19,722B
| blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
256
| content_id
stringlengths 40
40
| detected_licenses
list | license_type
stringclasses 3
values | repo_name
stringlengths 5
109
| repo_url
stringlengths 24
128
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
42
| visit_date
timestamp[ns] | revision_date
timestamp[ns] | committer_date
timestamp[ns] | github_id
int64 6.65k
581M
⌀ | star_events_count
int64 0
1.17k
| fork_events_count
int64 0
154
| gha_license_id
stringclasses 16
values | gha_fork
bool 2
classes | gha_event_created_at
timestamp[ns] | gha_created_at
timestamp[ns] | gha_updated_at
timestamp[ns] | gha_pushed_at
timestamp[ns] | gha_size
int64 0
5.76M
⌀ | gha_stargazers_count
int32 0
407
⌀ | gha_forks_count
int32 0
119
⌀ | gha_open_issues_count
int32 0
640
⌀ | gha_language
stringlengths 1
16
⌀ | gha_archived
bool 2
classes | gha_disabled
bool 1
class | content
stringlengths 9
4.53M
| src_encoding
stringclasses 18
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | year
int64 1.97k
2.01k
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4,346,506,919,185 |
4f45b711d948fa265ae9e9c7b75ad2e357d603fb
|
a704892d86252dde1bc0ff885ea5e7d23b45ce84
|
/addons-extra/base_module_merge/__terp__.py
|
406e816553d5cbdb93da1995218dd84af13e15e8
|
[] |
no_license
|
oneyoung/openerp
|
https://github.com/oneyoung/openerp
|
5685bf8cce09131afe9b9b270f6cfadf2e66015e
|
7ee9ec9f8236fe7c52243b5550fc87e74a1ca9d5
|
refs/heads/master
| 2016-03-31T18:22:41.917881 | 2013-05-24T06:10:53 | 2013-05-24T06:10:53 | 9,902,716 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# OpenERP addon manifest ("__terp__.py") for the "Module Merger" addon.
# Read by the OpenERP server at module-discovery time; it declares only
# metadata and data files to load -- there is no executable logic here.
{
    'name': 'Module Merger',
    'version': '1.0',
    'category': 'Generic Modules/Base',
    'description': """
* The wizard asks a many2many of modules
* And then generate a module which is the merge of all selected modules
* The new module is provided as a .zip file
The merge will works in all situations:
* Merging all .py files with the same name in the new module
* merging all .xml files and take care of id's.
""",
    'author': 'Tiny',
    'website': 'http://www.openerp.com',
    # Modules that must be installed before this one.
    'depends': ['base'],
    # XML data loaded only on first install.
    'init_xml': [],
    # XML data (re)loaded on every module update.
    'update_xml': ['base_module_merge_wizard.xml'],
    # Demonstration data (none).
    'demo_xml': [],
    'installable': True,
    'certificate': '0035756279709',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
UTF-8
|
Python
| false | false | 2,013 |
292,057,807,323 |
6eec854f9430a48381b6ae32c0451911e11c1627
|
f706e37032c57057b17b7c7e36b74d8aa29c1e0a
|
/linux_arm/un/src/execve.py
|
c4efbf06d3de5d93b220e07b1cf1a2c4e2c3a6da
|
[] |
no_license
|
wjlandryiii/shellcode
|
https://github.com/wjlandryiii/shellcode
|
7a2c138fa042c2741cf32463d5791ebcba10937a
|
de55bb887dd7bc9d38610d2b2665f70b5b228676
|
refs/heads/master
| 2021-01-23T09:48:55.636162 | 2014-10-28T02:08:02 | 2014-10-28T02:08:02 | 13,736,799 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def execve(parameter_list):
    """Build an execve shellcode payload carrying *parameter_list* as argv.

    Each argument (taken last-first) is appended as a NUL-terminated
    string, followed by one final NUL terminator, onto the template
    shellcode with its baked-in "-a /bin/uname" argument block stripped.
    """
    # NUL-terminate every argument, last argument first.
    argv_blob = "".join(arg + "\x00" for arg in reversed(parameter_list))
    argv_blob += "\x00"  # empty string marks the end of the argv block
    # Strip the template's hard-coded argument block (plus its trailing
    # byte) and splice in the caller-supplied arguments instead.
    baked_in = "-a\x00/bin/uname\x00\x00"
    return execve_shellcode[:-(len(baked_in) + 1)] + argv_blob
|
UTF-8
|
Python
| false | false | 2,014 |
9,483,287,797,182 |
9e8d7517e3df331a1623b8628c32d32b4d28a35b
|
3a73ea47c1590cddb221267432eb689abc4bd057
|
/PIL_thumbnails.py
|
d2132a586843bc811d0d841f39b6afc99bd564b9
|
[] |
no_license
|
rajatsaxena/Python_Imaging_Lib
|
https://github.com/rajatsaxena/Python_Imaging_Lib
|
314bbf8e054486ac25c8fe2a3b5a2c8534724f49
|
3217d2e039cf0e42269bca8f5fd97d93bfca54f9
|
refs/heads/master
| 2018-12-28T10:54:17.060560 | 2014-02-07T14:13:20 | 2014-02-07T14:13:20 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from PIL import Image
import os, sys
size = (256,256)
saved = "Lenna.jpeg"
try:
image = Image.open("Lenna.png")
except:
print "Unable to load image"
image.thumbnail(size)
image.save(saved)
image.show()
|
UTF-8
|
Python
| false | false | 2,014 |
3,023,656,984,302 |
a2a7175c828563bece7eb12e4211d93a7f549567
|
66a149446568be1ca711aa22cc03a19cd0b741b0
|
/shitsu/utils/fix_socket.py
|
430b359b157e361623d48e1d8f4a995a5caef488
|
[
"GPL-3.0-only",
"LicenseRef-scancode-other-copyleft",
"GPL-2.0-or-later",
"LicenseRef-scancode-other-permissive",
"GPL-3.0-or-later"
] |
non_permissive
|
Kagami/shitsu
|
https://github.com/Kagami/shitsu
|
e2484b377ac6d860c5d609d0fdc63af3fd04406f
|
de975099eee976bdfc99850a88acf1a047fe06e5
|
refs/heads/master
| 2020-05-04T12:36:20.448729 | 2012-02-02T10:22:27 | 2012-02-02T10:22:27 | 2,454,499 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import socket
import datetime
# Patched version of socket.create_connection from Python 2.7's standard
# library (works on Python 2.6 and 2.7).  Unlike the stock version it
# re-checks the overall timeout on every iteration of the address loop,
# so the total wait is bounded by `timeout` instead of up to
# `count_of_dns_records * timeout`.
def _create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
                       source_address=None):
    """Connect to *address* and return the socket object.
    Convenience function. Connect to *address* (a 2-tuple ``(host,
    port)``) and return the socket object. Passing the optional
    *timeout* parameter will set the timeout on the socket instance
    before attempting to connect. If no *timeout* is supplied, the
    global default timeout setting returned by :func:`getdefaulttimeout`
    is used. If *source_address* is set it must be a tuple of (host, port)
    for the socket to bind as a source address before making the connection.
    An host of '' or port 0 tells the OS to use the default.
    """
    host, port = address
    err = None
    # With an explicit timeout, track a wall-clock deadline for the WHOLE
    # address loop (not just per-connect attempt).
    if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
        start = datetime.datetime.now()
        delta = datetime.timedelta(seconds=timeout)
        check_timeout = True
    else:
        check_timeout = False
    for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
        af, socktype, proto, canonname, sa = res
        sock = None
        try:
            sock = socket.socket(af, socktype, proto)
            if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
                sock.settimeout(timeout)
            if source_address:
                sock.bind(source_address)
            sock.connect(sa)
            # First address that connects wins.
            return sock
        except socket.error as _:
            err = _  # remember the last failure; re-raised if all fail
            if sock is not None:
                sock.close()
            # Stop trying further addresses once the deadline has passed.
            if check_timeout and datetime.datetime.now() - start >= delta:
                break
    if err is not None:
        raise err
    else:
        raise socket.error("getaddrinfo returns an empty list")
# Monkey-patch the stdlib so every caller picks up the bounded-timeout
# version.
socket.create_connection = _create_connection
|
UTF-8
|
Python
| false | false | 2,012 |
2,911,987,852,228 |
1b31bc463f85a9e49d702fd36624e4ee04fb2f57
|
97bc3094c70682c05f4d49099e53759a54f64488
|
/warehouse/astir_fun.py
|
7967894441a4866af05c69d2e57bb73cc027b487
|
[
"GPL-1.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-3.0-only",
"LGPL-2.1-or-later",
"LGPL-2.0-or-later",
"GPL-3.0-or-later"
] |
non_permissive
|
hksonngan/astir.firework
|
https://github.com/hksonngan/astir.firework
|
48c5d9d2c6a968aeb834e93a42a5000d0c60468d
|
f5078fefb03effc38bd257a8f24cbb4b62dd00fc
|
refs/heads/master
| 2016-09-06T16:04:00.318898 | 2011-08-03T15:49:32 | 2011-08-03T15:49:32 | 37,856,516 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
#
# This file is part of FIREwork
#
# FIREwork is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FIREwork is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with FIREwire. If not, see <http://www.gnu.org/licenses/>.
#
# FIREwire Copyright (C) 2008 - 2010 Julien Bert
## Convention
# import
# constants
# shell functions
# cmd functions
# shell io
# parser and caller
#=== import =====================
from math import log, sqrt
from optparse import OptionParser
from copy import deepcopy
import os, sys
import readline # allow to back line in the shell
import cPickle, atexit

from numpy import array  # array() is used below (seq handling) but was never imported

from pymir_kernel import image_read, image_write
from pymir_kernel import image_im2mat, image_mat2im
from pymir_kernel import image_show, image_show_get_pts
from pymir_kernel import image_plot_points, image_plot_lines, image_show_stereo_get_pts
from pymir_kernel import color_color2gray, color_gray2color, color_colormap
from pymir_kernel import space_reg_ave, space_merge, space_align, space_G_transform
from pymir_kernel import resto_wiener, image_anaglyph
from pymir_kernel import geo_homography
#=== constants ==================
# Names of every shell command exposed to the user (listed by `fun`).
listfun = ['exit', 'ls', 'rm', 'mv', 'cp', 'mem', 'save_var',
           'load_var', 'add', 'fun', 'save_world', 'load_world',
           'ldir', 'load_im', 'save_im', 'show_mat', 'color2gray',
           'seq2mat', 'seq_reg_ave', 'load_vid', 'wiener', 'mosaicing',
           'cut_seq', 'licence', 'gray2color', 'anaglyph', 'colormap',
           'sub', 'div', 'mul', 'info']
# ANSI escape sequences used to colorize shell output.
B = '\033[0;34m' # blue
BC = '\033[0;36m' # blue clear (or blue sky)
G = '\033[0;32m' # green
GB = '\033[1;32m' # green bold
R = '\033[0;31m' # red
RB = '\033[1;31m' # red bold
N = '\033[m' # neutral
Y = '\033[0;33m' # yellow
# Width (in characters) of the textual progress bar.
sizebar = 32
version = 'v0.36'
# WORLD structure: WORLD['keyname'] = [header, data]
# header = 'seq' or 'mat'
# data = array(high, width, nb_channel)
WORLD = {}
# read history
readline.set_history_length(500)
histfile = os.path.join(os.environ['HOME'], '.astir_history')
try:
    readline.read_history_file(histfile)
except IOError:
    # First run: no history file yet -- nothing to restore.
    pass
# save always before exit, even when sys.exit is raised
atexit.register(readline.write_history_file, histfile)
# errors flag: succes 1, nothing 0, error -1
#=== shell functions ============
def inbox_overwrite(name):
    """Ask the user whether *name* may be overwritten; return 'y' or 'n'."""
    reply = ''
    # Keep prompting until the user gives a definite answer
    # (pressing Enter counts as yes).
    while reply not in ('y', 'n'):
        reply = raw_input('%s??%s Overwrite %s (%s[y]%s/%sn%s): '
                          % (Y, N, name, GB, N, R, N))
        if not reply:
            reply = 'y'
    return reply
def inbox_question(message):
    """Yes/no prompt displaying *message*; return 'y' (default) or 'n'."""
    reply = ''
    while reply not in ('y', 'n'):
        reply = raw_input('%s??%s %s (%s[y]%s/%sn%s): '
                          % (Y, N, message, GB, N, R, N))
        if not reply:
            reply = 'y'  # empty input defaults to yes
    return reply
def inbox_input(message):
while 1:
try:
answer = raw_input('%s??%s %s ' % (Y, N, message))
if answer == '':
print '%s!!%s Again' % (B, N)
continue
break
except:
print '%s!!%s Again' % (B, N)
continue
return answer
def outbox_exist(name):
print '%s!!%s %s doesn\'t exist' % (B, N, name)
def outbox_error(message):
print '%sEE%s %s' % (R, N, message)
def outbox_bang(message):
print '%s!!%s %s' % (B, N, message)
def check_name(names):
    """Return 1 when every given name is a workspace variable, else -1.

    Accepts a single name or a list of names; reports the first missing
    one via outbox_exist().
    """
    if not isinstance(names, list):
        names = [names]
    known = WORLD.keys()
    for candidate in names:
        if candidate in known:
            continue
        outbox_exist(candidate)
        return -1
    return 1
def check_name_file(names):
    """Return 1 when every given file exists in the current dir, else -1.

    Accepts a single filename or a list; reports the first missing one
    via outbox_exist().
    """
    if not isinstance(names, list):
        names = [names]
    present = os.listdir('.')
    for fname in names:
        if fname not in present:
            outbox_exist(fname)
            return -1
    return 1
def check_overwrite(names):
    """Ask before overwriting existing workspace variables.

    Accepts a single name or a list.  Returns 1 when it is safe to
    proceed (no clash, or the user agreed), 0 when the user refused.
    """
    if not isinstance(names, list):
        names = [names]
    lname = WORLD.keys()
    for name in names:
        while name in lname:
            answer = inbox_overwrite(name)
            if answer == 'n':
                return 0
            else:
                break
    # (removed a dead commented-out rename loop that shadowed this logic)
    return 1
def check_overwrite_file(names):
    """Ask before overwriting existing files in the current directory.

    Accepts a single filename or a list.  Returns 1 when it is safe to
    proceed, 0 when the user refused.

    Bug fixed: the original fell off the end and returned None, so
    callers testing ``if not check_overwrite_file(...)`` aborted even
    when no file clashed at all.
    """
    if not isinstance(names, list):
        names = [names]
    lname = os.listdir('.')
    for name in names:
        while name in lname:
            answer = inbox_overwrite(name)
            if answer == 'n':
                return 0
            else:
                break
    return 1
def check_seq(names):
    """Return 1 when every named workspace variable is a 'seq', else -1."""
    if not isinstance(names, list):
        names = [names]
    lname = WORLD.keys()
    for name in names:
        kind = WORLD[name][0]
        if kind != 'seq':
            outbox_error('Only seq varaible can be used')
            return -1
    return 1
def check_mat(names):
    """Return 1 when every named workspace variable is a 'mat', else -1."""
    if not isinstance(names, list):
        names = [names]
    lname = WORLD.keys()
    for name in names:
        kind = WORLD[name][0]
        if kind != 'mat':
            outbox_error('Only mat varaible can be used')
            return -1
    return 1
def check_RGB(im):
    """Return 1 when *im* has at least 3 channels (RGB/RGBA), else -1."""
    shape = im.shape
    if len(shape) == 3 and shape[2] >= 3:
        return 1
    outbox_error('Must be in RGB format')
    return -1
def check_L(im):
    """Return 1 when *im* is single-channel (2-D, Luminance), else -1."""
    if len(im.shape) == 2:
        return 1
    outbox_error('Must be in L format')
    return -1
class progress_bar:
    """Minimal in-place console progress bar.

    valmax -- total number of steps; update() takes values 0..valmax-1
    maxbar -- bar width in characters (capped at 200)
    title  -- label shown on the left (left-justified to 10 chars)
    """
    def __init__(self, valmax, maxbar, title):
        if maxbar > 200: maxbar = 200
        # Store the last step index, floored at 1 so the percentage
        # division in update() can never divide by zero.  (The original
        # decremented AFTER its zero-guard, stored 0 for valmax <= 1 and
        # crashed with ZeroDivisionError on the first update().)
        self.valmax = max(valmax - 1, 1)
        self.maxbar = maxbar
        self.title = title
    def update(self, val):
        """Redraw the bar for step *val* (clamped to the last step)."""
        sys.stdout.flush()
        if val > self.valmax: val = self.valmax
        perc = round((float(val) / float(self.valmax)) * 100)
        scale = 100.0 / float(self.maxbar)
        bar = int(perc / scale)
        out = '\r%s%s %s[%s%s%s%s] %s%3d %%%s' % (BC, self.title.ljust(10), G, Y, '=' * bar, ' ' * (self.maxbar - bar), G, RB, perc, N)
        sys.stdout.write(out)
        if perc == 100: sys.stdout.write('\n')
#=== cmd functions =============
def call_ls(args):
    '''
    Listing all variables in work space.
    Liste toutes les variables dans l espace de travail
    '''
    usage = 'ls'
    prog = 'ls'
    desc = call_ls.__doc__
    p = OptionParser(description = desc, prog = prog, version = version)
    p.set_usage(usage)
    try: opt, args = p.parse_args(args)
    except: return 0
    # `ls` takes no positional arguments.
    if len(args) > 0:
        p.print_help()
        return 0
    lname = WORLD.keys()
    lname.sort()
    space = 10 # cst columns size
    print '%s %s %s' % ('name'.ljust(space), 'type'.ljust(space), 'size'.ljust(space))
    for name in lname:
        kind = WORLD[name][0]
        if kind == 'mat':
            # mat: single image, shape (h, w[, channels]).
            dim = WORLD[name][1].shape
            dtype = WORLD[name][1].dtype
            h, w = dim[:2]
            if len(dim) == 3:
                if dim[2] == 3: mode = 'RGB'
                elif dim[2] == 4: mode = 'RGBA'
            else: mode = 'L'
            # NOTE(review): a 3-D mat whose channel count is neither 3
            # nor 4 leaves `mode` unset here -- confirm intended.
            print '%s %s%s %s%s%s' % (name.ljust(space),
                                      G, 'mat'.ljust(space),
                                      R, '[%ix%i %s %s]' % (w, h, mode, dtype), N)
        elif kind == 'seq':
            # seq: stack of images, shape (n, h, w[, channels]).
            dim = WORLD[name][1].shape
            dtype = WORLD[name][1].dtype
            nbm = dim[0]
            h, w = dim[1:3]
            if len(dim) == 4:
                if dim[3] == 3: mode = 'RGB'
                elif dim[3] == 4: mode = 'RGBA'
            else: mode = 'L'
            print '%s %s%s %s%s%s' % (name.ljust(space),
                                      G, 'seq'.ljust(space),
                                      R, '[%i mat %ix%i %s %s]' % (nbm, w, h, mode, dtype), N)
    return 1
def call_ldir(args):
    '''
    Listing of the current directory.
    Liste du dossier courant
    '''
    parser = OptionParser(description = call_ldir.__doc__, prog = 'ldir',
                          version = version)
    parser.set_usage('ldir')
    try:
        opt, args = parser.parse_args(args)
    except:
        return 0
    # `ldir` takes no positional arguments.
    if args:
        parser.print_help()
        return 0
    os.system('ls')
    return 1
def call_rm(args):
    '''
    Remove variables in work space.
    Efface des variables dans l espace de travail
    '''
    usage = 'rm <name>\nrm <name1> <name2>\nrm <na*>\nrm <*>'
    prog = 'rm'
    desc = call_rm.__doc__
    p = OptionParser(description = desc, prog = prog, version = version)
    p.set_usage(usage)
    try: opt, args = p.parse_args(args)
    except: return 0
    if len(args) == 0:
        p.print_help()
        return 0
    # '*' alone removes everything; 'pre*post' does a single-joker glob
    # over the workspace names.
    if args[0] == '*': args = WORLD.keys()
    elif args[0].find('*') != -1:
        lname = WORLD.keys()
        pattern = args[0].split('*')
        # Exactly one '*' is supported (two split parts).
        if len(pattern) != 2:
            outbox_error('Bad pattern with the joker *')
            return -1
        args = []
        # NOTE(review): find() != -1 matches the prefix/suffix anywhere
        # in the name, not anchored -- confirm intended glob semantics.
        for name in lname:
            if name.find(pattern[0]) != -1 and name.find(pattern[1]) != -1:
                args.append(name)
        if len(args) == 0:
            outbox_error('No variable matchs with the pattern *')
            return -1
        args.sort()
        outbox_bang('%i variables match with the pattern' % len(args))
        print args
        # Bulk removal needs explicit confirmation.
        answer = inbox_question('Agree to remove all of them')
        if answer == 'n': return 0
    if not check_name(args): return -1
    for name in args: del WORLD[name]
    return 1
def call_mv(args):
    '''
    Move/rename variable.
    Deplace/renomme une variable
    '''
    usage = 'mv <source_name> <target_name>\nmv im0 im1'
    prog = 'mv'
    desc = call_mv.__doc__  # was call_ls.__doc__: `mv --help` showed ls's text
    p = OptionParser(description = desc, prog=prog, version=version)
    p.set_usage(usage)
    try: opt, args = p.parse_args(args)
    except: return 0
    if len(args) != 2:
        p.print_help()
        return 0
    src, trg = args
    # check_* report failure with -1 (which is truthy), so compare
    # explicitly instead of the original `if not ...` (never fired).
    if check_name(src) == -1: return -1
    if not check_overwrite(trg): return 0
    WORLD[trg] = deepcopy(WORLD[src])
    del WORLD[src]
    # (removed `del data`: `data` was never defined and raised NameError)
    return 1
def call_cp(args):
    '''
    Copy variable
    Copie une variable
    '''
    usage = 'cp <source_name> <target_name>\ncp im0 im1'
    prog = 'cp'
    desc = call_cp.__doc__
    p = OptionParser(description = desc, prog = prog, version = version)
    p.set_usage(usage)
    try: opt, args = p.parse_args(args)
    except: return 0
    if len(args) != 2:
        p.print_help()
        return 0
    src, trg = args
    # check_name reports failure with -1 (truthy): compare explicitly.
    if check_name(src) == -1: return -1
    if not check_overwrite(trg): return 0
    # Deep copy so later edits to the copy never alias the source data.
    WORLD[trg] = deepcopy(WORLD[src])
    # (removed `del data`: `data` was never defined and raised NameError)
    return 1
def call_mem(args):
'''
Memories used in work space by the variables
Mémoire utilisee dans les espaces de travails par les variables
'''
usage = 'mem'
prog = 'mem'
desc = call_mem.__doc__
p = OptionParser(description = desc, prog = prog, version = version)
p.set_usage(usage)
try: opt, args = p.parse_args(args)
except: return 0
if len(args) > 0:
p.print_help()
return 0
space = 10
txt = ['', 'k', 'M', 'G', 'T']
nbb = {'float8':1, 'float16':2, 'float32':4, 'float64':8,
'uint8':1, 'uint16':2, 'uint32':4, 'uint64':8}
lname = WORLD.keys()
for name in lname:
size = WORLD[name][1].size
dtype = WORLD[name][1].dtype
size *= nbb[dtype]
ie = int(log(size) // log(1e3))
size /= (1e3 ** ie)
size = '%5.2f %sB' % (size, txt[ie])
print '%s %s%s %s%s%s' % (name.ljust(space),
G, kind.ljust(space),
R, size.ljust(space), N)
return 1
def call_fun(args):
    '''
    Listing funtions available in Astir
    Liste les fonctions disponible dans Astir
    '''
    usage = 'fun'
    prog = 'fun'
    desc = call_fun.__doc__
    p = OptionParser(description = desc, prog = prog, version = version)
    p.set_usage(usage)
    p.add_option('-c', action='store', type='int', default='4', help='Number of columns. Nombre de colonnes')
    try: opt, args = p.parse_args(args)
    except: return 0
    if len(args) > 0:
        p.print_help()
        return 0
    # NOTE: sorts the module-level list in place (side effect kept).
    listfun.sort()
    sfun = len(listfun)
    nc = opt.c
    # Number of rows needed for nc columns (ceil division).
    if sfun % nc == 0: nl = sfun // nc
    else: nl = (sfun // nc) + 1
    # Width of the widest command name, for column alignment.
    smax = 0
    for i in xrange(sfun):
        val = len(listfun[i])
        if val > smax: smax = val
    # Print column-major: entry (row i, col j) is listfun[j * nl + i].
    for i in xrange(nl):
        txt = ''
        for j in xrange(nc):
            ind = j * nl + i
            if ind < sfun: txt += '%s ' % listfun[ind].ljust(smax)
            else: txt += ''
        print txt
    return 1
def call_save_var(args):
    '''
    Save Astir variable to file.
    Sauvegarde une variable Astir dans un fichier
    '''
    usage = 'save_var <var_name> <file_name>\nsave_var im1 image1.pck'
    prog = 'save_var'
    desc = call_save_var.__doc__
    p = OptionParser(description = desc, prog = prog, version = version)
    p.set_usage(usage)
    try: opt, args = p.parse_args(args)
    except: return 0
    if len(args) != 2:
        p.print_help()
        return 0
    name, fname = args
    # `name` is a WORKSPACE variable: validate it against WORLD, not the
    # file system (the original called check_name_file by mistake, so
    # saving failed unless a file happened to share the variable's name).
    if check_name(name) == -1: return -1
    if not check_overwrite_file(fname): return -1
    # Binary mode: cPickle protocol 1 is a binary format.
    f = open(fname, 'wb')
    local = ['var_astir', name, WORLD[name]]
    cPickle.dump(local, f, 1)
    f.close()
    return 1
def call_save_world(args):
    '''
    Save the whole work space to a file.
    Sauvegarde entierement l espace de travail dans un fichier
    '''
    usage = 'save_world <file_name>\nsave_world backup.pck'
    prog = 'save_world'
    desc = call_save_world.__doc__
    p = OptionParser(description = desc, prog = prog, version = version)
    p.set_usage(usage)
    try: opt, args = p.parse_args(args)
    except: return 0
    if len(args) != 1:
        p.print_help()
        return 0
    kname = WORLD.keys()
    if len(kname) == 0:
        outbox_bang('Nothing to save')
        return 0
    fname = args[0]
    if not check_overwrite_file(fname): return -1
    # Binary mode: cPickle protocol 1 is a binary format ('w' corrupts
    # the stream on platforms with newline translation).
    f = open(fname, 'wb')
    local = ['world_astir', WORLD]
    cPickle.dump(local, f, 1)
    f.close()
    return 1
def call_load_var(args):
    '''
    Load a variable from a file to work space.
    Charge une variable depuis un fichier dans l espace de travail.
    '''
    usage = 'load_var <file_name>\nload_var mydata.pck'
    prog = 'load_var'
    desc = call_load_var.__doc__
    p = OptionParser(description = desc, prog = prog, version = version)
    p.set_usage(usage)
    try: opt, args = p.parse_args(args)
    except: return 0
    if len(args) != 1:
        p.print_help()
        return 0
    fname = args[0]
    # check_name_file reports failure with -1 (truthy): compare explicitly.
    if check_name_file(fname) == -1: return -1
    # Binary mode: the pickle was written with binary protocol 1.
    f = open(fname, 'rb')
    try: local = cPickle.load(f)
    except:
        outbox_error('Can not open the file')
        f.close()
        return -1
    f.close()
    # Files written by save_var are tagged ['var_astir', name, data].
    if local[0] != 'var_astir':
        outbox_error('Not Astir format')
        return -1
    varname = local[1]
    vardata = local[2]
    lname = WORLD.keys()
    # On a name clash, let the user overwrite or pick a fresh name.
    while varname in lname:
        answer = inbox_overwrite(varname)
        if answer == 'n': varname = inbox_input('Change to a new name:')
        else: break
    WORLD[varname] = vardata
    return 1
def call_load_world(args):
    '''
    Load a work space from a file.
    Charge un espace de travial depuis un fichier.
    '''
    # Rebinding the module-level workspace needs a global declaration;
    # the original did `del WORLD; WORLD = ...` without one and raised
    # UnboundLocalError on every call.
    global WORLD
    usage = 'load_world <file_name>\nload_world mydata.pck'
    prog = 'load_world'
    desc = call_load_world.__doc__
    p = OptionParser(description = desc, prog = prog, version = version)
    p.set_usage(usage)
    try: opt, args = p.parse_args(args)
    except: return 0
    if len(args) != 1:
        p.print_help()
        return 0
    fname = args[0]
    # check_name_file reports failure with -1 (truthy): compare explicitly.
    if check_name_file(fname) == -1: return -1
    # Binary mode matches the protocol-1 pickle written by save_world.
    f = open(fname, 'rb')
    try: local = cPickle.load(f)
    except:
        outbox_error('Can not open the file')
        f.close()
        return -1
    f.close()
    if local[0] != 'world_astir':
        outbox_error('Not Astir format')
        return -1
    answer = inbox_question('All variables will be deleted, are you agree')
    if answer == 'n': return 0
    # Replace the whole workspace with the loaded one.
    WORLD = local[1]
    return 1
def call_load_im(args):
    '''
    Load images from files.
    Chared des images depuis des fichiers.
    '''
    usage = 'load_im <file_name.[bmp, jpg, png, tiff]>\nload_im file_na*.png'
    prog = 'load_im'
    desc = call_load_im.__doc__
    p = OptionParser(description = desc, prog = prog, version = version)
    p.set_usage(usage)
    try: opt, args = p.parse_args(args)
    except: return 0
    if len(args) == 0:
        p.print_help()
        return 0
    # Joker form 'pre*post': load every matching file as ONE seq variable.
    if args[0].find('*') != -1:
        lname = os.listdir('.')
        pattern = args[0].split('*')
        if len(pattern) != 2:
            outbox_error('Bad pattern with joker *')
            return -1
        mem = []
        for name in lname:
            if name.find(pattern[0]) != -1 and name.find(pattern[1]) != -1:
                mem.append(name)
        if len(mem) == 0:
            outbox_error('No image matchs with the pattern')
            return -1
        # First match is used below for the extension check.
        fname = mem[0]
        mem.sort()
        outbox_bang('%i files match with the pattern' % len(mem))
        print mem
        answer = inbox_question('Agree to load all of them')
        if answer == 'n': return 0
    else:
        # Single-file form: mem stays None to select the 'mat' branch.
        mem = None
        fname = args[0]
    buf = fname.split('.')
    if len(buf) == 2: name, ext = fname.split('.')
    else: name, ext = None, None
    if ext not in ['bmp', 'jpg', 'png', 'tiff']:
        outbox_error('Bad extension (bmp, jpg, png or tiff)')
        return -1
    # NOTE(review): check_name_file/check_overwrite signal failure with
    # -1, which is truthy, so these guards never fire -- bug to confirm.
    if not check_name_file(fname): return -1
    if not check_overwrite(name): return 0
    if mem is None:
        # Single image -> 'mat' variable named after the file stem.
        im = image_read(fname)
        WORLD[name] = ['mat', im]
        del im
    else:
        # Several images -> one 'seq' variable stacked in sorted order.
        bar = progress_bar(len(mem), sizebar, 'loading')
        seq = []
        name = mem[0].split('.')[0]
        i = 0
        for item in mem:
            im = image_read(item)
            seq.append(im)
            bar.update(i)
            i += 1
            del im
        # NOTE(review): `array` is not imported anywhere in this file --
        # presumably numpy.array; verify the import.
        seq = array(seq)
        WORLD[name] = ['seq', seq]
        del seq
    return 1
def call_save_im(args):
    '''
    Save image(s) from a variable (mat/seq) to a file(s).
    Sauvegarde une ou des images depuis une variable (mat/seq) vers un ou des fichiers
    save_im <mat_name> <file_name.[bmp, jpg, png]>
    '''
    usage = 'save_im <var_name> <file_name.[bmp, jpg, png, tiff]>\nsave_im im0 im.png\nsave_im vid im.png'
    prog = 'save_im'
    desc = call_save_im.__doc__
    p = OptionParser(description = desc, prog = prog, version = version)
    p.set_usage(usage)
    try: opt, args = p.parse_args(args)
    except: return 0
    if len(args) != 2:
        p.print_help()
        return 0
    name = args[0]
    fname = args[1]
    # NOTE(review): check_name signals failure with -1 (truthy), so this
    # guard never fires -- bug to confirm.
    if not check_name(name): return -1
    lext = ['jpg', 'png', 'bmp', 'tiff']
    if len(fname.split('.')) != 2:
        outbox_error('File name must have an extension')
        return -1
    # Split into stem + extension; the stem is reused below.
    [fname, ext] = fname.split('.')
    if ext not in lext:
        outbox_error('Wrong extension, only jpg, png, bmp or tiff')
        return -1
    kind = WORLD[name][0]
    if kind == 'mat':
        # Single image -> single file.
        fname = fname + '.' + ext
        if not check_overwrite_file(fname): return -1
        im = WORLD[name][1]
        image_write(im, fname)
        del im, fname
    elif kind == 'seq':
        # Sequence -> numbered files: <stem>_0000.<ext>, _0001, ...
        nb = WORLD[name][1].shape[0]
        names = [fname + '_%04i.' % i + ext for i in xrange(nb)]
        if not check_overwrite_file(names): return -1
        bar = progress_bar(nb, sizebar, 'writing')
        for i in xrange(nb):
            im = WORLD[name][1][i]
            image_write(im, names[i])
            bar.update(i)
        del im, bar, nb, names
    return 1
def call_show_mat(args):
    '''
    Display a mat variable as an image.
    Affiche une variable de type mat comme une image.
    '''
    p = OptionParser(description = call_show_mat.__doc__, prog = 'show_mat',
                     version = version)
    p.set_usage('show_mat <mat_name>\nshow_mat <mat_name1> <mat_name2>')
    try:
        opt, args = p.parse_args(args)
    except:
        return 0
    # Accept one or two variable names.
    if not 1 <= len(args) <= 2:
        p.print_help()
        return 0
    if not check_name(args): return -1
    if not check_mat(args): return -1
    # Collect the image payload of every requested variable and display.
    image_show([WORLD[name][1] for name in args])
    return 1
def call_color2gray(args):
    '''
    Convert mat/seq color (RGB or RGBA) to gray scale (Luminance).
    Convertie mat/seq en couleur (RGB ou RGBA) en niveau de gris (Luminance).
    '''
    usage = 'Convert in place\ncolor2gray <mat_name>\nConvert to new mat\n\
color2gray <mat_name> <mat_new_name>\nConvert a mat sequence in-place\n\
color2gray <seq_name>\nConvert a mat sequence to a new one\n\
color2gray <seq_name> <seq_new_name>'
    prog = 'color2gray'
    desc = call_color2gray.__doc__  # was call_save_im.__doc__: wrong help text
    p = OptionParser(description = desc, prog = prog, version = version)
    p.set_usage(usage)
    try: opt, args = p.parse_args(args)
    except: return 0
    if len(args) == 0 or len(args) > 2:
        p.print_help()
        return 0
    # One name = convert in place; two names = write to a new variable.
    if len(args) == 2:
        src, trg = args
    else:
        src, trg = args[0], args[0]
    if check_name(src) == -1: return -1
    kind = WORLD[src][0]
    if kind == 'mat':
        im = WORLD[src][1]
        # Input must be COLOR: the original tested check_L (grayscale),
        # which rejects every RGB image and made the command a no-op.
        if check_RGB(im) == -1: return -1
        nim = color_color2gray(im)
        WORLD[trg] = ['mat', nim]
        del nim, im
    else:
        im0 = WORLD[src][1][0]
        if check_RGB(im0) == -1: return -1
        nb = WORLD[src][1].shape[0]
        bar = progress_bar(nb, sizebar, 'Processing')
        data = []
        for n in xrange(nb):
            nim = color_color2gray(WORLD[src][1][n])
            data.append(nim)
            bar.update(n)
        data = array(data)
        WORLD[trg] = ['seq', data]
        del data, nim, im0, bar
    return 1
def call_gray2color(args):
    '''
    Convert mat/seq gray scale (Luminance) to color (RGB).
    Converti mat/seq en niveau de gris (Luminance) en couleur (RGB).
    '''
    usage = 'Convert in-place\n\
gray2color <mat_name>\n\
Convert to new mat\n\
gray2color <mat_name> <mat_new_name>\n\
Convert a mat sequence in-place\n\
gray2color <seq_name>\n\
Convert a mat sequence to a new one\n\
gray2color <seq_name> <seq_new_name>\n'
    prog = 'gray2color'
    desc = call_gray2color.__doc__
    p = OptionParser(description = desc, prog = prog, version = version)
    p.set_usage(usage)
    try: opt, args = p.parse_args(args)
    except: return 0
    if len(args) == 0 or len(args) > 2:
        p.print_help()
        return 0
    # One name = convert in place; two names = write to a new variable.
    if len(args) == 2: src, trg = args
    else: src, trg = args[0], args[0]
    if check_name(src) == -1: return -1
    kind = WORLD[src][0]  # was missing: `kind` raised NameError below
    if kind == 'mat':
        im = WORLD[src][1]
        # Input must be grayscale (2-D).
        if check_L(im) == -1: return -1
        nim = color_gray2color(im)
        WORLD[trg] = ['mat', nim]
        del nim, im
    else:
        im0 = WORLD[src][1][0]
        if check_L(im0) == -1: return -1
        nb = WORLD[src][1].shape[0]
        bar = progress_bar(nb, sizebar, 'Processing')
        data = []
        for n in xrange(nb):
            nim = color_gray2color(WORLD[src][1][n])
            data.append(nim)
            bar.update(n)
        data = array(data)
        WORLD[trg] = ['seq', data]
        del data, nim, im0, bar
    return 1
def call_seq2mat(args):
    '''
    Extract mat variables from a sequence.
    Extrait mar variables depuis une sequence.
    '''
    usage = 'seq2mat <seq_name> <mat_name> [options]\n\
Extract number 5\n\
seq2mat vid im -i 5\n\
Extract mat 5 to 10\n\
seq2mat vid im -s 5 -e 10\n\
Extract mat 5 through to the end\n\
seq2mat vid im -s 5\n\
Extract first mat through to the 10th (include)\n\
seq2mat vid im -e 10\n'
    prog = 'seq2mat'
    desc = call_seq2mat.__doc__
    p = OptionParser(description = desc, prog = prog, version = version)
    p.add_option('-i', action='store', type='int', default='-1', help='Extract mat i')
    p.add_option('-s', action='store', type='int', default='-1', help='Extract starting number')
    p.add_option('-e', action='store', type='int', default='-1', help='Extract stoping number')
    p.set_usage(usage)
    try: opt, args = p.parse_args(args)
    except: return 0
    if len(args) != 2:
        p.print_help()
        return 0
    # -i is exclusive with -s/-e.
    if opt.i != -1 and (opt.s != -1 or opt.e != -1):
        outbox_error('Choose option i OR e and s, not both')
        return -1
    if opt.i == -1 and opt.s == -1 and opt.e == -1: opt.i = 0
    src, trg = args
    if check_name(src) == -1: return -1
    if check_seq(src) == -1: return -1
    if opt.i != -1: opt.s = opt.e = opt.i  # was `opti`: NameError
    else:
        if opt.s == -1: opt.s = 0
        # Default end = LAST VALID INDEX: the bound below is inclusive,
        # so using shape[0] (the original) indexed one past the end.
        if opt.e == -1: opt.e = WORLD[src][1].shape[0] - 1
    names = [trg + '_%04i' % i for i in xrange(opt.s, opt.e + 1)]
    if not check_overwrite(names): return -1
    n = 0
    for i in xrange(opt.s, opt.e + 1):
        im = WORLD[src][1][i]
        WORLD[names[n]] = ['mat', im]  # was WORLD[name[n]]: NameError
        n += 1
    return 1
def call_seq_reg_ave(args):
'''
This function use a simple registration to match images together
and compute the averages. Which will increase the signal-noise ratio.
Cette fonction permet de recaller les images entre elles
afin de calculer la moyenne. Qui vat augmenter le rapport signal sur bruit.
'''
usage = 'seq_reg_ave <seq_name> [option]\n\
seq_reg_ave im\n\
seq_reg_ave im -d 10 -w 35 -o res'
prog = 'seq_reg_ave'
desc = call_seq_reg_ave.__doc__
p = OptionParser(description = desc, prog = prog, version = version)
p.set_usage(usage)
p.add_option('-d', action='store', type='int', default='10', help='dx/dy is the translation range search on x/y (x-dx to x+dx) (default 10)')
p.add_option('-w', action='store', type='int', default='35', help='window size used to track translation between images (must be odd) (default 35)')
p.add_option('-o', action='store', type='string', default='res_ave', help='output name (default res_ave)')
try: opt, args = p.parse_args(args)
except: return 0
if len(args) != 1:
p.print_help()
return 0
src = args[0]
if not check_name(src): return -1
if not check_seq(src): return -1
if not check_overwrite(opt.o): return 0
dx = dy = p.d
ws = p.w
if ws % 2 == 0:
ws += 1
outbox_bang('Window size must be odd, set to %i' % ws)
dw = (ws - 1) // 2
im = WORLD[src][1][0]
# TODO change this part with new kernel
p = image_show_get_pts(im, 1, rad = dw)
print 'point selected:', p[0]
ave = space_reg_ave(WORLD[src][1], p[0], ws, dx, dy)
WORLD[opt.o] = ['mat', ave]
return 1
def call_load_vid(args):
'''
Load video (avi file only) as a sequence
Charge une video (fichier avi) en tant qu une sequence
load_vid <video_name> <frame_per_second>
'''
usage = 'load_vid <video_name> [option]\n\loav_vid neptune.avi -f 10'
prog = 'load_vid'
desc = call_load_vid.__doc__
p = OptionParser(description = desc, prog = prog, version = version)
p.set_usage(usage)
p.add_option('-f', action='store', type='int', default='10', help='frame per second (default 10)')
try: opt, args = p.parse_args(args)
except: return 0
if len(args) != 1:
p.print_help()
return 0
src = args[0]
freq = opt.f
if not check_name_file(src): return -1
name, ext = filename.split('.')
if ext != 'avi':
outbox_error('Must be an avi file')
return -1
if not check_overwrite(name): return 0
print 'Extract images...'
pattern = '.tmp_astir_'
try:
os.system('ffmpeg -i %s -r %i -f image2 "%s%%4d.png"' % (filename, freq, pattern))
except:
outbox_error('Impossible to extract images from the video')
return -1
lname = os.listdir('.')
mem = []
for file in lname:
if file.find(pattern) != -1: mem.append(file)
bar = progress_bar(len(mem), sizebar, 'loading')
seq = []
i = 0
mem.sort()
for item in mem:
im = image_read(item)
seq.append(im)
bar.update(i)
i += 1
seq = array(seq)
WORLD[name] = ['seq', seq]
os.system('rm -f %s*' % pattern)
del im, seq, bar
return 1
def call_wiener(args):
    '''
    Image restoration by Wiener filter.
    Restauration d image par filtre de Wiener
    wiener <mat_source_name> <mat_res_name>
    '''
    usage = 'wiener <mat_source_name> <mat_res_name>'
    prog = 'wiener'
    desc = call_wiener.__doc__
    p = OptionParser(description = desc, prog = prog, version = version)
    p.set_usage(usage)
    try: opt, args = p.parse_args(args)
    except: return 0
    # FIXED: the command takes TWO positional arguments (source and target);
    # the original tested for one and then crashed on args[1] (IndexError).
    if len(args) != 2:
        p.print_help()
        return 0
    src, trg = args
    if not check_name(src): return -1
    if not check_mat(src): return -1
    if not check_overwrite(trg): return -1
    # Restore the source mat and store the result under the target name.
    res = resto_wiener(WORLD[src][1])
    WORLD[trg] = ['mat', res]
    return 1
def call_mosaicing(args):
    '''
    Create mosaicing from two images
    mosaicing <mat_1> <mat_2>
    '''
    ## TODO parser mosaicing
    # NOTE(review): this command is in a debugging state -- it stops
    # unconditionally at the sys.exit() below, so the homography/merge code
    # after that call is currently unreachable.
    mat1 = WORLD[args[0]][1]
    mat2 = WORLD[args[1]][1]
    ch = len(mat1)          # channel count: 1 = gray, 3 = RGB
    ws = 35                 # window size used for point alignment (pixels)
    im1 = image_mat2im(mat1)
    im2 = image_mat2im(mat2)
    if ch == 1:
        # gray input: make colour copies for display, keep grays for matching
        im1c = color_gray2color(im1)
        im2c = color_gray2color(im2)
        im1g = im1
        im2g = im2
    elif ch == 3:
        # colour input: derive gray copies for matching
        im1g = color_color2gray(im1)
        im2g = color_color2gray(im2)
        im1c = im1
        im2c = im2
    # Ask the user to click 4 matching point pairs in both images.
    p1, p2 = image_show_stereo_get_pts(im1c, im2c, 4)
    print p2
    for n in xrange(len(p1)):
        print 'Aligned match points %i' % n
        # Refine the right-image point around the left-image point.
        xp, yp = space_align(im1g, im2g, p1[n], 35, 5, 5, p2[n])
        # NOTE(review): only p2[0] is updated inside this loop; it looks like
        # it should be p2[n] -- confirm before re-enabling the code below.
        p2[0][0] = p1[0][0] + yp
        p2[0][1] = p1[0][1] + xp
    print p2
    sys.exit()              # debugging stop -- everything below never runs
    H = geo_homography(p1, p2)
    res, l, t = space_G_transform(H, im2, 'NEAREST')
    print p1
    print p2
    print H.I
    #res = space_merge(mat1, mat2, p1, p2, 'ada')
    WORLD['res'] = ['mat', res]
    return 1
def call_cut_seq(args):
    '''
    Cut a part of sequence to a new one, start and stop
    specifies the part you want keep. Coupe une partie d une
    sequence dans une nouvelle, start et stop specifis la partie
    que vous voulez garder.
    '''
    usage = 'cut_seq <seq_name> <new_seq_name> [option]\ncut_seq vid1 newvid -s 10 -e 34\n'
    prog = 'cut_seq'
    desc = call_cut_seq.__doc__
    p = OptionParser(description = desc, prog = prog, version = version)
    p.set_usage(usage)
    p.add_option('-s', action='store', type='int', default='-1', help='Start number (default 0)')
    p.add_option('-e', action='store', type='int', default='-1', help='Stop number (default until the end)')
    try: opt, args = p.parse_args(args)
    except: return 0
    if len(args) != 2:
        p.print_help()
        return 0
    src, trg = args
    if not check_name(src): return -1
    if not check_seq(src): return -1
    if not check_overwrite(trg): return 0
    if opt.s == -1: opt.s = 0
    # FIXED: the copy below is INCLUSIVE of opt.e, so the default stop index
    # must be the last valid frame, not the frame count (the original raised
    # IndexError whenever -e was omitted).
    if opt.e == -1: opt.e = WORLD[src][1].shape[0] - 1
    # Copy frames [opt.s, opt.e] into a new sequence.
    seq = [WORLD[src][1][n] for n in xrange(opt.s, opt.e + 1)]
    seq = array(seq)
    WORLD[trg] = ['seq', seq]
    del seq
    return 1
def call_licence(args):
data = open('COPYING', 'r').readlines()
for line in data: print line.strip('\n')
return 1
def call_anaglyph(args):
    '''
    Create an anaglyph image from two RGB matrix (right and left).
    Creer une image anaglyphe depuis deux mat RGB (droite et gauche).
    '''
    # The docstring above feeds the --help text, so it is kept verbatim.
    usage = 'anaglyph <mat_right> <mat_left> [options]\nanaglyph imr img -o newim\n'
    parser = OptionParser(description=call_anaglyph.__doc__, prog='anaglyph', version=version)
    parser.set_usage(usage)
    parser.add_option('-o', action='store', type='string', default='res_anag', help='Output name (default res_anag)')
    try:
        opt, args = parser.parse_args(args)
    except:
        return 0
    if len(args) != 2:
        parser.print_help()
        return 0
    name_right, name_left = args
    out_name = opt.o
    # Validate inputs: both names must exist, be mats, and be RGB.
    if not check_name([name_right, name_left]): return -1
    if not check_mat([name_right, name_left]): return -1
    if not check_overwrite(out_name): return 0
    mat_right = WORLD[name_right][1]
    mat_left = WORLD[name_left][1]
    if not check_RGB(mat_right): return -1
    if not check_RGB(mat_left): return -1
    WORLD[out_name] = ['mat', image_anaglyph(mat_right, mat_left)]
    return 1
def call_colormap(args):
    '''
    Apply false-colors to a luminance mat.
    Applique des fausses couleurs sur une mat en luminance
    colormap <mat_name> <kind_of_map> <new_mat_name>
    different color of map: jet, hsv, hot
    colormap im1 hot im_map
    '''
    # The docstring above feeds the --help text, so it is kept verbatim.
    usage = 'colormap <mat_name> <new_name> [options]\ncolormap im1 im1color -c jet'
    parser = OptionParser(description=call_colormap.__doc__, prog='colormap', version=version)
    parser.set_usage(usage)
    parser.add_option('-c', action='store', type='string', default='jet', help='Kind of colormap jet, hsv and hot (default is jet)')
    try:
        opt, args = parser.parse_args(args)
    except:
        return 0
    if len(args) != 2:
        parser.print_help()
        return 0
    src_name, out_name = args
    map_kind = opt.c
    # Source must be an existing luminance mat; target must be writable.
    if not check_name(src_name): return -1
    if not check_mat(src_name): return -1
    if not check_overwrite(out_name): return 0
    if not check_L(src_name): return -1
    if map_kind not in ('jet', 'hsv', 'hot'):
        outbox_error('Kind of map color unknown')
        return -1
    WORLD[out_name] = ['mat', color_colormap(WORLD[src_name][1], map_kind)]
    return 1
def call_add(args):
    '''
    Add two mat variables (L or RGB).
    Ajoute deux varaible mat (L ou RGB)
    mat_c = mat_a + mat_b
    '''
    # The docstring above feeds the --help text, so it is kept verbatim.
    usage = 'add <mat_a> <mat_b> <mat_c>\nadd im1 im2 res\n'
    parser = OptionParser(description=call_add.__doc__, prog='add', version=version)
    parser.set_usage(usage)
    try:
        opt, args = parser.parse_args(args)
    except:
        return 0
    if len(args) != 3:
        parser.print_help()
        return 0
    name_a, name_b, name_out = args
    if not check_name([name_a, name_b]): return -1
    if not check_mat([name_a, name_b]): return -1
    if not check_overwrite(name_out): return 0
    # Element-wise sum, stored under the target name.
    WORLD[name_out] = ['mat', WORLD[name_a][1] + WORLD[name_b][1]]
    return 1
def call_sub(args):
    '''
    Substract two mat variables (L or RGB).
    Soustract deux variables mat (L ou RGB)
    mat_c = mat_a - mat_b
    '''
    # The docstring above feeds the --help text, so it is kept verbatim.
    usage = 'sub <mat_a> <mat_b> <mat_c>\nsub im1 im2 res\n'
    parser = OptionParser(description=call_sub.__doc__, prog='sub', version=version)
    parser.set_usage(usage)
    try:
        opt, args = parser.parse_args(args)
    except:
        return 0
    if len(args) != 3:
        parser.print_help()
        return 0
    name_a, name_b, name_out = args
    if not check_name([name_a, name_b]): return -1
    if not check_mat([name_a, name_b]): return -1
    if not check_overwrite(name_out): return 0
    # Element-wise difference, stored under the target name.
    WORLD[name_out] = ['mat', WORLD[name_a][1] - WORLD[name_b][1]]
    return 1
def call_mul(args):
    '''
    Multiply two mat variables (L or RGB).
    Multiplie deux variables mat (L ou RGB)
    mat_c = mat_a * mat_b
    '''
    # The docstring above feeds the --help text, so it is kept verbatim.
    usage = 'mul <mat_a> <mat_b> <mat_c>\nmul im1 im2 res\n'
    parser = OptionParser(description=call_mul.__doc__, prog='mul', version=version)
    parser.set_usage(usage)
    try:
        opt, args = parser.parse_args(args)
    except:
        return 0
    if len(args) != 3:
        parser.print_help()
        return 0
    name_a, name_b, name_out = args
    if not check_name([name_a, name_b]): return -1
    if not check_mat([name_a, name_b]): return -1
    if not check_overwrite(name_out): return 0
    # Element-wise product, stored under the target name.
    WORLD[name_out] = ['mat', WORLD[name_a][1] * WORLD[name_b][1]]
    return 1
def call_div(args):
    '''
    Divide two mat variables (L or RGB).
    Divise deux variables mat (L ou RGB)
    mat_c = mat_a / mat_b
    '''
    # The docstring above feeds the --help text, so it is kept verbatim.
    usage = 'div <mat_a> <mat_b> <mat_c>\ndiv im1 im2 res\n'
    parser = OptionParser(description=call_div.__doc__, prog='div', version=version)
    parser.set_usage(usage)
    try:
        opt, args = parser.parse_args(args)
    except:
        return 0
    if len(args) != 3:
        parser.print_help()
        return 0
    name_a, name_b, name_out = args
    if not check_name([name_a, name_b]): return -1
    if not check_mat([name_a, name_b]): return -1
    if not check_overwrite(name_out): return 0
    # Element-wise quotient, stored under the target name.
    WORLD[name_out] = ['mat', WORLD[name_a][1] / WORLD[name_b][1]]
    return 1
def call_info(args):
'''
Return informations about a variable (size, stats, format, ...).
Retourne des informations a propos d une variable (taille, stats, format, ...)
'''
## TODO info on sequence
usage = 'info <mat_name>\n\
info mat1\n'
prog = 'info'
desc = call_info.__doc__
p = OptionParser(description = desc, prog = prog, version = version)
p.set_usage(usage)
try: opt, args = p.parse_args(args)
except: return 0
if len(args) != 1:
p.print_help()
return 0
src = args[0]
if not check_name(src): return -1
if not check_mat(src): return -1
mat = WORLD[src][1]
c1, c2, c3 = G, B, Y
print 'Name: %s%s%s Type: %s%s%s' % (c1, src, N, c1, 'mat', N)
if len(mat) == 1: mode = 'L'
elif len(mat) == 3: mode = 'RGB'
elif len(mat) == 4: mode = 'RGBA'
print 'Mode: %s%s%s Size: %s%ix%i%s Format: %s%s%s' % (c1, mode, N, c1, mat[0].shape[1], mat[0].shape[0], N, c1, mat[0].dtype, N)
print ''
for c in xrange(len(mat)):
print 'Channel %s%i%s' % (c2, c, N)
min = mat[c].min()
max = mat[c].max()
mean = mat[c].mean()
var = mat[c] - mean
var *= var
var = var.sum()
var /= float(mat[c].size)
std = sqrt(var)
print 'min: %s%5.3f%s max: %s%5.3f%s mean: %s%5.3f%s var: %s%5.3f%s std: %s%5.3f%s' % (c3, min, N, c3, max, N, c3, mean, N, c3, var, N, c3, std, N)
return 1
# NOTE(review): dead block -- the triple-quoted string below is a leftover
# documentation-dump script kept only as reference; a bare string literal is
# a no-op statement and is never executed (it also ends in sys.exit()).
'''
#=== documentation ==============
print '# ls'
print call_ls.__doc__
print '# ldir'
print call_ldir.__doc__
print '# rm'
print call_rm.__doc__
print '# mv'
print call_mv.__doc__
print '# cp'
print call_cp.__doc__
print '# mem'
print call_mem.__doc__
print '# fun'
print call_fun.__doc__
print '# save_var'
print call_save_var.__doc__
print '# save_world'
print call_save_world.__doc__
print '# load_var'
print call_load_var.__doc__
print '# load_world'
print call_load_world.__doc__
print '# load_im'
print call_load_im.__doc__
print '# save_im'
print call_save_im.__doc__
print '# show_mat'
print call_show_mat.__doc__
print '# color2gray'
print call_color2gray.__doc__
print '# gray2color'
print call_gray2color.__doc__
print '# colormap'
print call_colormap.__doc__
print '# seq2mat'
print call_seq2mat.__doc__
print '# seq_reg_ave'
print call_seq_reg_ave.__doc__
print '# load_vid'
print call_load_vid.__doc__
print '# wiener'
print call_wiener.__doc__
print '# mosaicing'
print call_mosaicing.__doc__
print '# cut_seq'
print call_cut_seq.__doc__
print '# add'
print call_add.__doc__
print '# sub'
print call_sub.__doc__
print '# mul'
print call_mul.__doc__
print '# div'
print call_div.__doc__
print '# info'
print call_info.__doc__
sys.exit()
'''
#=== shell io ===================
# script kernel
# Astir runs either interactively (prompt) or in batch mode, executing a
# ".sas" script whose path is given as argv[1].
script_flag = False
script_end = False
if len(sys.argv) != 1:
    script_name = sys.argv[1]
    # NOTE(review): split('.') breaks on names containing several dots;
    # rsplit('.', 1) would be safer.
    dummy, ext = script_name.split('.')
    if ext != 'sas':
        outbox_error('This file %s is not a Script Astir Shell (.sas).' % script_name)
        sys.exit()
    script_flag = True
    list_cmd = open(script_name, 'r').readlines()
# if mode shell
if script_flag:
    print '** Script Astir Shell V0.36 **'
else:
    print ' ___ _ _'
    print ' / _ \ | | (_)'
    print '/ /_\ \___| |_ _ _ __'
    print '| _ / __| __| | \'__)'
    print '| | | \__ \ |_| | |'
    print '\_| |_/___/\__|_|_|'
    print ''
    print 'Astir Copyright (C) 2008 Julien Bert'
    print 'This program comes with ABSOLUTELY NO WARRANTY; for details type "licence".'
    print 'This is free software, and you are welcome to redistribute it'
    print 'under certain conditions; type "licence" for details.'
    print 'GNU General Public License version 3'
    print ''
    print '** Astir Shell V0.36 **\n'
ct_cmd = 1
# Main read-eval loop: one command per iteration, from the script or prompt.
while 1 and not script_end:
    if script_flag:
        cmd = list_cmd[ct_cmd - 1]
        if cmd[0] == '#':          # '#' lines in a script are comments
            ct_cmd += 1
            continue
        print '%s%s%s' % (B, cmd.strip('\n'), N)
        if ct_cmd == len(list_cmd):
            script_end = True
    else:
        try: cmd = raw_input('%sastir%s %i%s %%%s ' % (B, GB, ct_cmd, G, N))
        except:
            print '\nbye'
            sys.exit(0)
        if not cmd: continue
    ct_cmd += 1
    parse = cmd.split()
    progname = parse[0]
    args = parse[1:]
    if progname not in listfun:
        # SECURITY NOTE(review): unrecognised input is eval()'d as a Python
        # expression -- acceptable for a local interactive tool only.
        try: print eval(cmd)
        except:
            outbox_bang(' 8-/')
        continue
    if progname == 'exit':
        print 'bye'
        sys.exit(0)
    # caller
    # Dispatch to the matching call_<progname> function defined above.
    eval('call_%s(args)' % progname)
|
UTF-8
|
Python
| false | false | 2,011 |
15,118,284,925,016 |
264798e37477a3e7bcb3950db0cba752265015be
|
186838cfc6d4f8e00c518b4df18da88abf8648d4
|
/TESTING-REFACTOR-REWRITE/TwitterAPI/Trending/google_translate_twitter.py
|
b62cb3b1173579d6b3bd6eedb93efb19a4822c4e
|
[] |
no_license
|
alienone/OSINT
|
https://github.com/alienone/OSINT
|
62bc8f15d6ba91df1d735a096cfb5e7e1af567dc
|
491da288a3ec7a9351d83f84509d81decac6a4fe
|
refs/heads/master
| 2015-08-06T14:09:25.970946 | 2014-04-14T01:44:30 | 2014-04-14T01:44:30 | 8,273,844 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
__date__ = "Nov 15, 2012"
__author__ = "AlienOne"
__copyright__ = "GPL"
__credits__ = ["AlienOne"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "AlienOne"
__email__ = "[email protected]"
__status__ = "Production"
import json, requests, csv, datetime, daemon, time
from apiclient.discovery import build
def twitter_searchUrl(twitterSearchSpace_filename):
    """Yield each raw line of the given twitter search-space file."""
    for line in open(twitterSearchSpace_filename):
        yield line
def google_trans(element_list, src_lang):
    """Call google translate API to translate non-English based Tweets"""
    # SECURITY NOTE(review): the developerKey below is a credential committed
    # to source control; it should be revoked and loaded from configuration.
    service = build('translate', 'v2', developerKey='AIzaSyCMNKNNuCoLoL6ZtVJvZWUsUce-lMXLoqQ')
    # Returns the raw Translate API response (a dict); callers unwrap
    # ['translations'][0]['translatedText'] themselves.
    return service.translations().list(source=src_lang, target="en", q=element_list).execute()
def grabTwitterTopics(search_url, csv_filename, src_lang, country_name):
    """Process Top Ten Trending Non-English Language Based Tweets by WOEID"""
    # Append rows to the run's CSV file; on a non-200 / non-JSON response the
    # function silently writes nothing.
    f = csv.writer(open(csv_filename, "ab+"))
    request_urlGet = requests.get(search_url)
    if '200' in str(request_urlGet.status_code) and 'json' in (request_urlGet.headers['content-type']):
        data = json.loads(request_urlGet.text)
        data_list = data[0]["trends"]   # Twitter trends payload: list of {name, url, ...}
        element_list = []
        for element in data_list:
            if src_lang != "en":
                # For non-English feeds `name` holds the raw Translate API
                # response dict, unwrapped further below.
                name = google_trans(element['name'], src_lang)
            else:
                name = element['name']
            element_list.append({'name': name, 'url': element['url']})
        element_list.sort()
        # De-duplicate trends by name while writing rows.
        key_list = []
        for datadict in element_list:
            curkey = datadict.get('name')
            if curkey not in key_list:
                # FIXED: the original never appended to key_list, so the
                # duplicate check above was a no-op and repeated trends were
                # written multiple times.
                key_list.append(curkey)
                current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                if src_lang != "en":
                    topic = datadict['name']['translations'][0]['translatedText'].strip('#').strip(' ').encode('ascii','ignore')
                else:
                    topic = datadict['name'].strip('#').strip(' ').encode('ascii','ignore')
                url = datadict['url'].encode('ascii','ignore')
                f.writerow([current_time,topic,url,country_name])
def processData(element, csv_filename):
    """Split one search-space CSV line and hand it to grabTwitterTopics."""
    fields = element.split(',')
    search_url, country_name, src_lang = (part.strip('\n') for part in fields[:3])
    grabTwitterTopics(search_url, csv_filename, src_lang, country_name)
def main():
    """Execute process every hour"""
    # One timestamped output CSV per run, with a header row.
    the_date = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
    csv_filename = "culled_product/Twitter-Top-Trending" + '-' + the_date + '.csv'
    f = open(csv_filename,"wb+")
    w = csv.writer(f)
    w.writerow(["Time Observed","Trend Name","URL Request","Country"])
    f.close()
    twitterSearchSpace_filename = "twitterSearchSpace.csv"
    for element in twitter_searchUrl(twitterSearchSpace_filename):
        processData(element, csv_filename)
        # NOTE(review): the sleep sits inside the loop, so there is a
        # one-hour pause after EACH search-space entry, not one per full
        # pass -- confirm against the "every hour" docstring.
        time.sleep(3600)
if __name__ == '__main__':
    # Detach from the terminal (daemonize) before entering the polling loop.
    with daemon.basic_daemonize():
        main()
|
UTF-8
|
Python
| false | false | 2,014 |
5,282,809,775,795 |
05c47bbf5b2c734193926569df3ca04d12737142
|
48a198aad51487f3fdd3eb4c191f864cea61486c
|
/obal.py
|
ae6770399353f5deb5f9988ae983a9b0818f8c57
|
[] |
no_license
|
philosodad/graphcomplexity.edge
|
https://github.com/philosodad/graphcomplexity.edge
|
86de4967ac9d54659947e92a865962e1640e35f2
|
afc544ee780068963b25b83f4406ce250be7d2a0
|
refs/heads/master
| 2021-01-01T19:07:43.393733 | 2009-12-14T17:56:47 | 2009-12-14T17:56:47 | 32,559,912 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import random as ran
class G(object):
    """Global simulation constants shared across the module."""
    # NOTE(review): units are not specified here; presumably all three values
    # use the same arbitrary distance unit -- confirm against callers.
    bound = 300         # named "bound" -- apparently the area/coordinate limit
    sensor_range = 80   # sensing radius
    comm_range = 160    # communication radius (2x sensor_range here)
|
UTF-8
|
Python
| false | false | 2,009 |
9,801,115,397,965 |
7829b72c7f6a0b5757feb0c51f51fc1269493863
|
c198dbd09a25f7c21a058fe600a85c29fef4206d
|
/batma/core/group.py
|
f80488aa1bfa7ea476c079d47a5d6e0486cd3457
|
[
"MIT"
] |
permissive
|
droidguy04/batma
|
https://github.com/droidguy04/batma
|
f1a7dad12f221739e516a60c4fa2cc054334d97d
|
772a063c8ad7c4324debf1cf7f3c25da1dce4e7a
|
refs/heads/master
| 2020-12-25T23:46:43.012369 | 2012-07-23T00:27:25 | 2012-07-23T00:27:25 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import operator as op
__all__ = ['Group']
class Group(list):
    """A list of game objects with collision/proximity queries and batch
    update/draw helpers.

    Objects are expected to expose a ``collider`` attribute with
    ``intersects``, ``distance``, ``near_than``, ``touches_point`` and
    ``fits_in_box`` methods, plus ``update(tick)`` / ``draw()``.
    """

    # META ====================================================================
    def __init__(self, *game_objects):
        super(Group, self).__init__(game_objects)

    def add(self, *game_objects):
        """Append every given object to the group."""
        for game_object in game_objects:
            self.append(game_object)

    def remove(self, *game_objects):
        """Remove every given object (ValueError if one is absent)."""
        for game_object in game_objects:
            super(Group, self).remove(game_object)

    def clear(self):
        """Empty the group in place."""
        super(Group, self).__init__([])
    # =========================================================================

    # COLLISION ===============================================================
    def they_collide(self, obj1, obj2):
        """Return True when the two objects' colliders intersect."""
        return obj1.collider.intersects(obj2.collider)

    def objs_colliding(self, obj):
        """List of members (excluding obj itself) colliding with obj."""
        intersects = obj.collider.intersects
        return [other for other in self if
                (other is not obj) and intersects(other.collider)]

    def iter_colliding(self, obj):
        """Lazily yield members (excluding obj itself) colliding with obj."""
        intersects = obj.collider.intersects
        for other in self:
            if other is not obj and intersects(other.collider):
                yield other

    def any_colliding(self, obj):
        """True if any member collides with obj.

        NOTE(review): unlike the other queries this does NOT exclude obj
        itself; a self-intersecting member returns True -- kept as-is.
        """
        for other in self:
            if self.they_collide(obj, other):
                return True
        return False

    def any_near(self, obj, near_distance):
        """Return the first member within near_distance of obj, or None."""
        near_than = obj.collider.near_than
        for other in self:
            if other is not obj and near_than(other.collider, near_distance):
                return other
        return None

    def objs_near(self, obj, near_distance):
        """List of members (excluding obj) within near_distance of obj."""
        near_than = obj.collider.near_than
        return [other for other in self if
                (other is not obj) and near_than(other.collider, near_distance)]

    def objs_near_wdistance(self, obj, near_distance):
        """List of (member, distance) pairs within near_distance of obj."""
        distance = obj.collider.distance
        res = []
        for other in self:
            if other is obj:
                continue
            d = distance(other.collider)
            if d <= near_distance:
                res.append((other, d))
        return res

    def ranked_objs_near(self, obj, near_distance):
        """Like objs_near_wdistance, sorted by increasing distance.

        FIXED: the original called ``objs_near_wdistance`` without ``self.``,
        which raised NameError on every call.
        """
        tmp = self.objs_near_wdistance(obj, near_distance)
        tmp.sort(key=op.itemgetter(1))
        return tmp

    def iter_all_collisions(self):
        """Yield every unordered colliding pair among members.

        O(n**2): each unordered pair is tested exactly once.
        """
        for i, obj in enumerate(self):
            intersects = obj.collider.intersects
            for j, other in enumerate(self):
                if j >= i:
                    break
                if intersects(other.collider):
                    yield (obj, other)

    def objs_touching_point(self, x, y):
        """Set of members whose collider touches the point (x, y)."""
        touching = set()
        for obj in self:
            if obj.collider.touches_point(x, y):
                touching.add(obj)
        return touching

    def objs_into_box(self, minx, maxx, miny, maxy):
        """Set of members whose collider fits inside the given box."""
        into = set()
        packed_box = minx, maxx, miny, maxy
        for obj in self:
            if obj.collider.fits_in_box(packed_box):
                into.add(obj)
        return into
    # =========================================================================

    def update(self, tick):
        """Forward update(tick) to every member."""
        for game_object in self:
            game_object.update(tick)

    def draw(self):
        """Forward draw() to every member."""
        for game_object in self:
            game_object.draw()
|
UTF-8
|
Python
| false | false | 2,012 |
16,054,587,786,367 |
d54183e06665a21de9246482b243665bda942b31
|
ba6918522576eee61ec6d0b0d7495d6c18717376
|
/csv2avro
|
3473aae2f8dc7fab5b4cd1a2c149790054dc3ecb
|
[
"Apache-2.0"
] |
permissive
|
chinna1986/avroutils
|
https://github.com/chinna1986/avroutils
|
3677e36d542a6c2ee45497441f6fa732ca2db485
|
ec965bb47314a35de31924a7bc4dd7aa47afe746
|
refs/heads/master
| 2021-01-23T18:11:18.536611 | 2010-08-21T10:42:10 | 2010-08-21T10:42:10 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import optparse
import csv
from avro import schema, io, datafile
VERSION="0.1"

# Coercion table: Avro primitive type name -> callable converting a raw CSV
# string into that type.  'long' and 'null' stay as lambdas ('long' so the
# name is resolved lazily, 'null' because it discards its argument).
PRIMITIVE_DATA_TYPE_MAP = {
    'string': str,
    'boolean': bool,
    'bytes': bytes,
    'int': int,
    'long': lambda value: long(value),
    'float': float,
    'double': float,
    'null': lambda value: None,
}
def dataTypeMapper(names, namesdict, types, defaults, lineData):
    """Convert one CSV row (lineData) into a record dict keyed by field name.

    names/types/defaults are parallel per-field lists; namesdict supplies the
    full key set.  Values are coerced via PRIMITIVE_DATA_TYPE_MAP; on a
    ValueError the field's schema default is used instead, and when no
    default exists the ValueError is re-raised for the caller to report.

    FIXED: the original mutated and returned the shared ``namesdict`` bound
    once via functools.partial, so every call returned the SAME dict object.
    A fresh copy is returned per row instead.
    """
    record = dict(namesdict)
    for index, name in enumerate(names):
        data = lineData[index]
        convert = PRIMITIVE_DATA_TYPE_MAP[types[index]]
        # Try converting the data to its actual type.
        try:
            record[name] = convert(data)
        except ValueError as valErr:
            # We failed. So lets see if we have a default value to use?
            if defaults[index] is not None:
                record[name] = convert(defaults[index])
            else:
                # We tried what we could. Its definitely the data or the
                # schema that's wrong.  Raise and let the user ponder.
                raise valErr
    return record
def createOptionParser():
    """ Create an command line options parser that supports options for various types of delimiter seperated files. """
    usage = "Usage: %prog [options] INPUTFILES... OUTPUTFILE"
    version = "%prog " + VERSION
    parser = optparse.OptionParser(usage=usage, version=version)
    generalGroup = optparse.OptionGroup(parser, "[General]", "General, utility related options")
    # FIXED: the -r help text was garbled ("Convertread/write as.").
    generalGroup.add_option('-r', '--reverse', action='store_true', default=False, dest='reverse', help="Convert in reverse, i.e. Avro to CSV.")
    generalGroup.add_option('-o', '--overwrite', dest='overwrite', action='store_true', default=False, help="Overwrite existing output file if it already exists.")
    generalGroup.add_option('-a', '--append', dest='append', action='store_true', default=False, help="Append to existing output file if it already exists.")
    parser.add_option_group(generalGroup)
    avroGroup = optparse.OptionGroup(parser, "[Avro]", "Avro file input/output related options")
    avroGroup.add_option('-s', '--schema-file', dest='schema', help="Specify a schema to read/write as.")
    avroGroup.add_option('-n', '--record-name', dest='name', help="Specify a record name. Used when no schema file is provided. Default is the first input filename")
    avroGroup.add_option('-c', '--compress', dest='compress', action='store_true', default=False, help="Toggle compression of avro output file using 'deflate' codec.")
    parser.add_option_group(avroGroup)
    csvGroup = optparse.OptionGroup(parser, "[CSV]", "CSV file input/output related options")
    csvGroup.add_option('-d', '--delimiter', dest='delimiter', default=',', help="Specify a delimiter for the CSV file's records (ex: '\\x1b'. Default is comma (',').")
    csvGroup.add_option('-f', '--header', dest='header', action='store_true', default=False, help="Specify that the first record in the CSV file is (or must be) a header.")
    parser.add_option_group(csvGroup)
    return parser
def validateOptions(parser):
    """ Validate the options from the commandline and return the remaining options. """
    (opts, args) = parser.parse_args()
    # Perform any required validation
    if opts.overwrite and opts.append:
        parser.error("Can't specify both overwrite and append options together.")
    if opts.schema:
        try:
            # Parse once purely to validate; the converters re-read and
            # re-parse the schema file themselves later.
            schema.parse(open(opts.schema).read())
        except:
            parser.error("Given schema file %s is not a valid Avro schema file." % opts.schema)
    return (opts, args)
def convertCsvToAvro(parser, opts, inputs, output):
    """ Read the CSV files and convert them to Avro. """
    # Ensure that the output destination does not already exist, if no overwriting flag is set.
    if not opts.overwrite and not opts.append:
        try:
            open(output)
            parser.error("Output file already exists. Provide an --overwrite option if you want to force an overwrite or an --append option if you want to append to it.")
        except IOError:
            pass
    # Retrieve some sample data to sniff first.
    firstFile = open(inputs[0])
    sampleData = firstFile.read(5*1024) # TODO: Make this configurable as an advanced option?
    firstFile.seek(0)
    header = firstFile.readline()
    firstFile.close()
    # Unescape our delimiter, if available, for convenience.
    if opts.delimiter:
        delimiter = opts.delimiter.decode('string-escape')
    else:
        # Sniff out a dialect for the CSV file and use its delimiter.
        dialect = csv.Sniffer().sniff(sampleData)
        delimiter = dialect.delimiter
    # If we got a schema to use, lets use it.
    if opts.schema:
        try:
            outputSchema = schema.parse(open(opts.schema).read())
        except:
            parser.error("Given schema is invalid. Please provide a proper Avro schema")
    else:
        # No schema given: derive one from the CSV header.
        # Check if we have a header at least?
        if not csv.Sniffer().has_header(sampleData):
            parser.error("The input CSV files don't carry a header. Please provide a schema file or a proper CSV input file with headers.")
        # Get the 'record' name.
        name = inputs[0]
        if opts.name:
            name = opts.name
        # Construct record fields.
        # NOTE: All fields will default to 'string' type as headers don't tell us data types.
        # To use your own types, provide a JSON schema instead.
        fieldList = []
        for fieldName in header.strip().split(delimiter):
            fieldList.append({'type': 'string', 'name': fieldName})
        # Schema can now be constructed.
        outputSchema = schema.RecordSchema(name, None, fieldList)
    # We have the schema ready. Lets begin converting.
    # Precompute the parallel per-field lists consumed by dataTypeMapper, and
    # bind them once via functools.partial.
    fieldNames = [field.name for field in outputSchema.fields]
    fieldNamesDict = dict((name, None) for name in fieldNames)
    fieldTypes = [field.type.type for field in outputSchema.fields]
    fieldDefaults = [field.default for field in outputSchema.fields]
    dataMapper = functools.partial(dataTypeMapper, fieldNames, fieldNamesDict, fieldTypes, fieldDefaults)
    datumWriter = io.DatumWriter(outputSchema)
    # Append mode passes None as the writer's schema (the file already has
    # one); 'deflate' codec only when -c/--compress was requested.
    dataFileWriter = datafile.DataFileWriter(open(output, 'a+' if opts.append else 'w'), datumWriter, None if opts.append else outputSchema, 'deflate' if opts.compress else 'null')
    # Convert all given input files to avro.
    for inputfile in inputs:
        openfile = open(inputfile)
        # If CSV file has a header, ignore it by reading it out.
        if opts.header:
            openfile.readline()
        for lineno, line in enumerate(openfile):
            # Convert the data types to fit the schema.
            try:
                lineData = dataMapper(line.strip().split(delimiter))
            except ValueError as valErr:
                print("Recieved a value error on line %d of file %s." % (lineno+1, inputfile))
                raise valErr
            # Append the converted CSV record to the Avro data file.
            dataFileWriter.append(lineData)
    # We've finished converting all files. Close and exit.
    dataFileWriter.close()
def convertAvroToCsv(parser, opts, inputs, output):
    """ Read the Avro files and convert them all to CSV """
    csvFileWriter = csv.writer(open(output, 'wb'), delimiter=str(opts.delimiter.decode('string-escape')), lineterminator='\n')
    # Create our datum (record) reader.
    datumReader = io.DatumReader()
    if opts.schema:
        # If we have a custom schema, read that alone for the datum.
        datumReader = io.DatumReader(expected=schema.parse(open(opts.schema).read()))
    # Get schema fields early on.
    if opts.schema:
        fields = schema.parse(open(opts.schema).read()).fields
    else:
        # Get it from the first file if a custom schema was not passed.
        fields = datafile.DataFileReader(open(inputs[0]), datumReader).datum_reader.writers_schema.fields
    # If the header flag is on, write the header as the first record.
    if opts.header:
        csvFileWriter.writerow([field.name for field in fields])
    # Convert all given input files to CSV.
    for inputfile in inputs:
        openfile = datafile.DataFileReader(open(inputfile), datumReader)
        for record in openfile:
            # Emit fields in schema order for every record.
            csvRow = [record[field.name] for field in fields]
            csvFileWriter.writerow(csvRow)
def main():
    """Parse command-line options and dispatch CSV->Avro or Avro->CSV."""
    parser = createOptionParser()
    (opts, args) = validateOptions(parser)
    # Need at least one input file plus the output file.
    if len(args) < 2:
        parser.error('You must specify an input and an output filename.')
    inputs, output = args[:-1], args[-1]
    if opts.reverse:
        convertAvroToCsv(parser, opts, inputs, output)
    else:
        convertCsvToAvro(parser, opts, inputs, output)

if __name__=='__main__':
    main()
|
UTF-8
|
Python
| false | false | 2,010 |
19,550,691,146,679 |
14073a94784c1f9f8c82d3721d6efc47ce2985eb
|
f3a9b8f97c9bece83eecd5f8442c1d703bb16caf
|
/samplecode.py
|
5d57b63016cd4e39e52449895d25efcad3da7609
|
[] |
no_license
|
ENuge/sampleblog
|
https://github.com/ENuge/sampleblog
|
e06d8d5b7a3ab0ca71d73df68b3f779cec94f190
|
ec87ebe54b2c4605d04380f69d3a644db097e49a
|
refs/heads/master
| 2020-05-05T01:18:35.005245 | 2013-01-19T17:45:48 | 2013-01-19T17:45:48 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
import cgi
import re
import sys
import urllib2
from xml.dom import minidom
from string import letters
import random
import string
import hashlib
import hmac
import logging
import time
from datetime import datetime, timedelta
import webapp2
import jinja2
from google.appengine.api import memcache
from google.appengine.ext import db
# Database Entries
class LoginData(db.Model):
    """Datastore entity holding one username and its hashed password."""
    username = db.TextProperty(required = True)
    password = db.TextProperty(required = True)  # stored as "sha256hex,salt" (see make_pw_hash)
class SinglePost(db.Model):
    """Datastore entity for one blog post."""
    subject = db.TextProperty(required = True) # subject of blog post
    content = db.TextProperty(required = True) # content of blog post
    created = db.DateTimeProperty(auto_now_add = True) # date of blog post
## Cookie-related hashing
def hash_str(value):
    """
    Return the hex HMAC digest of *value*, keyed with the app-wide SECRET.
    """
    digest = hmac.new(SECRET, value)
    return digest.hexdigest()
def make_secure_val(plain):
    """
    Return *plain* together with its keyed hash, formatted as "plain|hash".
    """
    signed = hash_str(plain)
    return "%s|%s" % (plain, signed)
#
def check_secure_val(h):
    """
    Verify a "value|hash" string against our keyed hash.
    Return the plain value when it checks out, otherwise None (implicitly).
    """
    plain = h.partition('|')[0]
    if make_secure_val(plain) == h:
        return plain
## Password-related hashing and salting
def make_salt():
    """Return a 5-character random alphabetic password salt.

    FIXED: uses string.ascii_letters instead of the Python-2-only,
    locale-dependent string.letters (identical output under the default C
    locale, and forward-compatible with Python 3).

    NOTE(review): the random module is not cryptographically secure; prefer
    the secrets module / os.urandom if this ever moves off Python 2.
    """
    return ''.join(random.choice(string.ascii_letters) for _ in range(5))
def make_pw_hash(name, pw, salt = None):
    """
    Hash name+password with sha256 and a salt.
    Returns the pair formatted as "hexhash,salt"; a fresh salt is generated
    when none (or an empty one) is supplied.
    """
    if not salt:
        salt = make_salt()
    digest = hashlib.sha256(name + pw + salt).hexdigest()
    return '%s,%s' % (digest, salt)
def valid_pw(name, pw, h):
    """
    Return True when (name, pw) hashes back to *h* using h's own salt.
    """
    salt = h.split(',')[1]
    return make_pw_hash(name, pw, salt) == h
###### Memcached related age-getting for blog posts
def age_set(key, val):
    """
    Cache *val* under *key* together with the current UTC time,
    so age_get can report how stale the entry is.
    """
    memcache.set(key, (val, datetime.utcnow()))
def age_get(key):
    """
    Return (value, age_in_seconds) for a cached key, or (None, 0) on a miss.
    """
    cached = memcache.get(key)
    if not cached:
        return None, 0
    val, stored_at = cached
    age = (datetime.utcnow() - stored_at).total_seconds()
    return val, age
def add_post(post):
    """
    Persist *post* to the datastore, refresh the cached front page,
    and return the new post's id.
    """
    post.put()
    get_posts(update = True)  # invalidate/refresh the front-page cache
    return post.key().id()
def edit_post(key, subject, content):
    """
    Overwrite the subject and content of an existing post
    (the original created date/time is preserved).
    """
    entity = db.get(key)
    entity.subject = subject
    entity.content = content
    entity.put()
def get_posts(update = False):
    """
    Return (posts, age) for the 30 newest blog posts via memcache.
    Pass update=True to force a datastore re-query (used after writes).

    FIXED: the original ran the datastore query unconditionally before even
    consulting the cache, which defeated the point of caching; the query is
    now issued only on a miss or forced update.
    """
    mc_key = 'BLOGS'
    posts, age = age_get(mc_key)
    if update or posts is None:
        q = SinglePost.all().order('-created').fetch(limit = 30)
        posts = list(q)
        age_set(mc_key, posts)
    return posts, age
def age_str(age):
    """
    Format a cache age in seconds as 'queried N second(s) ago',
    pluralizing correctly for N == 1.
    """
    seconds = int(age)
    noun = 'second' if seconds == 1 else 'seconds'
    return 'queried %s %s ago' % (seconds, noun)
#### sampleuser hard-coded as only valid username/password.
#### Signup page disabled.
# NOTE(review): this runs at import time and writes to the datastore on
# every module load; the demo credentials are committed in clear text.
pwhash = make_pw_hash("sampleuser", "samplepassword")
l = LoginData(username = "sampleuser", password = pwhash)
l_key = l.put()
# Note that this should be in a separate file, but this is just code to
# demonstrate how it works, in general.
# SECURITY NOTE(review): the HMAC key is hard-coded in source; it should be
# loaded from configuration/secret storage instead.
SECRET = 'generichash' # 'Secret' hash for our passwords
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir))
# autoescape = True commented out so blog can use HTML formatting
class Handler(webapp2.RequestHandler):
    """
    Base request handler shared by every page: wraps response writing
    and jinja2 template rendering.
    """
    def write(self, *a, **kw):
        self.response.out.write(*a, **kw)

    def render_str(self, template, **params):
        return jinja_env.get_template(template).render(params)

    def render(self, template, **kw):
        self.write(self.render_str(template, **kw))
####### Start of the static webpages
class Welcome(Handler):
    """
    Post-login splash page; requires a valid signed 'username' cookie,
    otherwise redirects back to the login page.
    """
    def render_welcome(self, username=""):
        # FIXED: the original referenced the undefined name user_cookie_str
        # here (NameError on every call); it now uses its own parameter.
        self.render("welcome.html", username = username)

    def get(self):
        usercookie = self.request.cookies.get('username')
        user_cookie_str = check_secure_val(usercookie) if usercookie else None
        if user_cookie_str:
            self.render_welcome(user_cookie_str)
        else:
            # FIXED: with no cookie at all the original fell through without
            # redirecting (and would have hit an unbound user_cookie_str).
            self.redirect('/')
class MainPage(Handler):
    """Landing ("about me") page of the website."""

    def get(self):
        self.render("home.html")
class Projects(Handler):
    """Static page listing projects under development."""

    def get(self):
        self.render("projects.html")
class Resume(Handler):
    """Static resume page.

    The HTML was originally produced by a PDF-to-HTML converter and
    then hand-tweaked to fit the site's layout.
    """

    def get(self):
        self.render("resume.html")
class Contact(Handler):
    """Static page with contact information."""

    def get(self):
        self.render("contact.html")
####### Start of the blog-related webpages, etc.
class Login(Handler):
    """
    Login page allows me to login using the only valid username/password.

    POST behavior depends on response ordering (redirect vs. re-render),
    so the code below is kept exactly as-is; see NOTE comments.
    """
    def render_login(self, username="",
                     usererror="", password="", passworderror=""):
        # Re-render the form, echoing entered values and any error text.
        self.render("login.html", username = username, usererror = usererror,
                     password = password, passworderror = passworderror)
    def get(self):
        self.render_login()
    def post(self):
        username = str(self.request.get("username"))
        password = str(self.request.get("password"))
        usererror = ""
        passworderror = ""
        # Signed cookie value: username plus integrity hash.
        usercookie = str(make_secure_val(username))
        # check if the username and corresponding password
        # are in the database; if so, redirect them to the login page
        # otherwise, throw an error
        # NOTE(review): checkdb() has INVERTED semantics -- it returns
        # False when the username exists -- so this branch means
        # "known user AND correct password".
        if checkdb(username) == False and verifypwd(username, password):
            self.response.headers['Content-Type'] = 'text/plain'
            self.response.headers.add_header('Set-Cookie',
                                             'username=' + usercookie + '; Path=/')
            self.redirect("/welcome")
        # Unknown username: show an error.
        if checkdb(username):
            usererror = "This is not a valid username!"
            self.render_login(username, usererror, password, passworderror)
        # Known username but wrong password: show an error.
        if (verifypwd(username, password) == False and
            checkdb(username)==False):
            passworderror = "Wrong password!"
            self.render_login(username, usererror, password, passworderror)
class Logout(Handler):
    """Log the user out by expiring the login cookie, then go home."""

    def get(self):
        self.response.headers['Content-Type'] = 'text/plain'
        # Setting a past expiry date makes the browser drop the cookie.
        expired_cookie = ('username=; Path=/'
                          '; expires=Sun, 16-Jul-2012 23:59:59 GMT')
        self.response.headers.add_header('Set-Cookie', expired_cookie)
        self.redirect('/')
class Flush(Handler):
    """
    If the user is logged in, he may flush memcached by going to this
    URL.

    NOTE(review): a present-but-tampered cookie falls through BOTH
    branches (the elif pairs with the outer `if usercookie:`), yielding
    an empty 200 response; and `user_cookie_str` is unbound in the elif
    when the cookie is missing -- only the `not usercookie`
    short-circuit avoids a NameError.
    """
    def get(self):
        usercookie = self.request.cookies.get('username')
        if usercookie:
            user_cookie_str = check_secure_val(usercookie)
            if user_cookie_str:
                memcache.flush_all()
                self.redirect('/')
        elif not usercookie or not user_cookie_str:
            self.redirect('/')
class Blog(Handler):
    """Front page of the blog: shows the 30 most recent posts."""

    def render_blog(self, subject="", content="", error="", created=""):
        # get_posts(True) also refreshes the cache on every view.
        posts, timequery = get_posts(True)
        self.render("blog.html", posts=posts, timequery=age_str(timequery))

    def get(self):
        self.render_blog()
class NewPost(Handler):
    """Form for composing a new blog post (requires a valid login cookie)."""

    def render_newpost(self, subject="", content="", error=""):
        self.render("newpost.html", subject = subject,
                    content = content, error = error)

    def get(self):
        cookie = self.request.cookies.get('username')
        if cookie:
            # Tampered cookie: fall through with no output (matches the
            # original branch structure).
            if check_secure_val(cookie):
                self.render_newpost()
        else:
            self.redirect('/')

    def post(self):
        subject = self.request.get("subject")
        content = self.request.get("content")
        if not (subject and content):
            self.render_newpost(subject, content,
                                "you need both a subject and a body!")
        else:
            new_post = SinglePost(subject = subject, content = content)
            self.redirect("/%d" % add_post(new_post))
class PermaLink(Handler):
    """
    Unique webpage for each specific blog post.

    Matched by the catch-all route (last in the table), so `postid`
    arrives without a leading slash.  Post bodies are cached in
    memcache under 'POST_<id>'.
    """
    def render_post(self, subject="", content="", created="", webURL="",
                    timequery="", edit=""):
        self.render("singlepost.html", subject = subject, content = content,
                    created = created, webURL = webURL, timequery = timequery,
                    edit = edit)
    def get(self, postid):
        post_key = 'POST_' + postid # key used for memcached
        post, age = age_get(post_key) # gets age of posts from cache
        try: # if given URL is not numeric (and not any other page)
            int(postid) # then it is invalid, so redirect to the home page
        except ValueError:
            self.redirect('/')
            return
        if not post: # if post not in cache, get it from database
            key = db.Key.from_path('SinglePost', int(postid))
            post = db.get(key)
            age_set(post_key, post) # then put it in the cache
            age = 0
        if not post: # if no post with this id, then invalid URL
            self.error(404)
            return
        # NOTE(review): this second datastore read happens even on a
        # cache hit, partially defeating the memcache lookup above.
        s = SinglePost.get_by_id(int(postid))
        usercookie = self.request.cookies.get('username')
        if usercookie:
            user_cookie_str = check_secure_val(usercookie)
            if user_cookie_str:
                # Logged in: show the post with an edit link.
                self.render_post(subject = s.subject, content = s.content,
                                 created = s.created, webURL = s.key().id(),
                                 timequery = age_str(age), edit = True)
        # Anonymous (or tampered cookie): read-only view.  The `or`
        # short-circuit keeps the unbound `user_cookie_str` from raising
        # when the cookie is missing.
        if not usercookie or not user_cookie_str:
            self.render_post(subject = s.subject, content = s.content,
                             created = s.created, webURL = s.key().id(),
                             timequery = age_str(age), edit = False)
class EditPage(Handler):
    """
    Allows one to edit a specific blog post.

    Route is '/_edit' + PAGE_RE, so `postid` arrives WITH a leading
    slash (e.g. '/123') and is stripped via postid[1:] before use.
    """
    def render_edit(self, subject="", content="", error="", timequery=""):
        self.render("edit.html", subject = subject, content = content,
                    error = error, timequery = timequery)
    def get(self, postid):
        logging.error(postid)
        # NOTE(review): cache key uses the UN-stripped id ('POST_/123'),
        # so it never shares entries with PermaLink's 'POST_123' key --
        # confirm whether that is intentional.
        post_key = 'POST_' + postid
        post, age = age_get(post_key)
        postid=postid[1:]
        try:
            # Non-numeric ids are invalid edit URLs.
            int(postid)
        except ValueError:
            self.error(404)
            return
        if not post:
            # Cache miss: load from the datastore and repopulate.
            key = db.Key.from_path('SinglePost', int(postid))
            post = db.get(key)
            age_set(post_key, post)
            age = 0
        if not post:
            self.error(404)
            return
        s = SinglePost.get_by_id(int(postid))
        usercookie = self.request.cookies.get('username')
        if usercookie:
            user_cookie_str = check_secure_val(usercookie)
            if user_cookie_str:
                # Logged in: pre-fill the edit form with current content.
                self.render_edit(subject=s.subject, content=s.content,
                                 timequery=age_str(age))
        # Anonymous users may not edit; short-circuit protects the
        # possibly-unbound user_cookie_str.
        if not usercookie or not user_cookie_str:
            self.redirect('/')
    def post(self, postid):
        subject = self.request.get("subject")
        content = self.request.get("content")
        postid = postid[1:]
        if subject and content:
            key = db.Key.from_path('SinglePost', int(postid))
            edit_post(key, subject, content)
            self.redirect("/%d" % int(postid))
        else:
            error = "you need both a subject and a body!"
            self.render_edit(subject, content, error, age_str(0))
# Matches a slash-prefixed path of word characters (used by EditPage).
PAGE_RE = r'(/(?:[a-zA-Z0-9_-]+/?)*)'
# Routing table.  The PermaLink pattern is a catch-all and must stay
# LAST so the more specific routes above it take priority.
app = webapp2.WSGIApplication([('/welcome', Welcome), ('/', MainPage),
                               ('/projects', Projects), ('/resume', Resume),
                               ('/contact', Contact), ('/login', Login),
                               ('/logout', Logout), ('/flush', Flush),
                               ('/blog', Blog), ('/newpost', NewPost),
                               ('/_edit' + PAGE_RE, EditPage),
                               ('/(\S+)', PermaLink)],
                              debug = True)
def checkdb(username):
    """Return False if `username` already exists in LoginData, else True.

    NOTE the inverted sense: False means the name is taken.  The whole
    table is scanned client-side (no WHERE clause).
    """
    users = db.GqlQuery("SELECT * FROM LoginData")
    return all(entry.username != username for entry in users)
# Verifypwd checks if the input password matches the hashed value saved
# in the database.
def verifypwd(username, password):
    """Check `password` against the stored hash for `username`.

    Returns False when the username is unknown; otherwise defers to
    valid_pw() on the first matching record.
    """
    users = db.GqlQuery("SELECT * FROM LoginData")
    for entry in users:
        if entry.username == username:
            return valid_pw(username, password, str(entry.password))
    return False
|
UTF-8
|
Python
| false | false | 2,013 |
12,824,772,372,714 |
f2baf749f7123f7fa791bf3020cb9ab1f51f5d3e
|
f83840e93fb5901a710a20f840eb7c1647623839
|
/add_ids_to_jsons.py
|
fd2ce6c8ccf7cce8571cf5c6f2cb33e4e4b345e3
|
[
"BSD-3-Clause"
] |
permissive
|
kcranston/opentree_status
|
https://github.com/kcranston/opentree_status
|
2fb416de7df88c72917d3193d2398f32f7ad5f4d
|
a8c754d061979d4dacbbb8fb6c29ceef3cf7d10a
|
refs/heads/master
| 2021-01-23T20:50:21.485840 | 2013-08-10T02:11:26 | 2013-08-10T02:11:26 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# reads status jsons
# extracts studyID and adds as _id field
# so that can be import into couchdb
# assumes 1. want to import all .json files
import sys
import os
import json
import glob
def usage():
    """Print the command-line usage string for this script."""
    # Parenthesized print is identical for a single argument under
    # Python 2 and also valid under Python 3 (the statement form was
    # Python-2-only).
    print("Usage: add_study_ids.py source_directory target_directory")
# crappy command-line parameter parsing
# (positional args only; `usage()` above is never actually called)
source_dir=sys.argv[1]
print "source files in",source_dir
target_dir=sys.argv[2]
# Create the target directory if it does not already exist.
if os.path.isdir(target_dir):
    print "writing files to",target_dir
else:
    os.mkdir(target_dir)
    print "creating",target_dir
# get the list of files to import
filelist=os.listdir(source_dir)
jsons={}
for f in filelist:
    if f.endswith(".json"):
        infile = source_dir+"/"+f
        outfile = target_dir+"/"+f
        contents = json.load(open(infile,'r'))
        # Copy the study id into a top-level '_id' field so CouchDB
        # uses it as the document id on import.
        studyId = contents['study_info']['ot:studyId']
        contents['_id'] = studyId
        json.dump(contents,open(outfile,'w'))
        print infile,outfile,studyId
        jsons[studyId]=infile
    else:
        print "skipping file ",f
|
UTF-8
|
Python
| false | false | 2,013 |
19,722,489,823,696 |
c4cc3222ee7df6cd2a06a8f21ece77603719a4d9
|
3def0de902b598fa1a2adda8611c349b2fc7625a
|
/CallJava.py
|
460c1067eddb82db43449149fd279d1bbeb7a9c9
|
[] |
no_license
|
strategist922/pydoop
|
https://github.com/strategist922/pydoop
|
86699f23a1e9a11357c2b26fb94d508ed89305e9
|
cd6fc4b41600de4d7f7189a9821554a9a1a47b48
|
refs/heads/master
| 2020-12-25T12:28:12.060391 | 2013-03-18T15:45:52 | 2013-03-18T15:45:52 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import sys
try:
import org.apache.hadoop.io.Text as Text;
import java.lang.System as System
from com.xhaus.jyson import JysonCodec as json
except ImportError: #cpython
Text = str
import json
def map(key, value, context):
    """Hadoop map function: emit the raw record when its telemetry
    payload has a non-empty chromeHangs or lateWrites memoryMap.

    NOTE: deliberately shadows the builtin `map` -- the driver looks
    this function up by name on the module.
    """
    try:
        payload = json.loads(value)
    except Exception:
        # Fix: was a bare `except:`, which also swallowed SystemExit
        # and KeyboardInterrupt.  Emitted message is unchanged (the
        # exception *class* from exc_info, as before).
        e = sys.exc_info()[0]
        context.write(Text("exception:%s" % e), Text(key))
        return

    record = False
    try:
        ch = payload['chromeHangs']
        if type(ch) == dict:
            record = len(ch['memoryMap']) > 0
    except KeyError:
        pass
    try:
        lw = payload['lateWrites']
        if type(lw) == dict:
            # Either section having entries is enough to keep the record.
            record = record or len(lw['memoryMap']) > 0
    except KeyError:
        pass

    if record:
        outkey = Text(value)
        context.write(outkey, Text())
#def reduce(key, values, context):
# context.write(key, Text())
if __name__ == "__main__":
    # CPython test path: run this module's map over a local file via
    # the project-local FileDriver instead of a real Hadoop cluster.
    from FileDriver import map_reduce
    map_reduce(sys.modules[__name__], sys.argv[1])
|
UTF-8
|
Python
| false | false | 2,013 |
7,103,875,945,062 |
13ab3755d852ddb2222e29ee23e0cd1248d55438
|
38b9194a7dc7a8c5950a6942b65f03ff03540208
|
/Completed_run/inverse_distance_rectangular_Dutch_election_1dim/standardizep2a2.py
|
50665d568608fa18c45e9caa1c8f0f7b4e776ae4
|
[] |
no_license
|
joellevine/Dutch_Election_Inverse_Distance
|
https://github.com/joellevine/Dutch_Election_Inverse_Distance
|
018a287705afbd884064969a053fd89688867859
|
e9964aff9a55e27af8ed9e02bc2a41202b6d51be
|
refs/heads/master
| 2021-01-19T10:21:21.797563 | 2012-09-02T22:54:53 | 2012-09-02T22:54:53 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
## Copyright (C) 2012 Joel H. Levine
##
## Permission is hereby granted, free of charge, to any person obtaining
## a copy of this software and associated documentation files (the "Software"),
## to deal in the Software without restriction,including without limitation
## the rights to use, copy, modify, merge, publish, distribute, sublicense,
## and/or sell copies of the Software, and to permit persons to whom the Software
## is furnished to do so, subject to the following conditions:
##
## The above copyright notice and this permission notice shall be included
## in all copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
## OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
## THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
## FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
## IN THE SOFTWARE.
#standardizep2a2.py
from numpy import *
def standardizep2a2(original_rx,original_cx,attenuation,minkowski):
    """Standardize the two configurations when (and only when) the
    model is Euclidean (minkowski == 2) with squared attenuation
    (attenuation == 2); otherwise bypass and return the inputs.

    Bug fix: the bypass branch previously fell through to
    `return rx, cx` with rx/cx unbound, raising UnboundLocalError.
    (Also corrected the spelling in the warning messages; the
    single-argument print() form behaves identically on Python 2.)
    """
    if attenuation != 2 or minkowski != 2:
        print("Inappropriate attempt to use standardization -- bypassed")
        print("(Appropriate only for Euclidean space and squared attenuation)")
        return original_rx, original_cx
    return standardize_variance(original_rx, original_cx)
def standardize_variance(original_rx,original_cx):
    """Mean-center both configurations, then rescale them via same_sd()
    so each dimension has a common spread.

    (Removed the dead locals nrow/ncol/ndim that were computed and
    never used.)
    """
    rx = mean_center(original_rx[:, :])
    cx = mean_center(original_cx[:, :])
    return same_sd(rx, cx)
def same_sd(rx, cx):
    """Rescale rx and cx (rows = points, cols = dimensions) so both
    share a common per-dimension RMS: the geometric mean of the two.

    Vectorized replacement for the original per-row accumulation loops;
    for mean-centered input the per-column RMS is the standard
    deviation, as the original comments noted.
    """
    rss = (rx ** 2).mean(axis=0) ** .5   # per-dimension RMS of rx
    css = (cx ** 2).mean(axis=0) ** .5   # per-dimension RMS of cx
    # Force a common sd in each dimension: scale each set toward the
    # geometric mean of the two spreads.
    geomean = (rss * css) ** .5
    return rx * (geomean / rss), cx * (geomean / css)
def mean_center(rx):
    """Return a new array with each column of rx shifted to zero mean."""
    col_means = sum(rx, 0) / len(rx)
    # Broadcast-subtract the column means; rx itself is not modified.
    return rx[:, :] - col_means
# =================================================================================
if __name__=="__main__":
    # Smoke test: standardize a random 10x2 vs 5x2 configuration with a
    # deliberately different scale (second set is 10x larger) and print
    # before/after.
    random.seed(1)
    x=zeros((10,2),float)
    y=zeros((5,2),float)
    for i in range(10):
        for j in range(2):
            x[i,j]=random.random()
    for i in range(5):
        for j in range(2):
            y[i,j]=10*random.random()
    print x
    print
    print y
    x,y= standardizep2a2(x,y,2,2)
    print "standardized"
    print x
    print
    print y
|
UTF-8
|
Python
| false | false | 2,012 |
10,952,166,639,843 |
c202daa726dd840d801bbe681c3ac96d26b0b4ec
|
fbe0c7f540dbd4849e6e3002e5532d91febff73d
|
/d2c/Application.py
|
c602816d6a9d58df9cd56218941fc9ac5841ba36
|
[] |
no_license
|
willmore/D2C
|
https://github.com/willmore/D2C
|
23819fa351039b395c2da445a20812627afd01a7
|
f547690c2b4656b4d54e435976040543f0e28be0
|
refs/heads/master
| 2020-05-31T15:50:36.003669 | 2011-12-16T15:22:43 | 2011-12-16T15:22:43 | 1,342,024 | 5 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
'''
Created on Feb 10, 2011
@author: willmore
'''
import wx
from wx.lib.pubsub import Publisher
from d2c.gui.Gui import Gui
from d2c.controller.ConfController import ConfController
from d2c.gui.ConfPanel import CredDialog
from d2c.controller.ImageController import ImageController
from d2c.controller.AMIController import AMIController
from d2c.gui.DeploymentTemplateWizard import DeploymentTemplateWizard
from d2c.controller.DeploymentTemplateWizardController import DeploymentTemplateWizardController
from d2c.controller.DeploymentController import DeploymentController, DeploymentTemplateController
from d2c.controller.NewCloudController import NewCloudController
from d2c.gui.CloudPanel import CloudWizard
from d2c.gui.DeploymentTab import DeploymentTemplatePanel
class Application:
    """Top-level wx application: builds the main frame, wires up the
    per-tab controllers, and routes pubsub deployment events."""
    def __init__(self, dao, amiToolsFactory):
        # dao: persistence layer; amiToolsFactory: builds AMI tooling
        # for the AMI controller.
        self._amiToolsFactory = amiToolsFactory
        self._dao = dao
        self._app = wx.App()
        self._frame = Gui(dao)
        self._imageController = ImageController(self._frame.imagePanel, self._dao)
        self._amiController = AMIController(self._frame.amiPanel,
                                            self._dao,
                                            self._amiToolsFactory)
        self.loadDeploymentPanels()
        self._frame.bindAddDeploymentTool(self.addDeployment)
        self._frame.bindConfTool(self.showConf)
        self._frame.bindCloudTool(self.showCloudWizard)
        self._frame.deploymentPanel.tree.Bind(wx.EVT_TREE_SEL_CHANGED, self.deploymentSelect)
        # Pubsub subscriptions keep the deployment tree in sync with
        # wizard/controller events.
        Publisher.subscribe(self._handleNewDeploymentTemplate, "DEPLOYMENT TEMPLATE CREATED")
        Publisher.subscribe(self._handleNewDeployment, "DEPLOYMENT CREATED")
        Publisher.subscribe(self._handleDeleteDeployment, "DELETE DEPLOYMENT")
        # NOTE(review): bare attribute access below is a no-op --
        # probably leftover from an edit.
        self._frame
        self._frame.Show()
    def deploymentSelect(self, event):
        # Show the panel matching the tree node the user clicked.
        self._frame.deploymentPanel.displayPanel.showPanel(self._frame.deploymentPanel.tree.GetItemText(event.GetItem()))
    def _handleDeleteDeployment(self, msg):
        # Remove from the UI first, then from persistent storage.
        deployment = msg.data['deployment']
        self._frame.deploymentPanel.removeDeployment(deployment)
        self._dao.delete(deployment)
    def _handleNewDeploymentTemplate(self, msg):
        deployment = msg.data['deployment']
        self.loadDeploymentPanel(deployment)
    def _handleNewDeployment(self, msg):
        deployment = msg.data['deployment']
        self._frame.deploymentPanel.addDeployment(deployment)
    def loadDeploymentPanels(self):
        # Build one panel (and controller) per stored template.
        self.deplomentControllers = {}
        for d in self._dao.getDeploymentTemplates():
            self.loadDeploymentPanel(d)
    def loadDeploymentPanel(self, deployment):
        deployPanel = DeploymentTemplatePanel(deployment, self._dao, self._frame.deploymentPanel.displayPanel)
        self._frame.deploymentPanel.addDeploymentTemplatePanel(deployPanel)
        self.deplomentControllers[deployment.id] = DeploymentTemplateController(deployPanel, self._dao)
    def addDeployment(self, event):
        # Modal wizard for creating a new deployment template.
        mywiz = DeploymentTemplateWizard(None, -1, 'Deployment Template Creation Wizard')
        DeploymentTemplateWizardController(mywiz, self._dao)
        mywiz.ShowModal()
        mywiz.Destroy()
    def showConf(self, event):
        # Modal credentials/configuration dialog.
        conf = CredDialog(self._dao, None, size=(800,400))
        ConfController(conf, self._dao)
        conf.ShowModal()
        conf.Destroy()
    def showCloudWizard(self, event):
        # Modal cloud-management wizard.
        cloudWiz = CloudWizard(None, -1, 'Manage Clouds', size=(500,400))
        NewCloudController(cloudWiz, self._dao)
        cloudWiz.ShowModal()
        cloudWiz.Destroy()
    def MainLoop(self):
        # Enter the wx event loop (blocks until the app exits).
        self._app.MainLoop()
|
UTF-8
|
Python
| false | false | 2,011 |
4,294,967,335,447 |
5bfa688267e16b56cc743953d3c0d17616f9ba96
|
a23164dac9a001acda5bcec08ae5adc5c4736249
|
/pgm.py
|
9b99a1557956c94c37a7d2e30f2fc46fbd9c7ebf
|
[] |
no_license
|
kraxis/infonet
|
https://github.com/kraxis/infonet
|
c4b4109d0d3266ece389e941a1698ef732ac6c79
|
0fd0749378710e3e7759a9917781bbc7fc4129ae
|
refs/heads/master
| 2021-01-02T22:58:29.412524 | 2013-09-23T17:20:13 | 2013-09-23T17:20:13 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from numpy import *
import math
import matplotlib.pyplot as plt
from scipy.linalg import *
from scipy import ndimage
from scipy import stats
from sklearn import svm
import pylab as pl
import infonet_fin as repo
##propscoring #################
def propscore(loaded,iters):
    """For each image, accumulate a propagation score by random-walking
    `iters` steps over the pixel neighbor graph, scoring each visited
    node with repo.localFunc / repo.localInd.

    loaded: tuple (X, Y, pX, pY, pXX, pYY, w, f, adj) as produced by
    the infonet_fin module -- semantics of the individual pieces are
    defined there (TODO confirm shapes).
    Returns an array of one summed score per image in Y.
    """
    X,Y,pX,pY,pXX,pYY,w,f,adj=loaded
    size=int(sqrt(len(pX)))
    nb=repo.allNeighbors(len(pX))
    propscores=zeros(len(Y))
    for img in range(len(Y)):
        print 'image being processed: '+repr(img)
        # Per-node scores; None marks "not yet visited".
        propsc=[None for _ in range(len(pX))]
        fromNode=random.randint(0,len(pX)) #start somewhere random
        prevNode=nb[fromNode][random.randint(0,len(nb[fromNode]))]
        propsc[prevNode]=repo.localInd(prevNode,pX,pY,Y[img],w)
        propsc[fromNode]=repo.localFunc(pX,pY,pXX,pYY,w,fromNode,prevNode,\
                                        propsc[prevNode],Y[img])
        ind=0
        while(ind<iters):
            # Pick a random neighbor, avoiding an immediate backtrack
            # to the node we just came from.
            temp=random.randint(0,len(nb[fromNode]))
            thisNode=nb[fromNode][temp]
            if (thisNode==prevNode):thisNode=nb[fromNode][(temp+1)%len(nb[fromNode])]
            propsc[thisNode]=repo.localFunc(pX,pY,pXX,pYY,w,thisNode,
                                            fromNode,propsc[fromNode],Y[img])
            prevNode=fromNode
            fromNode=thisNode
            ind+=1
        # Sum only the nodes the walk actually visited.
        propscores[img]=sum([e for e in propsc if e is not None])
    return propscores
############################ template stuff ###########################
############################ svm testing ################################
def svmplot(sc,nums):
    """Train four SVM variants on 2-D scores `sc` (labels assigned in
    equal contiguous chunks, one per entry of `nums`) and plot each
    decision surface in a 2x2 grid.
    """
    X=sc
    # Assume sc is evenly divided: first chunk is class 0, next class 1, ...
    lendata=int(len(sc)/len(nums))
    Y=zeros(len(sc))
    for i in range(len(nums)):
        Y[i*lendata:(i+1)*lendata]=i
    h = .02   # mesh step for the decision-surface grid
    C = 1.0   # SVM regularization parameter
    svc = svm.SVC(kernel='linear', C=C).fit(X, Y)
    rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, Y)
    # NOTE(review): despite the title below, this is a second RBF
    # kernel -- the degree-3 polynomial variant is commented out.
    poly_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, Y)#svm.SVC(kernel='poly', degree=3, C=C).fit(X, Y)
    lin_svc = svm.LinearSVC(C=C).fit(X, Y)
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = meshgrid(arange(x_min, x_max, h),
                      arange(y_min, y_max, h))
    print 'done'
    titles = ['SVC with linear kernel',
              'SVC with RBF kernel',
              'SVC with polynomial (degree 3) kernel',
              'LinearSVC (linear kernel)']
    print 'got here'
    for i, clf in enumerate((svc, rbf_svc, poly_svc, lin_svc)):
        # Plot the decision boundary. For that, we will assign a color to each
        # point in the mesh [x_min, m_max]x[y_min, y_max].
        pl.subplot(2, 2, i + 1)
        Z = clf.predict(c_[xx.ravel(), yy.ravel()])
        # Put the result into a color plot
        Z = Z.reshape(xx.shape)
        pl.contourf(xx, yy, Z, cmap=pl.cm.Paired)
        pl.axis('off')
        # Plot also the training points
        pl.scatter(X[:, 0], X[:, 1], c=Y, cmap=pl.cm.Paired)
        pl.title(titles[i])
    pl.show()
|
UTF-8
|
Python
| false | false | 2,013 |
19,104,014,552,046 |
19fe4621800cb1f97bb1b03ece352ab74563ba4d
|
e631b3facbc6bce153dcc2d96e8e140cd11338f1
|
/setup.py
|
0353b7b96dd2498754407920c7dfd5d1d2cd930b
|
[
"GPL-3.0-only"
] |
non_permissive
|
Lattyware/unrest
|
https://github.com/Lattyware/unrest
|
0334fc3a34bb2340ab5734353693db372a528d41
|
2109b7d3d4a0d48260be6d15867398d24c58e118
|
refs/heads/master
| 2020-04-27T05:40:23.097292 | 2011-12-25T02:54:41 | 2011-12-25T02:54:41 | 3,046,756 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
This setup script is designed to freeze the game into an stand-alone executable
without any dependancies. It is NOT needed to run the game. Just do ./unrest.py
- or go and download your appropriate binary version created using this.
Before running, please check the value of PLATFORM below.
"""
from cx_Freeze import setup, Executable
################################################################################
# Build target: toggle manually before running (see module docstring).
PLATFORM = "POSIX"
#PLATFORM = "WIN32"
################################################################################
# Modules/files cx_Freeze should bundle alongside the executable.
includes = ["re"]
includefiles = ["assets", "README", "COPYING"]
if PLATFORM == "WIN32":
    exe = Executable(
        script="unrest.py",
        base="Win32GUI",  # GUI subsystem: no console window on Windows
        icon="assets/unrest-icon.ico",
    )
else:
    exe = Executable(
        script="unrest.py",
        icon="assets/unrest-icon.png",
    )
setup(
    name = "unrest",
    version = "1.0",
    author = "Gareth Latty",
    author_email = "[email protected]",
    url = "http://www.lattyware.co.uk/projects/unrest",
    description = "A game in which you must escape from helplessness.",
    platforms = ["POSIX", "WIN32"],
    license = "GPL3",
    executables = [exe],
    options = {
        "build_exe": {
            "includes": includes,
            'include_files': includefiles,
        },
    },
)
|
UTF-8
|
Python
| false | false | 2,011 |
1,614,907,710,925 |
9ec65152fd62c51450a538510b7cb9e9726d3cff
|
6cab0595b5d08ee15d18543cc759a04ee333ebc4
|
/nc_parse_uniq_lgt_file.py
|
cc6b69b9112cc2b6ad5684d8d40d57f4aa4ae1cc
|
[] |
no_license
|
JoshDaly/lgtscripts
|
https://github.com/JoshDaly/lgtscripts
|
f2c7753c3716e5f92a12032a1886f20f6951d93b
|
1d072d2cd9c6626e3cba322f0bc867b906a7fead
|
refs/heads/master
| 2016-09-05T23:39:39.837048 | 2014-08-13T09:55:31 | 2014-08-13T09:55:31 | 19,908,456 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
###############################################################################
#
# nc_parse.py - parse nucmer
#
###############################################################################
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
__author__ = "Michael Imelfort"
__copyright__ = "Copyright 2012"
__credits__ = ["Michael Imelfort"]
__license__ = "GPL3"
__version__ = "0.0.1"
__maintainer__ = "Michael Imelfort"
__email__ = "[email protected]"
__status__ = "Development"
###############################################################################
import argparse
import sys
#import os
#import errno
#import numpy as np
#np.seterr(all='raise')
#import matplotlib as mpl
#import matplotlib.pyplot as plt
#from mpl_toolkits.mplot3d import axes3d, Axes3D
#from pylab import plot,subplot,axis,stem,show,figure
###############################################################################
###############################################################################
###############################################################################
###############################################################################
class NucMerParser:
    """Wrapper class for parsing nucmer output"""
    # constants to make the code more readable
    # Column indices into the list yielded by readNuc().
    _START_1 = 0
    _END_1 = 1
    _START_2 = 2
    _END_2 = 3
    _LEN_1 = 4
    _LEN_2 = 5
    _IDENTITY = 6
    _ID_1 = 7
    _ID_2 = 8
    def __init__(self):
        # Becomes True once the coords-file header has been skipped;
        # note this makes a parser instance single-use across files.
        self.prepped = False
    def readNuc(self, fp):
        """Read through a nucmer coords file
        this is a generator function

        Yields one hit per line as
        [start1, end1, start2, end2, len1, len2, identity, id1, id2];
        index with the _START_1 ... _ID_2 constants above.
        """
        line = None # this is a buffer keeping the last unprocessed line
        while True: # mimic closure; is it a bad idea?
            if not self.prepped:
                # we still need to strip out the header
                for l in fp: # search for the first record
                    if l[0] == '=': # next line is good
                        self.prepped = True
                        break
            # file should be prepped now
            # Records are pipe-delimited groups of whitespace-separated
            # fields: positions | positions | lengths | identity | ids.
            for l in fp:
                fields = l.split('|')
                yield ([int(i) for i in fields[0].split()] +
                       [int(i) for i in fields[1].split()] +
                       [int(i) for i in fields[2].split()] +
                       [float(i) for i in fields[3].split()] +
                       fields[4].split())
            break # done!
###############################################################################
###############################################################################
###############################################################################
###############################################################################
class ContigParser:
    """Streaming FASTA reader."""

    def __init__(self):
        pass

    def readFasta(self, fp):
        """Generator yielding (header, sequence) pairs from handle fp.

        The header is everything after '>' up to the first space; the
        sequence lines of each record are concatenated.
        """
        header, chunks = None, None
        for raw in fp:
            if raw[0] == '>':
                # New record: flush the previous one, if any.
                if header is not None:
                    yield header, "".join(chunks)
                header = raw.rstrip()[1:].partition(" ")[0]
                chunks = []
            else:
                chunks.append(raw.rstrip())
        # Flush the final record left in the buffer.
        if header is not None:
            yield header, "".join(chunks)
###############################################################################
###############################################################################
###############################################################################
###############################################################################
def doWork( args ):
    """ Main wrapper

    Collects "dirty" transfer UIDs from the UniVec and BLAST lists,
    resolves each UID to its contig/coordinates via the uniq fasta
    headers, then echoes gut_oral_contigs.csv minus any row whose
    contig+coordinates match a dirty transfer.
    """
    #objects
    # uid -> 1/0 placeholder, later replaced by
    # [contig, img_a, gt_a, img_b, gt_b, start, stop]
    contam_ids= {}
    """
    #-----
    # parse nucmer file
    NP = NucMerParser()
    with open(args.coord, 'r') as fh:
        for hit in NP.readNuc(fh):
            lines=hit[NP._ID_2].split("-")
            contig=lines[0].rstrip()
            id_a=lines[1].split(":")[1].rstrip()
            genome_tree_a=lines[2].split(":")[1].rstrip()
            start=int(lines[3].split(":")[1].rstrip())
            stop=int(lines[4].split(":")[1].rstrip())
            id_b=lines[5].split(":")[1].rstrip()
            genome_tree_b=lines[6].split(":")[1].rstrip()
            unique_id=lines[7].rstrip()
            contam_ids[unique_id] = [contig,id_a,genome_tree_a,id_b,genome_tree_b,start,stop]
    #for key in contam_ids:
    #    print key
    """
    #-----
    #read in dirty uids lists
    #univec
    with open(args.univec_list, 'r') as fh:
        for l in fh:
            id = l.rstrip()
            contam_ids[id]= 1
    #blast
    with open(args.blast_list, "r") as fh:
        for l in fh:
            id = l.rstrip()
            contam_ids[id]=0
    #print contam_ids
    # read in gut_oral_contigs_uniq.fna
    # Fasta headers look like:
    #   >contig-img:A-gt:A-start:S-stop:E-img:B-gt:B-uid
    with open(args.fasta_file,"r") as fh:
        for l in fh:
            # check if fasta header
            if ">" in l:
                fasta_info = l.split(">")[1].split("-")
                contig= fasta_info[0].rstrip()
                img_a= fasta_info[1].split(":")[1].rstrip()
                genome_tree_a= fasta_info[2].split(":")[1].rstrip()
                start= fasta_info[3].split(":")[1].rstrip()
                stop= fasta_info[4].split(":")[1].rstrip()
                img_b= fasta_info[5].split(":")[1].rstrip()
                genome_tree_b= fasta_info[6].split(":")[1].rstrip()
                uid= fasta_info[7].rstrip()
                try:
                    # Attach coordinates only to UIDs already flagged dirty.
                    if uid in contam_ids:
                        contam_ids[uid] = [contig,img_a,genome_tree_a,img_b,genome_tree_b,start,stop]
                except KeyError:
                    pass
    #print contam_ids
    #-----
    """img_id_a   genome_tree_id_a        contig_a        contig_length_a start_a stop_a  length_a        img_id_b        genome_tree_id_b        contig_b        contig_length_b start_b stop_b  length_b"""
    #parse gut_oral_contigs.csv file
    with open(args.gut_oral_contigs,"r") as fh:
        #header
        header=fh.readline().rstrip()
        #read through file line by line
        print header
        for l in fh:
            Good_to_print = True
            tabs= l.split("\t")
            img_id_a= tabs[0]
            genome_tree_id_a= tabs[1]
            contig_a= tabs[2]
            contig_length_a= tabs[3]
            start_a= tabs[4]
            stop_a= tabs[5]
            length_a= tabs[6]
            img_id_b= tabs[7]
            genome_tree_id_b= tabs[8]
            contig_b= tabs[9]
            contig_length_b= tabs[10]
            start_b= tabs[11]
            stop_b= tabs[12]
            length_b= tabs[13].rstrip()
            # Drop the row if either side matches a dirty transfer's
            # contig AND exact start/stop coordinates.
            for uid in contam_ids:
                if contig_a == contam_ids[uid][0] or contig_b == contam_ids[uid][0]:
                    if int(start_a) == int(contam_ids[uid][5]) and int(stop_a) == int(contam_ids[uid][6]):
                        Good_to_print = False
                    if int(start_b) == int(contam_ids[uid][5]) and int(stop_b) == int(contam_ids[uid][6]):
                        Good_to_print = False
            if Good_to_print:
                print l.rstrip()
    #print len(contam_ids.keys())
    #print test
    """
    fig = plt.figure()
    #-----
    # make a 3d plot
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(points[:,0],
               points[:,1],
               points[:,2],
               #edgecolors='none',
               #c=colors,
               #s=2,
               #marker='.'
               )
    #-----
    # make a 2d plot
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(points[:,0],
            points[:,1],
            '*g')
    #-----
    # show figure
    plt.show()
    # or save figure
    plt.savefig(filename,dpi=300,format='png')
    #-----
    # clean up!
    plt.close(fig)
    del fig
    """
    return 0
def printSeq(rawSeq, start, stop):
    """Stub: intended to print the subsequence of rawSeq between start
    and stop (handling start > stop as a reverse-strand slice).
    Unimplemented."""
    # if start > stop
    pass
def revComp(seq):
    """Stub: intended to return the reverse complement of seq.
    Unimplemented."""
    # see rob edwards
    pass
###############################################################################
###############################################################################
###############################################################################
###############################################################################
if __name__ == '__main__':
    # Command-line interface; all inputs are file paths.
    parser = argparse.ArgumentParser()
    parser.add_argument('-c','--coord', help="nucmer coords file")
    parser.add_argument('-u','--univec_list', help="List of dirty sequence IDs (UniVec)")
    parser.add_argument('-f','--fasta_file', help="Fasta file containing transfer and uid information")
    parser.add_argument('-b','--blast_list', help="List of dirty sequence IDs (blast)")
    parser.add_argument('-go','--gut_oral_contigs', help="gut_oral_contigs.csv")
    #parser.add_argument('positional_arg3', nargs='+', help="Multiple values")
    #parser.add_argument('-X', '--optional_X', action="store_true", default=False, help="flag")

    # parse the arguments
    args = parser.parse_args()

    # do what we came here to do
    doWork(args)
###############################################################################
###############################################################################
###############################################################################
###############################################################################
|
UTF-8
|
Python
| false | false | 2,014 |
5,214,090,327,461 |
a0ab36af82d3fa1b7c5506672b43787e32739183
|
1f73d07d1ece39ee01153ff032702731fb9fb26d
|
/week0/day1/fibonacci/solution.py
|
efe6a3e9e2a5fa3824fc4a45e70262d5d7be49cb
|
[] |
no_license
|
Dayana-Marinova/HackBulgariaHomework
|
https://github.com/Dayana-Marinova/HackBulgariaHomework
|
84b281f11757e6ad6b8febb674f2a666713ab00d
|
5470ef4e637b150c800716806f39763146ea93e6
|
refs/heads/master
| 2021-01-20T08:47:12.366316 | 2014-06-25T00:35:08 | 2014-06-25T00:35:08 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def nth_fibonacci(n):
    """Return the n-th Fibonacci number, 1-indexed (F(1) == F(2) == 1).

    The original reused the parameter ``n`` as the running sum inside the
    loop, which worked only by accident of the loop bound being evaluated
    once; this version keeps the accumulators separate.
    """
    if n == 1 or n == 2:
        return 1
    prev, curr = 1, 1
    # After each step, curr holds F(i+1); loop runs n-2 times.
    for _ in range(2, n):
        prev, curr = curr, prev + curr
    return curr
|
UTF-8
|
Python
| false | false | 2,014 |
14,602,888,808,394 |
a8c617f62e5b451debc1d62a3e6002f9fc2996bd
|
54d4dee046ab437121a62a7d3929bc9f092a171d
|
/cljppy/map/test/test_map.py
|
55c995ec28b5a2f176a6ef021370610a22231737
|
[] |
no_license
|
no-man-is-an-island/cljppy
|
https://github.com/no-man-is-an-island/cljppy
|
b896145fcc5f131be44980fa549953f8268c32e4
|
4f2d018a477496472cf6b2ac525daf01d89b2d1a
|
refs/heads/master
| 2021-01-01T19:16:41.940795 | 2013-11-20T15:50:56 | 2013-11-20T15:50:56 | 11,862,161 | 1 | 0 | null | false | 2013-11-11T15:51:14 | 2013-08-03T11:25:25 | 2013-11-11T15:51:14 | 2013-11-11T15:51:14 | 698 | 1 | 1 | 3 |
Python
| null | null |
from cljppy.core import partial, even, plus, identity
from cljppy.map import *
def test_assoc():
    """assoc returns an updated copy; the input map is not mutated."""
    assert assoc({}) == {}
    assert assoc({"a": 2}, "a", 1) == {"a": 1}
    y = {"a": 2}
    assert assoc(y, "a", 1, "b", 3) == {"a": 1, "b": 3}
    assert y == {"a": 2}
def test_dissoc():
    """dissoc removes keys from a copy; the input map is not mutated."""
    assert dissoc({}) == {}
    assert dissoc({}, "a") == {}
    x = {"a": 2, "b": 3, "c": 1}
    assert dissoc(x, "a", "b") == {"c": 1}
    assert x == {"a": 2, "b": 3, "c": 1}
def test_merge():
    """Later maps win on key collisions, like Clojure's merge."""
    assert merge() == {}
    assert merge({"a": 1}) == {"a": 1}
    assert merge({"a": 1}, {"a": 2, "b": 2}) == {"a": 2, "b": 2}
def test_merge_with():
    """Colliding values are combined with the supplied binary function."""
    assert merge_with(plus) == {}
    assert merge_with(plus, {"s": 1}) == {"s": 1}
    assert merge_with(plus, {"a": 1}, {"a": 2, "b": 2}) == {"a": 3, "b": 2}
def test_map_vals():
    """map_vals applies f to every value, keeping keys."""
    assert map_vals({}, partial(plus, 1)) == {}
    assert map_vals({"a": 1}, partial(plus, 1)) == {"a": 2}
def test_filter_keys_by_val():
    """Returns the *keys* (as a list) whose values satisfy the predicate."""
    assert filter_keys_by_val(even, {}) == []
    assert filter_keys_by_val(even, {"a": 1, "b": 2}) == ["b"]
def test_remove_keys_by_val():
    # NOTE(review): `remove_keys_bv_val` looks like a typo for
    # `remove_keys_by_val`, but it must match the name exported by
    # cljppy.map — confirm against the library before renaming.
    assert remove_keys_bv_val(even, {}) == []
    assert remove_keys_bv_val(even, {"a": 1, "b": 2}) == ["a"]
def test_filter_vals():
    """Keeps entries whose *value* satisfies the predicate."""
    assert filter_vals(even, {}) == {}
    assert filter_vals(even, {"a": 2, 2: 3}) == {"a": 2}
def test_remove_vals():
    """Drops entries whose *value* satisfies the predicate."""
    assert remove_vals(even, {}) == {}
    assert remove_vals(even, {"a": 2, 2: 3}) == {2: 3}
def test_filter_keys():
    """Keeps entries whose *key* satisfies the predicate."""
    assert filter_keys(even, {}) == {}
    assert filter_keys(even, {1: 2, 2: 3}) == {2: 3}
def test_remove_keys():
    """Drops entries whose *key* satisfies the predicate."""
    assert remove_keys(even, {}) == {}
    assert remove_keys(even, {1: 2, 2: 3}) == {1: 2}
def test_select_keys():
    """Projects the map onto the given keys; missing keys are ignored."""
    assert select_keys({}, []) == {}
    assert select_keys({}, ["a"]) == {}
    assert select_keys({"a": 1, "b": 2}, ["a"]) == {"a": 1}
def test_update_each():
    """update_each applies f(+args) to each listed key, returning a copy."""
    assert update_each(dict(a=1, b=2, c=3), ("b", "c"), plus, 1) == dict(a=1, b=3, c=4)
    assert update_each(dict(a=1, b=2, c=3), (), plus, 1) == dict(a=1, b=2, c=3)
    # Returns a copy
    original = dict(a=1, b=2, c=3)
    update_each(original, ("a", "b", "c"), plus, 1)
    assert original == dict(a=1, b=2, c=3)
    # Passes none to mapping function if key is not found
    assert update_each(dict(a=1, b=2, c=3), ("a", "z"), identity) == dict(a=1, b=2, c=3, z=None)
|
UTF-8
|
Python
| false | false | 2,013 |
16,862,041,648,471 |
259f2034eca19b8c466b740b7e549eb70932c0e5
|
5bf7a44b70f019b4a3520f9a3e4c7bf1aba26d15
|
/SQLiteHandler.py
|
3fb6cfecefddb154ad522a886619ec62935bb38f
|
[] |
no_license
|
xycloops123/File-System-FUSE-bindings
|
https://github.com/xycloops123/File-System-FUSE-bindings
|
ee9def8295875fd173d79c9faa6dd00fc92c11f7
|
f47508a8d1ed5e91eb95eed409694e2579fcf00f
|
refs/heads/master
| 2016-09-05T19:37:51.163392 | 2014-10-21T23:55:08 | 2014-10-21T23:55:08 | 25,552,067 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#############################################################################
# SQLiteHandler.py - Class responsible for working with the SQlite databases.
#
# __init__() - Takes the path of the database as an argument.
# connect() - Establishes a connection to the database.
# execute() - Executes the SQL queries on the database.
# commit() - This method commits the current transaction.
# close() - This closes the database connection
#############################################################################
import sqlite3,os
class SQLiteHandler:
    """Thin wrapper around an SQLite database connection.

    Construct with the path of an *existing* database file, call connect()
    once, then use execute()/fetchAll()/fetchOne()/commit()/close().

    Fix: the original used the Python-2-only ``except Error, msg`` syntax;
    ``except Error as msg`` is equivalent and valid on Python 2.6+ and 3.
    """

    def __init__(self, databasepath):
        # Gets the database location; the file must already exist on disk.
        self.path = databasepath
        if not os.path.exists(self.path):
            raise Exception("Database not found!")
        self.connection = None
        self.query = None

    def connect(self):
        """Open the connection and return the sqlite3 Connection object."""
        try:
            self.connection = sqlite3.connect(self.path)
            return self.connection
        except sqlite3.Error as msg:
            raise msg

    def execute(self, query):
        """Run *query* on a fresh cursor and return that cursor."""
        self.query = query
        try:
            self.cursorobj = self.connection.cursor()
            if self.cursorobj is not None:
                self.cursorobj.execute(self.query)
            return self.cursorobj
        except sqlite3.Error as msg:
            raise msg

    def fetchAll(self, query):
        """Execute a SELECT and return all result rows as a list of tuples."""
        self.query = query
        row = self.connection.cursor().execute(self.query)
        self.commit()
        return row.fetchall()

    def fetchOne(self, query):
        """Execute a SELECT and return only the first result row (or None)."""
        self.query = query
        row = self.connection.cursor().execute(self.query)
        self.commit()
        return row.fetchone()

    def commit(self):
        """Commit the current transaction, if a connection is open."""
        try:
            if self.connection is not None:
                self.connection.commit()
        except sqlite3.Error as msg:
            raise msg

    def close(self):
        """Close the database connection, if one is open."""
        try:
            if self.connection is not None:
                self.connection.close()
        except sqlite3.Error as msg:
            raise msg
#####################################################################################################
|
UTF-8
|
Python
| false | false | 2,014 |
4,045,859,204,272 |
b82061e9d83957c394848ce457baf65b1f333b6e
|
ffd07ce8a45b9f1d1c2b36b84716e3b9c540cbf1
|
/sunspot/sunspot_db_v01.py
|
676d45d27f574db6915b2a5930589fe1c86eb86d
|
[] |
no_license
|
ahetland/lsePyWorkshop
|
https://github.com/ahetland/lsePyWorkshop
|
c2597421ed01f8c6d03d1da5f97f459363e1dbb5
|
5623187f66a8a6a44480eca6fcc4bf4a0e4b7daa
|
refs/heads/master
| 2016-09-06T22:59:39.903635 | 2014-06-27T12:30:16 | 2014-06-27T12:30:16 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
import datetime
from sqlalchemy import Column, ForeignKey, Integer, String, Date
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
Base = declarative_base()
class SunSpot(Base):
    """Class for a single sunspot."""
    __tablename__ = 'tbl_sunspots'
    id = Column(Integer, primary_key=True)
    date = Column(Date, nullable=False)
    sunspot_cnt = Column(Integer, nullable=False)
    def fromSILO(self, year, month, day, date_in_y_fr, sunspot_cnt, def_prov):
        "Initialise with data from SILSO"
        # NOTE(review): date_in_y_fr and def_prov are accepted but ignored —
        # presumably columns of the SILSO record format; confirm intent.
        self.date = datetime.date( int(year), int(month), int(day) )
        self.sunspot_cnt = sunspot_cnt
    def __str__(self):
        "Return string representation"
        return "%s Count: %s" % (self.date, self.sunspot_cnt)
# Create an engine that stores data in the local directory's
# sqlalchemy_example.db file.
engine = create_engine('sqlite:///sqlalchemy_example_2.db')
# Create all tables in the engine. This is equivalent to "Create Table"
# statements in raw SQL.
Base.metadata.create_all(engine)
# Running the module directly executes any doctests (none defined above yet).
if __name__=='__main__':
    import doctest
    doctest.testmod()
|
UTF-8
|
Python
| false | false | 2,014 |
16,338,055,624,909 |
50cff8ddc6a2754f04bde2c6b2446a0e1f084ccb
|
9b153a48395d64e681c195a61e395f94d3efb217
|
/news/urls.py
|
962054d1c76d791d48f7825f24a9651304ea606b
|
[] |
no_license
|
MaratFM/Djanym
|
https://github.com/MaratFM/Djanym
|
7f19f0168835b4fee961d1df3e3f517480700670
|
3031a38a88a96d1ade8e2391cb455fa9921e7549
|
refs/heads/master
| 2021-03-13T00:01:13.790120 | 2010-02-20T09:09:29 | 2010-02-20T09:09:29 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#coding=utf-8
from django.conf.urls.defaults import *
from views import *
from models import News
# Extra-context dicts for Django's old generic list/detail views.
news_list_dict = {'queryset': News.objects.all(),
'paginate_by': 10,
}
news_detail_dict = {'queryset': News.objects.all(),
}
# Legacy (pre-1.5) string-based view references to the generic views.
urlpatterns = patterns('',
url(r'^$',
'django.views.generic.list_detail.object_list',
news_list_dict,
name='news_list' ),
url(r'^(?P<object_id>\d+)/$',
'django.views.generic.list_detail.object_detail',
news_detail_dict,
name='news_detail'),
)
|
UTF-8
|
Python
| false | false | 2,010 |
4,080,218,977,417 |
58682fbb0f34e7f631bcfa45c1b4d2bdbc72161a
|
c04b1c494ca15c4e3a0279d85f28eae32efae193
|
/StillWeb/HtAccess.py
|
5cb1f987d7188efb2f297256d5a7e339958035eb
|
[] |
no_license
|
midnightskinhead/stillweb
|
https://github.com/midnightskinhead/stillweb
|
69a6c1a27554dee08024726b33659697c2aff872
|
f160c241813e3cb342ee4582c4ef313806c34b1f
|
refs/heads/master
| 2021-01-18T18:16:38.835283 | 2011-06-14T03:47:34 | 2011-06-14T03:47:34 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
# HtAccess.py - StillWeb .htaccess file generator
# Copyright (C) 2008 Dwayne C. Litzenberger <[email protected]>
import os
import shutil
import urllib.parse
from StillWeb.sw_util import TypicalPaths
class HtAccessPlugin:
    """StillWeb plugin that generates .htaccess files with a RewriteBase."""
    def __init__(self, framework):
        self._framework = framework
        # Register the `make_htaccess` script command with the framework.
        self._framework.plugins['StillWeb.ScriptProcessor'].register_command('make_htaccess', self.handle_make_htaccess)
    def cleanup(self):
        # Drop the framework reference to break the reference cycle.
        self._framework = None
    def handle_make_htaccess(self, target_url):
        """Make a .htaccess file, adding a proper RewriteBase to the file

        Usage: make_htaccess TARGET_RELATIVE_URL
        """
        tp = TypicalPaths(self._framework, target_url)
        print("generating htaccess %s (using %s)" % (tp.output_filename, tp.source_filename))
        source_file = open(tp.source_filename, "rt", encoding='UTF-8')
        # Remove any stale output before copying the template over.
        if os.path.exists(tp.output_filename):
            os.unlink(tp.output_filename)
        output_file = open(tp.output_filename, "wt", encoding='UTF-8')
        shutil.copyfileobj(source_file, output_file)

        # Add RewriteBase line to .htaccess
        # SECURITY FIXME - base_url must not have special characters that will be interpreted weirdly by Apache
        output_file.write("\n# Begin automatically-generated section\n")
        output_file.write("RewriteBase %s\n" % (urllib.parse.urlparse(tp.base_url).path,))

        # Close files
        source_file.close()
        output_file.close()
output_file.close()
def create_plugin(framework):
    """Plugin factory: instantiate HtAccessPlugin for the given framework."""
    return HtAccessPlugin(framework)
# vim:set ts=4 sw=4 sts=4 expandtab:
|
UTF-8
|
Python
| false | false | 2,011 |
807,453,899,093 |
f986cb91deb18a750ccf3775234d1885df7590dc
|
902a435d9e7b73cbb050cf407d04d97d9dfe056b
|
/perm.py
|
7d3c97488583979e4ae8701489f25c4ee3cf7b91
|
[] |
no_license
|
git2samus/rosalind-python
|
https://github.com/git2samus/rosalind-python
|
77c6546d520d9afecf779398ec462ca57b75247d
|
bcf0297e1f7210e965df49ef5acc42b3f3558fbe
|
refs/heads/master
| 2016-09-05T13:21:55.892136 | 2014-05-03T00:27:57 | 2014-05-03T00:27:57 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python3
import sys, math
from itertools import permutations
if __name__ == '__main__':
    # Rosalind "PERM": print the count of permutations of 1..n, then
    # every permutation, one per line, space-separated.
    try:
        n = int(sys.argv[1])
    except IndexError:
        print('Missing arguments (expects one integer)', file=sys.stderr)
        sys.exit(1)
    except ValueError:
        print('Bad arguments (expects one integer)', file=sys.stderr)
        sys.exit(1)

    print(math.factorial(n))
    for perm in permutations(range(1, n + 1)):
        print(' '.join(map(str, perm)))
|
UTF-8
|
Python
| false | false | 2,014 |
6,055,903,924,640 |
164c91ca79068d8e52b27342efdd396b42472377
|
601e46c8eb8f72c52408fc49b5cc384641a93edb
|
/app/views.py
|
42045ef4dc1a9bd1ab35794696fcee55a4b720a9
|
[] |
no_license
|
aykamko/calmunch
|
https://github.com/aykamko/calmunch
|
0595885e8415b476f7ea7de2a2aac9ba44cdc4eb
|
137f6d34ac613b4cdc9becc967669cf680ab767e
|
refs/heads/master
| 2016-09-03T00:43:43.165243 | 2013-09-29T06:13:36 | 2013-09-29T06:13:36 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from flask import Flask, request, session, g, redirect, url_for, \
abort, render_template, flash
from app import app
from app.models import Event
from app.database import db_session
@app.route('/')
def index():
    # Home page: list every Event currently in the database.
    event_query = db_session.query(Event).all()
    return render_template('index.html', event_query = event_query);
@app.route('/about_us')
def about_us():
    # Static informational page.
    return render_template('about_us.html')
@app.route('/create-event', methods=('GET', 'POST'))
def create_event():
    # Import here to avoid a circular import between app and app.forms.
    from app.forms import CreateEventForm
    form = CreateEventForm(request.form)
    # On a valid POST, redirect home; otherwise fall through and re-render.
    if request.method == 'POST' and form.validate():
        return redirect('/')
    # Flash one consolidated message naming every invalid field.
    if len(form.errors.keys()) != 0:
        flash("Please fill the following fields with valid information: " + ', '.join([field.capitalize() for field in form.errors.keys()]))
    return render_template('create-event.html', form = form)
|
UTF-8
|
Python
| false | false | 2,013 |
13,451,837,577,044 |
6997db20e0351d9d1854afd708d261a102ceb23c
|
5a08be5db2e1550b32d763c0b2a9dd7762158743
|
/test/test-run.py.in
|
697015ad0327c3c2d8ef3c27f14071adc23f27df
|
[] |
no_license
|
rocky/pydb
|
https://github.com/rocky/pydb
|
74ddc7213ab05dbaf365a462d35adf8c37ca94b6
|
7f414431af3544c4201454bb3492b68c551e5edc
|
refs/heads/master
| 2016-08-07T12:50:55.525965 | 2012-12-26T19:44:07 | 2012-12-26T19:44:07 | 244,214 | 15 | 4 | null | false | 2012-12-26T21:13:49 | 2009-07-06T11:34:09 | 2012-12-26T19:44:12 | 2012-12-26T19:44:12 | 180 | null | 2 | 0 |
Python
| null | null |
#!@PYTHON@ -t
# -*- Python -*-
# $Id: test-run.py.in,v 1.5 2008/12/10 13:31:26 rockyb Exp $
"Unit test for Extended Python debugger's runl and runv commands "
import difflib, os, sys, unittest, signal, time
# The @...@ tokens are substituted by autoconf when this .in template is
# configured; the paths below make the in-tree pydb importable for testing.
top_builddir = "@top_builddir@"
if top_builddir[-1] != os.path.sep:
    top_builddir += os.path.sep
sys.path.insert(0, os.path.join(top_builddir, 'pydb'))
top_srcdir = "@top_srcdir@"
if top_srcdir[-1] != os.path.sep:
    top_srcdir += os.path.sep
sys.path.insert(0, os.path.join(top_srcdir, 'pydb'))
import pydb
builddir = "@builddir@"
if builddir[-1] != os.path.sep:
    builddir += os.path.sep
srcdir = "@srcdir@"
if srcdir[-1] != os.path.sep:
    srcdir += os.path.sep
pydir = os.path.join(top_builddir, "pydb")
pydb_short = "pydb.py"
pydb_path = os.path.join(pydir, pydb_short)
def diff_files(outfile, rightfile):
    # Compare test output against the expected "right" file.  On a match the
    # output file is deleted; on a mismatch the unified diff is printed.
    # Returns True when the files are identical.  (Python 2 print syntax.)
    fromfile = rightfile
    fromdate = time.ctime(os.stat(fromfile).st_mtime)
    fromlines = open(fromfile, 'U').readlines()
    tofile = outfile
    todate = time.ctime(os.stat(tofile).st_mtime)
    tolines = open(tofile, 'U').readlines()
    diff = list(difflib.unified_diff(fromlines, tolines, fromfile,
                                     tofile, fromdate, todate))
    if len(diff) == 0:
        os.unlink(outfile)
    for line in diff:
        print line,
    return len(diff) == 0
class RunTests(unittest.TestCase):
    """End-to-end tests for pydb's runl()/runv() entry points: each runs the
    debugger in batch mode against a sample script and diffs the captured
    output against a checked-in "right" file."""
    def test_runl(self):
        """Test pydb.runl()"""
        python_script = '%sgcd.py' % srcdir
        cmdfile = '%sbrkpt2.cmd' % srcdir
        # Python <= 2.4.0 produces slightly different output; use its own
        # expected file.
        if sys.hexversion <= 0x020400f0:
            rightfile = os.path.join(top_srcdir, 'test', 'data',
                                     'brkpt2-2.3.right')
        else:
            rightfile = os.path.join(top_srcdir, 'test', 'data',
                                     'brkpt2.right')
        outfile = 'runtest.out'
        if os.path.exists(outfile): os.unlink(outfile)
        pydb.runl('--basename', '--batch', '--nx', '--output', outfile,
                  '--command', cmdfile, python_script, '3', '5')
        result = diff_files(outfile, rightfile)
        self.assertEqual(True, result, "brkpt2 (via runl)")
    def test_runv(self):
        """Test pydb.runl()"""
        python_script = '%shanoi.py' % srcdir
        cmdfile = '%scmdparse.cmd' % srcdir
        rightfile = os.path.join(top_srcdir, 'test', 'data',
                                 'cmdparse.right')
        outfile = 'runtest.out'
        if os.path.exists(outfile): os.unlink(outfile)
        args = ('--basename', '--batch', '--nx', '--output', outfile,
                '--command', cmdfile, python_script)
        pydb.runv(args)
        result = diff_files(outfile, rightfile)
        self.assertEqual(True, result, "cmdparse (via runv)")
    def test_pydbrc(self):
        """Test pydb.runl()"""
        # Point HOME at the source dir so pydb picks up the test .pydbrc.
        python_script = '%shanoi.py' % srcdir
        cmdfile = '%spydbrc.cmd' % srcdir
        rightfile = os.path.join(top_srcdir, 'test', 'data',
                                 'pydbrc.right')
        outfile = 'pydbrc.out'
        if os.path.exists(outfile): os.unlink(outfile)
        os.environ['HOME'] = srcdir
        args = ('--basename', '--batch', '--output', outfile,
                '--command', cmdfile, python_script)
        pydb.runv(args)
        result = diff_files(outfile, rightfile)
        self.assertEqual(True, result, "pydbrc test")
        # Exercise HOME both with and without a trailing path separator.
        if srcdir[-1] == os.path.sep:
            # Chop off pathsep to make sure that works
            os.environ['HOME'] = srcdir[0:-1]
        else:
            # Add of pathsep to make sure that works
            os.environ['HOME'] = srcdir + os.path.sep
        pydb.runv(args)
        result = diff_files(outfile, rightfile)
if __name__ == '__main__':
    unittest.main()
|
UTF-8
|
Python
| false | false | 2,012 |
8,804,682,975,267 |
e47c8186393388b4b2f29b88c269a40916fb1dbf
|
243e2a25fe12f9f7e0fda524af4968ec0500f513
|
/test_scripts/test56.py
|
c0822e06e35216f95082c2f1dcad1057caee5f60
|
[
"GPL-2.0-only",
"GPL-1.0-or-later",
"LGPL-2.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-generic-exception",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-proprietary-license",
"GPL-2.0-or-later"
] |
non_permissive
|
pez2001/sVimPy
|
https://github.com/pez2001/sVimPy
|
e6e515167ce510c1f9150d963af381fa826da285
|
01adfffcaf80ed5deb33c24fb31d6de105aef834
|
refs/heads/master
| 2021-01-01T05:33:35.417567 | 2013-10-29T21:10:14 | 2013-10-29T21:10:14 | 3,014,874 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def f(i):
    """Identity function: return *i* unchanged."""
    return i
print(f(1))
|
UTF-8
|
Python
| false | false | 2,013 |
3,083,786,565,946 |
d4ed2b9988acce2bd10ff44547e8b16074deb9ff
|
437c3450901ae748a07b6f578695167b01e11acf
|
/jump/commands/__init__.py
|
68813a9e1a0a19ffc4d2f41134103274932daa94
|
[
"GPL-3.0-only",
"GPL-3.0-or-later",
"BSD-3-Clause",
"GPL-2.0-or-later"
] |
non_permissive
|
jonasdiemer/jump
|
https://github.com/jonasdiemer/jump
|
8383e2c00dcc7209420d364c2867d43c8c113abb
|
264ca89989b82ad233ce37864220d0b42c4fd9cf
|
refs/heads/master
| 2021-01-19T16:24:56.122028 | 2010-01-06T04:51:20 | 2010-01-06T04:51:20 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# encoding: utf-8
"""
__init__.py
Created by Olli Wang ([email protected]) on 2009-10-26.
Copyright (c) 2009 Ollix. All rights reserved.
This file is part of Jump.
Jump is free software: you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or any later version.
Jump is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with Jump. If not, see <http://www.gnu.org/licenses/>.
"""
|
UTF-8
|
Python
| false | false | 2,010 |
13,915,694,077,699 |
962e95a59dd192a3d8705c68d5992de868783884
|
2ed2f087b9b5c7e1cf84afb914195f2169a7617e
|
/setup.py
|
e4a54df573a8b439f856e3386fb0f2c621250ce4
|
[
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"CC-BY-ND-3.0",
"LicenseRef-scancode-unknown-license-reference"
] |
non_permissive
|
koalalorenzo/PhiFi
|
https://github.com/koalalorenzo/PhiFi
|
3d2c73712e70115c8047add5a5bb7a9118bd0fbb
|
36a5ad9f6081007794fbebd9f6eafaf21d62d461
|
refs/heads/master
| 2021-01-19T10:11:17.829598 | 2012-08-27T17:12:17 | 2012-08-27T17:12:17 | 3,274,283 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
# -*- coding=utf-8 -*-
from distutils.core import setup
import phifi
# Package metadata is sourced from the phifi package itself so the two
# never drift apart.
setup(
name='phifi',
version=phifi.__version__,
description=phifi.__description__,
author=phifi.__author__,
license=phifi.__license__,
author_email='[email protected]',
url='http://phifi.setale.me/',
packages=['phifi']
)
|
UTF-8
|
Python
| false | false | 2,012 |
17,557,826,328,955 |
492863dc2220755a9974e12f5da5a32486af4aaa
|
ff78b2043fd5a30647dfb5a29ebea74b370af3f6
|
/students/ScottC/session4/session_4_safe_input.py
|
47c874fabf083433dc26d127da4c201ed64f633e
|
[
"GPL-1.0-or-later",
"LGPL-2.0-or-later",
"LGPL-2.1-only",
"GPL-3.0-only",
"GPL-2.0-only",
"CC-BY-SA-4.0"
] |
non_permissive
|
AmandaMoen/AmandaMoen
|
https://github.com/AmandaMoen/AmandaMoen
|
7f1bd10c87eb8b3873caf2272d22a46a89db413c
|
6f4997aef6f0aecb0e092bc4b1ec2ef61c5577e8
|
refs/heads/master
| 2020-05-17T09:17:15.925291 | 2014-05-29T04:07:19 | 2014-05-29T04:07:19 | 19,657,795 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def safe_input():
    """Wrapper around raw_input() to return None instead of exceptions"""
    # Python 2 code: raw_input() and statement-form print.
    try:
        input_string = raw_input('Enter any input: ')
    except KeyboardInterrupt:
        # User pressed Ctrl-C.
        print u""
        print u"You initiated a keyboard interrupt. Good bye."
        return None
    except EOFError:
        # Input stream closed (Ctrl-D / Ctrl-Z).
        print u""
        print u"You reached the End Of File. Good bye."
        return None
    else:
        # NOTE(review): the input is only echoed; the function implicitly
        # returns None on success as well — confirm that is intended.
        print input_string
if __name__ == '__main__':
    safe_input()
|
UTF-8
|
Python
| false | false | 2,014 |
2,714,419,377,418 |
9eb41edd46298b3016bedede06e77a7d581a2ac4
|
1a1b7f607c5e0783fd1c98c8bcff6460e933f09a
|
/core/group/group.py
|
b70e80c35b08e95ad24622419c2d974780b11639
|
[] |
no_license
|
smrmohammadi/freeIBS
|
https://github.com/smrmohammadi/freeIBS
|
14fb736fcadfaea24f0acdafeafd2425de893a2d
|
7f612a559141622d5042614a62a2580a72a9479b
|
refs/heads/master
| 2021-01-17T21:05:19.200916 | 2014-03-17T03:07:15 | 2014-03-17T03:07:15 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from core.ibs_exceptions import *
from core.errors import errorText
class Group:
    """In-memory representation of a user group and its attributes."""

    def __init__(self, group_id, group_name, comment, owner_id, attributes):
        self.group_id = group_id
        self.group_name = group_name
        self.comment = comment
        self.owner_id = owner_id
        # Mapping of attribute name -> value.
        self.attributes = attributes

    def hasAttr(self, attr_name):
        """Return True if the group defines attribute *attr_name*."""
        # `in` instead of dict.has_key(): equivalent, and works on both
        # Python 2 and Python 3 (has_key was removed in 3.x).
        return attr_name in self.attributes

    def getAttr(self, attr_name):
        """Return the value of *attr_name*; raise GeneralException if absent."""
        try:
            return self.attributes[attr_name]
        except KeyError:
            raise GeneralException(errorText("GENERAL","ATTR_NOT_FOUND")%attr_name)

    def getGroupName(self):
        return self.group_name

    def getGroupID(self):
        return self.group_id

    def getComment(self):
        return self.comment

    def getOwnerID(self):
        return self.owner_id

    def getAttrs(self):
        # NOTE: returns the internal dict itself, not a copy; kept as-is
        # for backward compatibility with existing callers.
        return self.attributes
|
UTF-8
|
Python
| false | false | 2,014 |
13,511,967,138,464 |
cba82c4b8e68d7827b23462498fc7e61680f84fc
|
28ffeb87eeec084ea7a56568256b763c71a48e47
|
/manage.py
|
c5e78b7446629c68181fce7737ebe26763adbbf6
|
[] |
no_license
|
marazmiki/django-project-template
|
https://github.com/marazmiki/django-project-template
|
013fe59405f3226ba317a209762272cae85e9d61
|
6c78ec664d7a4c45f1bc8673810edec4a40276d2
|
refs/heads/master
| 2021-01-10T19:09:46.327557 | 2014-12-29T07:54:01 | 2014-12-29T07:54:01 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# coding: utf-8
import os
import sys
def get_cwd():
    """Return the canonical, symlink-resolved directory containing this file."""
    here = os.path.realpath(os.path.dirname(__file__))
    return os.path.normpath(here)
def get_extra_dirs():
    """Directories under src/ that must be importable ('' is src/ itself)."""
    extra = ['compat', 'apps', 'project', '']
    return extra
def add_path(path):
    """Prepend src/<path> (resolved against this file's directory) to sys.path."""
    full = os.path.join(get_cwd(), 'src', path)
    sys.path.insert(0, os.path.normpath(full))
if __name__ == '__main__':
    # Make the project's source directories importable before Django starts.
    for directory in get_extra_dirs():
        add_path(directory)
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'project.settings')
    # Import only after sys.path is set up, so 'project.settings' resolves.
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
|
UTF-8
|
Python
| false | false | 2,014 |
2,327,872,309,875 |
f011e475e2fc8fc5e0b693bee5ab43b6c4916bdb
|
ca450e8134776db3adf7a2c40c18c5f7f8465342
|
/test_mapper.py
|
fb68dcf34633f08983ad42602b8e0f50d40d9c7e
|
[] |
no_license
|
jlmaccal/hhr_mapper
|
https://github.com/jlmaccal/hhr_mapper
|
fd59f0a38401208f8eac4bf23c9d4b2799c76c0a
|
75a014be80850b3d13f294518084f47d8fbec25b
|
refs/heads/master
| 2021-01-13T09:49:42.202161 | 2014-04-10T02:24:48 | 2014-04-10T02:24:48 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# encoding: utf-8
import unittest
import mapper
class TestFileIO(unittest.TestCase):
    """Unit tests for the hhr-file parsing helpers in `mapper`.

    All tests read the checked-in fixture 'sequence.hhr' and slice out one
    alignment record using the (start, end) line ranges found in setUp.
    """
    def setUp(self):
        # Load the fixture once per test and precompute record boundaries.
        self.lines = open('sequence.hhr').readlines()
        self.line_ranges = mapper.find_line_ranges(self.lines)
    def test_find_line_ranges_gives_correct_ranges(self):
        self.assertEqual(self.line_ranges[0], (114, 140))
        self.assertEqual(self.line_ranges[1], (140, 166))
    def test_find_line_ranges_gives_correct_number_of_matches(self):
        self.assertEqual(len(self.line_ranges), 104)
    def test_parse_match_number_is_correct(self):
        match_number = mapper.parse_match_number(
            self.lines[self.line_ranges[0][0]])
        self.assertEqual(match_number, 1)
    def test_parse_description_gives_correct_information(self):
        pdb, chain, desc = mapper.parse_description(
            self.lines[self.line_ranges[0][0] + 1])
        self.assertEqual(pdb, '4fr9')
        self.assertEqual(chain, 'A')
        self.assertIn('Uncharacterized', desc)
    def test_gather_query_sequence_lines_gets_correct_lines(self):
        lines = mapper.get_query_sequence_lines(
            self.lines[self.line_ranges[0][0]:self.line_ranges[0][1]])
        self.assertEqual(len(lines), 2)
        self.assertTrue(lines[0].startswith('Q T0644'))
        self.assertTrue(lines[1].startswith('Q T0644'))
    def test_gather_template_sequence_lines_gets_correct_lines(self):
        lines = mapper.get_template_sequence_lines(
            self.lines[self.line_ranges[0][0]:self.line_ranges[0][1]])
        self.assertEqual(len(lines), 2)
        self.assertTrue(lines[0].startswith('T 4fr9_A'))
        self.assertTrue(lines[1].startswith('T 4fr9_A'))
    def test_extract_residue_numbers_has_correct_start_and_end(self):
        lines = mapper.get_query_sequence_lines(
            self.lines[self.line_ranges[0][0]:self.line_ranges[0][1]])
        start1, end1 = mapper.extract_residue_range_from_sequence_line(lines[0])
        start2, end2 = mapper.extract_residue_range_from_sequence_line(lines[1])
        self.assertEqual(start1, 30)
        self.assertEqual(end1, 109)
        self.assertEqual(start2, 110)
        self.assertEqual(end2, 166)
    def test_extract_sequence_from_sequence_line_has_correct_sequence(self):
        lines = mapper.get_query_sequence_lines(
            self.lines[self.line_ranges[0][0]:self.line_ranges[0][1]])
        seq = mapper.extract_sequence_from_sequence_line(lines[0])
        self.assertTrue(seq.startswith('GYL'))
        self.assertTrue(seq.endswith('SYN'))
    def test_gather_query_ss_pred_gets_correct_lines(self):
        lines = mapper.get_query_ss_pred_lines(
            self.lines[self.line_ranges[0][0]:self.line_ranges[0][1]])
        self.assertEqual(len(lines), 2)
        self.assertTrue(lines[0].startswith('Q ss_pred'))
        self.assertTrue(lines[1].startswith('Q ss_pred'))
    def test_gather_tempate_ss_pred_gets_correct_lines(self):
        lines = mapper.get_template_ss_pred_lines(
            self.lines[self.line_ranges[0][0]:self.line_ranges[0][1]])
        self.assertEqual(len(lines), 2)
        self.assertTrue(lines[0].startswith('T ss_pred'))
        self.assertTrue(lines[1].startswith('T ss_pred'))
    def test_gather_tempate_dssp_gets_correct_lines(self):
        lines = mapper.get_template_dssp_lines(
            self.lines[self.line_ranges[0][0]:self.line_ranges[0][1]])
        self.assertEqual(len(lines), 2)
        self.assertTrue(lines[0].startswith('T ss_dssp'))
        self.assertTrue(lines[1].startswith('T ss_dssp'))
    def test_extract_ss_from_ss_line_has_correct_sequence(self):
        lines = mapper.get_query_ss_pred_lines(
            self.lines[self.line_ranges[0][0]:self.line_ranges[0][1]])
        ss = mapper.extract_ss_from_ss_line(lines[0])
        self.assertTrue(ss.startswith('CCC'))
        self.assertTrue(ss.endswith('CCC'))
    def test_get_query_ss_pred_gets_correct_ss_string(self):
        ss = mapper.get_query_ss(
            self.lines[self.line_ranges[0][0]:self.line_ranges[0][1]])
        self.assertEqual(len(ss), 137)
        self.assertTrue(ss.startswith('CCCCCH'))
        self.assertTrue(ss.endswith('CCCCC'))
    def test_get_template_ss_pred_gets_correct_ss_string(self):
        ss = mapper.get_template_ss(
            self.lines[self.line_ranges[0][0]:self.line_ranges[0][1]])
        self.assertEqual(len(ss), 137)
        self.assertTrue(ss.startswith('CccCCH'))
        self.assertTrue(ss.endswith('ccCCCC'))
    def test_get_template_dssp_gets_correct_ss_string(self):
        ss = mapper.get_template_dssp(
            self.lines[self.line_ranges[0][0]:self.line_ranges[0][1]])
        self.assertEqual(len(ss), 137)
        self.assertTrue(ss.startswith('TCCCC'))
        self.assertTrue(ss.endswith('CCCCC'))
    def test_get_query_sequence_is_correct(self):
        seq, start, end = mapper.get_query_sequence(
            self.lines[self.line_ranges[0][0]:self.line_ranges[0][1]])
        self.assertEqual(len(seq), 137)
        self.assertEqual(start, 30)
        self.assertEqual(end, 166)
        self.assertTrue(seq.startswith('GYL'))
        self.assertTrue(seq.endswith('PRV'))
    def test_get_template_sequence_is_correct(self):
        seq, start, end = mapper.get_template_sequence(
            self.lines[self.line_ranges[0][0]:self.line_ranges[0][1]])
        self.assertEqual(len(seq), 137)
        self.assertEqual(start, 8)
        self.assertEqual(end, 144)
        self.assertTrue(seq.startswith('GYL'))
        self.assertTrue(seq.endswith('PRV'))
|
UTF-8
|
Python
| false | false | 2,014 |
10,299,331,586,479 |
d94f3f93ec02af56f3792af0ec84bd516e4acdd9
|
83b183bfff8490fe13e92fd462e8911b464186f3
|
/week2/wk2.py
|
c642c417b798beb8cd3eb90d1b97997cc32a5606
|
[] |
no_license
|
wihl/6.002x-Spring-2014
|
https://github.com/wihl/6.002x-Spring-2014
|
b3d4eee64f67d615b75c2e0c65d68be7cb9a6f47
|
09eccbaa0a86444119aeafc8b904aa00a714d145
|
refs/heads/master
| 2020-04-08T09:40:24.625919 | 2014-05-13T16:33:26 | 2014-05-13T16:33:26 | 18,675,966 | 4 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import random
import pylab as plt
def genEven():
    """Return a uniformly random even integer x with 0 <= x < 100."""
    # randrange with step 2 yields 0, 2, 4, ..., 98.
    value = random.randrange(0, 100, 2)
    return value
def deterministicNumber():
    """Deterministically return an even number between 9 and 21 (always 10)."""
    return 10
def stochasticNumber():
    """Stochastically return a uniformly distributed even number in [10, 20]."""
    odd = random.randrange(9, 21, 2)  # 9, 11, ..., 19
    return odd + 1                    # 10, 12, ..., 20
# Sample 1000 values and display their histogram (Python 2: xrange).
y = [stochasticNumber() for x in xrange(1000)]
plt.hist(y, bins=100)
plt.show()
|
UTF-8
|
Python
| false | false | 2,014 |
11,330,123,749,687 |
7cb1a2f426f3af78fcfdc097e9d2c74a28834c3d
|
233e72e8948b81978d6a44ae02878427b1e58ea9
|
/space_game.py
|
3685995303c15be7e0ef0274aea78bc3e9dc5026
|
[] |
no_license
|
tmudway/Space_game
|
https://github.com/tmudway/Space_game
|
bf951e1db4bde102b3fd2ecaf5f0e108d198c9a5
|
4915157a42077856d2754a5f6c01fdcb4adcc79a
|
refs/heads/master
| 2021-05-27T02:07:58.889594 | 2013-05-09T10:46:55 | 2013-05-09T10:46:55 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import pygame, sys, math
from pygame.locals import *
from Classes.planet import Planet
from Classes.player import Player
# Generic image loading function. Useful to have around
def load_image(name):
    """Load an image from disk, preserving per-pixel alpha."""
    return pygame.image.load(name).convert_alpha()
class Star(pygame.sprite.Sprite):
    """A star sprite whose size scales with its mass and which animates
    by cycling through numbered star images."""
    def __init__(self, mass, location):
        pygame.sprite.Sprite.__init__(self)
        self.mass = mass
        # Gravity takes effect within twice the star's mass (in pixels).
        self.sphere_of_influence = mass * 2
        self.apparent_mass = 0
        self.display_location = location
        self.image = load_image('Images/star1.png')
        # Scale the sprite so its pixel size matches its mass.
        self.image = pygame.transform.scale(self.image, (self.mass, self.mass))
        self.rect = self.image.get_rect()
        self.rect.center = self.display_location
        self.frame_ID = 0
    def update(self):
        # frame_ID / 15 selects star0/star1 over a 30-frame cycle.
        # NOTE(review): relies on integer division (Python 2); under
        # Python 3 this would produce 'star0.5.png'-style names — confirm.
        self.image = load_image('Images/star' + str(self.frame_ID / 15) + '.png')
        self.frame_ID += 1
        if self.frame_ID >= 30:
            self.frame_ID = 0
def main():
    """Game entry point: set up pygame, create the solar system, and run
    the fixed-30-fps draw/update/event loop until the window is closed."""
    # Initiate pygame
    pygame.init()
    # Prep the fps timer
    fpsclock = pygame.time.Clock()
    # Define the main surfaces and set the caption
    window_surface = pygame.display.set_mode((1024,768))
    trace_surface = pygame.Surface((1024, 768))
    pygame.display.set_caption("Space Simulator")
    # Define colours that will be used
    black_colour = pygame.Color(0,0,0)
    yellow_colour = pygame.Color(255, 255, 0)
    green_colour = pygame.Color(0, 255, 0)
    red_colour = pygame.Color(255, 0, 0)
    # Define fonts to be used
    text_font = pygame.font.SysFont("monospace", 12)
    # Planetary bodies currently being tested
    bodies = pygame.sprite.Group()
    stars = pygame.sprite.Group()
    sun = Star(100, [550, 484])
    urf = Planet(20, sun, 200.0, 1,1)
    munjd = Planet(3, urf, 22.0, 2.0, 2)
    plr = Player([100, 100], [1,1], 5, sun)
    stars.add((sun))
    bodies.add((urf, munjd))
    while True:
        # Reset the main surface and overlay the trace surface
        window_surface.fill(black_colour)
        window_surface.blit(trace_surface, (0,0))
        # Draw stars
        for star in stars:
           # pygame.draw.circle(window_surface, yellow_colour, star.display_location, star.sphere_of_influence)
            window_surface.blit(star.image, (star.rect))
        stars.update()
        # Draw planets
        for planet in bodies:
            #pygame.draw.circle(window_surface, green_colour, [int(planet.display_location[0]), int(planet.display_location[1])], planet.sphere_of_influence)
            window_surface.blit(planet.image, (planet.rect))
        bodies.update()
        # Draw player
        window_surface.blit(plr.image, (plr.display_location))
        plr.update(stars, bodies)
        # Draw text
        text_surface = text_font.render("Fuel: " + str(plr.fuel), False, yellow_colour)
        window_surface.blit(text_surface, (0, 0))
        # Event handling code
        for event in pygame.event.get():
            if event.type == QUIT:
                pygame.quit()
            if event.type == KEYDOWN:
                if event.key == K_ESCAPE:
                    pygame.event.post(pygame.event.Event(QUIT))
        # Update planetary bodies
        # Update the screen
        pygame.display.update()
        # Keep the fps at a fixed amount
        fpsclock.tick(30)
if __name__ == '__main__':
    main()
|
UTF-8
|
Python
| false | false | 2,013 |
11,175,504,914,347 |
e787dc4a9c4adf3a87ebbeeb5a6fa101d1af5648
|
478af2611f0df443bda5b579b89461d2195df273
|
/app/__init__.py
|
69ef65af982e460efb6ad1b9a960a941e0f4c9b2
|
[] |
no_license
|
tmcelreath/citibike-flask
|
https://github.com/tmcelreath/citibike-flask
|
46cd20d701744ac5394250c1bbaee447ff1edfe9
|
4da25d291f8659903a8610e2e9d8079537d5ee31
|
refs/heads/master
| 2016-08-04T14:24:14.804608 | 2014-09-11T20:51:55 | 2014-09-11T20:51:55 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
""" App initialization """
import logging
from flask import Flask, g
from flask.ext.bootstrap import Bootstrap
from flask.ext.pymongo import PyMongo
from flask_limiter import Limiter
from flask_debugtoolbar import DebugToolbarExtension
from config import config
from citibike_dao import CitiBikeDAO
# File that receives all root-logger output (configured at DEBUG level).
LOG_FILENAME = 'app.main.log'
logging.basicConfig(filename=LOG_FILENAME, level=logging.DEBUG)
# Flask extensions created unbound here; each is attached to the app
# inside create_app() via init_app() (the app-factory pattern).
mongo = PyMongo()
bootstrap = Bootstrap()
limiter = Limiter()
def get_mongo():
    """Return the module-level PyMongo extension instance."""
    return mongo
def create_app(config_name):
    """Application factory: build and return a configured Flask app.

    :param config_name: key into the ``config`` dict (e.g. 'development',
        'production') selecting which config object to load.
    :return: the initialized :class:`flask.Flask` instance.
    """
    app = Flask(__name__)
    app.config.from_object(config[config_name])
    config[config_name].init_app(app)
    # Instantiated for its side effect of registering the debug toolbar;
    # the reference itself is unused.
    DEBUG_TOOLBAR = DebugToolbarExtension(app)
    # Bind the module-level extensions to this app instance.
    mongo.init_app(app)
    bootstrap.init_app(app)
    limiter.init_app(app)
    @app.before_request
    def pre_request():
        # Expose per-request helpers on flask.g for views to use.
        g.dao = CitiBikeDAO(mongo)
        g.mongo = mongo.db
        g.logging = logging
    # Imported here (not at module top) to avoid circular imports with the
    # blueprints, which themselves import from this package.
    from main import main as main_blueprint
    from api import api as api_blueprint
    # Per-blueprint rate limits: web pages 30/min, API 100/min.
    limiter.limit('30/minute')(main_blueprint)
    limiter.limit('100/minute')(api_blueprint)
    app.register_blueprint(main_blueprint, url_prefix='')
    app.register_blueprint(api_blueprint, url_prefix='/api')
    return app
|
UTF-8
|
Python
| false | false | 2,014 |
16,698,832,886,742 |
64182de2360f4c095735187670c8153684b2bdc4
|
28691ec55ebce9ec7045d12ea9675932ce12d671
|
/py2rhino-project/branches/sandbox2/py2rhino/_make/data/parser_out/light/rectangular_light_plane.py
|
dbb30a301d1cd3fe80d69aaf8e803d83f2ba66d9
|
[] |
no_license
|
ianclarksmith/design-automation
|
https://github.com/ianclarksmith/design-automation
|
1e71315193effc0c18b4a8b41300bda6f41a3f09
|
e27cc028fe582395f4a62f06697137867bb0fc33
|
refs/heads/master
| 2020-04-22T22:28:39.385395 | 2009-10-26T02:48:37 | 2009-10-26T02:48:37 | 37,266,915 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
rectangular_light_plane = {
"input_folder_name": "Light_Methods",
"input_file_name": "RectangularLightPlane",
"output_package_name": "light",
"output_module_name": "rectangular_light_plane",
"doc_html": """
Returns the plane of a rectangular light object.
""",
"syntax_html": {
0: ("strObject"),
},
"params_html": {
0: {
"name": "strObject",
"py_name": "object",
"opt_or_req": "Required",
"type": "String",
"name_prefix": "str",
"name_main": "Object",
"doc": """
The light object's identifier.
"""
},
},
"returns_html": {
0: {
"type": "array",
"doc": "The plane if successful. The elements of a plane array are as follows:"
},
1: {
"type": "array",
"doc": "The plane's origin (3-D point)."
},
2: {
"type": "array",
"doc": "The plane's X axis direction (3-D vector)."
},
3: {
"type": "array",
"doc": "The plane's Y axis direction (3-D vector)."
},
4: {
"type": "array",
"doc": "The plane's Z axis direction (3-D vector)."
},
5: {
"type": "null",
"doc": "If not successful, or on error."
},
},
"id_com": 776,
"params_com": {
0: {
"name": "vaLight",
"opt_or_req": "Required",
"type": "tagVARIANT",
},
},
"returns_com": "tagVARIANT",
}
|
UTF-8
|
Python
| false | false | 2,009 |
8,117,488,192,705 |
e6f0727674dfc89a2cf1f9c15ddeb9b34bf29558
|
f9e6d3b2a7afcffef172af6f606fc5991a8b0baa
|
/channelguide/ratings/tests.py
|
e12503402afc89cc97664ab703d4bc6c492eb00d
|
[
"AGPL-3.0-only",
"AGPL-3.0-or-later"
] |
non_permissive
|
kmshi/miroguide
|
https://github.com/kmshi/miroguide
|
a231298c1d4f7ed61d666e2d32ed212604fe79e9
|
e1e29a3a8e821bf27c0cad7df95944622c9b9c18
|
refs/heads/master
| 2020-05-15T21:58:18.733575 | 2011-05-30T07:53:15 | 2011-05-30T07:53:15 | 1,476,870 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Copyright (c) 2008-2009 Participatory Culture Foundation
# See LICENSE for details.
from channelguide.channels.models import Channel
from channelguide.ratings.models import Rating
from channelguide.testframework import TestCase
from django.conf import settings
import re
class RatingsTestBase(TestCase):
    """Shared fixture: one approved channel owner plus six approved users
    who rate the channel 0..5 (so the approved average is 3 over the five
    non-zero ratings — a 0 rating is stored as NULL)."""
    def setUp(self):
        TestCase.setUp(self)
        self.owner = self.make_user('owner')
        self.owner.get_profile().approved = True
        self.owner.get_profile().save()
        self.channel = self.make_channel(self.owner)
        self.users = []
        for rating in range(6):
            user = self.make_user('user%i' % rating)
            user.get_profile().approved = True
            user.get_profile().save()
            self.users.append(user)
            self.rate_channel(self.channel, user, rating)
        self.logout()
    def rate_channel(self, channel, user, rating):
        # Rate via the HTTP endpoint so the full request path is exercised.
        self.get_page('/feeds/%i/rate' % channel.id, login_as=user,
                data = {'rating': rating})
class RatingTestCase(RatingsTestBase):
    """Tests for the rating view and how ratings appear on detail pages."""
    def _find_average_in_page(self, page):
        """Extract the displayed average rating from a rendered page."""
        # FIX: raw strings — '\d' in a non-raw literal is an invalid escape
        # sequence (DeprecationWarning, and a syntax error in future Python).
        matches = re.findall(r'"Average Rating: (\d\.\d*)"', page.content)
        return float(matches[0])
    def _find_user_in_page(self, page):
        """Extract the current user's displayed rating from a rendered page."""
        matches = re.findall(r'"User Rating: (\d)"', page.content)
        return int(matches[0])
    def test_unauthenticated_details_has_average(self):
        """
        The unauthenticated details page should show the average rating.
        """
        url = self.channel.get_url()[len(settings.BASE_URL)-1:]
        page = self.get_page(url)
        self.assertEquals(self._find_average_in_page(page), 3)
    def test_unrated_user_details_has_average(self):
        """
        The details page for a user who hasn't rated the channel should show
        the average rating.
        """
        url = self.channel.get_url()[len(settings.BASE_URL)-1:]
        page = self.get_page(url, self.owner)
        self.assertEquals(self._find_average_in_page(page), 3)
    def test_rated_user_details_has_rating(self):
        """
        The details page for a user who has rated the channel should show
        their rating.
        """
        for user in self.users:
            url = self.channel.get_url()[len(settings.BASE_URL)-1:]
            page = self.get_page(url, user)
            rating = Rating.objects.get(user=user, channel=self.channel)
            page_rating = self._find_user_in_page(page)
            if rating.rating is None:
                self.assertEquals(page_rating, 0)
            else:
                self.assertEquals(page_rating, rating.rating)
    def test_rating_needs_login(self):
        """
        An anonymous user who tries to rate a channel should be redirected
        to the login page.
        """
        self.assertLoginRedirect('/feeds/%s/rate' % self.channel.id,
                                 login_as=None)
    def test_new_rating(self):
        """
        Going to the rating page should create a new rating in the database.
        """
        self.rate_channel(self.channel, self.owner, 5)
        self.assertEquals(Rating.objects.get(user=self.owner,
                                             channel=self.channel).rating,
                          5)
        rating = self.channel.rating
        self.assertAlmostEquals(rating.average, 3.333, 3)
        self.assertEquals(rating.count, 6)
        self.assertEquals(rating.total, 20)
    def test_rating_update(self):
        """
        Going to the rating page again should update the old rating.
        """
        self.rate_channel(self.channel, self.users[0], 3)
        self.rate_channel(self.channel, self.users[0], 5)
        self.assertEquals(Rating.objects.get(user=self.users[0],
                                             channel=self.channel).rating,
                          5)
        rating = self.channel.rating
        self.assertAlmostEquals(rating.average, 3.333, 3)
        self.assertEquals(rating.count, 6)
        self.assertEquals(rating.total, 20)
    def test_confirming_a_user_updates_table(self):
        """
        When a user is approved, their ratings should be added to the
        generated ratings table.
        """
        user = self.make_user('foo')
        self.rate_channel(self.channel, user, 5)
        self.assertEquals(self.channel.rating.average, 3)
        self.assertEquals(self.channel.rating.count, 5)
        url = user.get_profile().generate_confirmation_url()
        self.get_page(url[len(settings.BASE_URL_FULL)-1:])
        c = Channel.objects.get(pk=self.channel.pk)
        self.assertAlmostEquals(c.rating.average, 3.333, 3)
        self.assertEquals(c.rating.count, 6)
class GeneratedRatingsTestCase(RatingsTestBase):
    """Tests that the generated/aggregate rating columns ignore ratings from
    unapproved users and NULL (zero-submitted) ratings."""
    def test_get_average(self):
        """
        Channel.query().join('rating').average should return the average rating
        for the channel.
        """
        self.assertEquals(float(self.channel.rating.average), 3)
    def test_get_average_ignores_unapproved(self):
        """
        Channel.rating.average should ignore ratings from users who are not
        approved.
        """
        new_user = self.make_user('foo')
        self.rate_channel(self.channel, new_user, 5)
        self.assertEquals(float(self.channel.rating.average), 3)
        # Re-rating from the same unapproved user must not count either.
        self.rate_channel(self.channel, new_user, 4)
        self.assertEquals(float(self.channel.rating.average), 3)
    def test_get_average_ignores_null(self):
        """
        Channel.rating.average should ignore NULL ratings.
        """
        new_user = self.make_user('foo')
        new_user.get_profile().approved = 1
        new_user.get_profile().save()
        # A rating of 0 is stored as NULL and excluded from the average.
        self.rate_channel(self.channel, new_user, 0)
        self.assertEquals(float(self.channel.rating.average), 3)
    def test_get_count(self):
        """
        Channel.query().join('rating').count should return the number of
        ratings for the channel.
        """
        self.assertEquals(self.channel.rating.count, 5)
    def test_get_count_ignores_unapproved(self):
        """
        Channel.rating.count should ignore ratings from users who are not
        approved.
        """
        new_user = self.make_user('foo')
        self.rate_channel(self.channel, new_user, 5)
        self.assertEquals(self.channel.rating.count, 5)
    def test_get_count_ignores_null(self):
        """
        Channel.rating.count should ignore NULL ratings.
        """
        new_user = self.make_user('foo')
        new_user.get_profile().approved = 1
        new_user.get_profile().save()
        self.rate_channel(self.channel, new_user, 0)
        self.assertEquals(float(self.channel.rating.count), 5)
|
UTF-8
|
Python
| false | false | 2,011 |
11,802,570,146,632 |
20ca2b1f86161273b9c0f2d6fee9ae5723402339
|
f0176f99c83b524e301056ba1c1b6d24156d235f
|
/bilview.py
|
86906a02baecb6fa53883e31e2ae68780218d3a3
|
[] |
no_license
|
g-sharma/bilview
|
https://github.com/g-sharma/bilview
|
d3fcdc1973ca32a5fb584e0a60436b9bfca75625
|
aefb28b87414aeb64162d0ca52460827061d193b
|
refs/heads/master
| 2021-01-23T03:08:04.290869 | 2013-08-30T05:49:05 | 2013-08-30T05:49:05 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/bin/python
#***********************************************************************
# Written By Gagan Sharma :
# August 10th 2013, Department of Radiology, The University of Melbourne
# Australia
# To Do, add slider to view Perfusion data in a more scientific way.
#***********************************************************************
"""
Read DICOM Images and open it in a viewer.
Usage: python bilview.py -input directory-name
"""
import numpy as np
import scipy
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph as pg
import dicom
import os
import sys
# check command line arguments make sense
# NOTE(review): accepts 1 or 2 extra args but only sys.argv[1] is ever used.
if not 1 < len(sys.argv) < 4:
    print(__doc__)
    sys.exit()
app = QtGui.QApplication([])
## Create window with two ImageView widgets
win = QtGui.QMainWindow()
win.resize(512,512)
win.setWindowTitle('BILVIEW')
cw = QtGui.QWidget()
win.setCentralWidget(cw)
l = QtGui.QGridLayout()
cw.setLayout(l)
imv1 = pg.ImageView()
l.addWidget(imv1, 0, 0)
win.show()
# Line ROI across the image; its region callback is wired up below.
roi = pg.LineSegmentROI([[10, 64], [120,64]], pen='r')
fileList=[]
rootdir= sys.argv[1]
if os.path.isdir(rootdir):
    # Collect every file directly under rootdir.
    # NOTE(review): os.walk recurses, but paths are joined against rootdir,
    # not root — files in subdirectories would get wrong paths. Confirm the
    # input is expected to be a flat directory of DICOM slices.
    for root,dir,files in os.walk(rootdir):
        for ieach in files:
            fileList.append(os.path.join(rootdir,ieach))
    # Pre-size the volume from the first slice's dimensions.
    ds=dicom.read_file(fileList[0])
    imagedata=np.zeros((ds.pixel_array.shape[0],ds.pixel_array.shape[1],len(fileList)))
    ipos=np.zeros(len(fileList))
    for each in range(len(fileList)):
        ds=dicom.read_file(fileList[each])
        imagedata[:,:,each]=ds.pixel_array
        ipos[each]=ds.InstanceNumber
else:
    print "Please provide directory path...."
    exit(1)
# This is the key. Got it from Brad and Soren's code.Pretty Kewl..
# Reorder slices by DICOM InstanceNumber so the volume is in scan order.
ipos=ipos.argsort()
imagedata=imagedata[:,:,ipos]
#############################################################
data=(imagedata.transpose())
def update():
    # ROI change handler. NOTE(review): d2 is computed but never displayed —
    # leftover from a removed second ImageView (imv2).
    global data, imv1#, imv2
    d2 = roi.getArrayRegion(data, imv1.imageItem, axes=(1,2))
roi.sigRegionChanged.connect(update)
## Display the data
imv1.setImage(data)
imv1.setHistogramRange(-0.01, 0.01)
imv1.setLevels(-0.003, 0.003)
update()
## Start Qt event loop unless running in interactive mode.
if __name__ == '__main__':
    import sys
    if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
        QtGui.QApplication.instance().exec_()
|
UTF-8
|
Python
| false | false | 2,013 |
6,863,357,745,689 |
f083811c39bc0a8689398c7634d8b2d826d4e013
|
153ecce57c94724d2fb16712c216fb15adef0bc4
|
/z3c.searcher/tags/0.5.2/src/z3c/searcher/interfaces.py
|
322e00a94ea514c3d24597b793bcb7497c502e50
|
[
"ZPL-2.1"
] |
permissive
|
pombredanne/zope
|
https://github.com/pombredanne/zope
|
10572830ba01cbfbad08b4e31451acc9c0653b39
|
c53f5dc4321d5a392ede428ed8d4ecf090aab8d2
|
refs/heads/master
| 2018-03-12T10:53:50.618672 | 2012-11-20T21:47:22 | 2012-11-20T21:47:22 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
##############################################################################
#
# Copyright (c) 2007 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""
$Id:$
"""
__docformat__ = "reStructuredText"
import zope.i18nmessageid
import zope.interface
import zope.schema
from zope.schema import vocabulary
from zope.session.interfaces import ISession
from zope.location.interfaces import ILocation
from z3c.indexer import interfaces
from z3c.indexer import query
from z3c.form.interfaces import IForm
from z3c.table.interfaces import ITable
_ = zope.i18nmessageid.MessageFactory('z3c')
# Session data key under which search filters are stored.
# NOTE(review): "intefaces" is a typo, but the string is a persistent session
# key — changing it would orphan existing session data, so it is kept as-is.
SEARCH_SESSION = u'z3c.search.intefaces.ISearchSession'
# Default filter key used when callers do not supply their own discriminator.
SEARCH_SESSION_FILTER_KEY = 'default'
# Boolean connectors used to chain criterium queries together.
CONNECTOR_OR = 'OR'
CONNECTOR_AND = 'AND'
CONNECTOR_NOT = 'NOT'
# Sentinel meaning "no value supplied" (distinguishes from None).
NOVALUE = object()
class ISearchSession(ISession):
    """Search session supporting an API for filter management.

    Filters contain the criterium rows and are stored persistently.
    The methods support a key argument. This could be a context reference key
    given from the IntId utility or some other discriminator. If we do not
    supply a key, the string ``default`` is used.
    """
    def getFilter(name, key=SEARCH_SESSION_FILTER_KEY):
        """Return search filter by name."""
    def getFilters(name):
        """Return a list of search filters."""
    def addFilter(name, searchFilter, key=SEARCH_SESSION_FILTER_KEY):
        """Add search filter."""
    def removeFilter(name, key=SEARCH_SESSION_FILTER_KEY):
        """Remove search filter."""
# Vocabulary backing ISearchCriterium.connectorName: how a criterium is
# combined with the previous result (or / and / not).
connectorVocabulary = vocabulary.SimpleVocabulary([
    vocabulary.SimpleTerm(CONNECTOR_OR, title=_('or')),
    vocabulary.SimpleTerm(CONNECTOR_AND, title=_('and')),
    vocabulary.SimpleTerm(CONNECTOR_NOT, title=_('not')),
    ])
class ISearchCriterium(ILocation):
    """A search criterium of a piece of data.

    One row of a search filter: which index to query, the operator used,
    how it connects to the previous result, and the user-entered value.
    """
    __name__ = zope.schema.TextLine(
        title=_('Name'),
        description=_('The locatable criterium name.'),
        required=True)
    label = zope.schema.TextLine(
        title=_('Label'),
        description=_('Label used to present the criterium.'),
        required=True)
    operatorLabel = zope.schema.TextLine(
        title=_('Operator label'),
        description=_('The operator label.'),
        required=True)
    indexOrName = zope.interface.Attribute("Index or index name.")
    operator = zope.schema.Object(
        title=_('Operator'),
        description=_('The operator used for the chain the queries.'),
        schema=interfaces.IQuery,
        required=True)
    connectorName = zope.schema.Choice(
        title=_('Connector Name'),
        description=_('The criterium connector name.'),
        vocabulary=connectorVocabulary,
        default=CONNECTOR_OR,
        required=True)
    value = zope.schema.TextLine(
        title=_('Search Query'),
        required=True)
    def search(searchQuery):
        """Generate chainable search query."""
class ITextCriterium(ISearchCriterium):
    """Marker interface for a full-text search criterium implementation."""
class ISearchCriteriumFactory(zope.interface.Interface):
    """A factory for the search criterium."""
    title = zope.schema.TextLine(
        title=_('Title'),
        description=_('A human-readable title of the criterium.'),
        required=True)
    # NOTE(review): title=_('Int') looks like a copy-paste slip for
    # _('Weight'); kept as-is because it is translated UI text.
    weight = zope.schema.Int(
        title=_('Int'),
        description=_('The weight/importance of the factory among all '
                      'factories.'),
        required=True)
    def __call__():
        """Generate the criterium."""
class ISearchFilter(zope.interface.Interface):
    """Search criteria for position search.

    Holds an ordered list of criteria and turns them into one query object.
    """
    criteria = zope.interface.Attribute(
        """Return a sequence of selected criteria.""")
    criteriumFactories = zope.schema.List(
        title=_('Criteria factories'),
        description=_('The criteria factories.'),
        value_type=zope.schema.Object(
            title=_('Criterium factory'),
            description=_('The criterium factory.'),
            schema=ISearchCriteriumFactory,
            required=True),
        default=[])
    def clear():
        """Clear the criteria."""
    def createCriterium(name, value=NOVALUE):
        """Create a criterium by factory name."""
    def addCriterium(criterium):
        """Add a criterium by name at the end of the list."""
    def createAndAddCriterium(name, value=NOVALUE):
        """Create and add a criterium by name at the end of the list."""
    def removeCriterium(criterium):
        """Remove a criterium from the list."""
    def getDefaultQuery():
        """Get a query that returns the default values.
        Override this method in your custom search filter if needed.
        This query get used if ``NO`` criterias are available.
        """
    def getAndQuery():
        """Return a ``And`` query which get used by default or None.
        Override this method in your custom search filter if needed.
        This query get used if ``one or more`` criterias are available.
        """
    def getNotQuery():
        """Return a ``Not`` query which get used as starting query or None.
        Override this method in your custom search filter if needed.
        This query get used if ``one or more`` criterias are available.
        """
    def generateQuery():
        """Generate a query object."""
# Marker interfaces used to register views/adapters for the search UI parts.
class IFilterForm(IForm):
    """Filter form."""
class ISearchForm(IForm):
    """Search form."""
class ISearchTable(ITable):
    """Search table."""
|
UTF-8
|
Python
| false | false | 2,012 |
9,405,978,400,024 |
d05a003aabbb24fa3b68e9a4660c97d9a4b4b9b5
|
f164f6752a39786c984ce208b61bfa0c046d81c9
|
/emailregistration/backends/registration/default_urls.py
|
dd098f40c2a6b0c222afe9c4535e64d53a3f06ad
|
[] |
no_license
|
guyf/wesharea-project
|
https://github.com/guyf/wesharea-project
|
2598f00a1ae03de31b811ede6e2fd919f3e6cd7e
|
9a139a714010157700243ab91193dbc54655b50a
|
refs/heads/master
| 2020-04-10T03:52:02.084276 | 2012-01-25T08:27:07 | 2012-01-25T08:27:07 | 3,241,050 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.conf.urls.defaults import *
from emailregistration.forms import EmailRegistrationForm
urlpatterns = patterns('',
    # Registration view wired to the email-based backend and form; all other
    # registration URLs fall through to django-registration's defaults below.
    url(r'^register/$', 'registration.views.register', {'backend':'emailregistration.backends.registration.DefaultEmailRegBackend','form_class': EmailRegistrationForm}, name='registration_register'),
    #TODO: should implement a default view for the below - in the meantime it is in bbm
    #url(r'^invitation/(?P<user_id>\d+)/(?P<activation_key>\w+)/$', registration_invitation, name='registration_invitation'),
    (r'', include('registration.backends.default.urls')),
)
|
UTF-8
|
Python
| false | false | 2,012 |
6,597,069,786,990 |
e4c9c4b0c9c9f3c3217636ae096b05c64e2ae70f
|
a772644b12709770a0a8bb84d86e7c8815af835b
|
/aquarius/objects/BookType.py
|
cdf597414567d34cef7cd04b2ac8579a6132eb01
|
[
"LGPL-2.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-or-later",
"GPL-3.0-or-later",
"GPL-1.0-or-later",
"GPL-3.0-only"
] |
non_permissive
|
jeroanan/Aquarius
|
https://github.com/jeroanan/Aquarius
|
3c5b5e0c44400cf9dff53c8ce7317c3680968155
|
6b4f0406ad6e04f78cfb5ac78558d4e16c7333e2
|
refs/heads/master
| 2016-09-06T18:55:26.030068 | 2014-08-26T00:20:01 | 2014-08-26T00:20:01 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
class BookType(object):
    """Value object pairing a book format name with its MIME type."""

    # Class-level defaults: fresh instances read as empty strings until a
    # property setter stores an instance-level value.
    __format = ""
    __mimetype = ""

    def _get_format(self):
        return self.__format

    def _set_format(self, value):
        self.__format = value

    # The book format identifier.
    Format = property(_get_format, _set_format)

    def _get_mimetype(self):
        return self.__mimetype

    def _set_mimetype(self, value):
        self.__mimetype = value

    # The MIME type string associated with the format.
    MimeType = property(_get_mimetype, _set_mimetype)
|
UTF-8
|
Python
| false | false | 2,014 |
15,710,990,381,840 |
db5dee4a4d69d25271624c386baeedf2107ec841
|
7388c691cc43adb8fb8cb81068335846e22be8d7
|
/app.py
|
ef6fa1679a5e6066a9a0aeb3c3d3c2f8c92b1f84
|
[
"MIT"
] |
permissive
|
lihuimail/flask-wp
|
https://github.com/lihuimail/flask-wp
|
31f69c8b6c8def2ec5279f56bd48eecf96b7b4e0
|
260f9fc474a3c3faf1c6e12f701895a29aa1446b
|
refs/heads/master
| 2021-01-18T01:49:48.584996 | 2014-07-07T19:40:57 | 2014-07-07T19:40:57 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from flask import Flask, render_template, request, url_for, redirect
from flaskwp.wordpress import WordpressAPI
from werkzeug.exceptions import abort
app = Flask(__name__)
# Dev-server settings and the WordPress JSON API endpoint this front-end
# proxies; app.run(**config['server']) consumes the 'server' section.
config = {
    'server': {
        'host': '0.0.0.0',
        'port': 3001,
        'debug': True
    },
    'cms': {
        'base': "http://cms.ridgestreet.se:3000/api"
    }
}
# Single module-level client used by every route handler below.
wordpress = WordpressAPI(api_root = config['cms']['base'])
@app.route("/")
def home():
posts = wordpress.get_recent_posts()
pages = wordpress.get_page_index()
page = wordpress.get_page('home')
return render_template(
'home.html',
pages = pages['pages'],
page = page['page'],
posts = posts['posts']
)
@app.route("/<slug>")
def get_page(slug):
page = wordpress.get_page(slug)
if not page:
abort(404)
return render_template('page.html', page = page['page'])
@app.route("/search")
def get_search_results():
query = request.args.get("q")
posts = wordpress.get_search_results(query)
if not posts:
abort(404)
return render_template('posts.html', posts = posts['posts'], q = query)
@app.route("/comment", methods=['POST'])
def post_comment():
email = request.form.get("email")
url = request.form.get("url")
name = request.form.get("name")
comment = request.form.get("comment")
post_id = request.form.get("post")
wordpress.submit_comment(post_id, name, email, comment, url)
post = wordpress.get_post(post_id = post_id)
return redirect(url_for('get_custom_post', post_type = 'blog', slug = post['slug']))
@app.route("/category/<category>")
def get_category(category):
posts = wordpress.get_category_posts(category)
return render_template('posts.html', posts = posts['posts'])
@app.route("/tag/<tag>")
def get_tag(tag):
posts = wordpress.get_tag_posts(tag)
return render_template('posts.html', posts = posts['posts'])
@app.route("/blog/<slug>")
def get_blog_post(slug):
post = wordpress.get_post(slug)
if not post:
abort(404)
return render_template('post.html', post = post['post'])
@app.route("/author/<slug>")
def get_author(slug):
posts = wordpress.get_author_posts(slug)
if not posts:
abort(404)
return render_template('posts.html', posts = posts['posts'])
@app.route("/case")
def get_cases():
cases = wordpress.get_posts(post_type='case')
if not cases:
abort(404)
return render_template('posts.html', posts = cases['posts'], post_type = 'case')
@app.route("/<post_type>/<slug>")
def get_custom_post(post_type, slug):
post = wordpress.get_post(slug, post_type)
if not post:
abort(404)
return render_template('post.html', post = post['post'])
@app.errorhandler(404)
def page_not_found(error):
    """Render the custom 404 template for any aborted/missing route."""
    return render_template('404.html'), 404
if __name__ == "__main__":
app.run(**config['server'])
|
UTF-8
|
Python
| false | false | 2,014 |
10,797,547,805,191 |
976d4a6cd72d8818cc85cd58db7a2888bfbd5ba5
|
0c1abff154523aa341a75884cb6c39d21fdf1e64
|
/Execution/Cert.py
|
65bef29173cb1dc5e3e6df5e6fbf7c1a5ab5ee9c
|
[] |
no_license
|
yxakuo/hacloud-whu
|
https://github.com/yxakuo/hacloud-whu
|
779e67fcd8dd927fe5b78f10d19f58568911ad6e
|
69969208ba3d29ec7eaa29c0128085365551db78
|
refs/heads/master
| 2021-01-18T14:18:47.450303 | 2013-01-31T09:02:26 | 2013-01-31T09:02:26 | 33,712,128 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
class Cert():
    """Holds the defaults for a Keystone auth request: endpoint URL,
    HTTP headers and the JSON credentials body.

    Setters return the stored value so calls can be chained/inspected.
    """

    # Class-level defaults; setters store instance-level overrides.
    params = '{"auth":{"tenantName":"admin", "passwordCredentials":{"username":"admin", "password": "admin"}}}'
    headers = {"Content-Type": "application/json"}
    url = '127.0.0.1:5000'

    def get_url(self):
        return self.url

    def set_url(self, url):
        self.url = url
        return self.url

    def get_headers(self):
        return self.headers

    def set_headers(self, headers):
        self.headers = headers
        return headers

    def get_params(self):
        return self.params

    def set_params(self, params):
        self.params = params
        return params
|
UTF-8
|
Python
| false | false | 2,013 |
7,052,336,320,727 |
2984b3260fe08a39eab7fce065d71c68ca1900e3
|
c93a831b886f959de471ec4c88e009835c3975c8
|
/urls.py
|
3ebdc9c4c86f901e76d21d7c688652dfdc753ecb
|
[] |
no_license
|
webcoders/django-webcoders
|
https://github.com/webcoders/django-webcoders
|
4dac01b283427e413866ecd5dc352ba66a89acb4
|
62c108371ef2fb19363ca745838f385c1482d331
|
refs/heads/master
| 2016-09-06T01:33:04.549584 | 2014-07-19T12:20:43 | 2014-07-19T12:20:43 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.conf.urls import patterns, include, url
urlpatterns = patterns('',
    # url(r'^$', 'topship.views.home', name='home'),
    # url(r'^topship/', include('topship.foo.urls')),
    # BUG FIX: Django matches URL patterns against the path with the leading
    # slash already stripped, so a pattern starting with '/' can never match.
    url(r"^widgets/address/", 'webcoders.views.address.get', name='address'),
)
#urlpatterns += staticfiles_urlpatterns()
|
UTF-8
|
Python
| false | false | 2,014 |
10,685,878,654,685 |
7d3af986c84772d0c835d4301b1c5d0477f4404a
|
915adb74c43089de08988f9eef4294459313be52
|
/zadanie.py
|
a5507deb1e8e4d77c21843ac271257cd9e0427ff
|
[] |
no_license
|
Morgaroth/msi_lab2
|
https://github.com/Morgaroth/msi_lab2
|
ce5dd452ee7f395d791c30f6a437f1175abaa17b
|
f5a2da2525c4ee69a229f668b427cfb75b1fd2b8
|
refs/heads/master
| 2021-01-23T15:30:05.715880 | 2014-11-04T23:10:33 | 2014-11-04T23:10:33 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
from itertools import cycle
import numpy
from pybrain import LSTMLayer
import matplotlib.pyplot as plt
from pybrain.datasets import SequentialDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised import RPropMinusTrainer
from sys import stdout
from numpy import genfromtxt
def train(d, cycles=100, epochs_per_cycle=7):
    """Train an LSTM network to predict the next value of sequence *d*.

    Returns (net, train_errors): the trained network and the test error
    recorded after each training cycle. (Python 2 code: uses xrange.)
    """
    ds = SequentialDataSet(1, 1)
    net = buildNetwork(1, 5, 1, hiddenclass=LSTMLayer, outputbias=False, recurrent=False)
    # Pair each sample with its successor; cycle() keeps zip from stopping
    # early. NOTE(review): the last sample gets paired with d[1] (the cycle
    # restarts), not wrapped to d[0] — confirm this is intended.
    for sample, next_sample in zip(d, cycle(d[1:])):
        ds.addSample(sample, next_sample)
    trainer = RPropMinusTrainer(net, dataset=ds)
    train_errors = [] # save errors for plotting later
    for i in xrange(cycles):
        trainer.trainEpochs(epochs_per_cycle)
        train_errors.append(trainer.testOnData())
        stdout.flush()
    return net, train_errors
def predict(d, window_size=30):
elems = []
for i in xrange(window_size + 1, len(d)):
real_value = d[i]
predicted_values = []
for e in xrange(0, 30):
net30, err30 = train(d[i - window_size - 1:i], cycles=30)
predicted = net30.activate((real_value,))[0]
predicted_values.append(predicted)
predicted_value30 = sum(predicted_values) * 1.0 / len(predicted_values)
predicted_values = []
for e in xrange(0, 30):
net100, err100 = train(d[i - window_size - 1:i], cycles=100)
predicted = net100.activate((real_value,))[0]
predicted_values.append(predicted)
predicted_value100 = sum(predicted_values) * 1.0 / len(predicted_values)
print "real=", real_value, "predicted30=", predicted_value30, "predicted100=", predicted_value100
elems.append([i - window_size, real_value, predicted_value30, predicted_value100])
return elems
# Load the exchange-rate series: keep only the second CSV column as the data.
data = genfromtxt('euro.csv', delimiter=',')
data = [elem[1] for elem in data]
print "data", data
# Run the sliding-window prediction and dump the rows to CSV.
elems = predict(data)
numpy.savetxt("with_meaning.csv", elems, delimiter=";", fmt="%s")
|
UTF-8
|
Python
| false | false | 2,014 |
18,923,625,929,969 |
5657476a394ef7cee1eba0ad43ea6df0cd2a8370
|
019ae2d5ad7a444dd0dd5eba076b4fc0ac481d45
|
/hdlmake/vhdl_parser.py
|
3408580142e7d74b64f559a514711d987eb6fddc
|
[
"GPL-3.0-only"
] |
non_permissive
|
JamesHyunKim/myhdl
|
https://github.com/JamesHyunKim/myhdl
|
2914cea15c622c9968fe190c50710ccfbfc02d90
|
aabe6c840ed4d8414dd62bb67781e10672184103
|
refs/heads/master
| 2020-05-17T14:28:51.793698 | 2014-10-10T09:14:54 | 2014-10-10T09:14:54 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013 CERN
# Author: Tomasz Wlostowski ([email protected])
#
# This file is part of Hdlmake.
#
# Hdlmake is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Hdlmake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Hdlmake. If not, see <http://www.gnu.org/licenses/>.
from new_dep_solver import DepParser
import logging
def _remove_gaps(buf, delims, gap_chars, lower_strings=False):
da = {}
for d in delims:
da[d] = False
prev_is_gap = False
buf2 = ""
lines = []
for c in buf:
for d in delims:
if c == d:
da[d] = not da[d]
within_string = any(da.values()) and not (c in delims)
if not within_string:
if(c in gap_chars):
if(not prev_is_gap):
prev_is_gap = True
buf2 += " "
else:
prev_is_gap = False
buf2 += c
if c == ";" or c == "\n":
lines.append(buf2)
buf2 = ""
else:
buf2 += c
prev_is_gap = False
return lines
class VHDLParser(DepParser):
    """Dependency parser for VHDL sources.

    Extracts provide/use relations (packages, entities and component
    instantiations) from a source file and records them on the DepFile.
    """
    def parse(self, dep_file):
        from dep_file import DepRelation
        if dep_file.is_parsed:
            return
        logging.info("Parsing %s" % dep_file.path)
        # stage 1: strip "--" comments (quote-aware, so "--" inside a
        # string literal survives).  Use a context manager so the file
        # handle is closed (it previously leaked).
        buf = ""
        with open(dep_file.file_path, "r") as content:
            for l in content.readlines():
                ci = l.find("--")
                if ci == 0:
                    continue
                while ci > 0:
                    quotes = l[:ci].count('"')  # ignore comments in strings
                    if quotes % 2 == 0:
                        # BUGFIX: was l[:ci-1], which also dropped the last
                        # code character before the comment marker; keep a
                        # newline so adjacent lines don't fuse into one token.
                        l = l[:ci] + "\n"
                        break
                    ci = l.find("--", ci + 1)
                buf += l
        # stage 2: collapse whitespace runs to one space, lowercase the
        # code (VHDL is case-insensitive), drop string/char literal
        # contents, and split into ';'-terminated statements.
        buf2 = ""
        string_literal = char_literal = False
        prev_is_gap = False
        gap_chars = " \r\n\t"
        lines = []
        for c in buf:
            if c == '"' and not char_literal:
                string_literal = not string_literal
            if c == "'" and not string_literal:
                char_literal = not char_literal
            within_string = (string_literal or char_literal) and (c != '"') and (c != "'")
            if not within_string:
                if c in gap_chars:
                    if not prev_is_gap:
                        prev_is_gap = True
                        buf2 += " "
                else:
                    prev_is_gap = False
                    buf2 += c.lower()
                    if c == ";":
                        lines.append(buf2)
                        buf2 = ""
            else:
                # Literal contents are deliberately discarded.
                prev_is_gap = False
        import re
        # Raw strings so the regex escapes are not interpreted by Python.
        patterns = {
            "use": r"^ *use +(\w+) *\. *(\w+) *\. *\w+ *;",
            "entity": r"^ *entity +(\w+) +is +(port|generic)",
            "package": r"^ *package +(\w+) +is",
            "arch_begin": r"^ *architecture +(\w+) +of +(\w+) +is +",
            "arch_end": r"^ *end +(\w+) +;",
            "instance": r"^ *(\w+) *\: *(\w+) *(port|generic) *map"
        }
        # Compile once; a comprehension works on both Python 2 and 3,
        # unlike the tuple-unpacking lambdas used before.
        compiled_patterns = [(name, re.compile(regex))
                             for name, regex in patterns.items()]
        within_architecture = False
        for l in lines:
            what = g = None
            for name, regex in compiled_patterns:
                m = regex.match(l)
                if m is not None:
                    what, g = name, m
                    break
            if g is None:
                continue
            if what == "use":
                dep_file.add_relation(DepRelation(g.group(1) + "." + g.group(2), DepRelation.USE, DepRelation.PACKAGE))
            elif what == "package":
                dep_file.add_relation(DepRelation(g.group(1),
                                                  DepRelation.PROVIDE,
                                                  DepRelation.PACKAGE))
                dep_file.add_relation(DepRelation("%s.%s" % (dep_file.library, g.group(1)),
                                                  DepRelation.PROVIDE,
                                                  DepRelation.PACKAGE))
            elif what == "entity":
                dep_file.add_relation(DepRelation(g.group(1),
                                                  DepRelation.PROVIDE,
                                                  DepRelation.ENTITY))
                dep_file.add_relation(DepRelation("%s.%s" % (dep_file.library, g.group(1)),
                                                  DepRelation.PROVIDE,
                                                  DepRelation.ENTITY))
            # A second, identical "package" elif used to sit here; it was
            # unreachable dead code and has been removed.
            elif what == "arch_begin":
                arch_name = g.group(1)
                within_architecture = True
            elif what == "arch_end" and within_architecture and g.group(1) == arch_name:
                within_architecture = False
            elif what == "instance" and within_architecture:
                # Component instantiation inside an architecture body.
                dep_file.add_relation(DepRelation(g.group(2),
                                                  DepRelation.USE,
                                                  DepRelation.ENTITY))
        dep_file.is_parsed = True
|
UTF-8
|
Python
| false | false | 2,014 |
10,307,921,515,526 |
0e9b3988b4cda5e71038fd28d881ab00a3d5bcdf
|
2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8
|
/pardus/playground/eren/2011/desktop/gnome/gdm/actions.py
|
2d5ad53d403e7bfd516e01c548d3d141bb2ad126
|
[] |
no_license
|
aligulle1/kuller
|
https://github.com/aligulle1/kuller
|
bda0d59ce8400aa3c7ba9c7e19589f27313492f7
|
7f98de19be27d7a517fe19a37c814748f7e18ba6
|
refs/heads/master
| 2021-01-20T02:22:09.451356 | 2013-07-23T17:57:58 | 2013-07-23T17:57:58 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 2
# See the file http://www.gnu.org/copyleft/gpl.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
def setup():
    """Configure GDM: writable state under /var, shadow-based auth,
    PAM config under /etc; static libs and scrollkeeper are disabled."""
    autotools.configure("--localstatedir=/var \
                        --enable-static=no \
                        --enable-authentication-scheme=shadow \
                        --disable-scrollkeeper \
                        --with-pam-prefix=/etc")
def build():
    """Build with the default make target."""
    autotools.make()
def install():
    """Install into the package image, ship docs, and drop the GNOME
    session file (this build targets Xfce only)."""
    autotools.rawInstall("DESTDIR=%s" % get.installDIR())
    pisitools.dodoc("AUTHORS", "ChangeLog", "NEWS", "README", "TODO")
    # No gnome yet, this package just for xfce, no need to keep gnome desktop file
    pisitools.removeDir("/usr/share/xsessions")
|
UTF-8
|
Python
| false | false | 2,013 |
10,187,662,472,520 |
5a4adfbd576fd9499015aa93a7e04cd924070ebe
|
7c20b6748e6fc16ddf63c63cc5717de4ebe3df8a
|
/code/linear_regression.py
|
9b7420b9d4b54d0d09e651ea6b70553d4d82377d
|
[] |
no_license
|
eggpi/mc886
|
https://github.com/eggpi/mc886
|
8e188c6adbfdc1b3f38b13a4cf700f6819d0af5d
|
25ce5c067d9b50cb414443b40fa920d16d49c49a
|
refs/heads/master
| 2020-04-12T01:01:11.024719 | 2013-11-24T18:55:25 | 2013-11-24T18:55:25 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import random
import pprint
M = 5 # dimension, number of weights
N = 100 # number of measurements
# make a linear function given weights
def make_f(weights):
    """Build a linear hypothesis: x -> dot(weights, x).

    Extra components of either sequence are ignored (zip truncates).
    """
    def linear(x):
        return sum(w * xi for w, xi in zip(weights, x))
    return linear
# make a random set of data points to be fed to y,
# that is, a vector of M numbers, the first being
# 1.0 by definition.
def make_x():
    """Build a random data point: bias term 1.0 followed by M-1 distinct
    integers drawn from [0, 1000)."""
    features = random.sample(range(1000), M - 1)
    return [1.0] + features
def J(f):
    """Halved mean squared error of hypothesis ``f`` over the global data."""
    total = 0
    for features, target in data:
        residual = f(features) - target
        total += residual ** 2
    return total / (2 * N)
# the partial derivative of function J(w) for the i-th weight,
# f is the current estimation function (that is, make_f(w))
def DJ(f, i):
    """Partial derivative of J with respect to the i-th weight for
    hypothesis ``f``, averaged over the global data."""
    total = 0
    for features, target in data:
        total += (f(features) - target) * features[i]
    return total / N
# calculate the norm of vector v
def norm(v):
    """Euclidean norm of vector ``v`` (0.0 for an empty vector)."""
    total = 0
    for component in v:
        total += component ** 2
    return total ** 0.5
# create the target function and data points
target = []
for i in range(M):
    target.append(random.random())
y = make_f(target)
x = []
for i in range(N):
    x.append(make_x())
# min-max normalization
# NOTE(review): mi/ma are the min and max of the vector *norms*, yet each
# component of every vector is rescaled by that norm range — confirm this
# is the intended normalization rather than per-feature min-max.
xnorms = [norm(v) for v in x]
mi, ma = min(xnorms), max(xnorms)
data = []
for v in x:
    v = [(e - mi) / (ma - mi) for e in v]
    data.append((v, y(v)))
print "Data: "
pprint.pprint(data)
# initial guess at parameters
learned = []
for i in range(M):
    learned.append(random.random())
alpha = 0.0001
err = float('inf')
# Batch gradient descent: step each weight against its partial derivative
# until the cost J drops below the tolerance.
while err > 0.0001:
    print "Starting iteration!"
    print " :: learned = {0}".format(learned)
    print " :: J(learned) = {0}".format(J(make_f(learned)))
    # Freeze the current weights so all M partials use the same hypothesis.
    f = make_f(tuple(learned))
    for i in range(M):
        learned[i] = learned[i] - alpha * DJ(f, i)
    err = J(make_f(learned))
    print " :: learned = {0}".format(learned)
    print " :: err = {0}".format(err)
    print "End iteration!"
print "Learned is: {0}".format(learned)
print "Target was: {0}".format(target)
|
UTF-8
|
Python
| false | false | 2,013 |
15,805,479,670,674 |
02678f076cf8a7d9214c4ffd6f550f37dc08b194
|
3198c735b63e2abd89cebe76d925c2fecf1904e7
|
/basic/mul.py
|
fd3825a4bef641e0225e563e4f08e3e90600a852
|
[] |
no_license
|
niyaspkd/python
|
https://github.com/niyaspkd/python
|
d871a9fc54578e6ed2fdc4d038e0573c1854a9fd
|
dd69450447e9ea3ba950ecf5988ac9db809451eb
|
refs/heads/master
| 2020-06-26T18:08:05.391793 | 2014-03-28T05:39:18 | 2014-03-28T05:39:18 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def mul(n):
    """Return the product of the decimal digits of a positive integer.

    Generalized from the original three-branch version, which only
    handled 1-3 digit numbers (and relied on Python 2 integer division;
    its `while` loop always returned on the first pass).  Returns None
    for n <= 0, matching the old implicit behaviour.
    """
    if n <= 0:
        return None
    product = 1
    for digit in str(n):
        product *= int(digit)
    return product
print mul(1)
|
UTF-8
|
Python
| false | false | 2,014 |
17,755,394,826,253 |
d19a0b26f97446129359a84fe2b87a7339146abf
|
b1cf5f01bed003c0863eff365b7ad5037c067567
|
/core/views.py
|
dc1f63d8a8fa9caa8dd1056d8eaddc7bc7b483c1
|
[] |
no_license
|
yati-sagade/five
|
https://github.com/yati-sagade/five
|
c222360efe923f5eced6769073a2e951f471b458
|
e5bf1d6b7cf12ae6f05a6dca29af05e7c662e6a5
|
refs/heads/master
| 2021-01-01T19:15:48.674422 | 2014-06-01T06:32:25 | 2014-06-01T06:32:25 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import json
import random
from django.db import IntegrityError
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.views.generic import View, TemplateView
from django.views.decorators.csrf import csrf_exempt, ensure_csrf_cookie
from django.views.decorators.http import require_http_methods
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from .forms import UserCreationForm
from .places import nearby_search
from .models import Place, UserProfile, Notification
from .utilities import random_password, json_response, to_dict, get_post_data
from .utilities import http_basic_auth
@require_http_methods(['POST'])
def logout_view(request):
    """End the current session; JSON for AJAX callers, redirect otherwise."""
    logout(request)
    if not request.is_ajax():
        return redirect('five-home')
    return json_response({'success': True})
@require_http_methods(['POST'])
@csrf_exempt
def login_view(request):
    '''
    Log our user in.

    Expects `handle` and `password` in the POST body.  AJAX callers get
    a 403 JSON error on failure and ``{'status': 'ok'}`` on success;
    browser callers are redirected home either way, with an error
    message queued on failure.
    '''
    data = get_post_data(request)
    error = None
    try:
        username = data['handle']
        password = data['password']
    except KeyError:
        error = 'Incorrect handle/password'
    else:
        user = authenticate(username=username, password=password)
        if user is not None and user.is_active:
            login(request, user)
        else:
            # Deliberately the same message for a bad handle and a bad
            # password, so the endpoint doesn't leak which one was wrong.
            error = 'Incorrect handle/password'
    if error is not None:
        if request.is_ajax():
            return json_response({'error': error}, 403)
        messages.add_message(request, messages.ERROR, error)
    if request.is_ajax():
        return json_response({'status': 'ok'})
    return redirect('five-home')
@require_http_methods(['POST'])
@login_required
@ensure_csrf_cookie
def check_in(request, place_id):
    '''
    Check the current user in to the place with ID ``place_id``.

    On success, also queues an "X is around you" notification for every
    other user currently checked in at the same place.
    '''
    try:
        place = Place.objects.get(id=place_id)
    except Place.DoesNotExist:
        ret = ({'error': 'No place with id {} found'.format(place_id)}, 404)
    except Exception as e:
        # Broad catch so the client always receives JSON, never a 500 page.
        ret = ({'error': str(e)}, 500)
    else:
        profile = request.user.userprofile
        profile.current_location = place
        profile.save()
        # Build notifications for people at this location.
        # NOTE(review): the loop below reuses `profile`, shadowing the
        # requesting user's profile — harmless here, but easy to trip over.
        nearby_profiles = UserProfile.objects.filter(current_location=place).exclude(user=request.user)
        for profile in nearby_profiles:
            Notification.objects.create(user=profile.user, data=json.dumps({
                'image': profile.avatar_url(),
                'data': '{} is around you. Go say hi!'.format(profile.user.username)
            }))
        ret = ({'success': True}, 200)
    return json_response(*ret)
@require_http_methods(['GET'])
@http_basic_auth
@ensure_csrf_cookie
def get_user_details(request):
    """Return the serialized profile of the currently logged-in user,
    or a 403 JSON error when unauthenticated."""
    current_user = request.user
    if not current_user.is_authenticated():
        return json_response({'error': 'login required'}, 403)
    return json_response(current_user.userprofile.serialize())
@require_http_methods(['GET'])
@http_basic_auth
@ensure_csrf_cookie
def get_nearby_people(request):
    """Serialize and return the people currently near the requesting user."""
    profile = request.user.userprofile
    people = [person.serialize() for person in profile.nearby_people()]
    return json_response({'data': people})
@require_http_methods(['POST'])
@http_basic_auth
@login_required
def get_nearby_places(request, lat, lon, rad=1000):
    '''
    Return nearby places centered around a location of
    (lat, lon) and within a radius of ``rad`` metres.

    Results come from the Places nearby-search API and are cached in the
    local Place table as a side effect.
    '''
    data = nearby_search((lat, lon), rad)
    if data['status'] != 'OK':
        try:
            # This is not guaranteed to be present.
            err_msg = data['error_message']
        except KeyError:
            err_msg = 'An error occurred. Try again later'
        return json_response({'error': err_msg})
    places= []
    for result in data['results']:
        result_name = result.get('name')
        if result_name is None:
            # No point in bothering with a place which we don't
            # know the name of.
            continue
        result_loc = result['geometry']['location']
        defaults = {
            'name': result_name,
            'description': '',
            'lat': result_loc['lat'],
            'lon': result_loc['lng'],
            'icon': result['icon'],
        }
        try:
            viewport = result['geometry']['viewport']
            viewport_ne = viewport['northeast']
            viewport_sw = viewport['southwest']
        except KeyError:
            # Any missing key leaves viewport as None below.
            viewport = None
        # Fill in viewport info if we have it
        if viewport is not None:
            defaults.update({
                'viewport_ne_lat': viewport_ne['lat'],
                'viewport_ne_lon': viewport_ne['lng'],
                'viewport_sw_lat': viewport_sw['lat'],
                'viewport_sw_lon': viewport_sw['lng']
            })
        # Store the result in our database if it does not exist
        # already.
        place, created = Place.objects.get_or_create(
            id=result['id'],
            defaults=defaults
        )
        places.append(place)
    resp = {'data': [place.to_dict() for place in places]}
    return json_response(resp)
class HomeView(View):
    """Landing page: authenticated users see the app home, visitors the
    public index page."""

    def get(self, request):
        if self.request.user.is_authenticated():
            template = 'core/home.html'
        else:
            template = 'core/index.html'
        return render(request, template)

    def get_context_data(self, **kwargs):
        # No extra template context is needed.
        return {}
class UserView(View):
    """Read (GET) and create (POST) users."""

    def get(self, request, uid=None):
        '''
        Get info about a user. For an AJAX request, send the JSON
        representation of a user. For a non-AJAX request, render the
        requested user profile.
        '''
        if uid is None:
            return json_response({'error': 'No id given'}, status=400)
        try:
            user = User.objects.get(id=uid)
        except User.DoesNotExist:
            return json_response({'error': 'No such user'}, status=404)
        if request.is_ajax():
            return json_response(to_dict(user))
        # Render the user page here
        return render(request, 'core/user.html', {'user': user})

    def post(self, request):
        '''
        Create a new user(Sign up).

        On success the user gets a blank profile and (for browser
        requests) is logged in; on failure the validation errors are
        returned as JSON (AJAX) or queued as messages (browser).
        '''
        errors = {}
        data = get_post_data(request)
        # Validate the data
        form = UserCreationForm(data)
        if form.is_valid():
            username = data['handle']
            email = data['email']
            password = data['password']
            # Create the user
            try:
                user = User.objects.create(username=username, email=email)
                user.set_password(password)
                user.save()
            except IntegrityError:
                errors = {'error': 'Error creating user'}
            else:
                UserProfile.objects.create(user=user, bio='')
        else:
            errors = form.errors
        if request.is_ajax():
            # BUGFIX: this branch previously tested the undefined name
            # `error` (NameError on every AJAX signup) and, read as
            # intended, inverted the success check.  Success is simply
            # the absence of accumulated errors.
            if not errors:
                response = {'status': 'ok', 'id': user.id}
            else:
                response = errors
            return json_response(response)
        if not errors:
            # Log the user in
            user = authenticate(username=username, password=password)
            login(request, user)
        else:
            # NOTE(review): form.errors values are ErrorList objects, not
            # plain strings — ':'.join(item) may fail depending on the
            # Django version; confirm before relying on this path.
            for item in errors.iteritems():
                messages.add_message(request, messages.ERROR, ':'.join(item))
        # Redirect to home
        return redirect('five-home')
@csrf_exempt
def ping_view(request, data):
    """Debug echo endpoint: returns the URL fragment and, for POSTs,
    the raw request body."""
    payload = request.body if request.method == 'POST' else ''
    message = data
    return json_response({'message': message, 'payload': payload})
ALPHABET = 'abcdefghijklmnopqrstuvwxyz '
@require_http_methods(['POST'])
@http_basic_auth
@ensure_csrf_cookie
def get_notification(request):
    '''
    Pop and return all pending notifications for the current user.

    The notifications are deleted after their payloads are collected, so
    each one is delivered at most once.  (The former "40% chance"
    wording did not match this implementation.)
    '''
    all_notifs = Notification.objects.filter(user=request.user)
    # The list comprehension evaluates the queryset before delete() runs.
    data = [json.loads(notif.data) for notif in all_notifs]
    all_notifs.delete()
    return json_response({'data': data})
|
UTF-8
|
Python
| false | false | 2,014 |
5,978,594,482,076 |
88a7a18d26265728c45e26ca9b51e102c882ba89
|
bd242360936dc812c96a8e216de205517c84e005
|
/samples/tut3.py
|
54b65c3a44c2483304c6e91fd733a1832ceaa359
|
[
"CC-BY-NC-SA-3.0"
] |
non_permissive
|
decalage2/python-crash-course
|
https://github.com/decalage2/python-crash-course
|
81c35de2601661256baaf3ac000ceeff8d9ebb06
|
68e0bc08f73a7a34e2c59a48fa323c8512fafe07
|
refs/heads/master
| 2023-03-24T23:41:25.378995 | 2014-09-21T20:56:16 | 2014-09-21T20:56:16 | 277,366,204 | 6 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
class Person:
    """A person identified by last and first name (Python 2 old-style class)."""
    def __init__(self, lastname, firstname):
        self.lname = lastname
        self.fname = firstname
    def display(self):
        # Prints the same text that __str__ returns.
        print "Last name: %s - First name: %s" % \
            (self.lname, self.fname)
    def __str__(self):
        return "Last name: %s - First name: %s" % \
            (self.lname, self.fname)
class AddrBookPerson (Person):
    """A Person extended with a mutable address and an optional postcode."""
    def __init__(self, lastname, firstname, address):
        Person.__init__(self, lastname, firstname)
        self.addr = address
    def display_address(self):
        print self.addr
    def move(self, new_address):
        # Overwrite the stored address.
        self.addr = new_address
    def set_postcode(self, postcode):
        # NOTE(review): self.postcode only exists after this call, so
        # display_postcode() before set_postcode() raises AttributeError.
        self.postcode = postcode
    def display_postcode(self):
        print self.postcode
##pl = Person('lagadec', 'philippe')
###pl.display()
##print pl
##
##jc = AddrBookPerson('Gallard', 'JC', 'The Hague')
##jc.display()
##jc.display_address()
##
##jc.move('Wassenaar')
##jc.display_address()
##
##jc.set_postcode('1234AB')
##jc.display_postcode()
|
UTF-8
|
Python
| false | false | 2,014 |
4,509,715,706,607 |
18d1ff5db42744a33ee8e9535cf1b9af55c7f466
|
6815e0d4281551527b59a89e55b7dc5f000d83f7
|
/pdf_analyzer.py
|
71f1ab74d531ad2b9d5ca1f5d0af517bd1be7100
|
[] |
no_license
|
mellowizz/metastudy
|
https://github.com/mellowizz/metastudy
|
bd345e5268ad282f98a948b434dc6ebde8126e0b
|
84dee0280ae48a68ad77190e21c3dbf5419ae3df
|
refs/heads/master
| 2016-09-06T10:53:51.454913 | 2013-10-21T17:25:08 | 2013-10-21T17:25:08 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python2.7
# -*- coding: utf-8 -*-
import codecs, sys
from pdfminer.pdfparser import PDFParser, PDFDocument
txtfile = "schlag.txt"
f = codecs.open(txtfile, encoding='utf-8')
# Each iteration over the file yields one line including its newline, so
# split('\n') produces a [word, ''] pair per line — presumably only
# element 0 is wanted downstream; verify against the consumer.
schlaglist = [x.split('\n') for x in f] # list
'''
for x in schlaglist:
    print u(x)
'''
try:
    fp = open('/home/niklasmoran/EL/skript211.pdf')
    # NOTE(review): Python file objects have no .type() method, so this
    # raises AttributeError, which the bare except below swallows — the
    # pdfminer outline code underneath therefore never runs.
    print (fp.type())
    parser = PDFParser(fp)
    doc = PDFDocument()
    parser.set_document(doc)
    doc.initialize('')
    outlines = doc.get_outlines()
    print outlines.type()
except:
    print "that didn't work", sys.exc_info()[0]
|
UTF-8
|
Python
| false | false | 2,013 |
2,370,821,950,222 |
6b2db7e71f49c5113d517e97129fb584d8ad384b
|
5bfad7ca17e79dbe77edcdc16b15ca70dd538b5d
|
/s3-dynamo-sync-check/lib/sns.py
|
79642f72794c5a18e0946214899e1ab3296670d7
|
[
"Apache-2.0"
] |
permissive
|
msfuko/CSTools
|
https://github.com/msfuko/CSTools
|
c027e97047da868a606de5f3d41db2a4672500ec
|
b6418d5c844540a8fd0115c1b9f69fb70fe907b8
|
refs/heads/master
| 2021-01-19T14:34:02.487965 | 2014-12-08T11:37:15 | 2014-12-08T11:37:15 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from awsobject import AWSObject
class SNS(AWSObject):
def __str__(self):
return "sns"
def send(self, topic_name, subject, message):
if len(subject) > 100:
raise ValueError("Subject is too long: %s" % subject)
topics = self.conn.get_all_topics()
mytopics = topics["ListTopicsResponse"]["ListTopicsResult"]["Topics"]
topic_arn = None
for topic in mytopics:
if topic_name in topic['TopicArn']:
topic_arn = topic['TopicArn']
break
if topic_arn:
print "SNS sent by %s" % topic_name
res = self.conn.publish(topic_arn, message, subject)
|
UTF-8
|
Python
| false | false | 2,014 |
15,118,284,898,931 |
ce0bcdb824fc0c17f8dbffe579ad1fb9547c2134
|
6a903a5486cac7bdce7d83640a3b8c157ca636a8
|
/lmdb/__init__.py
|
1e3e19cbd5b49aaf0d40b4ab09cc4514dfe4bf83
|
[] |
no_license
|
pombredanne/python-lmdb
|
https://github.com/pombredanne/python-lmdb
|
8f8db399060064e7766b827dd94fb4f7f936b722
|
e93d796cfa80b38ee3065bfb598617809e363350
|
refs/heads/master
| 2018-05-30T11:36:24.145134 | 2013-09-22T20:19:41 | 2013-09-22T20:19:41 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# coding: utf-8
from lmdb.lmdb import *
|
UTF-8
|
Python
| false | false | 2,013 |
12,962,211,327,489 |
311a98c47e65c9d34b4989634859e7aae4934f85
|
5857c8d3399096adfec2c77f445a5bba757998cc
|
/aniseasons/helpers.py
|
b04f65ad8be9227d66dec358367347e4d5f530fd
|
[] |
no_license
|
hkal/aniseasons.com
|
https://github.com/hkal/aniseasons.com
|
01748aaf1d361ed3bdafed18fc6dbba4b8f16fb2
|
413ea60f1303d8bc1e6d0a001f875c12f84aee7d
|
refs/heads/master
| 2015-08-07T08:34:57.445649 | 2013-09-01T03:37:12 | 2013-09-01T03:37:12 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from flask import request
from PIL import Image
from unicodedata import normalize
import os
import re
def resize_image(pic, max_width):
    """Return ``pic`` (a path or file object) scaled to ``max_width``
    pixels wide, preserving its aspect ratio."""
    source = Image.open(pic)
    orig_width, orig_height = source.size
    scale = max_width / float(orig_width)
    new_height = int(float(orig_height) * scale)
    return source.resize((max_width, new_height), Image.ANTIALIAS)
def save_image_and_thumbnail(name, fileblob, path, full_width=600, thumb_width=194, ext='.jpg'):
    """Save a resized full image and a thumbnail of it under ``path``.

    The full image is written as ``<name><ext>`` at ``full_width`` and a
    ``thumb-``-prefixed thumbnail at ``thumb_width``; both are JPEGs at
    quality 95.  Returns ``(filename, thumb_filename)``.
    """
    filename = name + ext
    thumb_filename = "thumb-" + filename
    full_image_path = os.path.join(path, filename)
    thumb_image_path = os.path.join(path, thumb_filename)
    image = resize_image(fileblob, full_width)
    image.save(full_image_path, 'JPEG', quality=95)
    # BUGFIX: the full image used to be re-opened in text mode and never
    # closed — text mode corrupts binary data on some platforms and the
    # handle leaked.  Open in binary mode and close deterministically;
    # the save happens inside the with-block because PIL loads lazily.
    with open(full_image_path, 'rb') as full_image_file:
        thumb = resize_image(full_image_file, thumb_width)
        thumb.save(thumb_image_path, 'JPEG', quality=95)
    return filename, thumb_filename
def are_fields_valid(request, is_update=False):
    """Validate the posting form: title, type and description must all be
    non-empty, and a new posting (``is_update=False``) must also carry an
    uploaded file."""
    if len(request.form) == 0:
        return False
    has_required = (request.form['title'] and request.form['type']
                    and request.form['description'])
    if has_required and (is_update or request.files):
        return True
    return False
def slugify(text, delim=u'-'):
    """Build an ASCII slug from ``text``: lowercased, punctuation and
    whitespace collapsed to ``delim``, accents stripped via NFKD.

    NOTE(review): relies on the Python 2 ``unicode`` builtin; under
    Python 3 the return statement raises NameError.
    """
    _punct_re = re.compile(r'[\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.]+')
    result = []
    for word in _punct_re.split(text.lower()):
        # Decompose accents, then drop anything that is not ASCII.
        word = normalize('NFKD', word).encode('ascii', 'ignore')
        if word:
            result.append(word)
    return unicode(delim.join(result))
|
UTF-8
|
Python
| false | false | 2,013 |
2,662,879,773,187 |
b9fc82cab1f424a73c81eef688acbe5b84667138
|
f766a5e19fd314ea3357d3323818b7a3beed85e1
|
/haartrack.py
|
0c0c1a27b30e1e1aadb3f0cf2c25a6e9cd789c4d
|
[] |
no_license
|
gszpura/handdect
|
https://github.com/gszpura/handdect
|
0b2fa3bad8ea657d48dd6223b6320c67fc82ca4a
|
b0355efd43fe5ce9a8041f21344037e4bb20b940
|
refs/heads/master
| 2021-01-23T03:08:19.383945 | 2014-05-08T12:49:13 | 2014-05-08T12:49:13 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import cv2
import numpy as np
import time
PREFIX_MODULE = "cascades/"
def w_h_divided_by(image, divisor):
    """Return the image's (width, height), each divided by ``divisor``.

    Note image.shape is (rows, cols, ...), i.e. (height, width, ...).
    """
    shape = image.shape
    return (shape[1] / divisor, shape[0] / divisor)
class Face(object):
    """Holds the rectangles located for one face: the face itself plus
    optional nose and mouth sub-rectangles (each (x, y, w, h) or None)."""
    def __init__(self):
        self.faceRect = None
        self.noseRect = None
        self.mouthRect = None
    def draw(self, image):
        """Outline each present rectangle on ``image`` in blue."""
        for rect in (self.faceRect, self.noseRect, self.mouthRect):
            if rect is None:
                continue
            x, y, w, h = rect
            cv2.rectangle(image, (x, y), (x+w, y+h), (255,0,0))
class Hand(object):
    """Holds the bounding rectangle ((x, y, w, h) or None) of one
    detected hand."""
    def __init__(self):
        self.handRect = None
    def draw(self, image):
        """Outline the hand rectangle on ``image`` in blue, if present."""
        if self.handRect is None:
            return
        x, y, w, h = self.handRect
        cv2.rectangle(image, (x, y), (x+w, y+h), (255,0,0))
class FaceTracker(object):
    """A tracker for facial features: face, nose, (mouth?)."""
    def __init__(self, scaleFactor = 1.2, minNeighbors = 2, flags = cv2.cv.CV_HAAR_SCALE_IMAGE):
        # detectMultiScale() tuning: pyramid scale step, required
        # neighbour count and OpenCV haar flags.
        self.scaleFactor = scaleFactor
        self.minNeighbors = minNeighbors
        self.flags = flags
        self._faces = []
        # Load each cascade once up front; construction parses the XML.
        self._faceClassifier = cv2.CascadeClassifier(PREFIX_MODULE + 'haarcascade_frontalface_alt.xml')
        self._eyeClassifier = cv2.CascadeClassifier(PREFIX_MODULE + 'haarcascade_eye.xml')
        self._noseClassifier = cv2.CascadeClassifier(PREFIX_MODULE + 'haarcascade_mcs_nose.xml')
        self._mouthClassifier = cv2.CascadeClassifier(PREFIX_MODULE + 'haarcascade_mcs_mouth.xml')
    @property
    def faces(self):
        """The tracked facial features."""
        return self._faces
    def update(self, img):
        """Update the tracked facial features."""
        self._faces = []
        cp = img.copy()
        # Detect on an equalized grayscale copy for more stable results.
        image = cv2.cvtColor(cp, cv2.COLOR_BGR2GRAY)
        image = cv2.equalizeHist(image)
        # Faces are expected between 1/6 and 1/2 of the frame size.
        minSize = w_h_divided_by(image, 6)
        maxSize = w_h_divided_by(image, 2)
        cv2.imshow('Equ', image)
        faceRects = self._faceClassifier.detectMultiScale(
            image, self.scaleFactor, self.minNeighbors, self.flags,
            minSize, maxSize)
        print faceRects
        if faceRects is not None:
            for faceRect in faceRects:
                face = Face()
                face.faceRect = faceRect
                x, y, w, h = faceRect
                # Seek a nose in the middle part of the face.
                searchRect = (x+w/4, y+h/4, w/2, h/2)
                """
                face.noseRect = self._detectOneObject(
                    self._noseClassifier, image, searchRect, 32)
                # Seek a mouth in the lower-middle part of the face.
                searchRect = (x+w/6, y+h*2/3, w*2/3, h/3)
                face.mouthRect = self._detectOneObject(
                    self._mouthClassifier, image, searchRect, 16)
                """
                self._faces.append(face)
    def _detectOneObject(self, classifier, image, rect, ratio):
        # Run `classifier` inside sub-rectangle `rect` of `image`;
        # `ratio` bounds the minimum feature size (frame size / ratio).
        x, y, w, h = rect
        minSize = w_h_divided_by(image, ratio)
        subImage = image[y:y+h, x:x+w]
        subRects = classifier.detectMultiScale(
            subImage, self.scaleFactor, self.minNeighbors,
            self.flags, minSize)
        if len(subRects) == 0:
            return None
        subX, subY, subW, subH = subRects[0]
        # Translate the sub-image hit back into full-image coordinates.
        return (x+subX, y+subY, subW, subH)
class HandTracker(object):
    """A tracker for hand features"""
    def __init__(self, scaleFactor = 1.1, minNeighbors = 4, flags = cv2.cv.CV_HAAR_DO_CANNY_PRUNING):
        # Alternative cascades kept around for experimentation.
        #name = PREFIX_MODULE + "haarcascade_hand_2.xml"
        #name = PREFIX_MODULE + "fist.xml"
        #name = PREFIX_MODULE + "palm.xml"
        name = PREFIX_MODULE + "hand_cascade.xml"
        self.scaleFactor = scaleFactor
        self.minNeighbors = minNeighbors
        self.flags = flags
        self._hands = []
        self._handClassifier = cv2.CascadeClassifier(name)
    @property
    def hands(self):
        """The tracked hands."""
        return self._hands
    def update(self, img):
        """Update the tracked hands from a new frame."""
        self._hands = []
        cp = img.copy()
        image = cv2.cvtColor(cp, cv2.COLOR_BGR2GRAY)
        # Hard-coded min (70x70) and max (175x125) hand sizes in pixels.
        handRects = self._handClassifier.detectMultiScale(
            image, self.scaleFactor, self.minNeighbors, self.flags,
            (70, 70), (175, 125))
        for r in handRects:
            h = Hand()
            h.handRect = r
            self._hands.append(h)
|
UTF-8
|
Python
| false | false | 2,014 |
18,863,496,371,761 |
c1cd9931092b8daef857ee2319c4844cf58c41af
|
101e1d0c8772ad178ea8b6cd001ed74f1adbb42c
|
/jobsite_web/jobsite_main/business.py
|
01640d1eb41dc122a9ba81f518159809142b042c
|
[] |
no_license
|
dnephin/perzoot
|
https://github.com/dnephin/perzoot
|
fe066be04cb27b6e7965501e0624929f6835ca31
|
71d02395ed32f9f7a296c6ef8e286e880ece8d13
|
refs/heads/master
| 2016-09-10T10:36:28.992311 | 2011-02-24T03:38:14 | 2011-02-24T03:38:14 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
Business rules and logic.
Perzoot
"""
import logging
from jobsite_main.statics import *
from jobsite_main.search import Search
from jobsite_main.db import get_user_events
log = logging.getLogger('Business')
def get_search_type(request, form):
    """
    Determine the search type from the request and form.

    Precedence: a POST always starts a clean search; otherwise the GET
    parameters select event search, pagination, re-sort or filtering,
    in that order; anything else falls back to a clean search.
    """
    if request.method == "POST":
        return CLEAN_SEARCH
    if 'event' in request.GET:
        return SEARCH_EVENT_SEARCH
    # 'start' is the pagination offset from the previous result page.
    if form.cleaned_data.get('start'):
        return NEXT_PAGE_SEARCH
    if request.GET.get('otp_sort'):
        return RESORT_SEARCH
    if request.GET.get('otp_filter'):
        return FILTERED_SEARCH
    return CLEAN_SEARCH
def get_search_messages(form, response):
    """
    Return a list of helpful search messages for this search based on
    the number of results and filters being used.

    NOTE(review): not implemented — the body is only this docstring,
    so the function currently returns None.
    """
def suppliment_results(request, search_results):
    """
    Remove postings that were deleted by the user and add identifiers for
    visited/favorited postings for the user.

    Mutates ``search_results`` in place; returns None.  (The name's
    spelling is kept for compatibility with existing callers.)
    """
    if not search_results:
        return
    doc_map = dict((d['id'], d) for d in search_results['results'])
    user_events = get_user_events(request, ids=doc_map.keys())
    for event in user_events:
        if event.event == 'save':
            doc_map[event.posting_id]['type'] = 'saved_event'
            continue
        # Saved should overwrite opened
        # NOTE(review): assumes every result dict already carries a
        # (possibly falsy) 'type' key; a missing key raises KeyError here.
        if event.event == 'open' and not doc_map[event.posting_id]['type']:
            doc_map[event.posting_id]['type'] = 'opened_event'
            continue
        if event.event == 'remove':
            search_results['results'].remove(doc_map[event.posting_id])
            continue
        log.warn("Unknown user event type: %s" % (event.event))
|
UTF-8
|
Python
| false | false | 2,011 |
17,532,056,535,676 |
b012a199628cc50e4c14cbc1b86a5151a1919890
|
fab3d466b228d37c4a5f6511934220db777ec34d
|
/rsa/primitive_root.py
|
b54ee0c1932f0f2232b89606ead73320d5ba240a
|
[] |
no_license
|
fcu-d0449763/cryptography
|
https://github.com/fcu-d0449763/cryptography
|
63fbad1a5e5f7aea994af75156a89721e1a00486
|
c649803da107511e02de1fc3a293f4b695c150fe
|
refs/heads/master
| 2021-05-28T00:24:10.633799 | 2013-06-11T07:06:52 | 2013-06-11T07:06:52 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
#-*- coding:utf-8 -*-
import bigmod
# Read the modulus; the prompt suggests 100 < prime < 1000 but the value
# is not validated here.
prime = int(raw_input ('please input a prime(100 < prime < 1000):'))
#prime = 101
primitive_root = []
# a is a primitive root mod prime iff a^1..a^(prime-1) (mod prime) hits
# every residue 1..prime-1 exactly once.
for a in range(1, prime):
    res = []
    for i in range (1, prime):
        res.append(bigmod.bigmod(a, i, prime))
    res_single = set(res) # remove the duplicate members
    # len(res) is always prime-1 by construction; the real test is that
    # the powers are all distinct (len of the set equals prime-1).
    if len(res) == (prime -1) and len(res_single) == (prime -1):
        #print a,res_single
        primitive_root.append(a) # get the primitive root
print 'The primitive root are: ', primitive_root
print 'The number of primitive root:', len(primitive_root)
|
UTF-8
|
Python
| false | false | 2,013 |
14,783,277,462,975 |
3af83d72cc7e0702bb8f0d96e0b7f4f006020883
|
573616eebe8a0f44c2974c28b304b1cdd407f97c
|
/app/fast/__init__.py
|
5d42256abf730fdc240a19154aa040ce6e6a7337
|
[] |
no_license
|
tgwang/flask-sqlalchemy
|
https://github.com/tgwang/flask-sqlalchemy
|
a914fa61936bef2428720d0057c40eabed27cef5
|
e6a407b620933170d36055bb933cd998db993d61
|
refs/heads/master
| 2021-01-19T09:31:55.345071 | 2014-11-09T10:59:47 | 2014-11-09T10:59:47 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from flask import Blueprint
# Blueprint for the "fast" section; the application factory registers it.
fast = Blueprint('fast', __name__)
# Imported last so the routes module can import `fast` from this package
# without a circular import at load time.
from . import routes
|
UTF-8
|
Python
| false | false | 2,014 |
6,116,033,436,149 |
f00613147390e1b65200830e496232f8c5b5816b
|
01c67396c97bf8b9ded4522e072711bd3b637efb
|
/youbot_main_ai/src/youbot_main_ai/states/SelectTask.py
|
bcfe4b2b6c57358fbcf0660ebe9183f07d85f031
|
[
"CC-BY-NC-SA-3.0",
"LicenseRef-scancode-cc-by-nc-sa-3.0-us",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-warranty-disclaimer",
"CC-BY-NC-4.0",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference"
] |
non_permissive
|
WFWolves/wolves-at-work
|
https://github.com/WFWolves/wolves-at-work
|
f047d2285fc3156651f5c2accb4edb6565bfad93
|
c897b0f0c30b4e1d5fbbc4117b9e23a61528a54d
|
refs/heads/master
| 2020-05-09T20:13:12.887058 | 2013-09-27T10:08:57 | 2013-09-27T10:08:57 | 13,144,896 | 2 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import smach
import rospy
import copy
class SelectTask(smach.State):
    """Dispatch on the refbox message prefix to the matching task outcome."""

    # Message prefix -> state-machine outcome.
    _TASK_OUTCOMES = (
        ("BNT", "taskBNT"),
        ("BMT", "taskBMT"),
        ("BTT", "taskBTT"),
        ("PPT", "taskPPT"),
    )

    def __init__(self):
        smach.State.__init__(self, outcomes=['taskBNT', 'taskBMT', 'taskBTT', 'taskPPT', 'taskUnknown'],
                             input_keys=['refbox_message_in'])

    def execute(self, userdata):
        """Return the outcome whose prefix starts the refbox message."""
        message = userdata.refbox_message_in
        for prefix, outcome in self._TASK_OUTCOMES:
            if message.startswith(prefix):
                return outcome
        return "taskUnknown"
|
UTF-8
|
Python
| false | false | 2,013 |
7,662,221,668,919 |
87a508a95eb11a240b172d560d9b309bcbd2a3c0
|
140c75a12b09af203d252ec465823bc165664322
|
/del_expired_log.py
|
66938555426a4c001630c90ec55d4030cc450eb5
|
[] |
no_license
|
jomenxiao/studyPython
|
https://github.com/jomenxiao/studyPython
|
1526a5fa4a706ecb7ae47f7f7c899e9e1f09db0b
|
42065db8811e05b26e10d4f74c4aa4a253c242bc
|
refs/heads/master
| 2021-01-21T12:36:23.414310 | 2014-12-09T08:57:55 | 2014-12-09T08:57:55 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import time
import datetime
import tarfile
base_dir = '/data1/home/tlog/logplat/log'
back_dir = '/data1/home/tlog/logplat/log/back'
# Logs older than this many days are deleted instead of archived.
expired_day = 90
if not os.path.exists(back_dir):
    os.mkdir(back_dir)
# Midnight today, used for whole-day age comparisons.
date_today = datetime.datetime(time.localtime().tm_year,time.localtime().tm_mon,time.localtime().tm_mday)
# Per-date grouping of files pending archive (Y_M_D -> [filenames]).
tar_match_file_dict = {}
def tar_file(work_dir,back_work_dir,e_tlog_dir):
work_dir = work_dir
back_work_dir = back_work_dir
e_tlog_dir = e_tlog_dir
os.chdir(work_dir)
print "[" + time.strftime("%Y %m %d %H:%M:%S", time.localtime()) + "] " + work_dir + " START....."
for e_file in os.listdir(work_dir):
if not ( os.path.isfile(e_file) and e_file.split('.')[-1] == 'log' ):
print "[" + time.strftime("%Y %m %d %H:%M:%S", time.localtime()) + "] " + work_dir + os.path.sep + e_file + " NO log file....."
pass
file_mtime = time.localtime(os.stat(e_file).st_ctime)
file_mtime_date = datetime.datetime(file_mtime.tm_year,file_mtime.tm_mon,file_mtime.tm_mday)
separated_days = (date_today - file_mtime_date).days
if separated_days >= expired_day and os.path.isfile(work_dir + os.path.sep + e_file):
print "[" + time.strftime("%Y %m %d %H:%M:%S", time.localtime()) + "] " + work_dir + os.path.sep + e_file + ' log file EXPIRED 90 DAYS REMOVED!!!'
os.remove(e_file)
elif separated_days > 1 and os.path.isfile(work_dir + os.path.sep + e_file):
tar_file_name = str(file_mtime.tm_year) + '_' + str(file_mtime.tm_mon) + '_' + str(file_mtime.tm_mday)
if tar_file_name in tar_match_file_dict:
if e_file not in tar_match_file_dict[tar_file_name]:
tar_match_file_dict[tar_file_name].append(e_file)
else:
pass
else:
tar_match_file_dict[tar_file_name]=[]
tar_match_file_dict[tar_file_name].append(e_file)
elif separated_days <= 1:
print "[" + time.strftime("%Y %m %d %H:%M:%S", time.localtime()) + "] " + work_dir + os.path.sep + e_file + " NO TAR new log file!!!!"
pass
else:
print "[" + time.strftime("%Y %m %d %H:%M:%S", time.localtime()) + "] " + work_dir + os.path.sep + e_file + " NO EXISTS!!!!"
pass
for (e_tar_name, e_tar_file_list) in tar_match_file_dict.items():
if os.path.isfile(back_work_dir + os.path.sep + e_tar_name + '.tar.gz'):
os.rename(back_work_dir + os.path.sep + e_tar_name + '.tar.gz', back_work_dir + os.path.sep + e_tar_name + "_" + time.strftime("%Y%m%d%H%M%S",time.localtime()) + '.tar.gz_bak')
print "[" + time.strftime("%Y %m %d %H:%M:%S", time.localtime()) + "] " + back_work_dir + os.path.sep + e_tar_name + '.tar.gz' + " START TAR...."
tar = tarfile.open(back_work_dir + os.path.sep + e_tar_name + '.tar.gz', 'w:gz')
for e_day_file in e_tar_file_list:
if os.path.isfile(e_day_file):
log_info = "[" + time.strftime("%Y %m %d %H:%M:%S", time.localtime()) + "] " + str(work_dir + os.path.sep + e_day_file) + " ADD IN " + str(back_work_dir + os.path.sep + e_tar_name) + ".tar.gz"
print log_info
tar.add(e_day_file)
else:
print "[" + time.strftime("%Y %m %d %H:%M:%S", time.localtime()) + "] " + e_day_file + " NOT FOUND FOR TAR!!!"
tar.close()
if os.path.isfile(back_work_dir + os.path.sep + e_tar_name + '.tar.gz'):
for e_log_file in e_tar_file_list:
if os.path.isfile(e_log_file):
log_info = "[" + time.strftime("%Y %m %d %H:%M:%S", time.localtime()) + "] " + str(e_log_file) + " TAR AND REMOVED!!!!"
print log_info
os.remove(e_log_file)
else:
log_info = "[" + time.strftime("%Y %m %d %H:%M:%S", time.localtime()) + "] " + str(e_log_file) + " TAR AND NOT FOUND REMOVED!!!!"
print log_info
def remove_pass_tar_log():
if not os.path.exists(back_dir):
return
os.chdir(back_dir)
for e_tarlog_dir in os.listdir(back_dir):
if not os.path.isdir(e_tarlog_dir):
pass
os.chdir(back_dir + os.path.sep + e_tarlog_dir)
list_dir = [e for e in os.listdir(back_dir + os.path.sep + e_tarlog_dir) \
if (e.split(".")[-1] == 'gz' or e.split(".")[-1] == 'gz_bak') and e.split(".")[-2] == 'tar' ]
for e_tar_file in list_dir:
e_filename = e_tar_file.split(".")[0]
file_date = datetime.datetime(int(e_filename.split("_")[0]),int(e_filename.split("_")[1]),int(e_filename.split("_")[2]))
separated_days = (date_today - file_date).days
if separated_days >= expired_day:
log_info = str(os.getcwd() + os.path.sep + e_tar_file) + " tar log file EXPIRED 90 DAYS REMOVED!!!!"
print log_info
os.remove(e_tar_file)
def main():
    # Archive logs of every "tlogd*" daemon directory under base_dir,
    # mirroring the directory layout under back_dir.
    tlog_dir_list = os.listdir(base_dir)
    for e_tlog_dir in tlog_dir_list:
        work_dir = base_dir + os.path.sep + e_tlog_dir
        back_work_dir = back_dir + os.path.sep + e_tlog_dir
        # Only real directories named tlogd* (and not the backup dir itself).
        if os.path.isdir(work_dir) and back_dir != work_dir and e_tlog_dir.startswith("tlogd"):
            if not os.path.exists(back_work_dir):
                os.mkdir(back_work_dir)
            print "[" + time.strftime("%Y %m %d %H:%M:%S", time.localtime()) + "] " + work_dir
            tar_file(work_dir,back_work_dir,e_tlog_dir)
        else:
            print "[" + time.strftime("%Y %m %d %H:%M:%S", time.localtime()) + "] " + work_dir + " DIR NOT TLOG DIR!!!"
if __name__ == '__main__':
    # Archive current logs first, then purge expired archives.
    print "[" + time.strftime("%Y %m %d %H:%M:%S", time.localtime()) + "] starting....."
    main()
    remove_pass_tar_log()
    print "[" + time.strftime("%Y %m %d %H:%M:%S", time.localtime()) + "] fininshed!!!"
|
UTF-8
|
Python
| false | false | 2,014 |
300,647,720,406 |
a51475f2608012dc501e849bb05b5e62a676fabd
|
e2cba878fec9ac7a34b36f3de7d3c2dd28fca11e
|
/MEDRank/utility/process.py
|
f8fbc77a0966fcaec4770d1e2eb4558da14564d7
|
[
"GPL-2.0-only"
] |
non_permissive
|
aaraya1516/MEDRank
|
https://github.com/aaraya1516/MEDRank
|
2c900d50cc32b9f7c0f014dc6e83c7ba8f261ef7
|
776e489f1d7cbf4ce29055b12b6c12ee0b1b2fa3
|
refs/heads/master
| 2021-01-15T13:14:28.217254 | 2011-11-04T01:08:29 | 2011-11-04T01:08:29 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# encoding: utf-8
"""
process.py
Created by Jorge Herskovic on 2010-07-02.
Copyright (c) 2010 University of Texas - Houston. All rights reserved.
"""
from MEDRank.utility import proctitle
from MEDRank.utility.logger import logging, ULTRADEBUG
from MEDRank.utility.workflow import CouldNotRank
import traceback
# pylint: disable-msg=C0322
def processor(workflow_class,
              graph_builder_constructor, graph_builder_params,
              ranker_constructor, ranker_params,
              eval_parameters,
              ranking_cutoff,
              mesh_tree_filename, distance_matrix_filename,
              distance_function,
              umls_converter_data_filename,
              extra_data_name,
              extra_data_contents,
              my_input_queue, my_output_queue,
              my_own_name=None):
    """Worker-process loop: build one workflow, then serve ranking requests.

    Pulls requests from ``my_input_queue`` until the sentinel string
    'STOP' arrives. Each request is passed to
    ``workflow.process_article``; accumulated results are pushed onto
    ``my_output_queue`` and cleared. Articles raising ``CouldNotRank``
    are skipped; any other exception is logged with its traceback and
    re-raised, terminating the worker.
    """
    logging.info("Setting up worker.")
    if my_own_name is not None:
        # Make this worker identifiable in process listings.
        proctitle.setproctitle(my_own_name)
    my_workflow=workflow_class(graph_builder_constructor,
                               graph_builder_params,
                               ranker_constructor,
                               ranker_params,
                               eval_parameters,
                               ranking_cutoff,
                               mesh_tree_filename,
                               distance_matrix_filename,
                               distance_function,
                               umls_converter_data_filename
                               )
    if extra_data_name is not None:
        # Attach caller-supplied auxiliary data directly onto the workflow.
        my_workflow.__setattr__(extra_data_name, extra_data_contents)
    logging.info("Finished setting up worker process. Waiting for requests.")
    try:
        while True:
            request=my_input_queue.get()
            logging.log(ULTRADEBUG, "Processing request %r", request)
            if request=='STOP':
                logging.log(ULTRADEBUG, "Received stop request.")
                break
            try:
                my_workflow.process_article(request)
                # Recover the article, push it on the output queue
                my_output_queue.put(my_workflow.all_results)
                # Clear the output queue
                my_workflow.all_results={}
            except CouldNotRank:
                #my_input_queue.put(request) # On error, push the task
                                            # back into the queue
                logging.info("Skipping unrankable article.")
            except:
                # Deliberately broad: log the traceback, then re-raise so
                # the parent sees the failure.
                logging.warn("EXCEPTION RAISED: \n%s",
                             traceback.format_exc())
                raise
    finally:
        logging.log(ULTRADEBUG, "Returning results to caller.")
    logging.log(ULTRADEBUG, "Ending processor execution.")
    return
|
UTF-8
|
Python
| false | false | 2,011 |
1,047,972,066,865 |
7f59d2d8aab4998803bfb7a6672c77b68c3e670a
|
0ba1ff84fe16bb56039397b5349789fa07dfdd09
|
/cart/middleware.py
|
dd0a397bc06e72d2b8421df8f83ce0b2550bb199
|
[
"LGPL-3.0-only"
] |
non_permissive
|
impossible-bureau/django-cart
|
https://github.com/impossible-bureau/django-cart
|
74466bf38a20ab65f81e2ac5693b5fb7c460f9b4
|
790c48e00702e3371d81a082390ae63e89a0e45a
|
refs/heads/master
| 2020-12-25T15:51:17.304381 | 2013-04-17T20:17:31 | 2013-04-17T20:17:31 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.utils import timezone
import models
CART_ID = 'CART-ID'
class CartMiddleware(object):
    """Attach a shopping cart to every request, creating one on demand."""

    def process_request(self, request):
        """Load the session's cart; on a missing/stale id create a new one."""
        try:
            request.cart = models.Cart.objects.get(pk=request.session[CART_ID])
        except (KeyError, models.Cart.DoesNotExist):
            fresh_cart = models.Cart.objects.create(creation_date=timezone.now())
            request.cart = fresh_cart
            request.session[CART_ID] = fresh_cart.pk
|
UTF-8
|
Python
| false | false | 2,013 |
9,749,575,788,182 |
86d9f987ed4e83f324109ee99e96bdfcfd2d4edb
|
6929e5f8ab09ce6c9131845ef2aed82ed5ccd84d
|
/widgets.py
|
7d1e3175f67d26ed8c6317c21563d424df5a491c
|
[
"MIT"
] |
permissive
|
xacce/django_type_rel
|
https://github.com/xacce/django_type_rel
|
487cb49bed4f93a5c20eb155274a58215bdb413b
|
d7c55a50f3a7d827ff33ccdbe306da6827236f3f
|
refs/heads/master
| 2021-01-01T19:23:19.181480 | 2013-10-22T12:21:06 | 2013-10-22T12:21:06 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django import forms
from django.forms import fields
from django.contrib.admin import widgets
from django.templatetags.static import static
from django.utils.html import mark_safe
class TypeRelWidget(widgets.AdminTextInputWidget):
    """Admin widget rendering two chained <select>s for a typed relation.

    The first select lists root objects (rendered server-side from the
    admin's ``dtr_root_qs``); dtr.js uses its data attributes to fetch
    matching targets from the ``dtr_listing`` view and fill the second,
    empty select, which carries the actual form value.
    """
    def __init__(self, model, to_model, rel_rel_name, admin, *args, **kwargs):
        # Source model, target model, the relation field name on the
        # target, and the ModelAdmin supplying root queryset and labels.
        self.from_model, self.admin, self.to_model, self.rel_rel_name = model, admin, to_model, rel_rel_name
        self.objects = admin.dtr_root_qs(to_model, rel_rel_name)
        super(TypeRelWidget, self).__init__(*args, **kwargs)
    def render(self, name, value, attrs=None):
        from django.core.urlresolvers import reverse
        # Composite key "app.Model-app.Model-relname" identifying this
        # relation triple to the client-side script.
        key = "%s.%s-%s.%s-%s" % (
            self.from_model._meta.app_label, self.from_model.__name__,
            self.to_model._meta.app_label, self.to_model.__name__,
            self.rel_rel_name
        )
        default_query = ''
        if value:
            # Preselect the root that owns the currently-saved target.
            to_object = self.to_model.objects.get(pk=value)
            default_query = ("%s-%s" % (key, getattr(to_object, "%s_id" % self.rel_rel_name)))
        options = ''u"".join([u"<option value='%s-%s'>%s</option>" % (key, x.pk, self.admin.dtr_root_label(x)) for x in self.objects])
        html = u"<select data-url='%s' data-default-query='%s' data-default='%s' data-target='%s' class='dtr_select'>%s</select>" \
            % (reverse('dtr_listing'), default_query, value, name, options)
        # The second select holds the submitted value; dtr.js populates it.
        html_main_select = "<select name='%s' id='%s'></select>" % (name, name)
        html += html_main_select
        return mark_safe(html)
#        return super(SorcererWidget, self).render(name, value, final_attrs)
#
#
#
    @property
    def media(self):
        # Ship the JS that wires the chained selects together.
        js_list = [
            static("django_type_rel/dtr.js"),
        ]
        return forms.Media(
            js=js_list,
        )
|
UTF-8
|
Python
| false | false | 2,013 |
7,559,142,481,941 |
2bfd48904795524a4d58084474756b5a1b28b6cf
|
aded4826a76e435ca6f2cd32a731e82319b707c0
|
/Set1/BreakRepeatingXOR.py
|
e974474d3051b3dda7a886db0f745b9bc5ec2109
|
[
"MIT"
] |
permissive
|
MDSilber/CryptoExercises
|
https://github.com/MDSilber/CryptoExercises
|
e1962a3dddc6bbe807772bec25fee1e053f277ea
|
be9fb61233c9ae49051c319b02deab5d36fa5512
|
refs/heads/master
| 2021-01-15T21:44:25.860451 | 2014-08-24T04:55:00 | 2014-08-24T05:00:33 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import Crypto
import Base64
def binary_for_string(string1):
    """Return *string1* as a concatenation of 8-bit binary codepoints."""
    pieces = []
    for ch in string1:
        pieces.append(format(ord(ch), '08b'))
    return ''.join(pieces)
def hamming_distance(string_1_binary, string_2_binary):
    """Count positions at which two bit strings differ.

    The original indexed over the first string's length with the
    Python-2-only ``xrange`` (and raised IndexError when the second
    string was shorter); ``zip`` iterates pairwise over the common
    prefix and works on both Python 2 and 3.
    """
    return sum(1 for a, b in zip(string_1_binary, string_2_binary) if a != b)
def find_key_length(encrypted_string):
    """Estimate the repeating-XOR key length via normalized Hamming distance.

    For each candidate length 2..39, compares the first two key-sized
    blocks of the ciphertext (as bit strings) and returns the candidate
    with the lowest Hamming distance per byte.

    Cleanups: removed the unused ``key_length`` local, replaced the
    Python-2-only ``xrange`` with ``range``, and replaced
    ``sorted(...)[0]`` with ``min(...)`` (no need to sort all 38 scores).
    """
    binary_string = binary_for_string(encrypted_string)
    key_lengths = {}
    for i in range(2, 40):
        substring_1 = binary_string[:8 * i]
        substring_2 = binary_string[8 * i:16 * i]
        key_lengths[i] = float(hamming_distance(substring_1, substring_2)) / float(i)
    return min(key_lengths.items(), key=lambda x: x[1])[0]
def chunk_string_into_blocks(string_to_be_chunked, length):
    """Split the string into consecutive blocks of *length* characters.

    The final block may be shorter when *length* does not divide the
    string evenly. (``xrange`` replaced with ``range`` for Python 2/3
    compatibility.)
    """
    return [string_to_be_chunked[x:x + length]
            for x in range(0, len(string_to_be_chunked), length)]
def encoded_transposed_chunks(chunks, length):
    """Transpose key-sized blocks: output row i collects byte i of each block.

    BUGFIX: the original appended the *entire* block
    (``transposed_chunks[i % length] += bytes(chunk)``) instead of the
    single character ``chunk[i]``, which broke the transposition — the
    commented-out lines showed the intent. A final block shorter than
    *length* is handled by skipping the missing positions instead of
    raising IndexError. ``xrange`` replaced with ``range``.
    """
    # start with array of empty strings
    transposed_chunks = ['' for _ in range(length)]
    for i in range(length):
        for chunk in chunks:
            if i < len(chunk):
                transposed_chunks[i] += chunk[i]
    return transposed_chunks
def decode_transposed_chunks(chunks):
    # Brute-force the best single-byte XOR key for each transposed row.
    # NOTE(review): presumably brute_single_char_xor returns
    # (plaintext, ...) and [0] keeps the plaintext — confirm in Crypto.
    return [Crypto.brute_single_char_xor(chunk)[0] for chunk in chunks]
def reconstruct_message_from_decoded_transposed_chunks(chunks):
    """Invert the transposition: interleave row characters back into a message.

    Reads position i of every row in order. The original iterated the
    first row's length with ``xrange`` and raised IndexError on ragged
    rows (the last key block is usually short) and on empty input; this
    version iterates to the longest row and skips exhausted rows.
    """
    message = ''
    if not chunks:
        return message
    longest = max(len(chunk) for chunk in chunks)
    for i in range(longest):
        for chunk in chunks:
            if i < len(chunk):
                message += str(chunk[i])
    return message
def test_chunking(message):
    # Round-trip sanity check: chunk -> transpose -> reconstruct should
    # reproduce the original message (for lengths divisible by 4).
    # NOTE(review): with the buggy encoded_transposed_chunks (appends the
    # whole chunk, not chunk[i]) this assertion appears to fail — verify.
    chunks = chunk_string_into_blocks(message, 4)
    transposed_chunks = encoded_transposed_chunks(chunks, 4)
    new_message = reconstruct_message_from_decoded_transposed_chunks(transposed_chunks)
    assert(message == new_message)
    print new_message
# running the code
# Pipeline: read base64 ciphertext, decode to raw bytes, estimate the key
# length, chunk and transpose, brute-force each row's single-byte key,
# then interleave the decoded rows back into the plaintext.
encoded_string = open('./6.txt', 'r').read()
encoded_binary_string = Base64.base_64_to_bytes(encoded_string)
# print encoded_string
key_length = find_key_length(encoded_binary_string)
print key_length
encoded_chunks = chunk_string_into_blocks(encoded_binary_string, key_length)
# # print encoded_chunks
transposed_chunks = encoded_transposed_chunks(encoded_chunks, key_length)
# # print transposed_chunks
decoded_transposed_chunks = decode_transposed_chunks(transposed_chunks)
# #print decoded_transposed_chunks
print reconstruct_message_from_decoded_transposed_chunks(decoded_transposed_chunks)
|
UTF-8
|
Python
| false | false | 2,014 |
15,685,220,593,155 |
d4831972f40f9c49a6641939772cd5866de1ad61
|
ecb0246387e16c4cd492c829b82f8242b98e1b07
|
/pythonlibs/mgear/maya/transform.py
|
936c584efee5f36c2976246aa7960cd3206cb3fe
|
[] |
no_license
|
jpasserin/mgear
|
https://github.com/jpasserin/mgear
|
fc9fbe76ec16cc95d89369ad68d57213157661e5
|
f916014f58a77fe20028b45736f39061b9f61b7b
|
refs/heads/master
| 2020-05-28T14:16:59.116355 | 2011-07-17T11:15:23 | 2011-07-17T11:15:23 | 2,061,394 | 17 | 8 | null | null | null | null | null | null | null | null | null | null | null | null | null |
'''
This file is part of MGEAR.
MGEAR is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/lgpl.html>.
Author: Jeremie Passerin [email protected]
Url: http://www.jeremiepasserin.com
Date: 2011 / 07 / 13
'''
## @package mgear.maya.transform
# @author Jeremie Passerin
#
#############################################
# GLOBAL
#############################################
from pymel.core.general import *
from pymel.util import *
import pymel.core.datatypes as dt
import mgear.maya.vector as vec
#############################################
# TRANSFORM
#############################################
def getTranslation(node, worldSpace=True):
    """Return the node's translation vector.

    BUGFIX: *worldSpace* was accepted but ignored (space was always
    "world"); it now selects between world and object space.
    """
    space = "world" if worldSpace else "object"
    return node.getTranslation(space=space)
def getTransform(node, worldSpace=True):
    """Return the node's 4x4 transform matrix.

    BUGFIX: *worldSpace* was accepted but ignored (hard-coded True); it
    is now forwarded to pymel's getMatrix.
    """
    return node.getMatrix(worldSpace=worldSpace)
def getTransformLookingAt(pos, lookat, normal, axis="xy", negate=False):
    # Build an orthonormal basis: `a` aims from pos toward lookat (flipped
    # when negate), `c` = a x normal, `b` = c x a; then map (a, b, c) onto
    # world axes according to the two-letter `axis` spec — first letter
    # gets the aim vector, second gets `b`.
    normal.normalize()
    if negate:
        a = pos - lookat
#        normal *= -1
    else:
        a = lookat - pos
    a.normalize()
    c = cross(a, normal)
    c.normalize()
    b = cross(c, a)
    b.normalize()
    if axis == "xy":
        X = a
        Y = b
        Z = c
    elif axis == "xz":
        X = a
        Z = b
        Y = -c
    elif axis == "yx":
        Y = a
        X = b
        Z = -c
    elif axis == "yz":
        Y = a
        Z = b
        X = c
    elif axis == "zx":
        Z = a
        X = b
        Y = c
    elif axis == "zy":
        Z = a
        Y = b
        X = -c
    # Row-major matrix: rows 0-2 are the basis axes, row 3 the position.
    m = dt.Matrix()
    m[0] = [X[0], X[1], X[2], 0.0]
    m[1] = [Y[0], Y[1], Y[2], 0.0]
    m[2] = [Z[0], Z[1], Z[2], 0.0]
    m[3] = [pos[0], pos[1], pos[2], 1.0]
    return m
# ===========================================================
def getChainTransform(positions, normal, negate=False):
    # Build one transform per bone of the chain, each aiming at the next
    # position with the "xz" axis convention; the up-vector is carried
    # along the chain via getTransposedVector.
    # Draw
    transforms = []
    for i in range(len(positions)-1):
        # NOTE(review): at i == 0 this wraps to positions[-1], but v0 is
        # only read inside the `i > 0` branch, so the wraparound is benign.
        v0 = positions[i-1]
        v1 = positions[i]
        v2 = positions[i+1]
        # Normal Offset
        if i > 0:
            normal = vec.getTransposedVector(normal, [v0, v1], [v1, v2])
        t = getTransformLookingAt(v1, v2, normal, "xz", negate)
        transforms.append(t)
    return transforms
def getTransformFromPos(pos):
    """Build a matrix with identity rotation positioned at *pos*."""
    mat = dt.Matrix()
    identity_rows = ([1.0, 0, 0, 0.0],
                     [0, 1.0, 0, 0.0],
                     [0, 0, 1.0, 0.0])
    for row, values in enumerate(identity_rows):
        mat[row] = values
    mat[3] = [pos[0], pos[1], pos[2], 1.0]
    return mat
def setMatrixPosition(in_m, pos):
    """Return a new matrix copying *in_m*'s rotation rows with *pos* as translation."""
    result = dt.Matrix()
    for row in range(3):
        result[row] = in_m[row]
    result[3] = [pos[0], pos[1], pos[2], 1.0]
    return result
def setMatrixRotation(m, rot):
    """Write the three axis vectors of *rot* into *m*'s rotation rows.

    Mutates *m* in place and returns it; vectors are used as-is (not
    normalized), so scale is preserved.
    """
    for row in range(3):
        axis = rot[row]
        m[row] = [axis[0], axis[1], axis[2], 0.0]
    return m
# filterTransform ==========================================
## Retrieve a transformation filtered.
# @param t SITransformation - Reference transformation.
# @param translation Boolean - True to match translation.
# @param rotation Boolean - True to match rotation.
# @param scaling Boolean - True to match scaling.
# @return SITransformation - The filtered transformation
def getFilteredTransform(m, translation=True, rotation=True, scaling=True):
    # Decompose the source matrix into its row vectors: translation t and
    # basis axes x/y/z (which carry both orientation and scale).
    t = dt.Vector(m[3][0],m[3][1],m[3][2])
    x = dt.Vector(m[0][0],m[0][1],m[0][2])
    y = dt.Vector(m[1][0],m[1][1],m[1][2])
    z = dt.Vector(m[2][0],m[2][1],m[2][2])
    out = dt.Matrix()
    if translation:
        out = setMatrixPosition(out, t)
    if rotation and scaling:
        out = setMatrixRotation(out, [x,y,z])
    elif rotation and not scaling:
        # Normalized axes: keep orientation, drop scale.
        out = setMatrixRotation(out, [x.normal(), y.normal(), z.normal()])
    elif not rotation and scaling:
        # World-aligned axes scaled by each axis length: keep scale only.
        out = setMatrixRotation(out, [dt.Vector(1,0,0) * x.length(), dt.Vector(0,1,0) * y.length(), dt.Vector(0,0,1) * z.length()])
    return out
##########################################################
# ROTATION
##########################################################
# setRefPose =============================================
def getRotationFromAxis(in_a, in_b, axis="xy", negate=False):
    # Build an orthonormal basis from two guide vectors (same axis-mapping
    # scheme as getTransformLookingAt): a is the primary axis, b is
    # re-orthogonalized against it via two cross products.
    a = dt.Vector(in_a.x, in_a.y, in_a.z)
    b = dt.Vector(in_b.x, in_b.y, in_b.z)
    c = dt.Vector()
    if negate:
        a *= -1
    a.normalize()
    c = a ^ b
    c.normalize()
    b = c ^ a
    b.normalize()
    if axis == "xy":
        x = a
        y = b
        z = c
    elif axis == "xz":
        x = a
        z = b
        y = -c
    elif axis == "yx":
        y = a
        x = b
        z = -c
    elif axis == "yz":
        y = a
        z = b
        x = c
    elif axis == "zx":
        z = a
        x = b
        y = c
    elif axis == "zy":
        z = a
        y = b
        x = -c
    # setMatrixRotation mutates m in place; the return value is the same m.
    m = dt.Matrix()
    setMatrixRotation(m, [x,y,z])
    return m
|
UTF-8
|
Python
| false | false | 2,011 |
816,043,835,390 |
4e0d9146e5b12b064c7de4e4aee088fc09320411
|
0d2d96c9c47d4856a22f9afbc46ed81c771f16e8
|
/globalvar.py
|
91d7a7af738c902579241bb62807392ddc45981f
|
[] |
no_license
|
root/OG
|
https://github.com/root/OG
|
75b57074a98f27dd54b79413b6ee7d98e80ff2c6
|
77deb361e8b734aca40a834a7f95505156018039
|
refs/heads/master
| 2020-04-24T21:16:34.051209 | 2013-11-15T13:02:44 | 2013-11-15T13:02:44 | 14,423,632 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# NOTE(review): a module-level `global` statement is a no-op — names
# assigned at module scope are already global.
global cmdproglue,path,problempath
# Shell preamble prepended to generated command scripts.
cmdproglue = '''
#!/bin/bash
PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:~/bin
export PATH
'''
# Root directory of the judge's problem definitions.
problempath = "/home/kid/OG/problist"
shellpath = "/home/kid/OG/"
# Where submitted code files are stored.
path = problempath+"/code_file/"
newdir = problempath + "/DB/"
# Program/language type registry; populated elsewhere.
programtype = dict()
|
UTF-8
|
Python
| false | false | 2,013 |
5,583,457,523,822 |
0df0d01cab9e79314928818c8dbef4733f841078
|
ce46a3cab26cfcf40c14f766ea94995e73aec3e5
|
/blogDjango/blog/tests.py
|
cb3e1a97f83d467a274287de2d1a38d0a35830e9
|
[] |
no_license
|
rebecabordini/blogDjango
|
https://github.com/rebecabordini/blogDjango
|
270dbae5e9e57ca3aeab6ef62f4d283f945692a3
|
57c96f063cff7f07f5cfab3fde86f09385db0314
|
refs/heads/master
| 2016-09-16T14:05:41.135264 | 2014-10-07T14:33:31 | 2014-10-07T14:33:31 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import unittest
import datetime
from blog.models import Blog, Category
from blog import views
class ValidacoesPost(unittest.TestCase):
    """Checks that Blog manager methods split posts by publication date."""
    # Unused class-level flag; kept as-is.
    propriedade_da_classe = False
    @classmethod
    def setUpClass(cls):
        # Start from a clean slate, then create one post dated today and
        # one dated tomorrow under a shared category.
        Blog.objects.all().delete()
        Category.objects.all().delete()
        categoria = Category.objects.create(title='Artes', slug='artes')
        dataAtual = datetime.datetime.now().date()
        dataFutura = dataAtual + datetime.timedelta(days=1)
        kwargs = {
            'body': 'Teste',
            'category': categoria,
        }
        cls.post_atual = Blog.objects.create(title='BlogTestSucesso',slug='blogtestesucesso',posted=dataAtual, **kwargs)
        cls.post_futuro = Blog.objects.create(title='BlogTestFalha',slug='blogtestefalha',posted=dataFutura, **kwargs)
    def testApenasPostsNoPassadoSaoExibidos(self):
        # Today's post must be listed among past-published posts.
        self.assertIn(self.post_atual, Blog.objects.posts_publicados_no_passado())
    def testApenasPostsNoFuturoSaoExibidos(self):
        # Tomorrow's post must be listed among future-published posts.
        self.assertIn(self.post_futuro, Blog.objects.posts_publicados_no_futuro())
if __name__ == '__main__':
    unittest.main()
|
UTF-8
|
Python
| false | false | 2,014 |
13,022,340,873,236 |
b0a66d6932521b95541a5621b3d79fff678815ef
|
5f2608d4a06e96c3a032ddb66a6d7e160080b5b0
|
/week1/homework_w1_q_b3.py
|
472d5fb86cf3b157491c378a0693ede4d41edf18
|
[] |
no_license
|
sheikhusmanshakeel/statistical-mechanics-ens
|
https://github.com/sheikhusmanshakeel/statistical-mechanics-ens
|
f3e150030073f3ca106a072b4774502b02b8f1d0
|
ba483dc9ba291cbd6cd757edf5fc2ae362ff3df7
|
refs/heads/master
| 2020-04-08T21:40:33.580142 | 2014-04-28T21:10:19 | 2014-04-28T21:10:19 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import random, math, pylab, numpy
def markov_pi(N, delta):
    """Markov-chain estimate of pi/4: count states inside the unit circle.

    Starts a walker at (1, 1) and proposes N uniform moves of up to
    ``delta`` per coordinate, rejecting moves that leave the square
    [-1, 1]^2. Returns how many of the N states (after each proposal)
    fell inside the unit circle.
    """
    x, y = 1.0, 1.0
    n_hits = 0
    for _ in range(N):
        step_x = random.uniform(-delta, delta)
        step_y = random.uniform(-delta, delta)
        if abs(x + step_x) < 1.0 and abs(y + step_y) < 1.0:
            x, y = x + step_x, y + step_y
        if x ** 2 + y ** 2 < 1.0:
            n_hits += 1
    return n_hits
def pi_values(delta):
    """Run 1000 Markov-chain pi estimates of 4000 trials each.

    Returns (mean estimate, RMS deviation from the true value of pi)
    for the given step size *delta*.
    """
    n_runs = 1000
    n_trials = 4000
    # NOTE(review): `sum` shadows the builtin of the same name.
    sum = 0.0
    sigma = 0.0
    for run in range(n_runs):
        pi_est = 4.0 * markov_pi(n_trials, delta) / float(n_trials)
        sum += pi_est
        # Accumulate squared error about the true pi (not the sample mean).
        sigma += (pi_est - math.pi) ** 2
    return sum / float(n_runs), math.sqrt(sigma/(n_runs))
#deltas = [x*0.1 for x in range(9, 16)] # range 0.9, 1.0, 1.1 ... 2.0
# Sweep step sizes 0.1 .. 5.0 and plot the RMS error of the pi estimate
# as a function of the Markov-chain step size delta.
deltas = [x*0.1 for x in range(1, 51)] # range 0.1, 0.2, 0.3 ... 5.0
accepts = [pi_values(d) for d in deltas]
rmses = [a[1] for a in accepts]
print 'Data'
print deltas
print rmses
pylab.plot(deltas, rmses, 'o')
pylab.xlabel('delta')
pylab.ylabel('rms error')
pylab.savefig('rms error varying deltas b3.png')
pylab.show()
|
UTF-8
|
Python
| false | false | 2,014 |
7,619,272,004,304 |
989c7d317dccc171a8e3c3a6a63703ecd50d4f31
|
af1776e5ef0c36fe209dcdd49cffbceb0a351eda
|
/FrameApplication/Cache.py
|
a515283e2007d65c7f344706d5ffac092fedcf54
|
[] |
no_license
|
HeliZhu/WorkshopRepo
|
https://github.com/HeliZhu/WorkshopRepo
|
cbd4d1af1167be13f704965682ea37815796cb70
|
bf0c901c8d24b1c9de858ed1df715e0bb947b38d
|
refs/heads/master
| 2020-12-25T11:57:56.903306 | 2013-08-30T07:30:17 | 2013-08-30T07:30:17 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
CURRENT_FOLDER_PATH = os.path.dirname(__file__)
CACHE_FOLDER_PATH = os.path.join(CURRENT_FOLDER_PATH, '..', 'Cache')
def _getFilePath(function):
    # Decorator: resolve *fileName* against CACHE_FOLDER_PATH and call the
    # wrapped function with the resulting absolute path.
    def __getFilePath(fileName, *args, **kwargs):
        return function(os.path.abspath(os.path.join(CACHE_FOLDER_PATH, fileName)), *args, **kwargs)
    return __getFilePath
@_getFilePath
def getCache(fileName):
    """Return the cached file's contents, or '' when unavailable.

    BUGFIX: the bare ``except:`` swallowed *every* exception (including
    KeyboardInterrupt and programming errors); only file-access errors
    are treated as a cache miss now.
    """
    try:
        with open(fileName) as cacheFile:
            return cacheFile.read()
    except (IOError, OSError):
        return ''
@_getFilePath
def writeCache(fileName, content):
    # Create or overwrite the cache file with *content*.
    with open(fileName, 'w') as cacheFile:
        cacheFile.write(content)
|
UTF-8
|
Python
| false | false | 2,013 |
1,477,468,799,044 |
9a180230021a476240c8761ac051c8c9f9f50a1b
|
580368d8129abfb13d941217a067a26fd4876532
|
/yahoo-beijing/run.py
|
095002b908f9b796ed8e904a3de28cb2f13bf537
|
[] |
no_license
|
fuxiang90/yahoo-dcf-2013
|
https://github.com/fuxiang90/yahoo-dcf-2013
|
38082213fbc0d22b36b32bb25dbeebb84daafbce
|
094e4ff9e30f7d2f43bf0e1cddc00037ef26c67a
|
refs/heads/master
| 2021-01-16T01:01:35.013916 | 2013-05-19T01:46:22 | 2013-05-19T01:46:22 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import urllib
import urllib2
import sys
import json
import re
import yql
api_key = "77f1cc3b101c84e5c2694ff1ab73172b"
def get_json_url(url):
    # TODO: unimplemented stub — the commented lines sketch a Yahoo
    # geocode lookup that would return JSON for a location.
#    url = "http://where.yahooapis.com/geocode?location=" + str(location) + "&flags=J&gflags=R&appid=" + str(yhack_yahoo_app_id)
#    json = urllib2.urlopen(woeid_api_url).read()
    pass
def get_json_flickr_yql():
    # Query Flickr through YQL: find photo ids matching a text search,
    # then fetch the available sizes for each, and print the result rows.
    url = "http://query.yahooapis.com/v1/public/yql?q="
    query_str = 'select * from flickr.photos.sizes where photo_id in (select id from flickr.photos.search where text=@text and api_key=@api_key limit 100) and api_key =@api_key'
#    select * from flickr.photos.sizes where photo_id in (select id from flickr.photos.search where text="supermaket" and api_key="77f1cc3b101c84e5c2694ff1ab73172b" limit 100) and api_key ="77f1cc3b101c84e5c2694ff1ab73172b"
#    query_str.replace(' ', "%20")
    print url+query_str
#    print urllib2.urlopen(url+query_str).read()
    y = yql.Public()
    # NOTE(review): `query` is built but never executed; query_str with
    # bound @text/@api_key parameters is what actually runs below.
    query = 'select * from flickr.photos.search where text=@text and api_key="77f1cc3b101c84e5c2694ff1ab73172b" limit 3';
#    l = y.execute(query ,{"text": "supermaket"})
    l = y.execute(query_str , {"text":"supermaket" ,"api_key":api_key})
    for row in l.rows:
        print row
#    print json_str
if __name__ == '__main__':
    # Earlier finance-YQL experiments kept for reference:
#    result = urllib2.urlopen('http://query.yahooapis.com/v1/public/yql?q=select%20*%20from%20yahoo.finance.quotes%20where%20symbol%3D'FFIV'%0A%09%09&format=json&env=http%3A%2F%2Fdatatables.org%2Falltables.env&callback=').read()
#    print result.read()
#    y = yql.Public()
#    result = y.execute('use "http://www.datatables.org/yahoo/finance/yahoo.finance.quotes.xml" as yahoo.finance.quotes; select * from yahoo.finance.quotes where symbol in ("YHOO")')
#
    get_json_flickr_yql()
    print "done it"
|
UTF-8
|
Python
| false | false | 2,013 |
2,757,369,040,345 |
99e9ad9fc9c24b01d9c16e4cf898e486fd454f10
|
8fce3f0920ecd92a7891c94536dfca45a13ce6e4
|
/channellog.py
|
75b37f4c918028427eac44e609c754439b86db6a
|
[] |
no_license
|
tomasklapka/sioclog
|
https://github.com/tomasklapka/sioclog
|
e4b970d7cda5b8686889f95a250f7463dbf8e8c8
|
6f93c22125a3c6f49fc8eac200ab0f8bd8aa986a
|
refs/heads/master
| 2021-01-19T13:07:16.739097 | 2009-09-29T20:42:42 | 2009-09-29T20:42:42 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
"""channellog.py - a module for filtering and rendering streams of IRC data
Example usage:
from channellog import OffFilter, ChannelFilter, TimeFilter, HtmlSink, TurtleSink, RawSink, ChannelsAndDaysSink, run
pipeline = ChannelFilter("#sioc", RawSink())
run(file("sioc.log"), pipeline)
"""
import sys, re
from traceback import print_exc
import ircbase
ircbase.dbg = False
from ircbase import parseprefix, Line, Irc
from templating import new_context, get_template, expand_template
from turtle import PlainLiteral, TypedLiteral, TurtleWriter
from vocabulary import namespaces, RDF, RDFS, OWL, DC, DCTERMS, XSD, FOAF, SIOC, SIOCT, DS
from htmlutil import html_escape, html_unescape
def parse_action(text):
    """Split a CTCP ACTION ("/me") message.

    Returns (True, body) when *text* is wrapped in \\x01ACTION ...\\x01;
    otherwise (False, text) unchanged.
    """
    prefix = "\x01ACTION "
    if text.startswith(prefix) and text.endswith("\x01"):
        return True, text[len(prefix):-1]
    return False, text
class IrcFilter(Irc):
    """Base class for pipeline stages that forward IRC lines to a sink."""

    def __init__(self, sink):
        self.sink = sink

    def close(self):
        """Propagate close() down the pipeline when a sink is attached."""
        if not self.sink:
            return
        self.sink.close()
class IrcSink(IrcFilter):
    """A pipeline end-point: a filter with no downstream sink."""
    def __init__(self):
        IrcFilter.__init__(self, None)
class AddRegisteredFilter(IrcFilter):
    """Strip a leading '+'/'-' marker from messages.

    The marker is recorded on the line as ``line.registered`` and removed
    from the message body; unmarked lines get ``registered = None``.
    """

    def irc_PRIVMSG(self, line):
        body = line.args[1]
        if body[0] in ("+", "-"):
            line.registered = body[0]
            line.args[1] = body[1:]
        else:
            line.registered = None

    irc_NOTICE = irc_PRIVMSG

    def handleReceivedFallback(self, line):
        self.sink.handleReceived(line)
# URL-recognition patterns paired with their anchor-tag replacements,
# applied to HTML-escaped message text. Order matters: delimited forms
# (<...>, "...", [...|) are tried before the bare catch-alls, and the
# http(s) forms before the scheme-less www forms.
link_res = [
    (r'<(http(s)?://[^ ]*)>', r'<<a href="\1">\1</a>>'),
    (r'"(http(s)?://[^ ]*)"', r'"<a href="\1">\1</a>"'),
    (r'\[(http(s)?://[^ |]*)(\||\])', r'[<a href="\1">\1</a>\3'),
    (r'(http(s)?://[^ ]*[^ ,.\1])[)](,? |$)', r'<a href="\1">\1</a>)\3'),
    (r'(http(s)?://[^ ]*[^ ,.\1])', r'<a href="\1">\1</a>'),
    (r'(^|[ (])(www\.[^ ]*[^ ,.\1])[)](,? |$)', r'\1<a href="http://\2">\2</a>)\3'),
    (r'(^|[ (])(www\.[^ ]*[^ ,.\1])', r'\1<a href="http://\2">\2</a>'),
]
# Pre-compile the patterns once at import time.
link_res = [(re.compile(link_re), sub) for (link_re, sub) in link_res]
class AddLinksFilter(IrcFilter):
    """Linkify URLs in message bodies.

    Sets ``line.content_html`` (HTML-escaped body with anchor tags) and
    ``line.links`` (the bare URIs found, un-HTML-escaped).
    """
    def irc_PRIVMSG(self, line):
        content = line.args[1]
        content_html = html_escape(content)
        links = []
        for link_re, sub in link_res:
            content_html_sub = link_re.sub(sub, content_html)
            # NOTE(review): this relies on re.sub returning the *same*
            # object when no substitution happened (CPython behaviour),
            # detected via `is not` — fragile; confirm before porting.
            if content_html_sub is not content_html:
                for groups in link_re.findall(content_html):
                    # The www patterns capture the host in group 2; the
                    # http(s) patterns capture the full URL in group 1.
                    if groups[1].startswith("www"):
                        uri = "http://" + groups[1]
                    else:
                        uri = groups[0]
                    links += [html_unescape(uri)]
                break # XXX later link_res could match other parts of the string
        line.content_html = content_html_sub
        line.links = links
    def handleReceivedFallback(self, line):
        self.sink.handleReceived(line)
class ChannelFilter(IrcFilter):
    """A filter that only passes on lines related to a given channel"""
    def __init__(self, channel, sink):
        IrcFilter.__init__(self, sink)
        self.registered = False
        self.nick = None # nick
        self.user = None # user@host
        self.serverprefix = None # irc.jyu.fi
        self.clientprefix = None # nick!user@host
        self.channels = []
        self.away = False
        self.awaymsg = None
        self.namreply = {}
        self.interestingchannel = channel
        self.sink = sink
        # Presence maps kept in sync by the irc_* handlers below, so that
        # NICK/QUIT lines (which carry no channel) can be routed.
        self.nick2channels = {}
        self.channel2nicks = {}
    def isme(self, prefix):
        # True when the line's prefix refers to our own connection.
        return parseprefix(prefix)[0] == self.nick
    def handleReceived(self, line):
        if line.prefix:
            nick,_ = parseprefix(line.prefix)
        else:
            nick = None
        # Channels the sender was in *before* this line updates the maps —
        # needed because QUIT/NICK remove the sender from the maps.
        relatedbefore = self.nick2channels.get(nick, [])
        Irc.handleReceived(self, line)
        # FIXME: Many commands missing here!
        if line.cmd in ('NICK', 'QUIT'):
            if self.interestingchannel in relatedbefore:
                self.sink.handleReceived(line)
        elif line.cmd in ('JOIN','PART','KICK','PRIVMSG','NOTICE','TOPIC'):
            if line.args[0].lower() == self.interestingchannel:
                self.sink.handleReceived(line)
        # Numeric replies carry the channel at different argument positions.
        elif line.cmd in ('366','332','333','329'):
            if line.args[1].lower() == self.interestingchannel:
                self.sink.handleReceived(line)
        elif line.cmd in ('353',):
            if line.args[2].lower() == self.interestingchannel:
                self.sink.handleReceived(line)
    handleReceivedFallback = lambda self,x:None
    # state tracking:
    def irc_RPL_WELCOME(self, line):
        self.nick = line.args[0]
        _, self.user = parseprefix(line.args[-1].split(' ')[-1])
        self.clientprefix = "%s!%s" % (self.nick, self.user)
        # reset state from previous connects:
        self.channels = []
        self.away = False
        self.awaymsg = None
        self.namreply = {}
        self.nick2channels = {}
        self.channel2nicks = {}
    def irc_NICK(self, line):
        # we get messages about other clients as well
        if self.isme(line.prefix):
            self.nick = line.args[0]
            self.clientprefix = self.nick + '!' + self.user
        # Rename the sender in both presence maps.
        oldnick,_ = parseprefix(line.prefix)
        newnick = line.args[0]
        self.nick2channels[newnick] = self.nick2channels[oldnick]
        del self.nick2channels[oldnick]
        for c in self.nick2channels[newnick]:
            i = self.channel2nicks[c].index(oldnick)
            self.channel2nicks[c][i] = newnick
    def irc_JOIN(self, line):
        channel = line.args[0].lower()
        if self.isme(line.prefix):
            self.channels.append(channel)
            self.channel2nicks[channel] = []
        nick,_ = parseprefix(line.prefix)
        if not nick in self.nick2channels:
            self.nick2channels[nick] = []
        self.nick2channels[nick].append(channel)
        self.channel2nicks[channel].append(nick)
    def irc_PART(self, line):
        channel = line.args[0].lower()
        if self.isme(line.prefix):
            self.channels.remove(channel)
            del self.channel2nicks[channel]
        else:
            nick,_ = parseprefix(line.prefix)
            self.nick2channels[nick].remove(channel)
            self.channel2nicks[channel].remove(nick)
    def irc_KICK(self, line):
        channel = line.args[0].lower()
        if line.args[1] == self.nick:
            self.channels.remove(channel)
            del self.channel2nicks[channel]
        else:
            # The kicked nick arrives in arbitrary case; find the known
            # spelling by case-insensitive comparison.
            nickword = line.args[1].lower()
            for n in self.nick2channels.keys():
                if n.lower() == nickword:
                    nick = n
            self.nick2channels[nick].remove(channel)
            self.channel2nicks[channel].remove(nick)
    def irc_QUIT(self, line):
        nick,_ = parseprefix(line.prefix)
        for c in self.nick2channels[nick]:
            self.channel2nicks[c].remove(nick)
        del self.nick2channels[nick]
#2008-09-25T18:32:40+03:00 :irc.jyu.fi 353 tuukkah_ = #footest :tuukkah_ @tuukkah
#2008-09-25T18:32:40+03:00 :irc.jyu.fi 366 tuukkah_ #footest :End of NAMES list.
    def irc_RPL_NAMREPLY(self, line):
        # Accumulate NAMES fragments until RPL_ENDOFNAMES arrives.
        channel = line.args[2].lower()
        if not channel in self.namreply:
            self.namreply[channel] = []
        self.namreply[channel] += line.args[3].split(" ")
    def irc_RPL_ENDOFNAMES(self, line):
        # Replace the channel's member list with the accumulated NAMES data.
        channel = line.args[1].lower()
        newnicks = self.namreply.pop(channel)
        oldnicks = self.channel2nicks[channel]
        for n in oldnicks:
            self.nick2channels[n].remove(channel)
        self.channel2nicks[channel] = []
        for n in newnicks:
            if not n:
                continue
            # Strip op/voice sigils before storing the nick.
            nick = n.lstrip("@").lstrip("+")
            self.channel2nicks[channel].append(nick)
            if not nick in self.nick2channels:
                self.nick2channels[nick] = []
            self.nick2channels[nick].append(channel)
    def irc_RPL_UNAWAY(self, _):
        self.away = False
    def irc_RPL_NOWAWAY(self, _):
        self.away = True
class TimeFilter(IrcFilter):
    """Pass through only lines whose timestamp begins with *timeprefix*."""

    def __init__(self, timeprefix, sink):
        IrcFilter.__init__(self, sink)
        self.timeprefix = timeprefix
        self.sink = sink

    def handleReceivedFallback(self, line):
        if not line.ztime.startswith(self.timeprefix):
            return
        self.sink.handleReceived(line)
class OffFilter(IrcFilter):
    """Drop messages marked off-the-record with a leading "[off]"."""

    def irc_PRIVMSG(self, line):
        body = line.args[1]
        # XXX need to remove leading + or - from content?
        if body.startswith(("[off]", "\1ACTION [off]")):
            # hide off-record statements
            return True

    def handleReceivedFallback(self, line):
        self.sink.handleReceived(line)
class EventSink(IrcSink):
    """Collects PRIVMSG lines as event dicts for later rendering."""
    def __init__(self, root, channel, timeprefix, selfuri):
        IrcSink.__init__(self)
        self.root = root
        self.channel = channel
        self.timeprefix = timeprefix
        self.datauri = selfuri
        self.events = []
    def irc_PRIVMSG(self, line):
        id = line.ztime.split("T")[1][:-1] # FIXME not unique
        date = line.ztime.split("T")[0]
        time = id.split(".")[0]
        nick,_acct = parseprefix(line.prefix)
        channel = line.args[0]
        channelURI = self.root + channel[1:] + "#channel"
        # Linkified HTML body prepared upstream by AddLinksFilter.
        content = line.content_html #line.args[1]
        creator = self.root + "users/" + nick + "#user"
        # Detect /me lines and strip the CTCP ACTION wrapper.
        action, content = parse_action(content)
        # NOTE(review): str.decode — Python 2 era code; fails on py3 str.
        self.events.append({'id': id, 'time': time,
                            'date': date,
                            'channel': channel,
                            'channelURI': channelURI,
                            'isAction': action,
                            'creator': creator, 'nick': nick,
                            'content': content.decode("utf-8")})
    handleReceivedFallback = lambda self,x:None
class HtmlSink(EventSink):
    """A sink that renders the lines it receives as a HTML table"""
    def __init__(self, crumbs, root, channel, timeprefix, selfuri):
        EventSink.__init__(self, root, channel, timeprefix, selfuri)
        self.crumbs = crumbs
        self.title = "#%s on %s" % (channel, timeprefix)
        # Template context is built incrementally; expanded at close().
        self.context = context = new_context()
        context.addGlobal('crumbs', self.crumbs)
        context.addGlobal('datarooturi', self.root)
        context.addGlobal('datauri', self.datauri)
    def close(self):
        # All lines received: finish the context and expand the template.
        context = self.context
        channelID = self.channel.strip("#").lower()
        channelURI = self.root + channelID + "#channel"
        context.addGlobal('channel', {'name': channelID,
                                      'uri': channelURI})
        context.addGlobal('timeprefix', self.timeprefix)
        context.addGlobal('title', self.title)
        context.addGlobal('events', self.events)
        template = get_template('channellog')
        expand_template(template, context)
class BackLogHtmlSink(HtmlSink):
    """HtmlSink variant that renders only what *nick* has not yet seen.

    Events up to the user's last activity are dropped; *up_to* optionally
    caps the log at a given timestamp prefix (exclusive).
    """
    def __init__(self, nick, up_to, *args):
        HtmlSink.__init__(self, *args)
        self.nick = nick
        self.up_to = up_to
        self.cleared = False  # did we ever discard already-seen backlog?
        self.clear = False    # pending "user was active" marker
    def handleReceived(self, line):
        # Skip everything at or after the requested cutoff.
        if self.up_to and line.ztime[:len(self.up_to)] >= self.up_to:
            return
        if line.prefix:
            nick,_acct = parseprefix(line.prefix)
            if nick == self.nick:
                # The user's own activity marks the log as read — except a
                # plain JOIN, or a QUIT mentioning freenode.net (netsplit).
                if not (line.cmd == "JOIN" or (line.cmd == "QUIT" and line.args[0].count("freenode.net"))):
                    self.clear = True
            elif line.cmd == "PRIVMSG" and self.clear: # XXX ? clear only when someone else says something
                self.clear = False
                self.cleared = True
                self.events = self.events[-1:] # everything this far was old news to nick
                self.cleartime = line.ztime
        HtmlSink.handleReceived(self, line)
    def close(self):
        if not self.cleared:
            self.events = [] # we never cleared, thus nothing was backlog
        else:
            # Link back to the older part of the log that was trimmed away.
            self.context.addGlobal('prevlink', self.datauri+"?up_to="+self.cleartime)
        HtmlSink.close(self)
class UserFilter(IrcFilter):
    """A filter that only passes on lines spoken by a given user."""
    def __init__(self, user, sink):
        IrcFilter.__init__(self, sink)
        self.user = user  # nick to match against each line's prefix
    def handleReceived(self, line):
        # NOTE(review): this overrides handleReceived itself rather than
        # the handleReceivedFallback hook the other filters use, so the
        # base class's per-command irc_* dispatch is presumably bypassed
        # here — confirm that is intentional.
        if not line.prefix:
            return
        nick,_ = parseprefix(line.prefix)
        if nick == self.user:
            self.sink.handleReceived(line)
class ChannelMessageTailFilter(IrcFilter):
    """Retains only the most recent channel PRIVMSGs, replayed on close().

    At most n+1 messages are kept per channel (the newest plus up to n
    older ones); non-channel messages are dropped entirely.
    """
    def __init__(self, n, sink):
        IrcFilter.__init__(self, sink)
        self.n = n
        self.channels = {}  # channel -> [(seqno, line), ...], newest first
        self.count = 0      # global sequence number, for stable ordering
    def irc_PRIVMSG(self, line):
        channel = line.args[0]
        if not channel.startswith("#"):
            return True # filter out
        # Prepend the newest line; keep at most n older ones per channel.
        self.channels[channel] = [(self.count, line)] + self.channels.get(channel, [])[:self.n]
        self.count += 1
        return True # we'll rehandle this later
    def close(self):
        # Merge the per-channel tails and replay them in original arrival
        # order: the unique seqno in each tuple makes the sort global.
        events = sum(self.channels.values(), [])
        events.sort()
        for _,line in events:
            self.sink.handleReceived(line)
class TurtleSink(IrcSink):
    """A sink that renders the lines it receives as a Turtle RDF document"""
    def __init__(self, root, channel, timeprefix):
        IrcSink.__init__(self)
        self.root = root
        self.channel = channel
        self.timeprefix = timeprefix
        self.channelID = self.channel.strip("#").lower()
        self.channelURI = self.root + self.channelID + "#channel"
        # Legacy irc:// identifier, linked to the new URI via owl:sameAs.
        oldChannelURI = "irc://freenode/%23" + self.channelID
        self.triples = []     # accumulated (subject, predicate, object) rows
        self.base = self.root
        self.seenNicks = {}   # nicks encountered; user triples emitted in close()
        self.triples += [(self.channelURI, OWL.sameAs, oldChannelURI),
                         (self.channelURI, RDF.type, SIOC.Forum),
                         (self.channelURI, RDF.type, SIOCT.ChatChannel),
                         (self.channelURI, RDFS.label,
                          PlainLiteral("#" + self.channel)),
                         ]
    def irc_PRIVMSG(self, line):
        self.triples += self.create_triples(line)
    def create_triples(self, line):
        """Return the list of SIOC triples describing one chat message."""
        id = line.ztime.split("T")[1][:-1] # FIXME not unique
        time = line.ztime
        day = line.ztime.split("T")[0]
        nick,_acct = parseprefix(line.prefix)
        rawcontent = line.args[1]
        file = self.channelID + "/" + day  # log page the event will live on
        # XXX need to remove leading + or - from rawcontent?
        action, content = parse_action(rawcontent)
        if content.startswith("[off]"):
            return [] # hide off-record statements
        # Human-readable label: " * nick does x" for actions, "<nick> text"
        # for ordinary messages.
        if action:
            label = " * " + nick + " " + content
        else:
            label = "<" + nick + "> " + content
        event = self.root + file + "#" + id
        timestamp = TypedLiteral(time, XSD.dateTime)
        self.seenNicks[nick] = nick
        creator = self.root + "users/" + nick + "#user"
        return [None, # adds a blank line for clarity
                (self.channelURI, SIOC.container_of, event),
                (event, DCTERMS.created, timestamp),
                (event, SIOC.has_creator, creator),
                (event, SIOC.content, PlainLiteral(rawcontent)),
                (event, RDFS.label, PlainLiteral(label)),
                (event, RDF.type, SIOC.Post),
                ] + \
               [(event, SIOC.links_to, uri)
                for uri in line.links]
    def close(self):
        """Emit sioc:User triples for every speaker, then write the graph."""
        for nick in self.seenNicks:
            creator = self.root + "users/" + nick + "#user"
            oldCreator = "irc://freenode/" + nick + ",isuser"
            self.triples += [None,
                             (creator, OWL.sameAs, oldCreator),
                             (creator, RDFS.label, PlainLiteral(nick)),
                             (creator, RDF.type, SIOC.User),
                             ]
        writer = TurtleWriter(None, namespaces)
        title = "Log of #%s on %s" % (self.channel, self.timeprefix)
        # Document-level metadata about the generated page itself ("").
        writer.write([("", RDFS.label, PlainLiteral(title)),
                      ("", FOAF.primaryTopic, self.channelURI),
                      ])
        writer.setBase(self.base)
        writer.write(self.triples)
        writer.close()
class RawSink(IrcSink):
    """A sink that prints the lines it receives raw but timestamped"""
    def handleReceivedFallback(self, line):
        # Python 2 print; str(line) presumably yields the raw IRC
        # protocol line — confirm in the Line class.
        print "%s %s" % (line.time,line)
def incvalue(store, key):
    """Increment the integer counter kept in *store* under *key*.

    Missing keys are treated as zero, so the first call stores 1.
    """
    current = store.get(key, 0)
    store[key] = current + 1
class ChannelsAndDaysSink(IrcSink):
    """A sink that collects the channels and days of activity that it sees"""
    def __init__(self):
        IrcSink.__init__(self)
        # Aggregate activity counters, all keyed by plain strings:
        self.channels = {}        # channel name -> message count
        self.days = {}            # "YYYY-MM-DD" -> message count
        self.day2channels = {}    # day -> {channel -> count}
        self.channel2days = {}    # channel -> {day -> count}
        self.channel2topic = {}   # channel -> latest topic text seen
        self.nicks = {}           # nick -> message count
        self.nick2channels = {}   # nick -> {channel -> count}
        self.channel2nicks = {}   # channel -> {nick -> count}
        self.channel2latest = {}  # channel -> timestamp of latest message
        self.nick2latest = {}     # nick -> timestamp of latest message
    def irc_TOPIC(self, line):
        # Topic change command: args are (channel, new topic).
        channelId = line.args[0].strip("#").lower()
        self.channel2topic[channelId] = line.args[1]
        # print channelId, line.args[1]
    def irc_RPL_TOPIC(self, line):
        # Server topic reply: args are (target, channel, topic).
        channelId = line.args[1].strip("#").lower()
        self.channel2topic[channelId] = line.args[2]
        # print channelId, line.args[2]
    def irc_PRIVMSG(self, line):
        time = line.ztime
        day = time.split("T")[0]
        target = line.args[0]
        # Count only channel messages, not private ones.
        if not target.startswith('#'):
            return
        channelName = target.strip("#").lower()
        incvalue(self.days, day)
        incvalue(self.channels, channelName)
        incvalue(self.day2channels.setdefault(day, {}), channelName)
        incvalue(self.channel2days.setdefault(channelName, {}), day)
        self.channel2latest[channelName] = time
        # Per-nick stats need a prefix to extract the speaker from.
        if not line.prefix:
            return
        nick,_ = parseprefix(line.prefix)
        incvalue(self.nicks, nick)
        incvalue(self.nick2channels.setdefault(nick, {}), channelName)
        incvalue(self.channel2nicks.setdefault(channelName, {}), nick)
        self.nick2latest[nick] = time
    # All other commands are ignored.
    handleReceivedFallback = lambda self,x:None
class TaxonomySink(IrcSink):
    """A sink that collects the NickServ taxonomy information it sees"""
    def __init__(self):
        IrcSink.__init__(self)
        self.taxonomy_state = None     # nick whose taxonomy is being received
        self.taxonomy_response = None  # (nick, key, value) rows collected so far
        self.taxonomy = {}             # nick -> list of (nick, key, value)
    def irc_NOTICE(self, line):
        """Parse NickServ TAXONOMY replies out of the NOTICE stream."""
        # Ignore channel notices; taxonomy replies arrive as private notices.
        if line.args[0].startswith("#"):
            return False
        if not line.prefix or parseprefix(line.prefix)[0] != "NickServ":
            return False
        msg = line.args[1]
        if msg.startswith("Taxonomy for \2"):
            # Header "Taxonomy for ^Bnick^B:" — start collecting for nick
            # (\2 is the IRC bold control character).
            nick = msg[len("Taxonomy for \2"):-2]
            self.taxonomy_state = nick
            self.taxonomy_response = []
        elif (msg.startswith("End of \2") or
              msg.endswith("\2 is not registered.")):
            # NOTE(review): if a terminator arrives without a preceding
            # header, this records an entry under the key None — confirm
            # that is harmless for consumers of self.taxonomy.
            self.taxonomy[self.taxonomy_state] = self.taxonomy_response
            self.taxonomy_state = self.taxonomy_response = None
        elif self.taxonomy_state:
            # Body line "key ... :value" — keep the text after the colon.
            key, rest = msg.split(" ", 1)
            value = rest.split(":", 1)[1][1:]
            self.taxonomy_response.append((self.taxonomy_state, key, value))
def run(sources, pipeline):
    """Processes each line from the sources in the pipeline and closes it.

    sources may be a single filename/file object or a list of them; a
    source that cannot be opened or a line that cannot be parsed is
    reported on stderr and skipped rather than aborting the whole run.
    """
    if not isinstance(sources, list):
        sources = [sources]
    # Registration tracking is always wanted, so wrap it in unconditionally.
    pipeline = AddRegisteredFilter(pipeline)
    for source in sources:
        if not isinstance(source, file):  # Python 2 builtin file type
            try:
                source = file(source)
            except:
                print_exc()
                continue
        for i, l in enumerate(source):
            #print l
            # Each log line is "<iso-timestamp> <raw irc line>\n".
            time, linestr = l[:-1].split(" ",1)
            try:
                linestr = linestr.rstrip('\r') # according to RFC, there is \r
                pipeline.handleReceived(Line(linestr=linestr, time=time))
            except:
                print_exc()
                print >>sys.stderr, "... on %s:%s: %s" % (source.name, i+1, l)
    pipeline.close()
if __name__ == '__main__':
    # test main
    # Usage: <script> <root-uri> <channel> <timeprefix>  (log on stdin)
    import sys
    root = sys.argv[1]
    channel = sys.argv[2]
    timeprefix = sys.argv[3]
    title = "%s-%s" % (channel, timeprefix)
    selfuri = "" # FIXME
    # Default pipeline dumps the filtered raw log; the alternative sinks
    # are left commented out for manual testing.
    pipeline = OffFilter(ChannelFilter(channel,
                         TimeFilter(timeprefix,
                                    RawSink()
                                    # HtmlSink(title, selfuri)
                                    # TurtleSink(root, channel)
                                    )))
    run(sys.stdin, pipeline)
|
UTF-8
|
Python
| false | false | 2,009 |
19,335,942,791,668 |
8807bf04b8fc03c2334a61bdcf7deb64eec8b1d3
|
d4de54d8a8e716ede0e8bf88d28fc1142d1f6475
|
/menu/views.py
|
a3c2758623258e037ef5c204062f90fc0a983ae3
|
[] |
no_license
|
sergeyzsg/menu-project
|
https://github.com/sergeyzsg/menu-project
|
ed7772a7c7dc3b5291d2bfa98e07b57285548b49
|
ff2e9ab7d8caf42801341ff3c4adde4ec4bb7005
|
refs/heads/master
| 2016-09-10T14:39:01.947039 | 2010-05-12T10:34:38 | 2010-05-12T10:34:38 | 32,121,662 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
from django.shortcuts import render_to_response
from django.template import RequestContext
from menu.models import Menu
def menu_page(request, menu_id):
    """Render the menu page for the Menu row identified by *menu_id*."""
    selected_menu = Menu.objects.get(id=menu_id)
    template_vars = {
        'menu': selected_menu,
        'title': selected_menu.title,
    }
    return render_to_response(
        'menu/menu.html', template_vars,
        context_instance=RequestContext(request))
|
UTF-8
|
Python
| false | false | 2,010 |
9,182,640,127,727 |
861b1dd454c536d65769fbd5db5dd8beabb6e542
|
87813e5c531e3b73237734d2ff76ff3ed432b0f4
|
/ch3/paycheck.py
|
4e6a30092db3b2bd0e7d925ef3b44c0986d67fde
|
[] |
no_license
|
spsanderson/python_for_informatics
|
https://github.com/spsanderson/python_for_informatics
|
f1839ce8030a3ffd8c4c00a22e3217b2463f834d
|
8de0f52f6db42791cc42d53ab216d8dc7689aabc
|
refs/heads/master
| 2021-01-20T23:32:25.911531 | 2013-12-28T22:55:32 | 2013-12-28T22:55:32 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# ask for hours worked and pay per hour to compute total pay
try:
hours_worked = float(raw_input('Please enter the hours you worked: \n'))
print ''
except:
print 'Please enter hours as a number'
quit()
try:
rate_per_hr = float(raw_input('Please enter your hourly rate: \n'))
rate_round = round(rate_per_hr, 2)
print ''
except:
print 'Please enter rate per hour as a number'
quit()
if hours_worked <= 40:
pay = hours_worked * rate_round
else:
pay = rate_round * 40 + (rate_round * 1.5 * (hours_worked - 40))
print 'Your pay is ' + str(pay)
|
UTF-8
|
Python
| false | false | 2,013 |
1,125,281,464,077 |
36580338e4af86a2e2346d3e28fce831c23a0534
|
7f763d7c2289e0dcbcc01073f38ea11706736ed7
|
/Nadsat/plugin.py
|
5868b0fd1cb0fd06414f9c071a639b1a89ceed3a
|
[] |
no_license
|
davidsedlar/LemongrabBot
|
https://github.com/davidsedlar/LemongrabBot
|
ee3662a506dcbf6c6bfea0decd00044dd0e40ea9
|
37e18bc54554394ea3afa60fa168414e43ce0a99
|
refs/heads/master
| 2021-01-15T12:42:21.857132 | 2014-12-31T20:42:43 | 2014-12-31T20:42:43 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
###
# Copyright (c) 2008, Jonathan Brinley
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import supybot.utils as utils
from supybot.commands import *
import supybot.plugins as plugins
import supybot.ircutils as ircutils
import supybot.callbacks as callbacks
naddict = {u' commodity': 'horrorshow', u' joke': 'smeck', u'hitch': 'itty', u' chief': 'rasoodock', u'sleep': 'zasnoot', u' digging': 'kopat', u' jabbing': 'yeckate', u' shrieking': 'horn', u' scared': 'spoogy', u' word of honor': 'slovo', u' scar': 'skriking', u' glide by': 'sloochat', u' employment': 'rabbit', u' gunman': 'pooshka', u' snack': 'mounch', u" florist's chrysanthemum": 'em', u' arcminute': 'minoota', u'brake shoe': 'sabog', u' dick': 'sod', u' nan': 'baboochka', u' dude': 'chelloveck', u' torpedo': 'pooshka', u'soiled': 'grazzy', u'screaming': 'horn', u' trace': 'dook', u' competitiveness': 'drat', u' teen': 'nadsat', u'school': 'skolliwoll', u' physical body': 'plott', u' panicky': 'spoogy', u' babble': 'govoreet', u' mitt': 'rooker', u' private road': 'yeckate', u'flashy': 'gromky', u' dress circle': 'banda', u' cigarette': 'cancer', u' produce': 'prod', u' pecker': 'sod', u'clothes': 'platties', u' meaninglessness': 'chepooka', u'tired': 'fagged', u'samara': 'klootch', u' bighearted': 'bolshy', u'manus': 'lapa', u' officer': 'rozz', u' outcry': 'platch', u' coffin nail': 'cancer', u' handkerchief': 'tashtook', u' graven image': 'bog', u' sensation': 'odin', u' tom turkey': 'kot', u' kill': 'oobivat', u' triple': 'tree', u' sport': 'filly', u' breast': 'groody', u' touch': 'dook', u' lynchpin': 'keeshkas', u' arrogant': 'nadmenny', u' buddy': 'bratty', u'menage': 'domy', u' raciness': 'mounch', u' injury': 'vred', u'dry': 'osoosh', u' poop': 'sod', u' champion': 'odin', u' muddle': 'yahma', u' bid': 'filly', u' big': 'bolshy', u' abrasion': 'skriking', u' biz': 'eegra', u' manner of walking': 'gooly', u'lonesome': 'oddy-knocky', u' date': 'drat', u'head': 'mozg', u' bit': 'mounch', u' stack': 'banda', u' accidental injury': 'vred', u' turn of events': 'itty', u' facial gesture': 'smot', u'schooling': 'skolliwoll', u' face': 'smot', u'rakehell': 'krovvy', u' jab': 'yeckate', u' sign of the zodiac': 'domy', u' cuckoo': 'veck', u'chain of mountains': 
'oozy', u' shape': 'plott', u' crosscurrent': 'razrez', u'music': 'pop-disk', u' body of work': 'rabbit', u' muddied': 'grazzy', u' swearword': 'chelloveck', u' air pocket': 'carman', u'hurt': 'vred', u' mining': 'kopat', u' overthrow': 'razdrez', u' guy wire': 'veck', u' prison': 'staja', u'hole': 'yahma', u' impairment': 'vred', u' vocalism': 'goloss', u' colleague': 'chelloveck', u'ma': 'em', u' singular': 'zammechat', u' beau': 'chelloveck', u'mo': 'minoota', u'work': 'rabbit', u' can': 'cancer', u'whack': 'clop', u' mom': 'em', u' manifestation': 'smot', u' beak': 'morder', u' after part': 'cal', u' devolve': 'sloochat', u' narrative': 'raskazz', u' potable': 'peet', u'enchantress': 'lighter', u' figure': 'plott', u' dance band': 'banda', u' plash': 'plesk', u'hitman': 'pooshka', u' corny': 'hound-and-horny', u' pass': 'sloochat', u' neck opening': 'shiyah', u' fight': 'drat', u' true cat': 'veck', u' frill': 'chepooka', u' nicety': 'dook', u' swelled': 'bolshy', u' slash': 'shive', u' strike': 'tolchock', u' young man': 'chelloveck', u' kat': 'veck', u' granny': 'baboochka', u' chassis': 'plott', u' filthy': 'merzky', u' nap': 'zasnoot', u' misfire': 'ptitsa', u'office': 'cantora', u' smear': 'mesto', u' tender': 'bezoomy', u' odour': 'von', u' brainiac': 'rasoodock', u' grownup': 'bolshy', u' circuit': 'itty', u' dentition': 'zoobies', u'wickedness': 'nochy', u' clubhouse': 'shlaga', u' distress': 'vred', u' household': 'domy', u' paunch': 'brooko', u' excoriation': 'skriking', u' readiness': 'banda', u' stage set': 'banda', u' study': 'rabbit', u' conk': 'sloochat', u' caper': 'filly', u' thinking': 'messel', u' hard-on': 'pan-handle', u'band': 'banda', u" shepherd's crook": 'prestoopnik', u' routine': 'itty', u' striation': 'banda', u'bread': 'kleb', u'base on balls': 'gooly', u' grease': 'cal', u' lucre': 'skriking', u'potatoes': 'kartoffel', u' turning': 'itty', u' moment': 'minoota', u' whang': 'clop', u' crapulence': 'peet', u' walkway': 'gooly', u' 
phonation': 'goloss', u' washing': 'cheest', u' red cent': 'sod', u' disorder': 'razdrez', u' free rein': 'filly', u'god': 'bog', u' chicken feed': 'skriking', u' pitch': 'razrez', u'medicine': 'pop-disk', u'self-important': 'nadmenny', u' oral sex': 'rasoodock', u' dolt': 'gloopy', u' citizenry': 'lewdies', u' gas pedal': 'pooshka', u' empathise': 'pony', u' crowing': 'bolshy', u' leading': 'privodeet', u' panicked': 'spoogy', u' derangement': 'razdrez', u'stochasticity': 'shoom', u' go through': 'sloochat', u' backtalk': 'rot', u' ally': 'droog', u' computerized tomography': 'veck', u' ball': 'eggiweg', u' oral fissure': 'rot', u' dessert': 'sladky', u' mavin': 'odin', u' collar': 'tree', u' foetid': 'merzky', u' laddie': 'chelloveck', u' expletive': 'chelloveck', u' trail': 'privodeet', u'loud': 'gromky', u' chronicle': 'raskazz', u' locution': 'smot', u' involution': 'interessovat', u' pass off': 'sloochat', u' dry wash': 'cheest', u' shopworn': 'fagged', u' booster cable': 'privodeet', u' one': 'odin', u' cigaret': 'cancer', u' cabaret': 'shlaga', u'tom': 'kot', u' kick the bucket': 'sloochat', u'listen': 'slooshy', u' dissonance': 'shoom', u' prat': 'cancer', u' hint': 'privodeet', u' drumhead': 'rasoodock', u" florists' chrysanthemum": 'em', u' diddlysquat': 'sod', u' sense of smell': 'von', u' mess': 'yahma', u' lousy': 'merzky', u' booster': 'droog', u'noblewoman': 'dama', u' mesh': 'drat', u'friend': 'droog', u' doughnut': 'shaika', u' mass': 'lewdies', u' looseness': 'filly', u'handkerchief': 'tashtook', u'mind': 'rasoodock', u'corny': 'hound-and-horny', u' situation': 'mesto', u' yell': 'platch', u' shucks': 'sod', u' concatenation': 'oozy', u' plurality': 'shaika', u' lifetime': 'jeezny', u' occur': 'sloochat', u' mummy': 'em', u' wow': 'horn', u' work party': 'shaika', u'mouth': 'rot', u' zany': 'veck', u' lavation': 'cheest', u'parting': 'ookadeet', u' khat': 'veck', u' coterie': 'shaika', u'rotary': 'banda', u' good sense': 'keeshkas', u' hoo-ha': 
'razdrez', u' ambition': 'sneety', u' pencil lead': 'privodeet', u' fantasm': 'dook', u' spectre': 'dook', u' trite': 'fagged', u' hagfish': 'lighter', u' apologia': 'appypolly loggy', u' croak': 'sloochat', u' insane': 'bezoomy', u' mommy': 'em', u'underpants': 'neezhnies', u' babble out': 'govoreet', u'sugar': 'sakar', u' momma': 'em', u'rich': 'bugatty', u' chesty': 'nadmenny', u' military officer': 'rozz', u' firm': 'domy', u' pes': 'noga', u' giving': 'bolshy', u' banding': 'banda', u' interference': 'shoom', u'pocket': 'carman', u' push button': 'knopka', u' rich': 'bugatty', u' slip away': 'sloochat', u'vociferation': 'platch', u' fate': 'banda', u' suffering': 'vred', u'bad': 'baddiwad', u' jack': 'sod', u'solid food': 'pishcha', u'release': 'knopka', u' breadstuff': 'kleb', u'steal': 'crast', u' curing': 'banda', u' wife': 'zheena', u' trumpery': 'chepooka', u' sidekick': 'bratty', u' gumption': 'keeshkas', u' crowd': 'shaika', u' iodin': 'odin', u' huffy': 'bezoomy', u' farewell': 'ookadeet', u' aspect': 'smot', u'mum': 'em', u'bellowing': 'crark', u'staff of life': 'kleb', u' creep': 'dook', u' masses': 'lewdies', u' adult': 'bolshy', u' poking': 'yeckate', u' war cry': 'platch', u' boom': 'crark', u'news': 'slovo', u' boob': 'groody', u' stock': 'razrez', u' abdomen': 'brooko', u' play': 'itty', u'cow': 'scoteena', u' interlocking': 'drat', u' peeress': 'dama', u' animal foot': 'noga', u' let out': 'govoreet', u' romp': 'filly', u' metre': 'raz', u' heading': 'rasoodock', u' nutrient': 'pishcha', u' angiotensin converting enzyme': 'odin', u'speak': 'govoreet', u' cranny': 'chelloveck', u' ballock': 'eggiweg', u' soma': 'plott', u' mind': 'slooshy', u' cosh': 'shoot', u'quickly': 'skorry', u'sac': 'carman', u' slit': 'sod', u'backwash': 'cheest', u'interest': 'interessovat', u' follow': 'sloochat', u' ingroup': 'shaika', u' pastime': 'interessovat', u' mint': 'banda', u'life': 'jeezny', u'sprightliness': 'jeezny', u' blighter': 'chelloveck', u'chum': 
'bratty', u' ill-scented': 'merzky', u' tight': 'merzky', u' life history': 'jeezny', u' hooter': 'morder', u' hogwash': 'chepooka', u' screeching': 'horn', u' extend': 'sloochat', u' apparition': 'dook', u' meretricious': 'gromky', u' lad': 'chelloveck', u' dark': 'nochy', u' bull': 'sod', u'voice': 'goloss', u' encounter': 'sloochat', u' handsome': 'bolshy', u' term of enlistment': 'itty', u' randomness': 'shoom', u' cay': 'klootch', u' sassing': 'goober', u' hour': 'minoota', u' cat': 'veck', u' joyfulness': 'radosty', u'sting': 'mounch', u' fille': 'ptitsa', u' trap': 'yahma', u' cristal': 'itty', u' place': 'mesto', u' planetary house': 'domy', u' cakehole': 'yahma', u' steer': 'privodeet', u' pack': 'shaika', u'severity': 'baddiwad', u' expectant': 'bolshy', u' morsel': 'mounch', u' traffic circle': 'banda', u' bingle': 'odin', u'utilitarian': 'polezny', u' sense': 'keeshkas', u'belly': 'brooko', u' materialisation': 'molodoy', u' utile': 'polezny', u'let loose': 'govoreet', u'hand': 'rooker', u' lodge': 'shlaga', u' knife': 'yahzick', u' birdsong': 'platch', u'butter': 'maslo', u' pharynx': 'gorlo', u' posterior': 'cancer', u' wholeness': 'odin', u' afternoon tea': 'chai', u' mankind': 'orange', u' prominent': 'bolshy', u' ace': 'odin', u' buster': 'chelloveck', u' cheating(a)': 'merzky', u' act': 'itty', u' dingy': 'grazzy', u' parentage': 'razrez', u' pleasure': 'radosty', u' fleet': 'sloochat', u' gambol': 'filly', u'cervix': 'shiyah', u'spectacles': 'otchkies', u' common chord': 'tree', u' decease': 'sloochat', u' foul': 'merzky', u' goose': 'veck', u' large number': 'shaika', u'prison': 'staja', u' frantic': 'bezoomy', u'hat': 'shlapa', u' laugh': 'smeck', u' brassy': 'gromky', u' verbalize': 'govoreet', u' common sense': 'keeshkas', u' stub': 'cancer', u' repellant': 'merzky', u' mansion': 'domy', u' chop-chop': 'skorry', u' helping hand': 'rooker', u'shoulder': 'pletcho', u' struggle': 'drat', u' hindquarters': 'cancer', u'night': 'nochy', u' report': 
'raskazz', u' bullshit': 'sod', u' chemical chain': 'oozy', u'deal': 'kupet', u'people': 'lewdies', u'non-Christian priest': 'godman', u' tobacco plant': 'snoutie', u' madam': 'dama', u' marked-up': 'merzky', u'depository library': 'biblio', u'secret plan': 'eegra', u'pick up': 'sobirat', u' stripe': 'banda', u' applauder': 'yahzick', u' whirl': 'itty', u" tinker's damn": 'sod', u' arsehole': 'sod', u'view': 'messel', u' gull': 'shoot', u' affaire': 'interessovat', u' tacky': 'gromky', u' severeness': 'baddiwad', u' liveliness': 'jeezny', u'typeface': 'litso', u' melodic theme': 'messel', u' specs': 'otchkies', u'magazine': 'raz', u' cabbage': 'skriking', u' shooter': 'pooshka', u'son': 'malchick', u'prisoner': 'plenny', u' roast': 'clop', u'chime': 'zvonock', u' dramatic play': 'filly', u' clipping': 'raz', u'guts': 'keeshkas', u' materialize': 'sloochat', u' number': 'itty', u' meter': 'raz', u' immortal': 'bog', u'fight': 'drat', u' affray': 'razdrez', u' capitulum': 'rasoodock', u' girlfriend': 'ptitsa', u' feller': 'chelloveck', u' funny': 'zammechat', u' muddy': 'grazzy', u' gun for hire': 'pooshka', u' pectus': 'groody', u' dresser': 'groody', u' flock': 'banda', u' program library': 'biblio', u' prick': 'sod', u' period of play': 'filly', u' band': 'shaika', u' bang': 'clop', u' sink': 'sloochat', u'fundament': 'noga', u' sing': 'govoreet', u' difference': 'drat', u' shriek': 'horn', u' care': 'shilarny', u' slicing': 'lomtick', u' look': 'von', u' turgid': 'bolshy', u' shekels': 'skriking', u' loot': 'skriking', u' scream': 'horn', u' tonality': 'klootch', u'shit': 'cal', u' atomic number 82': 'privodeet', u' harm': 'vred', u' stooge': 'cancer', u' sire': 'pee', u' buy the farm': 'sloochat', u' fellow': 'chelloveck', u' storey': 'raskazz', u' brain': 'rasoodock', u'smell': 'von', u'hackneyed': 'fagged', u' disturbance': 'shoom', u' sentiment': 'messel', u'geezer': 'ded', u' bastard': 'sod', u' dollar sign': 'golly', u' role': 'cantora', u'smelling': 
'nuking', u' set': 'banda', u' pelf': 'skriking', u' vainglorious': 'bolshy', u' top dog': 'rasoodock', u'apology': 'appypolly loggy', u'excuse': 'appypolly loggy', u' breathlessness': 'sod', u' sore': 'bezoomy', u'teeth': 'zoobies', u' panic-struck': 'spoogy', u' berth': 'mesto', u' air hole': 'carman', u' lecture': 'govoreet', u' mathematical group': 'gruppa', u' horseshoe': 'sabog', u' illegitimate': 'sod', u'phantasma': 'dook', u' afflictive': 'bezoomy', u'battle': 'bitva', u'milk': 'moloko', u'green groceries': 'prod', u'sexual practice': 'pol', u' exceed': 'sloochat', u' clapper': 'yahzick', u' rip': 'razrez', u' scathe': 'vred', u'father': 'pee', u' business organization': 'shilarny', u'granny': 'baboochka', u' befall': 'sloochat', u' rim': 'goober', u' wiz': 'odin', u' flavor': 'von', u' instant': 'minoota', u' newsprint': 'gazetta', u' shoulder joint': 'pletcho', u' looking at': 'smot', u' fade': 'sloochat', u'cook': 'vareet', u' dickens': 'dva', u'minute': 'minoota', u' return': 'sloochat', u'policeman': 'rozz', u'die': 'snuff it', u'dig': 'kopat', u'brother': 'bratty', u'leave': 'ookadeet', u' representative': 'goloss', u' fluster': 'razdrez', u'guy': 'veck', u' pass away': 'sloochat', u' honker': 'morder', u' youth': 'molodoy', u' stinger': 'shive', u' evanesce': 'sloochat', u' saying': 'smot', u' lumberman': 'chelloveck', u'movies': 'sinny', u' maculation': 'mesto', u' wraith': 'dook', u' diddlyshit': 'sod', u' two': 'dva', u' question': 'rasoodock', u' animation': 'jeezny', u' nincompoop': 'cal', u' infer': 'pony', u'boy': 'malchick', u' encephalon': 'rasoodock', u' string': 'oozy', u' moo-cow': 'scoteena', u' tierce': 'tree', u' club': 'shlaga', u' realize': 'pony', u' hombre': 'veck', u' thrust': 'yeckate', u' chump': 'shoot', u'pillow': 'podooshka', u' case': 'smot', u'teenage': 'nadsat', u'useful': 'polezny', u' go past': 'sloochat', u' creative thinker': 'rasoodock', u' nonsense': 'chepooka', u' leave': 'ookadeet', u' fundament': 'cancer', u' 
opinion': 'messel', u' aliveness': 'jeezny', u' solidification': 'banda', u' cock': 'sod', u' spend': 'sloochat', u' fun': 'filly', u' family': 'shaika', u'sake': 'interessovat', u' sot': 'pyahnitsa', u' single': 'odin', u' battalion': 'shaika', u' airstream': 'cheest', u' cacography': 'skriking', u'club': 'shlaga', u' driving': 'yeckate', u' neb': 'morder', u' head word': 'rasoodock', u' cutting off': 'shive', u' smutty': 'merzky', u' rum': 'zammechat', u' garish': 'gromky', u' gaudy': 'gromky', u' run': 'sloochat', u' dirt': 'sod', u' riot': 'horn', u' bloke': 'chelloveck', u'cat': 'koshka', u' pudden-head': 'gloopy', u' hokum': 'chepooka', u' afoul(ip)': 'merzky', u'laughter': 'smeck', u' come': 'sloochat', u'heart': 'glazz', u' kettle of fish': 'yahma', u'piece of music': 'lomtick', u' spine': 'keeshkas', u' schnoz': 'morder', u' bridge player': 'rooker', u"ship's bell": 'collocol', u' groundwork': 'noga', u' motley fool': 'shoot', u' precaution': 'chasso', u' swage': 'razdrez', u' jest': 'smeck', u' admirer': 'droog', u'tarradiddle': 'raskazz', u' happen': 'sloochat', u'birdsong': 'warble', u' swordplay': 'filly', u' poke': 'yeckate', u'maw': 'yahma', u' go across': 'sloochat', u' subroutine library': 'biblio', u' engagement': 'interessovat', u' bunk': 'chepooka', u' boastful': 'bolshy', u' bosom': 'groody', u' buns': 'cancer', u'mad': 'bezoomy', u' human body': 'plott', u'filthy': 'merzky', u'man': 'orange', u' dab': 'plesk', u' buy': 'kupet', u'neck': 'shiyah', u' confection': 'sladky', u' bum': 'cancer', u" cat-o'-nine-tails": 'veck', u' canny': 'oomny', u' pass on': 'sloochat', u' thorax': 'groody', u' skanky': 'merzky', u' faller': 'chelloveck', u' difference of opinion': 'drat', u' sobbing': 'sod', u' spokesperson': 'goloss', u'beak': 'cluve', u' comrade': 'chelloveck', u' choke': 'sloochat', u' history': 'raskazz', u'brain': 'mozg', u'deity': 'bog', u' magical spell': 'itty', u'group': 'gruppa', u' stupid person': 'gloopy', u' excavation': 'kopat', 
u'hanky': 'tashtook', u' twat': 'veck', u'window': 'okno', u' stroke': 'brosay', u' baseball club': 'shlaga', u' vista': 'smot', u' sympathise': 'pony', u' dig': 'kopat', u'sarcastic': 'sarky', u' phantom': 'dook', u' archeological site': 'kopat', u' gaming': 'filly', u' hit man': 'pooshka', u'name': 'eemya', u' harebrained': 'bezoomy', u' slur': 'mesto', u' rake': 'razrez', u' fetid': 'merzky', u' braggart(a)': 'bolshy', u' safety device': 'chasso', u' cleaning woman': 'soomka', u' estimate': 'messel', u' rummy': 'zammechat', u' tawdry': 'gromky', u' third': 'tree', u' account': 'raskazz', u' knife thrust': 'yeckate', u' alcoholic drink': 'firegold', u' quarter': 'cal', u' charwoman': 'soomka', u'girl': 'ptitsa', u' platitudinous': 'hound-and-horny', u'priest': 'godman', u' howler': 'horn', u' sight': 'banda', u' manus': 'rooker', u' mountain range': 'oozy', u' proceed': 'sloochat', u'virtuoso': 'odin', u' chest': 'groody', u' metrical unit': 'noga', u' peck': 'banda', u' hired hand': 'rooker', u' tooshie': 'cancer', u' ass': 'cancer', u' quat': 'veck', u'thing': 'veshch', u' tea leaf': 'chai', u'place': 'mesto', u' anchor ring': 'shaika', u' neck': 'shiyah', u' touchy': 'bezoomy', u' school principal': 'rasoodock', u'one': 'odin', u' golf-club': 'shlaga', u' nous': 'rasoodock', u'teatime': 'chai', u'little': 'malenky', u' noteworthy': 'zammechat', u' tidy sum': 'banda', u'caught': 'loveted', u' insect bite': 'mounch', u' sleep': 'zasnoot', u' ring': 'shaika', u' biography': 'jeezny', u' afters': 'sladky', u' sonny': 'chelloveck', u' group': 'gruppa', u' drive': 'yeckate', u' attire': 'platties', u' logger': 'chelloveck', u' star': 'odin', u'sassing': 'rot', u' come up': 'sloochat', u'rich people': 'bugatty', u' oeuvre': 'rabbit', u' target': 'cancer', u' nightspot': 'shlaga', u' countersign': 'slovo', u' adult female': 'soomka', u'say': 'skazat', u'rent': 'razrez', u' intimacy': 'interessovat', u' line of descent': 'razrez', u' butt end': 'cancer', u' dump': 
'sod', u'snout': 'morder', u' combat injury': 'vred', u' exercise set': 'banda', u' sensitive': 'bezoomy', u' bore': 'gloopy', u' destiny': 'banda', u'begin': 'nachinat', u'bromidic': 'hound-and-horny', u' darkness': 'nochy', u' lead story': 'privodeet', u' speckle': 'mesto', u' draw': 'sloochat', u' grit': 'keeshkas', u' spirit': 'von', u' starting line': 'skriking', u'drive': 'yeckate', u' judgment': 'rasoodock', u' maven': 'odin', u' bunkum': 'chepooka', u' roach': 'banda', u' nates': 'cancer', u' tush': 'cancer', u'perch': 'shest', u' blood line': 'razrez', u' fount': 'smot', u' watchword': 'slovo', u' batch': 'banda', u' teenage': 'nadsat', u'shoe': 'sabog', u'snack': 'mounch', u'dice': 'snuff it', u' tincture': 'dook', u' splatter': 'plesk', u' ringing': 'shaika', u'manoeuvre': 'filly', u' stern': 'cancer', u' hoo-hah': 'razdrez', u' diddley': 'sod', u'refined sugar': 'sakar', u' divinity': 'bog', u' womanhood': 'soomka', u' revolting': 'merzky', u' mentation': 'messel', u' damn': 'sod', u' stock(a)': 'fagged', u' snuff it': 'sloochat', u' headache': 'shilarny', u' fountainhead': 'rasoodock', u' maneuver': 'filly', u' baseball glove': 'rooker', u' take place': 'sloochat', u' falderol': 'chepooka', u'naked': 'nagoy', u'stupid': 'gloopy', u' spook': 'dook', u' berm': 'pletcho', u' parkway': 'yeckate', u' knock': 'clop', u' odor': 'von', u'brim': 'goober', u' keep': 'sloochat', u' bollock': 'eggiweg', u' guide': 'sloochat', u'concern': 'shilarny', u' margin call': 'platch', u' emit': 'sloochat', u' garb': 'platties', u' scratch line': 'skriking', u' demented': 'bezoomy', u' mother wit': 'keeshkas', u" gentleman's gentleman": 'orange', u' bottom': 'cancer', u' splash': 'plesk', u' asshole': 'sod', u' take heed': 'slooshy', u'schnozzle': 'cluve', u' wicked': 'merzky', u' gibe': 'kopat', u' undercut': 'shive', u' optic': 'glazz', u' articulation': 'goloss', u' jester': 'shoot', u' promptly': 'skorry', u' breathe': 'sloochat', u' blow over': 'sloochat', u' shadow': 
'nochy', u' declamatory': 'bolshy', u' sidesplitter': 'horn', u' clam': 'golly', u' frock': 'platties', u'pole': 'shest', u' tattle': 'govoreet', u' pop off': 'sloochat', u' troth': 'drat', u' transcend': 'sloochat', u'sugariness': 'sladky', u' full-grown': 'bolshy', u' phone call': 'platch', u' splashing': 'plesk', u' nut': 'eggiweg', u' intellectual nourishment': 'pishcha', u' electronic jamming': 'jammiwam', u' guy cable': 'veck', u' small-arm': 'lomtick', u' white meat': 'groody', u' sordid': 'grazzy', u' bragging(a)': 'bolshy', u' pip': 'mesto', u' snoot': 'morder', u' haircloth': 'voloss', u' tercet': 'tree', u' spill the beans': 'govoreet', u' descent': 'razrez', u' keystone': 'klootch', u' business organisation': 'shilarny', u' natural language': 'yahzick', u'sound': 'zvook', u' business firm': 'domy', u' gobbler': 'kot', u' pass by': 'sloochat', u' ancestry': 'razrez', u'newspaper': 'gazetta', u' brass': 'smot', u'slippers': 'toofles', u' arse': 'sod', u'fool': 'shoot', u' psychic trauma': 'vred', u' unsportsmanlike': 'merzky', u' sass': 'rot', u' glove': 'rooker', u' lifespan': 'jeezny', u' disturbed': 'bezoomy', u' human foot': 'noga', u' female child': 'ptitsa', u'helmet': 'shlem', u' claim': 'platch', u' campaign': 'yeckate', u' mad': 'bezoomy', u' paper': 'gazetta', u' great(p)': 'bolshy', u' sap': 'shoot', u'see': 'viddy', u' crazy': 'bezoomy', u' disgusting': 'merzky', u' gentleman': 'orange', u' dispute': 'drat', u' noisome': 'merzky', u' self-aggrandising': 'bolshy', u'bang': 'tolchock', u'female person': 'sharp', u' ordnance': 'pooshka', u' brother': 'bratty', u' screech': 'horn', u' spill': 'govoreet', u' built in bed': 'chepooka', u'money': 'pretty polly', u'valuable': 'dorogoy', u' breadbasket': 'brooko', u'kill': 'oobivat', u' bad': 'bolshy', u'touch': 'mesto', u'blow': 'vellocet', u' granny knot': 'baboochka', u'loving cup': 'tass', u' scratch': 'sod', u' infantry': 'noga', u' sharpness': 'mounch', u' strain': 'warble', u' unsporting': 
'merzky', u' chime': 'collocol', u' slew': 'banda', u' empathize': 'pony', u' gimcrackery': 'chepooka', u' night club': 'shlaga', u'alcoholic beverage': 'firegold', u' hollering': 'crark', u'darn': 'sod', u' sick': 'bezoomy', u'mob': 'shaika', u'lady': 'dama', u' passel': 'banda', u' tint': 'dook', u' sentry duty': 'chasso', u' spell': 'lomtick', u' read': 'pony', u' triplet': 'tree', u' scoop': 'carman', u' nib': 'morder', u' military man': 'orange', u' rakehell': 'razrez', u' rear': 'cancer', u' derriere': 'cancer', u' trey': 'tree', u' stead': 'mesto', u' quick': 'skorry', u' guy': 'veck', u' federal agency': 'cantora', u'slice': 'lomtick', u' serviceman': 'orange', u' striking': 'tolchock', u' enceinte': 'bolshy', u' battle cry': 'platch', u'putting to death': 'oobivat', u' back talk': 'rot', u' felon': 'prestoopnik', u' sonny boy': 'chelloveck', u'knife': 'nozh', u' old-hat': 'fagged', u' min': 'minoota', u' fall guy': 'shoot', u'power': 'cantora', u' vertebral column': 'keeshkas', u' gong': 'zvonock', u' mark': 'skriking', u' wound': 'vred', u' paint': 'klootch', u' crush': 'jammiwam', u'a drug': 'drencrom', u'terrified': 'spoogy', u' schnozzle': 'morder', u'throw': 'brosay', u' idol': 'bog', u' killing': 'oobivat', u'muggins': 'shoot', u' foreland': 'rasoodock', u' fouled': 'merzky', u' hotshot': 'odin', u' ball club': 'shlaga', u'bountiful': 'bolshy', u' recital': 'raskazz', u' dent': 'sod', u' diddly-shit': 'sod', u' spatter': 'plesk', u' throttle': 'pooshka', u' chain': 'oozy', u' scene': 'smot', u' olfactory sensation': 'von', u' involvement': 'interessovat', u' scandal': 'cal', u' put across': 'sloochat', u'chickenfeed': 'hen-korm', u' scent': 'von', u' well-worn': 'fagged', u' holla': 'crark', u' cerebration': 'messel', u' hollo': 'crark', u' spinal column': 'keeshkas', u' goat': 'cancer', u' participation': 'interessovat', u' move on': 'sloochat', u' gash': 'lomtick', u' green goods': 'prod', u' adult male': 'orange', u' business': 'shilarny', u' 
smudge': 'mesto', u' nanna': 'baboochka', u' great deal': 'banda', u'dirty': 'grazhny', u' smelling': 'von', u' safeguard': 'chasso', u' integrity': 'odin', u' mental capacity': 'rasoodock', u' olfactory modality': 'von', u' smash': 'tolchock', u' grease-gun': 'pooshka', u' countercurrent': 'razrez', u'fellow': 'chelloveck', u' garden truck': 'prod', u' portion': 'banda', u' affair': 'interessovat', u' fair sex': 'soomka', u' subtlety': 'dook', u' curse word': 'chelloveck', u' straits': 'rasoodock', u' crusade': 'yeckate', u' curse': 'chelloveck', u' filth': 'cal', u' tomentum': 'voloss', u'time': 'raz', u' brainsick': 'bezoomy', u' trinity': 'tree', u' hurly burly': 'razdrez', u' fourth dimension': 'raz', u' toll': 'zvonock', u' interest': 'interessovat', u' minute of arc': 'minoota', u' goof': 'veck', u'wife': 'zheena', u'guard duty': 'chasso', u' angiotensin-converting enzyme': 'odin', u' snatch': 'skvat', u' blackjack': 'shoot', u' bump': 'sloochat', u' nasty': 'merzky', u' platitudinal': 'hound-and-horny', u' buck': 'golly', u' reflection': 'smot', u'dollar': 'golly', u' lineage': 'razrez', u'capitulum': 'ooko', u' side': 'smot', u' round': 'itty', u'remarkable': 'zammechat', u' aroma': 'von', u' confidential information': 'privodeet', u' stupe': 'gloopy', u'tail': 'cancer', u' accelerator pedal': 'pooshka', u' scratching': 'skriking', u'washroom': 'vaysay', u' noise': 'razdrez', u'woman': 'soomka', u' thunder': 'crark', u'song': 'warble', u'horror': 'strack', u' vocalisation': 'goloss', u'awful': 'merzky', u'grannie': 'baboochka', u' vocal music': 'warble', u' go on': 'sloochat', u' mountain chain': 'oozy', u' dope': 'cal', u'windowpane': 'okno', u' clique': 'shaika', u' password': 'slovo', u' delight': 'radosty', u' hind end': 'cancer', u' golf club': 'shlaga', u'tea': 'chai', u' cry': 'platch', u' whisker': 'voloss', u' push': 'yeckate', u' detriment': 'vred', u' spotlight': 'mesto', u' swearing': 'chelloveck', u' point': 'rasoodock', u' chest of drawers': 
'groody', u' sentence': 'raz', u' reflexion': 'smot', u' naan': 'baboochka', u' knocking': 'clop', u' odd': 'zammechat', u' pricking': 'sod', u' gent': 'chelloveck', u' physique': 'plott', u' theme': 'messel', u' multitude': 'shaika', u' yucky': 'merzky', u' terzetto': 'tree', u' hankie': 'tashtook', u' dramatic event': 'filly', u' mouth': 'rot', u'chain': 'oozy', u' boyfriend': 'chelloveck', u' revulsion': 'strack', u' strand': 'oozy', u' star sign': 'domy', u' gelt': 'skriking', u' buttocks': 'cancer', u'married woman': 'zheena', u' large': 'bolshy', u' hardening': 'banda', u'eye': 'glazz', u' remarkable': 'zammechat', u' apt': 'oomny', u'two': 'dva', u' weirdie': 'dook', u'splash': 'plesk', u'piece of work': 'rabbit', u' tad': 'dook', u' surpass': 'sloochat', u' utter': 'govoreet', u' shimmer': 'filly', u' yap': 'yahma', u' big cat': 'veck', u' shout': 'platch', u' march on': 'sloochat', u' raft': 'banda', u' commonplace': 'fagged', u' dapple': 'mesto', u'chaplain': 'charlie', u' girl': 'ptitsa', u' materialization': 'molodoy', u' gild': 'shlaga', u' troika': 'tree', u' slumber': 'zasnoot', u' plaza': 'mesto', u' lumberjack': 'chelloveck', u' panic-stricken': 'poogly', u' feed bunk': 'chepooka', u' useful': 'polezny', u' pudding head': 'gloopy', u' artillery': 'pooshka', u' pilus': 'voloss', u' interpret': 'pony', u' range of mountains': 'oozy', u' make pass': 'sloochat', u' narration': 'raskazz', u'dollar mark': 'golly', u' heavy(a)': 'bolshy', u'gash': 'shive', u' pedigree': 'razrez', u" cash in one's chips": 'sloochat', u' gravid': 'bolshy', u' lead': 'sloochat', u'advice': 'soviet', u' three': 'tree', u'blood': 'krovvy', u' deletion': 'shive', u' progress': 'sloochat', u' nonsensicality': 'chepooka', u' egest': 'sloochat', u' associate': 'chelloveck', u'paw': 'lapa', u' witch': 'lighter', u'egg': 'eggiweg', u' daughter': 'ptitsa', u' waist': 'tally', u' incision': 'sod', u' humankind': 'orange', u' vocal': 'warble', u' magnetic pole': 'shest', u' matter': 
'veshch', u' swallow': 'peet', u' workplace': 'rabbit', u' mamma': 'em', u' hoi polloi': 'lewdies', u' anchor': 'keeshkas', u' oneness': 'odin', u'arrogant': 'nadmenny', u' caboodle': 'banda', u' station': 'mesto', u' backside': 'cancer', u' banal': 'fagged', u' vox': 'goloss', u'eternal sleep': 'zasnoot', u' plot': 'eegra', u'good': 'horrorshow', u' chemical group': 'gruppa', u' fool': 'shoot', u'food': 'pishcha', u' snow': 'vellocet', u' overstep': 'sloochat', u' office staff': 'cantora', u'foot': 'noga', u' unbalanced': 'bezoomy', u'erection': 'pan-handle', u' computed axial tomography': 'veck', u'wash': 'cheest', u' cut of meat': 'shive', u' tool': 'sod', u' sympathize': 'pony', u' cunning': 'oomny', u' grammatical construction': 'smot', u' blood': 'razrez', u' jumper lead': 'privodeet', u' moolah': 'skriking', u'harm': 'vred', u' don': 'pee', u' thigh-slapper': 'horn', u' spike': 'ooko', u'house': 'domy', u'gun': 'pooshka', u' washout': 'cheest', u' nine': 'shlaga', u'scratching': 'skriking', u' grimace': 'smot', u' bod': 'plott', u' vexation': 'shilarny', u'alcohol': 'firegold', u' nozzle': 'morder', u' walk': 'gooly', u' intellection': 'messel', u' human face': 'smot', u' lettuce': 'skriking', u' qat': 'veck', u' stake': 'interessovat', u' damage': 'vred', u' lot': 'banda', u' olfactory property': 'von', u' trine': 'tree', u' criminal': 'prestoopnik', u' bunghole': 'sod', u' inner circle': 'shaika', u' perish': 'sloochat', u' ruffle': 'razdrez', u' hired gun': 'pooshka', u' communicate': 'sloochat', u'terrible': 'oozhassny', u' isthmus': 'banda', u' atomic number 53': 'odin', u'radical': 'gruppa', u' upset': 'razdrez', u' continue': 'sloochat', u' feeling': 'von', u' dirty': 'merzky', u'razor': 'britva', u'story': 'raskazz', u' verbal expression': 'smot', u'jew': 'yahoody', u' riptide': 'razrez', u' intelligence': 'slovo', u' duty tour': 'itty', u' cheap': 'gromky', u' psyche': 'rasoodock', u'wonderful': 'choodessny', u' shit': 'sod', u'mainstay': 
'keeshkas', u' eye': 'glazz', u' cut': 'lomtick', u' peach': 'govoreet', u' bite': 'mounch', u" ma'am": 'dama', u' cub': 'chelloveck', u' mickle': 'banda', u' appointment': 'drat', u' schooltime': 'skolliwoll', u' enchantment': 'itty', u' mischievousness': 'baddiwad', u' shortness of breath': 'sod', u' smasher': 'tolchock', u' crone': 'lighter', u' glasses': 'otchkies', u' impure': 'grazzy', u' fleck': 'mesto', u'yowl': 'crark', u' threadbare': 'fagged', u'thought': 'messel', u' eyeglasses': 'otchkies', u' trio': 'tree', u'commotion': 'razdrez', u' homo': 'orange', u'lip': 'goober', u' blank space': 'mesto', u' worry': 'shilarny', u'play': 'filly', u' progeny': 'molodoy', u' accelerator': 'pooshka', u' authorise': 'sloochat', u'eggs': 'eggiweg', u'cigarette': 'cancer', u' ennead': 'shlaga', u' by-blow': 'sod', u' clever': 'oomny', u' spit': 'yahzick', u' sucker': 'shoot', u' belly laugh': 'horn', u' judgement': 'rasoodock', u'military personnel': 'orange', u' unique': 'zammechat', u' fall out': 'sloochat', u' excision': 'shive', u' iniquity': 'nochy', u' fighting': 'drat', u' night': 'nochy', u'cursorily': 'skorry', u' position': 'mesto', u' love child': 'sod', u' let the cat out of the bag': 'govoreet', u' hairsbreadth': 'voloss', u' tone': 'von', u' roofy': 'banda', u' orchis': 'eggiweg', u' liberal': 'bolshy', u" tinker's dam": 'sod', u' hollow': 'yahma', u' header': 'rasoodock', u' give tongue to': 'govoreet', u' roar': 'crark', u' holloa': 'crark', u' battle': 'drat', u' keister': 'cancer', u' legislate': 'sloochat', u'hit': 'tolchock', u' grime': 'cal', u' rallying cry': 'platch', u' nine-spot': 'shlaga', u'breast': 'groody', u' build': 'plott', u' rabble': 'shaika', u' interest group': 'interessovat', u' buncombe': 'chepooka', u'cry': 'platch', u' magnanimous': 'bolshy', u' syndicate': 'shaika', u' ecstasy': 'itty', u' spattering': 'plesk', u' world': 'orange', u' circumstances': 'banda', u' nose candy': 'vellocet', u'sex': 'pol', u' rope': 'banda', u' 
ill-gotten': 'grazzy', u'barb': 'kopat', u' carry on': 'sloochat', u' triad': 'tree', u' mentality': 'rasoodock', u' luck': 'banda', u' thought': 'messel', u' human': 'orange', u' venter': 'brooko', u'inebriate': 'pyahnitsa', u' miss': 'ptitsa', u' life-time': 'jeezny', u' hand': 'sloochat', u' butt': 'cancer', u'dullard': 'gloopy', u" child's play": 'filly', u' good turn': 'itty', u' give-up the ghost': 'sloochat', u' mongrel': 'sod', u'chump change': 'hen-korm', u' news report': 'raskazz', u' mama': 'em', u' issue': 'molodoy', u' catch': 'skvat', u' whizz': 'odin', u' ninny': 'cal', u' art object': 'lomtick', u' address': 'govoreet', u' collation': 'mounch', u' strait': 'zvook', u' minute': 'minoota', u' elapse': 'sloochat', u'bell': 'collocol', u' scraping': 'skriking', u'sweet': 'sladky', u' beldam': 'lighter', u' ghostwriter': 'dook', u' stupid': 'gloopy', u' golf hole': 'yahma', u' rest': 'zasnoot', u'mumble': 'chumble', u'dream': 'sneety', u' pot': 'banda', u' bellow': 'crark', u' shoes': 'mesto', u' twist': 'prestoopnik', u' testis': 'eggiweg', u' enlistment': 'itty', u' cold shoulder': 'shive', u'understand': 'pony', u' horse sense': 'keeshkas', u' movement': 'yeckate', u' express': 'govoreet', u'look': 'smot', u' hatful': 'banda', u'bill': 'morder', u'material body': 'plott', u' prison term': 'raz', u'adolescent': 'nadsat', u'cagey': 'oomny', u' butt joint': 'cancer', u'joyousness': 'radosty', u' horseshit': 'sod', u'rip': 'razrez', u'headway': 'gulliver', u' male child': 'malchick', u' verbalise': 'govoreet', u' expression': 'smot', u' verbalism': 'smot', u' nerve': 'smot', u' pestiferous': 'grazzy', u' one-third': 'tree', u' gang': 'shaika', u' racket': 'shoom', u' blab': 'govoreet', u'composition': 'gazetta', u'conflict': 'drat', u" hair's-breadth": 'voloss', u' roundabout': 'banda', u' fella': 'chelloveck', u' line': 'razrez', u' crap': 'sod', u' trance': 'itty', u' alcohol': 'firegold', u' phantasm': 'dook', u' unity': 'odin', u' badness': 
'baddiwad', u' musical theme': 'messel', u' persuasion': 'messel', u' sopor': 'zasnoot', u' ft': 'noga', u' kerfuffle': 'razdrez', u' scribble': 'skriking', u'eyeglasses': 'otchkies', u' erection': 'pan-handle', u' advance': 'sloochat', u' time': 'raz', u' level': 'raskazz', u' profligate': 'razrez', u' squanderer': 'razrez', u'cut': 'shive', u' musical composition': 'lomtick', u' distasteful': 'merzky', u'cup': 'tass', u' rap': 'clop', u' raw': 'bezoomy', u'snap': 'skvat', u' business office': 'cantora', u' cocksucker': 'sod', u' dress': 'platties', u' blot': 'mesto', u' floor': 'raskazz', u'big': 'bolshy', u'doorbell': 'zvonock', u' wad': 'banda', u' putz': 'sod', u' male parent': 'pee', u'game': 'eegra', u' blood brother': 'bratty', u' slide by': 'sloochat', u' gambling': 'filly', u'knock': 'clop', u' familiar': 'chelloveck', u' flutter': 'razdrez', u' rophy': 'banda', u' fall': 'sloochat', u' hole': 'jammiwam', u'fucking': 'sodding', u' tear': 'razrez', u' linchpin': 'keeshkas', u' wino': 'pyahnitsa', u'curious': 'zammechat', u' threesome': 'tree', u' wind': 'privodeet', u' clams': 'skriking', u' drunk': 'pyahnitsa', u' go by': 'sloochat', u' sex': 'pol', u' snip': 'raz', u' crook': 'prestoopnik', u' dough': 'skriking', u' forefront': 'rasoodock', u' call option': 'platch', u' skid': 'sabog', u' captive': 'plenny', u' devil': 'dva', u' overhaul': 'sloochat', u' pigeonholing': 'gruppa', u'word': 'slovo', u' life story': 'jeezny', u' fix': 'yahma', u' self-aggrandizing': 'bolshy', u' bureau': 'groody', u' split': 'razrez', u' fib': 'raskazz', u' drunkard': 'pyahnitsa', u' circle': 'banda', u'crone': 'lighter', u' lead-in': 'privodeet', u' drinkable': 'peet', u' cagy': 'oomny', u' repugnance': 'strack', u' clitoris': 'knopka', u' fathead': 'veck', u' diddly-squat': 'sod', u' refinement': 'dook', u'repulsion': 'strack', u' iodine': 'odin', u' ghost': 'dook', u' piazza': 'mesto', u' key fruit': 'klootch', u' soul': 'rasoodock', u'nonsense': 'chepooka', u' die': 
'snuff it', u' prodigal': 'razrez', u' audio': 'zvook', u' newspaper': 'gazetta', u' whoreson': 'sod', u' grouping': 'gruppa', u' stria': 'banda', u' frame': 'plott', u' blab out': 'govoreet', u' sign': 'domy', u' unclean': 'grazzy', u' char': 'soomka', u' slam': 'kopat', u' chap': 'chelloveck', u'waistline': 'tally', u' repelling': 'merzky', u' thinker': 'rasoodock', u' drop dead': 'sloochat', u' lid': 'shlapa', u' agency': 'cantora', u' muckle': 'banda', u' lip': 'rot', u' safety': 'chasso', u' stain': 'mesto', u' bout': 'itty', u' chickenfeed': 'hen-korm', u' orotund': 'bolshy', u' eternal rest': 'zasnoot', u' billet': 'mesto', u' quite a little': 'banda', u' bounteous': 'bolshy', u' pursuit': 'interessovat', u' large(p)': 'bolshy', u' shaft': 'sod', u' come about': 'sloochat', u' tum': 'brooko', u' nightclub': 'shlaga', u' teenaged': 'nadsat', u' give-and-take': 'slovo', u' birdcall': 'warble', u' race': 'cheest', u' travel by': 'sloochat', u' pest': 'chelloveck', u' gentlewoman': 'dama', u' pickle': 'yahma', u' prospect': 'smot', u' rear end': 'cancer', u' shot': 'kopat', u' founder': 'pee', u' gadfly': 'chelloveck', u' adept': 'odin', u' magic spell': 'itty', u' leave of absence': 'ookadeet', u' go': 'sloochat', u' steal': 'kupet', u' principal': 'rasoodock', u' intellect': 'rasoodock', u' living': 'jeezny', u' rot': 'chepooka', u' script': 'rooker', u' origin': 'razrez', u' mental disturbance': 'razdrez', u' discussion': 'slovo', u' speedily': 'skorry', u' opus': 'lomtick', u' mug': 'shoot', u' mum': 'em', u' intoxicant': 'firegold', u' boldness': 'smot', u'cleaning lady': 'soomka', u'scream': 'horn', u' base': 'noga', u' malicious gossip': 'cal', u' grandma': 'baboochka', u' post': 'mesto', u' quietus': 'zasnoot', u' bash': 'clop', u' cartridge clip': 'raz', u' tale': 'raskazz', u'gang': 'shaika', u' apparel': 'platties', u'go': 'itty', u' realise': 'pony', u' turnover': 'razdrez', u' talk': 'govoreet', u' tatty': 'gromky', u' hoop': 'shaika', u'young': 
'molodoy', u' hoot': 'sod', u' carbohydrate': 'sakar', u' whiz': 'odin', u' campana': 'zvonock', u' yowl': 'crark', u' track': 'shive', u' paseo': 'gooly', u" ship's officer": 'rozz', u' sugar': 'skriking', u' weirdy': 'dook', u' facial expression': 'smot', u'materialise': 'sloochat', u' tintinnabulation': 'shaika', u'drinking': 'peet', u' business concern': 'shilarny', u'splattering': 'plesk', u' loathly': 'merzky', u' offer': 'itty', u' food': 'pishcha', u'button': 'knopka', u' hug drug': 'itty', u' spot': 'mesto', u' disruption': 'razdrez', u' foot': 'noga', u'panic-stricken': 'spoogy', u' crevice': 'chelloveck', u' tomfool': 'shoot', u' knocker': 'groody', u' ride': 'yeckate', u' invertebrate foot': 'noga', u' lap': 'banda', u' liaison': 'interessovat', u' taradiddle': 'raskazz', u' tongue': 'yahzick', u' kale': 'skriking', u' topographic point': 'mesto', u' specter': 'dook', u' approximation': 'messel', u' driveway': 'yeckate', u' head teacher': 'rasoodock', u' auditory sensation': 'zvook', u'lolly': 'skriking', u' contaminating': 'grazzy', u' scrap': 'drat', u' guts': 'keeshkas', u' behind': 'cancer', u' computed tomography': 'veck', u'testicles': 'yarbles', u' foundation': 'noga', u' fling': 'itty', u'gens': 'eemya', u' jumper cable': 'privodeet', u' illegitimate child': 'sod', u' stinking': 'merzky', u'panicked': 'poogly', u' typeface': 'smot', u' outlaw': 'prestoopnik', u' turn': 'prestoopnik', u' tail end': 'cancer', u' grown': 'bolshy', u' gran': 'baboochka', u' turd': 'sod', u' effort': 'yeckate', u' middle': 'glazz', u' feel': 'von', u' dollar bill': 'golly', u' metrical foot': 'noga', u' sweetness': 'sladky', u' speak': 'govoreet', u' perturbation': 'razdrez', u'clever': 'oomny', u' puerile': 'nadsat', u' freehanded': 'bolshy', u' driving force': 'yeckate', u' fanny': 'cancer', u' woman': 'soomka', u'balls': 'sharries', u' the skinny': 'cal', u' centre': 'glazz', u' guff': 'chepooka', u' weirdo': 'dook', u' moxie': 'keeshkas', u' unhinged': 'bezoomy', 
u' low-down': 'cal', u' tide rip': 'razrez', u' sexual urge': 'pol', u' haphazardness': 'shoom', u' acquaintance': 'droog', u'speech sound': 'zvook', u' repellent': 'merzky', u' property': 'mesto', u' view': 'smot', u' wearing apparel': 'platties', u' bozo': 'veck', u' shadiness': 'dook', u' promontory': 'rasoodock', u' authority': 'cantora', u' firearm': 'lomtick', u'thin-skinned': 'bezoomy', u' soft touch': 'shoot', u' slice': 'shive', u' trade good': 'horrorshow', u' flash': 'gromky', u' plenty': 'banda', u' gritstone': 'keeshkas', u' baseball mitt': 'rooker', u' pinna': 'ooko', u' diddly': 'sod', u' composition': 'lomtick', u' testicle': 'eggiweg', u' cuss': 'chelloveck', u' delirious': 'bezoomy', u' hankey': 'tashtook', u' bell shape': 'zvonock', u' thrusting': 'yeckate', u' vocalization': 'goloss', u' phone': 'zvook', u' turn over': 'sloochat', u' violent death': 'oobivat', u' press': 'jammiwam', u' pouch': 'carman', u' with child(p)': 'bolshy', u' deuce-ace': 'tree', u' spark advance': 'privodeet', u'lingua': 'yahzick', u' mental disorder': 'razdrez', u' gender': 'pol', u' clear': 'sloochat', u' go game': 'itty', u'swain': 'chelloveck', u' piece': 'orange', u'interpreter': 'goloss', u' parole': 'slovo', u' sack': 'carman', u' learning ability': 'mozg', u' bell': 'zvonock', u' olfaction': 'von', u' deglutition': 'peet', u' lieu': 'mesto', u' peter': 'sod', u' font': 'smot', u'criminal': 'prestoopnik', u' find': 'sloochat', u' heavy weapon': 'pooshka', u' folie': 'razdrez', u'muddle': 'jammiwam', u' start': 'skriking', u' exit': 'sloochat', u' forget me drug': 'banda', u' lolly': 'sakar', u' function': 'cantora', u' substructure': 'noga', u'fuzz': 'voloss', u' government agency': 'cantora', u' jibe': 'kopat', u' playing period': 'filly', u' human race': 'orange', u' unrestrained': 'bezoomy', u' headstone': 'klootch', u' center': 'glazz', u' guild': 'shlaga', u' cupful': 'tass', u'society': 'shlaga', u' drama': 'filly', u' buzzer': 'zvonock', u' ingenious': 
'oomny', u' deal': 'rooker', u' rod': 'shest', u' part': 'lomtick', u' gob': 'yahma', u' collision': 'tolchock', u' telephone call': 'platch', u' shadowiness': 'dook', u" ship's bell": 'zvonock', u' bunch': 'shaika', u' pesterer': 'chelloveck', u' mouthpiece': 'rot', u' top': 'sloochat', u' jam': 'yahma', u' grandmother': 'baboochka', u' human beings': 'orange', u'folderal': 'chepooka', u' eyelid': 'shlapa', u' wash drawing': 'cheest', u' epithet': 'eemya', u' grunge': 'cal', u' peculiar': 'zammechat', u' slant': 'razrez', u' mastermind': 'rasoodock', u' ternion': 'tree', u'produce': 'prod', u' charm': 'itty', u'prison house': 'staja', u' one dollar bill': 'golly', u' turkey cock': 'kot', u' walk of life': 'gooly', u' cartridge holder': 'raz', u' bloodline': 'razrez', u' solidifying': 'banda', u' glossa': 'yahzick', u' tour': 'itty', u' confrere': 'chelloveck', u' oath': 'chelloveck', u' order': 'shlaga', u' clit': 'knopka', u' clip': 'raz', u' uterine cervix': 'shiyah', u' seat': 'mesto', u' begetter': 'pee', u' winder': 'klootch', u' headway': 'rasoodock', u' schoolhouse': 'skolliwoll', u' foul-smelling': 'merzky', u' paw': 'rooker', u' rachis': 'keeshkas', u' valet': 'orange', u' patsy': 'shoot', u' excited': 'bezoomy', u' formula': 'smot', u' jejune': 'nadsat', u' pal': 'bratty', u'waist': 'tally', u' malefactor': 'prestoopnik', u'cam stroke': 'brosay', u' cervix uteri': 'shiyah', u' joy': 'radosty', u' protagonist': 'droog', u' sex activity': 'pol', u'guy rope': 'veck', u' cutting': 'shive', u' hired man': 'rooker', u' leave-taking': 'ookadeet', u' funky': 'merzky', u' facet': 'smot', u'throat': 'gorlo', u'bastard': 'sod', u' back': 'keeshkas', u' humans': 'orange', u' smelly': 'merzky', u' dogshit': 'sod', u' niner': 'shlaga', u' wizard': 'odin', u' rout': 'shaika', u' interestingness': 'interessovat', u' timeworn': 'fagged', u' good deal': 'banda', u' computerized axial tomography': 'veck', u' translate': 'pony', u' roue': 'razrez', u' police officer': 
'rozz', u' smell': 'smot', u' nuance': 'dook', u' bombastic': 'bolshy', u' slipstream': 'cheest', u'upset': 'razdrez', u'face': 'litso', u' school day': 'skolliwoll', u' drunkenness': 'peet', u' tail': 'cal', u' officeholder': 'rozz', u' terce': 'tree', u' pile': 'banda', u'affair': 'veshch', u' bargain': 'kupet', u' tumid': 'bolshy', u' saphead': 'shoot', u' cheek': 'smot', u' beldame': 'lighter', u' aspiration': 'sneety', u' palpebra': 'shlapa', u' shade': 'dook', u' psychological disorder': 'razdrez', u'fear': 'shilarny', u'supporter': 'droog', u' dickhead': 'sod', u' stemma': 'razrez', u'deuce': 'dva', u' slime eels': 'lighter', u' laughingstock': 'cancer', u' soil': 'cal', u' idea': 'rasoodock', u' walking': 'gooly', u' formulation': 'smot', u' second': 'minoota', u' poor fish': 'gloopy', u' dreaming': 'sneety', u' lapse': 'sloochat', u' panorama': 'smot', u' tummy': 'brooko', u' teething': 'zoobies', u' go along': 'sloochat', u' shite': 'sod', u' scrape': 'skriking', u' genius': 'rasoodock', u' booking': 'drat', u' mate': 'chelloveck', u' jamming': 'jammiwam', u' crack': 'itty', u' tour of duty': 'itty', u' missy': 'ptitsa', u' gritrock': 'keeshkas', u' looking': 'smot', u' head': 'rasoodock', u'tobacco': 'snoutie', u' gas': 'pooshka', u' spate': 'banda', u' headspring': 'rasoodock', u' reach': 'sloochat', u' roaring': 'crark', u' hear': 'slooshy', u' heap': 'banda', u'grab': 'skvat', u' gag': 'smeck', u' big(a)': 'bolshy', u' bill': 'cluve', u' nighttime': 'nochy', u' sexuality': 'pol', u' auricle': 'ooko', u' lede': 'privodeet', u' juvenile': 'nadsat', u'articulatio humeri': 'pletcho', u' chomp': 'mounch', u' headland': 'rasoodock', u'computed tomography': 'koshka', u' braggy': 'bolshy', u'joy': 'radosty', u' come out': 'sloochat', u' snag': 'razrez', u'key': 'klootch', u' excrete': 'sloochat', u' hitting': 'tolchock', u' to-do': 'razdrez', u' painful': 'bezoomy', u' boozing': 'peet', u' man': 'orange', u'the great unwashed': 'lewdies', u' odontiasis': 
'zoobies', u' doodly-squat': 'sod', u' flavour': 'von', u'walk': 'gooly', u'laugh': 'smeck', u' smirch': 'mesto', u' cleanup': 'oobivat', u' wampum': 'skriking', u' home': 'mesto', u' mountain': 'banda', u' forte': 'gromky', u' crony': 'bratty', u' rump': 'cancer', u' heavy(p)': 'bolshy', u' baccy': 'snoutie', u' stir': 'plesk', u' scrawl': 'skriking', u' small change': 'hen-korm', u' cause': 'yeckate', u' stomach': 'brooko', u' dame': 'dama', u' quickly': 'skorry', u' terrified': 'spoogy', u' belt': 'clop', u' gimcrack': 'gromky', u'handwriting': 'rooker', u' overtake': 'sloochat', u' come on': 'sloochat', u' valet de chambre': 'orange', u' while': 'lomtick', u' young lady': 'ptitsa', u' fully grown': 'bolshy', u' goofball': 'veck', u' office': 'mesto', u' read/write head': 'rasoodock', u' holler': 'crark', u' celestial pole': 'shest', u' disgustful': 'merzky', u' tether': 'tree', u' closed chain': 'shaika', u' gunslinger': 'pooshka', u' jape': 'smeck', u'drink': 'peet', u' call': 'warble', u' rapidly': 'skorry', u' social club': 'shlaga', u' big-chested': 'nadmenny', u' backbone': 'keeshkas', u' sexual activity': 'pol', u' abdominal cavity': 'brooko', u' overturn': 'razdrez', u' anatomy': 'plott', u' face pack': 'shaika', u' naughtiness': 'baddiwad', u' openhanded': 'bolshy', u'flesh': 'plott', u' dance orchestra': 'banda', u' fortune': 'banda', u' annulus': 'shaika', u'erecting': 'pan-handle', u' sand': 'keeshkas', u' swing': 'shive', u' prohibitionist': 'osoosh', u' make it': 'sloochat', u'generous': 'sammy', u'titty': 'groody', u'drunk': 'pyahnitsa', u' daub': 'mesto', u' shoal': 'skolliwoll', u' wit': 'rasoodock', u' story': 'raskazz', u' apace': 'skorry', u' son of a bitch': 'sod', u' simoleons': 'skriking', u' terminal': 'shest', u' bunk bed': 'chepooka', u' shank': 'tally', u' food for thought': 'pishcha', u' combat': 'drat', u' theatre': 'domy', u' doorbell': 'zvonock', u' laundry': 'cheest', u' beverage': 'peet', u' bechance': 'sloochat', u' rostrum': 
'morder', u' rima oris': 'rot', u' caput': 'rasoodock', u'heed': 'slooshy', u' form': 'plott', u' forefather': 'pee', u'scared': 'poogly', u'library': 'biblio', u' yarn': 'raskazz', u'hair': 'voloss', u' range': 'oozy', u' estimation': 'messel', u' disco biscuit': 'itty', u' chapeau': 'shlapa', u' bent': 'banda', u'lead': 'tree', u' slip by': 'sloochat', u' chance': 'sloochat', u' bend': 'prestoopnik', u' amour': 'interessovat', u' big(p)': 'bolshy', u'trout': 'forella', u'noise': 'shoom', u' space': 'mesto', u' superstar': 'odin', u' cock-a-hoop': 'bolshy', u' crew': 'shaika', u' betrothal': 'drat', u' triggerman': 'pooshka', u' eliminate': 'sloochat', u' pipe dream': 'sneety', u' public figure': 'eemya', u'jam': 'jammiwam', u' pass along': 'sloochat', u' newspaper publisher': 'gazetta', u' song': 'warble', u" objet d'art": 'lomtick', u' understructure': 'noga', u'lady friend': 'ptitsa', u' offspring': 'molodoy', u' jackass': 'veck', u'tongue': 'yahzick', u' trauma': 'vred', u' chary': 'oomny', u' founding father': 'pee', u' tomcat': 'kot', u' leash': 'tree', u' patch': 'mesto', u'three': 'tree', u'guard': 'chasso', u'female': 'sharp', u' thought process': 'messel', u' authorize': 'sloochat', u' pungency': 'mounch', u' give': 'sloochat', u' sentry go': 'chasso', u' clock time': 'raz', u' life sentence': 'jeezny', u'buy': 'kupet', u'coke': 'vellocet', u' tip': 'privodeet', u' tit': 'groody', u' young woman': 'ptitsa', u'construction': 'smot', u' pillock': 'gloopy', u'whitlow': 'prestoopnik', u' oral cavity': 'rot', u'ear': 'ooko', u' humanity': 'orange', u' washables': 'cheest', u'learning ability': 'rasoodock', u'skeleton key': 'polyclef', u' human being': 'orange', u' frightened': 'spoogy', u' meshing': 'drat', u' expire': 'sloochat', u' little girl': 'ptitsa', u' halo': 'shaika', u' olfactory perception': 'von', u' oculus': 'glazz', u' brainpower': 'rasoodock', u' camp': 'shaika', u' theater': 'domy', u' loathsome': 'merzky', u' feisty': 'bezoomy', u'goodness': 
'horrorshow', u' ternary': 'tree', u' pulp': 'plott', u'happen': 'sloochat', u' tidings': 'slovo', u'old': 'starry', u'sick': 'bolnoy', u' snub': 'shive', u' baseball swing': 'shive', u' crime syndicate': 'shaika', u' euphony': 'pop-disk', u' squat': 'sod', u' write up': 'raskazz', u'star': 'privodeet', u' hag': 'lighter', u' fissure': 'chelloveck', u' boodle': 'skriking', u' snout': 'morder', u'stab': 'yeckate', u' frolic': 'filly', u' hap': 'sloochat', u'ghost': 'dook', u' trashy': 'gromky', u'tomcat': 'kot', u' dinero': 'skriking', u' cruddy': 'merzky', u' beginner': 'pee', u' companion': 'chelloveck', u' saccharide': 'sakar', u' inebriant': 'firegold', u' bread': 'skriking', u' fray': 'razdrez'}
class Nadsat(callbacks.Plugin):
    """Supybot plugin that translates English phrases into Nadsat slang."""

    def nadsat(self, irc, msg, args):
        """<phrase>

        Translates English into Nadsat Slang
        """
        # dict.get(word, word) keeps unknown words unchanged and avoids the
        # original double lookup (naddict.get(...) followed by naddict[word]).
        nadphrase = ' '.join(naddict.get(word, word) for word in args)
        irc.reply(nadphrase, prefixNick=True)


Class = Nadsat
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
UTF-8
|
Python
| false | false | 2,014 |
10,213,432,249,785 |
be6c6436c28d314e3a97c2cfc341d5d7e4a1d00e
|
1d103214adcd3d7834ec85308e14c160df14c5f0
|
/tests/test_givens.py
|
7d3602fdeb63b9fd7ac5d76300f532b424b59bc4
|
[] |
no_license
|
ericmjonas/pykrylov
|
https://github.com/ericmjonas/pykrylov
|
c41a22d82c345d3223cac9bd85e5ddfd89d8fe92
|
b1022dbf07a9be601a2c23c285175281a2c48ded
|
refs/heads/master
| 2020-05-16T20:30:14.301575 | 2014-07-29T21:20:27 | 2014-07-29T21:20:27 | 22,395,012 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy as np
from pykrylov import givens
from pykrylov import util
from nose.tools import *
def test_simple_L():
    """
    Left-multiplication Givens rotation: zero A[3, 1] using rows 3 and 2
    (example matrix from Wikipedia).
    """
    A = np.array([[6, 5, 9, 3],
                  [5, 1, 4, 2],
                  [8, 4, 3, 6],
                  [1, 2, 3, 4]], dtype=float)

    target_row = 3   # row whose entry should become zero
    pivot_row = 2    # row rotated against the target
    target_col = 1   # column of the entry to zero

    c, s = givens.givens(A[pivot_row, target_col],
                         A[target_row, target_col])
    rotated = givens.apply_Givens_rotation_f(target_row, pivot_row, (c, s), A)
    assert util.close_zero(rotated[target_row, target_col])
def test_simple_R():
    """
    Right-multiplication analogue of test_simple_L: zero A[1, 3] by
    rotating columns 3 and 2.
    """
    A = np.array([[6, 5, 9, 3],
                  [5, 1, 4, 2],
                  [8, 4, 3, 6],
                  [1, 2, 3, 4]], dtype=float)

    col_a = 3    # column whose entry should become zero
    col_b = 2    # column rotated against col_a
    row_tgt = 1  # we want (row_tgt, col_a) to be zero

    c, s = givens.givens(A[row_tgt, col_b],
                         A[row_tgt, col_a])  # this is the value we want to zero
    Anew = givens.apply_Givens_rotation_f(col_a, col_b, (c, s), A, "R")
    # Removed leftover debug output: `print A` / `print Anew` were
    # Python-2-only print statements that broke the module under Python 3.
    assert util.close_zero(Anew[row_tgt, col_a])
|
UTF-8
|
Python
| false | false | 2,014 |
7,224,135,042,294 |
c86bcd10594ed102b2f14776e89b1d4f5a2ca678
|
c8be22bb43858b00d7c7ca879d92a60f49b8f02f
|
/filtering.py
|
52016975d1d9538fdf9fe26704f36baf1276e674
|
[] |
no_license
|
hashx101/info256-proj1
|
https://github.com/hashx101/info256-proj1
|
20d5a0cc08631d75f665fcb0fa8b5af7b75c2aca
|
785f09f2d70c9249fcaff5c1863240207931716a
|
refs/heads/master
| 2020-04-06T06:53:26.200901 | 2013-10-16T16:58:45 | 2013-10-16T16:58:45 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import nltk
import string
# Shared NLP helpers used by the filter functions below.
lemmatizer = nltk.WordNetLemmatizer()
stemmer = nltk.stem.porter.PorterStemmer()
# Identity translation table; string.maketrans is the Python-2 str API.
table = string.maketrans("","")
# Stopword lookup: maps each stopword (plus its lemma and stem) to 1 so that
# any normalized form of a stopword is recognized.
stopwordsDict = {}
with open('stopwords.dict') as f:
    for word in f.readlines():
        word = word.lower().strip()
        # NOTE(review): stem_word is an old NLTK API name — confirm against
        # the pinned nltk version.
        for form in [word, lemmatizer.lemmatize(word), stemmer.stem_word(word)]:
            stopwordsDict[form] = 1
def stripPunct(s):
    """Delete every punctuation character from *s* (Python-2 str.translate)."""
    punct_free = s.translate(table, string.punctuation)
    return punct_free
def tokenize(s):
    """Split *s* into word/punctuation tokens using NLTK's wordpunct tokenizer."""
    tokens = nltk.tokenize.wordpunct_tokenize(s)
    return tokens
def lower(word):
    """Return *word* lower-cased."""
    lowered = word.lower()
    return lowered
def lemmatize(word):
    """Reduce *word* to its WordNet lemma via the shared lemmatizer."""
    lemma = lemmatizer.lemmatize(word)
    return lemma
def stem(word):
    """Reduce *word* to its Porter stem via the shared stemmer."""
    stemmed = stemmer.stem_word(word)
    return stemmed
def removeStopwords(word, stopwords=stopwordsDict):
    """Return *word* unless it appears in *stopwords*, in which case None."""
    if word in stopwords:
        return None
    return word
def chainFilter(*fns):
    """Compose per-word transforms into one sequence filter.

    Each function in *fns* is mapped over the sequence in turn; any falsy
    result (e.g. None from removeStopwords, or an empty string) is dropped
    before the next transform runs.

    Returns:
        A function taking a sequence of words and returning the filtered,
        transformed sequence.
    """
    def f(seq):
        for fn in fns:
            # filter(None, ...) keeps only truthy items — the idiomatic form
            # of the original `lambda w: w if w else False` predicate.
            seq = filter(None, map(fn, seq))
        return seq
    return f
if __name__ == "__main__":
words = "I am a stupid SenTence 12.".split(" ")
filterFn = chainFilter(lower, lambda w: removeStopwords(w))
print filterFn(words)
|
UTF-8
|
Python
| false | false | 2,013 |
14,370,960,594,510 |
ff7350d36c539ed229b5c15299ddf70d1e7a4c5c
|
c88a247c6c937a6911eaf80fc88f210574f043db
|
/src/test_by_user.py
|
5dac61a250acf7224626857891085cab8340c5f4
|
[] |
no_license
|
tychofreeman/repo_metrics
|
https://github.com/tychofreeman/repo_metrics
|
59d9c355b707aa3a78f0512f95038cc1cf188aea
|
170d83c458c9621307fadb40de3ef0f4dd7160fe
|
refs/heads/master
| 2021-01-17T21:29:42.323073 | 2013-05-02T04:21:46 | 2013-05-03T03:00:24 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import unittest
from test_support import Changeset
from filters import by_user
class TestByUser(unittest.TestCase):
    """Unit tests for the by_user changeset filter."""

    def test_not_matching_user_returns_false(self):
        predicate = by_user(['A User'])
        self.assertFalse(predicate(Changeset(user='Another User')))

    def test_matching_user_returns_true(self):
        predicate = by_user(['A User'])
        self.assertTrue(predicate(Changeset(user='A User')))

    def test_case_insensitive(self):
        predicate = by_user(['A USER'])
        self.assertTrue(predicate(Changeset(user='A User')))

    def test_partial_matches(self):
        predicate = by_user(['user'])
        self.assertTrue(predicate(Changeset(user='A User')))
|
UTF-8
|
Python
| false | false | 2,013 |
16,982,300,700,049 |
f51594ae04c7999163dd0e20878fb81e857f012c
|
80af2a01aeaafe68ed614610f6c67d781ad47a79
|
/sysdevel/distutils/configure/hgtools_py.py
|
2be4a6ff895a6ff213efcb129fcd2f5b1a6fe265
|
[
"MPL-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
non_permissive
|
pombredanne/pysysdevel
|
https://github.com/pombredanne/pysysdevel
|
46e20f9a65d30a55049938fb93c76ddeb302c832
|
29e5b077b85b19759234ab803698090439c9f428
|
refs/heads/master
| 2021-01-18T20:29:07.921310 | 2013-11-14T22:21:04 | 2013-11-14T22:21:04 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from ..configuration import py_config
class configuration(py_config):
    """
    Find/install hgtools.

    Thin wrapper around py_config that pins the package name ('hgtools')
    and the version checked for ('3.0.2').
    """
    def __init__(self):
        # debug=False per the base class's configure machinery.
        py_config.__init__(self, 'hgtools', '3.0.2', debug=False)
|
UTF-8
|
Python
| false | false | 2,013 |
2,070,174,248,919 |
3aa565e7301861b23f9cdcbd5dadec49db0ef84f
|
91b62431307b341b45ba066a192d55d422081cba
|
/server.py
|
dad2bcae6d16b508b5188865e9a068425320d823
|
[] |
no_license
|
groodt/99bottles-jmeter
|
https://github.com/groodt/99bottles-jmeter
|
73b1cfa6b66f1a1fe85b2c5405a52964b1904c94
|
bac7db4d649756f1d90bfca0378bef5e1f5e1d7e
|
refs/heads/master
| 2020-05-27T08:02:48.497743 | 2011-11-26T19:57:56 | 2011-11-26T19:57:56 | 2,711,004 | 4 | 4 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
import bottle
import simplejson
bottle.debug(True)
@bottle.post('/bottle')
def store_bottle():
    """Handle a JSON POST describing one verse of the '99 Bottles' song.

    Expects a body with keys 'bottles', 'drink', 'date' and 'thread';
    prints the verse to the console and returns a plain-text reply.
    """
    # Read the whole body in one call instead of joining readlines().
    json_string = bottle.request.body.read()
    parsed_json = simplejson.loads(json_string)
    (num_bottles, drink, date, thread) = (parsed_json['bottles'], parsed_json['drink'], \
            parsed_json['date'], parsed_json['thread'])

    # Print the "99 Bottles" song to the console. Correctly pluralise "bottle".
    plural_bottles = "bottle" if (int(num_bottles)==1) else "bottles"
    print("%s %s of %s on the wall. Date=%s Thread=%s" % (num_bottles, plural_bottles, \
            drink, date, thread))

    return "Cheers!"
# Run the server on localhost:9999 using the paste backend; reloader=True
# restarts the process when source files change (development mode).
bottle.run(server="paste", host='localhost', port=9999, reloader=True)
|
UTF-8
|
Python
| false | false | 2,011 |
17,781,164,617,965 |
05e49c76347c78fe27947d7607b35e9da4da725a
|
fb6c3385d0545f9645dec1f4ed5adeeed4b11087
|
/seed.py
|
9e6d3811f4b00d2a62b4c2213e8e42bc0a9c0f38
|
[] |
no_license
|
Eleonore9/Tipsy
|
https://github.com/Eleonore9/Tipsy
|
6e0de086b5131592175a24091350ac7dbbc0654e
|
902c9f88a40d9796687c88fc1799b16df1aefa7e
|
refs/heads/master
| 2016-09-05T13:40:55.487586 | 2012-11-07T19:42:42 | 2012-11-07T19:42:42 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import model
# Seed the Tipsy database with a set of demo users and one task per user.
db = model.connect_db()

christian = model.new_user(db, "[email protected]", "securepassword", "Christian")
nodrey = model.new_user(db, "[email protected]", "passepas", "Nodrey")
clo = model.new_user(db, "[email protected]", "passepasse", "Clo")
cicile = model.new_user(db, "[email protected]", "passeparla", "Cicile")
yuki = model.new_user(db, "[email protected]", "takapasser", "Yuki")
fatou = model.new_user(db, "[email protected]", "passeici", "Fatou")
elise = model.new_user(db, "[email protected]", "brevet", "Elise")
ele = model.new_user(db, "[email protected]", "motdepasse", "Ele")
claire = model.new_user(db, "[email protected]", "passeport", "Claire")

# One seed task per user, in the same order the users were created.
model.new_task(db, "Complete this task list", christian)
model.new_task(db, "Find a job in NYC", nodrey)
model.new_task(db, "Publish a high IF paper", clo)
model.new_task(db, "Get a bloody INSERM tenure", cicile)
model.new_task(db, "Survive Phd", yuki)
model.new_task(db, "Enjoy Pasteur Inst", fatou)
model.new_task(db, "Enjoy patent consultancy", elise)
model.new_task(db, "Create a web app", ele)
model.new_task(db, "Meet Cicile and I", claire)
|
UTF-8
|
Python
| false | false | 2,012 |
18,236,431,172,004 |
be838f69bdce77450e9d7d13fda761624373c82b
|
cc0bdc8928c8aa96b67bee50cf4ac9d51920f6ed
|
/src/main/python/eval_inferrer.py
|
682148c3b29e18ae2a22cd9c87e5632aa643d333
|
[] |
no_license
|
shilad/SourceGeography
|
https://github.com/shilad/SourceGeography
|
b6aed3bbc1bb4e4cb88316c43267b7fa1eeff47c
|
07357c197e1e3e23d6a9013aee0ae8fee7832b54
|
refs/heads/master
| 2016-09-08T00:25:31.767739 | 2014-09-24T01:51:11 | 2014-09-24T01:51:11 | 22,221,797 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# According to Google (https://support.google.com/webmasters/answer/1347922?hl=en)
import sys
import logistic_inferrer
import urlinfo
from sg_utils import *
import nb_inferrer
import rule_inferrer
import baseline_inferrer
TEST_ALG = 'logistic'
def read_test(dao, path):
    """Load the hand-coded URL -> (UrlInfo, country code) test set.

    NOTE(review): the *path* parameter is never used; the function always
    reads PATH_CODED_URL_COUNTRIES — confirm whether that is intended.
    """
    # Map lower-cased country name -> ISO code.
    names = {}
    for line in sg_open(PATH_COUNTRY_NAMES):
        (cc, name) = line.lower().strip().split(' ', 1)
        names[name.strip()] = cc.strip()

    data = {}
    for line in sg_open(PATH_CODED_URL_COUNTRIES):
        tokens = line.strip().split('\t', 1)
        if len(tokens) != 2:
            continue
        (url, name) = tokens
        name = name.lower()
        if name not in names:
            # repr() replaces the Python-2-only backtick syntax (`name`).
            warn('unknown name: %s' % (repr(name),))
            continue

        ui = dao.get_url(url)
        if not ui:
            warn('unknown url: %s' % (repr(url),))
            continue
        data[url] = (ui, names[name].lower())

    warn('retained %d urls' % len(data))
    return data
def normalize_cc(cc):
    """Lower-case a country code, mapping the 'uk' alias to ISO 'gb'."""
    lowered = cc.lower()
    return 'gb' if lowered == 'uk' else lowered
def is_hard(dao, ui):
    """Return True when the URL's country is not trivially determined.

    'mil'/'gov' TLDs and country-specific TLDs known to the dao are easy;
    everything else is hard.
    """
    tld = ui.tld
    if tld in ('mil', 'gov'):
        return False
    country_specific = tld not in GENERIC_TLDS and tld in dao.tld_countries
    return not country_specific
def test_feature(feat, test):
    """Evaluate one inferrer feature against the coded test set and print stats.

    *feat* is either a distribution-style feature (has infer_dist) or a
    single-guess feature (has infer); *test* maps url -> (UrlInfo, actual cc).
    NOTE(review): this function reads the module-global `dao` (set in the
    __main__ block) via is_hard(dao, ui) — confirm that is intentional.
    """
    num_missing = 0
    hard_correct = []
    hard_wrong = []
    correct = []
    wrong = []
    for url in test:
        (ui, actual_cc) = test[url]
        if hasattr(feat, 'infer_dist'):
            # Distribution-style feature: rank countries by probability.
            (conf, dist) = feat.infer_dist(ui)
            if not dist:
                num_missing += 1
                continue
            top = sorted(dist, key=dist.get, reverse=True)
            top_prob = dist[top[0]]
            # k records (url, actual, top-3 predictions, confidence).
            k = (url, actual_cc, top[:3], top_prob)
            if normalize_cc(top[0]) == normalize_cc(actual_cc):
                correct.append(k)
                if is_hard(dao, ui): hard_correct.append(k)
            else:
                wrong.append(k)
                if is_hard(dao, ui): hard_wrong.append(k)
        else:
            # Single-guess feature: fixed pseudo-confidences 0.9 / 0.5.
            (guess, rule) = feat.infer(ui)
            if not guess:
                num_missing += 1
                continue
            if normalize_cc(guess.iso) == normalize_cc(actual_cc):
                k = (url, actual_cc, guess, 0.9)
                correct.append(k)
                if is_hard(dao, ui): hard_correct.append(k)
            else:
                k = (url, actual_cc, guess, 0.5)
                wrong.append(k)
                if is_hard(dao, ui): hard_wrong.append(k)

    # Mean confidences; last tuple element is always the confidence.
    overall_conf = sum([x[-1] for x in correct + wrong]) / len(correct + wrong)
    correct_conf = sum([x[-1] for x in correct]) / len(correct)
    if wrong:
        wrong_conf = sum([x[-1] for x in wrong]) / len(wrong)
    else:
        wrong_conf = 0.0

    total = len(test)
    # Python-2 print statements; summary, then per-URL detail.
    print 'Feature %s had %d correct, %d wrong (%.1f%%), %d missing, coverage=%.1f%% confs c=%.7f, w=%.7f, all=%.3f. Wrong are:' % \
          (feat.name, len(correct), len(wrong), 100.0 * len(correct) / len(correct + wrong),
           num_missing, 100.0 * (total - num_missing) / total, correct_conf, wrong_conf, overall_conf)
    if hard_correct or hard_wrong:
        print 'Hard domains: %d correct, %d wrong (%.1f%%)' % (len(hard_correct), len(hard_wrong), 100.0 * len(hard_correct) / len(hard_wrong + hard_correct))
    for w in correct:
        print '\tcorrect: %s actual=%s pred=%s conf=%.3f' % w
    for w in wrong:
        print '\twrong: %s actual=%s pred=%s conf=%.3f' % w
    print
if __name__ == '__main__':
    # `dao` is intentionally module-global: test_feature reads it directly.
    dao = urlinfo.UrlInfoDao()
    test = read_test(dao, PATH_2012)
    # Dispatch on the module-level TEST_ALG constant.
    if TEST_ALG == 'nb':
        # test each individual feature
        inf = nb_inferrer.NaiveBayesInferrer(dao)
        for feat in inf.features:
            test_feature(feat, test)

        # test the feature on ourself
        test_feature(inf, test)
    elif TEST_ALG == 'logistic':
        # test each individual feature
        inf = logistic_inferrer.LogisticInferrer(dao)
        for feat in inf.features:
            test_feature(feat, test)

        # test the feature on ourself
        test_feature(inf, test)
    elif TEST_ALG == 'rule':
        # test each individual feature
        inf = rule_inferrer.Inferrer(dao)
        test_feature(inf, test)
    elif TEST_ALG == 'baseline':
        inf = baseline_inferrer.BaselineInferrer(dao)
        test_feature(inf, test)
    else:
        raise Exception('unknown algorithm: ' + TEST_ALG)
|
UTF-8
|
Python
| false | false | 2,014 |
3,891,240,419,881 |
d1c8ab34d06dadb0f42bad37ff9b7e0ede09ee95
|
70cb5d5c4cc96f0e3cb0b4bbdadb095fdc34f15b
|
/etc/8queens.py
|
0217b0552cb8e4436d1500c71e1fc7ebd21e67fa
|
[] |
no_license
|
wcbubel/python-stuffs
|
https://github.com/wcbubel/python-stuffs
|
14bec9ab88ce159c6b8b3a138e1829b5018be605
|
f1cb3cd6549577c996f2094de20587a6762ff74d
|
refs/heads/master
| 2020-12-24T14:56:36.699699 | 2013-03-22T17:58:12 | 2013-03-22T17:58:12 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#
# 8 Queens
#
def n_queens(n, width):
    """Return all placements of *n* non-attacking queens on a board
    *width* columns wide; each solution is a list of column indices,
    one per row."""
    if n == 0:
        return [[]]
    return add_queen(n - 1, width, n_queens(n - 1, width))
def add_queen( new_row, width, prev_sol ):
solut = []
for sol in prev_sol:
for new_col in range(width):
if safe_queen(new_row, new_col, sol):
solut.append(sol + [new_col])
return solut
def safe_queen(new_row, new_col, sol):
    """Return 1 if a queen at (new_row, new_col) attacks no queen already
    placed in *sol*, else 0."""
    for row in range(new_row):
        same_column = sol[row] == new_col
        same_diagonal = abs(sol[row] - new_col) == abs(row - new_row)
        if same_column or same_diagonal:
            return 0
    return 1
# Print every 8-queens solution, one list of column indices per line
# (Python-2 print statement).
for sol in n_queens( 8, 8 ):
    print sol
|
UTF-8
|
Python
| false | false | 2,013 |
14,955,076,166,637 |
c1c1aa21a48fdc62474484a8010211a698dbbb46
|
b842b7529b64fdade141e59a18f241b624962dd4
|
/clientconf.py
|
fdeefd6cab8de39074012fa323bb7fbe4d43d340
|
[
"CC0-1.0",
"BSD-2-Clause"
] |
permissive
|
godarklight/DMPHive
|
https://github.com/godarklight/DMPHive
|
b5959b5898c47042e06d8933f3bb76d985258274
|
9c282627e29a019f377a4dd5a3550728d8ae04cd
|
refs/heads/master
| 2020-12-25T21:44:36.324522 | 2014-11-20T10:31:58 | 2014-11-20T10:35:30 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
import configparser
CONFIG = configparser.ConfigParser()
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
CONFIGFILE = os.path.join(ROOT_DIR, 'client.cfg')
def generate_default_config():
    """Populate the module-level CONFIG parser with default sections."""
    # configparser accepts a plain dict per section, equivalent to
    # assigning {} and then setting each key.
    CONFIG['GENERAL'] = {'datadir': 'Data'}
    CONFIG['LOGGING'] = {'debugging': 'True'}
    CONFIG['HANDLERS'] = {'root': 'Handlers'}
    CONFIG['KEYPAIR'] = {'Private': 'PrivKey.xml'}
def write_config():
    """Serialize the module-level CONFIG to CONFIGFILE.

    Failures are reported rather than raised, matching the module's
    best-effort style.
    """
    try:
        # `cfg_file` instead of `file`, which shadowed the builtin.
        with open(CONFIGFILE, 'w') as cfg_file:
            CONFIG.write(cfg_file)
    except Exception as inst:
        # Bug fix: `inst` was captured but never shown, making failures
        # undiagnosable; include the exception in the report.
        print('Encountered exception while trying to write configuration file!')
        print(inst)
# Module import side effect: load client.cfg, creating it with defaults when
# missing or empty, and falling back to in-memory defaults when unreadable.
try:
    # ConfigParser.read returns the list of files successfully parsed;
    # an empty list means the file is absent or empty.
    if not CONFIG.read(CONFIGFILE):
        print('No config file or config is empty! Loading defaults and generating client.cfg...')
        generate_default_config()
        write_config()
except Exception as inst:
    # NOTE(review): on a parse error the defaults are loaded in memory but
    # the broken file is not rewritten — presumably deliberate; confirm.
    print('Error reading config file! Loading defaults...')
    generate_default_config()
|
UTF-8
|
Python
| false | false | 2,014 |
3,092,376,479,452 |
75ca6ac476c26ece6c02689f46a06e3cf5d1ad75
|
04a7df65294299ea8ebab27cb01d795894dcd9dd
|
/setup.py
|
4173bac8fac280f8b23df03dd3bbddca1eba1c12
|
[] |
no_license
|
yoosefi/piki
|
https://github.com/yoosefi/piki
|
f79b5f2e3692a2183b196c8c77cf051ac4d79c1d
|
f46fbee5fd1f9d99f2e1a3e34ebc281bd744aa16
|
refs/heads/master
| 2016-09-05T13:02:13.878908 | 2013-12-18T08:26:08 | 2013-12-18T08:26:08 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Packaging script for the `piki` MediaWiki API library.
from piki import __version__
# Prefer setuptools; fall back to the stdlib distutils on older installs.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup
setup(
    name = 'piki',
    version = __version__,
    description = 'MediaWiki API Library',
    author = 'magicmarkers',
    author_email = '[email protected]',
    url = "https://github.com/magicmarkers/piki/",
    packages = ['piki'],
    license = 'GNU-GPLv3',
    platforms = 'Posix; MacOS X; Windows',
    # Trove classifiers as shown on PyPI.
    classifiers = [
        'Development Status :: 1 - Planning',
        'Intended Audience :: Developers',
        'Intended Audience :: Information Technology',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
        'Natural Language :: English',
        'Operating System :: POSIX',
        'Operating System :: Unix',
        'Programming Language :: Python :: 2.7',
        'Topic :: Internet :: WWW/HTTP :: Site Management',
        'Topic :: Scientific/Engineering :: Interface Engine/Protocol Translator',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ]
)
|
UTF-8
|
Python
| false | false | 2,013 |
2,319,282,357,834 |
a095a2b3730290cc131ea9a3074f0dee215ecd53
|
98c6ea9c884152e8340605a706efefbea6170be5
|
/tests/still_torun/test_python_canonicalisation.py
|
4a0f0c19de0dd8aad0684d76c27bf0bfb2b73ea9
|
[] |
no_license
|
MrHamdulay/csc3-capstone
|
https://github.com/MrHamdulay/csc3-capstone
|
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
|
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
|
refs/heads/master
| 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from languages.python import PythonProgram
code = '''
print 'hello world'
class A:
def first_function(self, a):
a *= 2
if isinstance(a, int):
print 'This most definitely gets executed for sure'
return first_function
def second_function(b):
b = 2
for i in xrange(1000):
first_function(i)
b = 2
for i in xrange(1000):
first_function(i)
b = 2
for i in xrange(1000):
first_function(i)'''
expected='''print 's'
class A:
def f(i, i):
i *= 2
if i(i, i):
print 's'
return i
def f(i):
i = 2
for i in i(1000):
i(i)
i = 2
for i in i(1000):
i(i)
i = 2
for i in i(1000):
i(i)'''
program = PythonProgram(code, 'a.py')
assert program.get_canonicalised_program_source.strip() == expected.strip()
|
UTF-8
|
Python
| false | false | 2,014 |
5,068,061,430,560 |
8b9c86c4403c8ffd35169ac8230fcf3ea9127f28
|
b6bc83894fe3618243b56054f3493b99471ed973
|
/testtime.py
|
33a0e099966a56d814cf56ec4e69a865fee63f2a
|
[] |
no_license
|
knd/MiniPoolAutoplayer
|
https://github.com/knd/MiniPoolAutoplayer
|
78e52bd9ea35492af3780580f4ec640bb54a2a33
|
9f8a082611c4790907efd12e096a40dacf26205d
|
refs/heads/master
| 2016-09-02T06:14:25.994174 | 2012-04-19T03:59:20 | 2012-04-19T03:59:20 | 4,070,485 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Busy-loop benchmark: bump a counter 500*700 times and undo the bump
# whenever it lands on an even value, so the counter ends at 1.
total = 0
for i in range(500 * 700):
    candidate = total + 1
    total = candidate - 1 if candidate % 2 == 0 else candidate
|
UTF-8
|
Python
| false | false | 2,012 |
16,183,436,774,799 |
aa8710ffc32b84edf1fdc707ab7dadbc18ca5443
|
91b28adc29e5fe2c05a19855638b98afc62880a6
|
/uservoice_console.py
|
b003ee80f05868b52c2583eb209a8ad2032704e5
|
[] |
no_license
|
JGaard/Uservoice_ticket_console
|
https://github.com/JGaard/Uservoice_ticket_console
|
d5c98bea3698771f7995bf86bd5b293a534bdd5c
|
d252351560d6a590781dd9956aee46ea4fd4083c
|
refs/heads/master
| 2016-09-10T10:20:48.184702 | 2013-10-06T20:58:18 | 2013-10-06T20:58:18 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
""" Experimental Script to help pull in Uservoice API data and stream line the ticket response process. The following was written by Jamie Gaard.
She does NOT give express permission for anyone to use the following and will severly punish anyone who attempts to fraud her work. She is above the law"""
"""Copyright@JamieGaard"""
import uservoice
import re
##Authentication is a superclass that sets the client_login information used to create, update, change and view articles and tickets.
class Authentication(object):
    """Holds UserVoice API credentials and a client logged in with them."""

    def __init__(self, subdomain, api_key, api_secret=None):
        # Store the credentials first; client_login() reads them from self.
        self.subdomain = subdomain
        self.api_key = api_key
        self.api_secret = api_secret
        self.client = self.client_login()

    def client_login(self):
        """Create and return a uservoice.Client from the stored credentials."""
        client = uservoice.Client(self.subdomain, self.api_key, self.api_secret)
        return client
class Tickets(Authentication):
    """Wraps the UserVoice ticket endpoints: loads and parses the open
    tickets on construction, and can post responses / close tickets.

    Tickets whose first message does not follow the expected
    Name/Location/Device(s)/Message template are collected in
    nonconforming_tickets instead of open_tickets.
    """
    def __init__(self, subdomain, api_key, api_secret):
        super(Tickets, self).__init__(subdomain, api_key, api_secret)
        # Tickets/bodies that could not be parsed into the expected template.
        self.nonconforming_tickets = []
        self.open_tickets = self.set_open_tickets()
        # Default the current ticket to an arbitrary (first-iterated) one.
        # NOTE(review): raises StopIteration when there are no open tickets.
        self.current_ticket = self.set_current_ticket(self.open_tickets.iterkeys().next())
    def set_current_ticket(self, ticket_number):
        """Return the parsed open ticket stored under ticket_number."""
        return self.open_tickets[ticket_number]
    def set_open_tickets(self):
        """Fetch all open tickets and parse each into a flat dict keyed by id."""
        open_tickets = {}
        tickets = self.client.get_collection('/api/v1/tickets/search.json?query=status%3Aopen')
        for ticket in tickets:
            body = self.format_ticket(ticket)
            try:
                open_tickets[ticket['id']] = {'subject':ticket['subject'], 'sender_username':ticket['messages'][0]['sender']['name'],
                                              'sender_email':ticket['messages'][0]['sender']['email'], 'devices':body['Device(s)'], 'sender_name':body['Name'],
                                              'location':body['Location'], 'message':body['Message']}
            # Bug fix: this was a bare `except:`. body can be None (or lack
            # the template keys), so catch ordinary exceptions only — a bare
            # except would also swallow KeyboardInterrupt/SystemExit.
            except Exception:
                self.nonconforming_tickets.append(body)
        return open_tickets
    def retrieve_open_tickets(self):
        """Return the dict of parsed open tickets."""
        return self.open_tickets
    def retrieve_current_ticket(self):
        """Return the currently selected ticket."""
        return self.current_ticket
    def retrieve_nonconforming_tickets(self):
        """Return the tickets/bodies that failed template parsing."""
        return self.nonconforming_tickets
    def format_message(self, ticket):
        """Split the first message of a raw ticket into stripped lines."""
        ticket = ticket['messages'][0]['body']
        ticket = ticket.replace('\n\n','\n').splitlines()
        ticket = [x.strip(' ') for x in ticket]
        return ticket
    def format_body(self, ticket):
        """Turn the template lines into a dict; on failure the raw lines are
        appended to nonconforming_tickets and None is returned implicitly."""
        try:
            message_index = ticket.index('Message:')
            message = ticket[message_index:-1]
            ticket = ticket[:message_index] + ticket[-1:]
        except ValueError:
            # 'Message:' line missing; `message` stays unbound below.
            self.nonconforming_tickets.append(ticket)
        try:
            ticket = dict(s.split(':') for s in ticket)
            message = dict(s.split(':') for s in [''.join(message)])
            body_dict = ticket.copy()
            body_dict.update(message)
            return body_dict
        # Bug fix: `except ValueError, AttributeError:` (old Py2 syntax)
        # caught only ValueError, binding it to the name AttributeError;
        # the tuple form catches both, as evidently intended.
        # NOTE(review): when 'Message:' was missing above, `message` is
        # unbound and a NameError (not caught here) escapes — confirm
        # whether that path can occur in practice.
        except (ValueError, AttributeError):
            self.nonconforming_tickets.append(ticket)
    def format_ticket(self, ticket):
        """Parse a raw ticket's first message into a template dict (or None)."""
        message_dict = self.format_body(self.format_message(ticket))
        return message_dict
    def post_response(self, ticket_id, ticket_response):
        """Post ticket_response, wrapped in the standard greeting and
        signature, as a new message on the given ticket."""
        with self.client.login_as_owner() as owner:
            posted_response = owner.post("/api/v1/tickets/" + str(ticket_id) + "/ticket_messages.json?", {
                'ticket_message': {
                    #Custom headers and signatures with the message_body append to the middle
                    'text': 'Hi Twiner!' + '\n\n' +
                            ticket_response + '\n' +
                            "PS: Can you help us out? If you've been having fun on twine, please rate us 5 stars:" + '\n\n' +
                            "App Store: http://bit.ly/twineapple" + '\n\n' +
                            "Google Play: http://bit.ly/twinegoogle" + '\n\n' +
                            "- Etan (@etanb)" + '\n\n' +
                            "Head of twine Customer Experience" + '\n\n' +
                            "Get informative and entertaining updates on: Twitter and Facebook"
                }})
    def close_ticket(self, ticket_id):
        """Mark the given ticket as closed."""
        with self.client.login_as_owner() as owner:
            close_ticket = owner.put("/api/v1/tickets/" + str(ticket_id) + ".json", {
                'ticket': {
                    'state' : 'closed'
                }})
    def add_to_article_uses(self, ticket_id):
        """Increment the remote `uses` counter of an article.

        Bug fix: the original body referenced the undefined names
        `article_id` and `article`; it presumably meant the id passed in
        (despite the parameter name) and the article fetched for that id —
        confirm against callers and the UserVoice API.
        """
        with self.client.login_as_owner() as owner:
            article = owner.get("/api/v1/articles/" + str(ticket_id) + ".json")['article']
            update_article = owner.put("/api/v1/articles/" + str(ticket_id) + ".json", {
                'article': {
                    'uses' : article['uses']+1
                }})
class Articles(Authentication):
    """Wraps the UserVoice knowledge-base article endpoints and tracks a
    currently selected article."""
    def __init__(self, subdomain, api_key, api_secret):
        super(Articles, self).__init__(subdomain, api_key, api_secret)
        self.all_articles = self.set_all_articles()
        # Bug fix: current_article was initialised to the first *key* (an
        # integer id), but retrieve_article_title/body index it with string
        # keys and set_current_article stores the record — store the record.
        self.current_article = self.all_articles[self.all_articles.iterkeys().next()]
    def retrieve_article_title(self):
        """Return the title of the currently selected article."""
        return self.current_article['title']
    def retrieve_article_body(self):
        """Return the formatted text of the currently selected article."""
        return self.current_article['formatted_text']
    # Raw articles differ from all_articles in that their text has not been
    # normalised; they are already formatted for sending a response to the
    # user and should therefore be used in place of self.all_articles when
    # responding to users.
    def get_raw_articles(self):
        """Fetch every article with its text left untouched, keyed by id."""
        raw_articles = {}
        articles = self.client.get_collection('/api/v1/articles.json?filter=all')
        for article in articles:
            raw_articles[article['id']] = {'title':article['title'], 'formatted_text':article['formatted_text'], 'uses':article['uses'], 'text': article['text']}
        return raw_articles
    def set_all_articles(self):
        """Fetch every article, normalising its text for console display."""
        all_articles = {}
        articles = self.client.get_collection('/api/v1/articles.json?filter=all')
        for article in articles:
            all_articles[article['id']] = {'title':article['title'], 'formatted_text':article['formatted_text'], 'uses':article['uses'], 'text':self.formatting(article['text'])}
        return all_articles
    def set_current_article(self, article_number):
        """Select the article record stored under article_number."""
        self.current_article = self.all_articles[article_number]
    def record_use(self):
        """Bump the local use counter of the current article."""
        self.current_article['uses'] = self.current_article['uses'] + 1
    #formatting is only used for diplay purposes on the client and does not affect the response that is sent to the user.
    # NOTE(review): the first .replace argument appears to be a non-breaking
    # space in the original source — verify the file's encoding.
    def formatting(self, text):
        return text.encode('ascii', 'ignore').replace(' ', ' ').replace("'", "").replace('\n', '')
|
UTF-8
|
Python
| false | false | 2,013 |
6,880,537,640,748 |
512212bb98670da1bed44110397dee74f01bd7e9
|
df0709be820035fd7df9acaa57844b8449c7f97f
|
/Week 5/ProblemSet5/recursion.py
|
b557c192a9efae22868fb9a748ebc54ab365c961
|
[] |
no_license
|
leanton/6.00x
|
https://github.com/leanton/6.00x
|
11a9e6ca98374d9a99475f800a375dcb3e94d2d1
|
424153f85116e461d17b30b5d9e88eeb63a8ab82
|
refs/heads/master
| 2021-01-02T09:32:15.697407 | 2014-03-17T08:23:58 | 2014-03-17T08:23:58 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def reverseString(aStr):
    """
    Given a string, recursively returns a reversed copy of the string.
    For example, if the string is 'abc', the function returns 'cba'.
    The only string operations you are allowed to use are indexing,
    slicing, and concatenation.
    aStr: a string
    returns: a reversed string
    """
    # Base case: empty and one-character strings reverse to themselves.
    if len(aStr) <= 1:
        return aStr
    # Last character first, followed by the reversed remainder.
    return aStr[-1] + reverseString(aStr[:-1])
# Smoke test (Python 2 print statement): expected output is 'gfdsa'.
print reverseString('asdfg')
|
UTF-8
|
Python
| false | false | 2,014 |
5,153,960,777,328 |
e09aacd990bbf9ac0fc19ca94af7bfee8100d8bf
|
927259b503861aefec77c3230aa37e85ec9b2bfe
|
/tools/project-creator/android/__init__.py
|
495b23018513d1b8c3448d1671a2df289886727c
|
[
"MIT"
] |
permissive
|
nicolasgramlich/cocos2d-x
|
https://github.com/nicolasgramlich/cocos2d-x
|
325da9a33cd14abb91be5beac6ddef58434a5ae2
|
4bb9cb49bb2e0445e1f7fd2e75af8bd3a61275a2
|
refs/heads/master
| 2021-01-18T08:19:10.915735 | 2013-04-30T23:51:00 | 2013-04-30T23:51:00 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# This empty file indicates this folder as a module in python
# Don't remove me!
|
UTF-8
|
Python
| false | false | 2,013 |
7,868,380,092,578 |
29b13e6ea39ade3f2fd16d4f178bb72a0efa4bc0
|
7074827c95bb886a16e649375d6cc0faf151c60d
|
/datbigcuke/scheduler.py
|
dee0f2f06fdf189fe1184adf9a912cd2d8ab047a
|
[] |
no_license
|
churay/datbigcuke
|
https://github.com/churay/datbigcuke
|
7baa5223677d432864b558f49b576e99395087e7
|
d39a1108c60248916c2e2f8f48a94e5258e3a90e
|
refs/heads/master
| 2020-03-29T17:05:29.337354 | 2014-05-01T11:05:32 | 2014-05-01T11:05:32 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#Functions for scheduling a meeting time for a group
#(by Tom)
from datetime import timedelta, datetime, date, time
import math
#returns a datetime or time representing the
#start of the m minute interval
#in which the given datetime or time t lies
def time_interval_start(t, m):
    """Return the start of the m-minute interval that contains t.

    t may be a datetime or a time; the result is the same type with the
    minutes floored to a multiple of m. Any other type (checked with exact
    `type is`, as before) falls through and yields None.
    """
    floored_minutes = t.minute - (t.minute % m)
    if type(t) is datetime:
        return datetime(t.year, t.month, t.day, t.hour, floored_minutes)
    if type(t) is time:
        return time(t.hour, floored_minutes)
#computes the possible meeting times
#group_members is a dictionary of members whose
# keys are identifiers of the member
# values are lists of 2-tuples,
# containing a start time (datetime) and end time (datetime)
# these tell when that group member is unavailable
#deadline is a datetime specifying before when the meeting must occur
#duration is a timedelta specifying how long the meeting should be
#off_limits_start and off_limits_end are times
# which tell when the meeting should not be scheduled each day
#This function returns a list of 2-tuples describing possible meeting times
# first value being the time of the meeting
# second value being a list of group members
# who are busy at some pointduring the meeting, if any
# and this list is sorted first by time, then by availability
#
#Important assumptions:
#the off_limits time should cover a day change
# if not, return an error list
#the earliest schedulable time is an hour from now
def schedule_meeting(group_members, deadline, duration, off_limits_start, off_limits_end):
    """Rank candidate meeting start times before `deadline`.

    Works on a grid of m-minute intervals between off_limits_end and
    off_limits_start each day, starting one hour from now. Returns a list of
    (start_datetime, [busy_members]) tuples sorted by fewest distinct busy
    members, then total conflicts, then start time. See the comment block
    above this function for the full parameter contract.
    """
    #check off_limits assumption
    if(off_limits_start <= off_limits_end):
        return [(deadline, ['error: off limits times are bad'])]
    #m is the interval length, this must be 0 < m < 60
    m = 15
    midnight = time(0)
    #convert the given times to the intervals they are in
    overall_start = time_interval_start(datetime.now() + timedelta(hours=1, minutes=m), m)
    overall_end = time_interval_start(deadline, m)
    day_start = time_interval_start(off_limits_end, m)
    day_end = time_interval_start(off_limits_start, m)
    #create a structure that maps usable time intervals
    #to lists of members busy during that interval
    #and a similar structure that only has fully usable slots of length duration
    #this initializes all usable times to an empty list
    intervals = {}
    options = {}
    day = overall_start.date()
    while(day <= overall_end.date()):
        t = day_start
        end = day_end
        #start later on the first day, or end earlier on the last day, if necessary
        if(day == overall_start.date() and t < overall_start.time()):
            t = overall_start.time()
        if(day == overall_end.date() and overall_end.time() < end):
            end = overall_end.time()
        while(t < end):
            intervals[datetime.combine(day, t)] = []
            # Skip slots whose duration would run past midnight into the
            # next day; they stay in `intervals` but are never options.
            if((datetime.combine(day + timedelta(days=1), midnight) - duration) < datetime.combine(day, t)):
                t = (datetime.combine(day, t) + timedelta(minutes=m)).time()
                continue
            # A slot is an option only if the whole meeting fits before `end`.
            if((datetime.combine(day, t) + duration).time() <= end):
                options[datetime.combine(day, t)] = []
            t = (datetime.combine(day, t) + timedelta(minutes=m)).time()
        day = day + timedelta(days=1)
    #put busy group members in the appropriate intervals
    for member in group_members:
        for appointment in group_members[member]:
            t = time_interval_start(appointment[0], m)
            end = time_interval_start(appointment[1], m)
            while(t < end):
                if(t in intervals):
                    intervals[t].append(member)
                t = time_interval_start(t + timedelta(minutes=m), m)
    #use the intervals to fill the options with busy group members
    for option in options:
        t = option
        end = time_interval_start(option + duration, m)
        while(t < end):
            if t in intervals:
                options[option] += intervals[t]
            t = t + timedelta(minutes=m)
    #zip the options into a sortable form
    # (distinct busy members, total busy interval-memberships, start time)
    option_tuples = []
    for option in options:
        option_tuples.append((len(set(options[option])), len(options[option]), option))
    #construct the result
    result = []
    for option in sorted(option_tuples):
        result.append((option[2], list(set(options[option[2]]))))
    return result
|
UTF-8
|
Python
| false | false | 2,014 |
15,831,249,489,584 |
ff9c53e710ec7765dd2afac4e7454858ea15d0df
|
ca5043de8132c52050c9f997cc46e83e90fc99fc
|
/yelp_classifier.py
|
96e1acbff6998a05795974f1d527830ebbec53e6
|
[] |
no_license
|
conwayc/Yelp-Coding-For-Grub-entry
|
https://github.com/conwayc/Yelp-Coding-For-Grub-entry
|
468125bb4f47d97002f094df445e33c1a6d4fa00
|
eba0ea0571f4a673bb2651a2b49774d8166b28da
|
refs/heads/master
| 2020-12-25T00:05:12.647515 | 2012-03-17T01:44:41 | 2012-03-17T01:44:41 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import json
def train_and_predict(training_filename, test_filename):
    """Build per-user/per-business averages from the training reviews, then
    predict ratings for the test reviews.

    NOTE(review): generate_model and predict_reviews are not defined
    anywhere in this file — presumably they were meant to be imported or
    were lost; confirm before running, as calling this raises NameError.
    """
    with open(training_filename) as training_file:
        user_averages, business_averages = generate_model(training_file)
    with open(test_filename) as test_file:
        predict_reviews(test_file, user_averages, business_averages)
if __name__ == '__main__':
    # Bug fix: `argv` was used without ever being imported (NameError).
    from sys import argv
    if len(argv) != 3:
        print("usage: %s training-data.json test-data.json" % argv[0])
        # Bug fix: previously fell through after printing usage and crashed
        # on argv[1]; exit with a non-zero status instead.
        raise SystemExit(1)
    training_filename = argv[1]
    test_filename = argv[2]
    train_and_predict(training_filename, test_filename)
|
UTF-8
|
Python
| false | false | 2,012 |
7,645,041,832,411 |
ee0fc0cad3436cdea916ccf8feae38d13fa22afd
|
7e3ab9bce98a241b14de723e335e490933c9e43e
|
/motion.py
|
eb78e6b0b4d827ad4f2931f9cc06835a7ed54641
|
[] |
no_license
|
toopay/area51
|
https://github.com/toopay/area51
|
3fae27cd27603e16f7e16dbad7b082c8e769d545
|
b5bd6f8c5858ae93ce004ff457ba0336d3f35963
|
refs/heads/master
| 2022-11-15T02:17:25.469979 | 2013-11-18T02:25:05 | 2013-11-18T02:25:05 | 9,700,107 | 6 | 1 | null | false | 2022-11-12T09:49:39 | 2013-04-26T17:00:37 | 2021-11-24T21:24:16 | 2022-11-12T09:49:35 | 612 | 8 | 3 | 2 |
Python
| false | false |
#!/usr/bin/env python
"""
motion.py
~~~~~~~~~
Motion detection with OpenCV
Usage: ./motion.py
(Press 's' to start tracking and 'q' to Quit)
:author: Taufan Aditya
"""
import cv2
def init(video):
    """Open camera 0 at 320x240, create the named preview window, and
    return (VideoCapture, morphology kernel); prints a message and returns
    None when the device cannot be opened."""
    vc = cv2.VideoCapture(0)
    if not vc.isOpened():
        print "Cannot open device!"
        return
    # Set the vc to 320x240
    # (property ids 3/4 are frame width/height in the OpenCV 2.x API)
    vc.set(3, 320)
    vc.set(4, 240)
    cv2.namedWindow(video)
    # Set initial kernel for morphology transformation
    kernel = cv2.getStructuringElement(cv2.MORPH_CROSS,(3,3))
    return (vc,kernel)
def grayify(img):
    """Return a grayscale copy of the given RGB image."""
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    return gray
def get_components(frame, prev, kernel):
    """Diff the current frame against the previous one and return
    (binary diff image, contours of the changed regions)."""
    # Get diff frame for secondary window
    diff = cv2.subtract(grayify(frame), grayify(prev));
    # Binarise intensity changes above 5, then morphologically open the
    # result with the cross kernel to remove speckle noise.
    (thresh, diff) = cv2.threshold(diff, 5, 255, cv2.THRESH_BINARY)
    diff = cv2.morphologyEx(diff, cv2.MORPH_OPEN, kernel)
    # Find contours and hierarchy from diff (findContours mutates its input,
    # hence the copy)
    (contours, hierarchy) = cv2.findContours(diff.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
    return diff,contours
def get_moved_components(contours, min_width, min_height):
    """Return a tuple of bounding boxes (x, y, w, h) for the contours whose
    box exceeds the given minimum width and height; smaller boxes are
    dropped to reduce noise in the motion result."""
    boxes = (cv2.boundingRect(contour) for contour in contours)
    return tuple(box for box in boxes
                 if box[2] > min_width and box[3] > min_height)
def draw_motion(contours, frame):
    """Draw a green rectangle around every sufficiently large moved
    component and return the annotated frame (drawn in place)."""
    comps = get_moved_components(contours, 5, 5)
    for (x,y,w,h) in comps:
        cv2.rectangle(frame, (x,y), (x+w,y+h), (0,255,0))
    return frame
if __name__ == '__main__':
    # Tracking is off until the user presses 's'; until then only the raw
    # camera feed is streamed to the main window.
    init_stage = False
    wnd_main = "Main VC"
    wnd_debug = "Diff VC"
    prev = None
    # Initialize VideoCapture
    (vc,kernel) = init(wnd_main)
    while True:
        val,frame = vc.read()
        if init_stage:
            diff,contours = get_components(frame, prev, kernel)
            cv2.imshow(wnd_debug, diff)
            if not contours == None:
                frame = draw_motion(contours, frame)
        # Stream the frame for main window
        cv2.imshow(wnd_main, frame)
        # Keep this frame so the next iteration can diff against it.
        prev = frame
        # Force to take the last 8 bits of the integer returned by waitKey
        key = cv2.waitKey(15)
        key = key & 255 if key + 1 else -1
        # 'q' to Quit and 's' to Start
        if key == ord('q'):
            cv2.destroyAllWindows()
            vc.release()
            break
        elif key == ord('s') and init_stage == False:
            cv2.namedWindow(wnd_debug)
            init_stage = True
|
UTF-8
|
Python
| false | false | 2,013 |
6,932,077,231,779 |
4890d7d2188b34251900f28aedce845db4601cfb
|
2c970d7023762c77707972e56eb2a367681e1f3a
|
/get_address-python
|
d47f6970964dad7385a67ca1757d2d287f297bea
|
[] |
no_license
|
frogtron3030/scripts
|
https://github.com/frogtron3030/scripts
|
64209f1a70a8059274a6935c8fb7586979143a83
|
5c70d3291e62007e9800ee8c3a8b03f74251be5c
|
refs/heads/master
| 2016-07-27T06:34:18.393884 | 2014-09-19T14:04:43 | 2014-09-19T14:04:43 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# Resolve a hostname to its IPv4 address via DNS (Python 2 script).
import socket
url = 'www.google.com'
# gethostbyname raises socket.gaierror when resolution fails.
ipaddress = socket.gethostbyname(url)
print 'The IP Address of ' + url + ' is ' + ipaddress
|
UTF-8
|
Python
| false | false | 2,014 |
1,778,116,504,628 |
165cd6296ea3d0101534d329ae293adb33aa78dc
|
c6d2f32250f1a80296e817ffc0a0e0259f49458f
|
/test/test_bounds.py
|
4b16a1c3868328bc2fab4c0396568bcd4e94a2e3
|
[] |
no_license
|
lacon1/blender_nif_scripts
|
https://github.com/lacon1/blender_nif_scripts
|
d9b7e4172ee472ce3590f5d3ca1a6f6a717e702d
|
d5c491424291bd4b42f1e4c41c8ae2644cb350fe
|
refs/heads/master
| 2021-01-15T10:34:33.866693 | 2011-11-20T16:02:44 | 2011-11-20T16:02:44 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""Export and import bound boxes."""
import bpy
import nose.tools
import io_scene_nif.export_nif
import test
class TestBoundImport(test.Base):
    """Importing a nif with a bounding box must yield a Blender object that
    is displayed as its box bounds only."""

    def test_import(self):
        bpy.ops.import_scene.nif(filepath="test/import/bounding_box.nif",
                                 log_level='DEBUG')
        bound_obj = bpy.data.objects.get("Bounding Box")
        # The importer must flag the object as a bounds-only box display.
        assert bound_obj.draw_bounds_type == 'BOX'
        assert bound_obj.draw_type == 'BOUNDS'
class TestBoundExport(test.Base):
    """Round-trip test: import the bounding-box nif, select the object, then
    export it again."""

    def setup(self):
        # (A disabled earlier fixture built a cube named "Bounding Box" by
        # hand; the import below replaces it.)
        bpy.ops.import_scene.nif(filepath="test/import/bounding_box.nif",
                                 log_level='INFO')
        bpy.ops.object.select_name(name="Bounding Box")

    def test_export(self):
        # export
        # (Assertions on the exported block tree — e.g. that the root child
        # has_bounding_box — were disabled in the original and remain TODO.)
        bpy.ops.export_scene.nif(filepath="test/export/bounding_box.nif",
                                 log_level='DEBUG')
class TestBSBoundImport(test.Base):
    """Importing a nif whose bound is stored as a BSBound block must not
    raise (no further assertions in the original)."""

    def test_import(self):
        bpy.ops.import_scene.nif(filepath="test/import/bounding_box_bsbound.nif",
                                 log_level='DEBUG')
|
UTF-8
|
Python
| false | false | 2,011 |
11,338,713,696,373 |
9ca2c33d1408348e83986c5c678233c8b21afe86
|
616dbb92c7c1d5d4eefe547b1c67bff38ee2270a
|
/ps6_image/ResizeableImage.py
|
827efd4d7813fba8c304d90cf50c62f1dd96bc7b
|
[] |
no_license
|
chuzui/python_algorithm
|
https://github.com/chuzui/python_algorithm
|
d04360cd44865382c4afcda1ecfa82cb972fe41e
|
bfcaeb3f41a2cb098c72e66dd7b03e0096ff2a22
|
refs/heads/master
| 2021-01-10T13:42:50.115860 | 2014-11-01T13:16:57 | 2014-11-01T13:17:20 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
import ImageMatrix
class ResizeableImage(ImageMatrix.ImageMatrix):
    """Image matrix supporting content-aware (seam-carving) resizing."""
    def best_seam(self):
        """Return the minimum-total-energy vertical seam as a list of
        (col, row) pairs, ordered from the bottom row up to row 0."""
        # dp[(i, j)]: minimal energy of any seam ending at column i, row j;
        # path[(i, j)]: predecessor cell of that seam.
        dp = {}
        path={}
        # Row 0: seam cost is just the pixel energy.
        for i in range(self.width):
            dp[i, 0] = self.energy(i, 0)
        #for j in range(self.height):
            #3dp[0, j] = self.energy(0, j)
        for j in range(1, self.height):
            for i in range(0,self.width):
                # Cheapest predecessor among the three cells above
                # (i-1, i, i+1); has_key (Python 2) guards the board edges.
                min_value = dp[i, j-1]
                p = i, j-1
                if dp.has_key((i-1, j-1)) and dp[i-1,j-1] < min_value:
                    min_value = dp[i-1,j-1]
                    p = i-1,j-1
                if dp.has_key((i+1,j-1)) and dp[i+1,j-1] < min_value:
                    min_value = dp[i+1,j-1]
                    p = i+1, j-1
                dp[i,j] = min_value + self.energy(i,j)
                path[i,j] = p
        # Find the cheapest seam end in the bottom row.
        # NOTE(review): `min` and `re` shadow builtins/stdlib names here.
        min = dp[0,self.height-1]
        col = 0
        for i in range(self.width):
            if dp[i,self.height-1] < min:
                min = dp[i, self.height-1]
                col = i
        # Walk predecessors back up to row 0.
        re = []
        cur = col,self.height-1
        re.append(cur)
        while path.has_key(cur):
            cur = path[cur]
            re.append(cur)
        return re
    def remove_best_seam(self):
        """Carve out the current best seam (remove_seam is presumably
        provided by ImageMatrix.ImageMatrix — confirm)."""
        self.remove_seam(self.best_seam())
|
UTF-8
|
Python
| false | false | 2,014 |
14,388,140,485,801 |
0449e1dd8eb4650455891728fd8264517ff26c48
|
17110cf6d4de2d3ba59bdafb2d5d939bb22b2d31
|
/ipython_log.py
|
d5a1bc7f84be2e604b8ee12f3dd37fcae5f0c205
|
[] |
no_license
|
drusmanbashir/deejay
|
https://github.com/drusmanbashir/deejay
|
1f61cef1ff5580eb8c1a10873ca013f654b32a25
|
e8d87effb4b7d2cc3324da75c17f822017bc02d2
|
refs/heads/master
| 2021-01-01T19:38:30.081301 | 2014-02-18T03:00:24 | 2014-02-18T03:00:24 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# IPython log file
# NOTE(review): this is a captured interactive session, not a runnable
# module — `runlog ipython.log` is not valid Python, get_ipython() only
# exists inside IPython, and `Case` comes from the interactive namespace.
case = Case.objects.get(pk=1)
case.folder
case.validate(
)
runlog ipython.log
get_ipython().magic(u'logstart ')
casi = Case.objects.get(pk=1)
casi.folder
casi.validate()
get_ipython().magic(u'logoff ')
|
UTF-8
|
Python
| false | false | 2,014 |
3,659,312,154,738 |
fb0649cd207a853036cf25f5a1038d62c33d30bd
|
06cfc0fc2c914524388d84f7e2c5afde9ea41c24
|
/config/urls/piehub.py
|
e9914b6b968a0fd238c0adb67be369fccb6c1511
|
[] |
no_license
|
jvwong/dj_powered
|
https://github.com/jvwong/dj_powered
|
b6d91c1f4e3eca312c7b42676cfe1c62151c089b
|
911efbd2203c546cde11709a4d00de3215603b62
|
refs/heads/master
| 2016-05-25T15:05:45.493115 | 2014-07-04T15:06:54 | 2014-07-04T15:06:54 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.conf.urls import patterns, url, include
from django.contrib import flatpages
from django.contrib import admin
from django.conf import settings
# Register ModelAdmin classes from all installed apps.
admin.autodiscover()
from django.views.generic.base import TemplateView
urlpatterns = patterns('')
if settings.DEBUG:
    # static files (images, css, javascript, etc.)
    # Serve uploaded media through Django itself only in development.
    urlpatterns = patterns('',
        (r'^media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),
    )
# Site routes: template views plus included app URLconfs.
urlpatterns += patterns('',
    url(r'^$', TemplateView.as_view(template_name='basie/neighbourhood.html'), name='basie_map_neighbourhood'),
    url(r'^voter/', include('basie.urls.voter')),
    url(r'^open311/$', TemplateView.as_view(template_name='basie/open_311.html'), name='basie_map_open311'),
    url(r'^tweets/', include('basie.urls.tweets')),
    url(r'^pin/', include('basie.urls.pin')),
    url(r'^admin/', include(admin.site.urls)),
)
|
UTF-8
|
Python
| false | false | 2,014 |
7,344,394,079,740 |
31f981829ad1d2882a272ff22bacfe94213ae338
|
a78ccb85c7e29e725ecbc8e12aa930a88760d978
|
/if_exists_using_try.sikuli/sikuli_unit_test.py
|
3511bdbb485d68b982666fa203d62febdd4cfdf6
|
[] |
no_license
|
Dannyzen/My-Sikuli-Tests
|
https://github.com/Dannyzen/My-Sikuli-Tests
|
f27aa553221e98a40fe138103098d2bf0a230af3
|
1c9f9d8a2846bd7f7fc7c126229fef5f7c588def
|
refs/heads/master
| 2021-01-23T21:37:55.524000 | 2012-01-11T06:54:42 | 2012-01-11T06:54:42 | 2,900,131 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Sikuli (Jython) UI script: the functions below use unittest naming and
# take `self` without an enclosing class — presumably executed by Sikuli's
# own unit-test runner, which supplies keyDown/type/exists etc.; confirm
# before treating this as plain Python.
def setUp(self):
    # Open the task manager (Ctrl+Shift+Esc), then launch Firefox via
    # File > New Task.
    keyDown(Key.CTRL + Key.SHIFT + Key.ESC)
    keyUp()
    type(Key.ALT + "f" + "n")
    wait(2)
    type ("firefox")
    type (Key.ENTER)
    if exists("le.png"):
        print ("it's fucking there")
    else:
        print ("shit's not there")
def tearDown(self):
    # Close the focused window and wait for the target image to disappear.
    keyDown(Key.ALT + Key.F4)
    keyUp()
    untilNotExist("le.png")
def testA(self):
    # NOTE(review): `assert exists` only checks that the function object is
    # truthy (always passes), and `win == 1` compares an undefined name
    # without using the result — likely `assert exists("le.png")` was meant.
    assert exists
    win == 1
def testB(self):
    # NOTE(review): `assert not exists` negates a (truthy) function object,
    # so this assertion always fails; same undefined `win` issue as testA.
    assert not exists
    win ==0
|
UTF-8
|
Python
| false | false | 2,012 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.