| column | dtype | values |
|---|---|---|
| __id__ | int64 | 3.09k – 19,722B |
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 2–256 |
| content_id | string | length 40 |
| detected_licenses | list | |
| license_type | string | 3 classes |
| repo_name | string | length 5–109 |
| repo_url | string | length 24–128 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | length 4–42 |
| visit_date | timestamp[ns] | |
| revision_date | timestamp[ns] | |
| committer_date | timestamp[ns] | |
| github_id | int64 | 6.65k – 581M, nullable |
| star_events_count | int64 | 0 – 1.17k |
| fork_events_count | int64 | 0 – 154 |
| gha_license_id | string | 16 classes |
| gha_fork | bool | 2 classes |
| gha_event_created_at | timestamp[ns] | |
| gha_created_at | timestamp[ns] | |
| gha_updated_at | timestamp[ns] | |
| gha_pushed_at | timestamp[ns] | |
| gha_size | int64 | 0 – 5.76M, nullable |
| gha_stargazers_count | int32 | 0 – 407, nullable |
| gha_forks_count | int32 | 0 – 119, nullable |
| gha_open_issues_count | int32 | 0 – 640, nullable |
| gha_language | string | length 1–16, nullable |
| gha_archived | bool | 2 classes |
| gha_disabled | bool | 1 class |
| content | string | length 9 – 4.53M |
| src_encoding | string | 18 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| year | int64 | 1.97k – 2.01k |
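Each row below is one record of this schema. As a minimal illustration (not part of the dump), rows like these could be filtered with pandas, assuming the table has been exported to a Parquet file whose name here is a placeholder:

import pandas as pd

# Placeholder file name: wherever this table was exported as Parquet.
df = pd.read_parquet("code_rows.parquet")

# Keep permissively licensed, non-vendored, non-generated files.
subset = df[(df["license_type"] == "permissive")
            & ~df["is_vendor"]
            & ~df["is_generated"]]
for _, row in subset.iterrows():
    print(row["repo_name"], row["path"], len(row["content"]))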
__id__: 4,922,032,565,195
blob_id: 1b23bd3939e5d18e1276201450839ed57f74bb27 | directory_id: 826706ac7548875bed42b66cc6b6b53443ca5ab9 | content_id: 3c9dcbd8800bac7c949b65898f8194084a46b158
path: /runserver.py
detected_licenses: ["MIT"] | license_type: permissive
repo_name: arpi-robotics/arpi-robotics | repo_url: https://github.com/arpi-robotics/arpi-robotics
snapshot_id: 8ba8a1f14578fc5166d16d22d5b8d9057df9de31 | revision_id: 4fa2a5f1b946ca9bba74953c5f413919a8e321ad | branch_name: refs/heads/master
visit_date: 2020-06-06T04:09:33.957723 | revision_date: 2014-01-12T03:23:01 | committer_date: 2014-01-12T03:23:01
github_id: 15,670,916 | star_events_count: 0 | fork_events_count: 1 | gha_* fields: null
content:
#!venv/bin/python
# coding: utf-8
from gevent.pywsgi import WSGIServer
from geventwebsocket.handler import WebSocketHandler
from app import my_app
if __name__ == '__main__':
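    # gevent's WSGIServer listens on all interfaces; WebSocketHandler upgrades
    # WebSocket requests and passes plain HTTP through to the WSGI app.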
http_server = WSGIServer(('',5000), my_app, handler_class=WebSocketHandler)
http_server.serve_forever()
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | year: 2014

__id__: 463,856,491,026
blob_id: 461416be973b89b9353924984bcdf363736e11a6 | directory_id: f225c8ec1a4f271a2e8baf6841ace55d65b9a299 | content_id: bba70b6775db44708bc3c082e66aa9e59710fa94
path: /SpellingMaster.py
detected_licenses: [] | license_type: no_license
repo_name: pamulapati/SpellingMaster | repo_url: https://github.com/pamulapati/SpellingMaster
snapshot_id: ce80038942e20238ee880de5ceddb009164ef157 | revision_id: c042a6e128b1d108cefe907bad85359595574917 | branch_name: refs/heads/master
visit_date: 2020-06-04T03:14:33.132511 | revision_date: 2013-10-19T20:45:45 | committer_date: 2013-10-19T20:45:45
github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_* fields: null
content:
'''
Created on Mar 25, 2013
@author: APamulapati
'''
# Anjali is smart, this is Dad's test code for princess Anjali Spelling homework.
import sys, os, glob, random
import subprocess
import time
import json
import pprint
def ask_ok(prompt, retries=4, complaint='Yes or no, please!'):
while True:
ok = input(prompt)
if ok in ('y', 'Y', 'ye', 'yes', 'Yes', 'Sure', 'sure', 'bring it on', 'add', 'A', 'new'):
return True
        if ok in ('n', 'N', 'no', 'nop', 'nope', 'No', 'Nope'):
return False
retries = retries - 1
if retries < 0:
raise IOError('refusenik child')
print (complaint)
def randomLine(filename):
"Retrieve a random line from a file, reading through the file once"
fh = open(filename, "r")
lineNum = 0
it = ''
    whichLine = 0
    while True:
aLine = fh.readline()
lineNum = lineNum + 1
if aLine != "":
#
# How likely is it that this is the last line of the file ?
if random.uniform(0, lineNum) < 1:
it = aLine
whichLine = lineNum
else:
break
fh.close()
# print(it+"===>"+str(whichLine)+"===>"+str(lineNum))
return it, whichLine, (lineNum - 1)
def play(word):
# for word in words:
p = os.system("mpg321 -q sounds/" + word + ".mp3")
def correctSpelling(word):
print('Correct spelling is *****' + word + '*****')
def word_definition (word):
print ("------Definition-------")
df = open("definitions/" + word + ".txt")
definition = df.readline()
    definition = definition.replace("dict_api.callbacks.id100(", "").replace(",200,null)", "").replace("\\x", "\\u00")
# print definition
definition_list = json.loads(definition)
# pprint.pprint(definition_list)
# print type(definition_list)
# print definition_list['primaries']
# pprint.pprint(definition_list['primaries'])
found = 0
part_of_speach = []
count = 0
if 'primaries' in definition_list:
for primaries in definition_list['primaries']:
# pprint.pprint( primaries['entries'])
for entries in primaries['entries']:
# pprint.pprint( entries)
# print "---------------------"
if entries['type'] == 'meaning':
# pprint.pprint(entries)
if len(entries['terms']) > 0:
count = count+1
print(str(count)+") "+ entries['terms'][0]['text'].replace((word), "<Your Word>"))
found = 1
if 'labels' in primaries['terms'][0]:
part_of_speach.append(" " + primaries['terms'][0]["labels"][0]['text'])
if not found:
if 'webDefinitions' in definition_list:
for primaries in definition_list['webDefinitions']:
# pprint.pprint( primaries['entries'])
for entries in primaries['entries']:
# pprint.pprint( entries)
# print "---------------------"
if entries['type'] == 'meaning':
# pprint.pprint(entries)
if len(entries['terms']) > 0:
count = count+1
print(str(count)+") "+ entries['terms'][0]['text'].replace((word), "<Your Word>"))
found = 0
if 'labels' in primaries['terms'][0]:
part_of_speach.append(primaries['terms'][0]["labels"][0]['text'])
print ("Part of Speach :" + str(part_of_speach))
print ("------End of Definition-------")
name = input('What is your name?\n')
if name not in ('Anjali', 'Angel','Test','Arun','Chins'):
    print ('This system serves only princess Anjali; you are not my master, go away!!!')
print ('System shutting down')
else:
print ('Welcome Dear Anjali, Let us buzz some words')
if ask_ok('Can we do some spellings?') == 0:
print ('Hmmm ... ok , let me know when you are in mood for some words.')
else:
print ('Ok let us rock on ...')
print ('Ok , let the Spell Buzz begin.')
    which_file = int(input('What do you want to spell from (Enter 1: Latest home work, 2: All home work, 3: Big list of any words 4: Past misspells 5: Elementary dictionary) '))
if(which_file == 1):
filename = name+"/recenthomework.txt"
elif(which_file == 2):
filename = name+"/allhomework.txt"
elif(which_file == 4):
filename = name+"/misspelledfile.txt"
elif(which_file == 5):
filename = name+"/elementrydct.txt"
elif(which_file == 3):
filename = "biglist.txt"
correct = 0
    count = 0
seen = []
ask = True
while True:
        line, lineNum, size = randomLine(filename)
if lineNum in seen :
continue
seen.append(lineNum)
question = line.strip()
if os.path.isfile("sounds/" + question + ".mp3") :
play(question)
answer = input("Enter spelling (q : to quit; r : to repeat d : to define):")
answer = answer.strip()
if(answer == 'q'):
break
while not answer or answer == 'r' or answer == 'd':
if answer == 'r':
play(question)
if answer == 'd':
if os.path.isfile("definitions/" + question + ".txt"):
word_definition(question)
else:
print("Sorry no definition!")
answer = input("Enter spelling (q : to quit; r : to repeat d : to define):")
answer = answer.strip()
if(answer == question):
play('/response/ding')
# play('is')
print("Excellent! Very good spelling !")
correct = correct + 1
else:
play('/response/buzzer')
correctSpelling(question)
e = open(name+'/misspelledfile.txt', 'a')
e.write(question + '\n')
e.flush()
e.close()
count = count + 1
if(len(seen) >= size):
            print ('You worked on all words that I have in this section !!!')
break
else:
continue
print ('You answered {0} out of {1} correct !!!'.format(correct, count))
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | year: 2013

__id__: 15,393,162,806,291
blob_id: 26495130f0227b356dd0598959d244d32bea62c9 | directory_id: a3cfbe8285e059390833e481abd8d4a8d2e0ca3f | content_id: 45c6979480ed673d90ed5e679485e61f69b841b3
path: /quearl/bin/localize.py
detected_licenses: ["AGPL-3.0-or-later", "AGPL-3.0-only"] | license_type: non_permissive
repo_name: raffaellod/quearl | repo_url: https://github.com/raffaellod/quearl
snapshot_id: 253f569014262a94f21c39ea8caebd5773afa439 | revision_id: ea083daa8ed3ec770dc506acca77907552dc6033 | branch_name: refs/heads/master
visit_date: 2016-09-15T18:10:29.802506 | revision_date: 2013-12-15T16:44:23 | committer_date: 2013-12-15T16:44:23
github_id: 11,852,000 | star_events_count: 1 | fork_events_count: 0 | gha_* fields: null
content:
#!/usr/bin/python
# -*- coding: utf-8; mode: python; tab-width: 3 -*-
#
# Copyright 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013
# Raffaello D. Di Napoli
#
# This file is part of Quearl.
#
# Quearl is free software: you can redistribute it and/or modify it under the terms of the GNU
# Affero General Public License as published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# Quearl is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License along with Quearl. If
# not, see <http://www.gnu.org/licenses/>.
#---------------------------------------------------------------------------------------------------
"""Utilities for localization files (.l10n)."""
import gzip
import os
import re
import sys
####################################################################################################
# l10n_generator
class l10n_generator(object):
"""Generates localization files for all programming languages from the localized string files."""
@classmethod
def update_module(cls, module):
"""Updates every localization file in a given module.
QuearlModule module
Module for which to update the localization.
"""
sL10nDir = module.l10n_dir()
if sL10nDir is None:
# This module has no localization files.
return
for sFileName in os.listdir(sL10nDir):
if sFileName.endswith('.l10n'):
cls.update_from_l10n_file(module, os.path.join(sL10nDir, sFileName))
@classmethod
def update_from_l10n_file(cls, module, sL10nFileName):
"""Creates/updates the localization files generated from the specified l10n file.
QuearlModule module
Module to which the file belongs.
str sL10nFileName
Full path to the .l10n file.
"""
sModulePrefix = module.abbr().upper()
dictL10nEntries = None
# Find out the locale from the l10n file name.
match = re.search(
r'[\/](?P<locale>(?P<language>[a-z]{2})-(?P<country>[a-z]{2}))\.l10n$', sL10nFileName
)
sLocale = match.group('locale')
sLanguage = match.group('language')
sCountry = match.group('country')
# Get the last modification time of the .l10n file.
dtL10nFile = os.path.getmtime(sL10nFileName)
for sType in 'php', 'js':
sOutputDir = os.path.join(module.rodata_dir(), 'l10n', sType)
sOutputFileName = os.path.join(sOutputDir, sLocale + '.' + sType)
try:
# Try to get the last modification time of the output file.
dtOutputFile = os.path.getmtime(sOutputFileName)
if dtL10nFile <= dtOutputFile:
# The file doesn’t need to be re-generated.
continue
except OSError:
# Assume that the file does not exist.
pass
# Make sure we read and processed the source .l10n file…
if dictL10nEntries is None:
# …and if we didn’t, do it now.
sys.stdout.write('Processing l10n file {}\n'.format(sL10nFileName))
dictL10nEntries = cls.parse_l10n(sL10nFileName)
# Add a few more constants.
dictL10nEntries['L10N_INCLUDED' ] = True
dictL10nEntries['LANG_ISO639' ] = sLanguage
dictL10nEntries['COUNTRY_ISO3166'] = sCountry
# Generate the output.
sys.stdout.write(' Generating localization in {} format\n'.format(sType.upper()))
sFile = getattr(cls, 'l10n_to_' + sType)(sModulePrefix, dictL10nEntries)
# Make sure the destination directory exists.
try:
os.makedirs(sOutputDir, 0o755, True)
except OSError:
# The subdirectory was already there, or couldn’t be created. In the latter case,
# opening the file will fail, so an exception will be raised in any case.
pass
# Store the generated file.
if sType == 'js':
# Also write a gzipped version of the same file.
sys.stdout.write(' Writing {}.gz\n'.format(sOutputFileName))
with open(sOutputFileName + '.gz', 'wb') as fileOutput:
fileOutput.write(gzip.compress(sFile.encode('utf-8')))
sys.stdout.write(' Writing {}\n'.format(sOutputFileName))
with open(sOutputFileName, 'w') as fileOutput:
fileOutput.write(sFile)
@staticmethod
def parse_l10n(sL10nFileName):
"""Parses a .l10n file, returning a dictionary containing the entries defined.
str sL10nFileName
Full path to the .l10n file.
dict(object) return
Localized constants.
"""
with open(sL10nFileName, 'r') as fileL10n:
sL10nFile = fileL10n.read()
# Strip the BOM, if present.
if sL10nFile.startswith('\ufeff'):
sL10nFile = sL10nFile[1:]
# Prepare this module’s localization.
dictEntries = {}
iLine = 0
# Parse through the whole file.
for sLine in sL10nFile.splitlines():
iLine += 1
# Skip empty lines and comments.
if len(sLine) > 0 and sLine[0] != '#':
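            # Entry lines look like "NAME<TAB>value" or "NAME:int<TAB>42".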
            match = re.match(r'^(?P<name>[0-9A-Z_]+)(?::(?P<type>int))?\t+(?P<value>.*)$', sLine)
if not match:
raise SyntaxError('line {}: invalid syntax: {}'.format(iLine, repr(sLine)))
sType = match.group('type')
sValue = match.group('value')
if sType:
if sType == 'int':
# Enter ints as… ints.
oValue = int(sValue)
else:
raise SyntaxError('line {}: unknown type: {}'.format(iLine, sType))
else:
# Unescape strings.
oValue = sValue.replace('\\n', '\n').replace('\\\\', '\\')
# Add the entry.
dictEntries[match.group('name')] = oValue
return dictEntries
@staticmethod
def l10n_to_php(sModulePrefix, dictL10nEntries):
"""Generates a PHP localization file with the provided entries.
str sModulePrefix
Prefix to be prepended to each generated constant’s name.
dict(object) dictL10nEntries
Localized constants.
str return
PHP version of the contents of dictL10nEntries.
"""
s = '<?php\n' \
'# -*- coding: utf-8; mode: php; tab-width: 3 -*-\n' \
'# AUTOMATICALLY-GENERATED FILE - do not edit!\n' \
'\n'
for sName, oValue in dictL10nEntries.items():
if isinstance(oValue, bool):
if oValue:
sValue = 'true'
else:
sValue = 'false'
elif isinstance(oValue, str):
# Escape escape sequences.
sValue = oValue.replace('\\', '\\\\').replace('\n', '\\n')
# Escape the quotes we use, and add them at either ends.
sValue = "'" + sValue.replace("'", "\\'") + "'"
else:
# Python and PHP are similar enough that for numeric types we can simply use repr().
sValue = repr(oValue)
s += "define('L10N_{}_{}', {});\n".format(sModulePrefix, sName, sValue)
s += '\n' \
'?>'
return s
@staticmethod
def l10n_to_js(sModulePrefix, dictL10nEntries):
"""Generates a JavaScript localization file with the provided entries.
str sModulePrefix
Prefix to be prepended to each generated constant’s name.
dict(object) dictL10nEntries
Localized constants.
str return
JavaScript version of the contents of dictL10nEntries.
"""
# Note that semicolons and most whitespace are omitted from the output, in an attempt to
# reduce the resulting JS size.
s = '// -*- coding: utf-8; mode: javascript; tab-width: 3 -*-\n' \
'// AUTOMATICALLY-GENERATED FILE - do not edit!\n' \
'\n' \
'var L=L10n\n'
for sName, oValue in dictL10nEntries.items():
if isinstance(oValue, bool):
sValue = oValue and 'true' or 'false'
else:
# Thanks to the similarities between Python strings and JS strings, for most types we
# can simply use repr().
sValue = repr(oValue)
s += 'L.{}_{}={}\n'.format(sModulePrefix, sName, sValue)
s += 'L=undefined\n'
return s
####################################################################################################
# __main__
if __name__ == '__main__':
# Get the full path of this script.
sDir = os.path.dirname(os.path.abspath(sys.argv[0]))
# Setup the PATH environment variable to load quearl_inst.
sys.path.append(sDir)
import quearl_inst
# Obtain the Quearl installation subdirectory and instantiate a QuearlInst for it.
qinst = quearl_inst.QuearlInst(os.path.normpath(os.path.join(sDir, '..')))
# Update all modules.
for module in qinst.modules():
l10n_generator.update_module(module)
sys.exit(0)
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | year: 2013

__id__: 8,521,215,138,213
blob_id: 74728dfb4dc587180b7be67bf65955a4af068cbb | directory_id: e0bbcbf62f423e9e5c6625a9805f8a614f80d765 | content_id: c89387eaee958c3e23e27d152bd38b480c8028e2
path: /s3putsecurefolder.py
detected_licenses: ["MIT"] | license_type: permissive
repo_name: sinbad/s3putsecurefolder | repo_url: https://github.com/sinbad/s3putsecurefolder
snapshot_id: f719e0af5e726ee3c877945c095c309444c6e6b0 | revision_id: 3b3ba4a440ad801f4f051df9d1dc6eaa2ec61958 | branch_name: refs/heads/master
visit_date: 2021-01-01T18:34:22.633096 | revision_date: 2013-01-07T13:53:12 | committer_date: 2013-01-07T13:53:12
github_id: 29,584,149 | star_events_count: 1 | fork_events_count: 1 | gha_* fields: null
content:
#!/usr/bin/env python
# Copyright (c) 2009-2012 Steve Streeting
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
import os
import hashlib
import tempfile
import subprocess
import time
from optparse import OptionParser
from boto.s3.connection import S3Connection
from boto.s3.bucket import Bucket
import fnmatch
SRC_MD5_META='s3putsecure-md5'
currentKeyName=''
lastTime=time.time()
lastBytes=0
currentKps=0
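# boto calls progress() periodically during an upload; the module-level state
# above lets it estimate throughput from the byte delta between callbacks.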
def progress(bytes_done, total_bytes):
global lastBytes, lastTime, currentKps, currentKeyName
bytediff = bytes_done - lastBytes
nowTime = time.time()
timeDiff = nowTime - lastTime
if timeDiff > 0:
currentKps = (bytediff / timeDiff) / 1024
lastBytes = bytes_done
lastTime = nowTime
msg = "\rProgress: %s - %d / %d bytes (%d%%) (%dK/s)" % (currentKeyName, bytes_done, total_bytes, (bytes_done * 100) / total_bytes, currentKps)
sys.stdout.write(msg)
sys.stdout.flush()
# Utility script for uploading the contents of a local folder to an S3 bucket, encrypting
# every file before upload, and only uploading those files which are different locally. The
# MD5 of every file is used to determine if the local file is different. Local files are
# always uploaded if the MD5 differs, regardless of modification time.
# Parse options
parser = OptionParser(usage='usage: %prog [options] source_folder target_bucket gpg_recipient_or_phrase')
parser.add_option('-n', '--dry-run', action='store_true', dest='simulate', default=False,
help='Do not upload any files, just list actions')
parser.add_option('-a', '--accesskey', dest='access_key',
help='AWS access key to use instead of relying on environment variable AWS_ACCESS_KEY')
parser.add_option('-s', '--secretkey', dest='secret_key',
help='AWS secret key to use instead of relying on environment variable AWS_SECRET_KEY')
parser.add_option('-c', '--create', action='store_true', dest='create_bucket', default=False,
help='Create bucket if it does not already exist')
parser.add_option('-v', '--verbose', action='store_true', dest='verbose', default=False,
help='Verbose output')
parser.add_option('-S', '--symmetric', action='store_true', dest='symmetric', default=False,
help='Instead of encrypting with a public key, encrypts files using a symmetric cypher and the passphrase given on the command-line.')
parser.add_option('-D', '--disableencryption', action='store_true', dest='donotencrypt', default=False,
help='Do not encrypt before uploading.')
parser.add_option('-X', '--exclude', action='append', type='string', dest='excludes',
help='Exclude file patterns')
(options, args) = parser.parse_args()
# check remaining args
if len(args) < 2:
parser.error('Expected at least 2 arguments.')
if len(args) < 3 and not options.donotencrypt:
parser.error('Expected at least 3 arguments for encrypted sync.')
sourceFolder = args[0]
targetBucket = args[1]
if not options.donotencrypt:
gpgRecipOrPass = args[2]
simulate = options.simulate
accessKey = options.access_key
secretKey = options.secret_key
donotencrypt = options.donotencrypt
if not os.path.exists(sourceFolder):
parser.error('Error ' + sourceFolder + ' does not exist.')
if accessKey is None:
accessKey = os.environ.get('AWS_ACCESS_KEY')
if accessKey is None:
parser.error('Error, no AWS_ACCESS_KEY defined, use -a or --accesskey.')
if secretKey is None:
secretKey = os.environ.get('AWS_SECRET_KEY')
if secretKey is None:
parser.error('Error, no AWS_SECRET_KEY defined, use -s or --secretkey.')
if donotencrypt:
print 'Warning: encryption disabled as requested'
print 'Uploading ' + sourceFolder + ' to s3://' + targetBucket
print 'Establishing connection to S3...'
conn = S3Connection(accessKey, secretKey)
print 'Connection successful, opening bucket...'
bucket = conn.get_bucket(targetBucket)
if bucket is None:
if options.create_bucket:
print 'Creating bucket ' + targetBucket
bucket = conn.create_bucket(targetBucket)
else:
print 'Error, bucket ' + targetBucket + ' does not exist.'
exit(-1)
print 'Bucket opened successfully.'
if options.simulate:
print 'Simulation mode, not actually uploading data.'
print 'Please be patient, hash calculations can take a few seconds on larger files.'
# standardise path (removes any trailing slash & double slashes)
sourceFolder = os.path.normpath(sourceFolder)
prefixlen = len(sourceFolder) + 1 # length of prefix, including trailing slash
# get contents of folder
for dirpath, dirname, filenames in os.walk(sourceFolder):
for f in filenames:
fullpath = dirpath + '/' + f
# Check exclusions
excludeThis = False
if options.excludes is not None:
for exclude in options.excludes:
print 'Checking exclude: ' + exclude
if fnmatch.fnmatch(fullpath, exclude):
excludeThis = True
break
if excludeThis:
continue
keyname = fullpath[prefixlen:]
# check whether this key is present already
key = bucket.get_key(keyname)
localfile = file(fullpath, 'rb')
# check MD5
localmd5sum = hashlib.md5(localfile.read()).hexdigest()
if key is not None:
# key.etag is the md5 as a quoted string
# however this is the md5 for the encrypted file, we need to compare the unencrypted md5
# So, we store the md5 of the unencrypted file in metadata
remotemd5sum = key.get_metadata(SRC_MD5_META)
if localmd5sum == remotemd5sum:
if options.verbose:
print fullpath + ' md5 matches s3://' \
+ targetBucket + '/' + keyname + ' (' + localmd5sum + '), not uploading.'
continue
else:
if options.verbose:
print fullpath + ' md5 (' + localmd5sum + ') differs from s3://' \
+ targetBucket + '/' + keyname + ' md5 (' + remotemd5sum + ')'
else:
key = bucket.new_key(keyname)
# If we get here, we upload
print 'Uploading ' + fullpath + ' as ' + keyname
if not options.simulate:
# set metadata BEFORE upload
key.set_metadata(SRC_MD5_META, localmd5sum)
if not options.donotencrypt:
# encrypt first using gpg
tempfilename = tempfile.gettempdir() + '/' + f
if options.symmetric:
if options.verbose:
print 'Symmetrically encrypting ' + fullpath + ' to ' + tempfilename
subprocess.check_call(['gpg', '-c', '--no-use-agent', '--yes', \
'--passphrase', gpgRecipOrPass, '-o', tempfilename, \
fullpath])
else:
if options.verbose:
print 'Public-key encrypting ' + fullpath + ' to ' + tempfilename + ' for ' + gpgRecipOrPass
subprocess.check_call(['gpg', '-e', '-r', gpgRecipOrPass, '--yes', \
'-o', tempfilename, fullpath])
else:
tempfilename = fullpath
# upload, with progress
currentKeyName = keyname
lastTime=time.time()
currentKps = 0
key.set_contents_from_filename(tempfilename, cb=progress, num_cb=100)
if not options.donotencrypt:
os.remove(tempfilename)
# newline, to clear progress
print
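A hypothetical invocation, matching the OptionParser usage above (the folder, bucket name, and recipient are placeholders):

python s3putsecurefolder.py -v --create ~/backups my-bucket [email protected]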
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | year: 2013

__id__: 5,007,931,873,092
blob_id: 85f4208a13474795a96173c4d14121705929d153 | directory_id: f963f819afbcb26a45b4c29e546a9e3394406148 | content_id: fa3f690c609ac7a0f64f12ab299bd919ffe6e9e0
path: /tim.py
detected_licenses: [] | license_type: no_license
repo_name: barakmich/tim-the-enchanter | repo_url: https://github.com/barakmich/tim-the-enchanter
snapshot_id: db5d4248c6ec3462a12b771480e4e7ca11eca574 | revision_id: 452769a918588e0b2da6d97918f979e14172a296 | branch_name: refs/heads/master
visit_date: 2021-01-01T19:06:40.500284 | revision_date: 2014-01-06T23:34:35 | committer_date: 2014-01-06T23:34:35
github_id: 11,799,763 | star_events_count: 1 | fork_events_count: 0 | gha_* fields: null
content:
import os
import readline
import sys
import colorama
import json
from colorama import Fore, Style
from models.default_model import DefaultModel
from game_evaluator import DeceptionGame, AvalonGame
def repl_report(report, namemap, ngood):
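    # report[i]["side"].get(True) is the estimated probability that player i
    # is good; sort players from most to least likely good before printing.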
sort_order = sorted(
[(report[i]["side"].get(True, 0.0), i)
for i in range(len(report))],
reverse=True)
still_good = 0
for goodness, i in sort_order:
row = "%s: %2f%% Good %2f%% Evil" % (
namemap.get(i, "") + " (%s)" % str(i),
goodness * 100,
(1.0 - goodness) * 100)
if still_good < ngood:
print(Fore.CYAN + Style.BRIGHT + row)
else:
print(Fore.RED + Style.BRIGHT + row)
roles = sorted([(v, k) for k, v in report[i]["role"].iteritems()],
reverse=True)
row = " "
has_roles = True
for score, role in roles:
if role == "":
has_roles = False
break
row += "%2.1f%% %s " % (
score * 100, role)
if has_roles:
print(row)
still_good += 1
def display_statement(statement, namemap):
out = ""
out += Fore.MAGENTA + Style.BRIGHT
out += statement["type"].title()
out += Style.RESET_ALL + " -- "
for key in statement["print_order"]:
out += Fore.YELLOW
out += key.title() + ": "
out += Style.RESET_ALL
out += str(statement[key]).title() + " "
return out
def help():
print(Fore.GREEN + "Initial Commands:")
print("load <filename> -- Loads a savefile")
print("newgame -- Starts a new game")
print("\n")
print(Fore.GREEN + "Game Commands:")
print("save <filename> -- Saves a game to file")
print("ls -- Lists current assertions")
print("disb <index> -- Delete (disbelieve) an assertion")
print("name -- Name a player index for pretty printing")
print("side -- Assert that someone must be good or evil")
print("lady -- Assert that one player saw another and made a claim")
print("vote -- Assert a voted-on team and the votes"
" (whether it succeeded or not)")
print("switch -- Good and Evil Lancelots switch")
print("mission -- Assert the results of a team and a mission")
print("eval <repetitions> -- Quick eval, discounting special roles")
print("fulleval <repetitions> -- Eval, counting special roles")
print("report -- Show last report again")
def main():
colorama.init(autoreset=True)
readline.get_history_length()
print(Fore.GREEN + Style.BRIGHT + "Tim the Enchanter v1.0")
game = None
namemap = {}
while True:
try:
command_str = raw_input("%s> " % game)
command_list = command_str.strip().split(" ")
command = command_list[0]
if command == "quit" or command == "q" or command == "exit":
sys.exit(0)
if command == "help":
help()
continue
if game is None:
if command == "newgame":
nplayers = raw_input("How many players? ")
game = DeceptionGame(
AvalonGame(int(nplayers)), DefaultModel)
namemap = {}
elif command == "load":
if len(command_list) < 2:
print(Fore.RED + "Need an input file")
continue
inpath = os.path.expanduser(command_list[1])
with open(inpath, "r") as savefile:
observations = json.load(savefile)
metadata = observations[0]
data = observations[1:]
game = DeceptionGame(
AvalonGame(int(metadata["game_size"])),
DefaultModel)
namemap = metadata["player_names"]
game.load_save(data)
else:
print(Fore.RED + "Need to create a game")
continue
elif command == "ls":
for i, statement in enumerate(game.seen):
print "%d: %s" % (i, display_statement(statement, namemap))
continue
elif command == "vote":
input = raw_input("Team? ").strip()
team = [int(x) for x in input]
votes = [int(x) for x in raw_input("Votes? ").strip()]
round = int(raw_input("Round? ").strip())
fail_req = int(raw_input("# Fails Required? ").strip())
game.do_vote(team, votes, fail_req, round)
game.trace = {}
continue
elif command == "mission":
team = [int(x) for x in raw_input("Team? ").strip()]
fails = int(raw_input("# of Fails? ").strip())
must = int(raw_input("Spys must fail? ").strip()) == 1
round = int(raw_input("Round? ").strip())
game.do_mission(team, fails, must, round)
game.trace = {}
continue
elif command == "lady" or command == "lol":
p1 = int(raw_input("ID For Lady? ").strip())
p2 = int(raw_input("ID For Target? ").strip())
claim = int(raw_input("Claim? ").strip()) == 1
round = int(raw_input("Round? ").strip()) == 1
game.player_sees_player_and_claims(p1, p2, claim, round)
game.trace = {}
continue
elif command == "side":
p1 = int(raw_input("ID For Assertion? ").strip())
claim = int(raw_input("Good? ").strip()) == 1
game.add_known_alliance(p1, claim)
game.trace = {}
continue
elif command == "switch":
r = int(raw_input("Starting in round?").strip())
game.switch_lancelots(r)
game.trace = {}
continue
elif command == "eval":
times = 200 / (game.n_players - 4) * 2
if len(command_list) > 1:
times = int(command_list[1])
game.eval(times, quick=True)
repl_report(game.report(), namemap, game.n_good)
elif command == "fulleval":
times = 200 / (game.n_players - 4) * 2
if len(command_list) > 1:
times = int(command_list[1])
game.eval(times)
repl_report(game.report(), namemap, game.n_good)
elif command == "report":
repl_report(game.report(), namemap, game.n_good)
elif command == "save":
if len(command_list) < 2:
print(Fore.RED + "Need an output file")
continue
metadata = [{
"game_size": game.n_players,
"player_names": namemap
}]
outpath = os.path.expanduser(command_list[1])
with open(outpath, "w") as savefile:
json.dump(metadata + game.seen, savefile, indent=2)
elif command == "name":
if len(command_list) < 3:
print(Fore.RED + "No args?")
continue
namemap[int(command_list[1])] = command_list[2]
elif command == "disb" or command == "disbelieve":
if len(command_list) < 2:
print(Fore.RED + "No args?")
continue
game.disbelieve(int(command_list[1]))
else:
print(Fore.RED + "Unknown command: %s" % command)
continue
except EOFError:
print "Canceled"
continue
except Exception, e:
print str(e)
continue
if __name__ == '__main__':
main()
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | year: 2014

__id__: 10,436,770,556,825
blob_id: 479badd572953d1c1a2c0f3d7d35afceae89167c | directory_id: 54eadb8673355814de717f75e0272839bf387fae | content_id: 52c0277a6e965fdd85eca2de4def58b856b3e8ea
path: /renderarea.py
detected_licenses: [] | license_type: no_license
repo_name: ahmetoluc/PyQtGPS | repo_url: https://github.com/ahmetoluc/PyQtGPS
snapshot_id: a64f841f5121706a0c55f8df10d975a062e810bd | revision_id: 461cae9374958c8f6fe08160f2917e41b0261268 | branch_name: refs/heads/master
visit_date: 2023-03-19T02:54:53.147300 | revision_date: 2010-04-16T23:42:21 | committer_date: 2010-04-16T23:42:21
github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_* fields: null
content:
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import sys, os
from PyQt4 import QtCore, QtGui
from tilegrid import TileGrid
class RenderArea(QtGui.QWidget):
#set of points to draw Heading and Bearing arrows
points = QtGui.QPolygon([
QtCore.QPoint(0, -15),
QtCore.QPoint(-15, 15),
QtCore.QPoint(0, 0),
QtCore.QPoint(15, 15)
])
def __init__(self, parent = None):
QtGui.QWidget.__init__(self, parent)
self.tilegrid = TileGrid(parent=parent)
self.sysPath = os.path.join(sys.path[0], "")
#load map tile into pixmap
self.setBackgroundRole(QtGui.QPalette.Base)
def minimumSizeHint(self):
return QtCore.QSize(256, 256)
def sizeHint(self):
return QtCore.QSize(400, 400)
def paintEvent(self, event):
#create instance of QPainter
painter = QtGui.QPainter()
#begin paint
painter.begin(self)
#set anti-aliasing hint
painter.setRenderHint(QtGui.QPainter.Antialiasing)
painter.translate(0,0)
# move coordinates origin to center of renderArea
painter.translate(self.width()/2, self.height()/2)
for img in self.tilegrid.images:
p, fname = img
image = QtGui.QImage()
image.load(self.tilegrid.mapPath+fname)
# draw image with offset
painter.drawImage(p, image)
painter.setPen(QtGui.QPen(QtCore.Qt.green, 4, QtCore.Qt.DotLine))
painter.drawPolyline(self.tilegrid.pathways)
image = QtGui.QImage()
image.load("./wp.png")
for wp in self.tilegrid.visible_waypoints:
painter.drawImage(wp, image)
#rotate painter coordinates system according to current bearing
painter.rotate(float(self.tilegrid.bearing))
painter.setPen(QtGui.QPen(QtCore.Qt.blue, 2, QtCore.Qt.SolidLine))
#draw bearing arrow from set of points
painter.drawPolygon(RenderArea.points)
painter.end()
# del painter
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | year: 2010

__id__: 14,912,126,487,813
blob_id: 769ba5844d6b9cd01c71bdc5868b1e2f1c034054 | directory_id: c238e7eeeeb1fe067c1f2f13b096cab030efc170 | content_id: e0444360139f4d8fb78236066f7ce7be323d9080
path: /agent/tormenta/agent/api.py
detected_licenses: [] | license_type: no_license
repo_name: niedbalski/tormenta | repo_url: https://github.com/niedbalski/tormenta
snapshot_id: 65c72fc0e6d59905dcfcbf0481cc3f9db5fc5338 | revision_id: 13831ff5a3dce4132fb59444ecb3caf49a6dde2c | branch_name: refs/heads/master
visit_date: 2016-09-06T00:45:15.902023 | revision_date: 2013-05-14T22:05:41 | committer_date: 2013-05-14T22:05:41
github_id: 9,633,480 | star_events_count: 0 | fork_events_count: 1 | gha_* fields: null
content:
#!/usr/bin/env python
__author__ = 'Jorge Niedbalski R. <[email protected]>'
from tormenta.core.config import settings
from tormenta.core.tasks import InstanceTask
from tormenta.core.decorators import has_public_key
from tormenta.core.model import (db, PublicKey,
Instance,
InstanceResource as InstanceResourceModel)
from tormenta.agent import (initialize, params)
from flask import Flask, request
from flask.ext.restful import (Resource, Api, marshal_with, marshal, abort)
import operator
import beanstalkc
import logging
logger = logging.getLogger('tormenta.agent.api')
app = Flask(__name__)
api = Api(app, prefix='/api/v%d' % settings.options.api_version)
def marshal_and_count(n, r, f=None, **other):
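    # Wrap a record or list of records as {'count': len(r), '<n>': [...]},
    # optionally marshalling each item with the field spec f and merging in
    # any extra keyword arguments.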
if not isinstance(r, list):
r = [r]
if f:
r = map(lambda q: marshal(q, f), r)
d = dict({'count': len(r), '%s' % n: r})
for k, v in other.items():
d.update({k: v})
return d
class PublicKeyResource(Resource):
params = params.PublicKey()
@has_public_key
def get(self, public_key):
return marshal_and_count('keys', public_key, f=self.params.fields)
def post(self, *args, **kwargs):
public_key_id = PublicKey.encode(self.params.post['public_key'])
try:
key = PublicKey.get(public_key_id)
except:
key = PublicKey.create(public_key_id=public_key_id,
raw=self.params.post['public_key'])
return marshal_and_count('keys', key, f=self.params.fields)
class InstanceResource(Resource):
params = params.Instance()
@has_public_key
def get(self, public_key):
"""
List all instances
"""
instances = Instance.select().where(
Instance.public_key == public_key)
if self.params.get['state'] is not None:
instances = instances.where(
Instance.state == self.params.get['state'])
m_instances = []
for instance in instances:
resource_filters = self.params.get.get('resource_filter', None)
if resource_filters:
if instance.has_resources(resource_filters):
m_instances.append(instance)
else:
m_instances.append(instance)
m_instances = map(lambda i: i.hydrate(marshal,
self.params.fields,
self.params.resource.fields),
m_instances)
return marshal_and_count('instances', m_instances)
@has_public_key
def delete(self, public_key):
"""
Receives a request for destroy a running/requested instance
"""
if self.params.get['state']:
instances = instances.where(Instance.state == args['state'])
instance_ids = args.get('instance_ids', None)
if instance_ids:
instance_filters = []
for instance_id in instance_ids:
instance_filters.append(Instance.instance_id == instance_id)
instances = instances.where(reduce(operator.or_,
instance_filters))
affected = instances.execute()
return affected
@has_public_key
def post(self, public_key):
"""
Receives a request for a new Instance
"""
try:
instance = Instance.create_from_args(self.params.post, public_key)
job_id = InstanceTask().start(instance.instance_id)
except Exception as ex:
logger.error(ex.message)
return abort(500)
return marshal_and_count('instances',
instance,
f=self.params.fields,
job_id=job_id)
api.add_resource(PublicKeyResource, '/public_key')
api.add_resource(InstanceResource, '/instance')
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | year: 2013

__id__: 12,463,995,108,617
blob_id: bbbd841e0d0c8dc15f9d6d54b2da3b82aa3d89a3 | directory_id: 4f9c83c4967774188c7670a45cb386f7fb16408d | content_id: 97dd75a2feb20240cf3288ce2120a0410a25049f
path: /offers/admin.py
detected_licenses: [] | license_type: no_license
repo_name: marlonmantilla/cb_project | repo_url: https://github.com/marlonmantilla/cb_project
snapshot_id: c79302eb58cc95ff5574d6ec801b9cf27694d355 | revision_id: afa6b465211983a52260280b1b1b1d1f06c42b40 | branch_name: refs/heads/master
visit_date: 2021-01-01T15:50:11.759704 | revision_date: 2012-06-14T02:34:09 | committer_date: 2012-06-14T02:34:09
github_id: 3,060,563 | star_events_count: 0 | fork_events_count: 0 | gha_* fields: null
content:
from offers.models import Oferta, Categoria, Tienda, Producto, Favoritas
from django.contrib import admin
admin.site.register(Oferta)
admin.site.register(Categoria)
admin.site.register(Tienda)
admin.site.register(Producto)
admin.site.register(Favoritas)
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | year: 2012

__id__: 6,030,134,133,690
blob_id: 8c1a64d18a6649b3dbbad0b33ebae08cc6ba2e6f | directory_id: b43a125989e34af30ecdac4effdf3c0cfd4d9325 | content_id: 54fe51f5e36b13ce9c058c1b2fb4c6bb32d7ca25
path: /entertainerlib/gui/transitions/__init__.py
detected_licenses: [] | license_type: no_license
repo_name: tiwilliam/entertainer | repo_url: https://github.com/tiwilliam/entertainer
snapshot_id: 841ffda7f3a6ab2aaf7bc297c7f678aceecb6912 | revision_id: 945463032481c3afdef56d0ef9f5be102829eb35 | branch_name: refs/heads/master
visit_date: 2016-09-06T07:10:21.794767 | revision_date: 2012-10-02T08:42:20 | committer_date: 2012-10-02T08:42:20
github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_* fields: null
content:
# Copyright (c) 2009 Entertainer Developers - See COPYING - GPLv2
'''Frontend GUI transitions module'''
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | year: 2012

__id__: 481,036,356,489
blob_id: 4521c1cad90b872ddddc283a6b54637880974ca6 | directory_id: 1fa34ea9643c4a5310e2010cd06baf02c6cfc9f7 | content_id: 0ac1e88c966514f5d02e043022d4414bf1a08ff4
path: /Round D APAC test/C.Sort a scrambled itinerary/solution.py
detected_licenses: [] | license_type: no_license
repo_name: devdil/Google-APAC-2015 | repo_url: https://github.com/devdil/Google-APAC-2015
snapshot_id: 8c37d45b5c9627a6b44c39916c3bb1751d99e056 | revision_id: e3bc4553a22d36280aaea7f19b9807f41811f431 | branch_name: refs/heads/master
visit_date: 2021-01-10T21:05:34.940216 | revision_date: 2014-11-11T05:21:51 | committer_date: 2014-11-11T05:21:51
github_id: 26,449,622 | star_events_count: 0 | fork_events_count: 1
gha_license_id: null | gha_fork: false | gha_event_created_at: 2014-12-25T23:26:34 | gha_created_at: 2014-11-10T18:33:36 | gha_updated_at: 2014-11-10T18:34:56 | gha_pushed_at: 2014-11-11T05:21:51
gha_size: 156 | gha_stargazers_count: 0 | gha_forks_count: 1 | gha_open_issues_count: 0 | gha_language: Python | gha_archived: null | gha_disabled: null
content:
testcases = int(raw_input())
for y in range(1, testcases + 1):
    number_of_stations = int(raw_input())
    stations = {}
for x in range(number_of_stations*2):
stations[raw_input()] = raw_input()
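    # The start city is the only departure that never appears as a destination.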
for x in stations.keys():
if x not in stations.values():
            source = x
            break
print "Case #%s:"%(y),
while stations.has_key(source):
print source+"-"+stations[source],
source = stations[source]
print ""
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | year: 2014

__id__: 9,045,201,151,728
blob_id: 0905374e00232c061b0c1e149998c67fbb3f4a61 | directory_id: 6695f50d29ad1833bfafb0a189ab6a5e2377aec9 | content_id: d5f774aaa0f750b22e0507b5d57d1450e7d193d9
path: /rome/__init__.py
detected_licenses: [] | license_type: no_license
repo_name: ouvigna/rome | repo_url: https://github.com/ouvigna/rome
snapshot_id: edf6a6d8667c1ac31965d817d98076838ed2ab29 | revision_id: 808c34f6c879fca69f382452d3d32739722570ea | branch_name: refs/heads/master
visit_date: 2021-01-19T09:09:18.523322 | revision_date: 2011-07-08T14:58:31 | committer_date: 2011-07-08T14:58:31
github_id: 1,556,244 | star_events_count: 4 | fork_events_count: 0 | gha_* fields: null
content:
# -*- coding: utf-8 -*-
"""
"""
from copy import copy
from itertools import chain
from rome.language import _
class ValidationError(Exception):
def __init__(self, msg):
Exception.__init__(self, msg)
self.error = msg
class MetaValidator(type):
def __new__(cls, name, bases, attrs):
cls = type.__new__(cls, name, bases, attrs)
cls._errors = copy(getattr(cls, '_errors', {}))
cls._errors.update(attrs.get('__errors__', {}))
return cls
class Validator(object):
__metaclass__ = MetaValidator
__errors__ = {}
def __init__(self, *args, **kwargs):
self.__dependencies__ = kwargs.get('dependencies', [])
self._custom_errors = kwargs.get('errors', {})
def validate(self, value, **kwargs):
raise NotImplementedError()
def get_list_errors(self):
return dict([(key, self._custom_errors[key] if key in self._custom_errors \
else self._errors[key]) for key in self._errors])
def _validation_error(self, error, **kwargs):
raise ValidationError(self.get_list_errors()[error] % kwargs)
class CombinedValidator(Validator):
__combined_fields__ = ()
def __init__(self, *args, **kwargs):
Validator.__init__(self, dependencies=args, **kwargs)
if len(self.__combined_fields__) != len(args):
raise TypeError('__init__() takes exactly %(expected)d arguments (%(given)d given)' %
{'expected': len(self.__combined_fields__), 'given': len(args)})
for combined_field, field in zip(self.__combined_fields__, args):
setattr(self, combined_field, field)
class Field(Validator):
def __init__(self, *args, **kwargs):
Validator.__init__(self, *args, **kwargs)
self.mandatory = kwargs.get('mandatory', True)
self.forbidden = kwargs.get('forbidden', None)
self.validators = [validator for validator in args if isinstance(validator, Validator)]
if 'default' in kwargs:
self.default = kwargs['default']
self._compose_dependencies()
def _compose_dependencies(self):
[self.__dependencies__.append(dep) for dep in \
chain.from_iterable([validator.__dependencies__ for validator in self.validators]) \
if dep not in self.__dependencies__]
def validate(self, value, dependencies={}):
result = value
for validator in self.validators:
self._add_dependencies(validator, dependencies)
result = validator.validate(value)
return result
def _add_dependencies(self, validator, dependencies):
for dep in validator.__dependencies__:
value = None
if dep in dependencies:
value = dependencies[dep]
setattr(validator, dep, value)
class FieldConstant(Field):
def __init__(self, value, *args, **kwargs):
Validator.__init__(self, *args, **kwargs)
self.value = value
class FieldCombined(Field):
def __init__(self, *args, **kwargs):
if len(args) > 1:
raise TypeError("FieldCombined accepts only one validator")
Field.__init__(self, *args, **kwargs)
self.destination = kwargs.get('destination', None)
if not isinstance(self.validators[0], CombinedValidator):
raise TypeError("FieldCombined accepts only CombinedValidator validators")
def _add_dependencies(self, validator, dependencies):
pass
class FieldList(Field):
__errors__ = {'max_error': _("%(max)i items maximum permitted"),
'min_error': _("%(min)i items minimum permitted")}
def __init__(self, *args, **kwargs):
Field.__init__(self, *args, **kwargs)
self.max = kwargs.get('max', None)
self.min = kwargs.get('min', None)
def validate(self, value, dependencies={}):
errors = []
result = []
self._check_length(value)
for item in value if isinstance(value, (list, tuple)) else [value]:
try:
result.append(Field.validate(self, item, dependencies=dependencies))
errors.append(None)
except ValidationError as ve:
errors.append(ve.error)
if any(errors):
raise ValidationError(errors)
return result
def _check_length(self, values):
if self.max is not None and len(values) > self.max:
self._validation_error('max_error', max=self.max)
if self.min is not None and len(values) < self.min:
self._validation_error('min_error', min=self.min)
class MetaSchema(MetaValidator):
def __new__(cls, name, bases, attrs):
cls = MetaValidator.__new__(cls, name, bases, attrs)
cls._constant_fields = copy(getattr(cls, '_constant_fields', {}))
cls._fields = copy(getattr(cls, '_fields', {}))
cls._combined_fields = copy(getattr(cls, '_combined_fields', {}))
for attr, field in attrs.iteritems():
if isinstance(field, FieldConstant):
cls._constant_fields[attr] = field
elif isinstance(field, FieldCombined):
cls._combined_fields[attr] = field
elif isinstance(field, Field):
cls._fields[attr] = field
elif (attr in cls._fields or attr in cls._combined_fields or attr in cls._constant_fields) \
and field is None:
# Remove field
if attr in cls._fields: del(cls._fields[attr])
if attr in cls._combined_fields: del(cls._combined_fields[attr])
if attr in cls._constant_fields: del(cls._constant_fields[attr])
return cls
class Schema(Validator):
__metaclass__ = MetaSchema
__errors__ = {'missing': _('Missing value'),
'forbidden': _("Forbidden by conditions")}
def validate(self, value):
errors = {}
result = {}
self.__constant_fields_validation(value, result)
self.__regular_fields_validation(value, result, errors)
self.__combined_fields_validation(value, result, errors)
if errors:
raise ValidationError(errors)
return result
def __constant_fields_validation(self, value, result):
for field, validator in self._constant_fields.iteritems():
result[field] = validator.value
def __regular_fields_validation(self, value, result, errors):
for field, validator in self._fields.iteritems():
try:
if field in value or hasattr(validator, 'default'):
self.__is_forbidden(validator, value)
test_value = value[field] if field in value else validator.default
deps = self.__lookup_dependencies(validator.__dependencies__, value, result)
result[field] = validator.validate(test_value, dependencies=deps)
elif self.__is_mandatory(validator, value):
self._validation_error('missing')
except ValidationError as ve:
errors[field] = ve.error
def __combined_fields_validation(self, value, result, errors):
for field, validator in self._combined_fields.iteritems():
try:
if set(validator.__dependencies__).isdisjoint(errors.keys()):
deps = self.__lookup_dependencies(validator.__dependencies__, value, result)
validator.validate(value, dependencies=deps)
except ValidationError as ve:
errors[field if validator.destination is None else validator.destination] = ve.error
def __is_forbidden(self, field, values):
if hasattr(field, 'forbidden') and callable(field.forbidden) and field.forbidden(self, values):
self._validation_error('forbidden')
def __is_mandatory(self, field, values):
if callable(field.mandatory):
return field.mandatory(self, values)
else:
return field.mandatory
def __lookup_dependencies(self, dependencies, values, result):
return dict([(dep, values[dep] if dep in values else result[dep]) for dep in dependencies \
if dep in values or dep in result])
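A minimal usage sketch inferred from the classes above (this validator and schema are illustrative, not part of the original package):

class NotEmpty(Validator):
    __errors__ = {'empty': _('Value may not be empty')}

    def validate(self, value, **kwargs):
        if not value:
            self._validation_error('empty')
        return value

class UserSchema(Schema):
    name = Field(NotEmpty())
    age = Field(mandatory=False)

UserSchema().validate({'name': 'Ada'})  # -> {'name': 'Ada'}
UserSchema().validate({})  # raises ValidationError({'name': 'Missing value'})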
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | year: 2011

__id__: 10,574,209,488,696
blob_id: 94a0c5ba85243caafe0e89e4cd1fe87a8bd5a4f9 | directory_id: ac45a4815c38f116b70c0ae61db2cd1f66473ae3 | content_id: ca6348473943c7845f626c6a21fb1c7e2836ee3b
path: /users/management/commands/test_email.py
detected_licenses: [] | license_type: no_license
repo_name: mollycode/tnd | repo_url: https://github.com/mollycode/tnd
snapshot_id: cc03e4ebec14279cb58c48c382fcc9953d6c802a | revision_id: c257dc93b24eed88c3b94a7f56e23e7fa3eab34c | branch_name: refs/heads/master
visit_date: 2020-12-25T03:00:23.350171 | revision_date: 2013-01-13T06:01:06 | committer_date: 2013-01-13T06:01:06
github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_* fields: null
content:
from django.core.management.base import BaseCommand, CommandError
from django.core.mail import EmailMessage
from users.models import User
class Command(BaseCommand):
args = ''
help = 'Sends all users a test message'
def handle(self, *args, **options):
all_users = User.objects.all()
bcc_list = []
for user in all_users:
if user.email:
bcc_list.append(user.email)
self.stdout.write('Added email "%s"\n' % user.email)
message = EmailMessage("Test Message from 3ND",
"This is a test message from the 3ND site, please ignore",
"[email protected]",
["[email protected]"],
bcc = bcc_list)
self.stdout.write('Constructed test email message\n')
message.send(fail_silently = False)
self.stdout.write('Sent test email message\n')
# self.stdout.write('Successfully sent email to poll "%s"' % poll_id)
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | year: 2013

__id__: 11,123,965,310,936
blob_id: 6399f42bc4de1cec99d7a56426d734834458c31a | directory_id: 88450311cfc5611e1dbfa1886c9fd027d6ff04a4 | content_id: 48cad96e875cf192a343c916216a69ece5d13bb5
path: /mkt/reviewers/tests/test_views_themes.py
detected_licenses: [] | license_type: no_license
repo_name: rtilder/zamboni | repo_url: https://github.com/rtilder/zamboni
snapshot_id: 0fa713b4a989eb5408fbd6717428e026ae5d71fb | revision_id: 9902f058f10a517b15c66b5dfe0f0d9fb3c9a34b | branch_name: refs/heads/master
visit_date: 2021-01-17T21:54:48.574071 | revision_date: 2013-04-29T18:26:40 | committer_date: 2013-04-29T18:26:40
github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_* fields: null
content:
# -*- coding: utf-8 -*-
import datetime
from django.conf import settings
from django.contrib.auth.models import User
from django.test.client import RequestFactory
import mock
from nose.tools import eq_
from pyquery import PyQuery as pq
from access.models import GroupUser
from addons.models import Persona
import amo
import amo.tests
from amo.tests import addon_factory, days_ago
from amo.urlresolvers import reverse
from devhub.models import ActivityLog
import mkt.constants.reviewers as rvw
from mkt.reviewers.models import ThemeLock
from mkt.reviewers.views_themes import _get_themes
from mkt.site.fixtures import fixture
from users.models import UserProfile
class ThemeReviewTestMixin(object):
fixtures = fixture('group_admin', 'user_admin', 'user_admin_group',
'user_persona_reviewer', 'user_999')
def setUp(self):
self.reviewer_count = 0
self.create_switch(name='mkt-themes')
self.status = amo.STATUS_PENDING
self.flagged = False
def req_factory_factory(self, user, url):
req = RequestFactory().get(reverse(url))
req.user = user.user
req.groups = req.user.get_profile().groups.all()
req.TABLET = True
return req
def create_and_become_reviewer(self):
"""Login as new reviewer with unique username."""
username = 'reviewer%s' % self.reviewer_count
email = username + '@mozilla.com'
reviewer = User.objects.create(username=email, email=email,
is_active=True, is_superuser=True)
user = UserProfile.objects.create(user=reviewer, email=email,
username=username)
user.set_password('password')
user.save()
GroupUser.objects.create(group_id=50060, user=user)
self.client.login(username=email, password='password')
self.reviewer_count += 1
return user
@mock.patch.object(rvw, 'THEME_INITIAL_LOCKS', 2)
def test_basic_queue(self):
"""
Have reviewers take themes from the pool,
check their queue sizes.
"""
for x in range(rvw.THEME_INITIAL_LOCKS + 1):
addon_factory(type=amo.ADDON_PERSONA, status=self.status)
themes = Persona.objects.all()
expected_themes = [
[themes[0], themes[1]],
[themes[2]],
[]
]
for expected in expected_themes:
reviewer = self.create_and_become_reviewer()
eq_(_get_themes(mock.Mock(), reviewer, flagged=self.flagged),
expected)
eq_(ThemeLock.objects.filter(reviewer=reviewer).count(),
len(expected))
@mock.patch.object(rvw, 'THEME_INITIAL_LOCKS', 2)
def test_top_off(self):
"""If reviewer has fewer than max locks, get more from pool."""
for x in range(2):
addon_factory(type=amo.ADDON_PERSONA, status=self.status)
reviewer = self.create_and_become_reviewer()
_get_themes(mock.Mock(), reviewer, flagged=self.flagged)
ThemeLock.objects.filter(reviewer=reviewer)[0].delete()
_get_themes(mock.Mock(), reviewer, flagged=self.flagged)
# Check reviewer checked out the themes.
eq_(ThemeLock.objects.filter(reviewer=reviewer).count(),
rvw.THEME_INITIAL_LOCKS)
@mock.patch.object(rvw, 'THEME_INITIAL_LOCKS', 2)
def test_expiry(self):
"""
Test that reviewers who want themes from an empty pool can steal
checked-out themes from other reviewers whose locks have expired.
"""
for x in range(2):
addon_factory(type=amo.ADDON_PERSONA, status=self.status)
reviewer = self.create_and_become_reviewer()
_get_themes(mock.Mock(), reviewer, flagged=self.flagged)
# Reviewer wants themes, but empty pool.
reviewer = self.create_and_become_reviewer()
_get_themes(mock.Mock(), reviewer, flagged=self.flagged)
eq_(ThemeLock.objects.filter(reviewer=reviewer).count(), 0)
# Manually expire a lock and see if it's reassigned.
expired_theme_lock = ThemeLock.objects.all()[0]
expired_theme_lock.expiry = self.days_ago(1)
expired_theme_lock.save()
_get_themes(mock.Mock(), reviewer, flagged=self.flagged)
eq_(ThemeLock.objects.filter(reviewer=reviewer).count(), 1)
def test_expiry_update(self):
"""Test expiry is updated when reviewer reloads his queue."""
addon_factory(type=amo.ADDON_PERSONA, status=self.status)
reviewer = self.create_and_become_reviewer()
_get_themes(mock.Mock(), reviewer, flagged=self.flagged)
ThemeLock.objects.filter(reviewer=reviewer).update(expiry=days_ago(1))
_get_themes(mock.Mock(), reviewer, flagged=self.flagged)
eq_(ThemeLock.objects.filter(reviewer=reviewer)[0].expiry >
days_ago(1), True)
@mock.patch('mkt.reviewers.tasks.send_mail_jinja')
def test_commit(self, send_mail_jinja_mock):
for x in range(5):
addon_factory(type=amo.ADDON_PERSONA, status=self.status)
count = Persona.objects.count()
form_data = amo.tests.formset(initial_count=count,
total_count=count + 1)
themes = Persona.objects.all()
# Create locks.
reviewer = self.create_and_become_reviewer()
for index, theme in enumerate(themes):
ThemeLock.objects.create(
theme=theme, reviewer=reviewer,
expiry=datetime.datetime.now() +
datetime.timedelta(minutes=rvw.THEME_LOCK_EXPIRY))
form_data['form-%s-theme' % index] = str(theme.id)
# moreinfo
form_data['form-%s-action' % 0] = str(rvw.ACTION_MOREINFO)
form_data['form-%s-comment' % 0] = 'moreinfo'
form_data['form-%s-reject_reason' % 0] = ''
# flag
form_data['form-%s-action' % 1] = str(rvw.ACTION_FLAG)
form_data['form-%s-comment' % 1] = 'flag'
form_data['form-%s-reject_reason' % 1] = ''
# duplicate
form_data['form-%s-action' % 2] = str(rvw.ACTION_DUPLICATE)
form_data['form-%s-comment' % 2] = 'duplicate'
form_data['form-%s-reject_reason' % 2] = ''
# reject (other)
form_data['form-%s-action' % 3] = str(rvw.ACTION_REJECT)
form_data['form-%s-comment' % 3] = 'reject'
form_data['form-%s-reject_reason' % 3] = '1'
# approve
form_data['form-%s-action' % 4] = str(rvw.ACTION_APPROVE)
form_data['form-%s-comment' % 4] = ''
form_data['form-%s-reject_reason' % 4] = ''
res = self.client.post(reverse('reviewers.themes.commit'), form_data)
self.assert3xx(res, reverse('reviewers.themes.queue_themes'))
eq_(themes[0].addon.status, amo.STATUS_REVIEW_PENDING)
eq_(themes[1].addon.status, amo.STATUS_REVIEW_PENDING)
eq_(themes[2].addon.status, amo.STATUS_REJECTED)
eq_(themes[3].addon.status, amo.STATUS_REJECTED)
eq_(themes[4].addon.status, amo.STATUS_PUBLIC)
eq_(ActivityLog.objects.count(), 5)
expected_calls = [
mock.call('A question about your Theme submission',
'reviewers/themes/emails/moreinfo.html',
{'reason': None,
'comment': u'moreinfo',
'theme': themes[0],
'reviewer_email': u'[email protected]',
'base_url': 'http://testserver'},
headers={'Reply-To': settings.THEMES_EMAIL},
from_email=settings.ADDONS_EMAIL,
recipient_list=set([]), cc=settings.THEMES_EMAIL),
mock.call('Theme submission flagged for review',
'reviewers/themes/emails/flag_reviewer.html',
{'reason': None,
'comment': u'flag',
'theme': themes[1],
'base_url': 'http://testserver'},
headers={'Reply-To': settings.THEMES_EMAIL},
from_email=settings.ADDONS_EMAIL,
recipient_list=[settings.THEMES_EMAIL], cc=None),
mock.call('A problem with your Theme submission',
'reviewers/themes/emails/reject.html',
{'reason': mock.ANY,
'comment': u'duplicate',
'theme': themes[2],
'base_url': 'http://testserver'},
headers={'Reply-To': settings.THEMES_EMAIL},
from_email=settings.ADDONS_EMAIL,
recipient_list=set([]), cc=settings.THEMES_EMAIL),
mock.call('A problem with your Theme submission',
'reviewers/themes/emails/reject.html',
{'reason': mock.ANY,
'comment': u'reject',
'theme': themes[3],
'base_url': 'http://testserver'},
headers={'Reply-To': settings.THEMES_EMAIL},
from_email=settings.ADDONS_EMAIL,
recipient_list=set([]), cc=settings.THEMES_EMAIL),
mock.call('Thanks for submitting your Theme',
'reviewers/themes/emails/approve.html',
{'reason': None,
'comment': u'',
'theme': themes[4],
'base_url': 'http://testserver'},
headers={'Reply-To': settings.THEMES_EMAIL},
from_email=settings.ADDONS_EMAIL,
recipient_list=set([]), cc=settings.THEMES_EMAIL)
]
eq_(send_mail_jinja_mock.call_args_list[0], expected_calls[0])
eq_(send_mail_jinja_mock.call_args_list[1], expected_calls[1])
eq_(send_mail_jinja_mock.call_args_list[2], expected_calls[2])
eq_(send_mail_jinja_mock.call_args_list[3], expected_calls[3])
eq_(send_mail_jinja_mock.call_args_list[4], expected_calls[4])
def test_user_review_history(self):
addon_factory(type=amo.ADDON_PERSONA, status=self.status)
reviewer = self.create_and_become_reviewer()
res = self.client.get(reverse('reviewers.themes.history'))
eq_(res.status_code, 200)
doc = pq(res.content)
eq_(doc('tbody tr').length, 0)
theme = Persona.objects.all()[0]
for x in range(3):
amo.log(amo.LOG.THEME_REVIEW, theme.addon, user=reviewer,
details={'action': rvw.ACTION_APPROVE,
'comment': '', 'reject_reason': ''})
res = self.client.get(reverse('reviewers.themes.history'))
eq_(res.status_code, 200)
doc = pq(res.content)
eq_(doc('tbody tr').length, 3)
res = self.client.get(reverse('reviewers.themes.logs'))
eq_(res.status_code, 200)
doc = pq(res.content)
eq_(doc('tbody tr').length, 3 * 2) # Double for comment rows.
def test_single_basic(self):
with self.settings(ALLOW_SELF_REVIEWS=True):
user = UserProfile.objects.get(
email='[email protected]')
self.login(user)
addon = addon_factory(type=amo.ADDON_PERSONA, status=self.status)
res = self.client.get(reverse('reviewers.themes.single',
args=[addon.slug]))
eq_(res.status_code, 200)
eq_(res.context['theme'].id, addon.persona.id)
eq_(res.context['reviewable'], not self.flagged)
def test_single_cannot_review_my_app(self):
with self.settings(ALLOW_SELF_REVIEWS=False):
user = UserProfile.objects.get(
email='[email protected]')
self.login(user)
addon = addon_factory(type=amo.ADDON_PERSONA, status=self.status)
addon.addonuser_set.create(user=user)
res = self.client.get(reverse('reviewers.themes.single',
args=[addon.slug]))
eq_(res.status_code, 200)
eq_(res.context['theme'].id, addon.persona.id)
eq_(res.context['reviewable'], False)
class TestThemeReviewQueue(ThemeReviewTestMixin, amo.tests.TestCase):
def setUp(self):
super(TestThemeReviewQueue, self).setUp()
self.queue_url = reverse('reviewers.themes.queue_themes')
def check_permissions(self, slug, status_code):
for url in [reverse('reviewers.themes.queue_themes'),
reverse('reviewers.themes.single', args=[slug])]:
eq_(self.client.get(url).status_code, status_code)
def test_permissions_reviewer(self):
slug = addon_factory(type=amo.ADDON_PERSONA, status=self.status).slug
self.assertLoginRedirects(self.client.get(self.queue_url),
self.queue_url)
self.login('[email protected]')
self.check_permissions(slug, 403)
self.create_and_become_reviewer()
self.check_permissions(slug, 200)
def test_can_review_your_app(self):
with self.settings(ALLOW_SELF_REVIEWS=False):
user = UserProfile.objects.get(
email='[email protected]')
self.login(user)
addon = addon_factory(type=amo.ADDON_PERSONA, status=self.status)
res = self.client.get(self.queue_url)
eq_(len(res.context['theme_formsets']), 1)
# I should be able to review this app. It is not mine.
eq_(res.context['theme_formsets'][0][0], addon.persona)
def test_cannot_review_my_app(self):
with self.settings(ALLOW_SELF_REVIEWS=False):
user = UserProfile.objects.get(
email='[email protected]')
self.login(user)
addon = addon_factory(type=amo.ADDON_PERSONA, status=self.status)
addon.addonuser_set.create(user=user)
res = self.client.get(self.queue_url)
# I should not be able to review my own app.
eq_(len(res.context['theme_formsets']), 0)
def test_theme_list(self):
self.create_and_become_reviewer()
addon_factory(type=amo.ADDON_PERSONA, status=self.status)
res = self.client.get(reverse('reviewers.themes.list'))
eq_(res.status_code, 200)
eq_(pq(res.content)('#addon-queue tbody tr').length, 1)
@mock.patch.object(rvw, 'THEME_INITIAL_LOCKS', 1)
def test_release_locks(self):
for x in range(2):
addon_factory(type=amo.ADDON_PERSONA, status=self.status)
other_reviewer = self.create_and_become_reviewer()
_get_themes(mock.Mock(), other_reviewer)
# Check reviewer's theme lock released.
reviewer = self.create_and_become_reviewer()
_get_themes(mock.Mock(), reviewer)
eq_(ThemeLock.objects.filter(reviewer=reviewer).count(), 1)
self.client.get(reverse('reviewers.themes.release_locks'))
eq_(ThemeLock.objects.filter(reviewer=reviewer).count(), 0)
# Check other reviewer's theme lock intact.
eq_(ThemeLock.objects.filter(reviewer=other_reviewer).count(), 1)
@mock.patch.object(rvw, 'THEME_INITIAL_LOCKS', 2)
def test_themes_less_than_initial(self):
"""
Number of themes in the pool is less than amount we want to check out.
"""
addon_factory(type=amo.ADDON_PERSONA, status=self.status)
reviewer = self.create_and_become_reviewer()
eq_(len(_get_themes(mock.Mock(), reviewer)), 1)
eq_(len(_get_themes(mock.Mock(), reviewer)), 1)
class TestThemeReviewQueueFlagged(ThemeReviewTestMixin, amo.tests.TestCase):
def setUp(self):
super(TestThemeReviewQueueFlagged, self).setUp()
self.status = amo.STATUS_REVIEW_PENDING
self.flagged = True
self.queue_url = reverse('reviewers.themes.queue_flagged')
def test_admin_only(self):
self.login('[email protected]')
eq_(self.client.get(self.queue_url).status_code, 403)
self.login('[email protected]')
eq_(self.client.get(self.queue_url).status_code, 200)
|
UTF-8
|
Python
| false | false | 2,013 |
14,654,428,429,759 |
4a125d0b949e4a4f8ed3d1cd9da827cd6c91fa97
|
97841ed9db24b7f4fa4d37b502094f11b21f2719
|
/p0067.py
|
6154af319e2cd680e3a8519155ff1ff19fa02e51
|
[
"Unlicense"
] |
permissive
|
ravish0007/patterns
|
https://github.com/ravish0007/patterns
|
9a5ea9d5efaeaa1c6ff1cbb10dfe891ffde3b116
|
5babd0d51d38259cd7497226617ab330141e07ba
|
refs/heads/master
| 2020-07-04T19:21:56.453406 | 2014-09-03T21:37:05 | 2014-09-03T21:37:05 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from IPython.html import widgets
from IPython.display import display
options = {
'first value' : 0,
'second value': 1,
'third value': 2,
}
s = widgets.SelectWidget()
s.values = options
display(s)
d = widgets.DropdownWidget()
d.values = options
display(d)
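
# Illustrative follow-up sketch (assumes the IPython 2.x widget/traitlets API,
# where handlers can be registered on the 'value' trait): react when the user
# changes the selection; `new` is the mapped option value, e.g. 0 for
# 'first value'.
def on_value_change(name, old, new):
    print name, old, new

s.on_trait_change(on_value_change, 'value')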
|
UTF-8
|
Python
| false | false | 2,014 |
3,685,081,987,106 |
0040b7c2115ad4ae9a8ff08387d13fddc37d28ec
|
ee9e6f63a640921654588ba3ddcc9354bb9badf5
|
/kdriver/gen_keys_file.py
|
2545caff926271b25be401e99f7f0a402b60d661
|
[] |
no_license
|
SecurityResearchTeam/pcmonitor
|
https://github.com/SecurityResearchTeam/pcmonitor
|
ba501494d6e1172f49e484435289de3eefecd2de
|
e02b4494957274accefe34a751a1a937c49a7041
|
refs/heads/master
| 2021-01-11T14:08:39.149253 | 2014-02-05T12:57:37 | 2014-02-05T12:57:37 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def read_crt_lines(path):
lines = []
f = open(path, "r")
while 1:
l = f.readline()
if l == '':
break
lines.append(l)
f.close()
return lines
ca_lines = read_crt_lines("..\\keys\\ca.crt")
client_lines = read_crt_lines("..\\keys\\ssl_client.crt")
f = open("keys.c", "w")
f.write('#include <inc\keys.h>\n')
f.write('char *CA_Cert =\n')
for l in ca_lines:
f.write('"' + l.replace('\n', '') + '\\r\\n"\n')
f.write(";\n");
f.write('char *Client_Cert =\n')
for l in client_lines:
f.write('"' + l.replace('\n', '') + '\\r\\n"\n')
f.write(";\n");
f.close()
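
# For reference, the generated keys.c has this shape (certificate contents are
# illustrative; each .crt line becomes a quoted string ending in \r\n):
#   #include <inc\keys.h>
#   char *CA_Cert =
#   "-----BEGIN CERTIFICATE-----\r\n"
#   "MIIC...\r\n"
#   "-----END CERTIFICATE-----\r\n"
#   ;
#   char *Client_Cert =
#   ... the same layout, built from ssl_client.crt.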
|
UTF-8
|
Python
| false | false | 2,014 |
16,020,228,049,286 |
9bf371e973b615812b03b27382d0dda81285df27
|
c6574e2957c017ff39476d01b4816fe1941aad8a
|
/pysmartt/smartt_client.py
|
92c9c36f2f128a46eec81d1a2852b7206e173da9
|
[
"BSD-2-Clause-Views",
"Python-2.0"
] |
permissive
|
leocm/smartt-python-client
|
https://github.com/leocm/smartt-python-client
|
8b41d2ed04e58ec7d511214c6d10525b542277e7
|
94473e7f4a0d2128d420ab638a5a1f6f88111eb1
|
refs/heads/master
| 2020-12-25T15:50:55.349236 | 2013-08-19T17:23:02 | 2013-08-19T17:23:02 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Standard library imports
import socket
import ssl
import select
# Local imports
from smartt_simple_protocol import SmarttSimpleProtocol
class SmarttClientException(BaseException):
pass
##############################################################################
### SmarttClient class - encapsulates connection and communication with Smartt
### server, presenting a nice and easy to use API to the user
class SmarttClient(object):
##########################################################################
#############
# API Enums #
#############
marketNames = [
"Bovespa",
"BMF"
]
orderStatuses = [
"canceled",
"executed",
"hung",
"hung_cancellable",
"hung_pending",
"partially_canceled",
"partially_executed",
"partially_executed_cancellable",
"rejected",
"expired"
]
ordersEventsTypes = [
"order_sent",
"order_canceled",
"order_changed",
"order_executed",
"order_expired"
]
stopOrderStatuses = [
"canceled_by_client",
"canceled_expired_option",
"canceled_not_allowed_market",
"canceled_not_enough_balance",
"canceled_not_positioned",
"canceled_order_limit_exceeded",
"hung",
"sent",
"expired"
]
stopOrdersEventsTypes = [
"stop_order_sent",
"stop_order_canceled",
"stop_order_triggered",
"stop_order_expired"
]
validityTypes = [
"HJ",
"DE",
"AC"
]
##########################################################################
### Init function - connects to the server (possibly initializing the SSL
### protocol as well) and setups the protocol handler
def __init__(self, host="smartt.s10i.com.br", port=5060, use_ssl=True,
print_raw_messages=False):
self.host = host
self.port = port
self.smartt_socket = socket.create_connection((self.host, self.port))
if use_ssl:
self.smartt_socket = ssl.wrap_socket(self.smartt_socket)
self.protocol = SmarttSimpleProtocol(self.smartt_socket.recv,
self.smartt_socket.send,
print_raw_messages)
# Generic Wrapper for all Smartt functions - sends the function message
# and returns the response (next message from the server)
def smarttFunction(self, message):
self.protocol.send(message)
response = self.protocol.receive()
if len(response) > 0 and response[0] == "ERROR":
            if len(response) != 2:
                print "STRANGE! Error response doesn't have 2 values: %s" % \
                    str(response)
            raise SmarttClientException(": ".join(response))
return response
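
    # Illustrative round trip (hypothetical credentials): every API method
    # below reduces to a call shaped like
    #   self.smarttFunction(["login", "s10i_login=user", "s10i_password=secret"])
    # i.e. the command name followed by "name=value" strings. On failure the
    # server replies ["ERROR", "<description>"] and a SmarttClientException is
    # raised instead of returning.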
##########################################################################
### Generic messages (list of strings) handling ###
###################################################
def sendMessage(self, message):
self.protocol.send(message)
def receiveMessage(self):
return self.protocol.receive()
##########################################################################
##########################################################################
### Raw messages handling ###
#############################
def sendRawMessage(self, message):
self.smartt_socket.send(message)
# Reads everything available until timing out
def receiveRawMessage(self):
# Read in chunks of at most 4K - the magical number for recv calls :)
receive_size = 4096
# Timeout of half a second - just enough so that a continuous
# transmission from the server isn't missed (totally arbitrary choice)
select_timeout = 0.5
# Has to receive something, so just use the blocking function
data = self.smartt_socket.recv(receive_size)
# Wait and check for data, if available, read, if times out, stops
while len(select.select([self.smartt_socket], [], [],
select_timeout)[0]) > 0:
data += self.smartt_socket.recv(receive_size)
return data
##########################################################################
##########################################################################
### Helper functions ###
########################
def checkAttributes(self, attributes, possibleValues):
for attribute in attributes:
if attribute not in possibleValues:
raise SmarttClientException("Invalid attribute: " + attribute)
def formatAttributes(self, name, attributes, possibleValues):
if not attributes:
return ""
self.checkAttributes(attributes, possibleValues)
return self.formatString(name, ",".join(attributes))
def formatString(self, name, value, optional=True):
if value is None:
if not optional:
raise SmarttClientException("Non-optional parameter is NULL: "
+ name)
else:
return []
return [("%s=%s" % (name, value))]
def formatInteger(self, name, value, optional=True):
formattedValue = (str(int(value))
if value is not None else None)
return self.formatString(name, formattedValue, optional)
def formatDecimal2(self, name, value, optional=True):
formattedValue = (("%.2f" % float(value))
if value is not None else None)
return self.formatString(name, formattedValue, optional)
def formatDecimal6(self, name, value, optional=True):
formattedValue = (("%.6f" % float(value))
if value is not None else None)
return self.formatString(name, formattedValue, optional)
def formatDatetime(self, name, value, optional=True):
formattedValue = (value.strftime("%Y-%m-%d %H:%M:%S")
if value is not None else None)
return self.formatString(name, formattedValue, optional)
def formatDate(self, name, value, optional=True):
formattedValue = (value.strftime("%Y-%m-%d")
if value is not None else None)
return self.formatString(name, formattedValue, optional)
def formatBoolean(self, name, value, falseAndTrueValues=["no", "yes"], optional=True):
formattedValue = None
if value == 0 or value is False or value == falseAndTrueValues[0]:
formattedValue = "0"
elif value == 1 or value is True or value == falseAndTrueValues[1]:
formattedValue = "1"
else:
raise SmarttClientException("Invalid boolean value '" + name +
"': " + value)
return self.formatString(name, formattedValue, optional)
def formatEnum(self, name, value, enumValues, optional=True):
if value is not None and value not in enumValues:
raise SmarttClientException("Invalid '" + name +
"' parameter value: " + value)
return self.formatString(name, value, optional)
def formatDictResponse(self, values, attributes, defaultAttributes=[]):
        if not attributes:
attributes = defaultAttributes
return dict(zip(attributes, values))
def formatListOfDictsResponse(self, values, attributes, defaultAttributes):
if not attributes:
attributes = defaultAttributes
k = len(attributes)
return [self.formatDictResponse(values[i:i + k], attributes) for i in
xrange(0, len(values), k)]
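
    # Example of the chunking above (illustrative values): with
    # attributes = ["stock_code", "price"] and
    # values = ["PETR4", "20.00", "VALE5", "30.00"], k == 2 and the result is
    #   [{"stock_code": "PETR4", "price": "20.00"},
    #    {"stock_code": "VALE5", "price": "30.00"}]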
##########################################################################
### Smartt functions ###
########################
loginAttributes = [
"message"]
def login(self, s10iLogin = None, s10iPassword = None):
message = ["login"]
message += self.formatString("s10i_login", s10iLogin, optional=False)
message += self.formatString("s10i_password", s10iPassword, optional=False)
response = self.smarttFunction(filter(None, message))
return unicode(response[0])
logoutAttributes = [
"message"]
def logout(self):
message = ["logout"]
response = self.smarttFunction(filter(None, message))
return unicode(response[0])
loggedAttributes = [
"message"]
def logged(self):
message = ["logged"]
response = self.smarttFunction(filter(None, message))
return unicode(response[0])
getClientAttributes = [
"natural_person_or_legal_person",
"name_or_corporate_name",
"gender",
"document",
"email",
"s10i_login",
"address",
"number",
"complement",
"neighborhood",
"postal_code",
"city",
"state",
"country",
"birthday",
"main_phone",
"secondary_phone",
"company"]
def getClient(self, returnAttributes = None):
message = ["get_client"]
message += self.formatAttributes("return_attributes", returnAttributes, self.getClientAttributes)
response = self.smarttFunction(filter(None, message))
return self.formatDictResponse(response, returnAttributes, self.getClientAttributes)
updateClientAttributes = [
"message"]
def updateClient(self, s10iPassword = None, naturalPersonOrLegalPerson = None, nameOrCorporateName = None, gender = None, document = None, email = None, s10iLogin = None, newS10iPassword = None, address = None, number = None, complement = None, neighborhood = None, postalCode = None, city = None, state = None, country = None, birthday = None, mainPhone = None, secondaryPhone = None, company = None):
message = ["update_client"]
message += self.formatString("s10i_password", s10iPassword, optional=True)
message += self.formatBoolean("natural_person_or_legal_person", naturalPersonOrLegalPerson, optional=True)
message += self.formatString("name_or_corporate_name", nameOrCorporateName, optional=True)
        message += self.formatString("gender", gender, optional=True)
message += self.formatInteger("document", document, optional=False)
message += self.formatString("email", email, optional=False)
message += self.formatString("s10i_login", s10iLogin, optional=False)
message += self.formatString("new_s10i_password", newS10iPassword, optional=True)
message += self.formatString("address", address, optional=True)
message += self.formatString("number", number, optional=True)
message += self.formatString("complement", complement, optional=True)
message += self.formatString("neighborhood", neighborhood, optional=True)
message += self.formatString("postal_code", postalCode, optional=True)
message += self.formatString("city", city, optional=True)
message += self.formatString("state", state, optional=True)
message += self.formatString("country", country, optional=True)
message += self.formatDate("birthday", birthday, optional=True)
message += self.formatString("main_phone", mainPhone, optional=True)
message += self.formatString("secondary_phone", secondaryPhone, optional=True)
message += self.formatString("company", company, optional=True)
response = self.smarttFunction(filter(None, message))
return unicode(response[0])
getClientBrokeragesAttributes = [
"brokerage_id",
"brokerage_login"]
def getClientBrokerages(self, brokerageId = None, brokerageLogin = None, returnAttributes = None):
message = ["get_client_brokerages"]
message += self.formatInteger("brokerage_id", brokerageId, optional=True)
message += self.formatString("brokerage_login", brokerageLogin, optional=True)
message += self.formatAttributes("return_attributes", returnAttributes, self.getClientBrokeragesAttributes)
response = self.smarttFunction(filter(None, message))
return self.formatDictResponse(response, returnAttributes, self.getClientBrokeragesAttributes)
insertClientBrokerageAttributes = [
"message"]
def insertClientBrokerage(self, brokerageId = None, brokerageLogin = None, brokeragePassword = None, brokerageDigitalSignature = None):
message = ["insert_client_brokerage"]
message += self.formatInteger("brokerage_id", brokerageId, optional=False)
message += self.formatString("brokerage_login", brokerageLogin, optional=False)
message += self.formatString("brokerage_password", brokeragePassword, optional=False)
message += self.formatString("brokerage_digital_signature", brokerageDigitalSignature, optional=False)
response = self.smarttFunction(filter(None, message))
return unicode(response[0])
updateClientBrokerageAttributes = [
"message"]
    def updateClientBrokerage(self, brokerageId = None, newBrokerageId = None, brokerageLogin = None, brokeragePassword = None, brokerageDigitalSignature = None):
message = ["update_client_brokerage"]
message += self.formatInteger("brokerage_id", brokerageId, optional=False)
message += self.formatInteger("new_brokerage_id", newBrokerageId, optional=True)
message += self.formatString("brokerage_login", brokerageLogin, optional=True)
message += self.formatString("brokerage_password", brokeragePassword, optional=True)
message += self.formatString("brokerage_digiral_signature", brokerageDigiralSignature, optional=True)
response = self.smarttFunction(filter(None, message))
return unicode(response[0])
deleteClientBrokeragesAttributes = [
"message"]
def deleteClientBrokerages(self, brokerageId = None, brokerageLogin = None):
message = ["delete_client_brokerages"]
message += self.formatInteger("brokerage_id", brokerageId, optional=True)
message += self.formatString("brokerage_login", brokerageLogin, optional=True)
response = self.smarttFunction(filter(None, message))
return unicode(response[0])
getStockAttributes = [
"stock_code",
"market_name",
"company_name",
"kind_of_stock",
"isin_code",
"trading_lot_size",
"kind_of_quotation",
"type",
"code_underlying_stock",
"exercise_price",
"expiration_date"]
def getStock(self, stockCode = None, marketName = None, returnAttributes = None):
message = ["get_stock"]
message += self.formatString("stock_code", stockCode, optional=False)
message += self.formatString("market_name", marketName, optional=True)
message += self.formatAttributes("return_attributes", returnAttributes, self.getStockAttributes)
response = self.smarttFunction(filter(None, message))
return self.formatDictResponse(response, returnAttributes, self.getStockAttributes)
sendOrderAttributes = [
"order_id"]
def sendOrder(self, investmentCode = None, brokerageId = None, orderType = None, stockCode = None, marketName = None, numberOfStocks = None, price = None, validityType = None, validity = None):
message = ["send_order"]
message += self.formatString("investment_code", investmentCode, optional=False)
message += self.formatInteger("brokerage_id", brokerageId, optional=True)
message += self.formatBoolean("order_type", orderType, optional=False)
message += self.formatString("stock_code", stockCode, optional=False)
message += self.formatString("market_name", marketName, optional=True)
message += self.formatInteger("number_of_stocks", numberOfStocks, optional=False)
message += self.formatDecimal2("price", price, optional=False)
message += self.formatString("validity_type", validityType, optional=True)
message += self.formatDate("validity", validity, optional=True)
response = self.smarttFunction(filter(None, message))
return int(response[1])
cancelOrderAttributes = [
"order_id"]
def cancelOrder(self, orderId = None):
message = ["cancel_order"]
message += self.formatInteger("order_id", orderId, optional=False)
response = self.smarttFunction(filter(None, message))
return int(response[1])
changeOrderAttributes = [
"order_id"]
def changeOrder(self, orderId = None, newNumberOfStocks = None, newPrice = None):
message = ["change_order"]
message += self.formatInteger("order_id", orderId, optional=False)
message += self.formatInteger("new_number_of_stocks", newNumberOfStocks, optional=True)
message += self.formatDecimal2("new_price", newPrice, optional=True)
response = self.smarttFunction(filter(None, message))
return int(response[1])
getOrdersAttributes = [
"order_id",
"order_id_in_brokerage",
"investment_code",
"brokerage_id",
"is_real",
"order_type",
"stock_code",
"market_name",
"datetime",
"number_of_stocks",
"price",
"financial_volume",
"validity_type",
"validity",
"number_of_traded_stocks",
"average_nominal_price",
"status",
"absolute_brokerage_tax_cost",
"percentual_brokerage_tax_cost",
"iss_tax_cost"]
def getOrders(self, orderId = None, investmentCode = None, brokerageId = None, initialDatetime = None, finalDatetime = None, status = None, returnAttributes = None):
message = ["get_orders"]
message += self.formatInteger("order_id", orderId, optional=True)
message += self.formatString("investment_code", investmentCode, optional=True)
message += self.formatInteger("brokerage_id", brokerageId, optional=True)
message += self.formatDatetime("initial_datetime", initialDatetime, optional=True)
message += self.formatDatetime("final_datetime", finalDatetime, optional=True)
message += self.formatString("status", status, optional=True)
message += self.formatAttributes("return_attributes", returnAttributes, self.getOrdersAttributes)
response = self.smarttFunction(filter(None, message))
return self.formatListOfDictsResponse(response, returnAttributes, self.getOrdersAttributes)
getOrdersEventsAttributes = [
"order_id",
"investment_code",
"brokerage_id",
"number_of_events",
"datetime",
"event_type",
"description"]
def getOrdersEvents(self, orderId = None, investmentCode = None, brokerageId = None, initialDatetime = None, finalDatetime = None, eventType = None, returnAttributes = None):
message = ["get_orders_events"]
message += self.formatInteger("order_id", orderId, optional=True)
message += self.formatString("investment_code", investmentCode, optional=True)
message += self.formatInteger("brokerage_id", brokerageId, optional=True)
message += self.formatDatetime("initial_datetime", initialDatetime, optional=True)
message += self.formatDatetime("final_datetime", finalDatetime, optional=True)
message += self.formatString("event_type", eventType, optional=True)
message += self.formatAttributes("return_attributes", returnAttributes, self.getOrdersEventsAttributes)
response = self.smarttFunction(filter(None, message))
return self.formatListOfDictsResponse(response, returnAttributes, self.getOrdersEventsAttributes)
getOrderIdAttributes = [
"order_id"]
def getOrderId(self, orderIdInBrokerage = None, brokerageId = None):
message = ["get_order_id"]
message += self.formatString("order_id_in_brokerage", orderIdInBrokerage, optional=False)
message += self.formatInteger("brokerage_id", brokerageId, optional=False)
response = self.smarttFunction(filter(None, message))
return int(response[0])
sendStopOrderAttributes = [
"stop_order_id"]
def sendStopOrder(self, investmentCode = None, brokerageId = None, orderType = None, stopOrderType = None, stockCode = None, marketName = None, numberOfStocks = None, stopPrice = None, limitPrice = None, validity = None, validAfterMarket = None):
message = ["send_stop_order"]
message += self.formatString("investment_code", investmentCode, optional=False)
message += self.formatInteger("brokerage_id", brokerageId, optional=True)
message += self.formatBoolean("order_type", orderType, optional=False)
message += self.formatBoolean("stop_order_type", stopOrderType, optional=False)
message += self.formatString("stock_code", stockCode, optional=False)
message += self.formatString("market_name", marketName, optional=True)
message += self.formatInteger("number_of_stocks", numberOfStocks, optional=False)
message += self.formatDecimal2("stop_price", stopPrice, optional=False)
message += self.formatDecimal2("limit_price", limitPrice, optional=False)
message += self.formatDate("validity", validity, optional=False)
message += self.formatBoolean("valid_after_market", validAfterMarket, optional=False)
response = self.smarttFunction(filter(None, message))
return int(response[1])
cancelStopOrderAttributes = [
"stop_order_id"]
def cancelStopOrder(self, stopOrderId = None):
message = ["cancel_stop_order"]
message += self.formatInteger("stop_order_id", stopOrderId, optional=False)
response = self.smarttFunction(filter(None, message))
return int(response[1])
getStopOrdersAttributes = [
"stop_order_id",
"order_id_in_brokerage",
"investment_code",
"brokerage_id",
"is_real",
"order_type",
"stop_order_type",
"stock_code",
"market_name",
"datetime",
"number_of_stocks",
"stop_price",
"limit_price",
"validity",
"valid_after_market",
"status",
"sent_order_id"]
def getStopOrders(self, stopOrderId = None, investmentCode = None, brokerageId = None, initialDatetime = None, finalDatetime = None, status = None, returnAttributes = None):
message = ["get_stop_orders"]
message += self.formatInteger("stop_order_id", stopOrderId, optional=True)
message += self.formatString("investment_code", investmentCode, optional=True)
message += self.formatInteger("brokerage_id", brokerageId, optional=True)
message += self.formatDatetime("initial_datetime", initialDatetime, optional=True)
message += self.formatDatetime("final_datetime", finalDatetime, optional=True)
message += self.formatString("status", status, optional=True)
message += self.formatAttributes("return_attributes", returnAttributes, self.getStopOrdersAttributes)
response = self.smarttFunction(filter(None, message))
return self.formatListOfDictsResponse(response, returnAttributes, self.getStopOrdersAttributes)
getStopOrdersEventsAttributes = [
"stop_order_id",
"investment_code",
"brokerage_id",
"number_of_events",
"datetime",
"event_type",
"description"]
def getStopOrdersEvents(self, stopOrderId = None, investmentCode = None, brokerageId = None, initialDatetime = None, finalDatetime = None, eventType = None, returnAttributes = None):
message = ["get_stop_orders_events"]
message += self.formatInteger("stop_order_id", stopOrderId, optional=True)
message += self.formatString("investment_code", investmentCode, optional=True)
message += self.formatInteger("brokerage_id", brokerageId, optional=True)
message += self.formatDatetime("initial_datetime", initialDatetime, optional=True)
message += self.formatDatetime("final_datetime", finalDatetime, optional=True)
message += self.formatString("event_type", eventType, optional=True)
message += self.formatAttributes("return_attributes", returnAttributes, self.getStopOrdersEventsAttributes)
response = self.smarttFunction(filter(None, message))
return self.formatListOfDictsResponse(response, returnAttributes, self.getStopOrdersEventsAttributes)
getStopOrderIdAttributes = [
"stop_order_id"]
def getStopOrderId(self, stopOrderIdInBrokerage = None, brokerageId = None):
message = ["get_stop_order_id"]
message += self.formatString("stop_order_id_in_brokerage", stopOrderIdInBrokerage, optional=False)
message += self.formatInteger("brokerage_id", brokerageId, optional=False)
response = self.smarttFunction(filter(None, message))
return int(response[0])
getTradesAttributes = [
"order_id",
"trade_id_in_brokerage",
"investment_code",
"brokerage_id",
"is_real",
"trade_type",
"stock_code",
"market_name",
"datetime",
"number_of_stocks",
"price",
"financial_volume",
"trading_tax_cost",
"liquidation_tax_cost",
"register_tax_cost",
"income_tax_cost",
"withholding_income_tax_cost",
"other_taxes_cost"]
def getTrades(self, orderId = None, investmentCode = None, brokerageId = None, initialDatetime = None, finalDatetime = None, returnAttributes = None):
message = ["get_trades"]
message += self.formatInteger("order_id", orderId, optional=True)
message += self.formatString("investment_code", investmentCode, optional=True)
message += self.formatInteger("brokerage_id", brokerageId, optional=False)
message += self.formatDatetime("initial_datetime", initialDatetime, optional=True)
message += self.formatDatetime("final_datetime", finalDatetime, optional=True)
message += self.formatAttributes("return_attributes", returnAttributes, self.getTradesAttributes)
response = self.smarttFunction(filter(None, message))
return self.formatListOfDictsResponse(response, returnAttributes, self.getTradesAttributes)
getInvestmentsAttributes = [
"name",
"code",
"brokerage_id",
"setup_code",
"is_real",
"initial_datetime",
"final_datetime"]
def getInvestments(self, investmentCode = None, brokerageId = None, returnAttributes = None):
message = ["get_investments"]
message += self.formatString("investment_code", investmentCode, optional=True)
message += self.formatInteger("brokerage_id", brokerageId, optional=True)
message += self.formatAttributes("return_attributes", returnAttributes, self.getInvestmentsAttributes)
response = self.smarttFunction(filter(None, message))
return self.formatDictResponse(response, returnAttributes, self.getInvestmentsAttributes)
getReportAttributes = [
"investment_code",
"brokerage_id",
"setup_code",
"initial_datetime",
"final_datetime",
"number_of_days",
"total_contributions",
"total_withdraws",
"initial_capital",
"balance",
"equity",
"taxes_and_operational_costs",
"gross_return",
"gross_daily_return",
"gross_annualized_return",
"net_return",
"net_daily_return",
"net_annualized_return",
"absolute_initial_drawdown",
"percentual_initial_drawdown",
"absolute_maximum_drawdown",
"percentual_maximum_drawdown",
"gross_profit",
"gross_loss",
"total_gross_profit",
"net_profit",
"net_loss",
"total_net_profit",
"profit_factor",
"number_of_eliminations",
"expected_payoff",
"absolute_number_of_profit_eliminations",
"percentual_number_of_profit_eliminations",
"absolute_largest_profit_elimination",
"percentual_largest__profit_elimination",
"average_profit_in_profit_eliminations",
"maximum_consecutive_profit_eliminations",
"total_profit_in_maximum_consecutive_profit_eliminatons",
"absolute_number_of_loss_eliminations",
"percentual_number_of_loss_eliminations",
"absolute_largest_loss_elimination",
"percentual_largest__loss_elimination",
"average_loss_in_loss_eliminations",
"maximum_consecutive_loss_eliminations",
"total_loss_in_maximum_consecutive_loss_eliminations",
"absolute_number_of_eliminations_of_long_positions",
"percentual_number_of_eliminations_of_long_positions",
"absolute_number_of_profit_eliminations_of_long_positions",
"percentual_number_of_profit_eliminations_of_long_positions",
"absolute_number_of_loss_eliminations_of_long_positions",
"percentual_number_of_loss_eliminations_of_long_positions",
"absolute_number_of_eliminations_of_short_positions",
"percentual_number_of_eliminations_of_short_positions",
"absolute_number_of_profit_eliminations_of_short_positions",
"percentual_number_of_profit_eliminations_of_short_positions",
"absolute_number_of_loss_eliminations_of_short_positions",
"percentual_number_of_loss_eliminations_of_short_positions"]
def getReport(self, investmentCode = None, brokerageId = None, returnAttributes = None):
message = ["get_report"]
message += self.formatString("investment_code", investmentCode, optional=False)
message += self.formatInteger("brokerage_id", brokerageId, optional=True)
message += self.formatAttributes("return_attributes", returnAttributes, self.getReportAttributes)
response = self.smarttFunction(filter(None, message))
return self.formatDictResponse(response, returnAttributes, self.getReportAttributes)
getDailyCumulativePerformanceAttributes = [
"investment_code",
"brokerage_id",
"daily_cumulative_performance"]
def getDailyCumulativePerformance(self, investmentCode = None, brokerageId = None):
message = ["get_daily_cumulative_performance"]
message += self.formatString("investment_code", investmentCode, optional=False)
message += self.formatInteger("brokerage_id", brokerageId, optional=True)
response = self.smarttFunction(filter(None, message))
return self.formatDictResponse(response, [], self.getDailyCumulativePerformanceAttributes)
getDailyDrawdownAttributes = [
"investment_code",
"brokerage_id",
"daily_drawdown"]
def getDailyDrawdown(self, investmentCode = None, brokerageId = None):
message = ["get_daily_drawdown"]
message += self.formatString("investment_code", investmentCode, optional=False)
message += self.formatInteger("brokerage_id", brokerageId, optional=True)
response = self.smarttFunction(filter(None, message))
return self.formatDictResponse(response, [], self.getDailyDrawdownAttributes)
getPortfolioAttributes = [
"investment_code",
"brokerage_id",
"stock_code",
"position_type",
"number_of_stocks",
"average_price",
"financial_volume"]
def getPortfolio(self, investmentCode = None, brokerageId = None, returnAttributes = None):
message = ["get_portfolio"]
message += self.formatString("investment_code", investmentCode, optional=False)
message += self.formatInteger("brokerage_id", brokerageId, optional=True)
message += self.formatAttributes("return_attributes", returnAttributes, self.getPortfolioAttributes)
response = self.smarttFunction(filter(None, message))
return self.formatListOfDictsResponse(response, returnAttributes, self.getPortfolioAttributes)
getAvailableLimitsAttributes = [
"spot",
"option",
"margin"]
def getAvailableLimits(self, investmentCode = None, brokerageId = None, returnAttributes = None):
message = ["get_available_limits"]
message += self.formatString("investment_code", investmentCode, optional=True)
message += self.formatInteger("brokerage_id", brokerageId, optional=True)
message += self.formatAttributes("return_attributes", returnAttributes, self.getAvailableLimitsAttributes)
response = self.smarttFunction(filter(None, message))
return self.formatListOfDictsResponse(response, returnAttributes, self.getAvailableLimitsAttributes)
getSetupsAttributes = [
"name",
"code",
"initial_capital",
"slippage",
"absolute_brokerage_tax",
"percentual_brokerage_tax",
"position_trading_tax",
"position_liquidation_tax",
"position_register_tax",
"position_income_tax",
"position_withholding_income_tax",
"position_other_taxes",
"day_trade_trading_tax",
"day_trade_liquidation_tax",
"day_trade_regiter_tax",
"day_trade_income_tax",
"day_trade_withholding_income_tax",
"day_trade_other_taxes",
"iss_tax",
"custody_tax",
"lease_tax",
"income_tax_payment"]
def getSetups(self, code = None, returnAttributes = None):
message = ["get_setups"]
message += self.formatString("code", code, optional=True)
message += self.formatAttributes("return_attributes", returnAttributes, self.getSetupsAttributes)
response = self.smarttFunction(filter(None, message))
return self.formatDictResponse(response, returnAttributes, self.getSetupsAttributes)
updateSetupAttributes = [
"message"]
def updateSetup(self, code = None, name = None, newCode = None, initialCapital = None, slippage = None, absoluteBrokerageTax = None, percentualBrokerageTax = None, positionTradingTax = None, positionLiquidationTax = None, positionRegisterTax = None, positionIncomeTax = None, positionWithholdingIncomeTax = None, positionOtherTaxes = None, dayTradeTradingTax = None, dayTradeLiquidationTax = None, dayTradeRegiterTax = None, dayTradeIncomeTax = None, dayTradeWithholdingIncomeTax = None, dayTradeOtherTaxes = None, issTax = None, custodyTax = None, leaseTax = None, incomeTaxPayment = None):
message = ["update_setup"]
message += self.formatString("code", code, optional=False)
message += self.formatString("name", name, optional=True)
message += self.formatString("new_code", newCode, optional=True)
message += self.formatString("initial_capital", initialCapital, optional=True)
message += self.formatDecimal2("slippage", slippage, optional=True)
message += self.formatDecimal2("absolute_brokerage_tax", absoluteBrokerageTax, optional=True)
message += self.formatDecimal2("percentual_brokerage_tax", percentualBrokerageTax, optional=True)
message += self.formatDecimal2("position_trading_tax", positionTradingTax, optional=True)
message += self.formatDecimal2("position_liquidation_tax", positionLiquidationTax, optional=True)
message += self.formatDecimal2("position_register_tax", positionRegisterTax, optional=True)
message += self.formatDecimal2("position_income_tax", positionIncomeTax, optional=True)
message += self.formatDecimal2("position_withholding_income_tax", positionWithholdingIncomeTax, optional=True)
message += self.formatDecimal2("position_other_taxes", positionOtherTaxes, optional=True)
message += self.formatDecimal2("day_trade_trading_tax", dayTradeTradingTax, optional=True)
message += self.formatDecimal2("day_trade_liquidation_tax", dayTradeLiquidationTax, optional=True)
message += self.formatDecimal2("day_trade_regiter_tax", dayTradeRegiterTax, optional=True)
message += self.formatDecimal2("day_trade_income_tax", dayTradeIncomeTax, optional=True)
message += self.formatDecimal2("day_trade_withholding_income_tax", dayTradeWithholdingIncomeTax, optional=True)
message += self.formatDecimal2("day_trade_other_taxes", dayTradeOtherTaxes, optional=True)
message += self.formatDecimal2("iss_tax", issTax, optional=True)
message += self.formatDecimal2("custody_tax", custodyTax, optional=True)
message += self.formatDecimal2("lease_tax", leaseTax, optional=True)
message += self.formatString("income_tax_payment", incomeTaxPayment, optional=True)
response = self.smarttFunction(filter(None, message))
return unicode(response[0])
getFinancialTransactionsAttributes = [
"financial_transaction_id",
"investment_code",
"brokerage_id",
"datetime",
"contribution_or_withdrawal",
"value",
"operational_tax_cost",
"description"]
def getFinancialTransactions(self, financialTransactionId = None, investmentCode = None, brokerageId = None, returnAttributes = None):
message = ["get_financial_transactions"]
message += self.formatString("financial_transaction_id", financialTransactionId, optional=True)
message += self.formatString("investment_code", investmentCode, optional=True)
message += self.formatInteger("brokerage_id", brokerageId, optional=True)
message += self.formatAttributes("return_attributes", returnAttributes, self.getFinancialTransactionsAttributes)
response = self.smarttFunction(filter(None, message))
return self.formatDictResponse(response, returnAttributes, self.getFinancialTransactionsAttributes)
insertFinancialTransactionAttributes = [
"message"]
def insertFinancialTransaction(self, investmentCode = None, brokerageId = None, datetime = None, contributionOrWithdrawal = None, value = None, operationalTaxCost = None, description = None):
message = ["insert_financial_transaction"]
message += self.formatString("investment_code", investmentCode, optional=False)
message += self.formatInteger("brokerage_id", brokerageId, optional=True)
message += self.formatDatetime("datetime", datetime, optional=False)
message += self.formatBoolean("contribution_or_withdrawal", contributionOrWithdrawal, optional=False)
message += self.formatDecimal2("value", value, optional=False)
message += self.formatDecimal2("operational_tax_cost", operationalTaxCost, optional=False)
message += self.formatString("description", description, optional=True)
response = self.smarttFunction(filter(None, message))
return unicode(response[0])
updateFinancialTransactionAttributes = [
"message"]
def updateFinancialTransaction(self, financialTransactionId = None, investmentCode = None, brokerageId = None, datetime = None, contributionOrWithdrawal = None, value = None, operationalTaxCost = None, description = None):
message = ["update_financial_transaction"]
message += self.formatString("financial_transaction_id", financialTransactionId, optional=False)
message += self.formatString("investment_code", investmentCode, optional=True)
message += self.formatInteger("brokerage_id", brokerageId, optional=True)
message += self.formatDatetime("datetime", datetime, optional=True)
message += self.formatBoolean("contribution_or_withdrawal", contributionOrWithdrawal, optional=True)
message += self.formatDecimal2("value", value, optional=True)
message += self.formatDecimal2("operational_tax_cost", operationalTaxCost, optional=True)
message += self.formatString("description", description, optional=True)
response = self.smarttFunction(filter(None, message))
return unicode(response[0])
deleteFinancialTransactionsAttributes = [
"message"]
def deleteFinancialTransactions(self, financialTransactionId = None, investmentCode = None, brokerageId = None):
message = ["delete_financial_transactions"]
message += self.formatString("financial_transaction_id", financialTransactionId, optional=True)
message += self.formatString("investment_code", investmentCode, optional=True)
message += self.formatInteger("brokerage_id", brokerageId, optional=True)
response = self.smarttFunction(filter(None, message))
return unicode(response[0])
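
# Illustrative usage sketch; the credentials and stock code are placeholders,
# and only methods defined above are exercised.
if __name__ == "__main__":
    client = SmarttClient()  # connects to smartt.s10i.com.br over SSL
    client.login(s10iLogin="user", s10iPassword="secret")
    print client.getStock(stockCode="PETR4")  # dict keyed by getStockAttributes
    client.logout()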
|
UTF-8
|
Python
| false | false | 2,013 |
5,231,270,190,493 |
34e63e72329123bb55b86675325db8d833b20f81
|
6bbbd21fc98b7367169b4d74bf65283d42a24952
|
/cob_hwmonitor/src/db_test.py
|
92abb4b52e39442530484b905ee2ad735d348c62
|
[] |
no_license
|
uhr-eh/cob_driver_sandbox
|
https://github.com/uhr-eh/cob_driver_sandbox
|
47718cbd2921d6b3c75a1e3e24d9239bdff636d0
|
c0d4fee17b663d084a2430514079f476a3493c40
|
refs/heads/master
| 2021-01-15T21:34:46.581747 | 2012-04-13T11:29:55 | 2012-04-13T11:29:55 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
import sqlite3
db = sqlite3.connect('/home/uhr-eh/git/care-o-bot/cob_apps/cob_hwmonitor/db/hwmonitor.db')
cursor = db.cursor()
cursor.execute('select * from hwmonitor')
for row in cursor:
print row
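
# Tidy-up sketch: release the cursor and connection once the rows are printed.
cursor.close()
db.close()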
|
UTF-8
|
Python
| false | false | 2,012 |
11,252,814,322,943 |
6f5503187c09f5093100da6c6f78080f9ebb5837
|
2367f3728fb2c13efd74dcb03ddc4c50e8ecd7a4
|
/control/setup.py
|
53a779d28a0928c6c4ddf65c45daa1438cb00d8a
|
[] |
no_license
|
weallen/FlyTracker
|
https://github.com/weallen/FlyTracker
|
21d0412a67ec993da5fb1fb410ed16d440bde90b
|
76688cace0fd2e8590b6a5fdb92c130ec8f1c120
|
refs/heads/master
| 2020-05-18T15:54:41.111113 | 2013-05-04T11:54:00 | 2013-05-04T11:54:00 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from setuptools import setup, find_packages
setup(name="motmot.control",
description="Odor delivery and video acquisition synchronization for FView",
version="0.0.1",
packages=find_packages(),
author="Will Allen",
author_email="[email protected]",
url="",
entry_points = {
"motmot.fview.plugins" : "odor_control = motmot.control.control:OdorControl"
},
)
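
# Typical setuptools invocations for this plugin (illustrative):
#   python setup.py develop   # editable install while developing
#   python setup.py install   # install and register the motmot.fview entry point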
|
UTF-8
|
Python
| false | false | 2,013 |
7,627,861,949,067 |
9a55606ee6812d1c4008510a7b5bbc63a1dec980
|
70b44cdd17821591a489910dac3a89bc2153d5a9
|
/CassandraTransactionDemo.py
|
8c562018a8f0117cd6c8527c9c14590303ed5199
|
[
"Apache-2.0"
] |
permissive
|
st028/books
|
https://github.com/st028/books
|
02b5517f227ab2e88134e818add8111e20bb4b7e
|
81e5fa6773b3d0539daf5cecd36676597c8613a9
|
refs/heads/master
| 2015-08-14T14:29:21.396389 | 2014-12-05T21:45:02 | 2014-12-05T21:45:02 | 26,409,069 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import uuid
import Queue

from cassandra.cluster import Cluster
from cassandra.query import dict_factory
class CassandraTransaction:
DELETE = 0
INSERT = 1
SELECT = 2
UPDATE = 3
DEC = 4
def __init__(self):
self.trans = []
def add_delete(self, where):
self.trans.append((CassandraTransaction.DELETE, (where)))
def add_insert(self, items, keys):
self.trans.append((CassandraTransaction.INSERT, (items, keys)))
def add_select(self, cols, where=None, extra=None):
"""
cols is tuple, where is dict
"""
self.trans.append((CassandraTransaction.SELECT, (cols, where, extra)))
def add_update(self, col, newval, where):
"""
        col is the column name, newval is the new value to assign to it
"""
self.trans.append((CassandraTransaction.UPDATE, (col, newval, where)))
def add_conditional_decrement(self, col, decby, where):
"""
        decby is the amount to decrement col by; only applied while col >= decby
"""
self.trans.append((CassandraTransaction.DEC, (col, decby, where, lambda x: x[col] >= decby)))
def __iter__(self):
return self.trans.__iter__()
def __len__(self):
return len(self.trans)
def __getitem__(self, a):
return self.trans[a]
class CassandraTransactionProcessor:
def __init__(self, tablename):
self.t = tablename
def process_where(self, where):
query = " where "
sep = ""
for c in where:
query += sep + " " + c + "=" + str(where[c])
sep = " and "
return query
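
    # Example (illustrative): process_where({'id': 5}) yields " where  id=5".
    # Values are interpolated verbatim, so string literals must be pre-quoted
    # by the caller.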
def process_transaction(self, transaction):
"""
        returns a list of statement tuples of the form
        (query, where, needs_undo, fetch_previous_rows, statement_type, conditional)
"""
statements = []
for t, d in transaction:
if t == CassandraTransaction.DELETE:
where = d
query = "delete from " + self.t
query += self.process_where(where)
statements.append((query, where, True, True, 0, None))
elif t == CassandraTransaction.INSERT:
items, keys = d
query = "insert into " + self.t + " ("
sep = ""
for k in keys:
query += sep + k
sep = ", "
query += ")"
query += " values ("
sep = ""
for c in items:
query += sep + str(c)
sep = ", "
query += ")"
statements.append((query, (keys, items), True, False, 1, None))
elif t == CassandraTransaction.SELECT:
cols, where, extra = d
query = "select "
sep = ""
for c in cols:
query += sep + c
sep = ", "
query += " from " + self.t
if where != None:
query += self.process_where(where)
if extra != None:
query += " " + extra
statements.append((query, {}, False, False, -1, None))
elif t == CassandraTransaction.UPDATE:
col, newval, where = d
query = "update " + self.t
query += " set " + col + " = " + str(newval)
query += self.process_where(where)
statements.append((query, where, True, True, 2, None))
else:
col, decby, where, conditional = d
query = "update " + self.t
query += " set " + col + " = %s "
query += self.process_where(where)
statements.append((query, where, True, True, 3, (conditional, col, decby)))
return statements
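
    # Example of one emitted tuple (illustrative), for add_delete({'id': 5})
    # on table 'books_info':
    #   ("delete from books_info where  id=5", {'id': 5}, True, True, 0, None)
    # matching the (query, where, needs_undo, fetch_previous_rows,
    # statement_type, conditional) layout unpacked in run_transaction below.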
transactionQueue = Queue.Queue()
def submit_transaction(transact):
transactionQueue.put(transact)
def gtz(i):
return i['qty'] > 0
def run_transaction(tp, tablename, session, tablecols):
transaction = transactionQueue.get()
statements = tp.process_transaction(transaction)
print "****STATEMENTS****"
for s in statements:
print s[0]
undostack = []
i = 0
rollback = False
while i < len(statements) and not rollback:
query, where, undo, getprev, typ, cond = statements[i]
if undo:
rows = []
if getprev:
undoq = "select * from " + tablename
if where != None and len(where) > 0:
undoq += " where "
sep = ""
for w in where:
undoq += sep + w + " = " + str(where[w])
sep = " and "
try:
rows = session.execute(undoq)
except:
print "****FAILED TO PERFORM UNDO QUERY: '" + undoq + ".' ABOUT TO ROLLBACK****"
rollback = True
break
if typ == 0:
for row in rows:
undo = "insert into " + tablename + " ("
                    sep = ""
                    for col in tablecols:
                        undo += sep + str(col)
                        sep = ", "
                    undo += ")"
undo += " values ("
sep = ""
for key in row:
undo += sep + str(row[key])
sep = ", "
undo += ")"
undostack.append(undo)
elif typ == 1:
keys, items = where
undo = "delete from " + tablename + " where " + \
str(keys[0]) + "=" + str(items[0])
undostack.append(undo)
elif typ == 2:
for row in rows:
undo = "update " + tablename + " set "
sep = ""
for key in row:
undo += sep + str(key) + " = " + str(row[key])
sep = " and "
undo += " where id = " + row['id']
undostack.append(undo)
else:
condi, col, decby = cond
for row in rows:
undo = "update " + tablename + " set " + \
cond[1] + " = " + str(row[col]) + \
" where id = " + str(row['id'])
undostack.append(undo)
for row in rows:
if cond != None and not cond[0](row):
print "****CONDITIONAL FAILED ABOUT TO ROLL BACK****"
rollback = True
print "****EXECUTING****", query
if not rollback:
if cond == None:
session.execute(query)
else:
condi, col, decby = cond
session.execute(query, (rows[0][col] - decby,))
i += 1
if rollback:
while len(undostack) > 0:
undo = undostack.pop()
print "****ROLLBACK****", undo
session.execute(undo)
transactionQueue.task_done()
print "****CHANGES COMMITTED****"
print
def transaction_test():
tp = CassandraTransaction()
tp.add_insert([uuid.uuid4(), "'foof'", 100.0, 1, "'barf'"], ['id', 'author', 'price', 'qty', 'title'])
tp.add_select(['*'], {'title': "'foo'"})
tp.add_update('title', '"foo2"', {'id': 1})
submit_transaction(tp)
def transaction_test2():
tp = CassandraTransaction()
tp.add_insert([uuid.uuid4(), "'foof'", 100.0, 1, "'barf'"], ['id', 'author', 'price', 'qty', 'title'])
tp.add_conditional_decrement('qty', 1, {'id': 'bbc43f49-2a44-4597-81ea-2f736c56c522'})
submit_transaction(tp)
def transaction_test3():
tp = CassandraTransaction()
tp.add_insert([uuid.uuid4(), "'foof'", 100.0, 1, "'barf'"], ['id', 'author', 'price', 'qty', 'title'])
tp.add_conditional_decrement('qty', 67, {'id': 'bbc43f49-2a44-4597-81ea-2f736c56c522'})
submit_transaction(tp)
if __name__=='__main__':
print
cluster = Cluster()
session = cluster.connect()
session.row_factory = dict_factory
session.set_keyspace('excelsior')
transaction_test()
run_transaction(CassandraTransactionProcessor('books_info'), 'books_info', session, ['id', 'author', 'price', 'qty','title'])
transaction_test2()
run_transaction(CassandraTransactionProcessor('books_info'), 'books_info', session, ['id', 'author', 'price', 'qty','title'])
transaction_test3()
run_transaction(CassandraTransactionProcessor('books_info'), 'books_info', session, ['id', 'author', 'price', 'qty','title'])
|
UTF-8
|
Python
| false | false | 2,014 |
18,245,021,084,814 |
3b1639fff53b905b6f17e43fb146bd4fad93f5ba
|
4db49b9320409b92f874a0121cd71e561f63322e
|
/add_to_index.py
|
fd4c16516f5c41398211943fd4c25ec94a88572d
|
[] |
no_license
|
pranavmishra/Udacity
|
https://github.com/pranavmishra/Udacity
|
b56552c16d89b48023cb93ff5838bbebb63120f9
|
e0f42b66789e12e2607db6d0b7dd4b7a822f2661
|
refs/heads/master
| 2021-01-13T11:58:46.239031 | 2013-07-27T18:31:49 | 2013-07-27T18:31:49 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Define a procedure, add_to_index,
# that takes 3 inputs:
# - an index: [[<keyword>,[<url>,...]],...]
# - a keyword: String
# - a url: String
# If the keyword is already
# in the index, add the url
# to the list of urls associated
# with that keyword.
# If the keyword is not in the index,
# add an entry to the index: [keyword,[url]]
index = []
def add_to_index(index,keyword,url):
element_num = 0
keyword_in_index = False
while element_num < len(index):
element = index[element_num]
if keyword in element:
element[1].append(url)
keyword_in_index = True
element_num = element_num + 1
if keyword_in_index == False:
index.append([keyword, [url]])
return index
def add_page_to_index(index,url,content):
new_keywords = content.split()
for keyword in new_keywords:
add_to_index(index, keyword, url)
#add_to_index(index,'udacity','http://udacity.com')
#add_to_index(index,'computing','http://acm.org')
#add_to_index(index,'udacity','http://npr.org')
#print index
#>>> [['udacity', ['http://udacity.com', 'http://npr.org']],
#>>> ['computing', ['http://acm.org']]]
|
UTF-8
|
Python
| false | false | 2,013 |
14,018,773,286,694 |
076fa9c0ab8e4913f8d88922f5a5e16ae4e8dc7e
|
cb6cca0f19484f888134e4ffae350976ef2de833
|
/example/client.py
|
bd4fdfb9bc2e0acc66d09c768e9f77e170bd1be2
|
[] |
no_license
|
binarybison/schedcat
|
https://github.com/binarybison/schedcat
|
e5c99e6cfa00c107c95f2e49fd1e244710dd64c9
|
2a34920fd88a7987e5b75c01210a33bd455566c7
|
refs/heads/master
| 2021-01-15T18:31:59.701450 | 2014-11-13T19:20:08 | 2014-11-13T19:20:08 | 4,034,777 | 0 | 0 | null | true | 2019-04-21T22:52:33 | 2012-04-15T20:22:45 | 2014-08-04T20:06:39 | 2014-11-13T19:20:21 | 656 | 1 | 1 | 0 |
C++
| false | false |
#!/usr/bin/env python
from __future__ import division
import argparse
import random
import math
import time
from twisted.spread import pb
from twisted.internet import reactor
from copy import deepcopy
import schedcat.mapping.binpack as bp
import schedcat.model.resources as resources
import schedcat.generator.tasks as tasks
import schedcat.locking.bounds as bounds
import schedcat.sched.fp as fp
from schedcat.model.tasks import SporadicTask, TaskSystem
from schedcat.generator.tasksets import NAMED_PERIODS, NAMED_UTILIZATIONS
from schedcat.distributor.stats import BernoulliEstimator
from schedcat.distributor.rpc import SchedulabilityClient
INFINITY = 999999
class DesignPointFactory(object):
def build_design_point(self, trials, **levels):
return DesignPointRunner(trials, **levels)
class DesignPointRunner(object):
metrics = ["OMLP", "FMLP"]
tests = []
def __init__(self, trials, **levels):
self.__dict__.update(levels)
self.trials = trials
self.levels = levels
self.data = dict([(m, BernoulliEstimator()) for m in DesignPointRunner.metrics])
def run(self):
start = time.clock()
for _ in xrange(self.trials):
org_ts = self.create_task_set()
for test in DesignPointRunner.tests:
ts = deepcopy(org_ts)
test(self, ts)
elapsed = time.clock() - start
return (elapsed, dict([(k,v.mean) for (k,v) in self.data.iteritems()]))
def fmlp(self, ts):
# overapproximation needed for blocking-bound calculations
for t in ts:
t.response_time = t.period
bounds.apply_part_fmlp_bounds(ts)
res = fp.is_schedulable(1, ts)
self.data["FMLP"].add_sample(res)
return res
tests.append(fmlp)
def omlp(self, ts):
# overapproximation needed for blocking-bound calculations
for t in ts:
t.response_time = t.period
bounds.apply_clustered_omlp_bounds(ts, 1)
res = fp.is_schedulable(1, ts)
self.data["OMLP"].add_sample(res)
return res
tests.append(omlp)
def create_task_set(self):
tg = tasks.TaskGenerator(period = NAMED_PERIODS[self.period],
util = NAMED_UTILIZATIONS[self.task_util])
ts = tg.make_task_set(max_util = self.sys_util, squeeze = True)
resources.initialize_resource_model(ts)
for t in ts:
t.resmodel[0].add_request(10)
bounds.assign_fp_preemption_levels(ts)
for i, partition in enumerate(bp.worst_fit(ts, self.processors,
weight = SporadicTask.utilization,
capacity = INFINITY,
empty_bin = TaskSystem)):
for t in partition:
t.partition = i
return ts
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-p', "--port", default=1234,
help = "Port on which to connect to server")
parser.add_argument('-s', "--server", default="localhost",
help = "Server from which to pull design points")
args = parser.parse_args()
dpfactory = DesignPointFactory()
SchedulabilityClient(dpfactory, args.server, args.port).start()
if __name__ == '__main__':
main()
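
# Illustrative invocation (hostname is a placeholder):
#   python client.py --server dp-server.example.com --port 1234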
|
UTF-8
|
Python
| false | false | 2,014 |
1,649,267,485,813 |
b40c6a77fe9a39e0e86db8e148b8989c871348ce
|
c0f596506eb931b1cf1cdac6d0deabae00d65147
|
/opencv_programs/Pictures/raspicam.py
|
47f256d8bec60af30ece713d18f1c39675b03d9a
|
[] |
no_license
|
bzhao41/RaspberryPi2.0
|
https://github.com/bzhao41/RaspberryPi2.0
|
b8c430c21c0e7ab0a16dcfca5a683966a2907711
|
a9ab55f5a87ec7a5b5a1a04d519be82eedb419da
|
refs/heads/master
| 2020-04-23T04:07:02.531351 | 2013-09-12T18:32:43 | 2013-09-12T18:32:43 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
'''
@author Jeremy Barr
@date 5/15/2013
@brief This script will serve as a prototype for a class
to capture images from the new Raspberry Pi Camera Module
version 1.1: 7/11/2013
-refined raspicam options
-added raspivid options
'''
import os
from subprocess import call
import cv
'''
sample code:
# capture still images from camera module
call (["raspistill -o image.jpg"], shell=True)
# capture 5s video (default) from the camera module
call (["raspivid -o video.h264"], shell=True)
# capture 10s video from the camera module
call (["raspivid -o video.h264 -t 10000"], shell=True)
# capture 10s video from the camera module in demo mode
call (["raspivid -o video.h264 -t 10000 -d"], shell=True)
'''
class RaspiCam:
"""RaspiCam class used to capture images and video from
the Raspberry Pi Camera Board Module"""
"""Define class variables"""
''' these are the default raspistill image parameters
For Complete list of commands enter:
raspistill | less
raspivid | less
'''
'''-- raspistill 2592x1944 --'''
height = 1944 # default height of 1944 pixels
width = 2592 # default width of 2592 pixels
exposure = "off"
AWB = "off"
ifx = "none"
filename = "output.jpg"
SAVE_DIR = os.path.dirname(os.path.abspath(__file__)) # default is current dir
metering = None # average, spot, backlite, matrix
vf = False # Flip vertically
hf = False # Flip horizontally
'''-- raspivid --'''
    # TODO: expose raspivid options; none are configurable yet
"""END of class variables"""
def __init__(self):
print "RPI Cam Initialized"
def piCapture(self, time=0):
''' capture still images from camera module
Must have 'sudo' to write output file to directory
'''
        # '-n' suppresses the preview window; -vf/-hf flips are appended below when enabled
command = "sudo raspistill -n -o %s/%s -ex %s -t %d" % (self.SAVE_DIR,self.filename,self.exposure,time)
# if Metering Mode is on then add to command list
if (self.metering != None):
command += str(" -mm %s" % self.metering)
        if (self.height != 1944):   # 1944 is the default height (see class attributes)
            command += str(" -h %d" % self.height)
        if (self.width != 2592):    # 2592 is the default width
            command += str(" -w %d" % self.width)
        if self.vf:
            command += str(" -vf")
        if self.hf:
            command += str(" -hf")
print "command: ", command # print the full command in terminal
call ([command], shell=True)
#call (["sudo raspistill -o output.jpg -hf -t %d" % time], shell=True)
#print "Image Captured as %s" % self.filename
# OpenCV commands to return output image as CvCapture structure
rPic = cv.LoadImage(self.filename, 1) # 1 = CV_LOAD_IMAGE_COLOR,
# 2 = CV_LOAD_IMAGE_GRAYSCALE,
# 3 = CV_LOAD_IMAGE_UNCHANGED
#rCapture = cv.CaptureFromFile(self.filename)
#rPic = cv.QueryFrame(rCapture)
return rPic
def piVideo(self,time=0):
''' capture 5s video (default) from the camera module
or input a video duration (time) in milliseconds.
'''
        if time>0:
            # raspivid expects the timeout in milliseconds; report it in seconds
            print "Capturing Video for "+str(time/1000)+" seconds"
            call (["sudo raspivid -o video_out.h264 -t %d" % time], shell=True)
        else:
            print "Capturing Video for 5 seconds (default)"
            call (["raspivid -o video_out.h264"], shell=True)
|
UTF-8
|
Python
| false | false | 2,013 |
9,431,748,227,045 |
625f98e8ac9bd3461302ca2fad7af2cab1c9da7e
|
30ecbda5e452d34598a29a3893fd0831152e8eca
|
/tests/receipt_tests.py
|
70cefe902f01dc6345a58cfb431018dce02ebb6c
|
[
"BSD-3-Clause"
] |
permissive
|
jhooey/shopping-cart-trends
|
https://github.com/jhooey/shopping-cart-trends
|
ce767df0687ad36a257fc46b2ed5f9e2af4ed336
|
e2ee65c2cd1f95942000175479a6666459dff854
|
refs/heads/master
| 2021-01-19T20:19:27.879999 | 2014-03-24T19:43:50 | 2014-03-24T19:43:50 | 14,719,130 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from nose.tools import *
import unittest
import shoppingtrends.receipt as receipt
from shoppingtrends.receipt import Receipt, Item
from shoppingtrends.localization import Province, Store
class test_Item(unittest.TestCase):
def setUp(self):
self.bananas = receipt.Item('Bananas', 0.79, 1.8)
self.pears = receipt.Item('Pears', 1.49, 4)
self.napkins = receipt.Item('Napkins', 2.0, 1, True)
def tearDown(self):
self.bananas = None
self.pears = None
self.napkins = None
def test_Item_total_cost(self):
#Item without tax, but tax still has to be passed
self.assertAlmostEqual(self.bananas.total_cost(13.0),1.422)
#Item with tax
self.assertAlmostEqual(self.napkins.total_cost(13.0), 2.26)
class test_Receipt(unittest.TestCase):
def setUp(self):
self.quebec = Province("Quebec", "QC", 13)
self.loblaws = Store("Loblaws", self.quebec)
self.loblaws_receipt = Receipt(self.loblaws)
self.bananas = Item('Bananas', 0.79, 1.8)
self.napkins = Item('Napkins', 2.0, 1, True)
def tearDown(self):
self.quebec = None
self.loblaws = None
self.loblaws_receipt = None
self.bananas = None
self.pears = None
self.napkins = None
def test_Receipt_add_remove_item(self):
assert not self.loblaws_receipt.items
self.loblaws_receipt.add_item(self.bananas)
assert self.loblaws_receipt.items
self.loblaws_receipt.remove_item_by_name(self.bananas.name)
assert not self.loblaws_receipt.items
def test_Receipt_total(self):
self.assertAlmostEqual(self.loblaws_receipt.total(),0.0)
self.loblaws_receipt.add_item(self.bananas)
self.assertAlmostEqual(self.loblaws_receipt.total(),1.422)
self.loblaws_receipt.add_item(self.napkins)
self.assertAlmostEqual(self.loblaws_receipt.total(),3.682)
self.loblaws_receipt.remove_item_by_name(self.bananas.name)
self.assertAlmostEqual(self.loblaws_receipt.total(),2.26)
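
# The suite can be run through nose (imported above) or plain unittest:
#   $ nosetests tests/receipt_tests.py
#   $ python -m unittest tests.receipt_tests
if __name__ == '__main__':
    unittest.main()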
|
UTF-8
|
Python
| false | false | 2,014 |
17,179,909,032 |
6086b586d1d8bf75ffbbab2fbb1ab1f1bbb774d4
|
2385224017b86818d2bbd6b981cc2a30f059c851
|
/get_lyrics.py
|
c8ec29d4334030a0c16234b5dd52e13db24f65e1
|
[] |
no_license
|
pariweshsubedi/lyrics-grabber-python
|
https://github.com/pariweshsubedi/lyrics-grabber-python
|
0965e42121a20cc98133f63c265692243c7432b3
|
10edaeeaeb8f8016c56f799704f12c849773cbdd
|
refs/heads/master
| 2021-01-16T19:18:27.367305 | 2014-10-23T09:18:15 | 2014-10-23T09:18:15 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os,sys,re,StringIO,urllib2
from lxml import etree
global path
def call_eyed3(f):
try:
import eyeD3
tag = eyeD3.Tag()
tag.link(f)
artist = tag.getArtist().lower()
album = ''.join(tag.getAlbum())
title = re.sub('[\(\)\{\}\,\.<>]', '', ''.join(tag.getTitle().split(' '))).lower()
print "\n\nArtist: " + artist + " Title: "+ title
get_lyrics(artist,title)
except:
print "tag recognition error"
exit()
def get_lyrics(artist,title):
generate_url = 'http://azlyrics.com/lyrics/'+artist+'/'+title +'.html'
print generate_url
processing(generate_url, artist, title)
def processing(generate_url, artist, title):
try :
print "Fetching lyrics to "+ path + "lyrics/"+artist + '_' + title + '.txt'
print "from " + generate_url
response = urllib2.urlopen(generate_url)
read_lyrics = response.read()
parser = etree.HTMLParser()
tree = etree.parse(StringIO.StringIO(read_lyrics), parser)
lyrics = tree.xpath('''//div[@style='margin-left:10px;margin-right:10px;']/text()''')
printing(artist, title, lyrics)
except:
"Error fetching from web."
def printing(artist, title, lyrics):
    for words in lyrics:
        print str(words).strip()
    saving(artist, title, lyrics)
def saving(artist, title, lyrics):
filename = path + "lyrics/"+artist + '_' + title + '.txt'
f = open(filename, 'w')
f.write("\n".join(lyrics).strip())
f.close()
if len(sys.argv)==1: #for the working directory
path = os.path.abspath(__file__)
print path
path = path.split("/")[:-1]
path = '/'.join(path)+"/"
files = next(os.walk(path))[2]
elif len(sys.argv)==2:
path = os.path.abspath(__file__) #for single audio file
path = path.split("/")[:-1]
path = '/'.join(path)+"/"
single_file = sys.argv[1]
call_eyed3(single_file)
exit()
elif (sys.argv[1])=='-d' and len(sys.argv)==3: #for a directory
path = sys.argv[2]
print path
files = next(os.walk(path))[2]
else:
print "\n Usage: \n \n 1) python get_lyrics.py [-d] [path to audio directory] \n 2) python get_lyrics.py filename \n "
exit()
for f in files:
print f
    call_eyed3(os.path.join(path, f))  # pass the full path so eyeD3 works outside the cwd
|
UTF-8
|
Python
| false | false | 2,014 |
4,896,262,760,206 |
eaec164a5e8598e6f648cd23a448fa0b726fd996
|
9412f4ba84f6b54f67c0d6534ab3804fa621bee8
|
/faq/urls.py
|
52e219e063c71a5be6e054f89cc6c02f7d3af338
|
[] |
no_license
|
ruspython/adler-m
|
https://github.com/ruspython/adler-m
|
5fbeb44d1a5187d481391e49d6cca86b69d14b7a
|
c9b27ee7c1794c4632742887599545893621a58d
|
refs/heads/master
| 2020-12-24T14:45:16.535606 | 2014-12-01T10:55:34 | 2014-12-01T10:55:34 | 31,331,474 | 4 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.conf.urls import patterns, url
from .views import *
from django.views.generic import TemplateView
urlpatterns = patterns('',
url(r'^$', FaqListView.as_view()),
url(r'^add/$', FaqCreateView.as_view(), name='create_faq'),
url(r'^faq_add_success/$',
TemplateView.as_view(template_name='faq/faq_success.html'),
name='faq_add_success'),
)
|
UTF-8
|
Python
| false | false | 2,014 |
8,564,164,822,293 |
5218a51f82430d59dfec6534d13cd1136e6df960
|
3332974bd18221f4718b052f27311e715e9d3e85
|
/piebot/modules/urldupe.py
|
e962b26c72b7dd843cb0c71a0f9502dcc99557c8
|
[
"MIT"
] |
permissive
|
klnusbaum/piebot
|
https://github.com/klnusbaum/piebot
|
c829bb785dc1f541e639cc85f1dfcb614f3bf4b0
|
a254459af73475dd321c5bd9188ac9d9e7bb667d
|
refs/heads/master
| 2020-12-25T22:57:55.677427 | 2014-09-29T21:56:01 | 2014-09-29T21:56:01 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""Checks if someone has posted a link already and yells at them.
@package ppbot
"""
import re
import time
from datetime import datetime
from dateutil.relativedelta import relativedelta
from piebot.modules import *
from piebot.db import db
class Urldupe(Module):
youtube_pattern = re.compile('(?:youtube.com/watch).*?v=([a-zA-Z0-9_-]+)')
url_pattern = re.compile('http(?:s|)://[^ #]+')
def __init__(self, *args, **kwargs):
"""Constructor"""
Module.__init__(self, kwargs=kwargs)
def _register_events(self):
self.add_event('pubmsg', 'urldupe')
def get_dupes(self, url, username, channel):
"""Checks if there is a dupe url already"""
params = {
'channel': channel,
'username': {
'$ne': username
}
}
m = Urldupe.youtube_pattern.search(url)
if m:
youtube_id = m.group(1)
else:
youtube_id = ''
params['$or'] = [{
'url': url
}, {
'youtube_id': youtube_id
}]
return self.db.urls.find(params)
def save_url(self, url, username, channel):
data = {'url': url,
'username': username,
'channel': channel,
'time': datetime.now()}
m = Urldupe.youtube_pattern.search(url)
if m:
data['youtube_id'] = m.group(1)
self.db.urls.insert(data)
def urldupe(self, event):
"""Action to react/respond to chat messages."""
m = Urldupe.url_pattern.search(event['message'])
if m:
match = m.group(0).rstrip('/')
# check if this url has been posted before
dupes = self.get_dupes(match, event['nick'], event['target'])
if dupes.count() == 1:
rd = relativedelta(datetime.now(), dupes[0]['time'])
message = "%s: That url was already linked by \x02%s\x02 %s ago." % (
event['nick'],
dupes[0]['username'],
self.pretty_time_duration(rd)
)
self.reply(message)
elif dupes.count() > 1:
rd = relativedelta(datetime.now(), dupes[0]['time'])
message = ("%s: That url has been linked %d times already"
" (first linked by \x02%s\x02 %s ago).") % (
event['nick'],
dupes.count(),
dupes[0]['username'],
self.pretty_time_duration(rd)
)
self.reply(message)
# add to database
self.save_url(match, event['nick'], event['target'])
def pretty_time_duration(self, rd):
"""Formats the time difference in a pretty string"""
output = ''
delta = {'years': rd.years,
'months': rd.months,
'days': rd.days,
'hours': rd.hours,
'minutes': rd.minutes,
'seconds': rd.seconds}
if rd.years > 1:
output += '%(years)d years '
elif rd.years > 0:
output += '%(years)d year '
elif rd.months > 1:
output += '%(months)d months '
elif rd.months > 0:
output += '%(months)d month '
elif rd.days > 1:
output += '%(days)d days '
elif rd.days > 0:
output += '%(days)d day '
elif rd.hours > 1:
output += '%(hours)d hours '
elif rd.hours > 0:
output += '%(hours)d hour '
elif rd.minutes > 1:
output += '%(minutes)d minutes '
elif rd.minutes > 0:
output += '%(minutes)d minute '
elif rd.seconds > 1:
output += '%(seconds)d seconds '
elif rd.seconds > 0:
output += '%(seconds)d second '
return output.strip() % delta
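
# Illustrative behavior of pretty_time_duration (values assumed): for a delta
# of exactly one day, relativedelta yields rd.days == 1 and the elif chain
# above formats it as "1 day". Note the chain deliberately reports only the
# single largest non-zero unit rather than a full breakdown.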
|
UTF-8
|
Python
| false | false | 2,014 |
12,257,836,683,037 |
3c6c782f01de029df58590a65ac699e5cbad24b6
|
3fb1ed761ef05dec38cb650295ba26b96879bcd6
|
/www/inventory/create_file_vendor.py
|
a407727d38b306d2c818cd180f400c1415dd2d10
|
[] |
no_license
|
eshanbhatt/master
|
https://github.com/eshanbhatt/master
|
8e983cb8b318d79516740c43ac9f9b4acc36ae6d
|
1dd5617db9dfa7616a8d872e595f313fb5188d5a
|
refs/heads/master
| 2016-09-08T14:23:20.896376 | 2014-07-18T13:07:13 | 2014-07-18T13:07:13 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
__author__ = 'snehil'
##create individual files for vendor
##send mail with the inventory list
import csv
from classes.db import *
import xlwt
from classes.logs import *
from Constants import *
from datetime import datetime
from classes.Mail_attachment import *
try:
time=datetime.now().strftime("%Y%m%d%H%M%S")
display_time=datetime.now().strftime("%m-%d-%Y %H:%M:%S")
logging.info("Script Started at : - "+datetime.now().strftime("%d-%m-%Y %H:%M:%S"))
vendor_code=''
z=0
item_skuCode=[]
vendor_code=[]
def find_email(a):
cur2.execute("select vc.vendor_code, vc.email as email,vd.name from vendor_detail vd join vendor_contact vc on vd.code=vc.vendor_code \
where vc.contact_type=4 and vc.vendor_code='"+a+"' group by vc.vendor_code")
for row in cur2.fetchall():
return row[1],row[2]
try:
with open(Constants.originals_folder+'Inventory Snapshot.csv', 'rb') as csvread:
spamreader = csv.reader(csvread, delimiter=',')
for csvrow in spamreader:
if z!=0:
item_skuCode=csvrow[0].split("-")
#print csvrow[0]," ",item_skuCode
vendor_code.append(item_skuCode[0])
else:
z=z+1
except Exception:
#logging.info("File not found")
print ""
unique_vendor_code=list(set(vendor_code))
print unique_vendor_code
supc=[]
z=0
vendor_code1=[]
new1=[]
for a in unique_vendor_code:
try:
workbook = xlwt.Workbook(encoding = "ISO-8859-1")
sheet = workbook.add_sheet("Sheet1")
sheet.write(0,0,"Vendor Code")
sheet.write(0,1,"Supc")
sheet.write(0,2,"Inventory")
row_count=1
print a
with open(Constants.originals_folder+'Inventory Snapshot.csv', 'rb') as csvread:
spamreader = csv.reader(csvread, delimiter=',')
for csvrow in spamreader:
if z!=0:
supc=csvrow[0].split("-")
vendor_code=supc[0]
if len(supc)==1:
new='Empty'
else:
new=supc[1]
if a==vendor_code:
add=int(csvrow[1])+int(csvrow[2])
try:
sheet.write(row_count, 0, vendor_code)
sheet.write(row_count, 1, new)
sheet.write(row_count, 2, add)
row_count =row_count+1
except Exception:
""
add=''
z=z+1
workbook.save(Constants.downloads_folder+a+'_'+time+'.xls')
email_id,name=find_email(a)
print email_id,name
send_mail('template',email_id,name,display_time,a+'_'+time+'.xls')
#logging.info("mail sent to::"+a)
except Exception:
#logging.info("Data corrupted /mail not sent to vendor code::"+a)
print ""
logging.info("Script Completed at : - "+datetime.now().strftime("%d-%m-%Y %H:%M:%S"))
except Exception:
#logging.info("Error in create_file_vendor.py")
print ""
|
UTF-8
|
Python
| false | false | 2,014 |
15,461,882,312,092 |
9716c818e4e0556b673d5a7bda9910ffedc0d2df
|
922e9c174f764df4ff6713792794d5cc2ce709af
|
/manifold_clustering.py
|
70b3621e124ec331ca1aca814e11d3df57ee4cc3
|
[] |
no_license
|
ysong07/neuro_coding
|
https://github.com/ysong07/neuro_coding
|
6e7045bf33b0a36edf49f694643a3b273a278262
|
00831641d833a14d90cca211933f6bbe480c611b
|
refs/heads/master
| 2021-03-12T20:37:18.614429 | 2014-02-07T04:34:12 | 2014-02-07T04:34:12 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from time import time
import numpy as np
import pylab as pl
from matplotlib import offsetbox
from sklearn import (manifold, datasets, decomposition, ensemble, lda,
random_projection,mixture)
from sklearn.hmm import GaussianHMM
import delay_map_clustering as dmc
import math
import clustering_delay_maps as cdm
import scipy.io as scipy_io
import scipy.cluster.vq as scivq
if __name__ == '__main__':
#DP
# dp_components = 25 and alpha = 10 => best so far in terms of coherence
# alpha => 1 and .1 is also quite good
dp_components = (30, 20)
alpha = (0.1, 1)
n_iter = 10000
n_neighbors = 30
delay,energy,mask = dmc.load_data_40_maps()
all_correlation = dmc.load_raw_correlation('all_correlation_40.mat')
X = all_correlation
X_iso = manifold.Isomap(n_neighbors, n_components=5).fit_transform(X)
# w_correlation = scivq.whiten(X_iso);
# print(w_correlation.shape)
(aic, bic), dp_indices,labels = cdm.dirichlet_process_clustering(X_iso, dp_components[0], a=alpha[0], n_iter=n_iter)
scipy_io.savemat('data_40_Dirichlet_correlation_isomap_label',{'labels':labels})
print(labels.shape)
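
# Pipeline recap: raw pairwise correlations -> 5-component Isomap embedding
# (30 neighbours) -> Dirichlet-process clustering (30 components, alpha=0.1,
# 10000 iterations) -> labels saved to a .mat file for MATLAB post-processing.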
|
UTF-8
|
Python
| false | false | 2,014 |
18,631,568,135,967 |
ee297114b9bbecffa39a6f323641573e7288e92c
|
5c97dc02483063f6e66f3aefc05f54a5ce33a600
|
/jne/pipelines.py
|
d73b90b5834c47be1a8090c83dd0a2618261869d
|
[] |
no_license
|
HackSpacePeru/candidos2014
|
https://github.com/HackSpacePeru/candidos2014
|
249176ef6e4d68232fd81a2186f1898db5737162
|
938212ff31e42852c41e4cf1621e15044da1d265
|
refs/heads/master
| 2021-03-12T20:09:04.637397 | 2014-07-20T01:40:53 | 2014-07-20T01:40:53 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
from scrapy.contrib.exporter import CsvItemExporter
from jne.items import CandidatoItem, ExperienciaLaboralItem
class JnePipeline(object):
candidato_filename = './output/candidato.csv'
experiencia_laboral_filename = './output/experiencia_laboral.csv'
def __init__(self):
self.candidato_file = open(self.candidato_filename, 'wb')
self.candidato_exporter = CsvItemExporter(self.candidato_file)
self.experiencia_laboral_file = open(self.experiencia_laboral_filename, 'wb')
self.experiencia_laboral_exporter = CsvItemExporter(self.experiencia_laboral_file)
def process_item(self, item, spider):
if isinstance(item, CandidatoItem):
self.candidato_exporter.export_item(item)
if isinstance(item, ExperienciaLaboralItem):
self.experiencia_laboral_exporter.export_item(item)
return item
|
UTF-8
|
Python
| false | false | 2,014 |
4,380,866,645,619 |
82b7aa20746757bd1a82d4f114f0b2a2da908bbb
|
d5464985c22c0ea937929d18d1599f4d369ffbf0
|
/pressgang/core/decorators.py
|
b7dddf77eb5dec30852c3f327ffd8a95f79020d9
|
[
"BSD-2-Clause"
] |
permissive
|
cilcoberlin/pressgang
|
https://github.com/cilcoberlin/pressgang
|
f67e3182ea88a53ea4b10ab051a1c1f367a523bf
|
24e354769841dd6962f38cef3bd45f8297b040c6
|
refs/heads/master
| 2021-01-01T18:12:50.346476 | 2012-07-17T16:47:18 | 2012-07-17T16:47:18 | 2,030,853 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.utils.http import urlquote
from django.utils.translation import ugettext_lazy as _
from pressgang.core.exceptions import PressGangConfigurationError
from pressgang.core.models import Blog
from pressgang.core.views import NEXT_PAGE_PARAM
from functools import wraps
def admin_info_required(view_function):
"""
A decorator that should be applied to any view that performs an operation
on a single blog that requires a valid admin username and password.
The decorated view must receive a `blog_id` kwarg for this decorator to work.
If the view does not provide this, a PressGangConfigurationError is raised.
"""
def wrapper(request, *args, **kwargs):
# Verify that the decorator is placed on a view that uses a blog ID
if _BLOG_ID_KWARG not in kwargs:
raise PressGangConfigurationError(_("Any view decorated with %(decorator)s must receive a blog ID via the kwarg %(kwarg)s") % {'decorator': 'admin_info_required', 'kwarg': _BLOG_ID_KWARG})
# If the blog referenced can't be found, just return the view's response,
# so that it can produce a 404 if it wishes to
response = view_function(request, *args, **kwargs)
try:
blog = Blog.objects.get(pk=kwargs[_BLOG_ID_KWARG])
except Blog.DoesNotExist:
return response
# If the blog does not have admin information defined, redirect to a
# page that asks the user for this information
if not blog.admin_user or not blog.admin_password:
return HttpResponseRedirect("%(url)s?%(param)s=%(next)s" % {
'url': reverse('pressgang:get-admin-info', kwargs={'blog_id': blog.pk}),
'param': NEXT_PAGE_PARAM,
'next': urlquote(request.get_full_path())
})
return response
return wraps(view_function)(wrapper)
_BLOG_ID_KWARG = "blog_id"
|
UTF-8
|
Python
| false | false | 2,012 |
19,275,813,261,396 |
e3d16eded5cf87176cef484db54b855a00fbc163
|
41b7eb743693a5944cc81a99751009e99b5a2d38
|
/gestion_produits/models.py
|
fe8be97e1135158063864ac79e76bff9243a82c3
|
[] |
no_license
|
crichon/tx
|
https://github.com/crichon/tx
|
92f10b7644c5a8e27a8d7a91f56724ddf79199d4
|
6d4d036b9c4b0acdef69a811db7bf23674e05cda
|
refs/heads/master
| 2020-04-06T04:10:04.471511 | 2014-12-13T14:02:53 | 2014-12-13T14:02:53 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf8 -*-
from django.db import models
class Category(models.Model):
name = models.CharField(u'categorie', max_length=50)
class Meta:
verbose_name = u'Catégorie'
ordering = ["name"]
def __unicode__(self):
return self.name
class Supplier(models.Model):
adress = models.CharField(u'adresse', max_length=100)
name = models.CharField(u'nom', max_length=50)
website = models.CharField(u'site web/catalogue', max_length=50, null=True, blank=True)
class Meta:
verbose_name = u'Fournisseur'
ordering = ["name"]
def __unicode__(self):
return self.name
class Item(models.Model):
category = models.ForeignKey(Category, null=True, blank=True, verbose_name=u'Catégories')
supplier = models.ForeignKey(Supplier, verbose_name=u'Fournisseur')
ref = models.CharField(u'référence', max_length=50)
name = models.CharField(u'identifiant', max_length=50)
quantity = models.CharField(u'quantité par unité de vente', max_length=50)
place = models.CharField(u'lieu de stockage', max_length=50)
stockage_modality = models.CharField(u'modalité de stockage', max_length=50, null=True, blank=True)
current_stock = models.IntegerField(verbose_name=u'Quantité en stock', default=0)
class Meta:
verbose_name = u'produit'
ordering = ["name"]
def __unicode__(self):
return self.name + u', ref: ' + self.ref + u' (' + self.category.name + u')'
|
UTF-8
|
Python
| false | false | 2,014 |
11,845,519,807,990 |
95c9f407fb2519b72bd067c36358d9a38571e4cf
|
3269e7efc803a5deda4c32f1cad001a3cd5dd10f
|
/models.py
|
32d813407678374f9de2e3afc3956e1e091075fb
|
[] |
no_license
|
ursumarius/first_web_app_marius
|
https://github.com/ursumarius/first_web_app_marius
|
417424fb3e6a8e0341008b583c9db48e1dfd7f69
|
3329bfbf52a725a2b6b89366b8a23f6d8b6d9242
|
refs/heads/master
| 2016-09-06T12:05:37.796909 | 2014-11-26T08:44:03 | 2014-11-26T08:44:03 | 39,856,215 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
import re
import utilities_mu
import jinja_util
from collections import namedtuple
from google.appengine.ext import db
import logging
import datetime
render_str = jinja_util.render_str
valid_pw = utilities_mu.valid_pw
make_pw_hash = utilities_mu.make_pw_hash
class Series(db.Model):
Title = db.StringProperty(required = True)
ReleaseDate = db.DateProperty(required = False)
Created = db.DateTimeProperty(auto_now_add = True)
Last_modified = db.DateTimeProperty(auto_now = True)
@classmethod
def NewSeries(cls, Title, ReleaseDate):
if not ReleaseDate:
ReleaseDate = datetime.date.today()
q = Series.gql("Where Title= :title", title=Title)
p = list(q)
if not p:
q = Series(Title = Title, ReleaseDate = ReleaseDate)
q.put() #assume successful entry if good values
return True
return False
class MovieListing(db.Model):
Title = db.StringProperty(required = True)
IMDB_link = db.LinkProperty(required = True)
Followed = db.IntegerProperty(required = True)
Poster_link = db.LinkProperty(required = False)
Creators = db.StringProperty(required = False)
Actors = db.StringProperty(required = False)
FoundTorrent = db.IntegerProperty(required = False)
TorrentLink1 = db.StringProperty(required = False)
Last_found_check = db.DateProperty(required = True)
ReleaseDate = db.DateProperty(required = False)
Created = db.DateTimeProperty(auto_now_add = True)
Last_modified = db.DateTimeProperty(auto_now = True)
@classmethod
def FollowedChange(cls, movie_id, truth):
db_key = db.Key.from_path('MovieListing', movie_id)
q = db.get(db_key)
if q and (truth == 1 or truth == 0 ):
q.Followed = int(truth)
q.put()
#logging.error("Followed in db = %s"%str(q.Followed))
return True
else:
return False
@classmethod
def FoundTorrentChange(cls, title, url, truth):
q = MovieListing.gql("Where Title= :title", title=title)
p = list(q)
        if p:  # indexing p[0] on an empty result would raise IndexError
p[0].FoundTorrent = truth
p[0].TorrentLink1 = url
p[0].Last_found_check = datetime.date.today()
p[0].put()
#logging.error("Followed in db = %s"%str(q.Followed))
return True
else:
return False
@classmethod
def TorrentLink1Edit(cls, movie_id, newlink):
db_key = db.Key.from_path('MovieListing', movie_id)
q = db.get(db_key)
if q and newlink:
q.TorrentLink1 = str(newlink)
q.put()
#logging.error("Followed in db = %s"%str(q.Followed))
return True
else:
return False
@classmethod
    def NewListing(cls, Title, IMDB_link, Poster_link, ReleaseDate, Followed = 1, Creators = "", Actors = "",
                   FoundTorrent = 0, TorrentLink1 = "", Last_found_check = None):
        #assume all data is valid (e.g. link is link) -- how to handle errors while entering in DB?
        # compute the default date per call; a default argument is evaluated only once, at import time
        if Last_found_check is None:
            Last_found_check = datetime.date.today()
q = MovieListing.gql("Where Title= :title", title=Title)
p = list(q)
if not p:
q = MovieListing(Title = Title, IMDB_link = IMDB_link, Poster_link = Poster_link, Followed = Followed,
Creators = Creators, Actors = Actors, FoundTorrent = FoundTorrent,
TorrentLink1 = str(TorrentLink1), Last_found_check = Last_found_check,
ReleaseDate = ReleaseDate)
q.put() #assume successful entry if good values
return True
return False
def render(self):
return render_str("Movie_listing.html", listing = self)
def single_listing( listing_id):
db_key = db.Key.from_path('MovieListing', int(listing_id) )
return db.get(db_key)
class System_tools(db.Model):
name = db.StringProperty(required = True)
value = db.StringProperty(required = False)
Created = db.DateTimeProperty(auto_now_add = True)
Last_modified = db.DateTimeProperty(auto_now = True)
class Users(db.Model):
username = db.StringProperty(required = True)
password_hash = db.StringProperty(required = True)
email = db.StringProperty()
#pass hash = db.StringProperty(required = True)
#sign stuff
@classmethod
def correct_password(self, username_in,password_in):
q = Users.gql(("WHERE username = :user"), user = str(username_in))
password_hash_db = ""
for entry in q:
password_hash_db = str(entry.password_hash)
if valid_pw(password_in, password_hash_db):
return password_hash_db
@classmethod
def free_username(cls, username):
q = cls.gql(("WHERE username = :user"), user = str(username))
for entry in q:
return False
return True
@classmethod
def correct_cookie(cls,cookie_value):
try:
CookieUser = cookie_value.split("-")[0]
CookieHash = cookie_value.split("-")[1]
except:
return False,""
q = cls.gql(("WHERE username = :user"), user = str(CookieUser))
password_hash_db = ""
for entry in q:
password_hash_db = str(entry.password_hash)
return CookieHash == password_hash_db,CookieUser
@classmethod
def signup(cls, username, password, verify, email):
have_error = False
params = {}
if not valid_username(username):
params['error_username'] = "That's not a valid username."
have_error = True
        if not cls.free_username(username):
params['error_username'] = "Username already taken"
have_error = True
if not valid_password(password):
params['error_password'] = "That wasn't a valid password."
have_error = True
elif password != verify:
params['error_verify'] = "Your passwords didn't match."
have_error = True
if not valid_email(email):
params['error_email'] = "That's not a valid email."
have_error = True
password_hash = make_pw_hash(password)
return have_error, params, password_hash
@classmethod
def login_check(cls, username, password):
have_error = False
params = {}
        if cls.free_username(username):
have_error = True
params['error_username'] = "That's not a valid username."
cookie_hash = cls.correct_password(username,password)
if not cookie_hash:
params['error_password'] = "Wrong password."
have_error = True
return have_error, params,cookie_hash
USER_RE = re.compile(r"^[a-zA-Z0-9_-]{3,20}$")
def valid_username(username):
return username and USER_RE.match(username)
PASS_RE = re.compile(r"^.{3,20}$")
def valid_password(password):
return password and PASS_RE.match(password)
EMAIL_RE = re.compile(r'^[\S]+@[\S]+\.[\S]+$')
def valid_email(email):
return not email or EMAIL_RE.match(email)
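
# Cookie format note (illustrative): the login cookie is "<username>-<hash>",
# which correct_cookie() splits on the first "-". Since USER_RE above permits
# hyphens inside usernames, a name such as "jo-ann" would split incorrectly;
# a safer scheme would use a separator the username regex forbids.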
|
UTF-8
|
Python
| false | false | 2,014 |
14,456,859,923,675 |
26a61f939392c8cf9ba494001da8e3e0e488d75f
|
e7a33917e5c136216a4c78d694bdde91952ea2f8
|
/score_guitar_test.py
|
07128b4182a622455cd49ee07a8ac1b6ba3695b6
|
[] |
no_license
|
phillipgreenii/guitar-config-optimization
|
https://github.com/phillipgreenii/guitar-config-optimization
|
d9a927bde6919a9cd353467ca39a7019b503ac9c
|
b87948e8866b8d7c09542d856fb9bd58fbee3111
|
refs/heads/master
| 2021-01-20T09:23:15.018593 | 2014-03-23T01:46:50 | 2014-03-23T01:46:50 | 101,592,717 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import unittest
import guitar
from semitone import Semitone
from score_guitar import GuitarScorer
class TestGuitarScorer(unittest.TestCase):
def test_score_with_standard_guitar_works(self):
guitar_instance = guitar.Guitar(12, 1,
                                tuple(
                                    map(Semitone.from_string, ('E', 'D', 'G', 'E'))),
name='Test Tuning')
instance = GuitarScorer()
result = instance.score(guitar_instance)
for scores in result:
print "%(key_signature)-10s%(root)-3s\t%(count)5i\t%(min_score)3.2f\t%(25_percentile_score)3.2f\t%(mean_score)3.2f\t%(75_percentile_score)3.2f\t%(max_score)3.2f" % scores
# TODO add assert
if __name__ == '__main__':
unittest.main()
|
UTF-8
|
Python
| false | false | 2,014 |
11,106,785,467,387 |
6516097235bad1fc94ea0d3422f74a4a1718a554
|
64b6944ac218edee2fd006c7a942a93d1fde0e61
|
/mystore/StoreApp/models/products.py
|
d89c64b08872a4af37943929f6e4072791cff822
|
[] |
no_license
|
ufomysis/Store-Management-App
|
https://github.com/ufomysis/Store-Management-App
|
45a81dcf773fba4e472726f4765687f14a146e51
|
322ce9e52cea7b2da546ce9a319f69c6d4d3677b
|
refs/heads/master
| 2021-01-21T08:37:16.256145 | 2014-11-07T09:01:43 | 2014-11-07T09:01:43 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.db import models
class Products(models.Model):
product_name = models.CharField("Product Name", max_length=70)
description = models.CharField("Description", max_length=250)
class Meta:
app_label = "StoreApp"
db_table = "Products"
def __unicode__(self):
return self.product_name
|
UTF-8
|
Python
| false | false | 2,014 |
910,533,111,329 |
bffb6a88b46e0d30006f7d53d66fc1ee3963b9ea
|
f28dd4dcfe4f1fee787e582097d0c834339859a0
|
/vs/socialfooter/browser/mono/rounded/white/metadater.py
|
e03e70e796195897d474de93f3009982d1411450
|
[] |
no_license
|
virtualsciences/vs.socialfooter
|
https://github.com/virtualsciences/vs.socialfooter
|
6c0e008074445e54ca832d6db96ca5c3c891a12a
|
df3d77dd764fd6464fbf955f89d1b6bf86504377
|
refs/heads/master
| 2021-01-18T00:24:41.689446 | 2014-02-13T16:03:51 | 2014-02-13T16:03:51 | 15,901,686 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
"Adding cache headers for image files in a directory"
import glob
import os
file_names = []
# get all files in the directory with an image extension
for file_type in ['*.png','*.gif','*.jpg']:
file_names.extend(glob.glob(file_type))
# create a '<image>.metadata' file for each image, containing the cache-header directive below
for file_name in file_names:
    # use a context manager so the handle is closed/flushed, and avoid shadowing the 'file' builtin
    with open('%s.metadata' % file_name, 'w') as meta:
        meta.write('[default]\ncache = HTTPCache')
|
UTF-8
|
Python
| false | false | 2,014 |
13,426,067,780,524 |
cec04edc56fcc98f9ac0c12b3537c187a662b975
|
ede325894c6a6f153cbc71199225bde30b0d5700
|
/loadtest/test_m_memcached.py
|
e76d7b453bca3d166ac60d1fb30433d2fd104b78
|
[] |
no_license
|
lhwork/erlq
|
https://github.com/lhwork/erlq
|
54e1ec7ac1e67f15396d9793ab319807f2999ac9
|
7247aa269a874e3e43e2114663360752510fa1ee
|
refs/heads/master
| 2020-05-22T14:58:27.572535 | 2012-10-24T09:55:37 | 2012-10-24T09:55:37 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import pylibmc
from time import ctime, time
from threading import Thread
class MemcachedTest(Thread):
def __init__ (self,number,opts):
Thread.__init__(self)
self.threadName = number
self.timeUsed = -1
self.options = opts
self.miss = 0
def run(self):
mc = pylibmc.Client([self.options.server_address])
l = [str(i) for i in xrange(self.threadName * 250000, (self.threadName + 1) * 250000)]
if self.options.method == 'write':
print 'Create Queue 250000.'
start = time()
for i in l:
mc.set('test',i)
end = time()
self.timeUsed = end - start
else:
print 'Read Queue 250000.'
start = time()
for i in l:
v = mc.get('test')
if not v:
self.miss += 1
end = time()
self.timeUsed = end - start
print 'get miss:%d' % self.miss
print '___Thread(%d) Used: %r' % (self.threadName, self.timeUsed)
def main():
from optparse import OptionParser
parser = OptionParser()
parser.add_option('-a', '--server-address', dest='server_address', default='127.0.0.1:11211', help="address:port of memcached [default: %default]")
parser.add_option('-m', '--method', dest='method', default='write', help="test method: write, read [default: %default]")
global opts
opts, args = parser.parse_args()
print ctime()
start = time()
threadList = []
for i in range(4):
currMemTest = MemcachedTest(i, opts)
threadList.append(currMemTest)
currMemTest.start()
for t in threadList:
t.join()
end = time()
print 'Used:%r' % (end - start)
print ctime()
if __name__ == "__main__":
main()
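
# Usage sketch (flags match the parser above; the address shown is the default):
#   $ python test_m_memcached.py -a 127.0.0.1:11211 -m write
#   $ python test_m_memcached.py -a 127.0.0.1:11211 -m read
# Four threads each issue 250,000 set/get operations against the key 'test';
# each thread reports its own elapsed time and, for reads, its miss count.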
|
UTF-8
|
Python
| false | false | 2,012 |
12,343,736,018,017 |
a2ff63df466085c19ca4b73b7e65be3c2261d99c
|
54281234e4268b0ff897a827b573f7c499172d10
|
/start_server.py
|
8451114cd7df6dd872a0584f9d8da5821c99f1d2
|
[] |
no_license
|
pombredanne/kokki-pywebsf-demo
|
https://github.com/pombredanne/kokki-pywebsf-demo
|
c90725f5008ca715a84855fe9e5661baf0be29d7
|
a440814c834d87c10b19b1737605e86f63ffaf65
|
refs/heads/master
| 2018-03-23T04:19:55.089039 | 2011-03-17T22:01:12 | 2011-03-17T22:01:12 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
import os
import time
from optparse import OptionParser
from boto.ec2.connection import EC2Connection
from textwrap import dedent
INSTANCE_TYPES = {
"t1.micro": [32, 64],
"m1.small": 32,
"m1.large": 64,
"m1.xlarge": 64,
"c1.medium": 32,
"c1.xlarge": 64,
"m2.xlarge": 64,
"m2.2xlarge": 64,
"m2.4xlarge": 64,
"cc1.4xlarge": 64,
}
AMIS = {
"ubuntu-maverick": {
"us-east-1": {
"ebs": {
32: "ami-508c7839",
64: "ami-548c783d",
},
"local": {
32: "ami-1a837773",
64: "ami-688c7801",
},
},
},
"ubuntu-lucid": {
"us-east-1": {
"ebs": {
32: "ami-480df921",
64: "ami-4a0df923",
},
"local": {
32: "ami-a403f7cd",
64: "ami-da0cf8b3",
},
},
},
"ubuntu-karmic": {
"us-east-1": {
"ebs": {
32: "ami-6743ae0e",
64: "ami-7d43ae14",
},
"local": {
32: "ami-563dd73f",
64: "ami-6832d801",
},
},
},
}
LOCAL_DISKS = {
"t1.micro": [],
"m1.small": ["/dev/sda2"],
"m1.large": ["/dev/sdb", "/dev/sdc"],
"m1.xlarge": ["/dev/sdb", "/dev/sdc", "/dev/sdd", "/dev/sde"],
"m2.2xlarge": ["/dev/sdb"],
# Unverified
"c1.medium": ["/dev/sda2"],
"c1.xlarge": ["/dev/sdb", "/dev/sdc", "/dev/sdd", "/dev/sde"],
"m2.xlarge": ["/dev/sdb"],
"m2.4xlarge": ["/dev/sdb", "/dev/sdc"],
"cc1.4xlarge": ["/deb/sdb", "/dev/sdc"],
}
RAID = dedent("""
export DEVICES="{local_disks}"
export DEVICE_COUNT="{local_disk_count}"
if [ ! -e /dev/md0 ]; then
apt-get -y install xfsprogs mdadm
umount /mnt || true
mdadm --create /dev/md0 -R -c 256 --level 0 --metadata=1.1 --raid-devices $DEVICE_COUNT $DEVICES
blockdev --setra 65536 /dev/md0
/sbin/mkfs.xfs /dev/md0
echo "DEVICE $DEVICES" > /etc/mdadm/mdadm.conf
mdadm --detail --scan >> /etc/mdadm/mdadm.conf
sed -i -e's/^\(.*\/mnt.*\)/#\1/' /etc/fstab
echo "/dev/md0 /mnt xfs noatime 0 0" | tee -a /etc/fstab
mount /mnt
dd if=/dev/zero of=/mnt/swap bs=1M count=2048
mkswap -f /mnt/swap
swapon /mnt/swap
fi
""").strip()+"\n"
def build_userdata(project_url, config_path, roles, raid_code, private_key, kokki_args=None):
context = dict(
private_key = private_key,
project_url = project_url,
config_path = config_path,
roles = " ".join(roles),
raid_code = raid_code,
kokki_args = kokki_args or "",
)
userdata = [dedent("""
#!/bin/sh
# Make sure this script is only ever run once
if [ -e /etc/kokki-run ]; then
exit 0
fi
date > /etc/kokki-run
set -e -x
export DEBIAN_FRONTEND=noninteractive
if [ -f /home/ubuntu/.ssh/authorized_keys ]
then
mkdir /root/.ssh &> /dev/null
cp /home/ubuntu/.ssh/authorized_keys /root/.ssh/
fi
apt-get update
# aptitude dist-upgrade -y
apt-get -y upgrade
apt-get -y install git-core python python-setuptools python-jinja2
easy_install -U boto
{raid_code}
""".format(**context)).strip()+"\n"]
if private_key:
userdata.append(dedent("""
cat > /root/.ssh/id_kokki_private <<EOF
{private_key}
EOF
cat > /root/.ssh/kokki_ssh.sh <<EOF
#!/bin/sh
exec ssh -o StrictHostKeyChecking=no -i /root/.ssh/id_kokki_private "\$@"
EOF
chmod +x /root/.ssh/kokki_ssh.sh
chmod go-rwx /root/.ssh/*
""".format(**context)).strip()+"\n")
context["git_ssh"] = "export GIT_SSH=/root/.ssh/kokki_ssh.sh"
else:
context["git_ssh"] = ""
userdata.append(dedent("""
cd /root
{git_ssh}
git clone git://github.com/samuel/kokki.git kokki
git clone {project_url} kokki/private
cd kokki
cat > update.sh <<EOF
#!/bin/sh
{git_ssh}
cd /root/kokki
git pull
cd private
git pull
cd ..
unset GIT_SSH
export GIT_SSH
python -m kokki.command -f private/{config_path} {kokki_args} \$@ {roles}
EOF
chmod +x update.sh
./update.sh 1> /var/log/kokki.log 2> /var/log/kokki.log
echo FIN >> /var/log/kokki.log
""".format(**context)).strip()+"\n")
return "\n".join(userdata)
def build_parser():
parser = OptionParser(usage="Usage: %prog [options] <system>")
parser.add_option("-a", "--app", dest="apps", help="App to tag the instance for deployment", action="append")
parser.add_option("-c", "--config", dest="config_path", help="Config file path")
parser.add_option("-e", "--env", dest="environment", help="Environment (production, development, testing)")
parser.add_option("-o", "--option", dest="options", help="Additional option", action="append")
return parser
def read_config(path):
import os, sys
from ConfigParser import SafeConfigParser, NoSectionError, NoOptionError
parser = SafeConfigParser()
parser.read(path)
def _fixup_dict(items):
for key, value in items:
if key == 'private_key':
if not os.path.isabs(value):
value = os.path.realpath(os.path.join(os.path.dirname(path), value))
yield key, value
config = {'default': dict(_fixup_dict(parser.items("DEFAULT")))}
for s in parser.sections():
config[s] = dict(_fixup_dict(parser.items(s)))
return config
def start_instance(aws_key, aws_secret, zone, instance_type, roles, environment=None, basename=None, forcename=None, wait=True, root_type="local", tags=None, word_size=None, raid=False, ami_id=None, base=None, key_name=None, groups=None, private_key=None, config_path="config", project_url=None, min_count=1, max_count=1, kokki_args=None):
word_size = word_size or INSTANCE_TYPES[instance_type]
if instance_type == "t1.micro":
root_type = "ebs"
if not ami_id:
ami_id = AMIS[base][zone[:-1]][root_type][word_size]
if raid and len(LOCAL_DISKS[instance_type]) > 1:
raid_code = RAID.format(
local_disks = " ".join(LOCAL_DISKS[instance_type]),
local_disk_count = len(LOCAL_DISKS[instance_type]),
)
else:
raid_code = ""
if groups is None:
groups = ["default"]
ec2 = EC2Connection(aws_key, aws_secret)
# Get list of existing host names
res = ec2.get_all_instances()
host_names = set()
for r in res:
for i in r.instances:
if i.state != 'running':
continue
if not environment or i.tags.get('environment') == environment:
name = i.tags.get('Name')
if name:
host_names.add(name)
if forcename:
instancename = forcename
elif basename:
for i in range(1, 1000):
hostname = "%s%02d" % (basename, i)
if hostname not in host_names:
instancename = hostname
break
userdata = build_userdata(
private_key = private_key,
project_url = project_url,
config_path = config_path,
roles = roles,
raid_code = raid_code,
kokki_args = kokki_args,
)
image = ec2.get_image(ami_id)
res = image.run(
min_count = min_count,
max_count = max_count,
key_name = key_name,
security_groups = groups,
user_data = userdata,
instance_type = instance_type,
placement = zone,
)
instance = res.instances[0]
time.sleep(1)
instance.add_tag("Name", instancename)
if tags:
for name, value in tags.items():
instance.add_tag(name.strip(), value.strip())
if not wait:
return {}
while True:
instance.update()
if instance.state == 'running':
break
time.sleep(3)
return dict(
public_dns = instance.public_dns_name,
private_dns = instance.private_dns_name,
private_ip = instance.private_ip_address,
name = instancename,
)
def main():
parser = build_parser()
options, system = parser.parse_args()
if not options.config_path:
parser.error("must specify config path")
if not system:
parser.error("must specify system type")
if not options.environment:
parser.error("must specify an environment")
system = system[0]
fullconfig = read_config(options.config_path)
config = fullconfig['default'].copy()
config.update(**fullconfig[system])
if options.options:
for o in options.options:
k, v = o.split('=', 1)
config[k] = v
if config.get('word_size'):
config['word_size'] = int(config['word_size'])
config['groups'] = [x.strip() for x in config['groups'].split(',')]
config['roles'] = [x.strip() for x in config['roles'].split(',')]
config['groups'].insert(0, options.environment)
config['roles'].insert(0, options.environment)
if config.get('tags'):
config['tags'] = dict(x.split(':') for x in config['tags'].split(','))
else:
config['tags'] = {}
apps = [x.strip() for x in config.pop('apps', '').split(',') if x.strip()] or options.apps
if apps:
config['tags']['apps'] = ",".join(x.strip() for x in apps if x.strip())
config['tags']['environment'] = options.environment
config['environment'] = options.environment
config['aws_key'] = config.get('aws_key') or None
config['aws_secret'] = config.get('aws_secret') or None
if 'roles' not in config['tags']:
config['tags']['roles'] = ",".join(config['roles'])
if 'type' not in config['tags']:
config['tags']['type'] = system
if config.get("private_key"):
with open(config['private_key'], "rb") as fp:
config['private_key'] = fp.read()
else:
config["private_key"] = None
config['basename'] = system
res = start_instance(**config)
import pprint
pprint.pprint(res)
if __name__ == "__main__":
main()
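
# Invocation sketch (config file name and system name are placeholders):
#   $ python start_server.py -c servers.cfg -e production web
# This reads the [web] section of servers.cfg (merged over [DEFAULT]), tags
# the instance with environment/roles, and boots it with the kokki bootstrap
# script assembled by build_userdata().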
|
UTF-8
|
Python
| false | false | 2,011 |
2,388,001,847,448 |
fac17e699c54f4e6247d0c8c5b31638dd10bce95
|
14b68230b53845ca78e3fa8a9476b70394f0a11d
|
/makerbase/models.py
|
fdedf22dfd7e454f18b345e6034caef448ae7c70
|
[
"MIT"
] |
permissive
|
markpasc/makerbase
|
https://github.com/markpasc/makerbase
|
165b7ebeff314dc5a0ef1293f085ff4db7d99ca8
|
d35bc9da8fc843806465c2159b220cb8ca9234f6
|
refs/heads/master
| 2021-01-22T06:58:29.171736 | 2013-03-14T02:12:08 | 2013-03-14T02:12:08 | 3,871,796 | 5 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import datetime
from functools import partial
from itertools import chain
from urllib import urlencode
from urlparse import parse_qs, urlunsplit
import uuid
from flask import url_for
import riak
from makerbase import app
riakclient = riak.RiakClient(port=8087, transport_class=riak.RiakPbcTransport)
class LinkError(Exception):
pass
class RobjectMetaclass(type):
class_for_bucket = dict()
def __new__(cls, name, bases, attr):
if '_bucket' not in attr:
attr['_bucket'] = name.lower()
new_cls = type.__new__(cls, name, bases, attr)
cls.class_for_bucket[attr['_bucket']] = new_cls
return new_cls
class Link(object):
def __init__(self, tag):
self.tag = tag
def __get__(self, instance, cls):
if instance is None:
return self
return instance.get_link(self.tag)
class LinkSet(object):
def __init__(self, tag):
self.tag = tag
def __get__(self, instance, cls):
if instance is None:
return self
return instance.get_links(self.tag)
class Robject(object):
__metaclass__ = RobjectMetaclass
_bucket = None
def __init__(self, *args, **kwargs):
if args:
self.id = args[0]
if kwargs:
self.__dict__.update(kwargs)
self._before_store = list()
self._after_store = list()
@classmethod
def get_bucket(cls):
return riakclient.bucket(cls._bucket)
@property
def id(self):
try:
return self.__dict__['_id']
except KeyError:
ident = str(uuid.uuid1())
self.__dict__['_id'] = ident
return ident
@id.setter
def id(self, value):
if isinstance(value, unicode):
value = value.encode('utf-8')
self.__dict__['_id'] = value
@classmethod
def _new_for_entity(cls, entity):
self = cls(entity.get_key(), **entity.get_data())
self._entity = entity
return self
@classmethod
def get(cls, ident):
if isinstance(ident, unicode):
ident = ident.encode('utf-8')
entity = riakclient.bucket(cls._bucket).get(ident)
if not entity or not entity.exists():
app.logger.warning("Tried to load %s with id %r but found none", cls.__name__, ident)
return None
self = cls._new_for_entity(entity)
app.logger.debug("Found for %s id %r entity %r! Returning %s %r!", cls._bucket, ident, entity, cls.__name__, self)
return self
def get_entity_data(self):
return dict((k, v) for k, v in self.__dict__.iteritems() if not k.startswith('_'))
def get_api_data(self, include_links=True):
data = self.get_entity_data()
data['id'] = self.id
if include_links:
links = self._entity.get_links()
for link in links:
tag, value = link.get_tag(), link.get_key()
if tag not in data:
data[tag] = value
# TODO: use whether the model's link is a Link or LinkSet
# to determine listiness, so we don't get sets of one link
# serialized as scalar data.
elif tag in data and not isinstance(data[tag], list):
data[tag] = [data[tag], value]
else:
data[tag].append(value)
return data
def save(self):
try:
entity = self._entity
except AttributeError:
entity = riakclient.bucket(self._bucket).new(self.id, data=self.get_entity_data())
self._entity = entity
else:
entity.set_data(self.get_entity_data())
before_store, self._before_store = self._before_store, list()
for fn in before_store:
app.logger.debug("~YAY~ doing a thing saved for before %r saves", self)
fn()
app.logger.debug("finished before-store steps for %r, storing entity", self)
entity.store()
after_store, self._after_store = self._after_store, list()
for fn in after_store:
app.logger.debug("~YAY~ doing a thing saved for after %r saves", self)
fn()
app.logger.debug("finished after-store steps for %r, yay yay", self)
def delete(self):
try:
entity = self._entity
except AttributeError:
pass
else:
entity.delete()
@classmethod
def search(cls, *args, **kwargs):
query_parts = chain(args, (':'.join((k, v)) for k, v in kwargs.iteritems()))
query_text = ' AND '.join(query_parts)
app.logger.debug('Searching bucket %r with query %r', cls._bucket, query_text)
query = riakclient.search(cls._bucket, query_text)
for result in query.run():
yield cls._new_for_entity(result.get())
def get_link(self, tag):
link_iter = self.get_links(tag)
try:
return link_iter.next()
except StopIteration:
return
def get_links(self, tag):
if tag is None:
raise ValueError("A tag is required to get links with get_links()")
try:
entity = self._entity
except AttributeError:
            return iter(())  # iter() with no arguments raises TypeError
return (RobjectMetaclass.class_for_bucket[link.get_bucket()]._new_for_entity(link.get()) for link in entity.get_links() if link.get_tag() == tag)
def add_link(self, target, tag=None):
try:
entity = self._entity
except AttributeError:
app.logger.debug("Oops, remembering link from %r to %r with tag %r to save later when source %r is saved", self, target, tag, self)
self._before_store.append(partial(self.add_link, target, tag=tag))
return self
try:
target_entity = target._entity
except AttributeError:
app.logger.debug("Oops, remembering link from %r to %r with tag %r to save later when target %r is saved", self, target, tag, target)
target._after_store.append(partial(self.add_link, target, tag=tag))
return self
entity.add_link(target_entity, tag=tag)
return self
def permalink(self):
return url_for(self.permalink_view, slug=self._id)
class Project(Robject):
permalink_view = 'project'
parties = LinkSet('participation')
history = LinkSet('history')
class Maker(Robject):
permalink_view = 'maker'
parties = LinkSet('participation')
history = LinkSet('history')
class Participation(Robject):
maker = Link('maker')
project = Link('project')
history = LinkSet('history')
def get_api_data(self, include_links=True):
data = super(Participation, self).get_api_data(include_links=include_links)
try:
del data['start_year']
del data['start_month']
except KeyError:
pass
else:
data['start'] = self.start_date.isoformat()
try:
del data['end_year']
del data['end_month']
except KeyError:
pass
else:
data['end'] = self.end_date.isoformat()
return data
@property
def start_date(self):
return datetime.date(year=self.start_year, month=self.start_month + 1, day=1)
@start_date.setter
def start_date(self, dt):
self.start_year = dt.year
self.start_month = dt.month - 1
@property
def end_date(self):
if not self.end_year:
return
return datetime.date(year=self.end_year, month=self.end_month + 1, day=1)
@end_date.setter
def end_date(self, dt):
if dt is None:
try:
del self.end_year
except AttributeError:
pass
try:
del self.end_month
except AttributeError:
pass
return
self.end_year = dt.year
self.end_month = dt.month - 1
class User(Robject):
def is_authenticated(self):
return True
def is_active(self):
return True
@staticmethod
def is_anonymous():
return False
def get_id(self):
return self.id
class History(Robject):
user = Link('user')
maker = Link('maker')
project = Link('project')
participation = Link('participation')
@property
def when_date(self):
if not self.when:
return
return datetime.datetime.strptime(self.when, '%Y-%m-%dT%H:%M:%S')
@when_date.setter
def when_date(self, dt):
self.when = dt.isoformat()
__all__ = ('Project', 'Maker', 'Participation', 'User', 'History')
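
# Link-usage sketch (illustrative; ids are made up). add_link() defers the
# Riak link until both sides have stored entities, via the
# _before_store/_after_store hooks:
#   maker = Maker('maru'); project = Project('makerbase')
#   maker.add_link(project, tag='participation')
#   project.save()   # target gains its entity
#   maker.save()     # the deferred link is written during this save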
|
UTF-8
|
Python
| false | false | 2,013 |
10,728,828,323,935 |
d4bf7cea50a0414b169a3c861e706bdea9759366
|
b03621a8bcef517cb9f6de9e168cd24e86a85232
|
/egads/egads/third-party/cdms_utils/time_utils.py
|
d5f15a29da86503dab76fd66518be450a4767f63
|
[
"BSD-3-Clause"
] |
permissive
|
mfreer/eufar-egads
|
https://github.com/mfreer/eufar-egads
|
ed20944dc0ea5c4a3b75a8e3c9cf0e8d675d2cbe
|
05fce4d36f070587171506caa8b136508fa9405c
|
refs/heads/master
| 2021-01-10T18:46:28.109667 | 2013-09-02T22:00:28 | 2013-09-02T22:00:28 | 33,680,438 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
time_utils.py
=============
Some useful time utilities for use with cdms and cdtime
"""
# Import python modules
import os
import re
import time
# Import third-party software
import cdtime
try:
import cdms2 as cdms
except:
import cdms
dateTimePattern=re.compile(r"^(\d{4}).(\d{1,2}).(\d{1,2})(\s+|T)(\d+):(\d+):(\d+\.?.*)$")
def getDateTimeComponents(dateTimeString):
"""
Takes in a time string in standard DateTime format and returns the items in it.
"""
match=dateTimePattern.match(dateTimeString)
if not match:
raise Exception("Cannot match date time string: %s" % dateTimeString)
items=match.groups()
(year, month, day, hour, minute)=[int(i) for i in items[:3]+items[4:6]]
second=float(items[6])
return (year, month, day, hour, minute, second)
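# e.g. getDateTimeComponents("2013-09-02T21:59:59.5")
#   => (2013, 9, 2, 21, 59, 59.5)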
def convertDateTimeStringToYYYYMMDDHH(timeString):
"""
Takes in a long CF-compliant time string and returns a shorter
YYYYMMDDHH string.
"""
match=re.match(r"(\d{4}).(\d{1,2}).(\d{1,2})(\s+|T)(\d+):", timeString)
if match:
(y,m,d,blank,h)=match.groups()
timeString="%.4d%.2d%.2d%.2d" % (int(y), int(m), int(d), int(h))
return timeString
def getTodayMinus(n, format="string"):
"Returns today minus n days as a string, dict or tuple."
now = time.time()
oneday = 24*60*60
targettime = now - (int(n)*oneday)
if format == "string":
targetdate=time.strftime("%Y%m%d", time.localtime(targettime))
else:
(y, m, d) = time.localtime(targettime)[:3]
if format == "dict":
targetdate = {"year":"%.4d"%y, "month":"%.2d"%m, "day":"%.2d"%d}
elif format == "list":
targetdate = [y, m, d]
elif format == "tuple":
targetdate = (y, m, d)
else :
raise Exception("Unknown output format :" + format)
return targetdate
def getTimeSubsetStartEndIndices(time_axis, start, end, required_hour=None):
"""
Analyses time_axis and returns a (start_index, end_index) tuple that
represent the correct indices in the array for start and end. If
required_hour is given then it also checks start is set on the time
of day (e.g. 0 for midnight and 12 for midday) expected. If not it adjusts
the start_index so it is on the required_hour. It does NOT adjust the
end_index.
"""
units = time_axis.units
start_time = cdtime.s2r(start, units, cdtime.Calendar360)
end_time = cdtime.s2r(end, units, cdtime.Calendar360)
# Check hour of start_time if required
if required_hour != None:
required_hour = int(required_hour)
comp_time = start_time.tocomp()
hour = comp_time.hour
print start_time
print start_time.tocomp()
print hour
if hour != required_hour:
print "Adjusting to next day to get required hour right."
new_start = comp_time.add(1, cdtime.Day)
new_start.hour = required_hour
print "New start time:", new_start
start_time = new_start.torel(units, cdtime.Calendar360)
start_value = start_time.value
end_value = end_time.value
# Check both indices are in the axis values
if start_value not in time_axis[:]:
raise Exception("Start index not in axis values: " + str(start_value))
if end_value not in time_axis[:]:
raise Exception("End index not in axis values: " + str(end_value))
t_values = list(time_axis[:])
start_index = t_values.index(start_value)
end_index = t_values.index(end_value)
return (start_index, end_index)
if __name__ == "__main__":
t = cdms.createAxis([i/4. for i in range(100)])
t.id = t.standard_name = t.long_name = "time"
t.units = "days since 1970-01-01 00:00:00"
t.axis = "T"
t.designateTime()
start = "1970-01-03 00:00:00"
end = "1970-01-6 18:00:00"
print "Testing with no required hour..."
print getTimeSubsetStartEndIndices(t, start, end)
print "Testing with required hour..."
start = "1970-01-03 12:00:00"
print getTimeSubsetStartEndIndices(t, start, end, required_hour=0)
start = "1970-01-03 12:00:00"
print getTimeSubsetStartEndIndices(t, start, end, required_hour=13)
|
UTF-8
|
Python
| false | false | 2,013 |
8,693,013,836,803 |
a888004da53a8249984c43a8d073e5c4fe1c06b2
|
f66d274d72a565c265bd4e4d8f3dc40618642620
|
/vctasks/addtask/admin.py
|
c9119199e266bc3abade51cf26cf46187ea81997
|
[] |
no_license
|
andrius-momzyakov/task_manager
|
https://github.com/andrius-momzyakov/task_manager
|
cbd6ea52c743b63fd3e4378f3e90ece553fcd199
|
74097ef2762eba3146fcd455ef631b3f181c698b
|
refs/heads/master
| 2020-04-11T07:59:32.922477 | 2013-02-21T09:05:27 | 2013-02-21T09:05:27 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.contrib import admin
from vctasks.addtask.models import Task, Doc, Person, Module, TaskCategory
admin.site.register(Task)
admin.site.register(Doc)
admin.site.register(Person)
admin.site.register(Module)
admin.site.register(TaskCategory)
|
UTF-8
|
Python
| false | false | 2,013 |
18,597,208,416,579 |
b26c6722f6230786568fc7eaf701f4a9b767610a
|
ad29efb8d03390dfe383331f5ff0356e08850b82
|
/scripts/bootstrap_globals.py
|
49fa2705654b255036c2ae5c1f85d7d7d69a4fa2
|
[
"W3C-19980720",
"W3C",
"CDDL-1.0",
"CPL-1.0",
"Zlib",
"MIT",
"EPL-1.0",
"bzip2-1.0.6",
"Apache-2.0",
"BSD-3-Clause",
"BSD-2-Clause",
"MPL-1.1"
] |
non_permissive
|
krichter722/pgalise
|
https://github.com/krichter722/pgalise
|
39bf6ba1e1ed63a971cebda7f8d124ca4c418e9a
|
26e33dcb055a014f1bf5767fd6fdd9dd01da8f7b
|
refs/heads/master
| 2021-05-28T16:01:27.684230 | 2014-11-10T13:49:34 | 2014-11-10T13:49:34 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# a place to share constants to avoid too complex dependency issues in import
# chains. The location of the file influences the constants!
import os
base_dir = os.path.realpath(os.path.join(os.path.dirname(__file__), ".."))
bin_dir = os.path.join(base_dir, "scripts", "bin")
|
UTF-8
|
Python
| false | false | 2,014 |
3,985,729,653,553 |
ae6cb07c73c513b8e78e88aa1ab62bc26323d207
|
f4feb717d95dfbe7e62f1a66e534fb04ecf7c7e9
|
/datautil/load_data.py
|
008c438a9b55f2025eca72fe5e028a9e651c5fd9
|
[
"Apache-2.0"
] |
permissive
|
mramire8/active
|
https://github.com/mramire8/active
|
52e9e55b1b0bcb4ad6817c329da2ac62524f745d
|
1da9295072091df0a1e2f00bcc754a0791427e97
|
refs/heads/master
| 2016-09-05T12:51:18.411757 | 2014-12-04T21:25:10 | 2014-12-04T21:25:10 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
__author__ = 'mramire8'
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.datasets import load_files
from sklearn.datasets import fetch_20newsgroups
from sklearn.cross_validation import train_test_split, ShuffleSplit
import numpy as np
import xml.etree.ElementTree as ET
from xml.etree.ElementTree import XMLParser
from goose import Goose
from lxml import etree
from bs4 import BeautifulSoup
from boilerpipe.extract import Extractor
from os import listdir
# import bunch
# from bunch import Bunch
from sklearn.datasets import base as bunch
import os
import pickle
import json
from sklearn.utils.validation import check_random_state
if "nt" in os.name:
IMDB_HOME = 'C:/Users/mramire8/Documents/Research/Oracle confidence and Interruption/dataset/aclImdb/raw-data'
AVI_HOME = 'C:/Users/mramire8/Documents/Research/Oracle confidence and Interruption/dataset/sraa/sraa/sraa/partition1/data'
# AVI_HOME = 'C:/Users/mramire8/Documents/Research/Oracle confidence and Interruption/dataset/sraa/sraa/sraa/partition1/dummy'
TWITTER_HOME="C:/Users/mramire8/Documents/Datasets/twitter"
else:
IMDB_HOME = '/Users/maru/Dataset/aclImdb'
AVI_HOME = '/Users/maru/Dataset/aviation/data'
TWITTER_HOME="/Users/maru/Dataset/twitter"
def keep_header_subject(text, keep_subject=False):
"""
Given text in "news" format, strip the headers, by removing everything
before the first blank line.
"""
    _before, _blankline, after = text.partition('\n\n')
    sub = [l for l in _before.split("\n") if "Subject:" in l]
    if keep_subject and sub:
        final = sub[0] + "\n" + after
    else:
        final = after
    return final
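
# A minimal doctest-style sketch (hypothetical sample text) of keep_header_subject:
#   >>> msg = "From: a@b.c\nSubject: hi\n\nbody"
#   >>> keep_header_subject(msg)
#   'body'
#   >>> keep_header_subject(msg, keep_subject=True)
#   'Subject: hi\nbody'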
def load_20newsgroups(categories=None, vectorizer=CountVectorizer(min_df=5, max_df=1.0, binary=False), min_size=None,
fix_k=None, raw=False):
print "Loading 20 newsgroups dataset for categories:", categories
data = bunch.Bunch()
data.train = fetch_20newsgroups(subset='train', categories=categories, remove=('headers','footers', 'quotes'),
shuffle=True, random_state=42)
data.train.data = [keep_header_subject(text) for text in data.train.data]
data.test = fetch_20newsgroups(subset='test', categories=categories, remove=('headers','footers', 'quotes'),
shuffle=True, random_state=42)
data.test.data = [keep_header_subject(text) for text in data.test.data]
print 'data loaded'
categories = data.train.target_names
print "%d categories" % len(categories)
print
if not raw:
data = process_data(data, fix_k, min_size, vectorizer)
return data
def load_imdb(path, subset="all", shuffle=True, rnd=2356, vct=CountVectorizer(), fix_k=None, min_size=None, raw=False):
"""
load text files from IMDB movie reviews from folders to memory
:param path: path of the root directory of the data
:param subset: what data will be loaded, train or test or all
:param shuffle:
    :param rnd: random seed value
:param vct: vectorizer
:return: :raise ValueError:
"""
#analizer = vct.build_tokenizer()
# C:\Users\mramire8\Documents\Research\Oracle confidence and Interruption\dataset\aclImdb\raw-data
data = bunch.Bunch()
if subset in ('train', 'test'):
data[subset] = load_files("{0}/{1}".format(IMDB_HOME, subset), encoding="latin-1", load_content=True,
random_state=rnd)
elif subset == "all":
data["train"] = load_files("{0}/{1}".format(IMDB_HOME, "train"), encoding="latin-1", load_content=True,
random_state=rnd)
data["test"] = load_files("{0}/{1}".format(IMDB_HOME, "test"), encoding="latin-1", load_content=True,
random_state=rnd)
else:
raise ValueError(
"subset can only be 'train', 'test' or 'all', got '%s'" % subset)
if not raw:
data = process_data(data, fix_k, min_size, vct)
return data
def load_aviation(path, subset="all", shuffle=True, rnd=2356, vct=CountVectorizer(), fix_k=None, min_size=None, raw=False, percent=.5):
"""
    load text files from the SRAA aviation-auto dataset from folders to memory. The test fraction of the train-test split is set by the percent argument.
:param path: path of the root directory of the data
:param subset: what data will be loaded, train or test or all
:param shuffle:
:param rnd: random seed value
:param vct: vectorizer
:return: :raise ValueError:
"""
data = bunch.Bunch()
if subset in ('train', 'test'):
# data[subset] = load_files("{0}/{1}".format(AVI_HOME, subset), encoding="latin1", load_content=True,
# random_state=rnd)
raise Exception("We are not ready for train test aviation data yet")
elif subset == "all":
data = load_files(AVI_HOME, encoding="latin1", load_content=True,
random_state=rnd)
data.data = [keep_header_subject(text) for text in data.data]
# data["test"] = load_files("{0}/{1}".format(AVI_HOME, "test"), encoding="latin1", load_content=True,
# random_state=rnd)
else:
raise ValueError(
"subset can only be 'train', 'test' or 'all', got '%s'" % subset)
# train_x, test_x, train_y, test_y = train_test_split(data.data, data.target, test_size=0.25,
# random_state=rnd)
indices = ShuffleSplit(len(data.data), n_iter=1, test_size=percent, random_state=rnd)
for train_ind, test_ind in indices:
data = bunch.Bunch(train=bunch.Bunch(data=[data.data[i] for i in train_ind], target=data.target[train_ind],
target_names=data.target_names),
test=bunch.Bunch(data=[data.data[i] for i in test_ind], target=data.target[test_ind],
target_names=data.target_names))
# if shuffle:
# random_state = np.random.RandomState(rnd)
# indices = np.arange(data.train.target.shape[0])
# random_state.shuffle(indices)
# data.train.filenames = data.train.filenames[indices]
# data.train.target = data.train.target[indices]
# # Use an object array to shuffle: avoids memory copy
# data_lst = np.array(data.train.data, dtype=object)
# data_lst = data_lst[indices]
# data.train.data = data_lst.tolist()
if not raw:
data = process_data(data, fix_k, min_size, vct)
return data
## convert the tweet into a data format of text documents
# from sklearn.datasets.base import Bunch
def preprocess(string, lowercase, collapse_urls, collapse_mentions):
import re
if not string:
return ""
if lowercase:
string = string.lower()
# tokens = []
if collapse_urls:
string = re.sub('http\S+', 'THIS_IS_A_URL', string)
if collapse_mentions:
string = re.sub('@\S+', 'THIS_IS_A_MENTION', string)
# if prefix:
# tokens = ['%s%s' % (prefix, t) for t in tokens]
return string
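
# A small sketch (made-up tweet text) of what preprocess produces with all
# collapsing options enabled:
#   >>> preprocess("Check http://t.co/abc via @user", True, True, True)
#   'check THIS_IS_A_URL via THIS_IS_A_MENTION'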
def timeline_to_doc(user, *args):
tweets = []
for tw in user:
tweets.append(preprocess(tw['text'], *args))
return tweets
def user_to_doc(users, *args):
timeline = []
user_names = []
user_id = []
for user in users:
timeline.append(timeline_to_doc(user, *args))
user_names.append(user[0]['user']['name'])
user_id.append(user[0]['user']['screen_name'])
return user_id, user_names, timeline
def bunch_users(class1, class2, vct, lowercase, collapse_urls, collapse_mentions, rnd, class_name=None):
labels = None
if labels is None:
labels = [0,1]
user_id, user_names, timeline = user_to_doc(class1, lowercase, collapse_urls, collapse_mentions)
user_id2, user_names2, timeline2 = user_to_doc(class2, lowercase, collapse_urls, collapse_mentions)
target = [labels[0]] * len(user_id)
user_id.extend(user_id2)
user_names.extend(user_names2)
timeline.extend(timeline2)
target.extend([labels[1]] * len(user_id2))
user_text = ["######".join(t) for t in timeline]
data = bunch.Bunch(data=user_text, target=target, user_id=user_id,
user_name=user_names, user_timeline=timeline)
# data = {'data':timeline, 'target':np.array(target), 'user_id':user_id, 'user_name':user_names, 'user_text':user_text}
random_state = np.random.RandomState(rnd)
indices = np.arange(len(data.target))
random_state.shuffle(indices)
data.target = np.array(data.target)[indices]
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[indices]
data.data = data_lst.tolist()
data.user_id = np.array(data.user_id)[indices]
data.user_name = np.array(data.user_name)[indices]
data.user_timeline = np.array(data.user_timeline)[indices]
data.target_names = class_name
return data
import datetime
def get_date(date_str):
return datetime.datetime.strptime(date_str.strip('"'), "%a %b %d %H:%M:%S +0000 %Y")
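
# Example of get_date on a (hypothetical) created_at value in Twitter's format:
#   >>> get_date('"Wed Jan 15 10:30:00 +0000 2014"')
#   datetime.datetime(2014, 1, 15, 10, 30)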
def convert_tweet_2_data(data_path, vct, rnd):
"""
Convert tweet time lines into dataset
:param data_path:
:param vct:
:return: bunch.Bunch
Bunch with the data in train and test from twitter bots and human accounts
"""
good = get_tweets_file(data_path + "/good.json")
print "Real users %s" % (len(good))
bots = get_tweets_file(data_path + "/bots.json")
print "Bot users %s" % (len(bots))
gds = [g for g in good if get_date(g[0]['created_at']).year > 2013]
bts = [b for b in bots if get_date(b[0]['created_at']).year > 2013]
data = bunch_users(gds,bts, vct, True, True, True, rnd, class_name=['good', 'bots'])
return data
def get_tweets_file(path):
f = open(path)
i = 0
users = []
data=[]
last = 0
for line in f:
data = line.split("]][[")
last = len(data)
for i,tweets in enumerate(data):
if i == 0:
t = json.loads(tweets[1:] + "]")
elif i == (last-1):
t = json.loads("["+tweets[:-1])
else:
t = json.loads("["+tweets+"]")
users.append(t)
return users
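
# Note: get_tweets_file assumes each line of the file is a run of JSON arrays
# (one per user timeline) concatenated as [[...]][[...]]...[[...]]; that is why
# the split token is "]][[" and the first and last chunks are re-bracketed by
# hand before json.loads.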
def load_twitter(path, subset="all", shuffle=True, rnd=2356, vct=CountVectorizer(), fix_k=None, min_size=None, raw=False, percent=.5):
"""
load text files from twitter data
:param path: path of the root directory of the data
:param subset: what data will be loaded, train or test or all
:param shuffle:
:param rnd: random seed value
:param vct: vectorizer
:return: :raise ValueError:
"""
data = bunch.Bunch()
if subset in ('train', 'test'):
raise Exception("We are not ready for train test aviation data yet")
elif subset == "all":
data = convert_tweet_2_data(TWITTER_HOME, vct, rnd)
else:
raise ValueError(
"subset can only be 'train', 'test' or 'all', got '%s'" % subset)
indices = ShuffleSplit(len(data.data), n_iter=1, test_size=percent, random_state=rnd)
for train_ind, test_ind in indices:
data = bunch.Bunch(train=bunch.Bunch(data=[data.data[i] for i in train_ind], target=data.target[train_ind],
target_names=data.target_names),
test=bunch.Bunch(data=[data.data[i] for i in test_ind], target=data.target[test_ind],
target_names=data.target_names))
# if shuffle:
# random_state = np.random.RandomState(rnd)
# indices = np.arange(data.train.target.shape[0])
# random_state.shuffle(indices)
# data.train.target = data.train.target[indices]
# # Use an object array to shuffle: avoids memory copy
# data_lst = np.array(data.train.data, dtype=object)
# data_lst = data_lst[indices]
# data.train.data = data_lst.tolist()
if not raw:
data = process_data(data, fix_k, min_size, vct)
return data
ARXIV_HOME = 'C:/Users/mramire8/Documents/Datasets/arxiv'
def load_arxiv(path, categories=None, subset="all", shuffle=True, rnd=2356, vct=CountVectorizer(), fix_k=None, min_size=None, raw=False, percent=.5):
"""
    load text files from the arXiv dataset from folders to memory. The test fraction of the train-test split is set by the percent argument.
:param path: path of the root directory of the data
:param subset: what data will be loaded, train or test or all
:param shuffle:
:param rnd: random seed value
:param vct: vectorizer
:return: :raise ValueError:
"""
data = bunch.Bunch()
if subset in ('train', 'test'):
raise Exception("We are not ready for train test arxiv data yet")
elif subset == "all":
data = load_files(ARXIV_HOME, encoding="latin1", load_content=True,
random_state=rnd, categories=categories)
else:
raise ValueError(
"subset can only be 'train', 'test' or 'all', got '%s'" % subset)
indices = ShuffleSplit(len(data.data), n_iter=1, test_size=percent, random_state=rnd)
for train_ind, test_ind in indices:
data = bunch.Bunch(train=bunch.Bunch(data=[data.data[i] for i in train_ind], target=data.target[train_ind],
target_names=data.target_names),
test=bunch.Bunch(data=[data.data[i] for i in test_ind], target=data.target[test_ind],
target_names=data.target_names))
if not raw:
data = process_data(data, fix_k, min_size, vct)
return data
def load_dummy(path, subset="all", shuffle=True, rnd=2356, vct=CountVectorizer(), fix_k=None, min_size=None, raw=False):
"""
    load text files from a small dummy dataset from folders to memory
:param path: path of the root directory of the data
:param subset: what data will be loaded, train or test or all
:param shuffle:
:param rnd: random seed value
:param vct: vectorizer
:return: :raise ValueError:
"""
data = bunch.Bunch()
if subset in ('train', 'test'):
        data[subset] = load_files("{0}/{1}".format(path, subset), encoding="latin1", load_content=True, random_state=rnd)
    elif subset == "all":
        data["train"] = load_files("{0}/{1}".format(path, "train"), encoding="latin1", load_content=True,
                                   random_state=rnd)
        data["test"] = load_files("{0}/{1}".format(path, "test"), encoding="latin1", load_content=True, random_state=rnd)
else:
raise ValueError(
"subset can only be 'train', 'test' or 'all', got '%s'" % subset)
if not raw:
data = process_data(data, fix_k, min_size, vct)
return data
def process_data(data, fix_k, min_size, vct, verbose=True):
# create a fixed k version of the data
analizer = vct.build_tokenizer()
fixk = bunch.Bunch()
fixk.all = data.train.data
if fix_k is not None:
# TODO check the size by simple split or by analizer?
fixk.kwords = [" ".join(analizer(doc)[0:fix_k]) for doc in data.train.data]
#fixk.kwords = [" ".join(doc.split(" ")[0:fix_k]) for doc in data.train.data]
else:
fixk.kwords = data.train.data
print "Total Documents: %s" % len(fixk.kwords) if silent else ""
fixk.target = data.train.target
print "Minimum size: %s" % min_size if silent else ""
if min_size is not None:
filtered = [(x, y, z) for x, y, z in zip(data.train.data, fixk.kwords, fixk.target)
if len(analizer(x)) >= min_size]
fixk.all = [x[0] for x in filtered] # all words
fixk.kwords = [x[1] for x in filtered] # k words
fixk.target = np.array([x[2] for x in filtered], dtype=int) # targets
print "Fix k: %s" % fix_k if silent else ""
print "Docs left: %s" % len(fixk.all) if silent else ""
print "Vectorizing ..." if silent else ""
# add the target values
# add a field for the vectorized data
data.train.data = fixk.all # raw documents
try:
data.train.bow = vct.transform(fixk.all) # docs with all the words bow
except ValueError:
data.train.bow = vct.fit_transform(fixk.all) # docs with all the words bow
data.train.bowk = vct.transform(fixk.kwords) # docs with k words bow
data.train.kwords = fixk.kwords # docs with k words
data.train.target = fixk.target
    data.test.bow = vct.transform(data.test.data)  # test feature vectors (bag of words)
return data
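
# A minimal usage sketch of process_data (toy documents, hypothetical labels):
#   >>> d = bunch.Bunch(train=bunch.Bunch(data=["good movie", "bad movie"],
#   ...                                   target=np.array([1, 0])),
#   ...                 test=bunch.Bunch(data=["good plot"]))
#   >>> d = process_data(d, None, None, CountVectorizer())
#   >>> d.train.bow.shape[0], d.test.bow.shape[0]
#   (2, 1)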
def load_dataset(name, fixk, categories, vct, min_size, raw=False, percent=.5):
data = bunch.Bunch()
if "imdb" in name:
########## IMDB MOVIE REVIEWS ###########
        # data = bunch.Bunch(load_imdb(name, shuffle=True, rnd=2356, vct=vct, min_size=min_size, fix_k=fixk, raw=raw)) # should bring data as is
        data = load_imdb(name, shuffle=True, rnd=2356, vct=vct, min_size=min_size,
                         fix_k=fixk, raw=raw)  # should bring data as is
elif "aviation" in name:
########## sraa dataset ######
data = load_aviation(name, shuffle=True, rnd=2356, vct=vct, min_size=min_size,
fix_k=fixk, raw=raw, percent=percent)
elif "arxiv" in name:
        ########## arxiv dataset ######
data = load_arxiv(name, categories=categories, shuffle=True, rnd=2356, vct=vct, min_size=None,
fix_k=None, raw=raw, percent=percent)
elif "20news" in name:
########## 20 news groups ######
data = load_20newsgroups(categories=categories, vectorizer=vct, min_size=min_size,
fix_k=fixk, raw=raw)
elif "bgender" in name:
        ########## blog author gender ######
data = load_bloggender(name, shuffle=True, rnd=2356, vct=vct, min_size=min_size,
fix_k=fixk, raw=raw, percent=percent)
elif "gmo" in name:
########## article pro-con gmo ######
data = load_gmo(name, shuffle=True, rnd=2356, vct=vct, min_size=min_size,
fix_k=fixk, raw=raw, percent=percent)
elif "evergreen" in name:
########## evergreen content blogs ######
data = load_evergreen(name, shuffle=True, rnd=2356, vct=vct, min_size=min_size,
fix_k=fixk, raw=raw, percent=percent)
elif "pan" in name:
########## author gender classification from blogs ######
data = load_blogpan(name, shuffle=True, rnd=2356, vct=vct, min_size=min_size,
fix_k=fixk, raw=raw, percent=percent)
elif "webkb" in name:
# raise Exception("We are not ready for that data yet")
data = load_webkb(name, categories=categories, shuffle=True, rnd=2356, vct=vct, min_size=min_size,
fix_k=fixk, raw=raw, percent=percent)
elif "biocreative" in name:
# raise Exception("We are not ready for that data yet")
data = load_biocreative(name, shuffle=True, rnd=2356, vct=vct, min_size=min_size,
fix_k=fixk, raw=raw, percent=percent)
elif "twitter" in name:
# raise Exception("We are not ready for that data yet")
data = load_twitter(name, shuffle=True, rnd=2356, vct=vct, min_size=min_size,
fix_k=fixk, raw=raw, percent=percent)
elif "dummy" in name:
########## DUMMY DATA###########
data = load_dummy("C:/Users/mramire8/Documents/code/python/data/dummy", shuffle=True, rnd=2356,
vct=vct, min_size=0, fix_k=fixk, raw=raw)
else:
raise Exception("We do not know that dataset")
return data
def load_dictionary(datafile=""):
f = open(datafile)
with f:
line = f.readlines()
line = [l.strip() for l in line]
return line
def load_documents(datafile="", header=True):
f = open(datafile)
feature_names = []
if header:
feature_names = f.readline().split() # skip the header
# print ('HEADER NAMES: \n %s' % feature_names)
docs = []
with f:
# uniqueid truelabel text words seenwords avgtime
line = f.readlines()
docs = [l.strip().split('\t') for l in line]
#b = [ai for ai in a if ai % 2 == 0] # another way to do filtering when loading the datasets
return docs, feature_names
def load_from_file(train, categories, fixk, min_size, vct, raw=True):
fixk_saved = "{0}-MIN{1}.p".format(train, min_size)
try:
print "Loading existing file... %s " % train
fixk_file = open(fixk_saved, "rb")
data = pickle.load(fixk_file)
fixk_file.close()
# vectorizer = open("{0}vectorizer.p".format(train), "rb")
# vct = pickle.load(vectorizer)
# vectorizer.close()
except (IOError, ValueError):
print "Loading from scratch..."
data = load_dataset(train, fixk, categories[0], vct, min_size, percent=.5)
fixk_file = open(fixk_saved, "wb")
pickle.dump(data, fixk_file)
fixk_file.close()
# vectorizer = open("{0}vectorizer.p".format(train), "wb")
# pickle.dump(vct, vectorizer)
# vectorizer.close()
return data, vct
BLOGGEN_HOME = "C:/Users/mramire8/Documents/Datasets/textclassification/raw data/author-profiling-gender/gender/blog-gender-dataset.tsv"
def load_bloggender(path, subset="all", shuffle=True, rnd=2356, vct=CountVectorizer(), fix_k=None, min_size=None, raw=False, percent=.5):
import csv
docs = []
labels = []
clases = ['F', 'M']
with open(BLOGGEN_HOME, 'r') as csvfile:
reader = csv.reader(csvfile, delimiter='\t', quotechar='"')
for row in reader:
if len(row[0])>0 and len(row[1])>0:
docs.append(row[0])
labels.append(clases.index(row[1].strip().upper()))
data = bunch.Bunch()
data.data = docs
data.target=np.array(labels)
indices = ShuffleSplit(len(data.data), n_iter=1, test_size=percent, random_state=rnd)
for train_ind, test_ind in indices:
data = bunch.Bunch(train=bunch.Bunch(data=[data.data[i] for i in train_ind], target=data.target[train_ind], target_names=clases),
test=bunch.Bunch(data=[data.data[i] for i in test_ind], target=data.target[test_ind], target_names=clases))
if not raw:
data = process_data(data, fix_k, min_size, vct)
return data
PAN13_HOME = "C:/Users/mramire8/Documents/Datasets/textclassification/raw data/author-profiling-gender/gender/blogs/blogs"
def load_pan13(path, subset="all", shuffle=True, rnd=2356, vct=CountVectorizer(), fix_k=None, min_size=None, raw=False, percent=.5):
data = bunch.Bunch()
if subset in ('train', 'test'):
# data[subset] = load_files("{0}/{1}".format(AVI_HOME, subset), encoding="latin1", load_content=True,
# random_state=rnd)
raise Exception("We are not ready for train test aviation data yet")
elif subset == "all":
data = load_files(PAN13_HOME, encoding="latin1", load_content=True,
random_state=rnd)
data.data = [keep_header_subject(text) for text in data.data]
else:
raise ValueError(
"subset can only be 'train', 'test' or 'all', got '%s'" % subset)
    for iDoc in data.data:
        pass  # placeholder: no per-document post-processing implemented yet
if not raw:
data = process_data(data, fix_k, min_size, vct)
return data
EVERGREEN_HOME = "C:/Users/mramire8/Documents/Datasets/textclassification/raw data/evergreen"
def get_content(url):
g = Goose({'enable_image_fetching':False})
article = g.extract(url=url)
# article = g.extract(raw_html=url)
text = "{0} {1}".format(article.title, article.cleaned_text)
return text
def read_evergreenjs(filename):
import csv
docs = []
labels = []
# i =0
## EVERGREEN = 0, NON-EVERGREEN=1
with open(filename, 'r') as csvfile:
reader = csv.reader(csvfile, delimiter='\t', quotechar='"')
header = None
for row in reader:
# print row
if header is None:
header = row
else:
## Original boiler plate the text has not punctuation
# content = json.loads(row[header.index('boilerplate')])
# content['title']
# if len(content)>1 and content['body'] is not None:
# docs.append(content['body'])
# labels.append(int(row[header.index('label')]))
## EXTRACT BODY-ISH OF THE HTML FILE
url = "{0}/raw_content/{1}.".format(EVERGREEN_HOME, row[header.index('urlid')])
text = open(url).read()
soup = BeautifulSoup(text)
# print "*"*50
# remove non-text tags
for tag in ['script', 'style', 'a', 'img']:
for el in soup.find_all(tag):
el.extract()
extractor = Extractor(extractor='ArticleExtractor', html=unicode(soup.get_text()))
## ADD CONTENT AND LABEL TO THE LIST
docs.append(extractor.getText())
# docs.append(get_content(url))
labels.append(int(row[header.index('label')]))
# print i
# i+=1
return docs, labels
def load_evergreen(path, subset="all", shuffle=True, rnd=2356, vct=CountVectorizer(), fix_k=None, min_size=None, raw=False, percent=.5):
docs = []
labels = []
## EVERGREEN = 0, NON-EVERGREEN=1
clases = ['EVERGREEN', 'SEASONAL']
filename = "{0}/{1}".format(EVERGREEN_HOME, "train.tsv")
docs, labels = read_evergreenjs(filename)
# filename = "{0}/{1}".format(EVERGREEN_HOME, "test.tsv")
# docst, labelst = read_evergreenjs(filename)
# data = bunch.Bunch(train=bunch.Bunch(data=docs, target=np.array(labels)),
# test=bunch.Bunch(data=docst, target=np.array(labelst)))
data = bunch.Bunch()
data.data = docs
data.target=np.array(labels)
indices = ShuffleSplit(len(data.data), n_iter=1, test_size=percent, random_state=rnd)
for train_ind, test_ind in indices:
data = bunch.Bunch(train=bunch.Bunch(data=[data.data[i] for i in train_ind], target=data.target[train_ind], target_names=clases),
test=bunch.Bunch(data=[data.data[i] for i in test_ind], target=data.target[test_ind], target_names=clases))
if not raw:
data = process_data(data, fix_k, min_size, vct)
return data
def create_gmo(file):
docs, _ = load_documents(file, header=False)
content = []
iDoc = []
for line in docs:
text = line[0]
if "Document Number:" in text and len(iDoc)>0:
content.append("\n".join(iDoc))
iDoc = []
        iDoc.append(text)
    if iDoc:
        content.append("\n".join(iDoc))  # flush the trailing document, which the loop alone never emits
    return content
def load_gmo(path, subset="all", shuffle=True, rnd=2356, vct=CountVectorizer(), fix_k=None, min_size=None, raw=False, percent=.5):
GMO_HOME = "C:/Users/mramire8/Documents/Datasets/textclassification/raw data/gmo-hedging/GMOHedging_v1.0/gmo-anti/{}"
parts = create_gmo(GMO_HOME.format("anti_GMO"))
labels = np.zeros(len(parts))
parts.extend(create_gmo(GMO_HOME.format("pro_GMO")))
labels = np.append(labels, np.ones(len(parts)-len(labels)))
data = bunch.Bunch()
data.data = parts
data.target = labels
indices = ShuffleSplit(len(data.data), n_iter=1, test_size=percent, random_state=rnd)
for train_ind, test_ind in indices:
data = bunch.Bunch(train=bunch.Bunch(data=[data.data[i] for i in train_ind], target=data.target[train_ind]),
test=bunch.Bunch(data=[data.data[i] for i in test_ind], target=data.target[test_ind]))
if not raw:
data = process_data(data, fix_k, min_size, vct)
return data
def clean_xml_pan(xml_text, parser=None):
text = ""
# try:
root = ET.fromstring(xml_text, parser=parser)
for post in root.findall("post"):
text += "\n" + post.text.strip()
# except Exception:
# print xml_text
return text
def load_blogpan(path, subset="all", shuffle=True, rnd=2356, vct=CountVectorizer(), fix_k=None, min_size=None, raw=False, percent=.5):
"""
load text files from author gender profiling dataset from folders to memory.
:param path: path of the root directory of the data
:param subset: what data will be loaded, train or test or all
:param shuffle:
:param rnd: random seed value
:param vct: vectorizer
:return: :raise ValueError:
"""
PAN13_HOME = "C:/Users/mramire8/Documents/Datasets/textclassification/raw data/author-profiling-gender/gender-profiling/blogs/blogs"
data = bunch.Bunch()
if subset in ('train', 'test'):
# data[subset] = load_files("{0}/{1}".format(AVI_HOME, subset), encoding="latin1", load_content=True,
# random_state=rnd)
raise Exception("We are not ready for train test aviation data yet")
elif subset == "all":
data = load_files(PAN13_HOME, encoding="latin1", load_content=True,
random_state=rnd)
# parser = XMLParser(encoding="latin-1", recover=True)
parser = etree.XMLParser(recover=True)
data.data = [clean_xml_pan(text, parser=parser) for text in data.data]
# data["test"] = load_files("{0}/{1}".format(AVI_HOME, "test"), encoding="latin1", load_content=True,
# random_state=rnd)
else:
raise ValueError(
"subset can only be 'train', 'test' or 'all', got '%s'" % subset)
# train_x, test_x, train_y, test_y = train_test_split(data.data, data.target, test_size=0.25,
# random_state=rnd)
indices = ShuffleSplit(len(data.data), n_iter=1, test_size=percent, random_state=rnd)
for train_ind, test_ind in indices:
data = bunch.Bunch(train=bunch.Bunch(data=[data.data[i] for i in train_ind], target=data.target[train_ind]),
test=bunch.Bunch(data=[data.data[i] for i in test_ind], target=data.target[test_ind]))
# if shuffle:
# random_state = np.random.RandomState(rnd)
# indices = np.arange(data.train.target.shape[0])
# random_state.shuffle(indices)
# data.train.filenames = data.train.filenames[indices]
# data.train.target = data.train.target[indices]
# # Use an object array to shuffle: avoids memory copy
# data_lst = np.array(data.train.data, dtype=object)
# data_lst = data_lst[indices]
# data.train.data = data_lst.tolist()
if not raw:
data = process_data(data, fix_k, min_size, vct)
return data
# from sklearn.datasets import fetch_mldata
def load_biocreative(path, subset="all", shuffle=True, rnd=2356, vct=CountVectorizer(), fix_k=None, min_size=None, raw=False, percent=.5):
# target = []
# target_names = []
# filenames = []
#
# folders = [f for f in sorted(listdir(container_path))
# if isdir(join(container_path, f))]
#
# if categories is not None:
# folders = [f for f in folders if f in categories]
#
# for label, folder in enumerate(folders):
# target_names.append(folder)
# folder_path = join(container_path, folder)
# documents = [join(folder_path, d)
# for d in sorted(listdir(folder_path))]
# target.extend(len(documents) * [label])
# filenames.extend(documents)
#
# # convert to array for fancy indexing
# filenames = np.array(filenames)
# target = np.array(target)
#
# if shuffle:
# random_state = check_random_state(random_state)
# indices = np.arange(filenames.shape[0])
# random_state.shuffle(indices)
# filenames = filenames[indices]
# target = target[indices]
#
# if load_content:
# data = [open(filename, 'rb').read() for filename in filenames]
# if encoding is not None:
# data = [d.decode(encoding, decode_error) for d in data]
# return Bunch(data=data,
# filenames=filenames,
# target_names=target_names,
# target=target,
# DESCR=description)
#
# return Bunch(filenames=filenames,
# target_names=target_names,
# target=target,
# DESCR=description)
raise Exception("We are not ready for that data yet")
WEBKB_HOME='C:/Users/mramire8/Documents/Datasets/webkb/webkb'
def clean_html_text(html_text):
from bs4 import BeautifulSoup
soup = BeautifulSoup(html_text)
return soup.get_text()
def get_sub_filenames(input_dir):
names = []
for path, subdirs, files in os.walk(input_dir):
for filename in files:
names.append(os.path.join(path, filename))
return names
def load_files_sub(container_path, description=None, categories=None,
load_content=True, shuffle=True, encoding=None,
charset=None, charset_error=None,
decode_error='strict', random_state=0):
"""
    Adapted from sklearn's load_files, this loads files from directories and subdirectories
:param container_path:
:param description:
:param categories:
:param load_content:
:param shuffle:
:param encoding:
:param charset:
:param charset_error:
:param decode_error:
:param random_state:
:return:
"""
from os.path import isdir
from os import listdir
from os.path import join
target = []
target_names = []
filenames = []
## get the folders
folders = [f for f in sorted(listdir(container_path))
if isdir(join(container_path, f))]
# get categories
if categories is not None:
folders = [f for f in folders if f in categories]
for label, folder in enumerate(folders):
target_names.append(folder)
folder_path = join(container_path, folder)
## get all files from subfolders
documents = [join(folder_path, d)
for d in sorted(get_sub_filenames(folder_path))]
target.extend(len(documents) * [label])
filenames.extend(documents)
# convert to array for fancy indexing
filenames = np.array(filenames)
target = np.array(target)
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(filenames.shape[0])
random_state.shuffle(indices)
filenames = filenames[indices]
target = target[indices]
if load_content:
data = [open(filename, 'rb').read() for filename in filenames]
if encoding is not None:
data = [d.decode(encoding, decode_error) for d in data]
return bunch.Bunch(data=data,
filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
return bunch.Bunch(filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
def load_webkb(path, categories=None, subset="all", shuffle=True, rnd=2356, vct=CountVectorizer(), fix_k=None, min_size=None, raw=False, percent=.5):
"""
Read and process data from webkb dataset. Documents are files in html format
    :param path: location of the root directory of the data
:param categories: categories to load COURSE, DEPARTMENT, FACULTY, OTHER, PROJECT, STAFF, STUDENT
:param subset: --unused at the moment --
:param shuffle: --unused at the moment --
:param rnd: random seed value
:param vct: vectorizer for feature vector representation
    :param fix_k: truncate data at the k-th word, None if including all words
    :param min_size: minimum document size acceptable to load
    :param raw: return data without feature vectors
:param percent: Percentage to split train-test dataset e.g. .25 will produce a 75% training, 25% test
:return: Bunch :
.train.data text of data
.train.target target vector
.train.bow feature vector of full documents
.train.bowk feature of k-words documents
.train.kwords text of k-word documents
.test.data test text data
.test.target test target vector
.text.bow feature vector of test documents
:raise ValueError:
"""
data = bunch.Bunch()
if subset in ('train', 'test'):
# data[subset] = load_files("{0}/{1}".format(AVI_HOME, subset), encoding="latin1", load_content=True,
# random_state=rnd)
raise Exception("We are not ready for train test webkb data yet")
elif subset == "all":
data = load_files_sub(WEBKB_HOME, encoding="latin1", load_content=True, random_state=rnd)
data.data = [clean_html_text(text) for text in data.data]
else:
raise ValueError(
"subset can only be 'train', 'test' or 'all', got '%s'" % subset)
if categories is not None:
labels = [(data.target_names.index(cat), cat) for cat in categories]
# Sort the categories to have the ordering of the labels
labels.sort()
labels, categories = zip(*labels)
mask = np.in1d(data.target, labels)
data.filenames = data.filenames[mask]
data.target = data.target[mask]
# searchsorted to have continuous labels
data.target = np.searchsorted(labels, data.target)
data.target_names = list(categories)
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[mask]
data.data = data_lst.tolist()
if shuffle:
random_state = check_random_state(rnd)
indices = np.arange(data.target.shape[0])
random_state.shuffle(indices)
data.filenames = data.filenames[indices]
data.target = data.target[indices]
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[indices]
data.data = data_lst.tolist()
indices = ShuffleSplit(len(data.data), n_iter=1, test_size=percent, random_state=rnd)
for train_ind, test_ind in indices:
data = bunch.Bunch(train=bunch.Bunch(data=[data.data[i] for i in train_ind], target=data.target[train_ind],
target_names=data.target_names),
test=bunch.Bunch(data=[data.data[i] for i in test_ind], target=data.target[test_ind],
target_names=data.target_names))
if not raw:
data = process_data(data, fix_k, min_size, vct)
return data
def split_data(data, splits=2.0, rnd=987654321):
"""
:param data: is a bunch with data.data and data.target
    :param splits: number of splits (translates into the test-set percentage)
    :param rnd: random seed
:return: two bunches with the split
"""
percent = 1.0 / splits
indices = ShuffleSplit(len(data.data), n_iter=1, test_size=percent, random_state=rnd)
part1 = bunch.Bunch()
part2 = bunch.Bunch()
for train_ind, test_ind in indices:
part1 = bunch.Bunch(train=bunch.Bunch(data=[data.data[i] for i in train_ind], target=data.target[train_ind]))#, target_names=data.target_names))
part2 = bunch.Bunch(train=bunch.Bunch(data=[data.data[i] for i in test_ind], target=data.target[test_ind])) #, target_names=data.target_names))
return part1, part2
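
# A minimal usage sketch of split_data; note that both returned bunches wrap
# their share of the data under .train:
#   >>> whole = bunch.Bunch(data=["a", "b", "c", "d"], target=np.array([0, 1, 0, 1]))
#   >>> half1, half2 = split_data(whole, splits=2.0)
#   >>> len(half1.train.data) + len(half2.train.data)
#   4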
|
UTF-8
|
Python
| false | false | 2,014 |
3,066,606,677,127 |
25194ea88fc81908f7486408ef40816a38991501
|
9e1990e0d84387ee2a93855b11b927f34d93b83c
|
/fibonacci.py
|
9a5fcd8ae0625609035648f8e4da8114217d833b
|
[] |
no_license
|
nobu4869/python_test
|
https://github.com/nobu4869/python_test
|
4ba32040e57a78bb04982c6321306dccffcdf68d
|
f894539fd69442125ba0e93f84e16598a8dc55f1
|
refs/heads/master
| 2021-01-25T10:07:28.676689 | 2014-07-20T00:25:09 | 2014-07-20T00:25:09 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
__author__ = 'yamakido.n'
def fib(n):
result = []
a,b = 0,1
    print 'a =', a
    print 'b =', b
    while b < n:
print b
result.append(a)
a,b=b,a+b
return result
f100 = fib(100)
print f100
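# Expected output of the print above: [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
# (the function appends a, the value *before* each step, while b < 100).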
|
UTF-8
|
Python
| false | false | 2,014 |
8,504,035,261,338 |
ed22788d53c516d89a451842fcd980594fa652f0
|
9a8eda5cd93c581f5c367a059b66c2bf28d4df21
|
/project.py
|
c6fc3bd46a3c86e86135799547ef9e43296bf30a
|
[] |
no_license
|
matts1/ScrollingShooter
|
https://github.com/matts1/ScrollingShooter
|
160403cef41d0782127ad0843ba58f9d6dfa4187
|
fc5772fd97a71bbe6bfc5be8e5e6c1e0e12639f3
|
refs/heads/master
| 2021-01-10T20:37:08.171452 | 2012-10-31T10:51:34 | 2012-10-31T10:51:34 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#pixel collision checking, difficulty level
import pyglet
from pyglet.window import key
from random import randint
import math
from chars import CharSprite
from bullets import BulletSprite
from functions import in_bounds, highscore, detect_collision, Rectangle, DrawHealthBar, force
from enemy import Enemy, spawn_enemy
from effects import BackGround, Door, BACK
SPEED = 5
FLOOR = 220
class Window(pyglet.window.Window):
def __init__(self):
# Call the superclass's constructor.
super(Window, self).__init__()
self.set_fullscreen()
self.keys = key.KeyStateHandler()
self.push_handlers(self.keys)
pyglet.clock.schedule_interval(self.update, 0.02)
#sound
self.player = pyglet.media.Player()
self.music = pyglet.media.load('sounds/bg2.wav', None, False)
self.player.queue(self.music)
self.player.eos_action = 'loop'
self.player.play()
self.gunshot = pyglet.resource.media('sounds/shoot.wav', streaming=False)
#plane
self.char = CharSprite()
self.char.x = self.width / 2
self.char.y = self.char.floor
self.mousex = self.char.x + 50
self.mousey = self.char.y + (self.char.height / 2)
#bullets
self.bullets = []
self.can_shoot = 1.0
self.leftClick = 0
#weapons
self.weapons = {
#name type(0=gun, 1=melee), recoil, damage, ROF (frames between shot)
"pistol": [0, 30, 50, 15],
"ak47": [0, 20, 35, 10],
"sword": [1, 50, 50, 20],
"ram": [1, 50, 100, 30]
}
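        # e.g. self.weapons[self.selected[0]][3] gives the equipped weapon's
        # rate of fire in frames between shots (15 for the pistol).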
self.selected = ["pistol", "sword"]
        self.oldrecoil = self.recoil = self.olddif = 0  # olddif: recoil delta between frames
self.scroll = 1
self.toaffect = []
#enemies
self.enemies = []
self.spawn_time = 31
#background
self.bg = [BACK(), BACK()]
self.bg[1].x = self.bg[0].width
for bg in self.bg:
bg.scale = float(self.height) / bg.height
self.totx = self.char.x
self.backs = []
back = BackGround()
for x in xrange(0, self.width + back.width, back.width):
back = BackGround()
back.x = x
self.backs.append(back)
#labels
self.score = 0
self.scorelabel = pyglet.text.Label("Score: 0",
anchor_y = 'top', y = self.height,
color = (255, 0, 0, 255), font_size = 16)
self.credits = [pyglet.text.Label("Graphics: Ruby Yu",
anchor_y = 'bottom', y = 60,
color = (255, 0, 0, 255), font_size = 12),
pyglet.text.Label("Documentation: Ruby Yu",
anchor_y = 'bottom', y = 40,
color = (255, 0, 0, 255), font_size = 12),
pyglet.text.Label("Programming: Matt Stark",
anchor_y = 'bottom', y = 20,
color = (255, 0, 0, 255), font_size = 12),
pyglet.text.Label("Music: Eitan Muir",
anchor_y = 'bottom', y = 0,
color = (255, 0, 0, 255), font_size = 12)
]
self.time = 0
self.health = 100
#health
self.healthbars = []
###############################################################################
def update(self, dt):
#char
self.char.update(self.keys, self.mousex, self.mousey)
for val, d in {"A": -1, "D": 1}.items():
if eval("self.keys[key." + val + "]"):
for bg in self.backs:
bg.x -= d * SPEED
for enemy in self.enemies:
enemy.x -= d * SPEED
for bullet in self.bullets:
bullet.x -= d * SPEED
self.time += 10
#BG
if self.backs[0].x > 0:
self.backs = [self.backs.pop(-1)] + self.backs
self.backs[0].x = self.backs[1].x - self.backs[0].width
if self.backs[-1].x + self.backs[-1].width < self.width:
self.backs = self.backs + [self.backs.pop(0)]
self.backs[-1].x = self.backs[-1].x + self.backs[0].width
#bullets and boundaries
bulletremove = []
enemyremove = []
for bullet in self.bullets:
bullet.update()
if bullet.y < FLOOR or not in_bounds(bullet, self.height, self.width, bullet.width / 2, bullet.height / 2)[0]:
bulletremove.append(bullet)
#enemies
self.spawn_time += 1
if self.spawn_time > 20:
enemy = spawn_enemy(self.width)
if enemy:
self.enemies.append(enemy)
self.spawn_time = 0
for enemy in self.enemies:
enemy.update(3, self.char.x, self.time)
#collision
self.healthbars = []
w = 75
for enemy in self.enemies:
for bullet in self.bullets:
if detect_collision(bullet.x, bullet.y, enemy):
if bullet not in bulletremove:
bulletremove.append(bullet)
enemy.health -= self.weapons[self.selected[0]][2]
collideleft = detect_collision(enemy.x - (enemy.width / 2), enemy.y + (enemy.height / 2), self.char)
collideright = detect_collision(enemy.x + (enemy.width / 2), enemy.y + (enemy.height / 2), self.char)
if collideleft or collideright:
self.health -= 15
enemy.health = 0
self.score -= 15
enemy.health = max(enemy.health, 0)
if enemy.health == 0 and enemy not in enemyremove:
enemyremove.append(enemy)
self.score += 15
if detect_collision(self.mousex, self.mousey, enemy):
self.healthbars.append(DrawHealthBar(enemy.x + (w / 2), enemy.y - 15, w, 15, enemy.health, 1, enemy.maxhealth))
for enemy in enemyremove:
self.enemies.remove(enemy)
for bullet in bulletremove:
self.bullets.remove(bullet)
#weapons
if self.scroll == 0 or self.keys[key.E]:
self.selected = self.selected[::-1]
self.char.arm.changeimg(self.selected[0], self.char.x - self.mousex)
self.scroll += 1
self.recoil = max(0, force(self.recoil, 0, 1.75))
mul = 1
if self.char.x < self.mousex:
mul *= -1
if self.weapons[self.selected[0]][0] == 1:
mul *= -1
self.char.arm.rotation += self.recoil * mul
#weapon upgrades
if self.score >= 200 and "pistol" in self.selected:
self.selected[self.selected.index("pistol")] = "ak47"
self.char.arm.changeimg(self.selected[0], self.char.x - self.mousex)
if self.score >= 200 and "sword" in self.selected:
self.selected[self.selected.index("sword")] = "ram"
self.char.arm.changeimg(self.selected[0], self.char.x - self.mousex)
#us shooting
self.can_shoot += 1
if self.leftClick and self.can_shoot >= self.weapons[self.selected[0]][3]:
if self.weapons[self.selected[0]][0] == 0:
bullet = BulletSprite()
bullet.x = self.char.arm.x
bullet.y = self.char.arm.y
bullet.rotation = 90 + self.char.arm.rotation
if self.char.x > self.mousex:
bullet.rotation += 180
self.bullets.append(bullet)
self.gunshot.play()
self.can_shoot = 0
self.recoil += self.weapons[self.selected[0]][1]
if self.weapons[self.selected[0]][0] == 1:
if self.recoil < self.oldrecoil and abs(self.olddif) == self.olddif:
#at the end of the swing
add = 150
if self.char.x > self.mousex:
add *= -1
for enemy in self.enemies:
for x in xrange(self.char.x, self.char.x + add, (add / abs(add))* 20):
if detect_collision(x, self.char.y + (self.char.height / 2), enemy):
enemy.fly(20 * (add / abs(add)), 15)
enemy.health -= self.weapons[self.selected[0]][2]
break
self.olddif = self.recoil - self.oldrecoil
self.oldrecoil = self.recoil
#labels
self.scorelabel.text = "Score: " + str(self.score)
if self.health < 0:
self.health = 0
self.healthbars.append(DrawHealthBar(self.width - 10, self.height - 10, 200, 30, self.health))
#closing window
if self.health <= 0:
self.player.pause()
pyglet.window.Window.on_close(self)
highscore(self.score)
################################################################################
def on_draw(self):
# Clear what was drawn last frame.
self.clear()
for bg in self.bg:
bg.draw()
for back in self.backs:
back.draw()
for enemy in self.enemies:
enemy.draw()
for bullet in self.bullets:
bullet.draw()
self.char.draw()
self.char.arm.draw()
for bar in self.healthbars:
bar.draw()
self.scorelabel.draw()
for credit in self.credits:
credit.draw()
def on_mouse_motion(self, x, y, dx, dy):
self.mousex = x
self.mousey = y
def on_mouse_drag(self, x, y, dx, dy, button, mod):
self.mousex = x
self.mousey = y
def on_mouse_press(self, x, y, button, modifiers):
if button == 1:
self.leftClick = 1
def on_mouse_release(self, x, y, button, modifiers):
if button == 1:
self.leftClick = 0
def on_mouse_scroll(self, x, y, scroll_x, scroll_y):
self.scroll = 0
win = Window()
pyglet.app.run()
|
UTF-8
|
Python
| false | false | 2,012 |
9,019,431,336,785 |
09fd459b3bee99c84b03f99bb949afc75e93f73c
|
971b919a2ffc9ada9834bfa985919c0e492848ba
|
/flip
|
08e6ea7a1334d01e391fa42fafec2a8dab3b895c
|
[] |
no_license
|
rath/toys
|
https://github.com/rath/toys
|
d0c82f1c877fa759ed8c5171983dd877defd16ac
|
56260e0a17d015b0fa587c24948851b208eff158
|
refs/heads/master
| 2021-01-21T12:39:34.509810 | 2014-06-04T18:56:07 | 2014-06-04T18:56:07 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
FLIP_RESOURCE = """
a - ɐ
b - q
c - ɔ
d - p
e - ə,∃
f - ɟ,Ⅎ
g - ƃ
h - ɥ,H
i - ı,I
j - ɾ
k - ʞ
l - l
m - ɯ
n - u
o - o
p - d
q - b
r - ɹ
s - s
t - ʇ
u - n
v - ʌ
w - ʍ
x - x
y - ʎ
z - z"""
def prepare_flip_map():
ret = {}
for record in FLIP_RESOURCE[1:].split('\n'):
record = record.strip()
ch_key, ch_value = record.split(' - ')
values = ch_value.split(',')
ret[ch_key] = {'lower': values[0]}
if len(values)>1:
ret[ch_key]['upper'] = values[1]
else:
ret[ch_key]['upper'] = values[0]
return ret
FLIP_MAP = prepare_flip_map()
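# For example, FLIP_MAP['e'] is {'lower': 'ə', 'upper': '∃'}: in each resource
# line, the first comma-separated value is the lowercase flip and the second,
# when present, the uppercase one (otherwise the lowercase form is reused).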
if __name__=='__main__':
p = sys.stdout.write
for input_ch in ' '.join(sys.argv[1:])[::-1]:
key = input_ch.lower()
if key in FLIP_MAP:
value = FLIP_MAP[key]
if input_ch.isupper():
p(value['upper'])
else:
p(value['lower'])
else: p(input_ch)
|
UTF-8
|
Python
| false | false | 2,014 |
6,021,544,178,536 |
8a247c9829c53063866d315be7f95e1bbaf03260
|
07091f53e29efabba7e9a13b9b28651fe85c7912
|
/scripts/object/intangible/vehicle/barc_speeder_imperial_pcd.py
|
d0506fa13bf570291aefe63341a498a0012af9b4
|
[
"LGPL-3.0-only",
"GPL-1.0-or-later"
] |
non_permissive
|
Undercova/NGECore2
|
https://github.com/Undercova/NGECore2
|
377d4c11efba071e313ec75b3c2d864089733dc4
|
16d52e678201cab7c6e94924050ae1fc4a40de95
|
refs/heads/master
| 2019-01-03T17:35:40.610143 | 2014-11-09T03:34:03 | 2014-11-09T03:34:03 | 26,386,905 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import sys
def setup(core, object):
object.setAttachment('radial_filename', 'datapad/vehicle_pcd')
object.setIntAttribute('no_trade', 1)
object.setStringAttribute('faction_restriction', 'Imperial')
return
|
UTF-8
|
Python
| false | false | 2,014 |
10,385,230,944,007 |
293733f3a995cf0976d7a3fd3adde5509ffdd728
|
35037a8d006191d670972aa353ba85ac242e9c33
|
/artmaps/artmaps.wsgi
|
8f513bd95dc03475d1c94e9e21ad47aed3b4e287
|
[] |
no_license
|
howdiz/artMap
|
https://github.com/howdiz/artMap
|
7d281a5cbaf57e55b9e58c58217df06f76f21762
|
143c88669c4c9d453022bda57b3920987eb2507f
|
refs/heads/master
| 2021-01-19T02:13:50.717902 | 2011-05-14T18:04:35 | 2011-05-14T18:04:35 | 318,637 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
import sys
sys.path = ['/home/echoability/webapps/django', '/home/echoability/webapps/django/lib/python2.5'] + sys.path
from django.core.handlers.wsgi import WSGIHandler
os.environ['DJANGO_SETTINGS_MODULE'] = 'artmaps.settings'
application = WSGIHandler()
|
UTF-8
|
Python
| false | false | 2,011 |
12,687,333,421,543 |
3d297e5b178684e5e9ea89da68e2891ea9a8ff89
|
40147d4f926d9d6339e8774e6a3473dc33de9fcb
|
/ebookr.py
|
3282a8d659c857f5eff7e504c878d65806d193c1
|
[] |
no_license
|
jensechu/ebookr
|
https://github.com/jensechu/ebookr
|
b321b4228d53dc12545b7547af0fec24e2bf6a18
|
3e4dfa81fe0648ad825aeb706ac7cd54efb8b986
|
refs/heads/master
| 2020-05-18T02:06:45.413851 | 2011-03-20T14:17:56 | 2011-03-20T14:17:56 | 1,319,531 | 4 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from sys import argv
import codecs, re, os
# Prompts for a file name if none is given.
if len(argv) != 2:
file_name = raw_input("File name: ")
else:
file_name = argv[1]
# Extract text and close the file
book_open = codecs.open(file_name, "r+", "utf-8")
book_read = book_open.read()
book_open.close()
# Checks if file has already been ebooked
def check(booked):
search = booked.find("ebooked")
if search == -1:
decoder(booked)
print "REPLACED YOUR STUFF"
else:
print "Your book has already been ebooked."
# Creates a .pdf with LaTeX from the ebooked file_name
def create_tex(ebooked):
    print ebooked
    os.system("pdflatex %s" % ebooked)
# LaTeX character replacements
def decoder(book_new):
# Adds the LaTeX preamble and closer to the book
latex_preamble = '''
\documentclass[12pt]{book} \n \n
%Packages \n
\usepackage[usenames,dvipsnames]{color} \n
\usepackage[top=1.5in, bottom=1.5in, left=1.75in, right=1.75in]{geometry} \n
\usepackage[utf8]{inputenc} \n \n
%Book Properties\n
\linespread{1.3} \n\n
%Begin
\\begin{document} \n
%Centers the page number
\pagestyle{plain}
%Deletes extra pages
\let\cleardoublepage\clearpage
'''
book_tex = latex_preamble + book_new + "\n \end{document}"
# Replace double quotes
double_quotes = re.compile(r'"(.+?)"', re.MULTILINE | re.DOTALL)
    book_tex = double_quotes.sub(r'\\textquotedblleft \1\\textquotedblright \\textcolor{White}{a} ', book_tex)
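    # For example (hypothetical input), '"hi" there' becomes
    # '\textquotedblleft hi\textquotedblright \textcolor{White}{a}  there':
    # straight quotes are swapped for LaTeX curly quotes and an invisible
    # white 'a' is appended, apparently as a spacing workaround.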
    # Write new code to file
    book_open = codecs.open(file_name, "w", "utf-8")
    book_open.write(book_tex)
    book_open.close()
    # Mark the file as processed so check() skips it next time
    book_add = codecs.open(file_name, "a", "utf-8")
    book_add.write("% ebooked")
    book_add.close()
check(book_read)
create_tex(file_name)
|
UTF-8
|
Python
| false | false | 2,011 |
10,849,087,420,834 |
7b1d9c3ea0c0093bee234dac4bc326694b993a7d
|
46e62006f442bccd62d6edad058deef147e78678
|
/zoom/db.py
|
a93a079e58e1b7be0c86971216db59f8103f29ca
|
[
"GPL-3.0-or-later"
] |
non_permissive
|
hwaring/python-zoom
|
https://github.com/hwaring/python-zoom
|
d408a9588e4e44d0e4718c1573bc46cc18fd6288
|
59ae9895cc378be18f3caecde7e42ad477a75499
|
refs/heads/master
| 2017-04-29T19:42:51.754423 | 2014-07-29T17:08:40 | 2014-07-29T17:08:40 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
db
a database module that does less
"""
import MySQLdb
import warnings
import time  # needed by the debug timing in Database.__call__
ARRAY_SIZE = 1000
def ResultIter(cursor, arraysize=ARRAY_SIZE):
while True:
results = cursor.fetchmany(arraysize)
if not results:
break
for result in results:
yield result
class Result(object):
def __init__(self, cursor):
self.cursor = cursor
def __iter__(self):
return ResultIter(self.cursor)
def __len__(self):
return self.cursor.rowcount
def __repr__(self):
return repr(list(self))
def first(self):
for i in self: return i
class Database(object):
"""
database object
>>> db = database(host='database', db='test', user='testuser', passwd='password')
>>> db('drop table if exists person')
0L
>>> db(\"\"\"
... create table if not exists person (
... id int not null auto_increment,
... name varchar(100),
... age smallint,
... kids smallint,
... birthdate date,
... salary decimal(8,2),
... PRIMARY KEY (id)
... )
... \"\"\")
0L
>>> for r in db('describe person'):
... print r
('id', 'int(11)', 'NO', 'PRI', None, 'auto_increment')
('name', 'varchar(100)', 'YES', '', None, '')
('age', 'smallint(6)', 'YES', '', None, '')
('kids', 'smallint(6)', 'YES', '', None, '')
('birthdate', 'date', 'YES', '', None, '')
('salary', 'decimal(8,2)', 'YES', '', None, '')
>>> db("insert into person (name, age) values ('Joe',32)")
1L
>>> db('select * from person')
[(1L, 'Joe', 32, None, None, None)]
>>> from decimal import Decimal
>>> amt = Decimal('1234.56')
>>> db("insert into person (name, salary) values ('Pat',%s)", amt)
2L
>>> for r in db('select * from person'):
... print r
(1L, 'Joe', 32, None, None, None)
(2L, 'Pat', None, None, None, Decimal('1234.56'))
>>> db('select * from person where id=2')
[(2L, 'Pat', None, None, None, Decimal('1234.56'))]
>>> t = list(db('select * from person where id=2'))[0][-1]
>>> t
Decimal('1234.56')
>>> assert amt == t
>>> db('drop table person')
0L
>>> failed = False
>>> try:
... db('select * from person where id=2')
... except MySQLdb.ProgrammingError, m:
... failed = True
>>> assert failed
"""
def __init__(self, factory, *args, **keywords):
"""Initialize with factory method to generate DB connection
(e.g. odbc.odbc, cx_Oracle.connect) plus any positional and/or
keyword arguments required when factory is called."""
self.__connection = None
self.__factory = factory
self.__args = args
self.__keywords = keywords
self.__debug = 0
def __getattr__(self, name):
if self.__connection is None:
self.__connection = self.__factory(*self.__args, **self.__keywords)
return getattr(self.__connection, name)
def __call__(self, sql, *a, **k):
cursor = self.cursor()
if self.__debug:
start = time.time()
try:
result = cursor.execute(sql, a)
finally:
if self.__debug:
                print 'SQL (%s): %s - %s<br>\n' % (time.time()-start, sql, a)
if cursor.description:
return Result(cursor)
else:
self.lastrowid = cursor.lastrowid
return self.lastrowid
def execute_many(self, sql, *a):
cursor = self.cursor()
if self.__debug:
start = time.time()
try:
result = cursor.executemany(sql, *a)
finally:
if self.__debug:
print 'SQL (%s): %s - %s<br>\n' % (time.time()-start, sql, a)
if cursor.description:
return Result(cursor)
else:
self.lastrowid = cursor.lastrowid
return self.lastrowid
def database(engine='mysql', host='database', db='test', user='testuser', *a, **k):
if engine == 'mysql':
db = Database(MySQLdb.connect, host=host, db=db, user=user, *a, **k)
db.autocommit(1)
return db
def get_mysql_log_state():
for rec in db('show variables like "log"'):
return rec[1]
def set_mysql_log_state(new_state):
if new_state in ['ON','OFF']:
db('SET GLOBAL general_log = %s;', new_state)
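
# A minimal usage sketch (hypothetical credentials). Note that the two
# log-state helpers above assume a module-level connection named `db`,
# which this module does not create by itself:
#   db = database(host='localhost', db='test', user='testuser', passwd='secret')
#   for row in db('select 1'):
#       print row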
|
UTF-8
|
Python
| false | false | 2,014 |
6,889,127,582,807 |
c572d675b4121fce1a520b92b33e7d47e639d7a0
|
be37ef1410e41e18bd6e8da77f95812bed4f5743
|
/tracker.py
|
b0a23cef85f2d9e95f1b60b69f470b7fb9c97d17
|
[] |
no_license
|
Aparna91/Attendance-Tracker
|
https://github.com/Aparna91/Attendance-Tracker
|
b9ff6a6233b946b10653f271b53f8bff214037f4
|
ecf3d3c3f429f73207f41b5ab27e3f900de564d4
|
refs/heads/master
| 2016-09-06T19:41:20.592150 | 2012-08-31T09:13:58 | 2012-08-31T09:13:58 | 5,626,732 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
import Tkinter
import sqlite3
import types
import globals
class Table:
def __init__(self, conn, tbl_name):
self.conn = sqlite3.connect(globals.db_path)
self.cursor = self.conn.cursor()
self.tbl_name = tbl_name
def insert1(self,t1):
query = "insert into %s ('Mon','Tue','Wed','Thu','Fri','Sat','Sun') values %s"%(self.tbl_name,str(t1))
self.cursor.execute(query)
self.conn.commit()
def insert2(self,t1):
query = "insert into %s ('SlotId','CourseId') values %s"%(self.tbl_name,str(t1))
self.cursor.execute(query)
self.conn.commit()
def insert3(self,t1):
query = "insert into %s ('CourseId','Title','DueDate','Desc') values %s"%(self.tbl_name,str(t1))
self.cursor.execute(query)
self.conn.commit()
def insert4(self,t1):
query = "insert into %s ('CourseId','CourseName','ProfName','RoomNo','Credits') values %s"%(self.tbl_name,str(t1))
self.cursor.execute(query)
self.conn.commit()
def insert5(self,t1):
query = "insert into %s ('SlotId','Date') values %s"%(self.tbl_name,str(t1))
self.cursor.execute(query)
self.conn.commit()
def update(self, id_, **kwargs):
updations = []
for (k,v) in kwargs.items():
k = str(k)
if type(v) == str:
updations.append( '%s = "%s"'%(k,v))
else:
updations.append( '%s = %s'%(k,str(v)))
update_values = ' ,'.join(updations)
print update_values
query = 'update %s set %s where Id = %d'%(self.tbl_name,update_values,id_)
print query
self.cursor.execute(query)
self.conn.commit()
def remove(self, id_):
self.cursor.execute('delete from %s where Id = (%s)'%(self.tbl_name,id_))
self.conn.commit()
def get(self, conditions):
query = 'select * from %s where %s'%(self.tbl_name,str(conditions))
#print query
self.cursor.execute(query)
self.str1 = self.cursor.fetchone()
self.conn.commit()
def count(self,conditions):
self.cursor.execute('select count(*) from %s group by %s'%(self.tbl_name,conditions))
self.ct = self.cursor.fetchone()
self.conn.commit()
def join(self,tbl_name2,com_col,check_col,value):
self.cursor.execute('select * from %s,%s where %s.%s = %s.%s and %s = %s'%(self.tbl_name, tbl_name2, self.tbl_name, com_col, tbl_name2, com_col, check_col, value))
self.str2 = self.cursor.fetchone()
self.conn.commit()
class TimeTable(Table):
def insert(self, mon = "", tue = "", wed = "", thu = "", fri = "", sat = "", sun = ""):
Table.insert1(self,(mon,tue,wed,thu,fri,sat,sun))
def remove(self, id_):
Table.remove(self, id_)
def update(self, id_, **kwargs):
Table.update(self, (id_), **kwargs)
def get(self, conditions):
Table.get(self, conditions)
def count(self,conditions):
Table.count(self,conditions)
def join(self,tbl_name2,com_col,check_col,value):
Table.join(self,tbl_name2,com_col,check_col,value)
class SlotMap(Table):
def insert(self, slot_id, course_id):
Table.insert2(self, (slot_id, course_id))
def remove(self, id_):
Table.remove(self, id_)
def update(self, id_, **kwargs):
Table.update(self, id_, **kwargs)
def get(self, conditions):
Table.get(self, conditions)
def count(self,conditions):
Table.count(self,conditions)
def join(self,tbl_name2,com_col,check_col,value):
Table.join(self,tbl_name2,com_col,check_col,value)
class Assignment(Table):
def insert(self, course_id, title, duedate, desc):
Table.insert3(self, (course_id, title, duedate, desc))
def remove(self, id_):
Table.remove(self, (id_))
def remove1(self, date):
query = 'delete from %s where DueDate = %s'%(self.tbl_name,str(date))
print query
self.cursor.execute(query)
print "Removed"
self.conn.commit()
def update(self, id_, **kwargs):
Table.update(self, id_, **kwargs)
def get(self, conditions):
Table.get(self, conditions)
def getall(self,conditions):
self.cursor.execute('select * from %s where %s' %(self.tbl_name,conditions))
self.str4 = self.cursor.fetchall()
self.conn.commit()
def count(self,conditions):
Table.count(self,conditions)
def join(self,tbl_name2,com_col,check_col,value):
Table.join(self,tbl_name2,com_col,check_col,value)
class CourseDetails(Table):
def insert(self, course_id, course_name, prof_name, room_no, credits):
Table.insert4(self, (course_id, course_name, prof_name, room_no, credits))
def remove(self, id_):
Table.remove(self, (id_))
def update(self, id_, **kwargs):
Table.update(self, id_, **kwargs)
def get(self, conditions):
Table.get(self, conditions)
def count(self,conditions):
Table.count(self,conditions)
def join(self,tbl_name2,com_col,check_col,value):
Table.join(self,tbl_name2,com_col,check_col,value)
class Absences(Table):
def insert(self, slot_id, date):
Table.insert5(self,(slot_id, date))
def remove1(self, date, slot_name):
date1 = '\'' + date + '\''
name = '\'' + slot_name + '\''
query = 'delete from %s where Date = %s and SlotId = %s'%(self.tbl_name,str(date1),str(name))
print query
self.cursor.execute(query)
print "Removed"
self.conn.commit()
def get(self,conditions):
Table.get(self,conditions)
def getall(self,conditions):
self.cursor.execute('select * from %s where %s' %(self.tbl_name,conditions))
self.str3 = self.cursor.fetchall()
self.conn.commit()
def count(self,conditions):
Table.count(self,conditions)
def join(self,tbl_name2,com_col,check_col,value):
Table.join(self,tbl_name2,com_col,check_col,value)
t1 = TimeTable("conn", "TimeTable")
t2 = SlotMap("conn", "SlotMap")
t3 = Assignment("conn", "Assignment")
t4 = CourseDetails("conn", "Coursedet")
t5 = Absences("conn", "Absences")
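# A minimal side-by-side sketch (not part of the original module): the classes
# above build SQL with string interpolation, which forces manual quoting (see
# Absences.remove1). sqlite3 parameter binding handles the quoting instead:
def _param_binding_demo():
    import sqlite3
    demo = sqlite3.connect(':memory:')
    demo.execute('create table Absences (SlotId text, Date text)')
    # '?' placeholders are filled in by the driver, so no hand-built quotes
    demo.execute('insert into Absences values (?, ?)', ('A1', '2012-01-01'))
    demo.execute('delete from Absences where Date = ? and SlotId = ?',
                 ('2012-01-01', 'A1'))
    demo.close()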
|
UTF-8
|
Python
| false | false | 2,012 |
85,899,372,927 |
df3537c6add67209d54ffddff95d8df9dca030ea
|
a7f04f5665e157fbccdd145249f27822b9fec9ca
|
/events/models.py
|
f715f346469293d66c2635afd4a5ff6d2e89084c
|
[] |
no_license
|
xerocreatividad/viejoteca
|
https://github.com/xerocreatividad/viejoteca
|
c826401addb63a2f662cc27d56b742e24eabf96e
|
36fe1816664879445f0b36039dd85941c553b050
|
refs/heads/master
| 2016-09-16T09:38:58.327572 | 2014-08-25T16:10:36 | 2014-08-25T16:10:36 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.db import models
from imagekit.models import ImageSpecField
from imagekit.processors import ResizeToFill
from tinymce.models import HTMLField
from django.template.defaultfilters import slugify
class Evento(models.Model):
nombre = models.CharField(max_length=120)
    descripcion = HTMLField(help_text='Additional information about the event', blank=True)
    flyer = models.ImageField(upload_to='flyers',help_text='Remember that the flyer must have the following dimensions: 1000px x 450px')
flyer_thumbnail = ImageSpecField(source='flyer',
processors=[ResizeToFill(640, 306)],
format='JPEG',
options={'quality': 80})
flyer_thumbnail_historico = ImageSpecField(source='flyer',
processors=[ResizeToFill(260, 124)],
format='JPEG',
options={'quality': 100})
flyer_thumbnail_inminente = ImageSpecField(source='flyer',
processors=[ResizeToFill(810, 365)],
format='JPEG',
options={'quality': 100})
flyer_thumbnail_inminente_modal = ImageSpecField(source='flyer',
processors=[ResizeToFill(1000, 450)],
format='JPEG',
options={'quality': 100})
fecha = models.DateField()
src_album = models.URLField(blank=True)
# url = models.SlugField(default=slugify(nombre))
cuna_radial = models.URLField(blank=True)
fecha_creacion = models.DateTimeField(auto_now_add=True)
fecha_actualizacion = models.DateTimeField(auto_now=True)
def __str__(self):
return self.nombre
class Meta:
ordering = ['-fecha']
def get_absolute_url(self):
return '%s' %(slugify(self.nombre))
def save(self, *args, **kwargs):
self.url = slugify(self.nombre)
super (Evento, self).save(*args, **kwargs)
class VideosArtista(models.Model):
evento = models.ForeignKey(Evento)
url = models.URLField()
def __str__(self):
return self.url
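# Hedged illustration (not part of the original file; no database access is
# needed): Evento.get_absolute_url() simply slugifies the event name, so an
# unsaved instance is enough to see the result. The event name is made up.
def _slug_demo():
    evento = Evento(nombre='Noche de Salsa')
    return evento.get_absolute_url()  # -> 'noche-de-salsa'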
|
UTF-8
|
Python
| false | false | 2,014 |
6,554,120,110,806 |
1efe4d18902bad5d80f20b43104b293a3f90140c
|
90e0a8dd428056f663257ae34328e83dc239551e
|
/thrift_async_test/SConscript
|
55de0888d31a5272569d17d7f1ec411562edb9ca
|
[] |
no_license
|
aubonbeurre/abbsandbox
|
https://github.com/aubonbeurre/abbsandbox
|
f8bfbb1faec0e034ff6081176ac78554e02ad683
|
198fa843139960712fd53062708e01e4a9f94452
|
refs/heads/master
| 2020-04-06T03:33:05.823104 | 2011-09-06T19:39:13 | 2011-09-06T19:39:13 | 1,911,032 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
Import('env')
import sys
import os.path
if sys.platform == 'win32':
THRIFT = "J:/sources/thrift/"
LIBEVENT = "J:/sources/libevent/"
else:
THRIFT = "/usr/local/thrift-0.7.0/"
LIBEVENT = "/usr/local/libevent-2.0.12/"
SRC_ROOT = env['SRC_ROOT']
BOOST_ROOT = env['BOOST_ROOT']
JPEG_ROOT = SRC_ROOT + '../lib/libjpeg/'
gensrc = [
SRC_ROOT + 'gen-cpp/imaging_constants.cpp',
SRC_ROOT + 'gen-cpp/imaging_types.cpp',
SRC_ROOT + 'gen-cpp/Imaging.cpp',
]
serversrc = [
SRC_ROOT + 'CppServer.cpp',
SRC_ROOT + 'ImagingHandler.cpp',
SRC_ROOT + 'ImagingAsyncHandler.cpp',
SRC_ROOT + 'ImagingThriftUtils.cpp',
]
clientsrc = [
SRC_ROOT + 'CppClient.cpp',
]
cpppath = [
BOOST_ROOT + 'boost/tr1',
BOOST_ROOT,
SRC_ROOT + "gen-cpp",
LIBEVENT + 'include',
JPEG_ROOT,
]
cppdefines = {
'BOOST_ALL_NO_LIB':None,
}
if sys.platform == 'win32':
pthreadVC2 = "pthreadVC2" if env['OS32'] else "pthreadVC2_x64"
libpath = [
LIBEVENT + 'build/${BUILD_PLATFORM}/%s${BUILD_TARGET}/' % ("" if env['OS32'] else "x64/"),
THRIFT + 'build/${BUILD_PLATFORM}/%s${BUILD_TARGET}/' % ("" if env['OS32'] else "x64/"),
THRIFT + 'build/${BUILD_PLATFORM}/%s/' % pthreadVC2,
]
libs = [
'libevent',
'thriftnb',
'pthreadVC2' if env['OS32'] else 'pthreadVC2_x64',
'Ws2_32',
]
cpppath += [
LIBEVENT + 'WIN32-Code',
LIBEVENT + 'compat',
LIBEVENT,
THRIFT + 'lib/cpp/src',
THRIFT + 'build/vc10',
]
cppdefines.update({
"_SCL_SECURE_NO_WARNINGS" : "1",
"_SECURE_SCL" : "0",
})
else:
libpath = [
THRIFT + 'lib',
LIBEVENT + 'lib',
]
libs = [
'thrift',
'thriftnb',
'event',
]
cpppath += [
THRIFT + 'include/thrift',
]
cppdefines.update({
'HAVE_CONFIG_H':None,
})
clientdeps = [
'myjpeg',
'boost_filesystem',
]
serverdeps = [
'myjpeg',
'boost_filesystem',
'boost_programoptions',
]
if sys.platform == 'win32':
    pthread = THRIFT + 'build/${BUILD_PLATFORM}/%s/%s.dll' % (pthreadVC2, pthreadVC2)
cmd = env.Command("$BUILD_DIR/%s.dll" % pthreadVC2, env.NoCache(pthread), env.AbbCopy('$TARGET', '$SOURCE'))
copy_pthread = env.Alias('copy_pthread', cmd)
client = env.AbbProg(progname="client", sources=gensrc + clientsrc,
defines=cppdefines, includes=cpppath,
libpath=libpath, libs=libs,
deps=clientdeps)
server = env.AbbProg(progname="server", sources=gensrc + serversrc,
defines=cppdefines, includes=cpppath,
libpath=libpath, libs=libs,
deps=serverdeps)
target = Alias('apps', [client, server])
if sys.platform == 'win32':
env.Requires(client, copy_pthread)
env.Requires(server, copy_pthread)
Return("target")
|
UTF-8
|
Python
| false | false | 2,011 |
10,703,058,534,258 |
0a48c366b85bc4d331408e798144da1afa96236d
|
db32ab5bea35bdab1d47e49213e9ed4eb67f4da5
|
/scripts/bpm-systematic-to-standard
|
44e9be06aae8db79f7892b8bc96af003434dffac
|
[
"GPL-2.0-only"
] |
non_permissive
|
BurntSushi/genecentric
|
https://github.com/BurntSushi/genecentric
|
7177d32c3c031127b72509aa41393547fd09b82c
|
e07fab9178e1690132b4bc02c6f7b1d49d06ed1c
|
refs/heads/master
| 2023-07-12T12:43:22.848168 | 2013-12-16T16:06:12 | 2013-12-16T16:06:12 | 3,631,479 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python2.7
import argparse
import csv
import geneids
parser = argparse.ArgumentParser(
description='Translate a BPM file with systematic yeast gene names to '
'standard yeast gene names.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
aa = parser.add_argument
aa('bpm', type=str,
metavar='INPUT_BPM_FILE',
help='BPM file to translate.')
aa('obpm', type=str,
metavar='OUTPUT_BPM_FILE',
help='The name of the file to write the input BPM file with the gene names '
'translated.')
conf = parser.parse_args()
def translate_csv(i, o):
ids = csv.reader(i, delimiter='\t')
locuses = csv.writer(o, delimiter='\t')
for row in ids:
locuses.writerow([row[0]] + map(geneids.locus_to_product, row[1:]))
with open(conf.bpm) as in_file, open(conf.obpm, 'w+') as out_file:
    translate_csv(in_file, out_file)
|
UTF-8
|
Python
| false | false | 2,013 |
7,060,926,268,548 |
acd549a52464f1d99e47a3a804d9aa5420213738
|
54791fd57ecc9a4fe7c5164dfa6eb79c8df48ee1
|
/codes/trunk/codes/pythonDemo/12_random_simple.py
|
94d594d9dc262f19f236a9ed0099329c6ce1c589
|
[] |
no_license
|
cherry-wb/quietheart
|
https://github.com/cherry-wb/quietheart
|
8dfc91f88046bd1b40240e2f6121043977ab78b4
|
715ed73c990da2b4634313c93910769a59ce51f4
|
refs/heads/master
| 2021-01-18T00:04:39.802220 | 2014-08-21T07:39:21 | 2014-08-21T07:39:21 | 23,286,239 | 1 | 3 | null | false | 2019-03-11T09:32:21 | 2014-08-24T16:37:05 | 2017-06-21T18:38:38 | 2014-08-21T07:39:21 | 241,716 | 1 | 4 | 1 | null | false | null |
#!/usr/bin/python
import random
import time
#random item from list:
#random.choice(list)
mydict = {"1":"one","2":"two","3":"three","4":"four"}
num = len(mydict.keys())
print "dict key is:",num
mydict["random"] = mydict[random.choice(mydict.keys())]
print mydict
#random num from int range[1,5]:
print random.randint(1,5)
#random number in [0,1)
print random.random()
#print "with seed:"
date = int(time.strftime("%Y%m%d",time.localtime()))#20120427
print date
random.seed(date) # seed can be a string, not only an int
for i in range(1,10):
print random.randint(1,100)
print "with same seed again:"
random.seed(date)
for i in range(1,10):
print random.randint(1,100)
#print "with state"
print "continue:"
state = random.getstate()
for i in range(1,10):
print random.randint(1,100)
print "continue with same previous init state again:"
random.setstate(state)
for i in range(1,10):
print random.randint(1,100)
|
UTF-8
|
Python
| false | false | 2,014 |
10,926,396,828,737 |
596d68ff3d0083dd3d0174c0d680b39e15cc9588
|
a23eba06ad0382d090a306e4c9a9700176b20ad3
|
/standard/records/AccountTreeView.py
|
f4e16fa4f30355dd032301d06d61e5788e6bcb26
|
[] |
no_license
|
koapesrl/segupak
|
https://github.com/koapesrl/segupak
|
08df882b4ae4ca10845f5fc778c760bdfcbcd5ac
|
5dae4b193b8d0ff5ea6e0e1f0e6932b074f4382b
|
refs/heads/master
| 2016-06-02T15:04:01.516940 | 2013-08-17T17:04:36 | 2013-08-17T17:04:36 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#encoding: utf-8
from OpenOrange import *
ParentAccountTreeViewSettings = SuperClass("AccountTreeViewSettings","Setting",__file__)
class AccountTreeViewSettings(ParentAccountTreeViewSettings):
buffer = SettingBuffer()
|
UTF-8
|
Python
| false | false | 2,013 |
7,095,286,012,871 |
839ce937d9d61db92c8f0c30e409ebcb09e5988f
|
bc991d482370f08833e28b73de6bc42763f79c39
|
/oscon2012_users.py
|
8be633769896cbfe4156a5defdc8e50dfee952a4
|
[] |
no_license
|
xsankar/oscon2012-handson
|
https://github.com/xsankar/oscon2012-handson
|
2e5bba9b91777a30704713685b38bc936ab0a27a
|
9a8aad27c715287371c0a955e7b0f37c709d4c2b
|
refs/heads/master
| 2020-05-18T00:01:11.228581 | 2012-07-16T23:00:48 | 2012-07-16T23:00:48 | 5,059,091 | 2 | 6 | null | false | 2012-07-17T03:35:18 | 2012-07-15T19:08:04 | 2012-07-17T03:35:18 | 2012-07-17T03:35:18 | 144 | null | null | null |
Python
| null | null |
#!/usr/bin/env python
import requests
import json
from requests.auth import HTTPBasicAuth
#
# User Lookup
#
# GET https://api.twitter.com/1/users/lookup.json?screen_name=twitterapi,twitter&include_entities=true
#
url='https://api.twitter.com/1/users/lookup.json'
payload={"screen_name":"twitterapi,twitter,jack"}
r = requests.get(url, params=payload)
print json.dumps(r.headers,sort_keys=True,indent=2)
print json.dumps(r.json,sort_keys=True,indent=2)
'''
Sample Response
@jack id = 12, @twitter = 783214, @twitterapi = 6253282
{
"cache-control": "no-cache, no-store, must-revalidate, pre-check=0, post-check=0",
"content-encoding": "gzip",
"content-length": "1775",
"content-type": "application/json; charset=utf-8",
"date": "Sat, 23 Jun 2012 00:24:06 GMT",
"etag": "\"e93eb3aeb3bf67655c5387fc0e9c2d13\"",
"expires": "Tue, 31 Mar 1981 05:00:00 GMT",
"last-modified": "Sat, 23 Jun 2012 00:24:06 GMT",
"pragma": "no-cache",
"server": "tfe",
"set-cookie": "k=10.35.101.135.1340411046155542; path=/; expires=Sat, 30-Jun-12 00:24:06 GMT; domain=.twitter.com, guest_id=v1%3A134041104616061676; domain=.twitter.com; path=/; expires=Mon, 23-Jun-2014 12:24:06 GMT, _twitter_sess=BAh7CDoPY3JlYXRlZF9hdGwrCBJpuRY4ASIKZmxhc2hJQzonQWN0aW9uQ29u%250AdHJvbGxlcjo6Rmxhc2g6OkZsYXNoSGFzaHsABjoKQHVzZWR7ADoHaWQiJTFl%250ANTg0ZmIyOGVlOTlhZGI1NGRhMjYwNzE4NmY3MjU4--b818ad65b801146173a2ab81569c3197ac64383f; domain=.twitter.com; path=/; HttpOnly",
"status": "200 OK",
"vary": "Accept-Encoding",
"x-frame-options": "SAMEORIGIN",
"x-mid": "2134114c7ed2199739fb622fb2c50325699ba39c",
"x-ratelimit-class": "api",
"x-ratelimit-limit": "150",
"x-ratelimit-remaining": "145",
"x-ratelimit-reset": "1340413192",
"x-runtime": "0.03738",
"x-transaction": "1e7707fe2c10b397",
"x-transaction-mask": "a6183ffa5f8ca943ff1b53b5644ef114546a8e75"
}
[
{
"contributors_enabled": true,
"created_at": "Tue Mar 21 20:50:14 +0000 2006",
"default_profile": false,
"default_profile_image": false,
"description": "Executive Chairman of Twitter, CEO of Square, a founder of both.",
"favourites_count": 1047,
"follow_request_sent": false,
"followers_count": 2026857,
"following": false,
"friends_count": 1192,
"geo_enabled": true,
"id": 12,
"id_str": "12",
"is_translator": false,
"lang": "en",
"listed_count": 18036,
"location": "San Francisco",
"name": "Jack Dorsey",
"notifications": false,
"profile_background_color": "EBEBEB",
"profile_background_image_url": "http://a0.twimg.com/images/themes/theme7/bg.gif",
"profile_background_image_url_https": "https://si0.twimg.com/images/themes/theme7/bg.gif",
"profile_background_tile": false,
"profile_image_url": "http://a0.twimg.com/profile_images/1563216547/image_normal.jpg",
"profile_image_url_https": "https://si0.twimg.com/profile_images/1563216547/image_normal.jpg",
"profile_link_color": "990000",
"profile_sidebar_border_color": "DFDFDF",
"profile_sidebar_fill_color": "F3F3F3",
"profile_text_color": "333333",
"profile_use_background_image": true,
"protected": false,
"screen_name": "jack",
"show_all_inline_media": true,
"status": {
"contributors": null,
"coordinates": null,
"created_at": "Fri Jun 22 06:27:29 +0000 2012",
"favorited": false,
"geo": null,
"id": 216054784015335425,
"id_str": "216054784015335425",
"in_reply_to_screen_name": null,
"in_reply_to_status_id": null,
"in_reply_to_status_id_str": null,
"in_reply_to_user_id": null,
"in_reply_to_user_id_str": null,
"place": null,
"retweet_count": 34523,
"retweeted": false,
"retweeted_status": {
"contributors": null,
"coordinates": null,
"created_at": "Fri Jun 22 05:54:32 +0000 2012",
"favorited": false,
"geo": null,
"id": 216046493721825280,
"id_str": "216046493721825280",
"in_reply_to_screen_name": null,
"in_reply_to_status_id": null,
"in_reply_to_status_id_str": null,
"in_reply_to_user_id": null,
"in_reply_to_user_id_str": null,
"place": null,
"retweet_count": 34523,
"retweeted": false,
"source": "<a href=\"http://www.echofon.com/\" rel=\"nofollow\">Echofon</a>",
"text": "Tough way to end a season but I am truly blessed to have the privilege to play for such a great city, congrats to the Miami Heat..",
"truncated": false
},
"source": "<a href=\"http://twitter.com/download/iphone\" rel=\"nofollow\">Twitter for iPhone</a>",
"text": "RT @KDTrey5: Tough way to end a season but I am truly blessed to have the privilege to play for such a great city, congrats to the Miami ...",
"truncated": true
},
"statuses_count": 11242,
"time_zone": "Pacific Time (US & Canada)",
"url": null,
"utc_offset": -28800,
"verified": true
},
{
"contributors_enabled": true,
"created_at": "Tue Feb 20 14:35:54 +0000 2007",
"default_profile": false,
"default_profile_image": false,
"description": "Always wondering what's happening. ",
"favourites_count": 17,
"follow_request_sent": false,
"followers_count": 11278047,
"following": false,
"friends_count": 1064,
"geo_enabled": true,
"id": 783214,
"id_str": "783214",
"is_translator": false,
"lang": "en",
"listed_count": 71495,
"location": "San Francisco, CA",
"name": "Twitter",
"notifications": false,
"profile_background_color": "ACDED6",
"profile_background_image_url": "http://a0.twimg.com/profile_background_images/378245879/Twitter_1544x2000.png",
"profile_background_image_url_https": "https://si0.twimg.com/profile_background_images/378245879/Twitter_1544x2000.png",
"profile_background_tile": true,
"profile_banner_url": "https://si0.twimg.com/brand_banners/twitter/1323368512/live",
"profile_image_url": "http://a0.twimg.com/profile_images/2284174758/v65oai7fxn47qv9nectx_normal.png",
"profile_image_url_https": "https://si0.twimg.com/profile_images/2284174758/v65oai7fxn47qv9nectx_normal.png",
"profile_link_color": "038543",
"profile_sidebar_border_color": "EEEEEE",
"profile_sidebar_fill_color": "F6F6F6",
"profile_text_color": "333333",
"profile_use_background_image": true,
"protected": false,
"screen_name": "twitter",
"show_all_inline_media": true,
"status": {
"contributors": [
7694352
],
"coordinates": null,
"created_at": "Fri Jun 22 16:42:47 +0000 2012",
"favorited": false,
"geo": null,
"id": 216209630475452419,
"id_str": "216209630475452419",
"in_reply_to_screen_name": null,
"in_reply_to_status_id": null,
"in_reply_to_status_id_str": null,
"in_reply_to_user_id": null,
"in_reply_to_user_id_str": null,
"place": null,
"possibly_sensitive": false,
"retweet_count": 253,
"retweeted": false,
"source": "web",
"text": "Want to see the best Tweets about #Euro2012 all in one place? We've got you covered. http://t.co/riNDcJZY",
"truncated": false
},
"statuses_count": 1340,
"time_zone": "Pacific Time (US & Canada)",
"url": "http://blog.twitter.com/",
"utc_offset": -28800,
"verified": true
},
{
"contributors_enabled": true,
"created_at": "Wed May 23 06:01:13 +0000 2007",
"default_profile": true,
"default_profile_image": false,
"description": "The Real Twitter API. I tweet about API changes, service issues and happily answer questions about Twitter and our API. Don't get an answer? It's on my website.",
"favourites_count": 24,
"follow_request_sent": false,
"followers_count": 1103346,
"following": false,
"friends_count": 30,
"geo_enabled": true,
"id": 6253282,
"id_str": "6253282",
"is_translator": false,
"lang": "en",
"listed_count": 10550,
"location": "San Francisco, CA",
"name": "Twitter API",
"notifications": false,
"profile_background_color": "C0DEED",
"profile_background_image_url": "http://a0.twimg.com/images/themes/theme1/bg.png",
"profile_background_image_url_https": "https://si0.twimg.com/images/themes/theme1/bg.png",
"profile_background_tile": false,
"profile_image_url": "http://a0.twimg.com/profile_images/2284174872/7df3h38zabcvjylnyfe3_normal.png",
"profile_image_url_https": "https://si0.twimg.com/profile_images/2284174872/7df3h38zabcvjylnyfe3_normal.png",
"profile_link_color": "0084B4",
"profile_sidebar_border_color": "C0DEED",
"profile_sidebar_fill_color": "DDEEF6",
"profile_text_color": "333333",
"profile_use_background_image": true,
"protected": false,
"screen_name": "twitterapi",
"show_all_inline_media": false,
"status": {
"contributors": [
819797
],
"coordinates": null,
"created_at": "Fri Jun 15 19:57:54 +0000 2012",
"favorited": false,
"geo": null,
"id": 213722016866508802,
"id_str": "213722016866508802",
"in_reply_to_screen_name": "ricktagious",
"in_reply_to_status_id": 213670532372115459,
"in_reply_to_status_id_str": "213670532372115459",
"in_reply_to_user_id": 152138582,
"in_reply_to_user_id_str": "152138582",
"place": null,
"retweet_count": 8,
"retweeted": false,
"source": "web",
"text": "@ricktagious not seeing any issues; consider reaching out to @support for issues on the site. ^TS",
"truncated": false
},
"statuses_count": 3314,
"time_zone": "Pacific Time (US & Canada)",
"url": "http://dev.twitter.com",
"utc_offset": -28800,
"verified": true
}
]
'''
|
UTF-8
|
Python
| false | false | 2,012 |
1,443,109,025,948 |
953a712f8fd85e47fddf8486b2ba1ea9bd915c98
|
ece9734905c50fe20dc7552a98249da9f1708337
|
/synchg/sync.py
|
e7c622ec8bb2e84750caead18eba7825a845d2dd
|
[
"BSD-3-Clause"
] |
permissive
|
obmarg/synchg
|
https://github.com/obmarg/synchg
|
6f2fc42df3588611a7b331858552b214227ca91d
|
29c054f5abf036d66e720ff81b4cadaf3c8439b2
|
refs/heads/master
| 2020-05-20T12:44:00.845837 | 2013-01-24T22:20:36 | 2013-01-24T22:20:36 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
'''
This module provides the actual syncing functionality for SyncHg. Its
functions can be imported and called by other libraries that wish to make use
of SyncHg functionality.
'''
import plumbum
from remote import RemoteMachine
from repo import Repo
from utils import yn
class AbortException(Exception):
'''
An exception that's thrown when a user chooses to abort. This should be
caught and ignored at the start of the program to allow users to abort at
prompts
'''
pass
class SyncError(Exception):
'''
An exception that's thrown when a non-exceptional error occurs. This
exception is usually accompanied by an error message and should probably
be caught and the backtrace suppressed.
'''
pass
def SyncRemote(host, name, localpath, remote_root):
'''
Syncs a remote repository. This function should be called to kick off a
sync
:param host: The hostname of the remote repository
:param name: The name of the project that is being synced.
This parameter will be appended to the remote_root
to find the remote repository.
:param localpath: A plumbum path to the local repository
:param remote_root: The path to the parent directory of the
remote repository
'''
print "Sync {0} -> {1}".format(name, host)
with RemoteMachine(host) as remote:
with plumbum.local.cwd(localpath):
local = Repo(plumbum.local, host)
remote_path = remote_root + '/' + name
_SanityCheckRepos(local, host, remote_path, remote)
with remote.cwd(remote.cwd / remote_path):
_DoSync(local, Repo(remote))
def _SanityCheckRepos(local_repo, host, remote_path, remote):
'''
Does a sanity check of the repositories, and attempts
to fix any problems found.
This includes cloning the repository, setting up remotes
and setting up mq repositories.
It's expected that the local path will be set up by this point
:param local_repo: A Repo object for the local repository
:param host: The hostname of the remote repo
:param remote_path: The path to the remote repository as a string
:param remote: A plumbum machine for the remote machine
'''
patch_dir = plumbum.local.cwd / '.hg' / 'patches'
if patch_dir.exists():
if not (patch_dir / '.hg').exists():
# Seems mq --init hasn't been run. Run it.
local_repo.InitMq()
local_repo.CommitMq()
# Check if the remote exists, and clone it if not
hg_remote_path = 'ssh://{0}/{1}'.format(host, remote_path)
rpath = remote.cwd / remote_path
if not rpath.exists():
print "Remote repository can't be found."
if yn('Do you want to create a clone?'):
local_repo.Clone(hg_remote_path)
else:
raise AbortException
# Check if remote paths are set up properly
if host not in local_repo.config.remotes:
local_repo.config.AddRemote(host, hg_remote_path)
if host not in local_repo.mqconfig.remotes:
local_repo.mqconfig.AddRemote(host, hg_remote_path + '/.hg/patches')
# TODO: Would probably be good to check that the remotes aren't
# pointing at the wrong address as well
# Finally, check if the mq repository needs cloned
if patch_dir.exists() and not (rpath / '.hg' / 'patches').exists():
local_repo.CloneMq(hg_remote_path)
def _DoSync(local, remote):
'''
Function that actually handles the syncing after everything
has been set up
:param local: The local repository
:param remote: The remote repository
'''
# First, check the state of each repository
if remote.summary.commit.modified:
# Changes might be lost on remote...
raise SyncError('Remote repository has uncommitted changes')
lsummary = local.summary
if lsummary.commit.modified:
print "Local repository has uncommitted changes."
if lsummary.mq.applied:
# We can't push/pop patches to check remote is
# in sync if we've got local changes, so prompt to refresh.
if yn('Do you want to refresh the current patch?'):
local.RefreshMq()
else:
print "Ok. Please run again after dealing with changes."
raise AbortException
else:
# If we're not doing an mq sync, we can happily ignore
# these changes, but probably want to make sure that's
# what the user wants...
if not yn('Do you want to ignore these changes?'):
print "Ok. Please run again after dealing with changes."
raise AbortException
# Pop any patches on the remote before we begin
remote.PopPatch()
with local.CleanMq():
if local.outgoings:
incomings = local.incomings
if incomings:
# Don't want to be creating new remote heads when we push
print "Changesets will be stripped from remote:"
for hash, desc in incomings:
if len(desc) > 50:
desc = desc[:47] + '...'
print " {0} {1}".format(hash[:6], desc)
if not yn('Do you want to continue?'):
raise AbortException()
remote.Strip(incomings)
print "Pushing to remote"
local.PushToRemote()
print "Updating remote"
remote.Update(local.currentRev)
appliedPatch = local.lastAppliedPatch
if appliedPatch:
print "Syncing mq repos"
local.CommitMq()
local.PushMqToRemote()
print "Updating remote mq repo"
remote.UpdateMq()
remote.PushPatch(appliedPatch)
print "Ok!"
|
UTF-8
|
Python
| false | false | 2,013 |
18,751,827,242,871 |
41cf76fe3b6110457e634a431cd7321f335868b5
|
044fa2209e278ade0bb20c17784b6a94f7645bd8
|
/src/SConscript
|
4cb0b26056a67a22dcf4005b16ebdf3d3859dcee
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
thejpster/stellaris-launchpad-c
|
https://github.com/thejpster/stellaris-launchpad-c
|
f67af81f9f3fbf9c4076f9238a57a3c4b4a90a92
|
f7a0191a6eca71dcee87d3e6021f9e56a24f50cf
|
refs/heads/master
| 2021-05-28T06:42:53.573465 | 2014-04-22T21:41:52 | 2014-04-22T21:41:52 | 7,287,135 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
Builds a simple example application which runs bare-metal on a TI Stellaris Launchpad.
If you have the arm-none-eabi toolchain installed, this will build the example:
$ scons
This will build and flash the example on to your Launchpad:
$ scons flash
Copyright (c) 2012-2014 theJPster ([email protected])
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
env = Environment()
# See ../prerequisites.sh
FLASH_TOOL = "sudo ./lm4tools/lm4flash/lm4flash"
# If you have installed the toolchain from https://launchpad.net/gcc-arm-embedded,
# edit this to point to your gcc. This compiler comes with a bare-metal C library
# called newlib
ARM_TOOL_PREFIX = "./gcc-arm/bin/arm-none-eabi"
env.Replace(CC="%s-gcc" % ARM_TOOL_PREFIX)
env.Replace(AS="%s-as" % ARM_TOOL_PREFIX)
env.Replace(LD="%s-gcc" % ARM_TOOL_PREFIX)
verbose = ARGUMENTS.get("VERBOSE", 0)
if not verbose:
env.Replace(CCCOMSTR="[CC] $SOURCE -> $TARGET")
env.Replace(LINKCOMSTR="[LD] $TARGET")
# Builder which uses objcopy to turn the .elf into something we can flash
strip = Builder(
action="%s-objcopy -O binary ${SOURCE} ${TARGET}" % ARM_TOOL_PREFIX)
env.Append(BUILDERS={"Strip": strip})
# Builder which uses objdump to turn the .elf into assembly code
objdump = Builder(
action="%s-objdump -S -d ${SOURCE} > ${TARGET}" % ARM_TOOL_PREFIX)
env.Append(BUILDERS={"Objdump": objdump})
# Builder to flash on to Launchpad board
flash = Builder(action="%s ${SOURCE}" % FLASH_TOOL)
env.Append(BUILDERS={"Flash": flash})
# Builder which uses size to show segment sizes
size = Builder(action="echo Max 0x40000 flash, 0x8000 SRAM && %s-size -B -x ${SOURCE}" % ARM_TOOL_PREFIX)
env.Append(BUILDERS={"Size": size})
# Set some sensible defaults for the Launchpad's processor
env.Append(CCFLAGS=[
"-mthumb",
"-mcpu=cortex-m4",
# For some reason, FP code causes the chip to crash
#"-mfloat-abi=softfp",
#"-mfpu=fpv4-sp-d16",
"-mfloat-abi=soft",
# O2 or higher causes a crash
"-O1",
"-g",
"-Wall",
"-pedantic",
"-std=c99",
"-fno-stack-protector",
"-Isrc"
])
# Use our custom linker script
env.Append(LINKFLAGS=[
"-Wl,-T,basic.ld",
"-mthumb",
"-mcpu=cortex-m4",
# For some reason, FP code causes the chip to crash
#"-mfloat-abi=softfp",
#"-mfpu=fpv4-sp-d16",
"-mfloat-abi=soft",
"-Wl,-Map,bin/start.map"
])
env.Append(LIBS = [
# libm -> the maths library
"m",
])
env.Append(CPPPATH = ['./src'])
sources = [
'main.c',
'circbuffer/src/circbuffer.c',
'command/src/command.c',
'startup/src/startup.c',
'startup/src/libc.c',
'drivers/misc/src/misc.c',
'drivers/timers/src/timers.c',
'drivers/gpio/src/gpio.c',
'drivers/uart/src/uart.c',
]
# Set the clock rate to 66.67MHz
# env.Append(CPPDEFINES={"CLOCK_RATE": 16000000})
env.Append(CPPDEFINES={"CLOCK_RATE": 66666666})
# We want the LCD tall, not wide.
env.Append(CPPDEFINES=["LCD_ROTATE_DISPLAY"])
# We want a simpler, smaller, printf
env.Append(CPPDEFINES=["USE_IPRINTF"])
# Compiles the ELF version of our program
elf = env.Program(target="start.elf", source=sources, CPPPATH='.')
# SCons doesn"t notice the linker script is a dependency, so tell it
Depends(elf, "../basic.ld")
# Creates the raw binary version from the ELF
bin = env.Strip(target="start.bin", source=elf)
# Before we create the binary, show how big the ELF is
Depends(bin, env.Size(source=elf))
# Flashes the raw binary onto the board
fd = env.Flash(target="flash", source=bin)
# Allow the user to call "scons flash" to build and flash
fda = env.Alias("flash", fd)
env.AlwaysBuild(fda)
# Allow the user to call "scons asm" to build and dump the assembler
asm = env.Objdump(target="start.s", source=elf)
env.Alias("asm", asm)
# By default, just compile and don't flash
Default(bin)
|
UTF-8
|
Python
| false | false | 2,014 |
841,813,627,233 |
22419198c01009eeb52f3aafa0b04589422ee6cd
|
7a4fbcb5d6ad1598224d150f4e151ec1605a9555
|
/nsis-ka/natfw-nslp/eval/eval_journal.py
|
2d1afed48f6c4594e535a411ac9e53696ae54d25
|
[] |
no_license
|
jurdan21/ReVir-UFPe
|
https://github.com/jurdan21/ReVir-UFPe
|
49cffbdaaeccf98822a24a19888d2f7ba7ed0d06
|
7087b43510c8a224c5cd584f0ab9823250fba5c7
|
refs/heads/master
| 2020-11-30T05:27:53.903075 | 2011-08-09T00:23:45 | 2011-08-09T00:23:45 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#! /usr/bin/env python
#
# Evaluate the journal printed by the benchmark class.
#
# Note: This script requires standard python and the python-stats module
#
# $Id: eval_journal.py 2296 2006-11-08 11:56:24Z stud-matfried $
# $HeadURL: https://svn.tm.kit.edu/nsis/natfw-nslp/trunk/eval/eval_journal.py $
#
import sys
import stats
import math
if len(sys.argv) != 3:
print >>sys.stderr, 'Usage: eval_journal.py mp_id_1 mp_id_2'
sys.exit(1)
mp_id_start = int(sys.argv[1])
mp_id_stop = int(sys.argv[2])
times = { }
differences = [ ]
for line in sys.stdin:
if line.startswith('#'):
continue
(mp_id, thread_id, secs, ns) = [int(x) for x in line.split()]
nanosecs = (secs*1000000000+ns)
#print mp_id, thread_id, nanosecs
if mp_id == mp_id_start:
times[thread_id] = nanosecs
elif mp_id == mp_id_stop and times.has_key(thread_id):
differences.append(nanosecs - times[thread_id])
del times[thread_id]
# print 'Values = %8d' % len(differences)
# print 'Min = %8d' % min(differences)
# print 'Max = %8d' % max(differences)
# print 'Mean = %8d' % stats.mean(differences)
# print 'Stdev = %8d' % stats.stdev(differences)
label = '#' + str(mp_id_start) + '-#' + str(mp_id_stop)
print ' Benchmark Values Min Max Mean Stdev'
print ' %8s %8d %8d %8d %8d %8d' % (
label, len(differences), min(differences),
max(differences), stats.mean(differences), stats.stdev(differences))
# EOF
|
UTF-8
|
Python
| false | false | 2,011 |
3,204,045,621,983 |
50a42dfffbcde14011be4043e3ab93635254270e
|
98c6ea9c884152e8340605a706efefbea6170be5
|
/examples/data/Assignment_8/omrkha003/question2.py
|
f39774b06167c4c7c38680de83d116a29368c453
|
[] |
no_license
|
MrHamdulay/csc3-capstone
|
https://github.com/MrHamdulay/csc3-capstone
|
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
|
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
|
refs/heads/master
| 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# program that uses a recursive function to count the number of adjacent repeated-character pairs in a string
# khadeejah omar
# 4 may 2014
count = 0
def pair_counter(string) :
global count
# base case
if string == "" or len(string) == 1 :
print("Number of pairs:", count)
# recursive step
else:
if string[0] == string[1] : # if the first two letters is a pair, remove first two letters and do recursive step again
count += 1
return (pair_counter(string[2:]))
else : # if the first two letters aren't a pair, remove the first letter and do recursive step again
return (pair_counter(string[1:]))
def main() :
string = input("Enter a message: \n")
return(pair_counter(string))
main()
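# Side note (a hedged sketch, not part of the assignment solution): the
# version above keeps its tally in a module-level counter, so a second call
# would keep accumulating. A pure recursive variant that returns the count:
def count_pairs(string):
    # base case: fewer than two characters left means no further pairs
    if len(string) < 2:
        return 0
    # recursive step: consume two characters on a match, one otherwise
    if string[0] == string[1]:
        return 1 + count_pairs(string[2:])
    return count_pairs(string[1:])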
|
UTF-8
|
Python
| false | false | 2,014 |
12,687,333,419,054 |
b5bc950c2862a6452ffcb1206034b188d8297f7b
|
dc33b6464d242396b762d0ad84f54638a26f0d65
|
/modules/orphilia/installer.py
|
a04fb84e2f6306e2323a5cf6551ec6e513ca1908
|
[] |
no_license
|
saa/orphilia-dropbox
|
https://github.com/saa/orphilia-dropbox
|
43db07a6b399f1a8db8ea902b847ff91bdc305af
|
38ef646b614c9364d8a151f072128cc992cd3af1
|
refs/heads/master
| 2020-04-07T19:59:45.871929 | 2013-06-25T18:36:33 | 2013-06-25T18:36:33 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import sys, os, shutil, stat
def make_executable(path):
if sys.platform[:5] == "haiku":
os.system('chmod +x '+path)
elif sys.platform[:5] != "win32":
perm = os.stat(path)
os.chmod(path, perm.st_mode | stat.S_IEXEC)
def install():
print('Orphilia Installer')
print('---')
# define generic installdir and bindir
installdir = '/usr/share/orphilia'
bindir = '/usr/share/bin'
# check if command is supported on this platform
if sys.platform[:5] == 'win32':
print('This function is not available on this platform')
sys.exit(1)
# use Haiku-specific installdir and bindir
if sys.platform[:5] == 'haiku':
installdir = '/boot/apps/orphilia'
bindir = '/boot/common/bin'
print(installdir)
print(bindir)
#generic instructions
make_executable('./orphilia.py')
try:
# generate directory tree
os.mkdir(installdir)
os.mkdir(installdir + '/dropbox')
os.mkdir(installdir + '/orphilia')
os.mkdir(installdir + '/orphiliaclient')
        os.mkdir(installdir + '/shared')
        os.mkdir(installdir + '/branding') # needed by the branding copy below
except:
print('Unable to make install directory tree')
sys.exit(1)
try:
# copy all the files
shutil.copy('./orphilia.py',installdir)
shutil.copy('./dropbox/__init__.py',installdir + '/dropbox')
shutil.copy('./dropbox/six.py',installdir + '/dropbox')
shutil.copy('./dropbox/client.py',installdir + '/dropbox')
shutil.copy('./dropbox/session.py',installdir + '/dropbox')
shutil.copy('./dropbox/rest.py',installdir + '/dropbox')
shutil.copy('./dropbox/util.py',installdir + '/dropbox')
#copy additional modules
shutil.copy('./shared/__init__.py',installdir + '/shared')
shutil.copy('./shared/path_rewrite.py',installdir + '/shared')
shutil.copy('./shared/date_rewrite.py',installdir + '/shared')
#copy Orphilia modules
shutil.copy('./orphilia/__init__.py',installdir + '/orphilia')
shutil.copy('./orphilia/common.py',installdir + '/orphilia')
shutil.copy('./orphilia/config.py',installdir + '/orphilia')
shutil.copy('./orphilia/installer.py',installdir + '/orphilia')
shutil.copy('./orphiliaclient/__init__.py', installdir + '/orphiliaclient')
shutil.copy('./orphiliaclient/client.py', installdir + '/orphiliaclient')
shutil.copy('./orphiliaclient/monitor.py', installdir + '/orphiliaclient')
#copy notify scripts
shutil.copy('./notify/cli-notify',installdir)
make_executable(installdir + '/cli-notify')
#copy platform-specific notify scripts
if sys.platform[:5] == "haiku":
shutil.copy('./notify/haiku-notify',installdir)
make_executable(installdir + '/haiku-notify')
#copy branding related files
shutil.copy('./branding/orphilia.png',installdir + '/branding')
if sys.platform[:5] == "haiku":
shutil.copy('./branding/orphilia_haiku.png',installdir)
else:
shutil.copy('./branding/orphilia.png','/usr/share/pixmaps')
shutil.copy('./dropbox/trusted-certs.crt',installdir + '/dropbox')
#copy platform-specific files (gui)
if sys.platform[:5] == "haiku":
shutil.copy('./authorize.yab',installdir + '/authorize.yab')
shutil.copy('./yab', installdir)
make_executable(installdir + '/yab')
make_executable(installdir + '/authorize.yab')
#make symlinks
os.symlink(installdir + '/orphilia.py',bindir + '/orphilia')
os.symlink(installdir + '/cli-notify',bindir + '/orphilia_cli-notify')
os.symlink(installdir + '/authorize.yab',bindir +'/orphilia_haiku-authorize')
if sys.platform[:5] == 'haiku':
os.system('alert --info \"Installation completed.\"')
os.system('ln -s ' + installdir + '/haiku-notify ' + bindir + '/orphilia_haiku-notify')
print('Done. Now run orphilia --configuration as a regular user')
except:
print('Installation failed.')
def uninstall():
print('Orphilia Installer')
print('---')
# check if command is supported on this platform
if sys.platform[:3] == 'win':
print('This function is not available on this platform')
sys.exit(1)
installdir = '/usr/share/orphilia'
bindir = '/usr/share/bin'
if sys.platform[:5] == 'haiku':
installdir = '/boot/apps/orphilia'
bindir = '/boot/common/bin'
shutil.rmtree(installdir)
os.remove(bindir + '/orphilia')
os.remove(bindir + '/orphilia_cli-notify')
if sys.platform[:5] == "haiku":
os.remove(bindir + '/orphilia_haiku-notify')
os.remove(bindir + '/orphilia_haiku-authorize')
os.system('alert --info \"Uninstallation completed.\"')
else:
os.remove('/usr/share/pixmaps/orphilia.png')
print('Done.')
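if __name__ == '__main__':
    # Hedged driver sketch (not part of the original module; orphilia.py
    # appears to be the usual entry point): lets the installer run stand-alone.
    if len(sys.argv) == 2 and sys.argv[1] == 'install':
        install()
    elif len(sys.argv) == 2 and sys.argv[1] == 'uninstall':
        uninstall()
    else:
        print('usage: installer.py install|uninstall')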
|
UTF-8
|
Python
| false | false | 2,013 |
9,663,676,451,212 |
b8d64f93ab57355d7334a1741e454ec8d75892fc
|
e6eb1d609a8749eea5c3a5105be002985af27936
|
/mkt/api/tests/test_throttle.py
|
c148d53fe3ee6a55f24c84e1c37c6c938a28990c
|
[] |
no_license
|
jlongster/zamboni
|
https://github.com/jlongster/zamboni
|
ad224f9110021c8a84fed959a0285c7743fc549e
|
68508bfcb85fa0ac5c47c792a801544036c7059e
|
refs/heads/master
| 2021-01-18T12:05:51.875480 | 2013-06-04T19:32:08 | 2013-06-12T21:41:08 | 5,633,847 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.test.client import RequestFactory
from mock import patch
from tastypie.exceptions import ImmediateHttpResponse
from mkt.api.base import HttpTooManyRequests, MarketplaceResource
from mkt.api.tests.test_oauth import BaseOAuth
class ThrottleTests(object):
"""
Mixin to add tests that ensure API endpoints are being appropriately
throttled.
Note: subclasses will need to define the resource being tested.
"""
resource = None
request = RequestFactory().get('/')
def test_should_throttle(self):
if not self.resource:
return
with patch.object(self.resource._meta, 'throttle') as throttle:
throttle.should_be_throttled.return_value = True
with self.assertImmediate(HttpTooManyRequests):
self.resource.throttle_check(self.request)
def test_shouldnt_throttle(self):
with patch.object(self, 'resource') as resource:
resource._meta.throttle.should_be_throttled.return_value = False
try:
self.resource.throttle_check(self.request)
except ImmediateHttpResponse:
self.fail('Unthrottled request raises ImmediateHttpResponse')
class TestThrottle(ThrottleTests, BaseOAuth):
resource = MarketplaceResource()
|
UTF-8
|
Python
| false | false | 2,013 |
12,575,664,259,651 |
0229c826dbdb69b601e3fdc8b65a623587f762df
|
2fdeb39e9dc1980d2ecc3e5a02c53d6c780131b6
|
/local/localdaemon.py
|
05d4ace8a63ce44898e5ce952bcb7daab36f74cb
|
[] |
no_license
|
zhouty/redblue
|
https://github.com/zhouty/redblue
|
8fceaf0a46235f07f88230d18bdda4b6d7298051
|
397d174de338a8db4ab0ddb2712469463e078581
|
refs/heads/master
| 2016-09-06T08:14:28.081539 | 2014-05-29T04:25:03 | 2014-05-29T04:25:03 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os, sys, time, libvirt, threading, datetime
from daemon import Daemon
import localmanager
def log(msg):
print "[Local] : %s" % msg
sys.stdout.flush()
class LocalDaemon(Daemon):
def run(self):
# localmanager thread start to wait for RPC request
localmanager.rpc_server_start()
def destroy(self):
log("destroy in host")
if __name__ == "__main__":
daemon = LocalDaemon('/home/kvmcon/local/local.pid', '/dev/null', '/home/kvmcon/local/local.out', '/home/kvmcon/local/local.err')
if len(sys.argv) == 2:
if 'start' == sys.argv[1]:
daemon.start()
elif 'stop' == sys.argv[1]:
daemon.stop()
elif 'restart' == sys.argv[1]:
daemon.restart()
else:
print "Unknown command"
sys.exit(2)
sys.exit(0)
else:
print "usage: %s start|stop|restart" % sys.argv[0]
sys.exit(2)
|
UTF-8
|
Python
| false | false | 2,014 |
1,717,986,940,038 |
c6c9e9f911e74a583ecb541be7151529a4076723
|
f27ab7d45fac84c38c2ea70b78d46f2ca5dfbd92
|
/src/pat/vehicles/fixed_wing/dynamic_model_python_basic.py
|
58633ff18ded78338dbb1ea1eb0ee5a24ccf3f42
|
[] |
no_license
|
tectronics/python-aerospace-toolbox
|
https://github.com/tectronics/python-aerospace-toolbox
|
35d06516dc7144b5a151a8c9fd1d3fd4321c035a
|
73ed1cab9ce796fb3b4f2d2d488152ab40865e51
|
refs/heads/master
| 2018-01-11T14:47:44.097095 | 2014-09-01T22:32:40 | 2014-09-01T22:32:40 | 46,206,756 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#-*- coding: utf-8 -*-
#
# Copyright 2013-2014 Antoine Drouin ([email protected])
#
# This file is part of PAT.
#
# PAT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PAT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PAT. If not, see <http://www.gnu.org/licenses/>.
#
"""
This is a 6dof model for a fixed wing vehicle
"""
import math
import numpy as np
import scipy.integrate
import scipy.optimize
import matplotlib.pyplot as plt
import pat.dynamic_model as dm
import pat.frames as fr
import pat.utils as pu
import pat.algebra as pal
import pat.atmosphere as patm
"""
Components of the state vector
"""
sv_x = 0 # position x axis
sv_y = 1 # position y axis
sv_z = 2 # height above ground
sv_v = 3 # airspeed
sv_alpha = 4 # alpha
sv_beta = 5 # beta
sv_phi = 6 # roll (euler, ltp to body)
sv_theta = 7 # pitch (euler, ltp to body)
sv_psi = 8 # yaw (euler, ltp to body)
sv_p = 9 # rotational vel body x
sv_q = 10 # rotational vel body y
sv_r = 11 # rotational vel body z
sv_size = 12
"""
Components of the input vector
"""
iv_da = 0
iv_de = 1
iv_dr = 2
iv_size = 4
def get_aero_to_body(X):
"""
computes the aero to body rotation matix
"""
ca = math.cos(X[sv_alpha]); sa = math.sin(X[sv_alpha])
cb = math.cos(X[sv_beta]); sb = math.sin(X[sv_beta])
return np.array([[ca*cb, -ca*sb, -sa],
[sb , cb , 0.],
[sa*cb, -sa*sb, ca]])
def get_f_eng_body(X, U, P):
"""
return propulsion forces expressed in body frame
"""
rho = patm.get_rho(-X[sv_z])
f_engines_body = np.zeros((P.eng_nb, 3))
for i in range(0, P.eng_nb):
thrust = U[i]*P.fmaxs[i]*math.pow((rho/P.rhois[i]),P.nrhos[i])*math.pow((X[sv_v]/P.Vis[i]),P.nVs[i])
f_engines_body[i] = np.dot(P.eng_to_body[i], np.array([thrust, 0., 0.]))
return f_engines_body
def get_f_aero_body(X, Usfc, P, Pdyn):
"""
return aerodynamic forces expressed in body frame
"""
d_alpha = X[sv_alpha] - P.alpha0
rvel = X[sv_p:sv_r+1]*np.array([P.Bref, P.Cref, P.Bref])/2/P.Vref
CL = P.CL0 + P.CL_alpha*d_alpha + P.CL_beta*X[sv_beta] +\
np.dot(P.CL_omega,rvel) + np.dot(P.CL_sfc,Usfc)
CD = P.CD0 + P.CD_k1*CL + P.CD_k2*(CL**2) + np.dot(P.CD_sfc,Usfc)
CY = P.CY_alpha*d_alpha + P.CY_beta*X[sv_beta] +\
np.dot(P.CY_omega,rvel) + np.dot(P.CY_sfc,Usfc)
return Pdyn*P.Sref*np.dot(get_aero_to_body(X),[-CD, CY, -CL])
def get_m_eng_body(f_eng_body, P):
"""
return propulsion moments expressed in body frame
"""
m = np.zeros(3)
for i in range(0, P.eng_nb):
m += np.cross(P.eng_pos[i], f_eng_body[i])
return m
def get_m_aero_body(X, Usfc, P, Pdyn):
"""
return aerodynamic moments expressed in body frame
"""
d_alpha = X[sv_alpha] - P.alpha0
rvel = X[sv_p:sv_r+1]*np.array([P.Bref, P.Cref, P.Bref])/2/P.Vref
Cl = P.Cl_alpha*d_alpha + P.Cl_beta*X[sv_beta] +\
np.dot(P.Cl_omega,rvel) + np.dot(P.Cl_sfc,Usfc)
Cm = P.Cm0 + P.Cm_alpha*d_alpha + P.Cm_beta*X[sv_beta] +\
np.dot(P.Cm_omega,rvel) + np.dot(P.Cm_sfc,Usfc)
Cn = P.Cn_alpha*d_alpha + P.Cn_beta*X[sv_beta] +\
np.dot(P.Cn_omega,rvel) + np.dot(P.Cn_sfc,Usfc)
return Pdyn*P.Sref*np.array([Cl*P.Bref, Cm*P.Cref, Cn*P.Bref])
def dyn(X, t, U, P):
"""
Dynamic model
"""
rho = patm.get_rho(-X[sv_z])
Pdyn = 0.5*rho*X[sv_v]**2
Ueng = U[0:P.eng_nb] # engines part of input vector
Usfc = U[P.eng_nb:P.eng_nb+P.sfc_nb] # control surfaces part of input vector
X_rvel_body = X[sv_p:sv_r+1] # body rotational velocities
X_euler = X[sv_phi:sv_psi+1] # euler angles
# Newton for forces in body frame
f_aero_body = get_f_aero_body(X, Usfc, P, Pdyn)
f_eng_body = get_f_eng_body(X, Ueng, P)
earth_to_body = pal.rmat_of_euler(X_euler)
f_weight_body = np.dot(earth_to_body, [0., 0., P.m*P.g])
forces_body = f_aero_body + np.sum(f_eng_body, axis=0) + f_weight_body
vel_body = np.dot(get_aero_to_body(X), [X[sv_v], 0., 0.]) # u, v, w
accel_body = 1./P.m*forces_body - np.cross(X_rvel_body, vel_body)
# Newton for moments in body frame
m_aero_body = get_m_aero_body(X, Usfc, P, Pdyn)
m_eng_body = get_m_eng_body(f_eng_body, P)
raccel_body = np.dot(P.invI, m_aero_body + m_eng_body - np.cross(X_rvel_body, np.dot(P.I, X_rvel_body)))
Xdot = np.zeros(sv_size)
Xdot[sv_x:sv_z+1] = np.dot(np.transpose(earth_to_body), vel_body)
Xdot[sv_v] = np.inner(vel_body, accel_body)/X[sv_v]
u, v, w = vel_body
ud, vd, wd = accel_body
Xdot[sv_alpha] = (u*wd - w*ud)/(u**2+w**2)
Xdot[sv_beta] = (X[sv_v]*vd - v*Xdot[sv_v]) / X[sv_v] / math.sqrt(u**2+w**2)
Xdot[sv_phi:sv_psi+1] = pal.euler_derivatives(X_euler, X_rvel_body)
Xdot[sv_p:sv_r+1] = raccel_body
return Xdot
def trim(P, args=None, debug=False):
"""
    Find the throttle, elevator and angle of attack corresponding
    to the given airspeed and flight path
"""
if args<>None:
va, gamma, h = (args['va'], args['gamma'], args['h'] )
else:
va, gamma, h = (P.Vref, 0., 0.)
if debug:
print "searching for constant path trajectory with"
print " va {:f} m/s".format(va)
print " gamma {:f} deg".format(pu.deg_of_rad(gamma))
def err_func((throttle, elevator, alpha)):
X=[0., 0., -h, va, alpha, 0., 0., gamma+alpha, 0., 0., 0., 0.]
U = np.zeros(P.input_nb)
U[0:P.eng_nb] = throttle; U[P.eng_nb+iv_de] = elevator
Xdot = dyn(X, 0., U, P)
Xdot_ref = [va*math.cos(gamma), 0., -va*math.sin(gamma), 0., 0., 0., 0., 0., 0., 0., 0., 0.]
return np.linalg.norm(Xdot - Xdot_ref)
p0 = [0.2, pu.rad_of_deg(2.), pu.rad_of_deg(0.)]
thr_e, ele_e, alpha_e = scipy.optimize.fmin_powell(err_func, p0, disp=debug, ftol=1e-9)
if debug:
print """result:
throttle : {:f} %
elevator : {:f} deg
angle of attack : {:f} deg""".format(100.*thr_e, pu.deg_of_rad(ele_e), pu.deg_of_rad(alpha_e))
Ue = np.zeros(P.input_nb)
Ue[0:P.eng_nb] = thr_e; Ue[P.eng_nb+iv_de] = ele_e
Xe = [va*math.cos(gamma), 0., va*math.sin(gamma), va, alpha_e, 0., 0., gamma+alpha_e, 0., 0., 0., 0.]
return Xe, Ue
import pat.vehicles.fixed_wing.dynamic_model_python_parameters
class Param(pat.vehicles.fixed_wing.dynamic_model_python_parameters.Param):
pass
class DynamicModel(dm.DynamicModel):
sv_x = sv_x # position x axis
sv_y = sv_y # position y axis
    sv_z = sv_z # height above ground
sv_v = sv_v # airspeed
sv_alpha = sv_alpha # alpha
sv_beta = sv_beta # beta
sv_phi = sv_phi # roll (euler, ltp to body)
sv_theta = sv_theta # pitch (euler, ltp to body)
sv_psi = sv_psi # yaw (euler, ltp to body)
sv_p = sv_p # rotational vel body x
sv_q = sv_q # rotational vel body y
sv_r = sv_r # rotational vel body z
sv_size = sv_size
iv_th = 0 # throttle
iv_da = 1 # aileron
iv_de = 2 # elevator
iv_dr = 3 # rudder
iv_size = 4
# hack for multiple engines
_iv_da = 0
_iv_de = 1
_iv_dr = 2
dyn = lambda self, X, t, U, P: dyn(X, t, U, self.P)
trim = lambda self, args=None, debug=False: trim(self.P, args, debug)
def __init__(self, params=None):
print "Info: Dynamic fixed wing basic"
dm.DynamicModel.__init__(self)
        if params is None: params = "../config/Rcam_single_engine.xml"
self.X = np.zeros(DynamicModel.sv_size)
self.P = Param(params)
self.reset()
def name(self):
return "Fixed Wing Python Basic ({:s})".format(self.P.name)
def reset(self, X0=None):
        if X0 is not None: self.X = X0
else: self.X = np.array([0., 0., 0., 68., 0., 0., 0., 0., 0., 0., 0., 0.])
return self.X
def run(self, dt, U):
foo, self.X = scipy.integrate.odeint(dyn, self.X, [0, dt], args=(U, self.P, ))
return self.X
def param(self):
return str(self.P)
def iv_dth(self):
if self.P.eng_nb>1: return range(0,self.P.eng_nb)
else: return 0
def iv_da(self): return self.P.eng_nb + DynamicModel._iv_da
def iv_de(self): return self.P.eng_nb + DynamicModel._iv_de
def iv_dr(self): return self.P.eng_nb + DynamicModel._iv_dr
def input_nb(self): return self.P.input_nb
def state_SixDOFfEuclidianEuler(self):
X = np.zeros(fr.SixDOFfEuclidianEuler.size)
X[fr.SixDOFfEuclidianEuler.x:fr.SixDOFfEuclidianEuler.z+1] = self.X[sv_x:sv_z+1]
X[fr.SixDOFfEuclidianEuler.phi:fr.SixDOFfEuclidianEuler.r+1] = self.X[sv_phi:sv_r+1]
return X
def get_jacobian(self, Xe, Ue):
A,B = pu.num_jacobian(Xe, Ue, self.P, dyn)
return A, B
def state_str(self):
return """pos: {:-.2f}, {:-.2f}, {:-.2f} m
vel: {:-.2f} m/s, alpha {:-.2f}, beta {:-.2f} deg
att: {:-.2f}, {:-.2f}, {:-.2f} deg
""".format(self.X[sv_x], self.X[sv_y], self.X[sv_z],
self.X[sv_v], pu.deg_of_rad(self.X[sv_alpha]), pu.deg_of_rad(self.X[sv_beta]),
pu.deg_of_rad(self.X[sv_phi]), pu.deg_of_rad(self.X[sv_theta]), pu.deg_of_rad(self.X[sv_psi]))
def plot_trajectory(self, time, X, U=None, figure=None, window_title="Trajectory", legend=None, filename=None):
plot_trajectory(time, X, U, figure, window_title, legend, filename)
#
# Some plotting functions
#
def plot_trajectory(time, X, U=None, figure=None, window_title="Trajectory",
legend=None, filename=None):
margins=(0.04, 0.05, 0.98, 0.96, 0.20, 0.34)
figure = pu.prepare_fig(figure, window_title, figsize=(20.48, 10.24), margins=margins)
plots = [("x", "m", X[:,sv_x]), ("y", "m", X[:,sv_y]), ("z", "m", X[:,sv_z]),
("v", "m/s", X[:,sv_v]),
("$\\alpha$", "deg", pu.deg_of_rad(X[:,sv_alpha])),
("$\\beta$", "deg", pu.deg_of_rad(X[:,sv_beta])),
("$\phi$", "deg", pu.deg_of_rad(X[:,sv_phi])),
("$\\theta$", "deg", pu.deg_of_rad(X[:,sv_theta])),
("$\\psi$", "deg", pu.deg_of_rad(X[:,sv_psi])),
("$p$", "deg/s", pu.deg_of_rad(X[:,sv_p])),
("$q$", "deg/s", pu.deg_of_rad(X[:,sv_q])),
("$r$", "deg/s", pu.deg_of_rad(X[:,sv_r]))]
    nrow = 5 if U is not None else 4
for i, (title, ylab, data) in enumerate(plots):
ax = plt.subplot(nrow, 3, i+1)
plt.plot(time, data)
pu.decorate(ax, title=title, ylab=ylab)
        if legend is not None:
plt.legend(legend, loc='best')
    if U is not None:
ax = figure.add_subplot(5, 3, 13)
ax.plot(time, 100*U[:, 0])
pu.decorate(ax, title="$d_{th}$", ylab="%")
ax = figure.add_subplot(5, 3, 14)
ax.plot(time, pu.deg_of_rad(U[:, iv_da+1]))
pu.decorate(ax, title="$d_a$", ylab="deg")
ax = figure.add_subplot(5, 3, 15)
ax.plot(time, pu.deg_of_rad(U[:, iv_de+1]))
pu.decorate(ax, title="$d_e$", ylab="deg")
return figure
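# A hedged usage sketch (not part of the original module): trims the model,
# integrates 10 s of flight holding the trim input, then plots the result.
# The 0.01 s step and 10 s horizon are illustrative choices only.
def _example():
    model = DynamicModel() # uses the module's default parameter file
    Xe, Ue = model.trim(debug=True)
    model.reset(np.array(Xe))
    time = np.arange(0., 10., 0.01)
    X = np.zeros((len(time), DynamicModel.sv_size))
    for i in range(len(time)):
        X[i] = model.run(0.01, Ue)
    model.plot_trajectory(time, X)
    plt.show()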
|
UTF-8
|
Python
| false | false | 2,014 |
6,657,199,353,762 |
92a168acc9e372bb6271449fe92d029668a604ef
|
6c86d9541b634493877843c8b9b9c95767122797
|
/platform/libs/apple.py
|
f112e06f5678e619de0e74e775953e16b48ef083
|
[] |
no_license
|
Stamped/Stamped
|
https://github.com/Stamped/Stamped
|
34de911b02447effa404cd7aa213d5267ba8ae14
|
29107c73d85c16b1dea67a5bd7703a1d7b1e0e15
|
refs/heads/master
| 2021-01-15T21:20:33.302336 | 2012-12-17T04:44:23 | 2012-12-17T04:44:23 | 1,856,679 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
__author__ = "Stamped ([email protected])"
__version__ = "1.0"
__copyright__ = "Copyright (c) 2011-2012 Stamped.com"
__license__ = "TODO"
import Globals
from api import Entity
import copy, json, urllib, utils
from api.Schemas import *
from optparse import OptionParser
from utils import AttributeDict
from pprint import pprint
__all__ = [ "AppleAPI", "AppleAPIError" ]
class AppleAPIError(Exception):
pass
class AppleAPICall(object):
_wrapper_type_to_subcategory = {
'artist' : 'artist',
'collection' : 'album',
'track' : 'song',
'software' : 'app',
}
_kind_to_subcategory = {
'song' : 'song',
'album' : 'album',
'artist' : 'artist',
'feature-movie' : 'movie',
'software' : 'app',
}
def __init__(self, **kwargs):
self.transform = kwargs.pop('transform', False)
self.verbose = kwargs.pop('verbose', False)
self.method = kwargs.pop('method', None)
self.params = kwargs
def __getattr__(self, k):
try:
return object.__getattr__(self, k)
except:
return AppleAPICall(method=k, transform=self.transform)
def __call__(self, **kwargs):
assert self.method is not None
transform = kwargs.pop('transform', self.transform)
verbose = kwargs.pop('verbose', self.verbose)
params = copy.copy(self.params)
for kwarg in kwargs:
params[kwarg] = kwargs[kwarg]
if self.method == 'search':
if 'term' not in params:
raise AppleAPIError("required parameter 'term' missing to api method %s" % self.method)
if 'term' in params:
term = params['term']
term = term.replace(' ', '+')
params['term'] = term
url = self._get_url(params)
if verbose:
utils.log(url)
result = utils.getFile(url)
"""
f=open('out.apple.xml', 'w')
f.write(result)
f.close()
"""
result = json.loads(result)
if transform:
return self.transform_result(result)
else:
return result
def _get_url(self, params):
return "http://itunes.apple.com/%s?%s" % (self.method, urllib.urlencode(params))
def transform_result(self, result):
if result is None or not 'results' in result:
return []
results = result['results']
output = []
for result in results:
try:
wrapperType = result['wrapperType']
try:
subcategory = self._wrapper_type_to_subcategory[wrapperType]
except KeyError:
continue
if 'kind' in result:
try:
subcategory = self._kind_to_subcategory[result['kind']]
except KeyError:
continue
entity = utils.AttributeDict()
entity.subcategory = subcategory
if wrapperType == u'track':
entity.title = result['trackName']
entity.aid = result['trackId']
entity.view_url = result['trackViewUrl']
if 'trackTimeMillis' in result:
length = result['trackTimeMillis']
if length is not None:
entity.track_length = length / 1000.0
if 'trackPrice' in result:
price = result['trackPrice']
if result['currency'] is not None:
entity.currency_code = result['currency']
if price is not None:
entity.amount = int(price * 100)
entity.formatted_price = "$%.2f" % price
if subcategory == 'song':
album_name = result['collectionName']
if album_name is not None:
entity.album_name = album_name
album_id = result['collectionId']
if album_id is not None:
entity.song_album_id = album_id
elif subcategory == u'album':
entity.title = result['collectionName']
entity.aid = result['collectionId']
entity.view_url = result['collectionViewUrl']
elif subcategory == u'artist':
entity.title = result['artistName']
entity.aid = result['artistId']
try:
entity.view_url = result['artistViewUrl']
except:
try:
entity.view_url = result['artistLinkUrl']
except:
pass
elif wrapperType == u'software':
entity.title = result['trackName']
entity.aid = result['trackId']
entity.view_url = result['trackViewUrl']
else:
                    # we should never reach this point; log a warning instead of
                    # raising so that one unexpected result type can't break
                    # handling of the rest of the response
print "warning: unexpected / invalid entity type returned from iTunes API"
pprint(result)
continue
if subcategory != 'artist':
entity.artist_display_name = result['artistName']
if 'artistId' in result and result['artistId'] is not None:
entity.artist_id = result['artistId']
entity_map = [
('artistName', 'artist_display_name'),
('description', 'desc'),
('previewUrl', 'preview_url'),
('artworkUrl100', 'large'),
('artworkUrl60', 'small'),
('artworkUrl30', 'tiny'),
('artworkUrl512', 'large'),
('longDescription', 'desc'),
('shortDescription', 'desc'),
('primaryGenreName', 'genre'),
('releaseDate', 'original_release_date'),
('contentAdvisoryRating', 'mpaa_rating'),
('copyright', 'copyright'),
('trackCount', 'track_count'),
('sellerName', 'studio_name'),
('sellerUrl', 'studio_url'),
('screenshotUrls', 'screenshots'),
]
for t in entity_map:
key, key2 = t
if key in result:
value = result[key]
if value is not None:
entity[key2] = result[key]
if wrapperType == 'track':
if 'trackTimeMillis' in result:
length = result['trackTimeMillis']
if length is not None:
entity.track_length = length / 1000.0
if u'genres' in result:
entity.genre = u', '.join(result[u'genres'])
entity = Entity.upgradeEntityData(dict(entity))
output.append(AttributeDict(result=result, entity=entity))
except:
utils.printException()
pprint(result)
return output
class AppleAPI(AppleAPICall):
def __init__(self, **kwargs):
AppleAPICall.__init__(self, **kwargs)
def __getattr__(self, k):
try:
return object.__getattr__(self, k)
except:
valid_calls = set([ 'search', 'lookup' ])
if k in valid_calls:
return AppleAPICall.__getattr__(self, k)
else:
raise AppleAPIError("undefined api method '%s'" % k)
def parseCommandLine():
usage = "Usage,%prog [options] query"
version = "%prog " + __version__
parser = OptionParser(usage=usage, version=version)
parser.add_option("-s", "--search", action="store_true", default=False,
help="Perform search query")
parser.add_option("-t", "--term", action="store", type="string", default=None,
help="Term to search for")
parser.add_option("-c", "--country", action="store", type="string", default='US',
help="Two-letter country code for the store you want to search (defaults to US)")
parser.add_option("-m", "--media", action="store", type="string", default=None,
help="Media type you want to search for")
parser.add_option("-e", "--entity", action="store", type="string", default=None,
help="Type of results you want returned, relative to the specified media type. For example, movieArtist for a movie media type search.")
parser.add_option("-a", "--attribute", action="store", type="string", default=None,
help="The attribute you want to search for in the stores, relative to the specified media type. For example, if you wnat to search for an artist by name, specify --entity=allArtist --attribute=allArtistTerm")
parser.add_option("-l", "--limit", action="store", type="int", default=None,
help="Numer of search results to return (1 to 200)")
parser.add_option("-L", "--language", action="store", type="string", default=None,
help="Language to use when returning search results, using the five-character codename (default en_us)")
parser.add_option("-E", "--explicit", action="store_true", default=None,
help="Whether or not to include explicit content in search results (default is True)")
parser.add_option("-v", "--verbose", action="store_true", default=False,
help="Whether or not to transform results (not verbose) or return results from the API verbatim")
(options, args) = parser.parse_args()
return (options, args)
def extract_args(options):
func_args = copy.copy(options.__dict__)
delete = []
for arg in func_args:
if func_args[arg] is None or arg == 'search':
delete.append(arg)
for d in delete:
del func_args[d]
func_args['transform'] = not options.verbose
return func_args
def main():
options, args = parseCommandLine()
api = AppleAPI()
func_args = extract_args(options)
if options.search:
results = api.search(**func_args)
else:
if len(args) < 1:
print "default lookup search takes an apple id"
return
func_args['id'] = args[0]
results = api.lookup(**func_args)
if options.verbose:
pprint(results)
else:
for result in results:
entity = result.entity
pprint(entity.value)
if __name__ == '__main__':
main()
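# Example invocations (a sketch -- the script filename and the id below are
# placeholders, not taken from this repository):
#   python appleapi.py --search --term "pink floyd" --media music --limit 5
#   python appleapi.py 909253        # default lookup mode takes an Apple id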
|
UTF-8
|
Python
| false | false | 2,012 |
19,653,770,346,640 |
80336c94aca58550ce9ede2393256d88063b721c
|
1b8d162160f5ab6d6a6b8940b8ab83b482abb409
|
/pylastica/searchable.py
|
ca237496f5df3e184eab2b06a9723e33e50df3da
|
[
"Apache-2.0"
] |
permissive
|
jlinn/pylastica
|
https://github.com/jlinn/pylastica
|
f81e438a109dfe06adc7e9b70fdf794c5d01a53f
|
0fbf68ed3e17d665e3cdf1913444ebf1f72693dd
|
refs/heads/master
| 2020-05-19T14:07:38.794717 | 2014-07-23T23:43:00 | 2014-07-23T23:43:00 | 10,442,284 | 5 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
__author__ = 'Joe Linn'
import abc
class Searchable(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def search(self, query=None, options=None):
"""
Searches results for a query
@param query: dict with all query data or a Query object
@type query: str or dict or pylastica.query.Query
@param options:
@type options: dict
@return: result set with all results
@rtype: pylastica.resultset.ResultSet
"""
pass
@abc.abstractmethod
def count(self, query=None):
"""
Counts results for a query. If no query is set, a MatchAll query is used.
@param query: dict with all query data or a Query object
@type query: dict or pylastica.query.Query
@return: number of docs matching the query
@rtype: int
"""
pass
@abc.abstractmethod
def create_search(self, query=None, options=None):
"""
@param query:
@type query: pylastica.query.Query
@param options:
@type options: dict
@return:
@rtype: pylastica.search.Search
"""
pass
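# A minimal sketch of a concrete implementation of the contract above
# (hypothetical names; pylastica's real index and document type classes
# are the actual implementors):
class ExampleSearchable(Searchable):
    def __init__(self, search_factory):
        # search_factory: a callable returning a pylastica.search.Search
        self._search_factory = search_factory

    def create_search(self, query=None, options=None):
        return self._search_factory(query, options)

    def search(self, query=None, options=None):
        # delegate execution to the Search object built above
        return self.create_search(query, options).search(query)

    def count(self, query=None):
        # a naive count: the size of the full result set
        return len(self.search(query))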
|
UTF-8
|
Python
| false | false | 2,014 |
14,809,047,274,560 |
adf9d7173fca47484b89dfd46afd44b992b9d6ad
|
93786e4a93392329100b754b275f25156ef7935b
|
/modules/IRC/IRC_Repeat.py
|
c9a80ee0fdd5c28785e148070e15ccae872099ef
|
[
"GPL-2.0-only"
] |
non_permissive
|
caller9/PyReferee
|
https://github.com/caller9/PyReferee
|
419c1d944219902328b4d4f7b2b77c869a92eb1a
|
845be9abf4c26b2b0a265e5b47addd8f1415faec
|
refs/heads/master
| 2021-01-25T06:40:11.402716 | 2010-04-21T03:27:32 | 2010-04-21T03:27:32 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#~ Created by caller9.com as part of the PyReferee application
#~ Catch repeated messages/taunts/bullying
#~
#~ Created 2010-04-
#~ Modified 2010-04-19
#~ This program is free software; you can redistribute it and/or modify
#~ it under the terms of the GNU General Public License as published by
#~ the Free Software Foundation; version 2 of the License.
#~
#~ This program is distributed in the hope that it will be useful,
#~ but WITHOUT ANY WARRANTY; without even the implied warranty of
#~ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#~ GNU General Public License for more details.
# TODO: Implement checks that the last N messages when split into words have no words that are repeated
# more than M times. Maybe just keep a dict with a counter for each word(key) and exclude very common
# words and smileys
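#~ A minimal sketch of the TODO above (hypothetical names and thresholds,
#~ not part of the original module):
from collections import deque

COMMON_WORDS = set(['the', 'a', 'an', 'and', 'to', 'is', 'it', ':)', ':-)', ':('])

def is_repeat_spam(history, message, n=5, m=3):
    """Return True if, over the last n messages (history is a deque of the
    earlier messages plus this one), any uncommon word occurs more than m
    times."""
    history.append(message)
    while len(history) > n:
        history.popleft()
    counts = {}
    for msg in history:
        for word in msg.lower().split():
            if word in COMMON_WORDS:
                continue
            counts[word] = counts.get(word, 0) + 1
    return any(count > m for count in counts.values())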
|
UTF-8
|
Python
| false | false | 2,010 |
6,193,342,842,409 |
eb6f20d63781cd9ddb27c722af738cb1b3182c1c
|
1f906c1ee255cec24936136837f104f0596d8384
|
/team2.py
|
2fbeb2cdb201615a5706468efc62f5510d054e71
|
[] |
no_license
|
brewerja/tas2mlb
|
https://github.com/brewerja/tas2mlb
|
ca863385fb8b2934d56bc050eda041ae37f23ee0
|
5c099bb3b408b59d71b216a1df6ea881d5c766e6
|
refs/heads/master
| 2016-09-06T15:59:47.911949 | 2012-05-17T16:30:50 | 2012-05-17T16:30:50 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
class Game:
def __init__(self):
self.innings = []
self.linescore = []
class Inning:
def __init__(self):
self.halfs = []
class HalfInning:
def __init__(self):
self.events = []
class Event:
def __init__(self, scoreHome, scoreVisitor):
self.type = 'GENERIC'
self.scoreHome = scoreHome
self.scoreVisitor = scoreVisitor
self.errorFielders = {}
class Line:
pass
|
UTF-8
|
Python
| false | false | 2,012 |
10,436,770,533,980 |
bbb4c8159e9b362b1d07db965a5c21c41aa02004
|
d08f7fef90eb15e624791fb66c039e9c71bda874
|
/benchmark/scalability/scripts/old/ns3.py
|
fcedeb4b8b2c1f2fdcacaa034f3044ddbea7f283
|
[
"GPL-3.0-only"
] |
non_permissive
|
phiros/nepi
|
https://github.com/phiros/nepi
|
c813fc9e990af858a06fa4cda69a566fe621a130
|
b4b080db771ea1dd1454a980d3fa4880dd63f545
|
refs/heads/master
| 2021-01-16T00:10:01.568952 | 2014-12-19T15:54:39 | 2014-12-19T15:54:39 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
#
# NEPI, a framework to manage network experiments
# Copyright (C) 2013 INRIA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Alina Quereilhac <[email protected]>
from nepi.execution.ec import ExperimentController
import os
import datetime
import ipaddr
import random
import psutil
import threading
import subprocess
import numpy
import time
from optparse import OptionParser
usage = ("usage: %prog -n <node_count> -a <app_count> -t <thread_count> "
"-r <run> -d <delay> -o <opdelay> -R <results>")
parser = OptionParser(usage = usage)
parser.add_option("-n", "--node-count", dest="node_count",
help="Number of simulated nodes in the experiment", type="int")
parser.add_option("-a", "--app-count", dest="app_count",
help="Number of simulated applications in the experiment", type="int")
parser.add_option("-t", "--thread-count", dest="thread_count",
help="Number of threads processing experiment events", type="int")
parser.add_option("-r", "--run", dest="run",
help="Run numbber", type="int")
parser.add_option("-d", "--delay", dest="delay",
help="Re-scheduling delay", type="float")
parser.add_option("-o", "--opdelay", dest="opdelay",
help="Opetation processing delay", type="float")
parser.add_option("-R", "--results", dest="results", help="Results folder")
(options, args) = parser.parse_args()
results = options.results
node_count = options.node_count
app_count = options.app_count
thread_count = options.thread_count
run = options.run
clean_run = (run == 1)
opdelay = options.opdelay
delay = options.delay
reschedule_delay = "0s" # "%0.1fs" % delay
def get_nepi_revision():
p = subprocess.Popen(["hg", "tip"],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
info = stdout.split("\n")
changeset = info[0].split(":")[-1]
return changeset
######### Resource consumption ################################################
cpu_count = psutil.NUM_CPUS
cpu_usage_deploy = []
cpu_usage_start = []
vmem = psutil.virtual_memory()
mem_total = vmem.total
mem_usage_deploy = []
mem_usage_start = []
stop_monitor_deploy = []
stop_monitor_start = []
def compute_estimator(samples):
if len(samples) == 0:
return 0,0,0
x = numpy.array(samples)
n = len(samples)
std = x.std()
m = x.mean()
return n, m, std
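# monitor_resources periodically samples this process's CPU and memory usage
# (roughly every two seconds: a one-second CPU sampling interval plus a
# one-second sleep). `stop` is a shared list used as a mutable flag --
# appending any element makes it truthy, which ends the monitoring thread.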
def monitor_resources(cpu_usage, mem_usage, stop):
wait = 1
while not stop:
p = psutil.Process(os.getpid())
cpu = p.get_cpu_percent(interval=1)
cpu_usage.append(cpu)
mem = p.get_memory_percent()
mem_usage.append(mem)
time.sleep(wait)
thread_monitor_deploy = threading.Thread(target=monitor_resources,
args=(cpu_usage_deploy, mem_usage_deploy, stop_monitor_deploy))
thread_monitor_start = threading.Thread(target=monitor_resources,
args=(cpu_usage_start, mem_usage_start, stop_monitor_start))
########## Declaration of resources #####################################
def add_linux_node(ec, clean_run):
node = ec.register_resource("linux::Node")
ec.set(node, "hostname", "localhost")
#ec.set(node, "cleanExperiment", clean_run)
ec.set(node, "cleanProcesses", True)
return node
def add_node(ec, simu):
node = ec.register_resource("ns3::Node")
ec.set(node, "enableStack", True)
ec.register_connection(node, simu)
return node
def add_device(ec, node, ip, prefix):
dev = ec.register_resource("ns3::CsmaNetDevice")
ec.set(dev, "ip", ip)
ec.set(dev, "prefix", prefix)
ec.register_connection(node, dev)
queue = ec.register_resource("ns3::DropTailQueue")
ec.register_connection(dev, queue)
return dev
def add_ping_app(ec, node, remote_ip):
app = ec.register_resource("ns3::V4Ping")
ec.set (app, "Remote", remote_ip)
ec.set (app, "Verbose", True)
ec.set (app, "Interval", "1s")
ec.set (app, "StartTime", "0s")
ec.set (app, "StopTime", "20s")
ec.register_connection(app, node)
return app
############## Experiment design and execution ################################
platform = "ns3"
# Set the number of threads.
# NOTE: This must be done before instantiating the EC.
os.environ["NEPI_NTHREADS"] = str(thread_count)
# Create Experiment Controller:
exp_id = "%s_bench" % platform
ec = ExperimentController(exp_id)
# Add the physical node in which to run the simulation
lnode = add_linux_node(ec, clean_run)
# Add a simulation resource
simu = ec.register_resource("linux::ns3::Simulation")
ec.set(simu, "verbose", True)
ec.register_connection(simu, lnode)
# Add simulated nodes and applications
nodes = list()
apps = list()
devs = list()
ips = dict()
prefix = "16"
base_addr = "10.0.0.0/%s" % prefix
net = ipaddr.IPv4Network(base_addr)
host_itr = net.iterhosts()
for i in xrange(node_count):
node = add_node(ec, simu)
nodes.append(node)
ip = host_itr.next()
dev = add_device(ec, node, ip, prefix)
devs.append(dev)
ips[node] = ip
for nid in nodes:
for j in xrange(app_count):
# If there is only one node, ping itself. If there are more
# choose one randomly.
remote_ip = ips[nid]
if len(nodes) > 1:
choices = ips.values()
choices.remove(remote_ip)
remote_ip = random.choice(choices)
        app = add_ping_app(ec, nid, remote_ip)
apps.append(app)
chan = ec.register_resource("ns3::CsmaChannel")
ec.set(chan, "Delay", "0s")
for dev in devs:
ec.register_connection(chan, dev)
# Deploy the experiment
zero_time = datetime.datetime.now()
# Launch thread to monitor CPU and memory usage
thread_monitor_deploy.start()
# Deploy experiment
ec.deploy()
# Wait until nodes and apps are deployed
ec.wait_deployed(nodes + apps + devs)
# Time to deploy
ttd_time = datetime.datetime.now()
# Launch thread to monitor CPU and memory usage
thread_monitor_start.start()
# Stop deploy monitoring thread
stop_monitor_deploy.append(0)
# Wait until the apps are finished
ec.wait_finished(apps)
# Time to finish
ttr_time = datetime.datetime.now()
# Stop deploy monitoring thread
stop_monitor_start.append(0)
# Do the experiment controller shutdown
ec.shutdown()
# Time to release
ttrel_time = datetime.datetime.now()
##################### Format performance information ##########################
# Get the failure level of the experiment (OK if no failure)
status = ec.failure_level
if status == 1:
status = "OK"
elif status == 2:
status = "RM_FAILURE"
else:
status = "EC_FAILURE"
# Get time deltas in milliseconds
s2us = 1000000.0 # conversion factor s to microseconds = 10^6
s2ms = 1000.0 # conversion factor s to milliseconds = 10^3
ttd = ttd_time - zero_time
ttdms = (ttd.microseconds + ((ttd.seconds + ttd.days * 24 * 3600) * s2us)) / s2ms
ttr = (ttr_time - ttd_time)
ttrms = (ttr.microseconds + ((ttr.seconds + ttr.days * 24 * 3600) * s2us)) / s2ms
ttrel = (ttrel_time - ttr_time)
ttrelms = (ttrel.microseconds + ((ttrel.seconds + ttrel.days * 24 * 3600) * s2us)) / s2ms
############### Persist results
date = zero_time.strftime('%Y%m%d')
revision = get_nepi_revision()
filename = "%s_scalability_benchmark_rev_%s_%s.data" % (platform, revision, date)
filename = os.path.join(results, filename)
if not os.path.exists(filename):
f = open(filename, "w")
f.write("time|platform|cpu_count(%)|cpu_deploy(%)|cpu_start|"
"mem_total(B)|mem_deploy(%)|mem_starts(%)|opdelay(s)|scheddelay(s)|run#|"
"node_count|app_count|thread_count|TTD(ms)|TTR(ms)|TTREL(ms)|status\n")
else:
f = open(filename, "a")
n,m,std = compute_estimator(cpu_usage_deploy)
cpu_deploy = "%d,%0.2f,%0.2f" % (n,m,std)
n,m,std = compute_estimator(cpu_usage_start)
cpu_start = "%d,%0.2f,%0.2f" % (n,m,std)
n,m,std = compute_estimator(mem_usage_deploy)
mem_deploy = "%d,%0.2f,%0.2f" % (n,m,std)
n,m,std = compute_estimator(mem_usage_start)
mem_start = "%d,%0.2f,%0.2f" % (n,m,std)
timestmp = zero_time.strftime('%Y%m%d %H:%M:%S')
f.write("%s|%s|%d|%s|%s|%d|%s|%s|%0.1f|%0.1f|%d|%d|%d|%d|%d|%d|%d|%s\n" % (
timestmp,
platform,
cpu_count,
cpu_deploy,
cpu_start,
mem_total,
mem_deploy,
mem_start,
opdelay,
delay,
run,
node_count,
app_count,
thread_count,
ttdms,
ttrms,
ttrelms,
status
))
f.close()
|
UTF-8
|
Python
| false | false | 2,014 |
5,935,644,839,500 |
75cb49ba73aa5da578cb504818015211c552cb7c
|
c8ed9df755e1bea0ece7fab99ee4f45549705ae8
|
/p5.py
|
78a07ef6166dfb66ed8b9748fc530847498b4390
|
[] |
no_license
|
gurisugi/euler
|
https://github.com/gurisugi/euler
|
737abed8f9fe44a690d30e2e1bc303dd6134e234
|
d24b9d775de65904729b3d75c49544b1463e668d
|
refs/heads/master
| 2016-09-10T16:45:02.052934 | 2012-12-27T10:26:31 | 2012-12-27T10:26:31 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
#coding:utf-8
"""
2520 is the smallest number that can be divided by each of the numbers from 1 to 10 without any remainder.
What is the smallest positive number that is evenly divisible by all of the numbers from 1 to 20?
"""
from collections import Counter
def solve(num, i=2, lis=None):
    if lis is None: lis = []
    while i <= num:
if num % i == 0:
lis.append(i)
return solve(num/i, i, lis)
else:
i += 1
return lis
if __name__ == '__main__':
cnt = Counter()
tmp_cnt = Counter()
for num in range(2, 21):
tmp_cnt.clear()
for i in solve(num):
tmp_cnt[i] += 1
for key in tmp_cnt.keys():
if not key in cnt.keys() or cnt[key] < tmp_cnt[key]:
cnt[key] = tmp_cnt[key]
print reduce(lambda a,b: a*b, [key**cnt[key] for key in cnt.keys()])
# Iassevk's awesome code
    # starting from the smallest k, find a k that doesn't divide i evenly,
    # then multiply i up until it does
    # (admittedly a rough explanation)
i = 1
for k in (range(1, 21)):
if i % k > 0:
for j in range(1, 21):
if (i*j) % k == 0:
i *= j
break
print i
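    # An equivalent one-liner (a sketch using the identity
    # lcm(a, b) = a*b / gcd(a, b), folded over 1..20):
    from fractions import gcd
    print reduce(lambda a, b: a * b // gcd(a, b), range(1, 21))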
|
UTF-8
|
Python
| false | false | 2,012 |
1,640,677,538,899 |
d34703536d393b6e1d1058a422a1bc262beb4380
|
a899759ebd476517e2cd238cdcc377bf99340d56
|
/modules/http.py
|
d138799913cf990ecda13dfa339abcd4a6965393
|
[
"Apache-2.0"
] |
permissive
|
eieio/pyy
|
https://github.com/eieio/pyy
|
88416fe5a4f6dbd0d8dc2739962dbc1dbff2ff78
|
dface9d5c8914bb74ef1ee4df112269b65e62bec
|
refs/heads/master
| 2016-08-04T08:59:03.219638 | 2013-02-13T08:29:02 | 2013-02-13T08:29:02 | 8,152,999 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
''' http.py
Copyright 2008 Corey Tabaka
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from bot import MessageHandler, BotCommandHandler
from admin import trusted
from threaded import threaded
import re, urllib
_reURL = re.compile('.*?((?:(https?)://|(?=www\.))[\w:#@%/;$()~_?\+-=\\\.&]+).*?', re.I)
_reTitle = re.compile('<title>(.*?)</title>', re.I | re.M)
_enabled = True
@BotCommandHandler('http')
@trusted
def _http(context):
'''Usage: http [enable|disable]\nEnables or disables URL titles; no param returns state'''
m = re.match('\s*(enable|disable)\s*', context.args or '', re.I)
if m:
op, = m.groups()
global _enabled
_enabled = op.lower() == 'enable'
elif not (context.args or ''):
context.reply('http titles %s' % ['DISABLED', 'ENABLED'][_enabled])
else:
context.reply('Usage: http [enable|disable]')
@MessageHandler
@threaded
def _handler(context):
m = _reURL.match(context.message)
if _enabled and m:
address, proto = m.groups()
proto = (proto or '').lower()
if not proto:
address = 'http://' + address
        if proto in ('http', 'https', ''):  # '' means the scheme was defaulted to http above
fin = urllib.urlopen(address)
if fin.headers.gettype() == 'text/html':
title = ' '.join(_reTitle.findall(fin.read(4096))).strip()
fin.close()
if title:
context.reply('Title: ' + title)
|
UTF-8
|
Python
| false | false | 2,013 |
7,653,631,736,751 |
18b70b541e69e58e8b4fda90c28313224e417649
|
b7994aaed5df92f24cba16c34470571e65a62d9f
|
/views.py
|
38a5fed3043c98595ee8c329b210c5a374f251ed
|
[] |
no_license
|
burck1/sam-website
|
https://github.com/burck1/sam-website
|
d4c3bb86486f82ad26215ed6017b2cb1753cd28a
|
71ff310ac2d5915617b26291865561f7a095f588
|
refs/heads/master
| 2016-08-05T09:13:23.616214 | 2014-05-04T02:28:04 | 2014-05-04T02:28:04 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from flask import Blueprint, request, redirect, render_template, url_for
from flask.views import MethodView
from flask.ext.mongoengine.wtf import model_form
home = Blueprint('home', __name__, template_folder='templates')
class HomeView(MethodView):
def get(self):
return render_template('index.html')
# Register the urls
home.add_url_rule('/', view_func=HomeView.as_view('home'))
|
UTF-8
|
Python
| false | false | 2,014 |
8,607,114,498,326 |
12f77f964aa170eae5f2956c144600900df09311
|
cae10c468bcd15780cb1e466a96fc7492c59deb6
|
/nova/virt/phy/nec/vif_driver.py
|
23956a302f8515830324379e116ffd3b74fc1efe
|
[
"Apache-2.0"
] |
permissive
|
jeffreycoho/nova
|
https://github.com/jeffreycoho/nova
|
c08682777e1e4dddba723b4acead5e084bed35dd
|
06e01e91d721ba487300bc4c72e99679fda0f9c8
|
refs/heads/master
| 2021-01-15T18:21:50.571434 | 2012-07-20T12:45:12 | 2012-07-20T12:45:12 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from nova import flags
from nova.openstack.common import log as logging
from nova import exception
from nova.virt.phy import vif_driver
from nova.virt.phy.nec.vifinfo_client import VIFINFOClient
from webob import exc
FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
class NECVIFDriver(vif_driver.BareMetalVIFDriver):
def _after_plug(self, instance, network, mapping, pif):
client = VIFINFOClient(FLAGS.quantum_connection_host, FLAGS.quantum_connection_port)
client.create_vifinfo(mapping['vif_uuid'], pif.datapath_id, pif.port_no)
def _after_unplug(self, instance, network, mapping, pif):
client = VIFINFOClient(FLAGS.quantum_connection_host, FLAGS.quantum_connection_port)
try:
client.delete_vifinfo(mapping['vif_uuid'])
except (exception.NovaException, exc.HTTPNotFound, exc.HTTPInternalServerError), e:
LOG.warn("client.delete_vifinfo(%s) is failed. (unplugging is continued): %s", mapping['vif_uuid'], e)
|
UTF-8
|
Python
| false | false | 2,012 |
11,424,613,051,646 |
cd0390b1b6c089fc07c9764a71dadf6d29d4cbdb
|
568a95c1a934e511fbcb0eea5a2e590cca59d2d3
|
/RSAkey1.py
|
8d147da71200a88dfbb053cd0fc88408596dc367
|
[] |
no_license
|
Lthomas2/RSA
|
https://github.com/Lthomas2/RSA
|
37192cd5cbc9257a8fe9d31749882f0854a6e9cc
|
65566e290e501c2b3a23b5a9328a8dd2db2c7a82
|
refs/heads/master
| 2016-09-11T02:21:09.358234 | 2012-05-10T15:26:24 | 2012-05-10T15:26:24 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import fractions as f
class RSA:
def __init__(self,p,q):
self.n=p*q
self.phi=(p-1)*(q-1)
keygen(self)
def keygen(rsa):
i=2
while i<rsa.phi:
if f.gcd(i,rsa.phi)==1:
rsa.d=i
i=rsa.phi
i+=1
i=1
while i<rsa.phi:
if (rsa.d*i)%(rsa.phi)==1:
rsa.e=i
i=rsa.phi
i+=1
rsa.privateKey=[rsa.n,rsa.d]
rsa.publicKey=[rsa.n,rsa.e]
print "Public Key:",rsa.publicKey," Private Key:",rsa.privateKey
def encrypt(text,pubkey):
coded=''
numberstring=''
for i in text:
number=ord(i)#inverse is chr(ord(i))=i
string=str(number)
if len(string)<=2:
string='0'+string
numberstring+=string
while len(numberstring)%(len(str(pubkey[0]))-1)!=0:
numberstring='0'+numberstring
i=0
temp=''
while i<len(numberstring):
if i%(len(str(pubkey[0]))-1)!=(len(str(pubkey[0]))-2):
temp+=numberstring[i]
else:
temp+=numberstring[i]
codedbit=int(temp)
codedbit=(codedbit**pubkey[1])%(pubkey[0])
codedbit=str(codedbit)
while len(codedbit)<(len(str(pubkey[0]))):
codedbit='0'+codedbit
coded=coded+codedbit
temp=''
i+=1
print coded
def decrypt(text,privKey):
bitlength=len(str(privKey[0]))
temp=''
numstring=''
decryption=''
i=0
while i<len(text):
if i%bitlength != (bitlength-1):
temp+=text[i]
else:
temp+=text[i]
decodedbit=int(temp)
decodedbit=int((decodedbit**privKey[1])%(privKey[0]))
decodedbit=str(decodedbit)
while len(decodedbit)<(len(str(privKey[0]))-1):
decodedbit='0'+decodedbit
numstring+=decodedbit
temp=''
i+=1
i=0
temp=''
while i<len(numstring):
if i%3!=2:
temp+=numstring[i]
else:
temp+=numstring[i]
decryption+=chr(int(temp))
temp=''
i+=1
print decryption
##Example Run:
##a=RSA(13,17)
##encrypt('abcd',a.publicKey)
##
##
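##Worked key math for the example run above, following keygen's two loops:
##  n = 13*17 = 221, phi = 12*16 = 192
##  d = smallest i >= 2 coprime to 192      -> d = 5
##  e = smallest i with (d*i) % 192 == 1    -> e = 77 (5*77 = 385 = 2*192 + 1)
##  so publicKey = [221, 77] and privateKey = [221, 5]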
|
UTF-8
|
Python
| false | false | 2,012 |
4,982,162,104,175 |
e66205bac8ad61c863e17960c9db889426b233d6
|
14ba9e7c8b041ed384058d64a905dc61c6c76543
|
/clients/python/jafka-performance-auto.py
|
9e17a32eba533a9e26f348e5368dd845b1ae4dda
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
yuc8939/jafka
|
https://github.com/yuc8939/jafka
|
f9b9f82e8caa4eaf99f3167130477da22aee5ee4
|
ea52dc1892d5286e1be3805ff98bd03049efb3b1
|
refs/heads/master
| 2021-01-24T03:26:11.026483 | 2012-06-29T03:14:55 | 2012-06-29T03:14:55 | 4,847,658 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import jafka
import time
DEFAULT_MAX_MESSAGE_SIZE = 1024*1024
out = sys.stdout
def packagesize(messagesize,batchsize,topic):
return (10 + messagesize)*batchsize +16+len(topic.encode('utf-8'))
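# Worked example: messagesize=1024, batchsize=100, topic='demo' (4 utf-8 bytes)
# gives (10 + 1024)*100 + 16 + 4 = 103420 bytes, well under the 1 MiB default.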
def check_message_size(messagesize,batchsize,topic):
size = packagesize(messagesize,batchsize,topic)
if size > DEFAULT_MAX_MESSAGE_SIZE:
        print('The message package (%d bytes) is too large; the default maximum is %d' %\
            (size,DEFAULT_MAX_MESSAGE_SIZE))
        print('    package size = (10 + messagesize)*batchsize + 16 + topic')
        dsize = DEFAULT_MAX_MESSAGE_SIZE - 16 - len(topic.encode('utf8'))
        print('    messagesize(%d): batchsize <= %d' % (messagesize, dsize//(10+messagesize)))
        print('    batchsize(%d): messagesize <= %d' % (batchsize, dsize//batchsize - 10))
sys.exit(1)
return size
def is_valid_messages(messagesize,batchsize,topic):
return packagesize(messagesize,batchsize,topic) < DEFAULT_MAX_MESSAGE_SIZE
def show_progress(i,count):
out.write('%05.2f%%'%(100*i/(1.0*count)))
out.flush()
def clear_progress():
out.write('\b'*6)
out.flush()
def write_line(batchsize):
out.write('\n')
out.write('%5d '%(batchsize,))
out.flush()
def write_result(tps,bytesize,seconds):
out.write('%6.d|%04.d|%03.1f '%(tps,bytesize/(1024*1024),seconds))
def compute(producer,batchsize,messagesize,times,topic):
psize = packagesize(messagesize,batchsize,topic)
start = time.time()
messages = [bytearray(messagesize) for i in range(batchsize)]
for i in range(times):
producer.send(topic,messages)
seconds = time.time() - start
tps = batchsize * times / seconds
bytesize = psize * times
return tps,bytesize,seconds
if __name__ == '__main__':
(topic,host,port,count) = ('demo','localhost',9092,100000)
producer = jafka.Producer(host,port)
batchsizes = (1,10,50,100,200,500,1000,2000)
messagesizes = (16,32,64,128,256,1024,2048)
print('send %d messages to %s:%d' % (count,host,port))
print(' batchsize(bs) = ',batchsizes)
print(' messagesizes(ms) = ',messagesizes)
    print('    result report contains three fields: tps|sendbytes|costtime')
print(' tps(Total Transactions per Second)')
print(' sendbytes(MB)')
print(' costtime(seconds)')
print()
totalcount = 0
bm = []
for batchsize in batchsizes:
for messagesize in messagesizes:
if not is_valid_messages(messagesize,batchsize,topic):
break
times = (int)(count/batchsize)
totalcount += times * batchsize
bm.append((batchsize,messagesize,times))
lastbatchsize = -1
i = 0
out.write('bs|ms %s' % (' '.join(['%15s'%ms for ms in messagesizes])))
for batchsize,messagesize,times in bm:
if lastbatchsize != batchsize: #new line
write_line(batchsize)
lastbatchsize = batchsize
show_progress(i,totalcount)
tps,bytesize,seconds = compute(producer,batchsize,messagesize,times,topic)
i += batchsize * times
clear_progress()
write_result(tps,bytesize,seconds)
print()
|
UTF-8
|
Python
| false | false | 2,012 |
8,735,963,500,745 |
2ebd4c8105288416616cd883b648b885876b39a6
|
c40b9af0ca733c20561e7c9301c2d9b15c7cd196
|
/all-in-row.py
|
cc5fbce70fc9694ed3b29a01c2759e91df719011
|
[] |
no_license
|
nikitamarchenko/checkio
|
https://github.com/nikitamarchenko/checkio
|
2573556b20bd14020ae0276c4314aa174224cf6f
|
f65d9a2015de4ac5d9a711041e4c2805a25cd9b7
|
refs/heads/master
| 2016-09-06T03:26:04.087438 | 2014-02-11T14:52:11 | 2014-02-11T14:52:11 | 13,212,561 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
__author__ = 'nmarchenko'
# http://www.checkio.org/mission/task/info/all-in-row/python-27/
import itertools
def checkio(data):
#print str(data).replace('[', '').replace(']', '').split(',')
print map(int, (''.join([x for x in str(data) if x.isdigit() or x == ','])).split(','))
return data
checkio([1, 2, 3]) == [1, 2, 3]
checkio([1, [2, 2, 2], 4]) == [1, 2, 2, 2, 4]
checkio([[[2]], [4, [5, 6, [6], 6, 6, 6], 7]]) == [2, 4, 5, 6, 6, 6, 6, 6, 7]
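# A recursive flattening sketch (an alternative to the digit-scraping hack
# above; it also handles negative numbers in nested lists):
def flatten(data):
    result = []
    for item in data:
        if isinstance(item, list):
            result.extend(flatten(item))
        else:
            result.append(item)
    return result

print flatten([[[2]], [4, [5, 6, [6], 6, 6, 6], 7]])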
|
UTF-8
|
Python
| false | false | 2,014 |
15,367,392,985,592 |
da1362f998978bfb7c707fe3cdd07485768e7701
|
6bf30ed40f9d48474427ee594716b6d8f4aaee49
|
/printCostWithOvertimeFunc.py
|
af0036c1b885d58f7cf26eeebec4d80244bacba0
|
[] |
no_license
|
GeertHa/coursera-python
|
https://github.com/GeertHa/coursera-python
|
f88ff1860123d8fb99ad31ce1f1ab89529034ded
|
d56955029a6ef82bbb2224991161a73173d863b4
|
refs/heads/master
| 2021-01-19T08:38:15.931783 | 2014-08-28T11:51:41 | 2014-08-28T11:51:41 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
"""
Spyder Editor
This script file is located here:
/home/geert/python
"""
def computepay(h,r):
if h > 40:
cost = 40*r + 1.5*r*(h-40)
else:
cost = h*r
return cost
hrs = raw_input("Enter Hours:")
rate = raw_input("Enter Rate:")
p = computepay(float(hrs),float(rate))
print p
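# Worked example: computepay(45, 10) -> 40*10 + 1.5*10*(45-40) = 475.0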
|
UTF-8
|
Python
| false | false | 2,014 |
15,272,903,744,806 |
0e53c86186829950ab99d4c3222803370b41ab1e
|
a86d3ac9ab3e4f316e69598af3ad1d5154ec2f9b
|
/virtaal/virtaal/views/checksunitview.py
|
1021b9912860194757cdec306d3db4e8852c70c6
|
[
"GPL-2.0-only"
] |
non_permissive
|
cc-archive/pootle
|
https://github.com/cc-archive/pootle
|
eb40e86f7712349f5a5c9458ec574eab465fd885
|
a609da498d3b48df922dd27a60a67d4e6a43ef77
|
refs/heads/master
| 2020-12-25T19:15:27.598013 | 2011-04-20T16:59:05 | 2011-04-20T16:59:05 | 14,874,679 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 Zuza Software Foundation
#
# This file is part of Virtaal.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
import locale
import gtk
import pango
from translate.lang import factory as lang_factory
from virtaal.common.pan_app import ui_language
from virtaal.views.widgets.popupwidgetbutton import PopupWidgetButton, POS_SE_NE
from baseview import BaseView
class ChecksUnitView(BaseView):
"""The unit specific view for quality checks."""
COL_CHECKNAME, COL_DESC = range(2)
# INITIALIZERS #
def __init__(self, controller):
self.controller = controller
main_controller = controller.main_controller
main_window = main_controller.view.main_window
self.popup_content = self._create_popup_content()
self._create_checks_button(self.popup_content, main_window)
self._create_menu_item()
main_controller.store_controller.connect('store-closed', self._on_store_closed)
main_controller.store_controller.connect('store-loaded', self._on_store_loaded)
self._prev_failures = None
self._listsep = lang_factory.getlanguage(ui_language).listseperator
def _create_checks_button(self, widget, main_window):
self.lbl_btnchecks = gtk.Label()
self.lbl_btnchecks.show()
self.lbl_btnchecks.set_ellipsize(pango.ELLIPSIZE_END)
self.btn_checks = PopupWidgetButton(widget, label=None, popup_pos=POS_SE_NE, main_window=main_window, sticky=True)
self.btn_checks.set_property('relief', gtk.RELIEF_NONE)
self.btn_checks.set_update_popup_geometry_func(self.update_geometry)
self.btn_checks.add(self.lbl_btnchecks)
def _create_menu_item(self):
mainview = self.controller.main_controller.view
self.mnu_checks = mainview.gui.get_widget('mnu_checks')
self.mnu_checks.connect('activate', self._on_activated)
def _create_popup_content(self):
vb = gtk.VBox()
frame = gtk.Frame()
frame.set_shadow_type(gtk.SHADOW_ETCHED_IN)
frame.add(vb)
self.lbl_empty = gtk.Label('<i>' + _('No issues') + '</i>')
self.lbl_empty.set_use_markup(True)
self.lbl_empty.hide()
vb.pack_start(self.lbl_empty)
self.lst_checks = gtk.ListStore(str, str)
self.tvw_checks = gtk.TreeView()
name_column = gtk.TreeViewColumn(_('Quality Check'), gtk.CellRendererText(), text=self.COL_CHECKNAME)
name_column.set_sizing(gtk.TREE_VIEW_COLUMN_AUTOSIZE)
self.tvw_checks.append_column(name_column)
description_renderer = gtk.CellRendererText()
#description_renderer.set_property('wrap-mode', pango.WRAP_WORD_CHAR)
description_column = gtk.TreeViewColumn(_('Description'), description_renderer, text=self.COL_DESC)
description_column.set_sizing(gtk.TREE_VIEW_COLUMN_AUTOSIZE)
self.tvw_checks.append_column(description_column)
self.tvw_checks.set_model(self.lst_checks)
self.tvw_checks.get_selection().set_mode(gtk.SELECTION_NONE)
vb.pack_start(self.tvw_checks)
return frame
# METHODS #
def show(self):
parent = self.controller.main_controller.unit_controller.view._widgets['vbox_right']
parent.pack_start(self.btn_checks, expand=False, fill=True)
self.btn_checks.show()
def hide(self):
if self.btn_checks.get_active():
self.btn_checks.clicked()
def update(self, failures):
# We don't want to show "untranslated"
failures.pop('untranslated', None)
if failures == self._prev_failures:
return
self._prev_failures = failures
if not failures:
            # We want an empty button, but that causes a bug where subsequent
            # updates don't show, so we set it to an invisible character
            # (U+202A, left-to-right embedding) instead
            self.lbl_btnchecks.set_text(u"\u202a")
self._show_empty_label()
self.btn_checks.set_tooltip_text(u"")
return
self.lst_checks.clear()
nice_name = self.controller.get_check_name
sorted_failures = sorted(failures.iteritems(), key=lambda x: nice_name(x[0]), cmp=locale.strcoll)
names = []
for testname, desc in sorted_failures:
testname = nice_name(testname)
self.lst_checks.append([testname, desc])
names.append(testname)
name_str = self._listsep.join(names)
self.btn_checks.set_tooltip_text(name_str)
self.lbl_btnchecks.set_text(name_str)
self._show_treeview()
def _show_empty_label(self):
self.tvw_checks.hide()
self.lbl_empty.show()
def _show_treeview(self):
self.lbl_empty.hide()
self.tvw_checks.show_all()
def update_geometry(self, popup, popup_alloc, btn_alloc, btn_window_xy, geom):
x, y, width, height = geom
textbox = self.controller.main_controller.unit_controller.view.sources[0]
alloc = textbox.get_allocation()
if width > alloc.width * 1.3:
return x, y, int(alloc.width * 1.3), height
return geom
# EVENT HANDLERS #
    def _on_activated(self, menu_item):
self.btn_checks.clicked()
def _on_store_closed(self, store_controller):
self.mnu_checks.set_sensitive(False)
self.hide()
def _on_store_loaded(self, store_controller):
self.mnu_checks.set_sensitive(True)
|
UTF-8
|
Python
| false | false | 2,011 |
10,986,526,374,853 |
bee5081860251967c454df6b7012caa1095a9cf8
|
6010d1ca7f252c909d2e32a13ccc35f071eb62e7
|
/build/lib/pyMSA/test/test_compareFeatureXMLmzML.py
|
6e96ec8711a3c7d147f40a6dbe8331542becc7f2
|
[
"GPL-3.0-only"
] |
non_permissive
|
davidmam/pyMSA
|
https://github.com/davidmam/pyMSA
|
a66633436bb69f1775984e30d57045d100d3fede
|
81da33e514f50ada2a1b34e1fe6872c3ee11ec0e
|
refs/heads/master
| 2021-01-23T07:27:07.291363 | 2013-06-20T09:41:59 | 2013-06-20T09:41:59 | 4,726,772 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Copyright (c) 2012 - N.P. de Klein
#
# This file is part of Python Mass Spec Analyzer (PyMSA).
#
# Python Mass Spec Analyzer (PyMSA) is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Python Mass Spec Analyzer (PyMSA) is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Python Mass Spec Analyzer (PyMSA). If not, see <http://www.gnu.org/licenses/>.")
"""
Unit test of compareFeatureXMLmzML.py
"""
# author: ndeklein
# date:10/02/2012
# summary: Unit testing functionality of the compareFeatureXMLmzML.py script
import sys
import os
import doctest
# to be able to import unittest2 from a locally installed unittest2
try:
sys.path.append('/homes/ndeklein/python2.6/site-packages')
except:
pass
# some magic to import from pyMS. dirname(dirname(__file__)) gets the two directories closer to the root.
# this is so that pyMS is added to the pythonpath and you can do import compareFeatureXMLmzML.py
# if this is made in a package from pyMS import compareFeatureXMLmzML should also work
dirname = os.path.dirname
sys.path.append(os.path.join(dirname(dirname(__file__))))
import unittest2 as unittest
import compareFeatureXMLmzML as compare
import config
configHandle = config.ConfigHandle()
config = configHandle.getConfig()
testFolder = os.path.join(os.path.dirname(__file__), config.get('test','testfilefolder'))
class testCompareFeatureXMLmzML(unittest.TestCase):
"""
A test class for the compareFeatureXMLmzML module.
"""
def test_compareCoordinate(self):
expectedCsvRead = ['id','# precursors\r\nf_43922326584371237334','1\r\nf_130205234428175237334','0\r\nf_8613715360396561740','0\r\nf_13020522388175237334','1\r\n'] # this is the expected output of reading the output csv file
expectedReturnValue = {'totalPrecursorsInFeatures':2, 'averagePrecursorsInFeatures':0.5, 'featPerPrecursorDict':{'f_13020522388175237334': 1, 'f_130205234428175237334': 0, 'f_43922326584371237334': 1, 'f_8613715360396561740': 0}}
# to make sure that the test isn't passing when the method doesn't work, but the file already exists
# the file is deleted at the beginning of the test rather than at the end, because user might want to check how an example output file looks
if os.path.exists(testFolder+'testPrecursorPerFeature.csv'):
os.remove(testFolder+'testPrecursorPerFeature.csv')
actualReturnValue = compare.compareCoordinate(testFolder+'mzml_test_file_1.mzML', testFolder+'featurexmlTestFile_1.featureXML', True, testFolder+'testPrecursorPerFeature.csv')
csvFile = open(testFolder+'testPrecursorPerFeature.csv')
csvRead = csvFile.read().split('\t')
self.assertDictEqual(expectedReturnValue, actualReturnValue)
self.assertListEqual(expectedCsvRead, csvRead)
if os.path.exists(testFolder+'testPrecursorPerFeature.csv'):
os.remove(testFolder+'testPrecursorPerFeature.csv')
def suite():
suite = unittest.TestSuite()
# adding the unit tests to the test suite
suite.addTest(unittest.makeSuite(testCompareFeatureXMLmzML))
return suite
unittest.TextTestRunner(verbosity=2).run(suite())
|
UTF-8
|
Python
| false | false | 2,013 |
16,449,724,746,606 |
2f342a9a149bca5f331e98fda2b2b97475a24034
|
7cbbbd08d882d9f476e603c16c74961f0c6fb036
|
/src/view.py
|
7762207b629aea4e54b4c42a749f55e4c9a6e8d8
|
[
"GPL-1.0-or-later",
"GPL-3.0-only"
] |
non_permissive
|
Benjamin-L/earthworm-castle
|
https://github.com/Benjamin-L/earthworm-castle
|
af34a22e66a1877c5ae716a4c4a754093708c0f0
|
b883595410e3098d9b454a5f4d3724eac605151c
|
refs/heads/master
| 2016-03-31T09:22:18.664405 | 2014-01-29T02:53:44 | 2014-01-29T02:53:44 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import slide
import pygame
import sys  # needed for the sys.exit() calls below
from config import *
#A single viewpoint
#This can contain any number >0 of slides
class Viewpoint:
currentSlide = 0
def __init__(self, slides=[]):
self.slides = slides
def rotleft(self, surf):
oldSlide = self.currentSlide
self.currentSlide = (self.currentSlide-1)%len(self.slides)
for x in range(0,screenW, ROTATE_SPEED):
self.slides[self.currentSlide].imgs[self.slides[self.currentSlide].currentImg].pos = (x-screenW,0)
self.slides[oldSlide].imgs[self.slides[oldSlide].currentImg].pos = (x,0)
self.slides[self.currentSlide].draw(surf)
self.slides[oldSlide].draw(surf)
pygame.display.flip()
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_q:
pygame.quit()
sys.exit()
if event.key == pygame.K_f:
pygame.display.toggle_fullscreen()
self.slides[self.currentSlide].imgs[self.slides[self.currentSlide].currentImg].pos = (0,0)
self.slides[oldSlide].imgs[self.slides[oldSlide].currentImg].pos = (0,0)
def rotright(self, surf):
oldSlide = self.currentSlide
self.currentSlide = (self.currentSlide+1)%len(self.slides)
for x in reversed(range(0,screenW, ROTATE_SPEED)):
self.slides[self.currentSlide].imgs[self.slides[self.currentSlide].currentImg].pos = (x,0)
self.slides[oldSlide].imgs[self.slides[oldSlide].currentImg].pos = (x-screenW,0)
self.slides[self.currentSlide].draw(surf)
self.slides[oldSlide].draw(surf)
pygame.display.flip()
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_q:
pygame.quit()
sys.exit()
if event.key == pygame.K_f:
pygame.display.toggle_fullscreen()
self.slides[self.currentSlide].imgs[self.slides[self.currentSlide].currentImg].pos = (0,0)
self.slides[oldSlide].imgs[self.slides[oldSlide].currentImg].pos = (0,0)
def draw(self, surf):
self.slides[self.currentSlide].draw(surf)
def getMode(self, mousePos):
for spot in self.slides[self.currentSlide].spots:
if spot.checkRegion(mousePos):
return 4
if mousePos[0] < MARGIN:
return 1
elif mousePos[0] > 1.0-MARGIN:
return 2
elif self.slides[self.currentSlide].dest!=None:
return 3
return 0
def handleClick(self, mousePos, surf):
for spot in self.slides[self.currentSlide].spots:
if spot.handleClick(mousePos, surf):
return None
if mousePos[0] < MARGIN:
self.rotleft(surf)
elif mousePos[0] > 1.0-MARGIN:
self.rotright(surf)
elif self.slides[self.currentSlide].dest!=None:
return self.slides[self.currentSlide].dest
return None
|
UTF-8
|
Python
| false | false | 2,014 |
2,765,958,941,288 |
40f2e9467110b2a36c32fbbd086c5a619faf752e
|
6c734891e075bfebef25cc05207f8eea76d55f85
|
/src/impl/setuputil.py
|
ed524c2974d228b718859c54d05b372f08736567
|
[] |
no_license
|
casibbald/jboss-5.1.0-wrapper
|
https://github.com/casibbald/jboss-5.1.0-wrapper
|
ebfb0db64427f4064278b4faff6bf5adca10f187
|
105755b798862ea9afb6beac25b645bdccc4cd1d
|
refs/heads/master
| 2016-09-06T05:20:24.473414 | 2013-11-30T23:57:29 | 2013-11-30T23:57:29 | 3,315,374 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
'''
Created on Oct 18, 2012
@author: casibbald
'''
import os #site, errno;
import sys
import tarfile
import shutil
import urllib2
python_version = sys.version[:3]
userhome = os.path.expanduser("~")
def checkenvpaths(path):
currentpath = os.environ.get('PATH')
if currentpath.find(path) == 0 :
print "Python Path has been set to : %s" % (path)
return True
elif currentpath.find(path) == -1:
print "Python Path has NOT been set.................!"
return False
def detect_environment():
environment = {}
environment['os_detected'] = sys.platform
python_version = sys.version[:3]
if environment['os_detected'] == 'darwin':
environment['userprofile'] = os.path.join(userhome, ".bash_profile")
environment['fileName'] = os.path.join(userhome, '.pydistutils.cfg')
environment['bin_dir'] = os.path.join(userhome, "bin")
environment['install_dir'] = os.path.join(userhome, "Library", "python", python_version, "site-packages")
environment['python_dir'] = os.path.join("/Library", "Frameworks", "Python.framework", "Versions", python_version, "bin")
environment['ssh_dir'] = os.path.join(userhome, '.ssh')
    elif environment['os_detected'] in ('linux', 'linux2'):
environment['userprofile'] = os.path.join(userhome, ".bashrc")
environment['fileName'] = os.path.join(userhome, '.pydistutils.cfg')
environment['bin_dir'] = os.path.join(userhome, "bin")
environment['install_dir'] = os.path.join("/", "usr", "lib", ''.join(["python", python_version]), "site-packages")
environment['python_dir'] = os.path.join(userhome, "Library", "Frameworks", "Python.framework", "Versions", python_version, "bin")
environment['ssh_dir'] = os.path.join(userhome, '.ssh')
elif environment['os_detected'] == 'sunos5':
environment['userprofile'] = os.path.join(userhome, ".profile")
environment['fileName'] = os.path.join(userhome, '.pydistutils.cfg')
environment['bin_dir'] = os.path.join(userhome, "bin")
environment['install_dir'] = os.path.join(userhome, "Library", "python", python_version, "site-packages")
environment['python_dir'] = os.path.join(userhome, "Library", "Frameworks", "Python.framework", "Versions", python_version, "bin")
environment['ssh_dir'] = os.path.join('/etc', 'ssh', 'keys')
    elif environment['os_detected'] in ('windows', 'win32'):
environment['fileName'] = os.path.join(userhome, '.pydistutils.cfg')
environment['bin_dir'] = os.path.join(userhome, "bin")
environment['install_dir'] = os.path.join(userhome, "Library", "python", python_version, "site-packages")
environment['python_dir'] = os.path.join(userhome, "Library", "Frameworks", "Python.framework", "Versions", python_version, "bin")
environment['ssh_dir'] = os.path.join(userhome, '.ssh')
else:
environment['fileName'] = os.path.join(userhome, '.pydistutils.cfg')
environment['bin_dir'] = os.path.join(userhome, "bin")
environment['install_dir'] = os.path.join(userhome, "lib", "python", python_version, "site-packages")
return environment
environment = detect_environment()
system_os = { 'darwin' : """[install]
install_lib = %s
install_scripts = %s """ % (environment['install_dir'], environment['bin_dir']),
'linux' : """[install]
install_lib = %s
install_scripts = %s""" % (environment['install_dir'], environment['bin_dir']),
'linux2' : """[install]
install_lib = %s
install_scripts = %s""" % (environment['install_dir'], environment['bin_dir']),
'sunos5' : """[install]
install_lib = %s
install_scripts = %s""" % (environment['install_dir'], environment['bin_dir']),
'windows' : """[install]
install_lib = %s
install_scripts = %s""" % (environment['install_dir'], environment['bin_dir']),
}
class Filer(object):
def __init__(self):
print "Filer initalised"
def createFile(self, FQFileName):
self.fileName = FQFileName
if not os.path.exists(self.fileName): # Avoid clobbering files
try:
print
print "Creating : %s" % (self.fileName)
o = open(self.fileName, "w")
o.flush()
o.close()
finally:
pass
def writeThisToFile(self, stringToWrite):
if os.path.exists(self.fileName): # Avoid clobbering files
try:
print
print "Writing to : %s " % (self.fileName)
w = open(self.fileName, "w")
w.write(stringToWrite)
w.write("\n")
w.flush()
w.close()
finally:
pass
class ThirdPartyApp(object):
def __init__(self):
print
print "Third Party Install Instance Initialised"
self.thirdparty = os.path.join(os.getcwd(), "thirdparty")
if not os.path.exists(self.thirdparty):
try:
print
print "Creating Thirdparty Directory: %s " % (self.thirdparty)
os.mkdir(self.thirdparty)
except OSError :
pass
if not os.path.exists(environment['install_dir']):
try:
print "Creating Python Egg Directory: %s" % (environment['install_dir'])
os.makedirs(environment['install_dir'])
except OSError :
pass
if not os.path.exists(environment['bin_dir']):
try:
print "Creating Python Egg Directory: %s" % (environment['bin_dir'])
os.makedirs(environment['bin_dir'])
except OSError :
pass
def legacy_download_this(self, url, to_dir, saveas=None):
"""This Class Method is only used once to pull down the very basic requirements of the project.
**kwargs saveas is used if the downloaded file should be renamed when saved to disk.
"""
self.url = url
self.to_dir = to_dir
self.saveas = saveas
if self.saveas:
self.saveto = os.path.join(self.to_dir, self.saveas)
else:
self.saveto = os.path.join(self.to_dir, os.path.basename(self.url))
self.src = self.dst = None
if not os.path.exists(self.saveto): # Avoid repeated downloads
try:
print
print "Attempting to Download : %s from %s" % (os.path.basename(self.url), os.path.dirname(self.url))
self.src = urllib2.urlopen(self.url)
# Read/write all in one block, so we don't create a corrupt file
# if the download is interrupted.
self.data = self.src.read()
self.dst = open(self.saveto,"wb"); self.dst.write(self.data)
finally:
if self.src:
self.src.close()
if self.dst:
self.dst.close()
print "%s Downloaded Successfully" % (os.path.basename(self.url))
print
return os.path.realpath(self.saveto)
def determine_compression_algorithim(self, infile):
self.infile = infile
if self.infile.endswith(".tar") :
self.arc_type = 'r:*'
elif self.infile.endswith(".gz") :
self.arc_type = 'r:gz'
elif self.infile.endswith(".bz2") :
self.arc_type = 'r:bz2'
elif self.infile.endswith(".zip") :
self.arc_type = 'r:*'
print "File %s detected as a %s " % (self.infile, self.arc_type.split(":")[1])
return self.arc_type
def extract_archive(self, infile, to_dir):
self.infile = infile
self.to_dir = to_dir
print "Archive to extract: ", self.infile
self.tar = tarfile.open(self.infile, self.determine_compression_algorithim(self.infile))
self.tar.extractall(self.to_dir)
self.tar.close()
def read_thirdparty_config(self):
if os.path.exists("thirdparty.cfg"):
self.fileToRead = ("thirdparty.cfg")
print self.fileToRead
self.thirdparty_app = {}
self.f = open(self.fileToRead,'r')
self.details = self.f.readlines()
for self.detail in self.details:
if not self.detail.isspace() :
if not self.detail.startswith('#'):
self.pair = self.detail.split('||')
if self.pair[0]:
self.thirdparty_app[self.pair[0].strip()] = (self.pair[1].strip(), self.pair[2].strip())
return self.thirdparty_app
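# A sample thirdparty.cfg line (hypothetical values), in the '||'-separated
# three-field format read_thirdparty_config expects:
#   ez_setup || http://example.com/dist/ez_setup.py || bootstrap installer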
def cleandest():
builddest = os.path.abspath("build")
distdest = os.path.abspath("dist")
buildegg = os.path.join(environment['install_dir'], "releasetools-*")
if os.path.exists(builddest):
try:
print "Cleaning Build Destination: %s" % (builddest)
shutil.rmtree(builddest)
except OSError :
pass
if os.path.exists(distdest):
try:
print "Cleaning Dist Destination: : %s" % (distdest)
shutil.rmtree(distdest)
except OSError :
pass
if os.path.exists(buildegg):
try:
print "Destroying Previously Built Egg: : %s" % (buildegg)
shutil.rmtree(buildegg)
except OSError :
pass
def configure_python() :
"""Configure python to use seperate site packages dir if NO Virtual Environment is detected"""
print "Operating System Detected : ", environment['os_detected']
f = Filer()
f.createFile(environment['fileName'])
f.writeThisToFile(system_os[environment['os_detected']])
third_party_app = ThirdPartyApp()
app_list = third_party_app.read_thirdparty_config()
for each in app_list:
if each == 'ez_setup':
third_party_app.legacy_download_this(app_list[each][0], os.getcwd(), saveas='ez_setup.py')
else:
third_party_app.legacy_download_this(app_list[each][0], os.path.join(os.path.join(os.getcwd(), "thirdparty")))
|
UTF-8
|
Python
| false | false | 2,013 |
8,761,733,313,797 |
e7211fc41d404326e21470fc02d9a1e5055f4897
|
e41b523193b10c1f6eaf430950eba553601ce245
|
/extra/first-batch-locales.py
|
8956f32d06882075466d879a008783764bfcd35d
|
[] |
no_license
|
MaTriXy/whatsnew
|
https://github.com/MaTriXy/whatsnew
|
d4b13ab18c0753e02a5973e26f7c5c8e6b770dd3
|
2fde03cd13f7c3c37e7cbd9353394cbc17327f47
|
refs/heads/master
| 2020-12-30T17:20:01.137070 | 2013-04-14T22:52:40 | 2013-04-14T22:52:40 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/env/python
import os
locale_codes = ['af', 'ms', 'ca', 'cs', 'da', 'de', 'et', 'en', 'es', 'tl', 'fr', 'hr', 'zu', 'it', 'sw', 'lv', 'lt', 'hu', 'nl', 'no', 'pl', 'pt', 'ro', 'sk', 'sl', 'fi', 'sv', 'vi', 'tr', 'el', 'be', 'bg', 'ru', 'sr', 'uk', 'am', 'hi', 'th', 'ko', 'ja', 'zh']
filename = 'strings.xml'
content_template = '<?xml version="1.0" encoding="utf-8"?>\n<resources>\n <string name="crawling_lang_code" translatable="false">%s</string>\n</resources>'
for code in locale_codes:
directory = 'values-' + code
if not os.path.exists(directory):
os.makedirs(directory)
file = open(os.path.join(directory, filename), 'wb')
content = content_template % code
file.write(content)
    file.close()
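# For example, values-de/strings.xml will contain:
#   <?xml version="1.0" encoding="utf-8"?>
#   <resources>
#       <string name="crawling_lang_code" translatable="false">de</string>
#   </resources>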
|
UTF-8
|
Python
| false | false | 2,013 |
7,370,163,901,394 |
1ff66013e2f9371893e11e1cf4db7052cd5cc1cf
|
fec8a5e62ac79d28cd039e728d12c9d9bc488ce8
|
/settings.py
|
fbc06b6bac76c80e3ece6b3640503e03e623f000
|
[] |
no_license
|
claudiobl2010/twitter-chupa-cabra
|
https://github.com/claudiobl2010/twitter-chupa-cabra
|
f63f30a18392289a485f16ec6e5c07db49db8055
|
dfadb9492dd07d87cd531455eb72dcd328426920
|
refs/heads/master
| 2020-04-21T08:02:08.913561 | 2011-11-15T03:16:03 | 2011-11-15T03:16:03 | 2,465,397 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#! /usr/bin/python
# -*- coding: utf-8 -*-
contas_twitter = {
'usuario_twitter_1' : {
'consumer_key' : '????????????????',
'consumer_secret' : '????????????????',
'access_token_key' : '????????????????',
'access_token_secret' : '????????????????'
},
'usuario_twitter_2' : {
'consumer_key' : '????????????????',
'consumer_secret' : '????????????????',
'access_token_key' : '????????????????',
'access_token_secret' : '????????????????'
}
}
# If conta_twitter_default below is left blank (''),
# the script will ask which twitter account you want to use
conta_twitter_default = ''
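# A minimal sketch of how a consumer script might resolve the account
# (kept commented out so importing this settings module has no side effects;
# the real prompt lives in the main script):
#
# if conta_twitter_default:
#     conta = contas_twitter[conta_twitter_default]
# else:
#     nome = raw_input('Which twitter account? ')
#     conta = contas_twitter[nome]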
|
UTF-8
|
Python
| false | false | 2,011 |
1,898,375,589,576 |
3a4fd29f5868c53480bbe2f5fa858c709cddbd7a
|
0cbfc2fcf7a787c0af7858f4f8473aa0c2e3871e
|
/condescending.py
|
dc498c6c3f325ed322677ef54f4baf1874657cdd
|
[] |
no_license
|
kaushal/redditroll
|
https://github.com/kaushal/redditroll
|
282bb16040084c833669703c08b29d7fe1d829dd
|
18dc61a1c0d4735084c921c2b8b71cac5be02963
|
refs/heads/master
| 2020-04-22T08:08:55.021642 | 2013-05-13T23:04:04 | 2013-05-13T23:04:04 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import sys
from random import choice
def generate_reply(post):
"""
Runs through the logic of creating a comment to post as a reply to another post.
If the post is not worth replying to, an empty string will be returned.
Arguments:
post - The post that is potentially being replied to.
"""
#Makes it easier for comparisons later.
post = post.lower()
message = ""
#Feel free to add any sort of funny, condescending behavior!
if "ironic" in post:
message += "Actually, that isn't even ironic. It's just coincidental."
elif "idea" in post:
message += "Wow. That is the stupidest idea I have ever heard."
message += "\nYou " + insulting_adjective() + " " + insulting_noun() + "!"
return message
def insulting_adjective():
"""
Returns a random adjective from our list of offensive adjectives.
"""
try:
with open("adjectives.txt") as adj_list:
return choice(adj_list.read().split())
except EnvironmentError:
print "Dude! Make a list of adjectives!"
sys.exit()
def insulting_noun():
"""
Returns a random noun from our list of offensive nouns
"""
try:
with open("nouns.txt") as noun_list:
return choice(noun_list.read().split())
except EnvironmentError:
print "Dude! Make a list of nouns!"
sys.exit()
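# Example (illustrative): generate_reply("How ironic!") yields
# "Actually, that isn't even ironic. It's just coincidental."
# followed by "\nYou <adjective> <noun>!" with words drawn at random
# from adjectives.txt and nouns.txt.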
|
UTF-8
|
Python
| false | false | 2,013 |
2,911,987,839,354 |
7c7a5a787151915e3cc23b4ea7c08db870e0f94f
|
070f699f435bf666f0d2f1960119924068fad0b3
|
/afn/python/src/timernotify.py
|
2d894f1f3ac8c41eba619e3b27a1591ed30b4de5
|
[] |
no_license
|
javawizard/afn
|
https://github.com/javawizard/afn
|
049e510069b2eaf39ac10560f8c706ff5aa74277
|
d9d95e24673794a20bb8138ce44d5bac236e07ed
|
refs/heads/master
| 2016-09-06T06:53:28.872368 | 2013-09-19T08:12:52 | 2013-09-19T08:12:52 | 3,390,850 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import sys
from libautobus import AutobusConnection
from optparse import OptionParser
from time import sleep
def main():
try:
import pynotify
except ImportError:
print "You don't have the Python libnotify binding installed."
sys.exit()
if not pynotify.init("timernotify"):
print "Libnotify could not be initialized."
sys.exit()
parser = OptionParser(usage="usage: python -m timernotify [options]",
add_help_option=False)
parser.add_option("-h", type="string", action="store", dest="host", default="localhost",
help="The host that the Autobus server is running on. Without this "
"option, localhost will be used.")
parser.add_option("-s", action="store_const", const=True, dest="state_change",
default=False, help="If present, all state changes (instead of just "
"when the timer beeps) will cause a notification to be shown.")
parser.add_option("-?", "--help", action="help")
options, command_line_args = parser.parse_args()
bus = AutobusConnection(host=options.host)
def beeping_listener(timer):
pynotify.Notification("Timer " + str(timer), "is beeping.").show()
def state_change_listener(timer, state):
state_string = {1: "counting up", 2: "counting down", 3: "stopped"}[state]
pynotify.Notification("Timer " + str(timer), "is now " + state_string +
".").show()
bus.add_event_listener("timer", "beeping", beeping_listener)
if options.state_change:
bus.add_event_listener("timer", "manual_state_change", state_change_listener)
bus.connect()
try:
while True:
sleep(1)
except KeyboardInterrupt:
print "Interrupted, shutting down"
finally:
bus.shutdown()
|
UTF-8
|
Python
| false | false | 2,013 |
5,789,615,928,583 |
df146aea315b624741cf8c12fed7f38ce73ce7db
|
128c5874bd74a54ac9a5e7079137c8a1e0caac11
|
/tests/test_parameters_for_url.py
|
a1a01e4fe0b8e13884fb96acb15a3b4de9347fc4
|
[] |
no_license
|
mikimer/TwilioSheet
|
https://github.com/mikimer/TwilioSheet
|
b3c412de25f5350ac1884d419dced4639e486678
|
b7c23ae6c6d346b5300a08c3c354fb19f96e93cb
|
refs/heads/master
| 2021-01-17T18:13:36.897764 | 2014-07-17T19:51:22 | 2014-07-17T19:51:22 | 34,759,874 | 1 | 0 | null | true | 2015-04-28T22:44:14 | 2015-04-28T22:44:14 | 2015-04-04T16:09:06 | 2014-07-17T19:51:43 | 5,276 | 0 | 0 | 0 | null | null | null |
import sys
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
from app import TestURL
from app import NoURLException
from app import NoGoogleInURLException
from app import URLNotForGoogleFormException
from app import URLForGoogleSpreadsheetNotFormException
from app import GoogleFormDoesntExistException
from app import NoTwilioParametersInFormException
# TODO: Use mocking library to simulate HTTP
class TestURLTest(unittest.TestCase):
def test_requires_url(self):
url = ''
with self.assertRaises(NoURLException):
TestURL(url)
def test_requires_google_in_url(self):
for url in ['http://example.com']:
with self.assertRaises(NoGoogleInURLException):
TestURL(url)
def test_not_apparently_for_spreadsheet(self):
for url in ['https://docs.google.com/spreadsheet/ccc?key=fake#gid=0']:
with self.assertRaises(URLForGoogleSpreadsheetNotFormException):
TestURL(url)
def test_requires_formkey(self):
for url in ['http://google.com']:
with self.assertRaises(URLNotForGoogleFormException):
TestURL(url)
def test_form_exists(self):
for url in ['http://google.com?formkey=fake']:
with self.assertRaises(GoogleFormDoesntExistException):
TestURL(url)
def test_form_needs_twilio_parameters(self):
url = 'https://docs.google.com/spreadsheet/' \
'viewform?formkey=dHk3N2M5NlAtZV9mMlAyOEU5VE05dEE6MQ'
with self.assertRaises(NoTwilioParametersInFormException):
TestURL(url)
def test_detects_valid_url(self):
url = 'https://docs.google.com/spreadsheet/' \
'viewform?formkey=dG02c3hqdEZBaWZMN1NBdnBCZkVzdWc6MQ'
form = TestURL(url)
self.assertIsInstance(form.parameters, set)
|
UTF-8
|
Python
| false | false | 2,014 |
7,748,121,009,279 |
5a55bc77f245ad8c02144a5ad4ac275cb32c1f21
|
fc9ca505abc41c494cfbfb2e00b6cc42784b597a
|
/tests/testing.py
|
1e10e6abe4d437c20a5b96ba113b12afcc082d7b
|
[] |
no_license
|
u20024804/cybrain
|
https://github.com/u20024804/cybrain
|
113c546a285dded17690fcdd0f41a8c9faf3a2d4
|
ab34acf07acd479b3e6ac7039d2540e23d79a11d
|
refs/heads/master
| 2021-01-18T18:34:15.819475 | 2014-12-29T04:05:43 | 2014-12-29T04:05:43 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
__author__ = 'Cristian'
import unittest
from math import exp
from cybrain import LogisticNeuron, LinearConnection, LinearNeuron
#
# def similar(a,b,tol = 0.001):
# return abs(a-b) < tol
#
# def sigmoid(z):
# return 1.0 / ( 1.0 + exp(-z) )
#
# class MyTestCase(unittest.TestCase):
#
# def setUp(self):
# self.Nin = LinearNeuron()
# self.Nin.weighted_sum = 2.0
#
# self.Nout = LinearNeuron()
#
# self.in_out = LinearConnection(self.Nin,self.Nout, 0.5)
#
# self.Logout = LogisticNeuron()
#
# self.in_Logout = LinearConnection(self.Nin,self.Logout, 0.5)
#
# def test_linear_activation(self):
# self.Nin.activate()
# a = self.Nout.state
# b = self.Nin.state * self.in_out.weight
# self.assertTrue( similar(a,b), "Failed " + self.test_linear_activation.__name__ )
# self.clear()
#
# def clear(self):
# self.Nin.clearAcumulators()
# self.Nin.clearCounters()
# self.Nout.clearAcumulators()
# self.Nout.clearCounters()
#
# self.in_out.clearAcumulators()
# self.in_Logout.clearAcumulators()
#
# def test_sigmoid_activation(self):
# self.Nin.activate()
# a = self.Logout.state
# b = sigmoid( self.Nin.state * self.in_Logout.weight )
# self.assertTrue( similar(a,b), "Failed: a = {}, b = {}".format(a,b) )
# self.clear()
#
# def test_linear_backprop(self):
# target = 1.5
# self.Nin.activate()
# self.Nout.errorDerivative(target)
# self.Nout.backErrorActivate()
#
# a = self.in_out.weight_diff
# b = self.Nin.state * ( self.Nout.state - target )
#
# self.assertTrue( similar(a,b), "Failed: a = {}, b = {}".format(a,b) )
# self.clear()
#
# def test_logistic_backprop(self):
# target = 0.0
# self.Nin.activate()
# self.Logout.errorDerivative(target)
# self.Logout.backErrorActivate()
#
# a = self.in_Logout.weight_diff
# b = self.Nin.state * ( self.Logout.state - target )
#
# self.assertTrue( similar(a,b), "Failed: a = {}, b = {}".format(a,b) )
# self.clear()
#
# if __name__ == '__main__':
# unittest.main()
|
UTF-8
|
Python
| false | false | 2,014 |
18,614,388,294,366 |
a03b40291c3face306d9c3d58cbf2aeb66bbcb4d
|
7dfd53d63da5fb6ff2085b87da09b1ebd0723d12
|
/blogengine/admin/__init__.py
|
a79b383d4ebc5d344d097c7936dee31c4f92c153
|
[] |
no_license
|
tklarryonline/django-blog-homework
|
https://github.com/tklarryonline/django-blog-homework
|
374ae4172e1fe2dd6e552f8388477a1da883b981
|
424ec0da10f8d32ec1e91a6d5167282d199c8e25
|
refs/heads/master
| 2021-01-18T14:14:58.778100 | 2014-03-19T09:23:04 | 2014-03-19T09:23:04 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.contrib import admin
from blogengine import models
from .models_admin import PostAdmin
admin.site.register(models.Category)
admin.site.register(models.Tag)
admin.site.register(models.Post, PostAdmin)
|
UTF-8
|
Python
| false | false | 2,014 |
15,564,961,510,610 |
e53250efa647a2039f266a7524fcfadd9bef0465
|
d3b960ac8424f93bb259aebb3cf90c5668298dde
|
/website/andreev_ru/urls.py
|
d1cb73cdb11f3eb748e0e15beb702eff7b6f8415
|
[] |
no_license
|
Otann/andreev_ru
|
https://github.com/Otann/andreev_ru
|
f787e225560c91bc4980e0d1fce5df31e1fceaf6
|
a69bd80d8859db84ae441670b6a5182ac6f3ac55
|
refs/heads/master
| 2021-01-01T20:16:26.148529 | 2014-12-23T16:43:25 | 2014-12-23T16:43:25 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.conf.urls.i18n import i18n_patterns
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.contrib import admin
admin.autodiscover()
urlpatterns = i18n_patterns('',
url(r'^$', 'andreev_ru.main.views.home', name='home'),
url(r'^works/$', 'andreev_ru.main.views.works', name='works'),
url(r'^work/(?P<slug>[-\w]+)/$', 'andreev_ru.main.views.work', name='work'),
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^search/$', 'andreev_ru.main.views.search', name='search'),
url(r'^search_json/$', 'andreev_ru.main.views.search_json', name='search_json'),
url(r'^news/$', 'andreev_ru.main.views.news', name='news'),
url(r'^about/$', 'andreev_ru.main.views.about', name='about'),
url(r'^contacts/$', 'andreev_ru.main.views.contacts', name='contacts'),
url(r'^team/', 'andreev_ru.main.views.team', name='team'),
url(r'^(?P<slug>[-\w]+)$', 'andreev_ru.main.views.page', name='page'),
)
urlpatterns += patterns('',
url(r'^ckeditor/', include('ckeditor.urls')),
url(r'^grappelli/', include('grappelli.urls')),
)
if settings.DEBUG:
urlpatterns += staticfiles_urlpatterns()
urlpatterns += patterns('',
url(r'^media/(?P<path>.*)$', 'django.views.static.serve', { 'document_root': settings.MEDIA_ROOT, }),
)
|
UTF-8
|
Python
| false | false | 2,014 |
4,440,996,214,344 |
eb5036a5fd64ac2d1fda57c7f681ded902af0b9e
|
cbedd7fd2582ff8122ab22dae202a62d48ff3d11
|
/samples_mangopy/mangopy/bouncing_ball.py
|
615af1fbe0b1c2ceb4e2de8647150bb995ae1609
|
[
"MIT",
"LicenseRef-scancode-python-cwi",
"LicenseRef-scancode-free-unknown",
"PSF-2.0",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-other-copyleft",
"Python-2.0",
"GPL-1.0-or-later"
] |
non_permissive
|
volund/mango
|
https://github.com/volund/mango
|
7723d0795ab669ae02ad5ee4d2c660e213a3b782
|
e3ed223c36250e9a438885694384085287f306a2
|
refs/heads/master
| 2021-01-10T20:11:42.572923 | 2012-08-01T20:20:21 | 2012-08-01T20:20:21 | 3,550,908 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import Geometry, math
class BouncingBall(Geometry.Sphere):
t = 0
def step(self):
self.t += 0.08
self.position = (0, 0.2 + abs(math.sin(self.t)), 0)
class MovingPlatform(Geometry.Box):
t = 0
def step(self):
self.t += 0.01
self.position = (math.sin(self.t), 0, 0)
platform = MovingPlatform()
platform.setDimensions(1.0, 0.1, 1.0)
ball = BouncingBall()
ball.setRadius(0.2)
ball.setParentFrame(platform)
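# RENDER, STEP and Camera are not defined in this file; they are presumably
# injected into the script's namespace by the mango runtime.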
ball.set(RENDER | STEP)
platform.set(RENDER | STEP)
Camera.lookFrom((0, 2, 3))
Camera.translate(0, 0.7, 0)
|
UTF-8
|
Python
| false | false | 2,012 |
7,026,566,525,751 |
3875d1dae9001215610bbec43a1a0c8b479fceee
|
46d7b564c283685b4d8b16c4212561a6adc05cd7
|
/tests/models/test_disk.py
|
14838c751959bc5369044f66658fb0b76051fa50
|
[
"BSD-3-Clause"
] |
permissive
|
fholiveira/tlpconfig
|
https://github.com/fholiveira/tlpconfig
|
ede7d021a3f18bcb6d46148c4415f2ee829e8bfe
|
0bb971c05f79902133263edf6b56625bf219ade6
|
refs/heads/master
| 2020-04-16T12:02:43.774900 | 2014-11-26T14:31:10 | 2014-11-26T18:16:10 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from unittest import TestCase
from tlp.models import Disk
class TestDisk(TestCase):
def test_disk_should_be_known_by_id(self):
disk = Disk('disk1', alias='sda', size='256GB')
self.assertTrue(disk.know_as('disk1'))
def test_disk_should_be_known_by_alias(self):
disk = Disk('disk1', alias='sda', size='256GB')
self.assertTrue(disk.know_as('sda'))
|
UTF-8
|
Python
| false | false | 2,014 |
11,974,368,865,150 |
2525f524aa6ed52df7ac43c1f5bfd15c947e3988
|
57cbbe35b1b5a7aa40b732aa6a9be3f7bfd5291f
|
/Transforms.py
|
74243cbe585c68e7caa71cacc8ea88f67a3ab8de
|
[] |
no_license
|
JoeyChaps/regularization
|
https://github.com/JoeyChaps/regularization
|
18d1fb1233231ebd08496ff9a4b8db57d84e2055
|
235f6c58b28025ef9086e76aa5f7565c98753e97
|
refs/heads/master
| 2020-04-15T15:17:33.461376 | 2014-12-31T05:31:44 | 2014-12-31T05:31:44 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
#######
# File: Transforms.py
# Project: Regularization
# Author: Joe Chapman
# Date: 9-8-14
# Url: https://github.com/JoeyChaps/regularization
#######
class Transform:
"""A class that transforms data according to various algorithms."""
    def __init__(self, num, maxFeats):
        self.bDone = True
        self.count = 3
        self.lastCount = 0
        self.transformDef = ""
        self.ncolumns = 0
        self.nMaxFeatures = maxFeats
        # Only transform numbers 1-3 are defined; anything else disables it.
        self.transNum = num if num in (1, 2, 3) else None
def getTransform(self):
"""Performs a transform and returns the transformed data, or returns
None if no transform was used.
"""
if self.transNum == 1:
return self.tran1
elif self.transNum == 2:
return self.tran2
elif self.transNum == 3:
return self.tran3
else:
return None
def getTestTransform(self):
"""Performs an appropriate transform for testing based on the transform
used for training and returns the transformed data, or returns None if
no transform was used.
"""
if self.transNum == 1:
self.count = self.lastCount
return self.tran1
elif self.transNum == 2:
self.count = self.lastCount
return self.tran2
elif self.transNum == 3:
self.count = self.lastCount
return self.tran3
else:
return None
def getTransformVersion(self):
"""Returns a string indicating the number of the transform applied to
the data and the number of features used, or returns None if no
transform was used.
"""
if self.transNum:
sVers = str(self.transNum)
            # 'and' semantics, not bitwise '&': append the feature count
            # only for transforms 1-3
            if 0 < self.transNum < 4:
sVers = sVers + "-" + str(self.count)
return sVers
else:
return None
def getTransformCount(self):
"""Returns the number of features that were most recently applied by
the transform.
"""
return self.lastCount
def setTransformCount(self, c):
"""Sets the number of features for the transform to apply."""
self.count = c
def getTransformDef(self):
"""Returns a string definition of the transform."""
return self.transformDef
def getDone(self):
"""Returns whether the transform has been completed."""
return self.bDone
def tran1(self, a_pats):
"""A function that performs up to a complete fifth-order transformation
on two-dimensional patterns, initially returning the complete first-
order transformed patterns, then using an additional feature to
transform and return the patterns each subsequent time the function is
called, until the maximum number of features has been reached.
"""
self.bDone = False
ncols = 0
a_tranPats = []
npats = len(a_pats)
if (npats > 0):
ncols = len(a_pats[0])
if (ncols > 0):
sFormula = ""
if (self.count >= 3):
sFormula = "1, x0, x1"
if (self.count >= 4):
sFormula = sFormula + ", x0^2"
if (self.count >= 5):
sFormula = sFormula + ", x0x1"
if (self.count >= 6):
sFormula = sFormula + ", x1^2"
if (self.count >= 7):
sFormula = sFormula + ", x0^3"
if (self.count >= 8):
sFormula = sFormula + ", x0^2x1"
if (self.count >= 9):
sFormula = sFormula + ", x1^2x0"
if (self.count >= 10):
sFormula = sFormula + ", x1^3"
if (self.count >= 11):
sFormula = sFormula + ", x0^4"
if (self.count >= 12):
sFormula = sFormula + ", x0^3x1"
if (self.count >= 13):
sFormula = sFormula + ", x0^2x1^2"
if (self.count >= 14):
sFormula = sFormula + ", x1^3x0"
if (self.count >= 15):
sFormula = sFormula + ", x1^4"
if (self.count >= 16):
sFormula = sFormula + ", x0^5"
if (self.count >= 17):
sFormula = sFormula + ", x0^4x1"
if (self.count >= 18):
sFormula = sFormula + ", x0^3x1^2"
if (self.count >= 19):
sFormula = sFormula + ", x0^2x1^3"
if (self.count >= 20):
sFormula = sFormula + ", x1^4x0"
if (self.count >= 21):
sFormula = sFormula + ", x1^5"
self.transformDef = sFormula
for p in range(0, npats):
x0 = a_pats[p][0]
x1 = a_pats[p][1]
x0_2 = x0 * x0
x1_2 = x1 * x1
x0_3 = x0 * x0_2
x1_3 = x1 * x1_2
x0_4 = x0 * x0_3
x1_4 = x1 * x1_3
x0_5 = x0 * x0_4
x1_5 = x1 * x1_4
a_row = []
if (self.count >= 3):
a_row = [1, x0, x1]
if (self.count >= 4):
a_row.append(x0_2)
if (self.count >= 5):
a_row.append(x0 * x1)
if (self.count >= 6):
a_row.append(x1_2)
if (self.count >= 7):
a_row.append(x0_3)
if (self.count >= 8):
a_row.append(x0_2 * x1)
if (self.count >= 9):
a_row.append(x1_2 * x0)
if (self.count >= 10):
a_row.append(x1_3)
if (self.count >= 11):
a_row.append(x0_4)
if (self.count >= 12):
a_row.append(x0_3 * x1)
if (self.count >= 13):
a_row.append(x0_2 * x1_2)
if (self.count >= 14):
a_row.append(x1_3 * x0)
if (self.count >= 15):
a_row.append(x1_4)
if (self.count >= 16):
a_row.append(x0_5)
if (self.count >= 17):
a_row.append(x0_4 * x1)
if (self.count >= 18):
a_row.append(x0_3 * x1_2)
if (self.count >= 19):
a_row.append(x0_2 * x1_3)
if (self.count >= 20):
a_row.append(x1_4 * x0)
if (self.count >= 21):
a_row.append(x1_5)
a_tranPats.append(a_row)
self.lastCount = self.count
self.count += 1
if (self.count > self.nMaxFeatures):
self.bDone = True
return list(a_tranPats)
def tran2(self, a_pats):
"""A function that performs a complete fifth-order transformation on
two-dimensional patterns, initially returning the complete first-order
transformed patterns, then using the complete next order to transform
and return the patterns each subsequent time the function is called,
and finally returning the complete fifth-order transformed patterns.
"""
self.bDone = False
ncols = 0
a_tranPats = []
npats = len(a_pats)
if (npats > 0):
ncols = len(a_pats[0])
if (ncols > 0):
sFormula = ""
if (self.count >= 1):
sFormula = "1, x0, x1"
if (self.count >= 2):
sFormula = sFormula + ", x0^2, x0x1, x1^2"
if (self.count >= 3):
sFormula = sFormula + \
", x0^3, x0^2x1, x1^2x0, x1^3"
                if (self.count >= 4):
                    sFormula = sFormula + \
                        ", x0^4, x0^3x1, x0^2x1^2, x1^3x0, x1^4"
                if (self.count >= 5):
                    sFormula = sFormula + \
                        ", x0^5, x0^4x1, x0^3x1^2, x0^2x1^3, x1^4x0, x1^5"
self.transformDef = sFormula
for p in range(0, npats):
x0 = a_pats[p][0]
x1 = a_pats[p][1]
x0_2 = x0 * x0
x1_2 = x1 * x1
x0_3 = x0 * x0_2
x1_3 = x1 * x1_2
x0_4 = x0 * x0_3
x1_4 = x1 * x1_3
x0_5 = x0 * x0_4
x1_5 = x1 * x1_4
a_row = []
            if (self.count >= 1):
                a_row = [1, x0, x1]
            if (self.count >= 2):
                # extend (not append) keeps a_row a flat feature vector
                a_row.extend([x0_2, x0 * x1, x1_2])
            if (self.count >= 3):
                a_row.extend([x0_3, x0_2 * x1, x1_2 * x0, x1_3])
            if (self.count >= 4):
                a_row.extend([x0_4, x0_3 * x1, x0_2 * x1_2, x1_3 * x0, x1_4])
            if (self.count >= 5):
                a_row.extend([x0_5, x0_4 * x1, x0_3 * x1_2, x0_2 * x1_3,
                              x1_4 * x0, x1_5])
a_tranPats.append(a_row)
self.lastCount = self.count
self.count += 1
if (self.count > 5):
self.bDone = True
return list(a_tranPats)
def tran3(self, a_pats):
"""A function that performs a complete fifth-order transformation on
one-dimensional patterns and returns the list of transformed patterns.
"""
self.bDone = False
ncols = 0
a_tranPats = []
npats = len(a_pats)
self.transformDef = "x0,x0^2,x0^3,x0^4,x0^5"
for p in range(0, npats):
x0 = a_pats[p][0]
x0_2 = x0 * x0
x0_3 = x0 * x0_2
x0_4 = x0 * x0_3
x0_5 = x0 * x0_4
a_row = [x0, x0_2, x0_3, x0_4, x0_5]
a_tranPats.append(a_row)
return list(a_tranPats)
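# A minimal usage sketch (hypothetical values; in this project the Transform
# is driven by the training loop, which calls getTransform() repeatedly):
#   t = Transform(3, 21)            # transform 3: fifth-order, 1-D patterns
#   tran = t.getTransform()
#   feats = tran([[0.5], [1.0]])    # -> [[x0, x0^2, ..., x0^5], ...]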
|
UTF-8
|
Python
| false | false | 2,014 |
10,110,353,051,064 |
bc26d5e7873037ca66ae30d4a693f7ed782c7705
|
881abf0ec3ecc9eaf1a054e71584b8aa787356df
|
/spinwaves/utilities/mpfit/__init__.py
|
1c380a12b2407b73551085924a8ede8219762d8b
|
[] |
no_license
|
wflynny/spinwaves_git_final
|
https://github.com/wflynny/spinwaves_git_final
|
9451006a2f31e5066467533cd14a3accb383a714
|
ed70e979b8836ce3cc3937d4778713c9a8421229
|
refs/heads/master
| 2016-09-16T10:21:36.255896 | 2010-06-16T23:17:23 | 2010-06-16T23:17:23 | 716,384 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
'''
Created on Aug 20, 2009
@author: tsarvey
'''
|
UTF-8
|
Python
| false | false | 2,010 |
7,146,825,605,030 |
8b42ae85cc7bc4a9611b71a16295bda93773c168
|
26e7b3de3aaebd778a7e73cbe61d2567b386a60a
|
/cube.py
|
47d37e8ae0913f8e043777f8d7ae1f9349e7ab3a
|
[] |
no_license
|
benesch/cuber
|
https://github.com/benesch/cuber
|
e126d82c635a691b49b5677faf8f9019dba1721e
|
e3a49111e61792591e69d85222bd919532dc8513
|
refs/heads/master
| 2020-04-26T04:29:39.239453 | 2013-05-05T20:36:46 | 2013-05-05T20:36:46 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import itertools
import struct
class Cube(object):
def __init__(self, size):
self.size = size
self.fill(False)
def fill(self, state):
s = self.size
self.cube = (
[[[state for i in range(s)] for i in range(s)] for i in range(s)]
)
def _toggle(self, state, *args):
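        """Set each given (z, y, x) position to `state`; with no positions
        given, fill the entire cube with `state`."""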
if args:
for pos in args: self.set_pos(pos, state)
else:
self.fill(state)
def on(self, *args):
self._toggle(True, *args)
def off(self, *args):
self._toggle(False, *args)
def set_pos(self, pos, state):
z, y, x = pos
self.cube[z][y][x] = state
def valid_position(self, position):
return all(0 <= p < self.size for p in position)
def to_bytes(self):
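        """Pack the cube into bytes, one big-endian 16-bit word per layer
        (so this assumes a 4x4x4 cube: 16 cells per layer)."""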
def bool_to_bit(bit):
return '1' if bit else '0'
bytes = ''
for layer in self.cube:
flattened = ''.join(bool_to_bit(x) for x in itertools.chain(*layer))
num = int(flattened, 2)
bytes += struct.pack('>H', num)
return bytes
def __getitem__(self, index):
return self.cube[index]
|
UTF-8
|
Python
| false | false | 2,013 |
6,511,170,424,052 |
fa040d427a22c08db94d5a004038b2e18b7b51bb
|
d27dd932d5297fd5cd35ece23ccb993d214190f8
|
/violinplot_tests/test_violinplot_position.py
|
2c211986d5e598511487ee866ffbeb5a0eb0aeb0
|
[
"LicenseRef-scancode-unknown-license-reference"
] |
non_permissive
|
Khodeir/matplotlib
|
https://github.com/Khodeir/matplotlib
|
abca486758e669e1c8d6a3dd7f0a2904465cdc41
|
1c1fdb52f2f2edd7e6ce147f798b68c9f882b66c
|
refs/heads/master
| 2021-01-17T12:25:44.336503 | 2014-04-02T18:51:55 | 2014-04-02T18:51:55 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from matplotlib.pyplot import figure, show
import matplotlib.pyplot as plt
from numpy.random import normal
import traceback
data = [[10, 1, 10, 1, 10, 10, 10, 10],
[10, 9, 8, 7, 6, 5, 4, 3],
[1,1],
[1, 2],
[0, 10],
[0, 5, 5, 5, 5, 5, 10],
[0, 5, 5, 2, 5, 5, 10],
[1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 7, 7, 7, 7, 7, 11], [-1,0,1]]
def reference_plot():
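    # Draw the default-position violin plot on the left axes and return the
    # right axes, so each test can draw its variant next to the reference.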
fig = figure()
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
ax1.violinplot(data)
return ax2
# Redundant position argument
def redundant_pos_arg():
ax2 = reference_plot()
ax2.violinplot(data, positions=range(1,len(data)+1))
show()
# Reversed plot
def reverse_pos_arg():
ax2 = reference_plot()
ax2.violinplot(data, positions=range(1,len(data)+1)[::-1])
show()
# Swapped first and third positions plot
def swap13_pos_arg():
ax2 = reference_plot()
pos = range(1, len(data) + 1)
pos[0], pos[2] = pos[2], pos[0]
ax2.violinplot(data, positions=pos)
show()
if __name__ == '__main__':
try:
print 'TEST 1: Specified positions same as default (i.e. range(1, n+1))'
print 'Manual Check that the right plot matches the left plot.'
redundant_pos_arg()
print 'TEST 1 RAN SUCCESSFULLY'
except Exception:
print 'FAIL: Something went wrong while plotting with positions=range(1,len(data)+1)'
try:
print 'TEST 2: Specified positions reversed (i.e. range(1,n+1)[::-1])'
print 'Manual Check that the right plot is the same as the left one in reverse'
reverse_pos_arg()
print 'TEST 2 RAN SUCCESSFULLY'
except Exception:
        print 'FAIL: Something went wrong while plotting with positions=range(1,len(data)+1)[::-1]'
try:
print 'TEST 3: Swap positions 1 and 3'
print 'Manual Check that the "violins" at 1st and 3rd positions have been swapped. 1st violin should be empty space.'
swap13_pos_arg()
print 'TEST 3 RAN SUCCESSFULLY'
except Exception:
print 'FAIL: Something went wrong while plotting positions 1 and 3 swapped.'
|
UTF-8
|
Python
| false | false | 2,014 |
13,743,895,359,225 |
095b993dae4adb7a12fd99060f6f4e3b475800e0
|
23ff96dabcc33e8f91550ba6f4bda036c2eb6361
|
/controllers/ode_exec.py
|
77ce2b578ea4731407714ed93407da4c59121ab4
|
[] |
no_license
|
eoinmurray/trion_old
|
https://github.com/eoinmurray/trion_old
|
c3a68cf110874cb27b05ae519c4f0ee0b3bdf8df
|
6d3231b5248538b14f4eaecbd753b9e2e3c32e0c
|
refs/heads/master
| 2016-09-10T04:31:51.421668 | 2013-10-04T14:43:35 | 2013-10-04T14:43:35 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
from scipy import integrate, interpolate, optimize
import scipy.stats as stats
from ode_defined import f
def gauss(t):
"""
Generates gaussian for convolution
"""
FWHM = 0.4
mu = FWHM*np.sqrt(2*np.log(2))
return np.exp( -t**2 / (2*mu**2) )/( mu*np.sqrt(2*np.pi) )
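# Vectorized form, so the gaussian can be evaluated over a whole delay axis.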
gauss_v = np.vectorize(gauss)
def fake_odeint(func, y0, t, parameters):
"""
ODE integrator replacement for scipy.odeint
"""
ig = integrate.ode(func)
ig.set_integrator('lsoda', method='adams')
ig.set_initial_value(y0, t=0.)
ig.set_f_params(parameters)
y = []
t = np.array(t)
for tt in t:
pops = ig.integrate(tt)
assert (pops.sum() > 0.999999) and (pops.sum() < 1.000001), 'Populations not summing to 1.0. Actually: ' + repr(pops.sum())
y.append(pops)
return np.array(y)
def f_piecewise_run(initial_conditions, parameters, x_data, pos, k = None, result_queue = None):
"""
    Generates correlation curves. Piecewise defined, so it can handle cross correlations too.
"""
gauss_d = gauss_v(x_data)
x_data_neg = x_data[x_data < 0]
x_data_pos = x_data[x_data >= 0]
temp_neg = fake_odeint( f, initial_conditions[:initial_conditions.size/2], - x_data_neg[::-1], parameters)
temp_pos = fake_odeint( f, initial_conditions[initial_conditions.size/2:], x_data_pos, parameters)
temp_neg = temp_neg[:,pos[0]][::-1]
temp_pos = temp_pos[:,pos[1]]
temp_pos = temp_pos/temp_pos[-1]
temp_neg = temp_neg/temp_neg[1]
temp = np.append(temp_neg, temp_pos)
temp = np.convolve(temp, gauss_d, 'same')
most_frequent = stats.mode(np.around(temp, decimals=3))
temp = temp/most_frequent[0][0]
if k:
result_queue[k] = temp
return temp
def intensities_run(initial_conditions, parameters, x_data, pos, k = None, result_queue = None):
"""
Generates the predicted intensities of each peak.
"""
gauss_d = gauss_v(x_data)
temp = fake_odeint( f, initial_conditions[initial_conditions.size/2:], x_data[x_data > 0], parameters)[-1, :]
e5 = temp[5]/parameters[8]
e4 = temp[7]/parameters[11]
e1 = temp[8]/parameters[14]
e2 = temp[8]/parameters[13]
e7 = temp[8]/parameters[12]
e3 = temp[6]/parameters[9]
e6 = temp[11]/parameters[10]
intensities = np.array([e7, e6, e5, e4, e3, e2, e1])
if k:
result_queue[k] = intensities
return intensities/intensities.max()
|
UTF-8
|
Python
| false | false | 2,013 |
1,992,864,870,664 |
00826e514988ad19862a357800fff6d8529edf8f
|
881ae2b911269c041e07accde6abb62a8997eb1b
|
/apps/lock_manager/models.py
|
53c6b6e49dea36a8a2f5ce4e68e5cce9db5f9a98
|
[
"GPL-3.0-only"
] |
non_permissive
|
fccoelho/mayan
|
https://github.com/fccoelho/mayan
|
522c5922aec73d50b64a83a37664a87c0e44ea4f
|
55659910729d3c0e5f71d7ae60ffe8d59f4e842a
|
refs/heads/master
| 2021-01-16T19:24:46.279648 | 2011-09-14T00:49:34 | 2011-09-14T00:49:34 | 2,467,882 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import datetime
from django.db import models
from django.utils.translation import ugettext_lazy as _
from lock_manager.managers import LockManager
from lock_manager.conf.settings import DEFAULT_LOCK_TIMEOUT
class Lock(models.Model):
creation_datetime = models.DateTimeField(verbose_name=_(u'creation datetime'))
timeout = models.IntegerField(default=DEFAULT_LOCK_TIMEOUT, verbose_name=_(u'timeout'))
name = models.CharField(max_length=32, verbose_name=_(u'name'), unique=True)
objects = LockManager()
def __unicode__(self):
return self.name
def save(self, *args, **kwargs):
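        # Stamp the lock with the current time on every save, so the
        # timeout is always measured from the most recent save.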
self.creation_datetime = datetime.datetime.now()
super(Lock, self).save(*args, **kwargs)
class Meta:
verbose_name = _(u'lock')
verbose_name_plural = _(u'locks')
|
UTF-8
|
Python
| false | false | 2,011 |
4,896,262,756,659 |
de397c38bb4a5bcba2384e1a1678674068e9614b
|
884d97989a16df342790c0d77aa304708df90c35
|
/python/afinnBucketPredict.py
|
f030b9d0fac69090bf485c78d6eda0697b6dbc6c
|
[] |
no_license
|
ankur123/4SQ
|
https://github.com/ankur123/4SQ
|
a6c27866bd34c2344f09ec53055b6cfb41dd889e
|
896a5db5fbbc51ebf7c6df1db8265422d03cb41d
|
refs/heads/master
| 2021-01-13T01:59:19.025064 | 2012-04-19T04:59:58 | 2012-04-19T04:59:58 | 3,497,918 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
import csv
import sys
import math
import random
import re
#These buckets are created so that each bucket will have at least 30 data points
minBucket = -6
maxBucket = 10
csv.field_size_limit(1000000000)
wordListFile = sys.argv[1]
wordsReader = csv.reader(open(wordListFile, 'rb'), delimiter='\t')
wordsReader.next()
words = {}
for row in wordsReader: words[row[0]] = int(row[1])
def getTextRawScore(text):
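    # Strip punctuation, lowercase, and sum the AFINN scores of the words;
    # words missing from the list contribute 0.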
return sum(map(lambda word: words.get(word, 0), re.sub(r'[,\.!\?\"]', ' ', text).lower().split()))
bucketModel = sys.argv[2]
bucketModelReader = csv.reader(open(bucketModel, 'rb'), delimiter="\t")
headers = bucketModelReader.next()
bucketsToDist = {}
for row in bucketModelReader:
bucketNum = int(row[0])
distribution = {}
for i in range(1, len(row)):
label = headers[i]
distribution[label] = float(row[i])
bucketsToDist[bucketNum] = distribution
categories = ["P", "N", "M", "A"]
print "\t".join(categories)
reader = csv.reader(sys.stdin, delimiter='\t')
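# For each tab-separated input line: score the text with the word list,
# clamp the score into the bucket range, and print that bucket's
# label distribution.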
for row in reader:
score = getTextRawScore(row[0])
score = max(score, minBucket)
score = min(score, maxBucket)
distribution = bucketsToDist[score]
print "\t".join(map(lambda C: str(distribution.get(C)), categories))
|
UTF-8
|
Python
| false | false | 2,012 |
10,814,727,664,045 |
97e99d8dc4f34488192732f258645a3dbd26e1de
|
77c16cafb87abbdc3d6a92c016af90c3a60c6ac6
|
/tests/audio_tests.py
|
89bffd845b33ef2bb128fb8b5d2798ae57abb828
|
[
"MIT"
] |
permissive
|
crestonbunch/pymus
|
https://github.com/crestonbunch/pymus
|
ebe3de0d24eb09d6f32609137562c9a0b492c513
|
58e49208f7316ac63173cef463b0f46a6bb82c0f
|
refs/heads/master
| 2023-08-27T19:27:26.716255 | 2014-12-19T01:20:51 | 2014-12-19T01:20:51 | 26,563,537 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import unittest
import numpy as np
from pymus.audio.waveform import Waveform
from pymus.audio.spectrogram import Spectrogram
from pymus.audio.effects import lowpass
class TestWaveform(unittest.TestCase):
def setUp(self):
# create a 5 Hz sine @ 1000 samples/sec
self.f = 1000 # sampling frequency
self.T = 1 / self.f # sampling period
self.t = np.arange(0,1,self.T) # time axis
self.samples = np.sin(2 * np.pi * 5.0 * self.t)
        # 1000 samples / sec
self.wf = Waveform(len(self.samples), self.f)
self.wf[:] = self.samples;
def test_init(self):
self.assertEqual(len(self.t), len(self.wf))
def test_iter(self):
for i,v in enumerate(self.wf):
self.assertEqual(self.samples[i], v._samples[0])
def test_get(self):
piece1 = self.wf[0:100]
piece2 = self.wf[0:100:2]
        # assertEqual, not assertTrue: assertTrue(100, ...) always passes
        self.assertEqual(100, len(piece1))
        self.assertEqual(50, len(piece2))
self.assertTrue(
(piece1._samples==self.wf._samples[0:100]).all()
)
self.assertTrue(
(piece2._samples==self.wf._samples[0:100:2]).all()
)
def test_set(self):
self.assertFalse(self.wf._samples[2] == 1.0)
self.wf[2] = 1.0
self.assertTrue(self.wf._samples[2] == 1.0)
self.wf[2] = 99999
self.assertTrue(self.wf._samples[2] == 1.0)
self.wf[2] = -99999
self.assertTrue(self.wf._samples[2] == -1.0)
def test_add(self):
gained = self.wf + 0.5
self.assertEqual(gained._samples[4], self.wf._samples[4] + 0.5)
self.assertTrue(
(gained._samples == self.wf._samples + 0.5).any()
)
self.assertTrue((gained._samples <= 1.0).all())
self.assertTrue((gained._samples >= -1.0).all())
def test_sub(self):
gained = self.wf - 0.5
self.assertEqual(gained._samples[2], self.wf._samples[2] - 0.5)
self.assertTrue(
(gained._samples == self.wf._samples - 0.5).any()
)
self.assertTrue((gained._samples <= 1.0).all())
self.assertTrue((gained._samples >= -1.0).all())
def test_mul(self):
softened = self.wf * 0.8
self.assertEqual(softened._samples[100], self.wf._samples[100] * 0.8)
self.assertTrue(
(softened._samples == self.wf._samples * 0.8).all()
)
self.assertTrue((softened._samples <= 1.0).all())
self.assertTrue((softened._samples >= -1.0).all())
loudened = self.wf * 1.2
self.assertEqual(loudened._samples[10], self.wf._samples[10] * 1.2)
self.assertTrue(
(loudened._samples == self.wf._samples * 1.2).any()
)
self.assertTrue((loudened._samples <= 1.0).all())
self.assertTrue((loudened._samples >= -1.0).all())
def test_mean(self):
self.assertAlmostEqual(self.wf.mean, np.sum(self.wf._samples)/len(self.wf))
def test_std(self):
m = self.wf.mean
t = np.sqrt(np.sum([(x-m)**2 for x in self.wf._samples]) / len(self.wf))
self.assertAlmostEqual(self.wf.std, t)
def test_pow(self):
powed = self.wf ** 2
self.assertEqual(powed._samples[100], self.wf._samples[100] ** 2)
self.assertTrue(
(powed._samples == self.wf._samples ** 2).any()
)
self.assertTrue((powed._samples <= 1.0).all())
self.assertTrue((powed._samples >= -1.0).all())
def test_rms(self):
added = (self.wf * 0.5) + 0.1
self.assertAlmostEqual(self.wf.rms, np.std(self.wf._samples))
# rms^2 = mean^2 + std^2
self.assertAlmostEqual(added.rms**2, np.std(added._samples)**2 + np.mean(added._samples)**2)
def test_resample(self):
resampled = self.wf.resample(400)
wf_bins = np.fft.fftfreq(len(self.wf), 1/self.wf.sample_rate)
re_bins = np.fft.fftfreq(len(resampled), 1/resampled.sample_rate)
wf_set = dict(zip(wf_bins, np.abs(self.wf.fft)))
re_set = dict(zip(re_bins, np.abs(resampled.fft)))
# The same frequencies should be positive in both fft's
for freq,amp in wf_set.items():
if freq in re_set and wf_set[freq] > 0:
self.assertTrue(re_set[freq] > 0)
elif freq in re_set:
self.assertEqual(wf_set[freq], re_set[freq])
class TestSpectrogram(unittest.TestCase):
def setUp(self):
# create a funny waveform
self.samples = (np.sin(np.linspace(0, 5 * 2 * np.pi, 500)) + # 1 Hz
np.sin(np.linspace(0, 10 * 2 * np.pi, 500)) + # 2 Hz
np.sin(np.linspace(0, 50 * 2 * np.pi, 500)) # 10 Hz
)
# 100 samples / sec
self.wf = Waveform(len(self.samples), 100)
self.wf[:] = self.samples;
# create a spectrogram
self.sp = Spectrogram(self.wf, 10, np.hanning, 5, 100)
def test_len(self):
self.assertEqual(len(self.wf), len(self.sp))
def test_iter(self):
for i in self.sp:
self.assertEqual(len(self.sp.bins), len(i))
def test_get(self):
self.assertEqual(len(self.sp.bins), len(self.sp[0]))
def test_set(self):
self.assertFalse((self.sp[0] == np.zeros(len(self.sp.bins))).all())
self.sp[0] = np.zeros(len(self.sp.bins))
self.assertTrue((self.sp[0] == np.zeros(len(self.sp.bins))).all())
class TestEffects(unittest.TestCase):
def setUp(self):
self.f = 1000 # sample rate in Hz
self.T = 1 / self.f # sample period
self.t = np.arange(0,1,self.T) # time axis
self.freq1 = 50
self.freq2 = 100
self.freq3 = 125
self.freq4 = 250
self.a = 0.5 * np.sin(2 * np.pi * self.freq1 * self.t)
self.b = 0.5 * np.sin(2 * np.pi * self.freq2 * self.t)
self.c = 0.5 * np.sin(2 * np.pi * self.freq3 * self.t)
self.d = 0.5 * np.sin(2 * np.pi * self.freq4 * self.t)
self.wf = Waveform(len(self.t), self.f)
self.wf[:] = self.a + self.b + self.c + self.d
def test_lowpass(self):
lowpassed = Waveform(len(self.t), self.f)
lowpassed[:] = self.wf._samples
lowpassed.lowpass(110)
bins = np.fft.fftfreq(len(self.t), self.T)
# frequencies that should be zeroed
mask = (np.abs(bins) < 110)
all_fft = np.abs(self.wf.fft)
low_fft = np.abs(lowpassed.fft)
# tolerance threshold for verifying lowpass filter results
tol = 2.0
for i,v in enumerate(mask):
if v:
self.assertAlmostEqual(low_fft[i], all_fft[i], delta=tol)
else:
self.assertAlmostEqual(low_fft[i], 0.0, delta=tol)
def test_highpass(self):
highpassed = Waveform(len(self.t), self.f)
highpassed[:] = self.wf._samples
highpassed.highpass(110)
bins = np.fft.fftfreq(len(self.t), self.T)
# frequencies that should be zeroed
mask = (np.abs(bins) > 110)
all_fft = np.abs(self.wf.fft)
high_fft = np.abs(highpassed.fft)
# tolerance threshold for verifying highpass filter results
tol = 2.0
for i,v in enumerate(mask):
if v:
self.assertAlmostEqual(high_fft[i], all_fft[i], delta=tol)
else:
self.assertAlmostEqual(high_fft[i], 0.0, delta=tol)
def test_bandpass(self):
bandpassed = Waveform(len(self.t), self.f)
bandpassed[:] = self.wf._samples
bandpassed.bandpass(60, 240)
bins = np.fft.fftfreq(len(self.t), self.T)
# frequencies that should be zeroed
mask = (np.abs(bins) > 90) * (np.abs(bins) < 150)
all_fft = np.abs(self.wf.fft)
band_fft = np.abs(bandpassed.fft)
# tolerance threshold for verifying bandpass filter results
tol = 50
for i,v in enumerate(mask):
if v:
self.assertAlmostEqual(band_fft[i], all_fft[i], delta=tol)
else:
self.assertAlmostEqual(band_fft[i], 0.0, delta=tol)
if __name__ == '__main__':
unittest.main()
|
UTF-8
|
Python
| false | false | 2,014 |
12,841,952,217,439 |
66c60208acdfc5a1768eb8b7dc37c5abac436a86
|
faecb40f4188d575e3a7bb417b3b6d8c0d9f7929
|
/openttd/constants.py
|
ea103146e7293face8a9b6f6fd924213ffbecba7
|
[
"GPL-3.0-only"
] |
non_permissive
|
bridgeduan/openttd-python
|
https://github.com/bridgeduan/openttd-python
|
481aff80c4280ff6492e962ad11ae759e885c854
|
ed27100f6da00898d1e4301e3c78743bcbbdbf33
|
refs/heads/master
| 2020-05-20T01:30:36.563214 | 2010-06-28T15:31:05 | 2010-06-28T15:31:05 | 37,499,524 | 1 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from const_commands import *
# OTTD Constants
SEND_MTU = 1460 # Number of bytes we can pack in a single packet
PACKET_UDP_CLIENT_FIND_SERVER=0 # Queries a game server for game information
PACKET_UDP_SERVER_RESPONSE=1 # Reply of the game server with game information
PACKET_UDP_CLIENT_DETAIL_INFO=2 # Queries a game server about details of the game, such as companies
PACKET_UDP_SERVER_DETAIL_INFO=3 # Reply of the game server about details of the game, such as companies
PACKET_UDP_SERVER_REGISTER=4 # Packet to register itself to the master server
PACKET_UDP_MASTER_ACK_REGISTER=5 # Packet indicating registration has succedeed
PACKET_UDP_CLIENT_GET_LIST=6 # Request for serverlist from master server
PACKET_UDP_MASTER_RESPONSE_LIST=7 # Response from master server with server ip's + port's
PACKET_UDP_SERVER_UNREGISTER=8 # Request to be removed from the server-list
PACKET_UDP_CLIENT_GET_NEWGRFS=9 # Requests the name for a list of GRFs (GRF_ID and MD5)
PACKET_UDP_SERVER_NEWGRFS=10 # Sends the list of NewGRF's requested.
PACKET_UDP_MASTER_SESSION_KEY=11 # Sends a fresh session key to the client
PACKET_UDP_END=12 # Must ALWAYS be on the end of this list!! (period)
NETWORK_COMPANY_INFO_VERSION = 6
NETWORK_GAME_INFO_VERSION = 4
NETWORK_VEHICLE_TYPES = 5
NETWORK_STATION_TYPES = 5
NETWORK_MAX_GRF_COUNT = 62
# packet IDs
PACKET_SERVER_FULL=0
PACKET_SERVER_BANNED=1
PACKET_CLIENT_JOIN=2
PACKET_SERVER_ERROR=3
PACKET_CLIENT_COMPANY_INFO=4
PACKET_SERVER_COMPANY_INFO=5
PACKET_SERVER_CLIENT_INFO=6
PACKET_SERVER_NEED_GAME_PASSWORD=7
PACKET_SERVER_NEED_COMPANY_PASSWORD=8
PACKET_CLIENT_GAME_PASSWORD=9
PACKET_CLIENT_COMPANY_PASSWORD=10
PACKET_SERVER_WELCOME=11
PACKET_CLIENT_GETMAP=12
PACKET_SERVER_WAIT=13
PACKET_SERVER_MAP=14
PACKET_CLIENT_MAP_OK=15
PACKET_SERVER_JOIN=16
PACKET_SERVER_FRAME=17
PACKET_SERVER_SYNC=18
PACKET_CLIENT_ACK=19
PACKET_CLIENT_COMMAND=20
PACKET_SERVER_COMMAND=21
PACKET_CLIENT_CHAT=22
PACKET_SERVER_CHAT=23
PACKET_CLIENT_SET_PASSWORD=24
PACKET_CLIENT_SET_NAME=25
PACKET_CLIENT_QUIT=26
PACKET_CLIENT_ERROR=27
PACKET_SERVER_QUIT=28
PACKET_SERVER_ERROR_QUIT=29
PACKET_SERVER_SHUTDOWN=30
PACKET_SERVER_NEWGAME=31
PACKET_SERVER_RCON=32
PACKET_CLIENT_RCON=33
PACKET_SERVER_CHECK_NEWGRFS=34
PACKET_CLIENT_NEWGRFS_CHECKED=35
PACKET_SERVER_MOVE=36
PACKET_CLIENT_MOVE=37
PACKET_SERVER_COMPANY_UPDATE=38
PACKET_SERVER_CONFIG_UPDATE=39
PACKET_END=40
MAP_PACKET_START=0
MAP_PACKET_NORMAL=1
MAP_PACKET_END=2
MAX_COMPANIES=0x0F
packet_names = {
0:"PACKET_SERVER_FULL",
1:"PACKET_SERVER_BANNED",
2:"PACKET_CLIENT_JOIN",
3:"PACKET_SERVER_ERROR",
4:"PACKET_CLIENT_COMPANY_INFO",
5:"PACKET_SERVER_COMPANY_INFO",
6:"PACKET_SERVER_CLIENT_INFO",
7:"PACKET_SERVER_NEED_GAME_PASSWORD",
8:"PACKET_SERVER_NEED_COMPANY_PASSWORD",
9:"PACKET_CLIENT_GAME_PASSWORD",
10:"PACKET_CLIENT_COMPANY_PASSWORD",
11:"PACKET_SERVER_WELCOME",
12:"PACKET_CLIENT_GETMAP",
13:"PACKET_SERVER_WAIT",
14:"PACKET_SERVER_MAP",
15:"PACKET_CLIENT_MAP_OK",
16:"PACKET_SERVER_JOIN",
17:"PACKET_SERVER_FRAME",
18:"PACKET_SERVER_SYNC",
19:"PACKET_CLIENT_ACK",
20:"PACKET_CLIENT_COMMAND",
21:"PACKET_SERVER_COMMAND",
22:"PACKET_CLIENT_CHAT",
23:"PACKET_SERVER_CHAT",
24:"PACKET_CLIENT_SET_PASSWORD",
25:"PACKET_CLIENT_SET_NAME",
26:"PACKET_CLIENT_QUIT",
27:"PACKET_CLIENT_ERROR",
28:"PACKET_SERVER_QUIT",
29:"PACKET_SERVER_ERROR_QUIT",
30:"PACKET_SERVER_SHUTDOWN",
31:"PACKET_SERVER_NEWGAME",
32:"PACKET_SERVER_RCON",
33:"PACKET_CLIENT_RCON",
34:"PACKET_SERVER_CHECK_NEWGRFS",
35:"PACKET_CLIENT_NEWGRFS_CHECKED",
36:"PACKET_SERVER_MOVE",
37:"PACKET_CLIENT_MOVE",
38:"PACKET_SERVER_COMPANY_UPDATE",
39:"PACKET_SERVER_CONFIG_UPDATE",
40:"PACKET_END"
}
def parseCommands(cmdstr):
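    """Parse the command table (one 'CMD_NAME, ///< description' entry per
    line, as imported from const_commands) into a dict of
    {command_id: [name, description]}, numbering the commands from 0."""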
num = 0
result = {}
lines = cmdstr.split("\n")
for line in lines:
args = line.split(",")
if len(args) < 2:
continue
cmd_id = num
cmd_name = args[0].strip()
cmd_desc = args[1].strip('/< ')
result[cmd_id] = [cmd_name, cmd_desc]
#print " %d = '%s' : '%s'" % (cmd_id, cmd_name, cmd_desc)
num += 1
return result
# parse the commands
command_names = parseCommands(str_commands)
# create a map in the inverse direction
commands = {}
for i in command_names:
commands[command_names[i][0]] = i
NETWORK_ACTION_JOIN=0
NETWORK_ACTION_LEAVE=1
NETWORK_ACTION_SERVER_MESSAGE=2
NETWORK_ACTION_CHAT=3
NETWORK_ACTION_CHAT_COMPANY=4
NETWORK_ACTION_CHAT_CLIENT=5
NETWORK_ACTION_GIVE_MONEY=6
NETWORK_ACTION_NAME_CHANGE=7
NETWORK_ACTION_COMPANY_SPECTATOR=8
NETWORK_ACTION_COMPANY_JOIN=9
NETWORK_ACTION_COMPANY_NEW=10
NETWORK_SERVER_MESSAGE_GAME_PAUSED_PLAYERS=0 # Game paused (not enough players)
NETWORK_SERVER_MESSAGE_GAME_UNPAUSED_PLAYERS=1 # Game unpaused (enough players)
NETWORK_SERVER_MESSAGE_GAME_PAUSED_CONNECT=2 # Game paused (connecting client)
NETWORK_SERVER_MESSAGE_GAME_UNPAUSED_CONNECT=3 # Game unpaused (client connected)
NETWORK_SERVER_MESSAGE_GAME_UNPAUSED_CONNECT_FAIL=4# Game unpaused (client failed to connect)
DESTTYPE_BROADCAST=0 # Send message/notice to all players (All)
DESTTYPE_TEAM=1 # Send message/notice to everyone playing the same company (Team)
DESTTYPE_CLIENT=2 # Send message/notice to only a certain player (Private)
NETWORK_ERROR_GENERAL=0 # Generally unused
# Signals from clients
NETWORK_ERROR_DESYNC=1
NETWORK_ERROR_SAVEGAME_FAILED=2
NETWORK_ERROR_CONNECTION_LOST=3
NETWORK_ERROR_ILLEGAL_PACKET=4
NETWORK_ERROR_NEWGRF_MISMATCH=5
# Signals from servers
NETWORK_ERROR_NOT_AUTHORIZED=6
NETWORK_ERROR_NOT_EXPECTED=7
NETWORK_ERROR_WRONG_REVISION=8
NETWORK_ERROR_NAME_IN_USE=9
NETWORK_ERROR_WRONG_PASSWORD=10
NETWORK_ERROR_PLAYER_MISMATCH=11 # Happens in CLIENT_COMMAND
NETWORK_ERROR_KICKED=12
NETWORK_ERROR_CHEATER=13
NETWORK_ERROR_FULL=14
NETWORK_GAME_PASSWORD=0
NETWORK_COMPANY_PASSWORD=1
error_names={
    0:['NETWORK_ERROR_GENERAL', 'unknown reason'],
    1:['NETWORK_ERROR_DESYNC', 'desynced'],
    2:['NETWORK_ERROR_SAVEGAME_FAILED', 'couldn\'t load savegame'],
    3:['NETWORK_ERROR_CONNECTION_LOST', 'connection lost'],
    4:['NETWORK_ERROR_ILLEGAL_PACKET', 'illegal packet'],
    5:['NETWORK_ERROR_NEWGRF_MISMATCH', 'newgrf mismatch'],
    6:['NETWORK_ERROR_NOT_AUTHORIZED', 'not authorized'],
    7:['NETWORK_ERROR_NOT_EXPECTED', 'unexpected packet'],
    8:['NETWORK_ERROR_WRONG_REVISION', 'wrong revision'],
    9:['NETWORK_ERROR_NAME_IN_USE', 'name in use'],
    10:['NETWORK_ERROR_WRONG_PASSWORD', 'wrong password'],
    11:['NETWORK_ERROR_PLAYER_MISMATCH', 'incorrect player id in command'],
    12:['NETWORK_ERROR_KICKED', 'kicked'],
    13:['NETWORK_ERROR_CHEATER', 'cheater'],
    14:['NETWORK_ERROR_FULL', 'server full'],
}
OWNER_BEGIN = 0x00 # First Owner
PLAYER_FIRST = 0x00 # First Player, same as owner
MAX_PLAYERS = 0x08 # Maximum number of players
OWNER_TOWN = 0x0F # A town owns the tile, or a town is expanding
OWNER_NONE = 0x10 # The tile has no ownership
OWNER_WATER = 0x11 # The tile/execution is done by "water"
OWNER_END = 0x12 # Last + 1 owner
INVALID_OWNER = 0xFF # An invalid owner
INVALID_PLAYER = 0xFF # An invalid player
PLAYER_INACTIVE_CLIENT = 253 # The client is joining
PLAYER_NEW_COMPANY = 254 # The client wants a new company
PLAYER_SPECTATOR = 255 # The client is spectating
NETLANG_ANY=0
NETWORK_MASTER_SERVER_VERSION = 1
NETWORK_MASTER_SERVER_HOST = "master.openttd.org"
NETWORK_CONTENT_SERVER_HOST = "content.openttd.org"
NETWORK_CONTENT_MIRROR_HOST = "binaries.openttd.org"
NETWORK_MASTER_SERVER_PORT = 3978
NETWORK_CONTENT_SERVER_PORT = 3978
NETWORK_CONTENT_MIRROR_PORT = 80
NETWORK_MASTER_SERVER_WELCOME_MESSAGE = "OpenTTDRegister"
NETWORK_CONTENT_MIRROR_URL = "/bananas"
OPENTTD_FINGER_SERVER = "finger.openttd.org"
OPENTTD_FINGER_PORT = "80"
OPENTTD_FINGER_TAGS_URL="/tags.txt"
# The minimum starting year/base year of the original TTD
ORIGINAL_BASE_YEAR = 1920
# The maximum year of the original TTD
ORIGINAL_MAX_YEAR = 2090
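# Days from year 0 up to ORIGINAL_BASE_YEAR, counting leap days with the
# Gregorian rule (+1 every 4th year, -1 every 100th, +1 every 400th)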
DAYS_TILL_ORIGINAL_BASE_YEAR = (365 * ORIGINAL_BASE_YEAR + ORIGINAL_BASE_YEAR / 4 - ORIGINAL_BASE_YEAR / 100 + ORIGINAL_BASE_YEAR / 400)
NETWORK_GAME_INFO_VERSION = 4
known_languages=['ANY','ENGLISH','GERMAN','FRENCH','BRAZILIAN','BULGARIAN','CHINESE','CZECH','DANISH','DUTCH','ESPERANTO','FINNISH','HUNGARIAN','ICELANDIC','ITALIAN','JAPANESE','KOREAN','LITHUANIAN','NORWEGIAN','POLISH','PORTUGUESE','ROMANIAN','RUSSIAN','SLOVAK','SLOVENIAN','SPANISH','SWEDISH','TURKISH','UKRAINIAN','AFRIKAANS','CROATIAN','CATALAN','ESTONIAN','GALICIAN','GREEK','LATVIAN']
saveload_chunk_types = {
"CH_RIFF": 0,
"CH_ARRAY": 1,
"CH_SPARSE_ARRAY": 2,
"CH_TYPE_MASK": 3,
"CH_LAST": 8,
"CH_AUTO_LENGTH": 16,
"CH_PRI_0": 0 << 4,
"CH_PRI_1": 1 << 4,
"CH_PRI_2": 2 << 4,
"CH_PRI_3": 3 << 4,
"CH_PRI_SHL": 4,
"CH_NUM_PRI_LEVELS": 4,
}
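# Packet header layout (an assumption read off the struct format strings):
# '<H' = little-endian uint16 total packet length, 'B' = uint8 packet type.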
HEADER_FORMAT = "<HB"
PACKETSIZEHEADER_FORMAT = "<H"
import structz
HEADER = structz.Struct(HEADER_FORMAT)
HEADER_SIZE = HEADER.size
PACKETSIZEHEADER = structz.Struct(PACKETSIZEHEADER_FORMAT)
PACKETSIZEHEADER_SIZE = PACKETSIZEHEADER.size
del structz
ContentType = {
'BEGIN' : 1, # Helper to mark the begin of the types
'BASE_GRAPHICS': 1, # The content consists of base graphics
'NEWGRF' : 2, # The content consists of a NewGRF
'AI' : 3, # The content consists of an AI
'AI_LIBRARY' : 4, # The content consists of an AI library
'SCENARIO' : 5, # The content consists of a scenario
'HEIGHTMAP' : 6, # The content consists of a heightmap
'BASE_SOUNDS' : 7, # The content consists of base sounds
'BASE_MUSIC' : 8, # The content consists of base music
'END' : 9 # Helper to mark the end of the types
}
ContentTypeDescr = {
1: 'BASE_GRAPHICS',
2: 'NEWGRF',
3: 'AI',
4: 'AI_LIBRARY',
5: 'SCENARIO',
6: 'HEIGHTMAP',
7: 'BASE_SOUNDS',
8: 'BASE_MUSIC',
9: 'END'
}
PACKET_CONTENT_CLIENT_INFO_LIST =0# Queries the content server for a list of info of a given content type
PACKET_CONTENT_CLIENT_INFO_ID =1# Queries the content server for information about a list of internal IDs
PACKET_CONTENT_CLIENT_INFO_EXTID =2# Queries the content server for information about a list of external IDs
PACKET_CONTENT_CLIENT_INFO_EXTID_MD5 =3# Queries the content server for information about a list of external IDs and MD5
PACKET_CONTENT_SERVER_INFO =4# Reply of content server with information about content
PACKET_CONTENT_CLIENT_CONTENT =5# Request a content file given an internal ID
PACKET_CONTENT_SERVER_CONTENT =6# Reply with the content of the given ID
PACKET_CONTENT_END =7# Must ALWAYS be on the end of this list!! (period)
|
UTF-8
|
Python
| false | false | 2,010 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.