Dataset schema (one row per source file; value ranges, string lengths, and class counts are as reported by the dataset viewer):

- __id__: int64, 3.09k to 19,722B
- blob_id: string, length 40
- directory_id: string, length 40
- path: string, length 2 to 256
- content_id: string, length 40
- detected_licenses: list
- license_type: string, 3 classes
- repo_name: string, length 5 to 109
- repo_url: string, length 24 to 128
- snapshot_id: string, length 40
- revision_id: string, length 40
- branch_name: string, length 4 to 42
- visit_date: timestamp[ns]
- revision_date: timestamp[ns]
- committer_date: timestamp[ns]
- github_id: int64, 6.65k to 581M, nullable
- star_events_count: int64, 0 to 1.17k
- fork_events_count: int64, 0 to 154
- gha_license_id: string, 16 classes
- gha_fork: bool, 2 classes
- gha_event_created_at: timestamp[ns]
- gha_created_at: timestamp[ns]
- gha_updated_at: timestamp[ns]
- gha_pushed_at: timestamp[ns]
- gha_size: int64, 0 to 5.76M, nullable
- gha_stargazers_count: int32, 0 to 407, nullable
- gha_forks_count: int32, 0 to 119, nullable
- gha_open_issues_count: int32, 0 to 640, nullable
- gha_language: string, length 1 to 16, nullable
- gha_archived: bool, 2 classes
- gha_disabled: bool, 1 class
- content: string, length 9 to 4.53M
- src_encoding: string, 18 classes
- language: string, 1 class
- is_vendor: bool, 2 classes
- is_generated: bool, 2 classes
- year: int64, 1.97k to 2.01k
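A minimal loading sketch (assumed, not part of the dump): if the rows above are exported to a local Parquet file, the schema can be filtered with pandas. The file name here is hypothetical.

import pandas as pd

# Hypothetical local export of the table; the dump does not name a storage path.
df = pd.read_parquet("code_files.parquet")

# Keep non-vendored, non-generated Python files under a permissive license.
subset = df[(df["language"] == "Python")
            & ~df["is_vendor"]
            & ~df["is_generated"]
            & (df["license_type"] == "permissive")]
print(subset[["repo_name", "path", "year"]].head())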
__id__: 3,470,333,600,868 | blob_id: 227578830c096b75fbf626d55a9a94145ee6cb74 | directory_id: 41d25bf5c5d936751c18ba35cfc413e385d53b7c | path: /exception.py | content_id: 939fb678986c5801ea0c30ffe4c3a9e8378694d8 | detected_licenses: ["GPL-3.0-only"] | license_type: non_permissive | repo_name: erictzeng/pyscheme | repo_url: https://github.com/erictzeng/pyscheme | snapshot_id: f6ccc1b70e91c92a6a6920bfde3a98283d829ce2 | revision_id: b39aa3c32c2b8e039901e57ca6ba1d7f9ee0c824 | branch_name: refs/heads/master | visit_date: 2020-03-26T19:25:14.305010 | revision_date: 2011-08-23T06:33:09 | committer_date: 2011-08-23T06:33:09 | github_id: 2,094,438 | star_events_count: 3 | fork_events_count: 2 | gha_* fields: null | content:
class SchemeError(Exception):
pass
# Unbound variable
class UnboundVariableError(SchemeError):
def __init__(self, var):
self.msg = "Unbound variable {0}".format(var)
# IllegalArgumentErrors
class IllegalArgumentError(SchemeError):
pass
# Bad number of arguments
class ArgumentCountError(IllegalArgumentError):
def __init__(self, function, expected, given):
self.msg = "Bad number of arguments: {0} takes {1} arguments ({2} given)".format(function, expected, given)
# Wrong type of argument
class WrongArgumentTypeError(IllegalArgumentError):
def __init__(self, function, expected, given):
self.msg = "Wrong type of argument for {0}: Expected {1} ({2} given)".format(function, expected, given)
# Index out of bounds
class IndexOutOfBoundsError(SchemeError):
def __init__(self, vector, index, length):
self.msg = "Index out of bounds: Tried to access index {1} of {0} (with length {2})".format(vector, index, length)
# Mismatched Parens
class MismatchedParensError(SchemeError):
def __init__(self, expression):
self.msg = "Mismatched parenthesis: {0}".format(expression)
# Parse Error
class ParseError(SchemeError):
def __init__(self, token):
self.msg = "Error: Unidentified token {0}".format(token)
# Trace Error
class TraceError(SchemeError):
def __init__(self, expression, reason):
self.msg = "Error: Cannot trace {0}: {1}".format(expression, reason)
# Untrace Error
class UntraceError(SchemeError):
def __init__(self, expression, reason):
self.msg = "Error: Cannot untrace {0}: {1}".format(expression. reason)
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | year: 2011

__id__: 8,340,826,513,050 | blob_id: bcc284980619ad22c7d7ce6e60772c4d72832af1 | directory_id: b16286c734ef21db5bef09ca78bf9a09035fb035 | path: /core/views.py | content_id: f18eeaf932d4b2b395953fb41c57d4e90855886a | detected_licenses: [] | license_type: no_license | repo_name: benbaka/schoolapp | repo_url: https://github.com/benbaka/schoolapp | snapshot_id: ff7e4659b5b3dc6be53c7b485a9c1fb771664081 | revision_id: aba0f2c172dbc31ba4c48207dc6401aee20ed229 | branch_name: refs/heads/master | visit_date: 2021-01-01T05:48:30.561711 | revision_date: 2014-10-06T23:02:33 | committer_date: 2014-10-06T23:02:33 | github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_* fields: null | content:
from django.shortcuts import render, render_to_response
from django.http import HttpResponse, HttpResponseRedirect
from django.forms.formsets import formset_factory
from django.template import RequestContext
from core.models import Student
from core.forms import RollCallForm
# Create your views here.
def register(request):
context = RequestContext(request)
RollCallFormSet = formset_factory(RollCallForm, extra=0)
formset = ""
if request.method == "POST":
register_formset = RollCallFormSet(request.POST, request.FILES)
if register_formset.is_valid():
for data in register_formset.cleaned_data:
print(data['student_id'])
else:
print(register_formset.errors)
else:
students = Student.objects.all()
# Build the hash to be passed to the form
# as the initial data
array_of_student_hashes = []
for student in students:
student_hash = {'student_id': student.id, 'student_name': student.first_name + " " + student.last_name,
'present': False}
array_of_student_hashes.append(student_hash)
formset = RollCallFormSet(initial=array_of_student_hashes)
return render_to_response("register.html",{'formset':formset}, context)
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | year: 2014

__id__: 11,106,785,441,267 | blob_id: 2b511a53389c7eedf10468bdddf3527ee5cb6641 | directory_id: 8f421b32ceafd1ed7e7a8225e61c6ed690d20660 | path: /src/tests/testsBaseline.py | content_id: 96edb759316c1968f31902b8b9402a830779e405 | detected_licenses: [] | license_type: no_license | repo_name: robrant/bam | repo_url: https://github.com/robrant/bam | snapshot_id: 0b39860cb0b0fc920bd1b4cb74bacd94d82bdc6a | revision_id: 892f7066ffeca29792f231ff1217d2cd7228d8fd | branch_name: refs/heads/master | visit_date: 2021-01-23T07:09:44.245198 | revision_date: 2012-10-13T15:38:18 | committer_date: 2012-10-13T15:38:18 | github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_* fields: null | content:
import sys
importDir = ['/Users/brantinghamr/Documents/Code/eclipseWorkspace/bam/src/tests/',
'/Users/brantinghamr/Documents/Code/eclipseWorkspace/bam/src/scripts/',
'/Users/brantinghamr/Documents/Code/eclipseWorkspace/bam/src/libs/']
for dirx in importDir:
if dirx not in sys.path: sys.path.append(dirx)
import unittest
import datetime
import random
# FOSS libs
import numpy as np
#Custom libs
import mdb
from timeSeriesDoc import timeSeries
from baseObjects import keyword as kw
import baselineProcessing as bl
class TestBaseline(unittest.TestCase):
def testGetAllCountForOneCell(self):
''' Gets a count for a single cell'''
c, dbh = mdb.getHandle()
dbh = mdb.setupCollections(dbh, dropCollections=True) # Set up collections
tweetTime = datetime.datetime(2011,1,2,12,5,15)
oldTweetTime = tweetTime - datetime.timedelta(seconds=11*60)
# Build a keyword to represent the baseline
kword = kw(keyword='keyword1', timeStamp=oldTweetTime, lat=34.4, lon=45.5,
text='this text contained the hashtag #keyword1',
tweetID=346664, userID=4444, source='twitter')
# New timeseries object
ts = timeSeries()
ts.importData(kword)
success = ts.insertBlankDoc()
# Build a keyword
kword = kw(keyword='keyword1', timeStamp=tweetTime, lat=34.4, lon=45.5,
text='this text contained the hashtag #keyword1',
tweetID=346664, userID=4444, source='twitter')
# New timeseries object
ts = timeSeries()
ts.importData(kword)
success = ts.insertBlankDoc()
# ALL DOCUMENTS
mgrs = '38SND4595706622'
keyword = 'keyword1'
# This indate represents when the baseline was run (12:10) minus the interest period (10 minutes)
inDate = datetime.datetime(2011,1,2,12,0,0)
results = bl.getResultsPerCell(dbh, collection='timeseries', mgrs=mgrs, keyword=keyword, inDate=inDate)
self.assertEqual(len(results), 1)
def testGetAllCountForOneCellLookback(self):
''' Gets a count for a single cell'''
tweetTime = datetime.datetime(2011,1,2,12,5,15)
oldTweetTime = tweetTime - datetime.timedelta(seconds=15*60)
baselineTime = datetime.datetime(2011,1,2,12,0,0)
# Get a db handle
c, dbh = mdb.getHandle()
dbh = mdb.setupCollections(dbh, dropCollections=True) # Set up collections
# Build a keyword
kword = kw(keyword='keyword1', timeStamp=tweetTime, lat=34.4, lon=45.5,
text='this text contained the hashtag #keyword1',
tweetID=346664, userID=4444, source='twitter')
# New timeseries object
ts = timeSeries()
ts.importData(kword)
success = ts.insertBlankDoc()
# Last 2 documents
lookback = 24
mgrs = '38SND4595706622'
qKeyword = 'keyword1'
res = bl.getResultsPerCell(dbh,
collection='timeseries',
mgrs=mgrs,
keyword=qKeyword,
inDate=baselineTime,
lookback=lookback)
print res
results = []
for doc in res:
print doc
results.append(doc)
self.assertEqual(len(results), 1)
# Close the connection
mdb.close(c, dbh)
def testgetSliceTimeOfDay(self):
''' Gets a slice of each day preceding.'''
# How big is the input array? 7 days long in this case...
days, hours, minutes = 31, 24, 60
# Build an array populated in certain hours
arr = np.zeros((days,hours,minutes), np.int8)
# For all days, hours 4 and 5 as 1 and flatten
arr[:,14:16,:] = 1
arr = arr.flatten()
outArr = bl.getSliceTimeOfDay(arr, hoi=15, stepDays=7, pad=1)
checkArr = np.ones((4, 120), np.int8)
self.assertEquals(checkArr.tolist(), outArr.tolist())
def testGetLookback(self):
''' Get a new datetime based on a lookback period.'''
nowSurrogate = datetime.datetime(2012,1,2,12,6,3)
lbh = 12
# Truncating at day precision
truth = datetime.datetime(2012,1,1,23,59,0)
start = bl.getLookback(dt=nowSurrogate, lookbackHours=lbh, byDay=True)
self.assertEquals(start, truth)
# Not truncating at day precision
truth = datetime.datetime(2012,1,2,0,6,3)
start = bl.getLookback(dt=nowSurrogate, lookbackHours=lbh)
self.assertEquals(start, truth)
def testBuildFullArray(self):
'''Build a full array from a cursor result'''
# Get a db handle
c, dbh = mdb.getHandle()
dbh = mdb.setupCollections(dbh, dropCollections=True) # Set up collections
# Build a keyword
kword = kw(keyword='keyword1', timeStamp=datetime.datetime(2011,1,2,12,1,1), lat=34.4, lon=45.5,
text='this text contained the hashtag #keyword1', tweetID=346664, userID=4444, source='twitter')
# New timeseries object
ts = timeSeries()
ts.importData(kword)
success = ts.insertBlankDoc()
# Insert the doc now that its been modified
kword.timeStamp = datetime.datetime(2011,1,1,12,1,1)
ts = timeSeries()
ts.importData(kword)
success = ts.insertBlankDoc()
# Last 1 weeks worth of documents
resultSet = bl.getResultsPerCell(dbh, '38SND4595706622', 'keyword1', datetime.datetime(2011,1,2), 168)
# Inputs
inDate = datetime.datetime(2011, 1, 2, 0, 0)
period = datetime.timedelta(days=7)
flat = None
dates, data = bl.buildFullArray(resultSet, inDate, period, flat)
self.assertEquals(len(dates), 8)
self.assertEquals(len(data), 8)
# Close the connection
mdb.close(c, dbh)
def testBuildFullArrayFlat(self):
'''Build a full FLATTENED array from a cursor result'''
st = datetime.datetime.utcnow()
# A keyword that went in yesterday creates a timeseries yesterday
nowDt = datetime.datetime(year=2011,month=1,day=12,hour=11,minute=1,second=1)
oneDay= datetime.timedelta(days=1)
# Get a db handle
c, dbh = mdb.getHandle()
dbh = mdb.setupCollections(dbh, dropCollections=True) # Set up collections
# Build a keyword
kword = kw(keyword='keyword1', timeStamp=nowDt-oneDay, lat=34.4, lon=45.5,
text='this text contained the hashtag #keyword1',
tweetID=346664, userID=4444, source='twitter')
# New timeseries object
ts = timeSeries()
ts.importData(kword)
success = ts.insertBlankDoc()
# Insert 2ND DOC IN THE COLLECTION
kword.timeStamp = nowDt
ts = timeSeries()
ts.importData(kword)
success = ts.insertBlankDoc()
nowDate = nowDt.replace(hour=0,minute=0,second=0,microsecond=0)
# Last 1 weeks worth of documents
resultSet = bl.getResultsPerCell(dbh, '38SND4595706622', 'keyword1', nowDate, 168)
# Close the connection
mdb.close(c, dbh)
# Inputs
period = datetime.timedelta(days=7)
dates, data = bl.buildFullArray(resultSet, nowDate, period, 1)
firstDay = dates[0]
lastDay = dates[-1]
self.assertEquals(data.shape[0], 11520)
self.assertEquals(firstDay, nowDate - period)
self.assertEquals(lastDay, nowDate)
def testlastBaselined(self):
''' Builds a baseline document for inserting.'''
# Connect and get handle
c, dbh = mdb.getHandle()
dbh = mdb.setupCollections(dbh, dropCollections=True)
# Build a keyword object
testKywd = kw(keyword='keyword1',
timeStamp=datetime.datetime(2011,6,22,12,10,45),
lat=34.4, lon=45.5,
text='this text contained the hashtag #keyword1',
tweetID=346664, userID=4444, source='twitter')
# Create a new baseline object
baseLine = bl.baseline(kywd=testKywd, cellBuildPeriod=600)
baseLine.outputs['days30_all'] = 0.5
baseLine.outputs['days7_all'] = 0.4
baseLine.outputs['hrs30_all'] = 0.3
baseLine.outputs['days30_weekly'] = 0.2
baseLine.outputs['days7_daily'] = 0.1
doc = baseLine.buildDoc()
bl.insertBaselineDoc(dbh, doc)
# Method returns the date of last baseline calculation
lastBaseline = baseLine.lastBaselined()
self.assertEquals(lastBaseline, datetime.datetime(2011,6,22,12,10))
# Close the connection
mdb.close(c, dbh)
def testInsertBaselineDoc(self):
''' Inserts a completed baseline document into the baseline collection.'''
# Connect and get handle
c, dbh = mdb.getHandle()
dbh = mdb.setupCollections(dbh, dropCollections=True)
# Build a keyword object
testKywd = kw(keyword='keyword1',
timeStamp=datetime.datetime(2011,6,22,12,10,45),
lat=34.4, lon=45.5,
text='this text contained the hashtag #keyword1',
tweetID=346664, userID=4444, source='twitter')
# Instantiate the baseline object/class
baseLine = bl.baseline(kywd=testKywd,cellBuildPeriod=600)
# Build the document and insert it
doc = baseLine.buildDoc()
bl.insertBaselineDoc(dbh, doc)
res = dbh.baseline.find()[0]
print res
self.assertEquals(res['keyword'], 'keyword1')
self.assertEquals(res['mgrs'], '38SND4595706622')
self.assertEquals(res['mgrsPrecision'], 10)
# Close the connection
mdb.close(c, dbh)
'''
TESTS TO CHECK:
1. Accurate population of days30_all /
2. Accurate population of days7_all /
3. Accurate population of hrs30_all /
4. Accurate population of days30_weekly /
5. Accurate population of days7_daily /
'''
def testjustZeros(self):
''' '''
inArr = np.zeros((10), np.int8)
just0 = bl.justZeros(inArr)
self.assertTrue(just0)
inArr = np.zeros((10), np.int8)
inArr[5] = 1
just1 = bl.justZeros(inArr)
self.assertFalse(just1)
inArr = np.zeros((10), np.int8)
inArr[5] = 40
just1 = bl.justZeros(inArr)
self.assertFalse(just1)
def testProcessBaselineLast30Days(self):
''' Checks accurate population of an array for 30 day all '''
# Connect and get handle
c, dbh = mdb.getHandle()
dbh = mdb.setupCollections(dbh, dropCollections=True)
# Set up some times to work with
tweetTime = datetime.datetime.utcnow()
thisMinute = tweetTime.replace(second=0,microsecond=0)
today = tweetTime.replace(hour=0, minute=0, second=0, microsecond=0)
# Thirty days ago - at the start of the day
lastMonthTweet = tweetTime - datetime.timedelta(days=30)
# Build a keyword object
testKywd = kw(keyword='keyword1',
timeStamp=lastMonthTweet,
lat=34.4, lon=45.5,
text='this text contained the hashtag #keyword1',
tweetID=346664, userID=4444, source='twitter')
# Insert a new timeseries object for the tweet 30 days ago
ts = timeSeries()
ts.importData(testKywd)
success = ts.insertBlankDoc()
ts.updateCount()
# Create a keyword object for the current tweet
testKywd2 = kw(keyword='keyword1',
timeStamp=lastMonthTweet + datetime.timedelta(hours=1),
lat=34.4, lon=45.5,
text='this text contained the hashtag #keyword1',
tweetID=346664, userID=4444, source='twitter')
# Insert the current keyword - NOTE HOW THIS IS AFTER THE BASELINE BUILD
ts2 = timeSeries()
ts2.importData(testKywd2)
success = ts2.insertBlankDoc()
ts2.updateCount()
# Create a keyword object for the current tweet
testKywd3 = testKywd
testKywd3.timeStamp = tweetTime
# Instantiate the baseline object/class
base = bl.baseline(kywd=testKywd3, cellBuildPeriod=600)
if base.needUpdate == True:
if not base.lastBaselined():
doc = base.buildDoc()
bl.insertBaselineDoc(dbh, doc)
# Insert the current keyword - NOTE HOW THIS IS AFTER THE BASELINE BUILD
ts3 = timeSeries()
ts3.importData(testKywd3)
success = ts3.insertBlankDoc()
ts3.updateCount()
tweetTimeMinus2Days = tweetTime - datetime.timedelta(days=2)
# Create a new keyword object to test the daily slicing
testKywd5 = kw(keyword='keyword1',
timeStamp=tweetTimeMinus2Days,
lat=34.4, lon=45.5,
text='this text contained the hashtag #keyword1',
tweetID=346664, userID=4444, source='twitter')
# Insert the current keyword - NOTE HOW THIS IS AFTER THE BASELINE BUILD
ts5 = timeSeries()
ts5.importData(testKywd5)
success = ts5.insertBlankDoc()
ts5.updateCount()
# Process Baseline
base.processBaseline()
# Get back the 30 day array
arr = base.test30DayArray
# Calculate what the array length should be
soFarToday = (thisMinute - today).seconds/60.0
# The start of the array datetime
lastMonthDay = lastMonthTweet.replace(hour=0, minute=0, second=0, microsecond=0)
# The number of days between today and the start of the array (then in minutes)
dateDiff = (today - lastMonthDay)
minsDiff = dateDiff.days*1440 + dateDiff.seconds/60.0
total = minsDiff + soFarToday
# Confirm its the right length
self.assertEqual(total, len(arr))
# Get the minutes for the first 2 keywords (the third shouldn't be there)
kwd1Min = int((testKywd.timeStamp - lastMonthDay).seconds/60)
kwd2Min = int((testKywd2.timeStamp - lastMonthDay).seconds/60)
kwd1Test = [arr[kwd1Min-1], arr[kwd1Min], arr[kwd1Min+1]]
kwd2Test = [arr[kwd2Min-1], arr[kwd2Min], arr[kwd2Min+1]]
for j in range(len(arr)):
    if arr[j] > 0:
        print j, arr[j]
self.assertEquals(kwd1Test, [0,1,0])
self.assertEquals(kwd2Test, [0,1,0])
# 30 DAY TIME SLICE CHECK
arr = base.test30DaySliced
# weekly
testSliced = int(30/7) * 6 * 60
self.assertEquals(testSliced, len(arr))
arr7Day = base.test7DayArray
test7DayAll = (thisMinute - today).seconds/60.0 + 1440*7
self.assertEquals(len(arr7Day), int(test7DayAll))
arr30Hrs = base.test30hrArray
test30Hours = 30*60
self.assertEquals(len(arr30Hrs), int(test30Hours))
# Close the connection
mdb.close(c, dbh)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | year: 2012

__id__: 2,680,059,594,260 | blob_id: 79537b5eaf1700676eb50b573f98462a250d4177 | directory_id: 4e08f1984d2b6fbf853c57f1cabd94fa4506cf54 | path: /lib/FRETUtils/Run.py | content_id: 09226999fbad721e397c241917a71f1fd3e80c4e | detected_licenses: [] | license_type: no_license | repo_name: daneeq/fretutils | repo_url: https://github.com/daneeq/fretutils | snapshot_id: 5e26a8b2e9d88ed5755d8041fb96318eebbff6d2 | revision_id: 2628dec3b2fb5ea9032ce6c6d3654ca432e8cd59 | branch_name: refs/heads/master | visit_date: 2020-12-11T06:13:38.923571 | revision_date: 2013-01-18T14:40:45 | committer_date: 2013-01-18T14:40:45 | github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_* fields: null | content:
'''
Created on 04.10.2012
@author: mhoefli
'''
from FRETUtils.Efficiencies import calculateBursts, calcKineticRatesFromConfig
from FRETUtils.Ensemble import readProbabilities, assignTrajProbabilityClasses, cleanProbabilities
from FRETUtils.Photons import setPhotonGenerator
from FRETUtils.Trajectories import createTrajectoryList, readTrajs, calcFRETRates, writeRKProbTraj, floodTrajsWithPhotons
from FRETUtils.Config import FRETConfigParser, ReconstructionConfigParser, BurstDistAVGConfigParser
from FRETUtils.Reconstruction import readEfficiencies, constructTM, \
resolveDistances, writeDistances
from FRETUtils.Distances import getDistanceBursts, normalizeTrajProbForSpecies
import random
import sys
import cPickle
import multiprocessing
from numpy import array, savetxt
import os
def getSecureConfig(conffile, parser):
config = parser()
if not os.path.exists(conffile):
with open(conffile, "w") as configfile:
config.write(configfile)
with open(conffile) as rfp:
config.readfp(rfp)
return config
def getFRETConfig(conffile):
return getSecureConfig(conffile, FRETConfigParser)
def getReconstructionConfig(conffile):
return getSecureConfig(conffile, ReconstructionConfigParser)
def getDistAVGConfig(conffile):
return getSecureConfig(conffile, BurstDistAVGConfigParser)
def doMultiprocessRun(options, config, trajectories, eprobabilities):
ncpu = config.get("System", "ncpu")
print "Preparing %d clients for burst generation." % ncpu
pool = multiprocessing.Pool(ncpu)
results = []
nbursts = config.get("Monte Carlo", "nbursts")
print "Total bursts to calculate:", nbursts
try:
blocksize = config.get("System", "blocksize")
except:
blocksize = 100
print "Using default blocksize of", blocksize, "for each job."
blockcount = nbursts // blocksize
remainder = nbursts % blocksize
blocks = [blocksize] * blockcount
blocks.append(remainder)
print "Setting up %d jobs to generate %d bursts" % (blockcount, nbursts)
config.set("System", "verbose", 0)
for block in blocks:
options.rseed = random.randint(0, sys.maxint)
config.makeReadonly()
res = pool.apply_async(calculateBursts, (trajectories, eprobabilities, config, block, random.randint(0, sys.maxint)))
results.append(res)
bursts = []
for ene, res in enumerate(results, start = 1):
bursts += res.get()
print "\r%6d of %6d jobs processed." % (ene, blockcount),
return bursts
def efficienciesFromBursts(config, bursts):
QD = config.get("Dye Constants", "QD")
QA = config.get("Dye Constants", "QA")
effs = []
for burst in bursts:
effs.append(burst.getEfficiency(QD, QA))
return array(effs)
def sizesFromBursts(bursts, corrected = False, QD = 0., QA = 0.):
sizes = []
for burst in bursts:
if not corrected:
sizes.append(burst.bsize)
else:
sizes.append(burst.bsizeCorr(QD, QA))
return array(sizes)
def writeOutputFiles(options, config, bursts):
print
print "================================ Calculation complete ========================="
print "Preparing output."
if options.binaryofile:
print "Binary output requested, this may take a while..."
with open(options.binaryofile, "w") as fh:
cPickle.dump(bursts, fh)
print "Binary output (pickled data) written to ", options.binaryofile
if options.efficiencyofile:
print "Burst efficiency output requested."
with open(options.efficiencyofile, "w") as fh:
savetxt(fh, efficienciesFromBursts(config, bursts))
print "Burst efficiencies written to ", options.efficiencyofile
if options.burstsizeofile:
print "Burst size output requested."
with open(options.burstsizeofile, "w") as fh:
norm = sizesFromBursts(bursts)
corr = sizesFromBursts(bursts, corrected = True, QD = config.get("Dye Constants", "QD"), QA = config.get("Dye Constants", "QA"))
savetxt(fh, array((norm, corr)).T)
print "Burst size written to ", options.burstsizeofile
if options.burstcompofile:
print "Burstcomposition output requested."
with open(options.burstcompofile, "w") as fh:
for myburst in bursts:
fh.write("%d %d %d %d\n" % (myburst.donorphot, myburst.acceptorphot, myburst.donortherm, myburst.acceptortherm))
print "Burstcomposition written to ", options.burstcompofile
if options.endtimeofile:
print "Endtime output requested."
with open(options.endtimeofile, "w") as fh:
for burst in bursts:
for phot in burst.photons:
fh.write("%f " % phot.endtime)
fh.write("\n")
print "Endtimes written to ", options.endtimeofile
if options.decaytimeofile:
print "Decaytime output requested."
with open(options.decaytimeofile, "w") as fh:
for burst in bursts:
for phot in burst.photons:
fh.write("%f " % phot.duration)
fh.write("\n")
print "Decaytimes written to ", options.decaytimeofile
print "Finished!"
def readTrajAndClasses(options):
print "\nReading trajectories recursively from directory \"%s\"." % options.trajdirectory
trajectories = createTrajectoryList(options.trajdirectory, options.trajformat)
if len(trajectories) == 0:
raise ValueError("No trajectories found. Did you specify the correct format with the -r switch? Exiting.")
readTrajs(trajectories, options.trajformat)
print "\nReading ensemble probabilities"
eprobabilities = readProbabilities(options.pbfile)
assignTrajProbabilityClasses(trajectories, eprobabilities)
print "Removing empty classes"
eprobabilities = cleanProbabilities(trajectories, eprobabilities)
return trajectories, eprobabilities
def readConfigAndAssignFRETRate(options, trajectories):
print "Reading configuration file \"%s\"." % options.configfilename
config = getFRETConfig(options.configfilename)
calcKineticRatesFromConfig(config)
config.set("System", "verbose", 0)
if options.rseed:
print "Setting up RNG seed to %d" % options.rseed
random.seed(options.rseed)
setPhotonGenerator(config)
calcFRETRates(trajectories, config)
return config
def runMCFRET(options):
trajectories, eprobabilities = readTrajAndClasses(options)
config = readConfigAndAssignFRETRate(options, trajectories)
config.sethidden("Burst Size Distribution", "bsdfile", options.expbfile, str)
print
print "================================ Input prepared ========================="
print
print "Starting efficiency calculation"
ncpu = config.get("System", "ncpu")
if ncpu == -1:
ncpu = autodetectCPUs(config)
if options.prffile:
print "THIS IS A PROFILING RUN! - will write to logfile and run with only process", options.prffile
import cProfile
cProfile.runctx('bursts = calculateBursts(trajectories,eprobabilities,config,%d,%d)' % (config.get("Monte Carlo", "nbursts"), random.randint(0, sys.maxint)), globals(), locals(), options.prffile)
print "Profiling runs write not output..."
elif ncpu > 1:
bursts = doMultiprocessRun(options, config, trajectories, eprobabilities)
else:
print "Doing single process run."
config.set("System", "verbose", 1)
print "Will calculate efficiencies from", config.get("Monte Carlo", "nbursts"), "bursts."
print "Setting up burst generator."
bursts = calculateBursts(trajectories, eprobabilities, config, config.get("Monte Carlo", "nbursts"), random.randint(0, sys.maxint))
if not options.prffile:
writeOutputFiles(options, config, bursts)
def runMultiprocessPhotonFlooding(trajectories, config):
ncpu = config.get("System", "ncpu")
print "Preparing %d clients for burst generation." % ncpu
pool = multiprocessing.Pool(ncpu)
results = []
print "Setting up jobs for %d trajectories." % (len(trajectories))
config.set("System", "verbose", 0)
for traj in trajectories:
trajs = {}
trajs[traj] = trajectories[traj]
config.makeReadonly()
res = pool.apply_async(floodTrajsWithPhotons, (trajs, config, random.randint(0, sys.maxint)))
results.append(res)
bursts = []
for ene, res in enumerate(results, start = 1):
restrajs = res.get()
for restraj in restrajs:
trajectories[restraj] = restrajs[restraj]
print "\r%6d of %6d jobs processed." % (ene, len(trajectories)),
return bursts
def autodetectCPUs(config):
print "Determining number of available cpu's..."
print "-> %d cpus detected" % multiprocessing.cpu_count()
ncpu = multiprocessing.cpu_count()
config.set("System", "ncpu", ncpu)
return ncpu
def runTrajPhotonFlooding(trajectories, config):
print
print "================================ Input prepared ========================="
print
print "Starting trajectory processing"
ncpu = config.get("System", "ncpu")
if ncpu == -1:
ncpu = autodetectCPUs(config)
if ncpu > 1:
runMultiprocessPhotonFlooding(trajectories, config)
else:
config.set("System", "verbose", 1)
print "Doing single process run."
trajectories = floodTrajsWithPhotons(trajectories, config, random.randint(0, sys.maxint))
def runTrajPrbAdd(options):
trajectories, eprobabilities = readTrajAndClasses(options)
if options.configfilename:
config = readConfigAndAssignFRETRate(options, trajectories)
runTrajPhotonFlooding(trajectories, config)
else:
print "No config file specified, assigning class probabilities only."
config = None
print "Writing to", options.outtrajfile
with open(options.outtrajfile, "w") as fh:
writeRKProbTraj(fh, trajectories, eprobabilities, config)
def validateOptions(options):
if not options.efficiencyfile:
print "No efficiency file (-e) specified."
sys.exit(1)
if not os.path.exists(options.efficiencyfile):
print "Efficiency file %s does not exist." % options.efficiencyfile
sys.exit(1)
def runReconstruction(options):
config = getReconstructionConfig(options.configfile)
config.sethidden("Burst Size Distribution", "bsdfile", options.expbfile, str)
if options.rseed:
print "Setting up RNG seed to %d" % options.rseed
random.seed(options.rseed)
effhist = readEfficiencies(options.efficiencyfile, config.get("Transfer Matrix", "efficiency bins"))
TM = constructTM(options, config)
r_prdist, mxrange, _e_fitprdist, _fitvals = resolveDistances(config, TM, effhist)
if options.tmplotfile:
print "Writing Transfer Matrix Plot"
TM.plotToFile(options.tmplotfile)
writeDistances(mxrange, r_prdist, options)
def runBurstDistAVGs(options):
if options.rseed:
print "Setting up RNG seed to %d" % options.rseed
random.seed(options.rseed)
trajectories, eprobabilities = readTrajAndClasses(options)
normalizeTrajProbForSpecies(trajectories, eprobabilities)
config = getDistAVGConfig(options.configfilename)
config.sethidden("Burst Size Distribution", "bsdfile", options.expbfile, str)
distbursts = getDistanceBursts(trajectories, eprobabilities, config)
if options.distoutfile:
print "Burst efficiency output requested."
with open(options.distoutfile, "w") as fh:
savetxt(fh, distbursts)
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | year: 2013

__id__: 7,713,761,270,963 | blob_id: 59c429c2c3565dfd79c7eaf4c7e01964925160ab | directory_id: 2aab3e4a85028d6076b82f057a7eef2d55b737bb | path: /README/inputs.conf.spec | content_id: c29bd5cb29af593928cdbc3c940d34f6d9793fcc | detected_licenses: ["CC0-1.0"] | license_type: permissive | repo_name: freemotionstudios/splunkcasper | repo_url: https://github.com/freemotionstudios/splunkcasper | snapshot_id: dfcb8777384170b9ef4891818a5f0da7651b158c | revision_id: 282f1e42a90e92b6466b24f1f47cd9fa5800b08d | branch_name: refs/heads/master | visit_date: 2020-04-25T20:54:49.226784 | revision_date: 2014-05-31T14:40:18 | committer_date: 2014-05-31T14:40:18 | github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_* fields: null | content:
[splunkcasper://<name>]
JSSURL= <value>
username= <value>
password= <value>
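A hypothetical filled-in stanza matching the spec above (all values are placeholders, not taken from the source):

[splunkcasper://jss_inventory]
JSSURL = https://jss.example.com:8443
username = api_user
password = secret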
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | year: 2014

__id__: 6,562,710,074,542 | blob_id: 84b68182bed8009b5c2c618a654187bef5cf6265 | directory_id: c7f73033020bcde8395fe6e6cc88be580dc92065 | path: /kobas/tests/TestDiscover.py | content_id: 3bfdd41b45fee7b7ffbb0c241b84f46247fa18cf | detected_licenses: ["LicenseRef-scancode-biopython"] | license_type: non_permissive | repo_name: Gmw657/kobas | repo_url: https://github.com/Gmw657/kobas | snapshot_id: 36303d3c867d73af7d9e10cdf7e48e4123ccf1a4 | revision_id: 65da2e17a9bf5c7e911e90a53f21abbdb2fec5b9 | branch_name: refs/heads/master | visit_date: 2021-05-26T14:23:42.333182 | revision_date: 2010-03-07T19:17:21 | committer_date: 2010-03-07T19:17:21 | github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_* fields: null | content:
#!/usr/bin/env python
"""unittest for discovery.py"""
import os,sys, unittest
from Cheetah.Template import Template
from kobas import config, discover, exception, tests
class TestTwoSampleTest(unittest.TestCase):
def testInitFalse(self):
sample1 = []
sample2 = []
sample3 = []
self.failUnlessRaises(TypeError, discover.TwoSampleTest, sample1)
self.failUnlessRaises(TypeError,
discover.TwoSampleTest, sample1, sample2, sample3)
def testGetProb(self):
dist = discover.Dist()
dist['red'] = 5
dist['white'] = 4
dist['black'] = 8
dist['green'] = 3
mybinom = discover.BinomTest(discover.Dist(), discover.Dist())
self.failUnlessEqual(mybinom.get_prob(dist, 'red'), 0.25)
self.failUnlessEqual(mybinom.get_prob(dist, 'orange'), 0)
def testFdr(self):
sample1 = discover.dist_from_distfile(
tests.get_test_data('synechocystis'))
sample2 = discover.dist_from_distfile(
tests.get_test_data('s.cerevisiae'))
mybinom = discover.BinomTest(sample1, sample2)
result = mybinom()
result.fdr()
self.failUnlessEqual(len(result[0]), 7)
class TestDist(unittest.TestCase):
def testAdd(self):
dist = discover.Dist()
dist.add('1', 1)
testres = set()
testres.add(1)
self.failUnlessEqual(dist['1'], testres)
dist.add('1', 1)
self.failUnlessEqual(dist['1'], testres)
def testUpdate(self):
dist = discover.Dist()
for i in range(10):
dist['a'] = i
dist.update('b', range(10))
self.failUnlessEqual(dist['a'], dist['b'])
dist.update('c', range(10))
dist.update('c', range(5, 15))
dist.update('d', range(15))
self.failUnlessEqual(dist['c'], dist['d'])
def testSize(self):
dist = discover.Dist()
dist.update('a', range(50))
dist.update('b', range(25,75))
dist.update('c', range(100, 200))
self.failUnlessEqual(dist.size(), len(range(75))+len(range(100,200)))
def testGetProb(self):
dist = discover.Dist()
dist.update('a', range(50))
dist.update('b', range(25,75))
dist.update('c', range(100, 200))
prob = float(50)/175
self.failUnlessEqual(dist.get_prob('a'), prob)
def testFromFile(self):
dist = discover.dist_from_annot_file(
tests.get_test_data('test_annot.txt'))
self.failUnlessEqual(dist.size(), 278)
class TestStatistics(unittest.TestCase):
def testHyper(self):
q,m,n,k = 5, 50, 800, 90
self.failUnlessEqual(
discover.hyper(q,m,n,k), 1-discover.rpy.r.phyper(q-1,m,n,k))
def testBinom(self):
m,n,p = 1, 2, 0.5
self.failUnlessEqual(discover.binom(m, n, p), 0.75)
def testIsValidpValue(self):
tdata = [i * 0.1 for i in range(1,11)]
self.failUnless(discover.is_valid_pvalue(tdata))
fdata = [float('nan'), -0.1, 2]
for val in fdata:
self.failIf(discover.is_valid_pvalue([val]))
def testChisqTest(self):
data = [[254,246], [219,281]]
self.failUnlessAlmostEqual(discover.chisq_test(*data), 0.0312801)
class TestCheetah(unittest.TestCase):
def testCheetah(self):
tmpl = os.path.join(config.getrc()['kobas_home'],
"template", "test_html.tmpl")
data = {'thead':range(7), 'result':[range(7) for i in range(10)], 'title':'Test'}
t = Template(file=tmpl, searchList=[data,])
self.failUnless(str(t).find('$') == -1)
if __name__ == "__main__":
unittest.main()
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | year: 2010

__id__: 5,342,939,358,768 | blob_id: 9fc662daa2625ddc31cc8def510edf06c6759917 | directory_id: 5dd6198cebf0f86fd74faa2d2903829d6d2922a5 | path: /xml/converter/read_freemind.py | content_id: b5df2710980c67df0d0fa64307ef5325b59aa451 | detected_licenses: [] | license_type: no_license | repo_name: MysteriousSonOfGod/propython | repo_url: https://github.com/MysteriousSonOfGod/propython | snapshot_id: 008c55e559e62c4b9a35b910f6f191db4ccbd214 | revision_id: 58c397333377cdaec39e44cea4a25c5bdaef0bca | branch_name: refs/heads/master | visit_date: 2021-12-14T17:06:53.077836 | revision_date: 2013-11-27T15:26:45 | committer_date: 2013-11-27T15:26:45 | github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_* fields: null | content:
#!/usr/bin/env python
# coding: utf-8
from xml.etree import ElementTree
map = ElementTree.parse('crtp-freemind.mm')
docroot = map.getroot()
root = docroot.getchildren()[0]
def walk(e, level=0):
indent = ' '*level
print '%s%s: %s' % (indent, e.tag, e.get('TEXT'))
for child in e.getchildren():
walk(child, level+1)
walk(root)
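walk() prints one line per node, indented by depth, showing the element tag and its TEXT attribute. A sample output shape (node text is hypothetical, not from the source map):

node: Root topic
 node: First child
  node: Grandchild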
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | year: 2013

__id__: 7,112,465,846,019 | blob_id: c2bf3ca1710c43513bd718b3a9e64313657198ab | directory_id: cd102faea14f3bb0cc836def521c6e10704eaec5 | path: /flask_fliki/util.py | content_id: 0a63413cce76412cc2237b9176978cb49a033fd3 | detected_licenses: ["MIT"] | license_type: permissive | repo_name: gkdemaree/fliki | repo_url: https://github.com/gkdemaree/fliki | snapshot_id: 7542279a3abdfdaf3cfeb794b54affd3f2f6831d | revision_id: fa8156368ad5fcb7f816c821be4535a2f6396c37 | branch_name: refs/heads/master | visit_date: 2021-05-14T01:46:59.031566 | revision_date: 2014-03-13T19:28:05 | committer_date: 2014-03-13T19:28:05 | github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_* fields: null | content:
import re
from flask import current_app, flash, url_for
from werkzeug import LocalProxy
_wiki = LocalProxy(lambda: current_app.extensions['fliki'])
#def get_url(endpoint_or_url):
# """Returns a URL if a valid endpoint is found. Otherwise, returns the
# provided value.
#
# :param endpoint_or_url: The endpoint name or URL to default to
# """
# try:
# return url_for(endpoint_or_url)
# except:
# return endpoint_or_url
def get_config(app):
"""Conveniently get the security configuration for the specified
application without the annoying 'WIKI_' prefix.
:param app: The application to inspect
"""
items = app.config.items()
prefix = 'WIKI_'
def strip_prefix(tup):
return (tup[0].replace('WIKI_', ''), tup[1])
return dict([strip_prefix(i) for i in items if i[0].startswith(prefix)])
def config_value(key, app=None, default=None):
"""Get a configuration value.
:param key: The configuration key without the prefix `WIKI_`
:param app: An optional specific application to inspect. Defaults to Flask's
`current_app`
:param default: An optional default value if the value is not set
"""
app = app or current_app
return get_config(app).get(key.upper(), default)
def get_wiki_endpoint_name(endpoint):
return '{}.{}'.format(_wiki.blueprint_name, endpoint)
def url_for_wiki(endpoint, **values):
"""Return a URL for the wiki blueprint
:param endpoint: the endpoint of the URL (name of the function)
:param values: the variable arguments of the URL rule
:param _external: if set to `True`, an absolute URL is generated. Server
address can be changed via `SERVER_NAME` configuration variable which
defaults to `localhost`.
:param _anchor: if provided this is added as anchor to the URL.
:param _method: if provided this explicitly specifies an HTTP method.
"""
endpoint = get_wiki_endpoint_name(endpoint)
return url_for(endpoint, **values)
def clean_url(url):
if url:
url_core = re.compile("{!s}|edit/|edit".format(_wiki.url_prefix))
return re.sub(url_core, "", url)
return None
def bare_url(url):
if url:
return clean_url(url)[1:-1]
return None
def do_flash(message, category=None):
"""Flash a message depending on if the `FLASH_MESSAGES` configuration
value is set.
:param message: The flash message
:param category: The flash message category
"""
if config_value('FLASH_MESSAGES'):
flash(message, category)
def get_message(key, **kwargs):
rv = config_value('MSG_' + key)
return rv[0].format(**kwargs), rv[1]
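A minimal usage sketch (assumed, not part of the module): get_config strips the 'WIKI_' prefix and config_value upper-cases the key, so settings are looked up without the prefix.

from flask import Flask

app = Flask(__name__)                      # hypothetical application
app.config['WIKI_FLASH_MESSAGES'] = True   # hypothetical setting
assert config_value('flash_messages', app=app) is True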
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | year: 2014

__id__: 94,489,313,066 | blob_id: a59cbc6076eab2680d25acd406fa0edd81b2d1a3 | directory_id: 5347ec610a895d356658388c268c45fa5c872dcc | path: /mitzeichner/web.py | content_id: 9bfdf47be8d89cb094e4844c286eebbb16270bc2 | detected_licenses: [] | license_type: no_license | repo_name: pudo/mitzeichner | repo_url: https://github.com/pudo/mitzeichner | snapshot_id: 1e26e529489d026ea693ae32bed8b8a603f594d3 | revision_id: e1d1a28a67ae8bfacfc57111adea6888768f4ca0 | branch_name: refs/heads/master | visit_date: 2020-05-18T10:53:55.577158 | revision_date: 2011-06-24T11:14:52 | committer_date: 2011-06-24T11:14:52 | github_id: 2,737,317 | star_events_count: 0 | fork_events_count: 0 | gha_* fields: null | content:
import colander
from flask import render_template, request, redirect, flash
from core import app, db, Mitzeichner, Delegation
from delegation import check_credentials
from re import split
class DelegationForm(colander.MappingSchema):
agent_name = colander.SchemaNode(colander.String())
agent_location = colander.SchemaNode(colander.String())
theme = colander.SchemaNode(colander.String())
username = colander.SchemaNode(colander.String())
password = colander.SchemaNode(colander.String())
@app.route('/delegate', methods=['POST'])
def delegate():
try:
data = DelegationForm().deserialize(request.form)
if not check_credentials(data.get('username'), data.get('password')):
flash("Deine Anmeldedaten sind inkorrekt.")
else:
delegation = Delegation(data.get('agent_name'),
data.get('agent_location'),
data.get('theme'),
data.get('username'),
data.get('password'))
db.session.add(delegation)
db.session.commit()
flash("Deine Delegation wurde eingerichtet. " +
"Widerruf kommt in Version 2" )
except colander.Invalid:
flash("Fehler in den Eingabedaten!")
return redirect("/")
@app.route('/')
def index():
qs = request.args.get('q', '')
name_parts = [p.lower() for p in split(r"[\.,\s]", qs) if len(p)]
results = []
if len(name_parts):
q = Mitzeichner.query
for part in name_parts:
q = q.filter(Mitzeichner.name.like("%%%s%%" % part))
q = q.order_by(Mitzeichner.name.asc())
q = q.order_by(Mitzeichner.location.asc())
results = q.all()
themes = map(lambda t: t[0], Mitzeichner.themes())
return render_template('index.html', q=qs,
themes=themes, results=results)
if __name__ == "__main__":
app.debug = True
app.run()
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | year: 2011

__id__: 9,191,230,058,079 | blob_id: 9a75c98ea4f8bec7ceabc69385e29cfc63cca4c8 | directory_id: 09bcadf6db98144903929fa7941a5f6e13e56056 | path: /redesign/django-jaxerorg/apps/jaxerhotsauce/views.py | content_id: 00155a473b5dca30447187cd418cc436d2284a03 | detected_licenses: ["GPL-3.0-only", "LicenseRef-scancode-proprietary-license", "LGPL-2.0-or-later", "MPL-1.1", "EPL-1.0", "MIT", "LicenseRef-scancode-aptana-1.0", "W3C", "LicenseRef-scancode-generic-exception", "GPL-1.0-or-later", "GPL-2.0-only", "BSD-3-Clause", "Apache-2.0", "CC-BY-NC-2.5"] | license_type: non_permissive | repo_name: esatterwhite/jaxer-org | repo_url: https://github.com/esatterwhite/jaxer-org | snapshot_id: ef24f9e3a522d5c4b7ea0e5f645a8af491682949 | revision_id: 3a05dd6afa0b298f34796128a1eafad80b46f2e6 | branch_name: refs/heads/master | visit_date: 2020-12-30T18:58:01.952466 | revision_date: 2010-01-27T15:39:49 | committer_date: 2010-01-27T15:39:49 | github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_* fields: null | content:
''' HOTSAUCE VIEWS '''
#from django.core.urlresolvers import reverse
#from hotsauce.models import ChangeSet, WikiPage
from jaxerhotsauce.forms import EditableItemForm
from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect
from django.template.context import RequestContext
from django.contrib.auth.decorators import login_required
from jaxerhotsauce.models import WikiPage, ChangeSet
# Create your views here.
def wiki_list(request):
''' display all wikipages'''
object_list = WikiPage.objects.all()
return render_to_response('hotsauce/hotsauce_index.html',
{'object_list':object_list},
context_instance=RequestContext(request))
@login_required
def add_edit_item(request, item=None, obj_id=None, template_name='hotsauce/hotsuace_wikiitem.html'):
'''docstrings'''
try:
updating = WikiPage.objects.get(pk=obj_id)
except:
updating = None
if request.POST:
if updating is None:
form = EditableItemForm(request.POST, request.FILES, initial={'action':'create'})
else:
form = EditableItemForm(request.POST, request.FILES, instance=updating,initial={'action':'edit'})
if form.is_valid():
form.save()
return HttpResponseRedirect('/')
else:
return render_to_response(template_name, {'form':form}, context_instance=RequestContext(request))
else:
form = EditableItemForm(instance=updating, initial={'author':request.user.id, 'action':'edit'})
return render_to_response(template_name, {'form':form}, context_instance=RequestContext(request))
def view_changes(request, item=None, obj_id=None, template_name='hotsauce/hotsauce_viewchanges.html'):
changeset = ChangeSet.objects.get(pk=obj_id)
html = changeset.display_change_html()
return render_to_response(template_name, {'html':html}, context_instance=RequestContext(request))
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | year: 2010

__id__: 16,604,343,600,864 | blob_id: 3679d3c513b4a595eaab056873ca98533230d197 | directory_id: 4f76e100f360ca10f573b4f869f914d1a0d45063 | path: /Keypoints/Experiments/Average/allAverage.py | content_id: f60bd9bfab0fa163e4a093eedeac7dc11c39d66a | detected_licenses: [] | license_type: no_license | repo_name: amulyakish/IFT6266 | repo_url: https://github.com/amulyakish/IFT6266 | snapshot_id: 7d7896e62dcf91679f77e909e79f1f75c5f915c8 | revision_id: 1652928fbaabb1514dd619acc4ec5a88fa6945ce | branch_name: refs/heads/master | visit_date: 2016-09-05T10:37:30.731306 | revision_date: 2013-05-10T03:44:43 | committer_date: 2013-05-10T03:44:43 | github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_* fields: null | content:
__author__ = "Vincent Archambault-Bouffard"
"""Always answer the average"""
import loadData
import numpy as np
import csv
import os
from PIL import Image, ImageDraw
def sampleImages(X, avg, std):
# Transform to PIL image
for i in np.random.randint(0, 7000, 6):
image = X[i].reshape(96, 96, 1)  # reshape the i-th flattened image to 96x96
image = np.cast['uint8'](image)
image = Image.fromarray(image[:, :, 0])
# Superpose key points
image = drawKeypointsOnImage(image, avg, std)
image.save("averageKeypoints_{0}.png".format(i))
def drawKeypointsOnImage(img, keyPoints, std):
"""
Returns an RGB image with the keypoints added to it.
Green for left side and red for right side. (relative to subject)
Original author = Pierre-Luc Carrier
"""
cp = img.copy().convert("RGB")
draw = ImageDraw.Draw(cp)
draw.setink("#00ff00")
leftFill = (0, 255, 0)
rightFill = (255, 0, 0)
left_eye_center_x = 0
left_eye_inner_corner_x = 4
left_eye_outer_corner_x = 6
left_eyebrow_inner_end_x = 12
left_eyebrow_outer_end_x = 14
mouth_left_corner_x = 22
for i in range(len(keyPoints) / 2):
if keyPoints[i * 2] is not None and keyPoints[i * 2 + 1] is not None:
if i * 2 in [left_eye_center_x,
left_eye_inner_corner_x,
left_eye_outer_corner_x,
left_eyebrow_inner_end_x,
left_eyebrow_outer_end_x,
mouth_left_corner_x,
left_eye_center_x]:
fill = leftFill
else:
fill = rightFill
draw.ellipse((int(keyPoints[i * 2]) - std[i * 2]/2.0, int(keyPoints[i * 2 + 1]) - std[i * 2 + 1]/2.0,
int(keyPoints[i * 2]) + std[i * 2]/2.0, int(keyPoints[i * 2 + 1]) + std[i * 2 + 1]/2.0),
fill=fill)
del draw
return cp
kpList = ['left_eye_center_x',
'left_eye_center_y',
'right_eye_center_x',
'right_eye_center_y',
'left_eye_inner_corner_x',
'left_eye_inner_corner_y',
'left_eye_outer_corner_x',
'left_eye_outer_corner_y',
'right_eye_inner_corner_x',
'right_eye_inner_corner_y',
'right_eye_outer_corner_x',
'right_eye_outer_corner_y',
'left_eyebrow_inner_end_x',
'left_eyebrow_inner_end_y',
'left_eyebrow_outer_end_x',
'left_eyebrow_outer_end_y',
'right_eyebrow_inner_end_x',
'right_eyebrow_inner_end_y',
'right_eyebrow_outer_end_x',
'right_eyebrow_outer_end_y',
'nose_tip_x',
'nose_tip_y',
'mouth_left_corner_x',
'mouth_left_corner_y',
'mouth_right_corner_x',
'mouth_right_corner_y',
'mouth_center_top_lip_x',
'mouth_center_top_lip_y',
'mouth_center_bottom_lip_x',
'mouth_center_bottom_lip_y']
mapping = dict(zip(kpList, range(30)))
indexOfAll = [mapping['left_eye_center_x'], mapping['left_eye_center_y'],
mapping['right_eye_center_x'], mapping['right_eye_center_y'],
mapping['nose_tip_x'], mapping['nose_tip_y'],
mapping['mouth_center_bottom_lip_x'], mapping['mouth_center_bottom_lip_y']]
def splitFullSparse(X, Y):
indexSparse = Y[:, 10] == -1
xFull = X[np.logical_not(indexSparse), :]
yFull = Y[np.logical_not(indexSparse), :]
xSparse = X[indexSparse, :]
ySparse = Y[indexSparse, :]
return xFull, yFull, xSparse, ySparse
def makeSubmission(y, out_path):
submission = []
with open('/Users/Archi/Documents/University/IFT6266/IFT6266/Keypoints/submissionFileFormat.csv', 'rb') as cvsTemplate:
reader = csv.reader(cvsTemplate)
for row in reader:
submission.append(row)
for row in submission[1:]:
keypointName = row[2]
keyPointIndex = mapping[keypointName]
row.append(y[keyPointIndex])
if os.path.exists(out_path):
os.remove(out_path)
with open(out_path, 'w') as cvsTemplate:
writer = csv.writer(cvsTemplate)
for row in submission:
writer.writerow(row)
def printReport(avg, std, report):
print "Name, average, std, 1std, 2std, 3std"
for idx in range(30):
line = ""
line += "{0}, {1:.2f}, {2:.2f}, ".format(kpList[idx], avg[idx], std[idx])
for i in range(1, 4):
line += "{0:.2f}%, ".format(report[idx, i] * 100)
print line
if __name__ == "__main__":
print "allAverage started"
X, Y = loadData.loadFacialKeypointDataset("train")
print X.shape, Y.shape
xFull, yFull, xSparse, ySparse = splitFullSparse(X, Y)
print xFull.shape, yFull.shape
print xSparse.shape, ySparse.shape
Yaverage = np.average(Y, axis=0)
Ystd = np.std(Y, axis=0)
yFullaverage = np.average(yFull, axis=0)
yFullstd = np.std(yFull, axis=0)
print Yaverage.shape
print yFullaverage.shape
nbTotal = X.shape[0]
nbFull = xFull.shape[0]
avg = Yaverage
std = Ystd
for i in range(30):
if i not in indexOfAll:
avg[i] = yFullaverage[i]
std[i] = yFullstd[i]
# Count number of example in Std
report = np.zeros((30, 6))
for i in range(1, 4):
for idx in range(30):
if idx not in indexOfAll:
total = nbFull
xx = yFull
else:
total = nbTotal
xx = Y
count = np.sum((np.abs(xx[:, idx] - avg[idx]) - (1.0 * i * std[idx]) <= 0).astype(int))
maX = np.max((np.abs(xx[:, idx] - avg[idx])))
report[idx, i] = count * 1.0 / total
report[idx, 4] = maX
printReport(avg, std, report)
#makeSubmission(avg, "average_submission.csv")
#sampleImages(X, avg, std)
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | year: 2013

__id__: 7,456,063,259,685 | blob_id: 1103e743aa1bd1ded3ce29f0661b475b9ba7587f | directory_id: 78b4e01da55d44fa1b0974c39dc7c3d7881402e6 | path: /TNanomuncher/Graph.py | content_id: 596f1d6c0cf94408bf271a897e8fd8434e32be27 | detected_licenses: [] | license_type: no_license | repo_name: tt810/HeuristicProblemSolving | repo_url: https://github.com/tt810/HeuristicProblemSolving | snapshot_id: 463c38e90dac30e1be2cbd45bd332b98333d9e54 | revision_id: 738cb642712f955f34d210ce177c859c9861217e | branch_name: refs/heads/master | visit_date: 2016-09-06T13:25:54.567117 | revision_date: 2012-02-02T03:58:57 | committer_date: 2012-02-02T03:58:57 | github_id: 2,602,583 | star_events_count: 0 | fork_events_count: 0 | gha_* fields: null | content:
"""
"""
class Graph(object):
"""
"""
def __init__(self):
self.nodes = []
def addNode(self, node):
"""
"""
self.nodes.append(node)
def uneaten(self):
count = 0
startNode = None
for node in self.nodes:
if node.eaten == False:
count += 1
startNode = node
return (count, startNode)
def isLine(self):
"""
"""
def isSimpleLine():
start = self.nodes[0].id
for node in self.nodes:
if node.numberOfNeighbors >2:
return -1
if node.numberOfNeighbors == 1:
start = node.id
return start
result = isSimpleLine()
if result != -1:
return result
else:
#Extra check:
counter1=0
counter3=0
start = self.nodes[0].id
for node in self.nodes:
if node.numberOfNeighbors == 3:
counter3 +=1
if counter3 >1: return -1
if node.numberOfNeighbors == 4:
return -1
if node.numberOfNeighbors == 1:
start = node.id
counter1 +=1
if counter1>1: return -1
if counter3 != 1 or counter1 != 1:
return -1
return start
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | year: 2012

__id__: 10,943,576,722,902 | blob_id: 4b93845cbc974cb3e5975eb4bfd54719da409765 | directory_id: 1eb6c52617dcc206a3089c442911a4531cf3b3b4 | path: /sudoku_checker.py | content_id: 312941e16125a49555c5d9d734c75a7fe1f42a15 | detected_licenses: [] | license_type: no_license | repo_name: avastjohn/Interview_Practice | repo_url: https://github.com/avastjohn/Interview_Practice | snapshot_id: cb3ddae32cfe4c534dfd25c1bfe5156477c46998 | revision_id: 12734a63d407916421ba575a5bc5b36e32888be8 | branch_name: refs/heads/master | visit_date: 2021-01-02T08:19:55.100948 | revision_date: 2014-03-18T21:07:04 | committer_date: 2014-03-18T21:07:04 | github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_* fields: null | content:
solved = [[2,4,8,3,9,5,7,1,6],
[5,7,1,6,2,8,3,4,9],
[9,3,6,7,4,1,5,8,2],
[6,8,2,5,3,9,1,7,4],
[3,5,9,1,7,4,6,2,8],
[7,1,4,8,6,2,9,5,3],
[8,6,3,4,1,7,2,9,5],
[1,9,5,2,8,6,4,3,7],
[4,2,7,9,5,3,8,6,1],]
false_solved = [[2,4,8,3,9,5,7,1,6],
[5,7,1,6,2,8,3,4,9],
[9,3,6,7,4,1,5,8,2],
[6,8,2,5,3,9,1,7,4],
[3,5,9,1,7,4,6,2,8],
[7,1,4,8,6,2,9,5,3],
[8,6,3,4,1,7,2,9,5],
[1,9,5,2,8,6,4,3,7],
[4,2,7,9,5,3,8,1,6]]
false_solved2 = [[1,2,3,4,5,6,7,8,9],
[2,3,4,5,6,7,8,9,1],
[3,4,5,6,7,8,9,1,2],
[4,5,6,7,8,9,1,2,3],
[5,6,7,8,9,1,2,3,4],
[6,7,8,9,1,2,3,4,5],
[7,8,9,1,2,3,4,5,6],
[8,9,1,2,3,4,5,6,7],
[9,1,2,3,4,5,6,7,8]]
def check_nine(nine_list):
d = {}
for num in nine_list:
if d.get(num):
return False
else:
d[num] = True
return True
def sudoku_checker(matrix):
for i in range(9):
#check rows
if not check_nine(matrix[i]):
return False
#check columns
col = []
for j in range(9):
col.append(matrix[j][i])
if not check_nine(col):
return False
#check boxes
if i % 3 == 0:
box1 = []
box2 = []
box3 = []
for j in range(9):
if j<3:
box1 += (matrix[j][i:i+3])
elif j < 6:
box2 += (matrix[j][i:i+3])
else:
box3 += (matrix[j][i:i+3])
if not (check_nine(box1) and check_nine(box2) and check_nine(box3)):
return False
return True
print sudoku_checker(solved)
print sudoku_checker(false_solved)
print sudoku_checker(false_solved2)
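# Expected output of the three checks above:
#   True   (valid solution)
#   False  (swapping the last two digits duplicates values in the final two columns)
#   False  (every row and column is valid, but the 3x3 boxes repeat digits)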
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | year: 2014

__id__: 9,345,848,842,352 | blob_id: b0f212871893a705bce36d2d31bd2f011c4f33cc | directory_id: 785abe9ffc2c32b81daeb33fe9b7ba85b4226f1a | path: /core/config.py | content_id: 84c25834bb4d64fb40c3c6cd47382ebe76f5ce74 | detected_licenses: [] | license_type: no_license | repo_name: gethinzhang/monitor_api | repo_url: https://github.com/gethinzhang/monitor_api | snapshot_id: 23a6299352dd71b375eafe49ce450908a41f802f | revision_id: 73924fca0033fefebf8a6cd1bb28fd97ca29f6de | branch_name: refs/heads/master | visit_date: 2016-08-04T22:59:00.905533 | revision_date: 2014-05-08T05:14:50 | committer_date: 2014-05-08T05:14:50 | github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_* fields: null | content:
PORT = 9989
SERVER_HOST = 'localhost'
SERVER_PORT = 8002
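A minimal usage sketch (assumed; the import path follows the file's location at /core/config.py):

from core import config

print("monitor API on port %d, backend at %s:%d"
      % (config.PORT, config.SERVER_HOST, config.SERVER_PORT))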
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | year: 2014

__id__: 13,151,189,898,243 | blob_id: 83ec4205320b43e2163a8acfe600beae029e4f7b | directory_id: d903d15eed87cfa708c411f74fdf5a4486cb20c6 | path: /src/hopkinsTesting.py | content_id: b24d9b2984f9610cfa87658a5bc49dab2aba9ce0 | detected_licenses: ["BSD-2-Clause"] | license_type: permissive | repo_name: daniel-vainsencher/regularized_weighting | repo_url: https://github.com/daniel-vainsencher/regularized_weighting | snapshot_id: 1275bda26f87b9d1797f1683a058051b61be437c | revision_id: b5f6f632c1ad0c9862f800b9df15a560aed9ef17 | branch_name: refs/heads/master | visit_date: 2020-12-24T16:59:01.132249 | revision_date: 2014-10-30T13:48:47 | committer_date: 2014-10-30T13:48:47 | github_id: 19,425,090 | star_events_count: 0 | fork_events_count: 1 | gha_* fields: null | content:
# -*- coding: utf-8 -*-
"""
Created on Thu May 17 10:58:33 2012
@author: danielv
"""
import time
from itertools import count
from numpy import (array, sqrt, arange, linspace, unique, median, ceil,
flatnonzero)
from numpy.random import randn, rand
from matplotlib.pyplot import (plot, show, subplot, title, semilogy,
figure, legend)
from hopkinsUtilities import loadSet, hopkinsDatasetDimensions
from utility import meanMinSquaredLoss, segmentationError
import alternatingAlgorithms
import weightedModelTypes
import utility
#name = 'three-cars'
#name = '1RT2TC'
def subsetsAsUniformDistributions(S):
dists = []
for s in unique(S):
subset = (S == s) * 1.0
c = sum(subset)
dists.append(subset / c)
return dists
def runAlgorithm(data, d, k):
W = None
alphas = 10.0 ** linspace(-4, 1, 6)
#alphas = 10.0 ** array([-2,-1,0,-2,-3,-2])
expNum = len(alphas)
allStates = []
for (alpha, i) in zip(alphas, count(1)):
states = jointlyPenalizedMultiplePCA(data, d, alpha, W, k=k)
allStates.append(states)
weightVectors = [s.weights for s in states]
W = array(weightVectors)
figure()
for (states, alpha, i) in zip(allStates, alphas, count(1)):
subplot(expNum, 1, i)
weightVectors = [s.weights for s in states]
W = array(weightVectors)
plot(W.T)
title(r'Run with $\alpha $ = %f' % alpha)
figure()
for (states, alpha, i) in zip(allStates, alphas, count(1)):
subplot(expNum, 1, i)
lossVectors = [s.squaredLosses() for s in states]
L = array(lossVectors)
semilogy(L.T)
title(r'Run with $\alpha $ = %f' % alpha)
show()
def preprocessHopkins(name):
(rawData, S) = loadSet('/home/danielv/Research/Datasets/Hopkins155',
name)
if S.shape[0] == 0:
print('Bad name: %s' % name)
return (0, 0, 0, 0, 0, 0)
dims = hopkinsDatasetDimensions(rawData, S)
d = int(ceil(median(dims)))
k = len(dims)
sufficientDimension = sum(dims)
lowData = utility.PCA(rawData, sufficientDimension)
lowNormedData = lowData / sqrt(sum(lowData ** 2, 0))
data = lowNormedData
_, q = lowNormedData.shape
return (dims, S, data, d, k, q)
def findBadNames(names):
for name in names:
(dims, S, data, d, k, q) = preprocessHopkins(name)
if dims == 0:
print name
def findBalancedSets(names):
summary = []
for name in names:
(dims, S, data, d, k, q) = preprocessHopkins(name)
if dims == 0:
print(name + 'is bad')
continue
classSizes = [flatnonzero(S == v).size for v in unique(S)]
summary.append((float(min(classSizes)) / max(classSizes), name))
summary.sort(reverse=True)
return zip(*summary)
def testLloydsOnMany(names):
summary = []
for name in names:
(dims, S, data, d, k, q) = preprocessHopkins(name)
if dims == 0:
print('Directory named: %s has no data.' % name)
continue
print('Dataset %s, k=%d, dims = %s, using d=%d' % (name, k, dims, d))
toplineStateSet = [weightedModelTypes.MultiPCAState(data[:, :], w, d)
for w
in subsetsAsUniformDistributions(S)]
toplineMeanError = meanMinSquaredLoss(toplineStateSet)
allRuns = [runLloydsNonString(data, d, k) for _ in arange(10)]
allStateSets = [sSet for run in allRuns for sSet in run]
allMSEs = array([meanMinSquaredLoss(sSet) for sSet in allStateSets])
bestRun = allMSEs.argmin()
_, bestRunMissRate = segmentationError(allStateSets[bestRun], S)
print('best/topline error: %f' % (toplineMeanError / allMSEs.min()))
print('Misclassification rate: %f' % bestRunMissRate)
summary.append((name, allMSEs, S.size,
bestRunMissRate, toplineMeanError))
return summary
def testOnMany(names, internalQuality, iters=10):
summary = []
for name in names:
(dims, S, data, d, k, q) = preprocessHopkins(name)
if dims == 0:
print('Directory named: %s has no data.' % name)
continue
summary.append(testDataSet(name, S, dims, data,
k, d, internalQuality, iters=iters))
return summary
def runOnManyExtraNoise(names, internalQuality, iters=10, noiseLevel=0):
summary = []
totalCount = len(names)
for (i, name) in zip(count(1), names):
(dims, S, data, d, k, q) = preprocessHopkins(name)
if dims == 0:
print('Directory named: %s has no data.' % name)
continue
summary.append(testDataSet(name, S, dims, data,
k, d, internalQuality, iters=iters,
noiseLevel=noiseLevel))
print('Finished %d out of %d' % (i, totalCount))
print(time.localtime())
return summary
def addNoise(data, level):
D, q = data.shape
raw = data + randn(D, q) * level / rand(q)
return raw / raw.sum(0)
def testDataSet(name, S, dims, data, k, d, internalQuality, iters=10, noiseLevel=0):
print('Dataset %s, k=%d, dims = %s, using d=%d' % (name, k, dims, d))
data = addNoise(data, noiseLevel)
allRuns = [runString(data, d, k) for _ in arange(iters)]
allStateSets = [sSet for run in allRuns for sSet in run]
allQualities = array([internalQuality(sSet) for sSet in allStateSets])
bestRun = allQualities.argmin()
_, missRates = zip(*[segmentationError(sSet, S) for sSet in allStateSets])
missRates = array(missRates)
allLloydRuns = [runLloydsNonString(data, d, k) for _ in arange(iters)]
allLloydStateSets = [sSet for run in allLloydRuns for sSet in run]
allLloydQualities = array([internalQuality(sSet)
for sSet in allLloydStateSets])
bestLloydRun = allLloydQualities.argmin()
_, lloydRunMissRates = zip(*[segmentationError(sSet, S)
for sSet in allLloydStateSets])
lloydRunMissRates = array(lloydRunMissRates)
toplineStateSet = [weightedModelTypes.MultiPCAState(data[:, :], w, {'d': d})
for w
in subsetsAsUniformDistributions(S)]
toplineQuality = internalQuality(toplineStateSet)
_, toplineMissRate = segmentationError(toplineStateSet, S)
print('our:Lloyds:topline quality: %f:%f:%f'
% (allQualities.min(),
allLloydQualities.min(),
toplineQuality))
print('Misclassification rate: (our:Lloyds:topline) %f:%f:%f' %
(missRates[bestRun], lloydRunMissRates[bestLloydRun], toplineMissRate))
return (
name, S.size, allQualities, allLloydQualities, toplineQuality, missRates, lloydRunMissRates, toplineMissRate)
def toplineOnMany(names):
summary = []
for name in names:
(dims, S, data, d, k, q) = preprocessHopkins(name)
if dims == 0:
print('Directory named: %s has no data.' % name)
continue
print('Dataset %s, k=%d, dims = %s, using d=%d' % (name, k, dims, d))
        toplineStateSet = [weightedModelTypes.MultiPCAState(data[:, :], w, {'d': d})
for w
in subsetsAsUniformDistributions(S)]
_, missClassificationRate = segmentationError(toplineStateSet, S)
toplineMeanError = meanMinSquaredLoss(toplineStateSet)
summary.append((name, S.size,
missClassificationRate, toplineMeanError))
return summary
def runString(data, d, k, lowAlpha=10 ** -2, highAlpha=10 ** 1):
#alphas = 10.0 ** linspace(lowAlpha, highAlpha, 6)
#initStates = jointlyPenalizedMultiplePCAInitialStates(data, d, alphas[0], k=k)
#allStates = iterates(initStates, alphas, lambda s, alpha:
# learnJointlyPenalizedMultipleModels(s, alpha))
#return allStates
# alpha between 10 and 1000:
#alpha = 10 ** (rand() * 3 - 2)
dataDim, n = data.shape
# alpha between 0.1n and 10n, where n = # data points
alpha = n * (10 ** (rand() * 2 - 1))
print "alpha %s" % alpha
return [alternatingAlgorithms.learnJointlyPenalizedMultipleModels(
alternatingAlgorithms.jointlyPenalizedInitialStates(data, weightedModelTypes.MultiPCAState, alpha, k, {'d': d}),
alpha)]
def runLloydsNonString(data, d, k):
initStates = alternatingAlgorithms.lloydsInitialStates(data, weightedModelTypes.MultiPCAState, k=k,
modelParameters={'d': d})
return [alternatingAlgorithms.lloydsAlgorithm(initStates)]
def showMissclassificationRates(summary):
(mmsers, missRates, sizes, setNumber) = zip(
*[[MSEs.min() / t, missRate, numPoints, i]
for ((n, MSEs, numPoints, missRate, t), i)
in zip(summary, count(0))])
figure()
missRates = array(missRates)
sizes = array(sizes)
missRatesOrder = missRates.argsort()
semilogy(missRates[missRatesOrder], marker='.')
semilogy(sizes[missRatesOrder], marker='.')
    legend(['misclassification rate, if positive', 'points in dataset'])
    title('misclassification rates and dataset sizes.')
    print('Mean misclassification rate: %f' %
          (sum(missRates * sizes) / sum(sizes)))
    print('Median misclassification rate: %f' %
          median(missRates))
show()
def showRepresentationErrors(summary):
(mmsers, missRates, sizes, setNumber) = zip(
*[[MSEs.min() / t, missRate, numPoints, i]
for ((n, MSEs, numPoints, missRate, t), i)
in zip(summary, count(0))])
figure()
mmsers.sort()
semilogy(mmsers, marker='.')
#semilogy([t for (n, MSEs, b, t) in summary])
legend(['best of 60 MSE / oracle MSE', 'baseline']) # topline',
title('Hopkins 155')
show()
# testOnMany(names = ['dancing', 'three-cars', '1RT2TC'])
fewNames = ['2T3RTCR_g23', 'articulated_g12', '2RT3RCT_B_g23']
manyNames = ['2T3RTCR_g23', 'articulated_g12', '2RT3RCT_B_g23',
'2RT3RTCRT_g23', '1RT2RCRT_g12', '2T3RCRTP_g12', '1R2RCT_A_g23',
'three-cars_g13', '1RT2TC_g13', 'cars7', '1R2RCT_B_g23',
'1R2TCR_g13', 'arm', '1RT2RCRT_g23', 'cars10_g12', '2RT3RTCRT_g13',
'articulated', '1RT2RCR_g12', 'cars10_g23', '1R2RCT_B',
'1R2RC_g23']
# knownBad = ['1R2RC']
rest = ['2R3RTC_g23', 'cars6', '1RT2TC', '1R2RCT_A', '2R3RTCRT_g13',
'1RT2TC_g23', '1R2RCT_B_g12', '2RT3RCR_g13', '2T3RCTP_g13',
'1R2TCRT_g12', '2T3RCR', '1R2RCT_A_g13', '1R2RCT_A_g12',
'cars2B_g12', 'kanatani2', '1RT2TCRT_B_g13', '2T3RCRTP_g13',
'2RT3RC_g12', 'kanatani3', '2R3RTC', '1R2RCR_g13', '1R2RC_g12',
'2RT3RC', '2T3RCTP', 'cars2_06_g23', '2RT3RC_g23', 'cars9_g13',
'1RT2RCRT_g13', '2R3RTCRT_g23', '2R3RTC_g13', 'cars3_g12', 'dancing',
'2T3RCRT_g23', '1RT2RTCRT_A_g13', 'three-cars', 'cars2_07',
'cars2B', 'cars5_g23', 'cars9_g12', '2R3RTCRT', '2RT3RCT_A_g13',
'2RT3RCT_A', '2T3RCR_g23', '2RT3RCT_A_g23', 'cars9',
'1RT2TCRT_A_g12', 'three-cars_g12', 'cars4', '1R2TCR_g12',
'cars3_g13', 'cars2B_g23', 'truck2', '2RT3RCT_B_g12',
'two_cranes_g23', '2T3RCR_g12', '1RT2TCRT_B_g23', '1R2TCR_g23',
'cars5_g12', 'two_cranes', '1RT2RTCRT_B_g12', '2RT3RCT_B',
'2T3RCR_g13', 'cars1', '1R2RCR', '1R2RCR_g12', '1R2RCR_g23',
'kanatani1', '1R2TCRT_g13', '1RT2RCRT', '2T3RTCR', 'truck1',
'2R3RTC_g12', '2RT3RCR', '2T3RCTP_g12', '1R2RCT_B_g13',
'two_cranes_g13', '1RT2RTCRT_B', '1RT2RCR_g23', '1RT2TCRT_A_g23',
'1RT2RTCRT_A_g23', 'cars10_g13', '2T3RCRT_g12', '1R2RC_g13',
'1RT2TCRT_B', '2RT3RCT_A_g12', 'cars2B_g13', 'cars5', '2RT3RCT_B_g13',
'articulated_g13', '2RT3RTCRT', 'people2', '2RT3RCR_g23', '2RT3RC_g13',
'cars3_g23', '1R2TCR', 'head', '1R2TCRT', 'three-cars_g23',
'two_cranes_g12', '2T3RTCR_g13', '1RT2RTCRT_A', '1RT2TCRT_A',
'2T3RTCR_g12', 'cars5_g13', 'articulated_g23', '2R3RTCRT_g12',
'1RT2TCRT_A_g13', 'cars2_06_g13', 'people1', 'cars9_g23',
'1R2TCRT_g23', 'cars2_07_g12', '1RT2RCR_g13', '1RT2RTCRT_B_g13',
'cars3', '2T3RCTP_g23', '2T3RCRT_g13', '1RT2TCRT_B_g12',
'cars2_07_g23', '2RT3RTCRT_g12', 'cars2_06', '2T3RCRTP', 'cars8',
'1RT2TC_g12', 'cars2_07_g13', 'cars2', 'cars10', '1RT2RTCRT_A_g12',
'1RT2RTCRT_B_g23', '2T3RCRT', '2T3RCRTP_g23', 'cars2_06_g12',
'1RT2RCR', '2RT3RCR_g12']
#summary = testOnMany(manyNames+rest)
#summary = testOnMany(fewNames)
#file('summaryFewBackup.txt', 'w').write(repr(summary))
#(brs, mmsers, s) = zip(*[[b/t,mse.min()/t, i]
# for ((n, MSEs, b, t), i) in zip(summary, count(0))
# for mse in MSEs.flatten()])
#name = '1RT2TC'
#(dims, S, data, d, k, q) = preprocessHopkins(name)
#runAlgorithm(data, d, k)
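# A minimal end-to-end sketch (illustrative only; assumes the Hopkins155
# path above exists and the helper modules are importable): preprocess one
# sequence, run each algorithm once, and compare internal quality.
#(dims, S, data, d, k, q) = preprocessHopkins('1RT2TC')
#ourStates = runString(data, d, k)[0]
#lloydStates = runLloydsNonString(data, d, k)[0]
#print('our:Lloyds quality %f:%f' % (meanMinSquaredLoss(ourStates),
#                                    meanMinSquaredLoss(lloydStates)))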
|
UTF-8
|
Python
| false | false | 2,014 |
18,580,028,547,162 |
79bd87eb7de6f8902235cf0cad99d844a7bde3f8
|
823bebe27c139ace9ecb5129a08313149f407373
|
/Fibonacci/src/Fibonacci.py
|
e0acc4b52c0429eff81aadc6afe3da645d38af7c
|
[] |
no_license
|
sabbir044/Projects
|
https://github.com/sabbir044/Projects
|
93c6751f5a8a1995ec8af10a36e80ee19d9cf2ee
|
27dbdc2883201e840df3033425b4badb1de8a9fc
|
refs/heads/master
| 2016-09-05T16:57:59.375773 | 2014-08-13T14:54:16 | 2014-08-13T14:54:16 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
'''
Created on Aug 24, 2013
@author: sabbir
'''
class Fibonacci:
    def __init__(self):
        self.ar = [0, 1]
        self.mx = 1
    def findFib(self, n):
        if n > self.mx:
            for i in range(self.mx + 1, n + 1):
                self.ar.append(self.ar[i - 1] + self.ar[i - 2])
            self.mx = n
        return self.ar[n]
def main():
    fib = Fibonacci()
    while True:
        inputString = input("Enter the sequence number or 'exit' to exit: ")
        if inputString == 'exit':
            break
        print(fib.findFib(int(inputString)))
if __name__ == '__main__':
    main()
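# Quick illustrative check of the memoisation (not part of the original
# script): findFib extends the cached list once, then reuses it.
# fib = Fibonacci()
# assert fib.findFib(10) == 55   # fills self.ar up to index 10
# assert fib.findFib(8) == 21    # answered from the cache, no recomputation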
|
UTF-8
|
Python
| false | false | 2,014 |
16,432,544,881,953 |
4d4dc2e9f2e8c1e5c1abfdf4c6f074adf04a6f0f
|
9c538cbec463d7621667a40f397bdb4144559560
|
/gamelib/fx/explosion.py
|
f24919ba4ecefeefa2d13e32ceb82316e39bf115
|
[
"BSD-3-Clause"
] |
permissive
|
sixthgear/nanobiotics
|
https://github.com/sixthgear/nanobiotics
|
7c3c5d30649aebc02b2dc5b7cedd890215ed8121
|
ff69623d96c2078ea4e949eb283f0e23a981c9f6
|
refs/heads/master
| 2020-12-25T14:12:56.218422 | 2011-09-21T10:44:46 | 2011-09-21T10:44:46 | 2,448,596 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import pyglet
import random
import math
import lepton
import lepton.controller
import lepton.emitter
import lepton.renderer
import lepton.texturizer
fire_tex = pyglet.resource.texture('puff.png')
spark_tex = pyglet.resource.texture('flare1.png')
class Exploder(object):
"""A Class used to splode stuff up."""
# sound = pyglet.resource.media('explode.wav', streaming=False)
def __init__(self):
self.fire = lepton.ParticleGroup(
controllers=[
lepton.controller.Lifetime(1.50),
lepton.controller.Movement(damping=0.91),
lepton.controller.Growth(10),
lepton.controller.Fader(
fade_in_start=0,
start_alpha=0,
fade_in_end=0.1,
max_alpha=1.0,
fade_out_start=0.75,
fade_out_end=1.50
)
],
renderer=lepton.renderer.BillboardRenderer(lepton.texturizer.SpriteTexturizer(fire_tex.id)))
self.firer = lepton.emitter.StaticEmitter(
template=lepton.Particle(
position=(0,0,0),
size=(190,190,0)),
deviation=lepton.Particle(
position=(20,20,0),
velocity=(600,600,0),
size=(40,40,0),
# up=(0,0,math.pi*2),
# rotation=(0,0,math.pi*0.06),
# age=2,
),
color=[
(0.5,0,0),
(0.5,0.5,0.5),
(0.4,0.1,0.1),
(0.85,0.3,0)
],
)
self.sparks = lepton.ParticleGroup(
controllers=[
# lepton.controller.Gravity((0,-240,0)),
lepton.controller.Lifetime(1.2),
lepton.controller.Movement(damping=0.97),
lepton.controller.Fader(fade_out_start=0.75, fade_out_end=1.2),
],
renderer=lepton.renderer.BillboardRenderer(lepton.texturizer.SpriteTexturizer(spark_tex.id)))
self.sparker = lepton.emitter.StaticEmitter(
template=lepton.Particle(
position=(0,0,0),
color=(1,1,1)),
deviation=lepton.Particle(
position=(1,1,0),
velocity=(600,600,0),
),
size=[(5,5,0), (7,7,0), (10,10,0)]
)
def explode(self, x, y, size=1.0, color=(1,1,1)):
self.firer.template.position = (x,y,0)
self.firer.emit(120, self.fire)
self.sparker.template.position = (x,y,0)
self.sparker.template.color = color
self.sparker.emit(120, self.sparks)
def update(self, dt):
self.fire.update(dt)
self.sparks.update(dt)
def draw(self):
pyglet.gl.glPushAttrib(pyglet.gl.GL_ALL_ATTRIB_BITS)
pyglet.gl.glEnable(pyglet.gl.GL_BLEND)
pyglet.gl.glBlendFunc(pyglet.gl.GL_SRC_ALPHA, pyglet.gl.GL_ONE)
self.sparks.draw()
self.fire.draw()
pyglet.gl.glPopAttrib()
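# Illustrative wiring inside a pyglet app (not part of the original module):
# exploder = Exploder()
# exploder.explode(400, 300, size=1.0, color=(1, 0.5, 0))
# pyglet.clock.schedule_interval(exploder.update, 1 / 60.0)
# ...and call exploder.draw() from the window's on_draw handler.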
|
UTF-8
|
Python
| false | false | 2,011 |
8,512,625,230,132 |
3ca2ac8ad1f7f014fb24b4d41a531d33a0e8809c
|
d75dda4aa4e454a8ef6ce04cc8cb1c8aa4744c88
|
/4_symb/flyweight/flyw.py
|
6f486aa8fdffed63b7f70305c38c0196e1ddc6ff
|
[] |
no_license
|
kgadek/kpfp
|
https://github.com/kgadek/kpfp
|
22d9d81be40d918de904f40df0fca4625a078925
|
90e19688e0632290748c0e4ba8ac172dea3f5fe6
|
refs/heads/master
| 2021-01-23T12:17:53.501842 | 2012-09-28T17:12:52 | 2012-09-28T17:12:52 | 1,496,299 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import weakref
class BigObj(object):
inst = weakref.WeakValueDictionary()
def __new__(cls, id):
        if id not in cls.inst:
a = object.__new__(cls)
cls.inst[id] = a
return cls.inst[id]
def __init__(self, haslo):
self.haslo = haslo
def getHaslo(self):
return self.haslo
def setHaslo(self, newHaslo):
oldHaslo = self.haslo
self.haslo = newHaslo
return oldHaslo
if __name__ == '__main__':
print "Start"
a = BigObj(2)
print "a=%s" % (a.getHaslo())
b = BigObj(2)
print "b=%s" % (b.getHaslo())
b.setHaslo(3)
print "a=%s" % (a.getHaslo())
print "b=%s" % (b.getHaslo())
|
UTF-8
|
Python
| false | false | 2,012 |
3,899,830,327,014 |
1b3d1ef848c92b475245b6056459b5dc1001ec2e
|
4dc12e918c6f384c8265718fa47ed68bffb914ff
|
/store.py
|
a2815fb4f1ab751fb5f6bb826b67f5bab599db6c
|
[] |
no_license
|
gwongz/yo-food-truck-sf
|
https://github.com/gwongz/yo-food-truck-sf
|
c6ba82ea75b4a5cb45a1b2b1d46122004adfde3c
|
21eda68bad84b6041a60000170f54436147ab549
|
refs/heads/master
| 2020-05-04T22:16:32.979143 | 2014-12-16T19:29:56 | 2014-12-16T19:29:56 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
import redis
import urlparse
redis_url = os.getenv('REDISTOGO_URL', 'redis://localhost:6379')
url = urlparse.urlparse(redis_url)
redis = redis.StrictRedis(host=url.hostname, port=url.port, db=0, password=url.password)
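# Note: the name `redis` now refers to the client instance, shadowing the
# imported module. Illustrative usage elsewhere in the app:
# from store import redis
# redis.incr('yo:count')
# print redis.get('yo:count')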
|
UTF-8
|
Python
| false | false | 2,014 |
8,641,474,226,588 |
15b27b12cf471e4d20d0a6145d1e19312f04d13e
|
267a5156d8962fa80f2ee6559b651ab1d0a0ea47
|
/_dev/ext/supercast/Twisted-0.12.3/twisted/test/test_import.py
|
bb03e89adec2b7ba5d9b948fd6d569aeb9b8b02f
|
[
"LGPL-2.1-only",
"LicenseRef-scancode-other-copyleft",
"LGPL-2.1-or-later"
] |
non_permissive
|
katrinleinweber/songclub
|
https://github.com/katrinleinweber/songclub
|
f3fd75ae692c388bb47ab58dfec56dfc9352a269
|
6e56e477e26f7cc2cd4dee10b267937529d92509
|
refs/heads/master
| 2021-08-28T16:20:52.690090 | 2012-05-29T14:02:50 | 2012-05-29T14:02:50 | 114,024,309 | 0 | 0 | null | true | 2017-12-12T18:19:23 | 2017-12-12T18:19:23 | 2016-05-08T18:04:44 | 2012-05-29T14:03:02 | 8,341 | 0 | 0 | 0 | null | false | null |
# Twisted, the Framework of Your Internet
# Copyright (C) 2001 Matthew W. Lefkowitz
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from pyunit import unittest
class AtLeastImportTestCase(unittest.TestCase):
"""I test that there are no syntax errors which will not allow importing.
"""
failureException = ImportError
def test_misc(self):
"""Test importing other misc. modules
"""
from twisted import copyright
def test_persisted(self):
"""Test importing persisted
"""
from twisted.persisted import dirdbm
from twisted.persisted import styles
def test_internet(self):
"""Test importing internet
"""
from twisted.internet import tcp
from twisted.internet import main
from twisted.internet import app
# from twisted.internet import ssl
from twisted.internet import stdio
from twisted.internet import abstract
from twisted.internet import process
from twisted.internet import task
from twisted.internet import threadtask
def test_spread(self):
"""Test importing spreadables
"""
from twisted.spread import pb
from twisted.spread import jelly
def test_twistedPython(self):
"""Test importing twisted.python
"""
from twisted.python import delay
from twisted.python import hook
from twisted.python import log
from twisted.python import observable
from twisted.python import reference
from twisted.python import reflect
from twisted.python import threadable
from twisted.python import threadpool
from twisted.python import usage
def test_reality(self):
"""Test importing reality
"""
from twisted.reality import reality
from twisted.reality import thing
from twisted.reality import sentence
from twisted.reality import source
from twisted.reality import error
from twisted.reality import player
from twisted.reality import plumbing
from twisted.reality import room
from twisted.reality import container
from twisted.reality import geometry
from twisted.reality import clothing
from twisted.reality import door
from twisted.reality import furniture
from twisted.reality import lock
def test_protocols(self):
"""Test importing protocols
"""
from twisted.protocols import basic
from twisted.protocols import ftp
from twisted.protocols import http
from twisted.protocols import irc
from twisted.protocols import pop3
from twisted.protocols import protocol
from twisted.protocols import smtp
from twisted.protocols import telnet
def test_web(self):
"""Test importing web
"""
from twisted.web import server
from twisted.web import html
from twisted.web import twcgi
from twisted.web import cal
from twisted.web import script
from twisted.web import static
from twisted.web import test
from twisted.web import vhost
def test_words(self):
"""Test importing words
"""
from twisted.words import service
from twisted.words import ircservice
def test_mail(self):
"""Test importing mail
"""
from twisted.mail import mail
from twisted.mail import maildir
from twisted.mail import pb
from twisted.mail import relaymanager
testCases = [AtLeastImportTestCase]
|
UTF-8
|
Python
| false | false | 2,012 |
3,702,261,811,672 |
7bb48f83e93c7eecc39948b9d13c2914809dada4
|
20673b4fa4f78f75c44452d4ac7f6e2939a99b03
|
/src/citizendesk/feeds/sms/external/frontlinesms.py
|
18f6f9d87e75723e124d4ccbfb2d2cc075eb488e
|
[] |
no_license
|
sourcefabric-innovation/citizendesk-core
|
https://github.com/sourcefabric-innovation/citizendesk-core
|
b894e855e070bd2d2bfc8c4c7247d577b38fc51f
|
4e74e0529514897ba40c99c76382a197f8ecdb20
|
refs/heads/master
| 2021-01-18T13:59:54.646468 | 2014-10-07T13:47:03 | 2014-10-07T13:47:03 | 16,517,729 | 0 | 4 | null | false | 2014-10-07T13:47:04 | 2014-02-04T16:32:04 | 2014-09-03T12:36:26 | 2014-10-07T13:47:03 | 1,630 | 0 | 4 | 0 |
Python
| null | null |
#!/usr/bin/env python
import os, sys, datetime, json, logging
import urllib, urllib2
class SMSConnector(object):
''' sending SMS to a SMS gateway '''
def __init__(self, gateway_url, api_key):
self.gateway_url = gateway_url
self.api_key = api_key
def send_sms(self, message, recipients):
if not message:
return (False, 'no message provided')
if type(recipients) is not dict:
return (False, 'wrong recipients specification')
data = {
'secret': str(self.api_key),
'message': message,
'recipients': []
}
some_recipients = False
if ('phone_numbers' in recipients) and (type(recipients['phone_numbers']) in [list, tuple]):
for phone_number in recipients['phone_numbers']:
phone_number = phone_number.strip()
if not phone_number:
continue
data['recipients'].append({'type': 'address', 'value': phone_number})
if not data['recipients']:
return (False, 'no recipients provided')
send_notice = None
success = True
try:
post_data = json.dumps(data)
req = urllib2.Request(self.gateway_url, post_data, {'Content-Type': 'application/json'})
response = urllib2.urlopen(req)
send_notice = response.read()
response.close()
except Exception, exc:
success = False
err_notice = ''
exc_other = ''
try:
exc_other += ' ' + str(exc.message).strip() + ','
except:
pass
try:
err_notice = str(exc.read()).strip()
exc_other += ' ' + err_notice + ','
except:
pass
if err_notice:
send_notice = err_notice
else:
send_notice = str(exc) + str(exc_other)
return (success, send_notice)
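# Illustrative usage (the gateway URL and API key below are placeholders,
# not values from this project):
# connector = SMSConnector('http://localhost:8130/send', 'secret-key')
# success, notice = connector.send_sms('hello', {'phone_numbers': ['+15550100']})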
|
UTF-8
|
Python
| false | false | 2,014 |
12,463,995,144,591 |
cfec8afb52a53011ae260fc09e150a8c2e349b1b
|
54c4690eaa256526e0db6a56a62517303ab6a411
|
/united_geonames/admin.py
|
7f68a94c16d5e5d34e6f01fb8aaea5da93c2f0b6
|
[
"GPL-3.0-only"
] |
non_permissive
|
justinasjaronis/hpn
|
https://github.com/justinasjaronis/hpn
|
132d30050390293945b11099411000f1cfe69bbf
|
b63a8eb707db09a81f4f58cf0d18f41842957e0c
|
refs/heads/master
| 2021-01-20T12:03:44.317624 | 2014-11-30T20:53:07 | 2014-11-30T20:53:07 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
from django.contrib import admin
from .forms import UnitedGeoNameSynonimForm, GeoNamesMatchingLogMatchedPlacesForm
from main.models.vietos.models import Vietos
from united_geonames.filters import GisNgramPercentage, GisOrNgramPercentage, ProjectDatabaseFilter
from united_geonames.models import UnitedGeoName, UnitedGeoNameSynonim, GeoNamesMatchingLogMatch, GeoNamesMatchingLogMatchedPlaces, \
UserProject, UserGeoName, Database
from united_geonames.plugins.default import UnitedGeoNamePlugin
def create_only_synonym(ugn_object, place_name, content_type, object_id, coordinates):
UnitedGeoNameSynonim.objects.get_or_create(
united_geoname=ugn_object,
name=place_name,
content_type=content_type,
object_id=object_id,
coordinates=coordinates,
)
def vietos_coordinates(matched_object):
try:
return Vietos.objects.using('default').get(viid=matched_object.object_id).vikoordinates_set.all()[0].koord
except:
return None
def get_place_detail_accord_by_content_type(obj):
# Vietos
if obj.content_type.id == 44:
obj.model_matching = Vietos.objects.using('default').get(viid=obj.object_id)
place_name = obj.model_matching.pavadinimas
coordinates = vietos_coordinates(obj)
region = obj.model_matching.apskritis
country = obj.model_matching.salis
try:
subregion = obj.model_matching.subregion.name_std
except:
subregion = None
# Norway
if hasattr(obj.model_matching, 'enh_snavn'):
place_name = obj.model_matching.enh_snavn
coordinates = obj.model_matching.coordinates
region = None
subregion = None
country = None
# Pleiades
if hasattr(obj.model_matching, 'title'):
place_name = obj.model_matching.title
coordinates = obj.model_matching.coordinates
region = None
subregion = None
country = None
return (obj.model_matching, place_name, coordinates, region, subregion, country)
def create_new_geoname(modeladmin, request, queryset):
for obj in queryset:
place, place_name, coordinates, region, subregion, country = get_place_detail_accord_by_content_type(obj)
UnitedGeoNamePlugin().create_united_geo_with_synonym(place, place_name, coordinates, region, subregion, country)
obj.delete()
create_new_geoname.short_description = u'Create new United Geo Name'
def merge_with_proposed_geoname(modeladmin, request, queryset):
for obj in queryset:
for matched in obj.matched.all():
if matched.best_match:
ungeo_object = matched.united_geoname
matched_object = matched.matchinglogmatch
place, place_name, coordinates, region, subregion, country = get_place_detail_accord_by_content_type(obj)
create_only_synonym(ungeo_object, place_name, matched_object.content_type, matched_object.object_id, coordinates)
obj.delete()
merge_with_proposed_geoname.short_description = u'Link to the proposed Geo Name'
def merge_with_selected_geoname(modeladmin, request, queryset):
link_with_pk = request.POST['import_place']
for obj in queryset:
for matched in obj.matched.all():
matched_object = matched.matchinglogmatch
ungeo_object = matched.united_geoname
if str(matched.pk) == str(link_with_pk):
place, place_name, coordinates, region, subregion, country = get_place_detail_accord_by_content_type(obj)
create_only_synonym(ungeo_object, place_name, matched_object.content_type, matched_object.object_id, coordinates)
obj.delete()
merge_with_selected_geoname.short_description = u'Link with the selected Geo Name'
class UnitedGeoNameSynonimInline(admin.StackedInline):
model = UnitedGeoNameSynonim
form = UnitedGeoNameSynonimForm
extra = 0
raw_id_fields = ('united_geoname', 'content_type', 'synonim_content_type',)
class UnitedGeoNameAdmin(admin.ModelAdmin):
model = UnitedGeoName
inlines = [
UnitedGeoNameSynonimInline,
]
search_fields = ['main_name', ]
ordering = ['main_name']
class GeoNamesMatchingLogMatchedPlacesInline(admin.StackedInline):
model = GeoNamesMatchingLogMatchedPlaces
form = GeoNamesMatchingLogMatchedPlacesForm
extra = 0
class GeoNamesMatchingLogMatchedPlacesAdmin(admin.ModelAdmin):
model = GeoNamesMatchingLogMatchedPlaces
form = GeoNamesMatchingLogMatchedPlacesForm
class UnitedGeoNameSynonimAdmin(admin.ModelAdmin):
model = UnitedGeoNameSynonim
class GeoNamesMatchingLogMatchAdmin(admin.ModelAdmin):
actions = [merge_with_proposed_geoname, merge_with_selected_geoname, create_new_geoname]
inlines = [
GeoNamesMatchingLogMatchedPlacesInline,
]
ordering = ('-matching_index',)
list_display = ('place_object', 'start_date', 'matching_index', 'number_of_alternatives', 'matched_objects')
list_filter = (GisNgramPercentage, GisOrNgramPercentage, ProjectDatabaseFilter)
def queryset(self, request):
qs = super(GeoNamesMatchingLogMatchAdmin, self).queryset(request)
if request.user.is_superuser:
return qs
else:
return qs.filter(display_for_users=request.user)
def place_object(self, obj):
# Aruodai vietos (kad nemigruoti duomenu)
if obj.content_type.id == 44:
return Vietos.objects.using('default').get(viid=obj.object_id).pavadinimas
        # Others
else:
return obj.model_matching
    @staticmethod
    def check_if_checked(flag):
        return 'checked' if flag else ''
def matched_objects(self, obj):
if any(obj.matched.all()):
return '<form>{0}</form><br /><a href=\"/admin/{1}/{2}/{3}/?_popup=1">Add another place</a>'.format("<br />".join(
[
"<a href=\"/admin/%s/%s/%d\">The proposed place %s Distance %s %s%s</a> <input type='radio' name='import_place' value='%s' %s/>" %
(i.united_geoname._meta.app_label, i.united_geoname._meta.module_name, i.united_geoname.id, i.united_geoname, i.geographical_distance, i.percentage, '%', i.pk, self.check_if_checked(i.best_match))
for i in obj.matched.all()
]
), obj._meta.app_label, obj._meta.module_name, obj.id)
else:
return '<b>There is no Matched Places</b><br /><a href=\"/admin/{0}/{1}/{2}/?_popup=1">Add place</a>'.format(
obj._meta.app_label, obj._meta.module_name, obj.id
)
matched_objects.allow_tags = True
class UserProjectAdmin(admin.ModelAdmin):
model = UserProject
admin.site.register(UnitedGeoName, UnitedGeoNameAdmin)
admin.site.register(UnitedGeoNameSynonim, UnitedGeoNameSynonimAdmin)
admin.site.register(GeoNamesMatchingLogMatchedPlaces, GeoNamesMatchingLogMatchedPlacesAdmin)
admin.site.register(GeoNamesMatchingLogMatch, GeoNamesMatchingLogMatchAdmin)
admin.site.register(UserProject, UserProjectAdmin)
admin.site.register(UserGeoName)
admin.site.register(Database)
|
UTF-8
|
Python
| false | false | 2,014 |
14,104,672,634,385 |
3f6d60541a58d0e01b479795c17ab0e4e73ead5c
|
1f81cd59e5c4fb8a436f436a2ac66b7bc4b2c54a
|
/core/context_processors.py
|
904c9c2b0ef93d76fbb49b4dc932ebd91d607ff4
|
[] |
no_license
|
ynka/django-zabaglione
|
https://github.com/ynka/django-zabaglione
|
f385499de02606aa6a93ae557873f932e678c8e5
|
5c801ebefa4ea84b41c61bfb1129c7a0149dc782
|
refs/heads/master
| 2021-01-22T07:35:33.638155 | 2011-09-13T18:40:55 | 2011-09-13T18:40:55 | 2,372,065 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.contrib.flatpages.models import FlatPage
from django.contrib.auth.decorators import login_required
@login_required
def add_flatpages(request):
return {'flatpages':FlatPage.objects.all()}
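# Note: wrapping a context processor in login_required means anonymous
# requests receive a redirect response rather than a dict, which the
# template context machinery cannot use.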
|
UTF-8
|
Python
| false | false | 2,011 |
5,411,658,805,767 |
89b1965ec72052bbc6d052a080b5efb63946a833
|
f38012f86e65141559519d7a359bae37b0e3a151
|
/r_os/os_symlinks.py
|
589e89b0efa8035af021d899b29b6e88e995a078
|
[] |
no_license
|
rApeNB/PyMOTW
|
https://github.com/rApeNB/PyMOTW
|
e73607444c86afabafd23d0dab7e4fdd0f60cbf1
|
166a0098ab68863161a518a668e33f543288599d
|
refs/heads/master
| 2020-04-13T11:40:39.880083 | 2014-12-14T15:13:31 | 2014-12-14T15:13:31 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
__author__ = 'rApeNB'
# -*- coding: utf-8 -*-
import os
import tempfile
link_name = tempfile.mktemp()
print 'Creating link %s -> %s' % (link_name, __file__)
os.symlink(__file__, link_name)
stat_info = os.lstat(link_name)
print 'Permissions:', oct(stat_info.st_mode)
print 'Points to:', os.readlink(link_name)
# Clean up
os.unlink(link_name)
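# Note: os.lstat above reports on the link itself; os.stat would follow the
# link and describe this script instead.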
|
UTF-8
|
Python
| false | false | 2,014 |
12,558,484,395,164 |
ae997334e36f4908571356f3ea0959476f50b9bc
|
7a7cb9c8aaef20152170da7cee49b12aa87720fd
|
/mysite/prescriptive/urls.py
|
116d6fe41cd1bcfc5eaa00e0eb2029ae39406945
|
[] |
no_license
|
keni-m-patel/mathnasium
|
https://github.com/keni-m-patel/mathnasium
|
88c2257c97373db0fa6d4a52c5b0b5f81c42f8af
|
af0ae520e03bd40a8d8f4c0547eb071377af7a9a
|
refs/heads/master
| 2018-05-19T00:52:02.542633 | 2014-11-30T20:39:22 | 2014-11-30T20:39:22 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.conf.urls import patterns, url
from prescriptive import views
urlpatterns = patterns('',
url(r'^$', views.index, name='index'),
url(r'^detail/(?P<code>.*)/', views.detail, name='detail'),
url(r'^all/$', views.all, name='all'),
)
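# Note: (?P<code>.*) matches greedily and can include slashes, so a request
# for /detail/a/b/ is captured with code='a/b'.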
|
UTF-8
|
Python
| false | false | 2,014 |
12,936,441,503,876 |
12ef8479c8dfb9b9ccd894d9dfdc3194f1240781
|
c318069d084b547d687dd181ae502ea1885c8eb5
|
/exercices/010/solution.py
|
68ad187f68fc82712b12e3d2b70b1de206953fa6
|
[] |
no_license
|
AgnesK/course-material
|
https://github.com/AgnesK/course-material
|
10e51c6053890bc568843aac8ed499db4635bc1b
|
531e2ce1e6242e295d826357eb55168b192c3007
|
refs/heads/master
| 2021-01-22T19:15:06.175090 | 2014-09-22T19:23:08 | 2014-09-22T19:23:08 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 22 10:41:05 2014
@author: Agnes
"""
for i in range(100):
print("Hello World")
|
UTF-8
|
Python
| false | false | 2,014 |
13,022,340,888,148 |
7bacf483a41f9fa9042f0e47695eeccb49691600
|
3cee10897563d72b6b2feb4de00c4dd1fe369f0c
|
/opentrials/vocabulary/migrations/0002_order.py
|
abd9126750526e58b445a22008ca8c3082a55d0f
|
[
"LGPL-2.1-only",
"GPL-2.0-only"
] |
non_permissive
|
rebec/opentrials
|
https://github.com/rebec/opentrials
|
febad1f18ff35968f21919aae42f4642cd23bc57
|
e0761be69ed1871c3b5e4d6c90fb8f3f04d256e6
|
refs/heads/master
| 2017-04-28T19:48:33.483774 | 2011-12-14T22:07:32 | 2011-12-14T22:07:32 | 2,217,497 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'StudyType.order'
db.add_column('vocabulary_studytype', 'order', self.gf('django.db.models.fields.PositiveIntegerField')(default=0, null=True, blank=True), keep_default=False)
# Adding field 'AttachmentType.order'
db.add_column('vocabulary_attachmenttype', 'order', self.gf('django.db.models.fields.PositiveIntegerField')(default=0, null=True, blank=True), keep_default=False)
# Adding field 'InterventionCode.order'
db.add_column('vocabulary_interventioncode', 'order', self.gf('django.db.models.fields.PositiveIntegerField')(default=0, null=True, blank=True), keep_default=False)
# Adding field 'RecruitmentStatus.order'
db.add_column('vocabulary_recruitmentstatus', 'order', self.gf('django.db.models.fields.PositiveIntegerField')(default=0, null=True, blank=True), keep_default=False)
# Adding field 'TrialNumberIssuingAuthority.order'
db.add_column('vocabulary_trialnumberissuingauthority', 'order', self.gf('django.db.models.fields.PositiveIntegerField')(default=0, null=True, blank=True), keep_default=False)
# Adding field 'StudyPurpose.order'
db.add_column('vocabulary_studypurpose', 'order', self.gf('django.db.models.fields.PositiveIntegerField')(default=0, null=True, blank=True), keep_default=False)
# Adding field 'CountryCode.order'
db.add_column('vocabulary_countrycode', 'order', self.gf('django.db.models.fields.PositiveIntegerField')(default=0, null=True, blank=True), keep_default=False)
# Adding field 'StudyMasking.order'
db.add_column('vocabulary_studymasking', 'order', self.gf('django.db.models.fields.PositiveIntegerField')(default=0, null=True, blank=True), keep_default=False)
# Adding field 'StudyAllocation.order'
db.add_column('vocabulary_studyallocation', 'order', self.gf('django.db.models.fields.PositiveIntegerField')(default=0, null=True, blank=True), keep_default=False)
# Adding field 'StudyPhase.order'
db.add_column('vocabulary_studyphase', 'order', self.gf('django.db.models.fields.PositiveIntegerField')(default=0, null=True, blank=True), keep_default=False)
# Adding field 'InterventionAssigment.order'
db.add_column('vocabulary_interventionassigment', 'order', self.gf('django.db.models.fields.PositiveIntegerField')(default=0, null=True, blank=True), keep_default=False)
# Adding field 'IcdChapter.order'
db.add_column('vocabulary_icdchapter', 'order', self.gf('django.db.models.fields.PositiveIntegerField')(default=0, null=True, blank=True), keep_default=False)
# Adding field 'DecsDisease.order'
db.add_column('vocabulary_decsdisease', 'order', self.gf('django.db.models.fields.PositiveIntegerField')(default=0, null=True, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'StudyType.order'
db.delete_column('vocabulary_studytype', 'order')
# Deleting field 'AttachmentType.order'
db.delete_column('vocabulary_attachmenttype', 'order')
# Deleting field 'InterventionCode.order'
db.delete_column('vocabulary_interventioncode', 'order')
# Deleting field 'RecruitmentStatus.order'
db.delete_column('vocabulary_recruitmentstatus', 'order')
# Deleting field 'TrialNumberIssuingAuthority.order'
db.delete_column('vocabulary_trialnumberissuingauthority', 'order')
# Deleting field 'StudyPurpose.order'
db.delete_column('vocabulary_studypurpose', 'order')
# Deleting field 'CountryCode.order'
db.delete_column('vocabulary_countrycode', 'order')
# Deleting field 'StudyMasking.order'
db.delete_column('vocabulary_studymasking', 'order')
# Deleting field 'StudyAllocation.order'
db.delete_column('vocabulary_studyallocation', 'order')
# Deleting field 'StudyPhase.order'
db.delete_column('vocabulary_studyphase', 'order')
# Deleting field 'InterventionAssigment.order'
db.delete_column('vocabulary_interventionassigment', 'order')
# Deleting field 'IcdChapter.order'
db.delete_column('vocabulary_icdchapter', 'order')
# Deleting field 'DecsDisease.order'
db.delete_column('vocabulary_decsdisease', 'order')
models = {
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'vocabulary.attachmenttype': {
'Meta': {'ordering': "['order']", 'object_name': 'AttachmentType'},
'description': ('django.db.models.fields.TextField', [], {'max_length': '2000', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
},
'vocabulary.countrycode': {
'Meta': {'ordering': "['description']", 'object_name': 'CountryCode'},
'description': ('django.db.models.fields.TextField', [], {'max_length': '2000', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'submission_language': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'})
},
'vocabulary.decsdisease': {
'Meta': {'ordering': "['order']", 'object_name': 'DecsDisease'},
'description': ('django.db.models.fields.TextField', [], {'max_length': '2000', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
},
'vocabulary.icdchapter': {
'Meta': {'ordering': "['order']", 'object_name': 'IcdChapter'},
'description': ('django.db.models.fields.TextField', [], {'max_length': '2000', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
},
'vocabulary.interventionassigment': {
'Meta': {'ordering': "['order']", 'object_name': 'InterventionAssigment'},
'description': ('django.db.models.fields.TextField', [], {'max_length': '2000', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
},
'vocabulary.interventioncode': {
'Meta': {'ordering': "['order']", 'object_name': 'InterventionCode'},
'description': ('django.db.models.fields.TextField', [], {'max_length': '2000', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
},
'vocabulary.recruitmentstatus': {
'Meta': {'object_name': 'RecruitmentStatus'},
'description': ('django.db.models.fields.TextField', [], {'max_length': '2000', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
},
'vocabulary.studyallocation': {
'Meta': {'ordering': "['order']", 'object_name': 'StudyAllocation'},
'description': ('django.db.models.fields.TextField', [], {'max_length': '2000', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
},
'vocabulary.studymasking': {
'Meta': {'ordering': "['order']", 'object_name': 'StudyMasking'},
'description': ('django.db.models.fields.TextField', [], {'max_length': '2000', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
},
'vocabulary.studyphase': {
'Meta': {'ordering': "['order']", 'object_name': 'StudyPhase'},
'description': ('django.db.models.fields.TextField', [], {'max_length': '2000', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
},
'vocabulary.studypurpose': {
'Meta': {'ordering': "['order']", 'object_name': 'StudyPurpose'},
'description': ('django.db.models.fields.TextField', [], {'max_length': '2000', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
},
'vocabulary.studytype': {
'Meta': {'ordering': "['order']", 'object_name': 'StudyType'},
'description': ('django.db.models.fields.TextField', [], {'max_length': '2000', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
},
'vocabulary.trialnumberissuingauthority': {
'Meta': {'object_name': 'TrialNumberIssuingAuthority'},
'description': ('django.db.models.fields.TextField', [], {'max_length': '2000', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
},
'vocabulary.vocabularytranslation': {
'Meta': {'unique_together': "(('content_type', 'object_id', 'language'),)", 'object_name': 'VocabularyTranslation'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '2000', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '8', 'db_index': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
}
}
complete_apps = ['vocabulary']
|
UTF-8
|
Python
| false | false | 2,011 |
3,186,865,740,243 |
d724f527c0168fbf31e32e5d4906632bdb61aaf7
|
2d2db0a691f655604bbe37af090ecd3a98d65f8b
|
/cihui/handler/common.py
|
e1d3c26398e92750e07f5c351f4ace8cc80094f5
|
[
"MIT"
] |
permissive
|
mahugui/cihui
|
https://github.com/mahugui/cihui
|
c95af7d117a74a82439fa06a024255281ef813d3
|
e6bf9f0bee6779fa2fbe79c240f568878f2da267
|
refs/heads/master
| 2020-05-23T08:04:33.274607 | 2014-10-27T03:20:06 | 2014-10-27T03:20:06 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Geoff Wilson <[email protected]>
import tornado.web
class BaseHandler(tornado.web.RequestHandler):
def get_current_user(self):
# TODO(gmwils): refactor & test
session_key = self.get_secure_cookie('session_id')
if session_key:
user_id, username = str(session_key, encoding='ascii').split('|')
return user_id
return None
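        # Assumed cookie format (inferred from the split above):
        # b'user_id|username', e.g. b'42|alice' yields user id '42'.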
|
UTF-8
|
Python
| false | false | 2,014 |
2,190,433,342,383 |
b86fe30cc89f5c5ffc1a02c4717d9cca0b7d1fc2
|
d8829cbc2d2863f68cb1b447a878dce0ac20a878
|
/scripts/similarity.py
|
0b3a9b6dc34238d22ba5ca5bcb2cc6e4903622dd
|
[] |
no_license
|
XiaoxiaoWang87/InsightPrj
|
https://github.com/XiaoxiaoWang87/InsightPrj
|
170a9757dfdf4669ee2c52322f2f5e5d766ce2a1
|
767a3719fad93ddb9711817f543b5e7b1822f680
|
refs/heads/master
| 2021-01-19T00:41:44.195663 | 2014-10-06T22:28:24 | 2014-10-06T22:28:24 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
import sys
import os
import csv
import time
import datetime
from types import *
import random
import math
import pandas as pd
import numpy as np
import json
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from mpl_toolkits.mplot3d import Axes3D
import pylab as pl
import pymysql as mdb
from scipy import special
from sklearn.metrics.pairwise import euclidean_distances
def main():
# preprocessing data
df = {}
games_played = [60]
db = mdb.connect(user="root", host="localhost", db="nbagamedb", charset='utf8')
cur = {}
with open("../json/names.json", 'r') as file_handle:
names = json.load(file_handle)
all_players = pd.DataFrame(names['resultSets'][0]['rowSet'], columns=names['resultSets'][0]['headers'])
result_tuple = [(203487, 0.18595585), (203506, 0.11655443), (203504, 0.05306283), (203486, 0.02186713), (203508, 0.00706554), (203527, 0.00638566), (203480, 0.00477143), (203500, 0.00192156), (203507, -0.00350226), (203476, -0.01304738), (203489, -0.01324894), (203548, -0.01504922), (203474, -0.01980168), (203268, -0.02474724), (203491, -0.03426916), (203473, -0.03692819), (203459, -0.04018171), (203477, -0.04141347), (203505, -0.0438259), (203519, -0.04384942), (202620, -0.04415433), (201979, -0.04804931), (203496, -0.04972171), (203471, -0.04981582), (203503, -0.05217203), (203482, -0.05485713), (203462, -0.05486091), (203485, -0.05782554), (203810, -0.05834132), (203469, -0.05904961), (203539, -0.05988436), (203136, -0.06093478), (203147, -0.06270075), (203120, -0.06496599), (202779, -0.067018), (203315, -0.06791735), (203492, -0.07292787), (203495, -0.07407107), (203501, -0.07426697), (203517, -0.07717779), (203513, -0.07819452), (202197, -0.07968543), (203543, -0.08127428), (203490, -0.08199551), (203544, -0.08233529), (203461, -0.08248828), (203540, -0.08391612), (203499, -0.08454515), (203479, -0.08738991), (203498, -0.08832967), (203468, -0.08932379), (203460, -0.09485895), (203183, -0.09616857), (203458, -0.096383), (203318, -0.09687138), (203497, -0.09712607), (203816, -0.09742036), (203524, -0.09755825), (203502, -0.09786547), (203569, -0.09961734), (203515, -0.1003242), (203561, -0.10408864), (203463, -0.109834), (203484, -0.11683215), (203263, -0.11898496), (203552, -0.12360296), (203488, -0.12583992), (202091, -0.12839389), (203545, -0.13250706), (203546, -0.13418365), (203493, -0.13682664), (203133, -0.13985063), (203467, -0.14074716), (203481, -0.14116683), (203584, -0.14441901), (203521, -0.17158767), (203138, -0.17192985)]
result_df = pd.DataFrame(result_tuple, columns=['Player_ID', 'Score'])
focus_list = []
size = result_df.shape[0]
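    # Keep only rookies whose prediction score lands in the top half of the
    # ranking (percentile p >= 50).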
for index, row in result_df.iterrows():
p = int((1-float(index+1)/float(size+1))*100.0)
if p>=50:
focus_list.append(row['Player_ID'])
sig_2013 = "sig_2013"
bkg_2013 = "bkg_2013"
#cur[sig_2013] = db.cursor(mdb.cursors.DictCursor)
#cur[sig_2013].execute("SELECT PTS,AST,REB,STL,BLK,FGA,FGM,FTA,FTM,TOV,WL,FG_PCT,FG3_PCT,FT_PCT,MIN FROM star WHERE PLAYED<=60 AND FROM_YEAR=2013;")
#df[sig_2013] = pd.DataFrame( cur[sig_2013].fetchall() )
cur[bkg_2013] = db.cursor(mdb.cursors.DictCursor)
cur[bkg_2013].execute("SELECT PTS,AST,REB,STL,BLK,FGA,FGM,FTA,FTM,TOV,WL,FG_PCT,FG3_PCT,FT_PCT,MIN,Player_ID FROM non_star WHERE PLAYED<=60 AND FROM_YEAR=2013;")
df[bkg_2013] = pd.DataFrame( cur[bkg_2013].fetchall() )
df["2013"] = df[bkg_2013] #pd.concat([df[sig_2013], df[bkg_2013]])
df["2013"]['EFF'] = (df["2013"]['PTS'] + df["2013"]['REB'] + df["2013"]['AST'] + df["2013"]['STL'] + df["2013"]['BLK']) - ((df["2013"]['FGA']-df["2013"]['FGM'])+(df["2013"]['FTA']-df["2013"]['FTM'])+df["2013"]['TOV'])
df["2013"]['WL'] = df["2013"]['WL'].map(lambda x: 1 if x=='W' else 0)
grouped = {}
grouped['2013'] = df['2013'].groupby('Player_ID')
df_id_result = {}
df_n_result = {}
df_e_result = {}
df_result = {}
df_sorted = {}
df_top3 = {}
rookie_id = []
first_degree = []
second_degree = []
third_degree = []
first_degree_name = []
second_degree_name = []
third_degree_name = []
count = 0
save = {}
for name, g in grouped['2013']:
count = count+1
n = g.shape[0]
m = g.mean()
#print name, n, m
if m['Player_ID'] not in focus_list:
continue
print m['Player_ID']
bkg_unit = np.ravel(g.as_matrix(columns=['PTS','AST','REB','STL','BLK','FG_PCT','FG3_PCT','FT_PCT','MIN','EFF','WL']))
#print bkg_unit.shape
bkg_container = []
bkg_container.append(bkg_unit)
#print bkg_container
sig_str = 'sig_%dg' % n
cur[sig_str] = db.cursor(mdb.cursors.DictCursor)
cur[sig_str].execute("SELECT PTS,AST,REB,STL,BLK,FGA,FGM,FTA,FTM,TOV,WL,FG_PCT,FG3_PCT,FT_PCT,MIN,FROM_YEAR,Player_ID FROM star WHERE PLAYED<=%d;" % n)
df[sig_str] = pd.DataFrame( cur[sig_str].fetchall() )
# calculating effciency
df[sig_str]['EFF'] = (df[sig_str]['PTS'] + df[sig_str]['REB'] + df[sig_str]['AST'] + df[sig_str]['STL'] + df[sig_str]['BLK']) - ((df[sig_str]['FGA']-df[sig_str]['FGM'])+(df[sig_str]['FTA']-df[sig_str]['FTM'])+df[sig_str]['TOV'])
df[sig_str] = df[sig_str][ df[sig_str]['FROM_YEAR'] < 2013 ]
# trim down variables
df[sig_str] = df[sig_str].ix[:,['PTS','AST','REB','STL','BLK','FG_PCT','FG3_PCT','FT_PCT','MIN','EFF','WL','Player_ID']]
df[sig_str]['WL'] = df[sig_str]['WL'].map(lambda x: 1 if x=='W' else 0)
sig_container = []
sig_id_container = []
sig_name_container = []
sgrouped = df[sig_str].groupby('Player_ID')
for sname, sg in sgrouped:
#print sg.shape[0],n
if sg.shape[0] != n:
continue
sm = sg.mean()
print sm['Player_ID']
sig_unit = np.ravel(sg.as_matrix(columns=['PTS','AST','REB','STL','BLK','FG_PCT','FG3_PCT','FT_PCT','MIN','EFF','WL']))
#print sig_unit.shape
sig_container.append(sig_unit)
sig_id_container.append(sm['Player_ID'])
for index2, row2 in all_players.iterrows():
if int(row2["PERSON_ID"]) == int(sm['Player_ID']):
sig_name_container.append(row2['DISPLAY_LAST_COMMA_FIRST'])
#print sig_container
rookie = str(int(m['Player_ID']))
df_id_result[rookie] = pd.DataFrame(sig_id_container, columns=['StarID'])
df_n_result[rookie] = pd.DataFrame(sig_name_container, columns=['StarName'])
e_result = euclidean_distances(sig_container, bkg_container)
df_e_result[rookie] = pd.DataFrame(e_result, columns=['Distance'])
#print df_n_result[rookie]
#print df_e_result[rookie]
df_result[rookie] = df_id_result[rookie].join([df_n_result[rookie], df_e_result[rookie]])
#print df_result[rookie]
df_sorted[rookie] = df_result[rookie].sort(['Distance'], ascending=1)
df_top3[rookie] = df_sorted[rookie][:3]
print df_top3[rookie]
c = 0
for i, r in df_top3[rookie].iterrows():
if c==0:
rookie_id.append(rookie)
first_degree.append(r['StarID'])
first_degree_name.append(r['StarName'])
elif c==1:
second_degree.append(r['StarID'])
second_degree_name.append(r['StarName'])
elif c==2:
third_degree.append(r['StarID'])
third_degree_name.append(r['StarName'])
c = c+1
a = pd.DataFrame(rookie_id, columns=['RookieID'])
b = pd.DataFrame(first_degree, columns=['FirstDegreeID'])
c = pd.DataFrame(second_degree, columns=['SecondDegreeID'])
d = pd.DataFrame(third_degree, columns=['ThirdDegreeID'])
x = pd.DataFrame(first_degree_name, columns=['FirstDegreeName'])
y = pd.DataFrame(second_degree_name, columns=['SecondDegreeName'])
z = pd.DataFrame(third_degree_name, columns=['ThirdDegreeName'])
f = a.join([b, c, d, x, y, z])
print f
f.to_csv('../log/0923_similarity.csv', sep='\t', index=False)
if __name__ == '__main__':
main()
|
UTF-8
|
Python
| false | false | 2,014 |
7,541,962,593,572 |
f0ac1cb26df81021b054e456a96de96bd355c34c
|
2e9718899086b3469b672d523a54ac25fb87f288
|
/evaluation.py
|
a0c72ced5c3fc686d170276163ea478611c16320
|
[] |
no_license
|
simonzheng/simonlucas-tweet-weather-classifier
|
https://github.com/simonzheng/simonlucas-tweet-weather-classifier
|
44ea84f355b75f100d47733463535d90a787c8ae
|
0f07f1e5c99fc6dfd86b04e56b688356c809458d
|
refs/heads/master
| 2021-01-01T16:25:27.148363 | 2013-12-10T06:33:37 | 2013-12-10T06:33:37 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy
import math, random
import vectorToLabel
# Sample confidence vectors
# predicted_confidence_vector_a = [0, 0, 0.6, 0, 0, .4, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0]
# predicted_confidence_vector_b = [0, 0, 0.6, 0, 0, .4, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
# predictions_list = [predicted_confidence_vector_a, predicted_confidence_vector_b]
# gold_confidence_vector_a = [0, 0, 0.5, 0, 0, .5, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0]
# gold_confidence_vector_b = [0, 0, 0.6, 0, 0, .4, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
# gold_list = [gold_confidence_vector_a, gold_confidence_vector_b]
# Note: These only differ in confidence of s2 and s5 in vector a
# These differences make the squared error 0.1^2 + 0.1^2 = 0.02
# We expect root mean squared error to be math.sqrt(0.02 / (2 * 24)) = 0.020412414523193152
# print "predicted_confidence_vector is "
# for prediction in predictions_list:
# print prediction
# print "gold_confidence_vector is "
# for gold in gold_list:
# print gold
class Evaluator:
def __init__(self):
self.labelnames = \
{'sentiment':['s1', 's2', 's3', 's4', 's5'],
'time':['w1', 'w2', 'w3', 'w4'],
'event':['k1', 'k2', 'k3', 'k4', 'k5', 'k6', 'k7', 'k8', 'k9', 'k10', 'k11', 'k12', 'k13', 'k14', 'k15']
}
self.ordered_keys = ['s1', 's2', 's3', 's4', 's5',
'w1', 'w2', 'w3', 'w4',
'k1', 'k2', 'k3', 'k4', 'k5', 'k6', 'k7', 'k8',
'k9', 'k10', 'k11', 'k12', 'k13', 'k14', 'k15']
self.label_types = ['sentiment', 'time', 'event']
self.label_indices = \
{'sentiment': range(0,5),
'time': range(5,9),
'event': range(9,24)
}
def single_data_point_se(self, predicted_confidence_vector, gold_confidence_vector, index_range=None):
numLabels = len(predicted_confidence_vector)
        label_indices = range(numLabels) if index_range is None else index_range
squared_error = 0.0
for label_index in label_indices:
squared_error += math.pow((predicted_confidence_vector[label_index] - gold_confidence_vector[label_index]), 2)
return squared_error
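    # Worked example (matches the sample vectors in the header comment):
    # two labels differ by 0.1 each, so the squared error is
    # 0.1 ** 2 + 0.1 ** 2 == 0.02.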
def error_rate(self, predictions_list, gold_list):
self.check_matching_pred_gold(predictions_list, gold_list)
numPredictions = len(predictions_list)
numLabels = len(predictions_list[0])
#print "numPredictions is %d\nnumLabels is %d" %(numPredictions, numLabels)
# Note: ensure that numPredictions = len(gold_list)
# Ensure that numPredictions > 0 and len(gold_list) > 0
# numLabels = len(gold_list[0])
num_errors = 0
for prediction_index in range(len(predictions_list)):
# total_mean_squared_error += single_data_point_mse(predictions_list[prediction_index], gold_list[prediction_index])
if predictions_list[prediction_index] != gold_list[prediction_index]:
num_errors += 1
error_rate = float(num_errors) / numPredictions
return error_rate
def rmse(self, predictions_list, gold_list):
self.check_matching_pred_gold(predictions_list, gold_list)
numPredictions = len(predictions_list)
numLabels = len(predictions_list[0])
total_squared_error = 0.0
for prediction_index in range(len(predictions_list)):
# total_mean_squared_error += single_data_point_mse(predictions_list[prediction_index], gold_list[prediction_index])
total_squared_error += self.single_data_point_se(predictions_list[prediction_index], gold_list[prediction_index])
total_mean_squared_error = total_squared_error / (numPredictions * numLabels)
root_mean_squared_error = math.sqrt(total_mean_squared_error)
return root_mean_squared_error
def rmse_by_labelclass(self, predictions_list, gold_list):
self.check_matching_pred_gold(predictions_list, gold_list)
numPredictions = len(predictions_list)
numLabels = len(predictions_list[0])
rmseDictByClass = {}
for label_type in self.label_types:
total_squared_error = 0.0
for prediction_index in range(len(predictions_list)):
# total_mean_squared_error += single_data_point_mse(predictions_list[prediction_index], gold_list[prediction_index])
total_squared_error += self.single_data_point_se(predictions_list[prediction_index],
gold_list[prediction_index],
index_range=self.label_indices[label_type])
            # Average over only the labels that belong to this class.
            numClassLabels = len(self.label_indices[label_type])
            total_mean_squared_error = total_squared_error / (numPredictions * numClassLabels)
root_mean_squared_error = math.sqrt(total_mean_squared_error)
rmseDictByClass[label_type] = root_mean_squared_error
return rmseDictByClass
def absolute_accuracy(self, predictions_list, gold_list):
self.check_matching_pred_gold(predictions_list, gold_list)
correct_vector = [tuple(predictions_list[i]) == tuple(gold_list[i]) for i in range(len(predictions_list))]
return float(correct_vector.count(True)) / len(correct_vector)
def absolute_accuracy_by_labelclass(self, predictions_list, gold_list):
self.check_matching_pred_gold(predictions_list, gold_list)
absAccuracyByClass = {}
for label_type in self.label_types:
            idx_start = min(self.label_indices[label_type])
            idx_end = max(self.label_indices[label_type]) + 1  # slice end is exclusive
correct_vector = [tuple(predictions_list[i][idx_start : idx_end]) == \
tuple(gold_list[i][idx_start : idx_end]) for i in range(len(predictions_list))]
absAccuracyByClass[label_type] = float(correct_vector.count(True)) / len(correct_vector)
return absAccuracyByClass
def show_errors(self, tweets, predictions_list, gold_list):
self.check_matching_pred_gold(predictions_list, gold_list)
print '\n~~~~~~~~~~~~~~~~~\nShowing Incorrect Tweets\n~~~~~~~~~~~~~~~~~'
converter = vectorToLabel.Converter()
correct_vector = [tuple(predictions_list[i]) == tuple(gold_list[i]) for i in range(len(predictions_list))]
for i in range(len(correct_vector)):
if correct_vector[i] == False:
print '****************************************************'
print 'for tweet ==', tweets[i]
print 'gold_list[%i] was ' %(i), gold_list[i]
converter.printLabels(gold_list[i])
print 'predictions_list[%i] was ' %(i), predictions_list[i]
converter.printLabels(predictions_list[i])
def show_correct(self, tweets, predictions_list, gold_list):
self.check_matching_pred_gold(predictions_list, gold_list)
print '\n~~~~~~~~~~~~~~~~~\nShowing CORRECT Tweets\n~~~~~~~~~~~~~~~~~'
converter = vectorToLabel.Converter()
correct_vector = [tuple(predictions_list[i]) == tuple(gold_list[i]) for i in range(len(predictions_list))]
for i in range(len(correct_vector)):
if correct_vector[i] == True:
print '****************************************************'
print 'for tweet ==', tweets[i]
print 'gold_list[%i] was ' %(i), gold_list[i]
converter.printLabels(gold_list[i])
print 'predictions_list[%i] was ' %(i), predictions_list[i]
converter.printLabels(predictions_list[i])
def check_matching_pred_gold(self, predictions_list, gold_list):
numPredictions = len(predictions_list)
if numPredictions <= 0:
print "numPredictions <= 0"
exit()
if len(gold_list) <= 0:
print "len(gold_list) <= 0"
exit()
if numPredictions != len(gold_list):
print "predictions_list and gold_list do not match in number of tweets: predictions_list has %i and gold_list has %i" %(numPredictions, len(gold_list))
exit()
numLabels = len(predictions_list[0])
if numLabels <= 0:
print "prediction numLabels <= 0"
exit()
if len(gold_list[0]) <= 0:
print "gold numLabels <= 0"
exit()
if numLabels != len(gold_list[0]):
print "length of a prediction label vector and gold label vector do not match"
exit()
##### Scraps ######
# Note: can copy or use scikit's: http://scikit-learn.org/stable/modules/cross_validation.html
# def k_folds_validation(dataList):
# numDataPoints = len(dataList)
# numDataPointsPerFold = numDataPoints / 5 # Note: we can lose up to 4 total data numDataPoints
def kfold_crossvalidate(dataList, k=5):
    if k > len(dataList):
        print "your k = %d and is greater than the length of the dataList" % k
        exit()
    dataList = dataList[:len(dataList) - (len(dataList) % k)] # we effectively ignore the last len(dataList) % k data points
indices = numpy.random.permutation(len(dataList))
print "indices are ", indices
print "\n"
numDataPointsPerFold = len(dataList) / k
print "numDataPointsPerFold is ", numDataPointsPerFold
    # we set aside 1/k of the data points as the test fold; the rest is training.
    for foldIndex in range(k):
        test_idx = indices[foldIndex * numDataPointsPerFold : (foldIndex + 1) * numDataPointsPerFold]
        training_idx = list(indices[:foldIndex * numDataPointsPerFold]) + list(indices[(foldIndex + 1) * numDataPointsPerFold:])
        print "training_idx is ", training_idx
        print "test_idx is ", test_idx
        training, test = [dataList[index] for index in training_idx], [dataList[index] for index in test_idx]
        print "training is ", training
        print "test is ", test
########## Start of using scikit ##############
# import numpy as np
# from sklearn import cross_validation
# from sklearn import datasets
# from sklearn import svm
# # X_train, X_test, y_train, y_test = cross_validation.train_test_split(
# # iris.data, iris.target, test_size=0.4, random_state=0)
# # X_train.shape, y_train.shape
# iris = datasets.load_iris()
# print 'iris.data', iris.data
# print 'iris.data.shape', iris.data.shape
# # print 'iris.target', iris.target
# print 'iris.target.shape', iris.target.shape
# X_train, X_test, y_train, y_test = cross_validation.train_test_split(iris.data, iris.target, test_size=0.4, random_state=0)
# # print 'X_train, X_test, y_train, y_test', X_train, X_test, y_train, y_test
# print 'X_train.shape, y_train.shape', X_train.shape, y_train.shape
# print 'X_test.shape, y_test.shape', X_test.shape, y_test.shape
# clf = svm.SVC(kernel='linear', C=1).fit(X_train, y_train)
# print clf.score(X_test, y_test)
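if __name__ == '__main__':
    # Small illustrative self-check of kfold_crossvalidate (assumes numpy is
    # importable, as the function itself already requires): ten points split
    # five ways should print folds of 8 training / 2 test points.
    kfold_crossvalidate(range(10), k=5)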
|
UTF-8
|
Python
| false | false | 2,013 |
3,135,326,141,623 |
3a0b611db49798211614f033a5ca63beb9b50c9a
|
3d7f57f4c143fe669929f9a29edb48c3ef57cfda
|
/triangle.py
|
3fb899a87b3849ec0ce2b45a12e3105ec2f86144
|
[] |
no_license
|
phamd1989/ProgrammingChallenges
|
https://github.com/phamd1989/ProgrammingChallenges
|
56f313d934ca6aaba4715ec0cc786f465919f71b
|
08aee6e97c1c672115461630c7769d584d9696ba
|
refs/heads/master
| 2021-01-01T05:37:06.992803 | 2014-01-03T02:13:24 | 2014-01-03T02:13:24 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# solve the triangle problem from Yodle coding challenges
def maxTotal(matrix, size):
# matrix is a two dimensionals array
for i in range(size-2, -1, -1): # each row
for j in range(i+1): # each cols of the row
currValue = matrix[i][j]
            matrix[i][j] = currValue + max(matrix[i+1][j], matrix[i+1][j+1])
return matrix[0][0]
def main():
size = 100
matrix = [[0 for x in xrange(size)] for x in xrange(size)]
myInput = open('triangle.txt', 'r')
for i in range(size):
theLine = myInput.readline()
arr = theLine.split()
for j in range(i+1):
matrix[i][j] = int(arr[j])
print maxTotal(matrix, size)
main()
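# Small self-check for maxTotal (call _demo() by hand): for the triangle
#   1
#   2 3
#   4 5 6
# the best top-to-bottom path is 1 + 3 + 6 = 10. Note maxTotal mutates its
# matrix argument, so pass a throwaway copy.
def _demo():
    tri = [[1], [2, 3], [4, 5, 6]]
    assert maxTotal(tri, 3) == 10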
|
UTF-8
|
Python
| false | false | 2,014 |
687,194,775,784 |
bd4fbdacbc6d0f76d5d0155382d85909fa1eed6d
|
49dc88dbabab84e53496a058daf9e30987fd46a7
|
/sketches/py2b/python_classes/ser.py
|
c1bb10fac0dde96275fd6e28f454dbc112c75956
|
[
"MIT"
] |
permissive
|
sauloal/arduino
|
https://github.com/sauloal/arduino
|
4f97f6e4d9822dc1516c395f7e140c061d6c319f
|
e8acbf53b5fb23be02c168dc70ee897f345f4c76
|
refs/heads/master
| 2020-05-28T07:43:02.545545 | 2013-07-29T19:40:27 | 2013-07-29T19:40:27 | 3,607,207 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
##########################################
###### PYTHON 2B #########################
##########################################
# #
# MARCO SANGALLI [email protected] #
# ALEX RIGAMONTI [email protected] #
# #
##########################################
##########################################
from serial import Serial
from threading import Thread
import time
class ser(Thread):
def __init__(self):
Thread.__init__(self)
#port
self.port=-1
self.maxports=3
self.s=None
#sync flag
self.sync=False
#sync byte
self.syncbyte=100;
#minimum sync count
self.synccount=5;
#listeners
self.listeners=[]
#reset
self.reset()
#start
self.start()
#try different port to connect to
def connect(self):
#resync
self.sync=False
#close serial
if self.s:
self.s.close()
#choose a different port 0..self.maxports
self.port+=1
if self.port>self.maxports:
self.port=0
p="/dev/ttyUSB%s" % self.port
#try to connect
try:
self.s = Serial(p,19200,timeout=1)
except:
print "PORT ERROR %s>>>" % p
time.sleep(1)
else:
print "CONNECTED TO %s>>>" % p
#thread loop
def run(self):
while 1:
try:
                #read 2 bytes from the serial port
res=self.s.read(2)
                #decode reply: 2 bytes, PIN|VALUE##
pin=ord(res[0])
val=ord(res[1])
except:
self.connect()
else:
#dispatch values ##################
#@@print "[%s:%s]" % (pin,val)
#sync byte ########################
if pin==self.syncbyte:
#print "SYNC BYTE DETECTED>>>"
#resync on signal
if val==self.syncbyte:
#decrement sync counter
if self.synccount>0:
self.synccount-=1
elif not self.sync:
print "SYNCHRONIZED>>>"
self.sync=True
else:
print "SHIFTED>>>"
res=self.s.read(1)
                        #send back desync signal
self.sendMessage(self.syncbyte,self.syncbyte)
elif self.sync:
#send to listeners
#print "TO LISTENERS>>>"
self.__toListeners(val,pin)
#little sleep
time.sleep(0.001)
#private method to dispatch
def __toListeners(self,val,pin):
for listener in self.listeners:
if listener["pin"]==pin:
#value to controller
listener["cls"](pin,val)
    #send converted message to device
def sendMessage(self,pin,value):
if value>=0 and value<=255 and self.sync:
#convert to bytes
msg="%s%s" % (chr(pin),chr(value))
try:
self.s.write(msg)
except:
print "UNABLE TO WRITE>>>"
self.connect()
#reset input enable status to 0
def reset(self):
self.sendMessage(self.syncbyte,self.syncbyte)
#add new listener
def addListener(self,pin,cls):
self.listeners.append({"pin":pin,"cls":cls})
if __name__ == "__main__":
s=ser()
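    # Illustrative wiring (the pin number is assumed): echo every decoded
    # PIN|VALUE frame arriving on pin 7.
    #   def on_pin7(pin, val): print "pin %d -> %d" % (pin, val)
    #   s.addListener(7, on_pin7)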
|
UTF-8
|
Python
| false | false | 2,013 |
14,388,140,480,824 |
11eda39e548ba645582a1f728bbda0ad46786fd6
|
8adcca698e8b8a148e9167a39afaa07a6c4fccac
|
/nova/tests/cert/test_rpcapi.py
|
58b07ff751e604b5663b864bae3d251e0723e532
|
[
"Apache-2.0"
] |
permissive
|
usc-isi/nova
|
https://github.com/usc-isi/nova
|
8ef88d8522c0f4ca77f599a980ac938131b0e4dc
|
0e0dc77b12ed60a522d7a0f5f05a1188cf50ad96
|
refs/heads/hpc-trunk
| 2021-01-15T20:33:31.738702 | 2012-10-01T22:04:46 | 2012-10-01T22:04:46 | 2,484,268 | 8 | 2 | null | true | 2013-08-13T19:21:38 | 2011-09-29T19:13:44 | 2013-08-13T17:42:59 | 2013-07-25T21:40:31 | 760,264 | null | 2 | 1 |
Python
| null | null |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for nova.cert.rpcapi
"""
from nova.cert import rpcapi as cert_rpcapi
from nova import context
from nova import flags
from nova.openstack.common import rpc
from nova import test
FLAGS = flags.FLAGS
class CertRpcAPITestCase(test.TestCase):
def _test_cert_api(self, method, **kwargs):
ctxt = context.RequestContext('fake_user', 'fake_project')
rpcapi = cert_rpcapi.CertAPI()
expected_retval = 'foo'
expected_msg = rpcapi.make_msg(method, **kwargs)
expected_msg['version'] = rpcapi.BASE_RPC_API_VERSION
self.call_ctxt = None
self.call_topic = None
self.call_msg = None
self.call_timeout = None
def _fake_call(_ctxt, _topic, _msg, _timeout):
self.call_ctxt = _ctxt
self.call_topic = _topic
self.call_msg = _msg
self.call_timeout = _timeout
return expected_retval
self.stubs.Set(rpc, 'call', _fake_call)
retval = getattr(rpcapi, method)(ctxt, **kwargs)
self.assertEqual(retval, expected_retval)
self.assertEqual(self.call_ctxt, ctxt)
self.assertEqual(self.call_topic, FLAGS.cert_topic)
self.assertEqual(self.call_msg, expected_msg)
self.assertEqual(self.call_timeout, None)
def test_revoke_certs_by_user(self):
self._test_cert_api('revoke_certs_by_user', user_id='fake_user_id')
def test_revoke_certs_by_project(self):
self._test_cert_api('revoke_certs_by_project',
project_id='fake_project_id')
def test_revoke_certs_by_user_and_project(self):
self._test_cert_api('revoke_certs_by_user_and_project',
user_id='fake_user_id',
project_id='fake_project_id')
def test_generate_x509_cert(self):
self._test_cert_api('generate_x509_cert',
user_id='fake_user_id',
project_id='fake_project_id')
def test_fetch_ca(self):
self._test_cert_api('fetch_ca', project_id='fake_project_id')
def test_fetch_crl(self):
self._test_cert_api('fetch_crl', project_id='fake_project_id')
def test_decrypt_text(self):
self._test_cert_api('decrypt_text',
project_id='fake_project_id', text='blah')
|
UTF-8
|
Python
| false | false | 2,012 |
9,028,021,301,780 |
f24837ca49fb4dc5d3a42b490ad5a1b856b1af2f
|
d36f0a161d676490c59eacc0713806287f1dee97
|
/likitomi/weight/templatetags/twelfth.py
|
df9283837556410d7def571fb7ad0231b63a1774
|
[] |
no_license
|
patipol/RTG-Likitomi
|
https://github.com/patipol/RTG-Likitomi
|
f126bed099f2b88b2fa1c739921b9b94330364e1
|
968f01ac67e093d7ec8dc65a9c280f39564a904b
|
refs/heads/master
| 2021-01-13T01:27:55.366017 | 2011-05-27T17:27:24 | 2011-05-27T17:27:24 | 1,286,462 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django import template
register = template.Library()
@register.filter
def twelfth(value):
result = value[11]
return result
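# Template usage (variable name is illustrative): {{ mylist|twelfth }}
# renders the element at index 11, i.e. the twelfth item of the sequence.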
|
UTF-8
|
Python
| false | false | 2,011 |
953,482,776,144 |
a3013307a241bb1326e0479a4b7ce99569878891
|
a3577a6832ddf3b7477f967fb5cbd9ab9c609b9a
|
/tags/1.0.0/src/plugins/help/load_help.py
|
cee89b2372208101a0268bcbcddd066f2e77f437
|
[
"GPL-3.0-only"
] |
non_permissive
|
BackupTheBerlios/freq-dev-svn
|
https://github.com/BackupTheBerlios/freq-dev-svn
|
5573eea0646a88d3e32785d21f150695a07a3730
|
82bc821d17996a32c18228c0b20b0fcaa4df0079
|
refs/heads/master
| 2020-05-23T14:29:03.267975 | 2012-04-10T23:41:42 | 2012-04-10T23:41:42 | 40,663,392 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def load_help_content(k, l):
fn = u'doc/help/%s-%s.txt' % (k, l)
fp = file(fn.encode('utf8'), 'r')
ctg = fp.readline().decode('utf8')
content = fp.read().decode('utf8')
fp.close()
return content
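# e.g. load_help_content(u'intro', u'en') - key and language are illustrative -
# reads doc/help/intro-en.txt and returns everything after the first line,
# which holds the category.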
|
UTF-8
|
Python
| false | false | 2,012 |
17,222,818,883,027 |
5c2f0cfd690febb59905a0ec5c5b37a3c27913b3
|
5908cf053b8a88768e0a9b06210dca4fc2970a21
|
/word-manager/app/models/admin.py
|
2e02eb7916d03b74b742bee89f2b6773828c9059
|
[
"MIT"
] |
permissive
|
jbowens/taboo
|
https://github.com/jbowens/taboo
|
d87b39af645088a6222c1ae7cfaed4f18331fa04
|
e4a346aa28a5a64f2b0141dbf728fa809c6d1cb3
|
refs/heads/master
| 2020-04-19T13:24:24.383281 | 2014-07-24T20:54:52 | 2014-07-24T20:54:52 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from app import db
class Admin(db.Model):
"""
Represents an administrator.
"""
__tablename__ = 'admins'
aid = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(50), unique=True)
passwordhash = db.Column(db.String(256))
|
UTF-8
|
Python
| false | false | 2,014 |
7,438,883,387,271 |
d8e25df3122aa6b41c43260e086b8d98be9758ce
|
e758cee04fc929c6a4af43231161ee6abd9103ac
|
/Resources/findcentroid.py
|
d1e3ea80f1c51d51754811dd35132473bcb8bcda
|
[
"MIT"
] |
permissive
|
dongle/BreakVaders
|
https://github.com/dongle/BreakVaders
|
3ce4f38a9a84982947ed7d3595909b347f323663
|
e24dafa713c01e25d145b5833ee72dfedcf83bef
|
refs/heads/master
| 2016-09-16T09:46:56.455531 | 2014-06-16T04:25:19 | 2014-06-16T04:25:19 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import struct
import png
import sys
import os.path
from types import *
versionstring = "0.1.0"
# --------- dot notation for python dictionaries
# From http://parand.com/say/index.php/2008/10/24/python-dot-notation-dictionary-access/
class dotdict(dict):
def __getattr__(self, attr):
return self.get(attr, None)
__setattr__= dict.__setitem__
__delattr__= dict.__delitem__
# --------- wmb data structure definitions
# From http://www.conitec.net/beta/prog_mdlhmp.html
LONG_TYPE = '<L'
SHORT_TYPE = '<h'
FLOAT_TYPE = '<f'
STRING4_TYPE = '<4s'
STRING16_TYPE = '<16s'
STRING20_TYPE = '<20s'
STRING44_TYPE = '<44s'
def read_struct(astruct, filehandle):
if type(astruct) == StringType:
line = filehandle.read(struct.calcsize(astruct))
rval = struct.unpack(astruct, line)[0]
return rval
else:
rval = {}
for item in astruct:
rval[item[0]]=read_struct(item[1], filehandle)
return dotdict(rval)
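# Illustrative use of read_struct (the field names are invented): passing a
# sequence of (name, format) pairs,
#   header = read_struct((('magic', STRING4_TYPE), ('version', LONG_TYPE)), fp)
# returns a dotdict, so the fields read as header.magic and header.version.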
def read_888_image_flipped(width, height, filehandle):
bytes = [''] * height
for j in reversed(range(height)):
line = filehandle.read(3*width)
bytes[j] = struct.unpack('<%dB'%(3*width), line)
#print bytes[j]
return bytes
def read_565_image(width, height, filehandle):
bytes = []
exbytes = []
for j in range(height):
line = filehandle.read(2*width)
bytes.append(struct.unpack('<%dH'%width, line))
line = []
for i in list(bytes[j]):
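            # RGB565 -> RGB888: red is the top 5 bits, green the middle 6,
            # blue the low 5; shift each back up to an 8-bit channel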
line.append((i>>11)<<3)
line.append(((i>>5) & 0x3F)<<2)
line.append((i & 0x1F)<<3)
exbytes.append(tuple(line))
return exbytes
def write_to_filechunk(length, filename, filehandle):
chunk = filehandle.read(length)
name = filter(lambda x: x!='\00', filename)
f = open('%s.chunk' % name, 'wb')
f.write(chunk)
f.close()
def read_img_file(infile):
# get the file and metadata
r = png.Reader(filename = infile)
p = r.asRGBA8()
l = list(p[2])
xdim = p[0]
ydim = p[1]
# find centroid
xaccum = 0
yaccum = 0
count = 0
for j in range(ydim):
for i in range(xdim):
if l[j][i*4+3] > 0:
xaccum += i
yaccum += j
count += 1
    if count == 0:
        print infile, 'has no opaque pixels'
        return
    centx = xaccum / count
    centy = yaccum / count
print infile, centx, centy
return
def main(args):
read_img_file(args[0])
if __name__ == "__main__":
main(sys.argv[1:])
|
UTF-8
|
Python
| false | false | 2,014 |
8,546,984,952,550 |
c969df1138311a23eeb9a70370770ed4f77e8d56
|
df432bb7f873e8c17f27ce38c23e6d75a6b7ba8b
|
/scripts/Maelstrom/Episode6/E6M3/E6M3_AI_Fed.py
|
641ee90fc83e6a9e5d5e66ed49da3a7e0b319c44
|
[] |
no_license
|
tnu1997/bridgecommander2013
|
https://github.com/tnu1997/bridgecommander2013
|
9e4e1c15f32436f61d61276cb8b3d4fe97d73c8a
|
81da2e13a031881b9ae88cd0c0467e341f46150d
|
refs/heads/master
| 2021-01-23T13:31:36.327437 | 2013-02-19T13:36:05 | 2013-02-19T13:36:05 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import App
def CreateAI(pShip):
#########################################
# Creating PlainAI WarpToSavoy3 at (32, 116)
pWarpToSavoy3 = App.PlainAI_Create(pShip, "WarpToSavoy3")
pWarpToSavoy3.SetScriptModule("Warp")
pWarpToSavoy3.SetInterruptable(0)
pScript = pWarpToSavoy3.GetScriptInstance()
pScript.SetDestinationSetName("Systems.Savoy.Savoy3")
pScript.SetDestinationPlacementName("FedEnter")
pScript.SetWarpDuration(2)
# Done creating PlainAI WarpToSavoy3
#########################################
#########################################
# Creating PlainAI InterceptGalor1 at (77, 17)
pInterceptGalor1 = App.PlainAI_Create(pShip, "InterceptGalor1")
pInterceptGalor1.SetScriptModule("Intercept")
pInterceptGalor1.SetInterruptable(1)
pScript = pInterceptGalor1.GetScriptInstance()
pScript.SetTargetObjectName("Galor 1")
pScript.SetMaximumSpeed(10)
pScript.SetInterceptDistance(10)
# Done creating PlainAI InterceptGalor1
#########################################
#########################################
# Creating ConditionalAI TargetsNotInRange at (88, 64)
## Conditions:
#### Condition GalorsInRange
pGalorsInRange = App.ConditionScript_Create("Conditions.ConditionInRange", "ConditionInRange", 200, pShip.GetName(), "Galor 1", "Galor 2")
## Evaluation function:
def EvalFunc(bGalorsInRange):
ACTIVE = App.ArtificialIntelligence.US_ACTIVE
DORMANT = App.ArtificialIntelligence.US_DORMANT
DONE = App.ArtificialIntelligence.US_DONE
if (bGalorsInRange):
return DONE
return ACTIVE
## The ConditionalAI:
pTargetsNotInRange = App.ConditionalAI_Create(pShip, "TargetsNotInRange")
pTargetsNotInRange.SetInterruptable(1)
pTargetsNotInRange.SetContainedAI(pInterceptGalor1)
pTargetsNotInRange.AddCondition(pGalorsInRange)
pTargetsNotInRange.SetEvaluationFunction(EvalFunc)
# Done creating ConditionalAI TargetsNotInRange
#########################################
#########################################
# Creating PlainAI Stay at (195, 23)
pStay = App.PlainAI_Create(pShip, "Stay")
pStay.SetScriptModule("Stay")
pStay.SetInterruptable(1)
# Done creating PlainAI Stay
#########################################
#########################################
# Creating ConditionalAI IsPlayerInSet at (177, 87)
## Conditions:
#### Condition PlayerInSet
pPlayerInSet = App.ConditionScript_Create("Conditions.ConditionInSet", "ConditionInSet", "player", "Savoy3")
## Evaluation function:
def EvalFunc(bPlayerInSet):
ACTIVE = App.ArtificialIntelligence.US_ACTIVE
DORMANT = App.ArtificialIntelligence.US_DORMANT
DONE = App.ArtificialIntelligence.US_DONE
if (bPlayerInSet):
return DONE
return ACTIVE
## The ConditionalAI:
pIsPlayerInSet = App.ConditionalAI_Create(pShip, "IsPlayerInSet")
pIsPlayerInSet.SetInterruptable(1)
pIsPlayerInSet.SetContainedAI(pStay)
pIsPlayerInSet.AddCondition(pPlayerInSet)
pIsPlayerInSet.SetEvaluationFunction(EvalFunc)
# Done creating ConditionalAI IsPlayerInSet
#########################################
#########################################
# Creating CompoundAI BasicAttack3pSavoy3FedsTargets at (238, 129)
import AI.Compound.BasicAttack
pBasicAttack3pSavoy3FedsTargets = AI.Compound.BasicAttack.CreateAI(pShip, App.ObjectGroup_FromModule("Maelstrom.Episode6.E6M3.E6M3", "g_pSavoy3FedsTargets"), Difficulty = 0.75, SmartTorpSelection = 0)
# Done creating CompoundAI BasicAttack3pSavoy3FedsTargets
#########################################
#########################################
# Creating PlainAI WarpToSavoy1 at (335, 61)
pWarpToSavoy1 = App.PlainAI_Create(pShip, "WarpToSavoy1")
pWarpToSavoy1.SetScriptModule("Warp")
pWarpToSavoy1.SetInterruptable(1)
pScript = pWarpToSavoy1.GetScriptInstance()
pScript.SetDestinationSetName("Systems.Savoy.Savoy1")
pScript.SetDestinationPlacementName("FedEnter")
pScript.SetWarpDuration(15)
# Done creating PlainAI WarpToSavoy1
#########################################
#########################################
# Creating ConditionalAI WarpTimer at (331, 117)
## Conditions:
#### Condition Timer
pTimer = App.ConditionScript_Create("Conditions.ConditionTimer", "ConditionTimer", 15)
## Evaluation function:
def EvalFunc(bTimer):
ACTIVE = App.ArtificialIntelligence.US_ACTIVE
DORMANT = App.ArtificialIntelligence.US_DORMANT
DONE = App.ArtificialIntelligence.US_DONE
if (bTimer):
return ACTIVE
return DORMANT
## The ConditionalAI:
pWarpTimer = App.ConditionalAI_Create(pShip, "WarpTimer")
pWarpTimer.SetInterruptable(1)
pWarpTimer.SetContainedAI(pWarpToSavoy1)
pWarpTimer.AddCondition(pTimer)
pWarpTimer.SetEvaluationFunction(EvalFunc)
# Done creating ConditionalAI WarpTimer
#########################################
#########################################
# Creating PlainAI Call_ThisShipDamaged at (462, 45)
pCall_ThisShipDamaged = App.PlainAI_Create(pShip, "Call_ThisShipDamaged")
pCall_ThisShipDamaged.SetScriptModule("RunScript")
pCall_ThisShipDamaged.SetInterruptable(1)
pScript = pCall_ThisShipDamaged.GetScriptInstance()
pScript.SetScriptModule("Maelstrom.Episode6.E6M3.E6M3")
pScript.SetFunction("FedShipDamaged")
pScript.SetArguments(pShip.GetName())
# Done creating PlainAI Call_ThisShipDamaged
#########################################
#########################################
# Creating ConditionalAI TakingHullDamage at (458, 107)
## Conditions:
#### Condition HullAt75
pHullAt75 = App.ConditionScript_Create("Conditions.ConditionSystemBelow", "ConditionSystemBelow", pShip.GetName(), App.CT_HULL_SUBSYSTEM, 0.75)
## Evaluation function:
def EvalFunc(bHullAt75):
ACTIVE = App.ArtificialIntelligence.US_ACTIVE
DORMANT = App.ArtificialIntelligence.US_DORMANT
DONE = App.ArtificialIntelligence.US_DONE
if (bHullAt75):
return ACTIVE
return DORMANT
## The ConditionalAI:
pTakingHullDamage = App.ConditionalAI_Create(pShip, "TakingHullDamage")
pTakingHullDamage.SetInterruptable(1)
pTakingHullDamage.SetContainedAI(pCall_ThisShipDamaged)
pTakingHullDamage.AddCondition(pHullAt75)
pTakingHullDamage.SetEvaluationFunction(EvalFunc)
# Done creating ConditionalAI TakingHullDamage
#########################################
#########################################
# Creating CompoundAI BasicAttackTransports at (559, 47)
import AI.Compound.BasicAttack
pBasicAttackTransports = AI.Compound.BasicAttack.CreateAI(pShip, App.ObjectGroup_FromModule("Maelstrom.Episode6.E6M3.E6M3", "g_pSavoy1Transports"), Difficulty = 0.01, SmartTorpSelection = 0)
# Done creating CompoundAI BasicAttackTransports
#########################################
#########################################
# Creating ConditionalAI TransportCloseToStation at (551, 108)
## Conditions:
#### Condition Transport80kFromStation
pTransport80kFromStation = App.ConditionScript_Create("Conditions.ConditionInRange", "ConditionInRange", 450, "Savoy Station", "Transport 1", "Transport 2")
## Evaluation function:
def EvalFunc(bTransport80kFromStation):
ACTIVE = App.ArtificialIntelligence.US_ACTIVE
DORMANT = App.ArtificialIntelligence.US_DORMANT
DONE = App.ArtificialIntelligence.US_DONE
if (bTransport80kFromStation):
return ACTIVE
return DORMANT
## The ConditionalAI:
pTransportCloseToStation = App.ConditionalAI_Create(pShip, "TransportCloseToStation")
pTransportCloseToStation.SetInterruptable(1)
pTransportCloseToStation.SetContainedAI(pBasicAttackTransports)
pTransportCloseToStation.AddCondition(pTransport80kFromStation)
pTransportCloseToStation.SetEvaluationFunction(EvalFunc)
# Done creating ConditionalAI TransportCloseToStation
#########################################
#########################################
# Creating CompoundAI BasicAttack3Savoy1FedsTargets at (680, 42)
import AI.Compound.BasicAttack
pBasicAttack3Savoy1FedsTargets = AI.Compound.BasicAttack.CreateAI(pShip, App.ObjectGroup_FromModule("Maelstrom.Episode6.E6M3.E6M3", "g_pSavoy1FedsTargets"), Difficulty = 0.75, SmartTorpSelection = 0)
# Done creating CompoundAI BasicAttack3Savoy1FedsTargets
#########################################
#########################################
# Creating CompoundAI BasicAttackSavoy1Transports at (674, 108)
import AI.Compound.BasicAttack
pBasicAttackSavoy1Transports = AI.Compound.BasicAttack.CreateAI(pShip, App.ObjectGroup_FromModule("Maelstrom.Episode6.E6M3.E6M3", "g_pSavoy1Transports"), Difficulty = 0.75, SmartTorpSelection = 0)
# Done creating CompoundAI BasicAttackSavoy1Transports
#########################################
#########################################
# Creating PlainAI FollowKhitomer at (647, 187)
pFollowKhitomer = App.PlainAI_Create(pShip, "FollowKhitomer")
pFollowKhitomer.SetScriptModule("FollowObject")
pFollowKhitomer.SetInterruptable(1)
pScript = pFollowKhitomer.GetScriptInstance()
pScript.SetFollowObjectName("Khitomer")
pScript.SetRoughDistances(150, 175, 200)
# Done creating PlainAI FollowKhitomer
#########################################
#########################################
# Creating PriorityListAI Savoy1Priority at (404, 183)
pSavoy1Priority = App.PriorityListAI_Create(pShip, "Savoy1Priority")
pSavoy1Priority.SetInterruptable(1)
# SeqBlock is at (530, 192)
pSavoy1Priority.AddAI(pTakingHullDamage, 1)
pSavoy1Priority.AddAI(pTransportCloseToStation, 2)
pSavoy1Priority.AddAI(pBasicAttack3Savoy1FedsTargets, 3)
pSavoy1Priority.AddAI(pBasicAttackSavoy1Transports, 4)
pSavoy1Priority.AddAI(pFollowKhitomer, 5)
# Done creating PriorityListAI Savoy1Priority
#########################################
#########################################
# Creating SequenceAI InnerSequence at (233, 181)
pInnerSequence = App.SequenceAI_Create(pShip, "InnerSequence")
pInnerSequence.SetInterruptable(1)
pInnerSequence.SetLoopCount(1)
pInnerSequence.SetResetIfInterrupted(0)
pInnerSequence.SetDoubleCheckAllDone(0)
pInnerSequence.SetSkipDormant(0)
# SeqBlock is at (347, 188)
pInnerSequence.AddAI(pWarpTimer)
pInnerSequence.AddAI(pSavoy1Priority)
# Done creating SequenceAI InnerSequence
#########################################
#########################################
# Creating SequenceAI Savoy3Sequence at (13, 190)
pSavoy3Sequence = App.SequenceAI_Create(pShip, "Savoy3Sequence")
pSavoy3Sequence.SetInterruptable(1)
pSavoy3Sequence.SetLoopCount(1)
pSavoy3Sequence.SetResetIfInterrupted(0)
pSavoy3Sequence.SetDoubleCheckAllDone(0)
pSavoy3Sequence.SetSkipDormant(1)
# SeqBlock is at (133, 191)
pSavoy3Sequence.AddAI(pWarpToSavoy3)
pSavoy3Sequence.AddAI(pTargetsNotInRange)
pSavoy3Sequence.AddAI(pIsPlayerInSet)
pSavoy3Sequence.AddAI(pBasicAttack3pSavoy3FedsTargets)
pSavoy3Sequence.AddAI(pInnerSequence)
# Done creating SequenceAI Savoy3Sequence
#########################################
#########################################
# Creating PreprocessingAI AvoidObstacles at (10, 256)
## Setup:
import AI.Preprocessors
pScript = AI.Preprocessors.AvoidObstacles()
## The PreprocessingAI:
pAvoidObstacles = App.PreprocessingAI_Create(pShip, "AvoidObstacles")
pAvoidObstacles.SetInterruptable(1)
pAvoidObstacles.SetPreprocessingMethod(pScript, "Update")
pAvoidObstacles.SetContainedAI(pSavoy3Sequence)
# Done creating PreprocessingAI AvoidObstacles
#########################################
return pAvoidObstacles
|
UTF-8
|
Python
| false | false | 2,013 |
5,832,565,606,577 |
025384b4c823e2f70217b842a2b762f96469fe2d
|
a84041bc41d41e7d09aeb95ea889cea8fd11e860
|
/__init__.py
|
1d53829dbd65f15ea9be68baa8409de8f0b8843d
|
[] |
no_license
|
ribeiroit/tweet
|
https://github.com/ribeiroit/tweet
|
4ee098155da79e98051ff6e46e71fe906d39f85d
|
056e7d3a9b17d0c9afd1f54d964bddb07ac995db
|
refs/heads/master
| 2016-09-03T07:33:07.119732 | 2013-09-26T14:52:48 | 2013-09-26T14:52:48 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# coding: utf-8
#
# Creates the Flask app and configures the MongoDB connection
#
import os
# /User/tribeiro/projetos/dojo/tweet/templates
tmpl_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')
from flask import Flask
from flask_mongoengine import MongoEngine, MongoEngineSessionInterface
from flask.ext.login import LoginManager
app = Flask(__name__, static_folder='static', template_folder=tmpl_dir)
app.config.from_pyfile('config.py')
app.debug = True
db = MongoEngine(app)
app.session_interface = MongoEngineSessionInterface(db)
lm = LoginManager()
lm.init_app(app)
lm.login_view = 'login'
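# imported at the bottom so the views module can in turn import `app` (and
# `db`) from here without triggering a circular import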
from tweet import views
|
UTF-8
|
Python
| false | false | 2,013 |
4,904,852,690,714 |
d7820a383cd51f41bd215594d8834c6fccbff050
|
ac7512969254e54aa702e22f687f441d64b3de68
|
/allergy_assassin/allergies.py
|
6da1b5c88693ae4cb48fc4ded6ae42a336de00ab
|
[] |
no_license
|
majackson/allergy-assassin
|
https://github.com/majackson/allergy-assassin
|
a3423048cb25f26e4f3e2576473bf6cf716e03ed
|
e84e6309d133aa64f10cb3b1da69db78de11ae65
|
refs/heads/master
| 2016-09-08T08:15:44.797152 | 2013-10-12T16:42:13 | 2013-10-12T16:42:13 | 3,155,160 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
This file maps abstract allergies (which don't by name refer to a concrete
ingredient) to sets of concrete ingredients.
The fixtures in this file are also used to generate the allergies
autocompletion list, which explains the 1->1 mappings also listed here.
"""
from allergy_assassin import db, logger
import re
logger = logger.init("allergy_assassin.allergies")
# NB: single allergens still need a trailing comma to stay tuples - a bare
# ('peanut') is just a string, and set.update() would add its characters
# one by one.
fixtures = [
    (('peanut',), ('peanut',)),
    (('nut',), ('peanut', 'hazelnut', 'cashew nut', 'brazil nut',
                'almond', 'walnut', 'pecan', 'pistachio')),
    (('shellfish',), ('prawn', 'clam', 'oyster', 'crab', 'lobster',
                      'crayfish', 'mussel', 'winkle', 'scallop',
                      'shrimp')),
    (('seafood',), ('prawn', 'clam', 'oyster', 'crab', 'lobster',
                    'crayfish', 'mussel', 'winkle', 'scallop',
                    'shrimp', 'squid', 'calamari', 'octopus',
                    'fish', 'cod', 'scampi', 'haddock', 'mackerel',
                    'tuna', 'sardine', 'anchovy')),
    (('dairy',), ('milk', 'yoghurt', 'cheese', 'cream', 'butter')),
    (('egg',), ('egg',)),
    (('lime',), ('lime',)),
    (('kidney bean',), ('kidney bean',)),
    (('gluten', 'wheat'), ('bread', 'flour', 'pasta')),
    (('barley',), ('barley',)),
    (('garlic',), ('garlic',)),
    (('milk',), ('milk',)),
    (('soy',), ('soy',)),
    (('sulfite', 'sulphite'), ('wine',)),
    (('nitrate', 'nitrite'), ('beef', 'pork')),
    (('amine',), ()),
    (('propionates',), ()),
    (('benzoates',), ()),
    (('sorbates',), ()),
]
def populate():
for allergies, allergens in fixtures:
for allergy in allergies:
db.allergies.insert({'_id': allergy,
'allergens': allergens})
def get_real_allergens(allergen_list):
"""Takes an iterable of allergens, all of which may be abstract or real
and converts them to a list of real allergens that may appear in
a list of ingrdients"""
real_allergens = set()
for allergen in allergen_list:
allergen = allergen.lower().strip()
reals = get(allergen)
if reals:
real_allergens.update(reals)
else:
real_allergens.add(allergen)
logger.debug("Expanded allergens from %s to %s" %
(str(allergen_list), str(real_allergens)) )
return real_allergens
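# e.g. (illustrative input) get_real_allergens(['dairy', 'quinoa']) expands
# 'dairy' through the fixtures above and passes the unrecognised 'quinoa'
# through unchanged.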
def search(query):
"""Fetch allergies by search term if provided, else fetch all"""
results = search_query(query) if query else get_all()
return [ result['_id'] for result in results ]
def search_query(query):
autocomplete_rx = re.compile('^%s' % query, re.IGNORECASE)
return db.allergies.find({'_id': autocomplete_rx}).sort('_id')
def get_all():
return db.allergies.find()
def get(allergy):
"""Fetch concrete allergies associated with abstract allergy,
or none if none existant"""
result = db.allergies.find_one({'_id': allergy})
return result['allergens'] if result else None
|
UTF-8
|
Python
| false | false | 2,013 |
13,056,700,617,532 |
5ff12b2ba626562af0453f110dda2ecbdad0b386
|
d26c12acc30f49700ef70db4b27f89a03963db2d
|
/apps/blog/managers.py
|
c26956ece713cd516b403e383ece1096d30fc180
|
[] |
no_license
|
kuchin/wend
|
https://github.com/kuchin/wend
|
eebbec75da6b9d789145fce8a23d148d407603eb
|
10ffd688df9f2bcaaccc276b20303237ab8389b2
|
refs/heads/master
| 2019-04-07T06:45:00.990864 | 2013-02-11T19:49:33 | 2013-02-11T19:49:33 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.db import models
from django.contrib.contenttypes.models import ContentType
from core.models import ActiveManager
from actstream.models import user_stream
class LikeManager(ActiveManager):
def record_like(self, obj, user):
"""
Records user or anonymous like for the object
"""
if user.is_anonymous():
user = None
ctype = ContentType.objects.get_for_model(obj)
if user:
try:
self.get(user=user, content_type=ctype, object_id=obj._get_pk_val())
return
except models.ObjectDoesNotExist:
pass
self.create(user=user, content_type=ctype, object_id=obj._get_pk_val())
def record_unlike(self, obj, user):
"""
Removes user like for the object
"""
ctype = ContentType.objects.get_for_model(obj)
try:
v = self.get(user=user, content_type=ctype, object_id=obj._get_pk_val())
v.delete()
except models.ObjectDoesNotExist:
pass
def get_likes(self, obj):
"""
Return all likes of the object
"""
ctype = ContentType.objects.get_for_model(obj)
return self.filter(content_type=ctype, object_id=obj._get_pk_val())
def get_likes_users(self, obj):
"""
Return all non-anonymous likes of the object
"""
ctype = ContentType.objects.get_for_model(obj)
return self.filter(content_type=ctype, object_id=obj._get_pk_val(), user__isnull=False)
def get_likes_anonymous(self, obj):
"""
        Return all anonymous likes of the object
"""
ctype = ContentType.objects.get_for_model(obj)
return self.filter(content_type=ctype, object_id=obj._get_pk_val(), user__isnull=True)
class PostManager(ActiveManager):
def user_stream(self, user):
ctype = ContentType.objects.get_for_model(self.model)
return user_stream(user).filter(action_object_content_type=ctype)
def user_stream_external(self, user):
return self.user_stream(user).exclude(actor_object_id=user.pk)
def user_feed(self, user):
return self.filter(user=user).order_by('-date_updated')
|
UTF-8
|
Python
| false | false | 2,013 |
833,223,667,572 |
34b8084f449c056de37fa3f8a98ad0f11e065feb
|
f995ccace2f007033bce77f94e474f81202265f0
|
/src/compiler/expressions.py
|
bfdfa70103d2fbbd43c3112d72d341dc860211ac
|
[
"MIT"
] |
permissive
|
pomagma/pomagma
|
https://github.com/pomagma/pomagma
|
8a71ad95d27dab6163f60e41784554121c436191
|
9cf830ed6f5b30f6e08cadfe7e4553a67703d8e5
|
refs/heads/master
| 2021-01-18T16:05:26.925373 | 2014-10-26T04:21:38 | 2014-10-26T04:21:38 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import re
from pomagma.compiler.util import union
from pomagma.compiler import signature
class Expression(object):
def __init__(self, name, *args):
assert isinstance(name, str)
assert re.match('[a-zA-Z][a-zA-Z_]*$', name),\
'invalid name: {0}'.format(name)
args = list(args)
arity = signature.get_arity(name)
assert len(args) == signature.get_nargs(arity)
for arg in args:
assert isinstance(arg, Expression), arg
self._name = name
self._args = args
self._arity = arity
self._polish = ' '.join([name] + [arg._polish for arg in args])
self._hash = hash(self._polish)
if arity == 'Variable':
self._var = self
self._vars = set([self])
elif arity == 'NullaryFunction':
self._var = Expression(name + '_')
self._vars = set()
elif arity in [
'InjectiveFunction',
'BinaryFunction',
'SymmetricFunction',
]:
var = re.sub('[ _]+', '_', self.polish).rstrip('_')
self._var = Expression(var)
self._vars = union(arg.vars for arg in args)
else:
self._var = None
self._vars = union(arg.vars for arg in args)
@property
def name(self):
return self._name
@property
def args(self):
return self._args
@property
def arity(self):
return self._arity
@property
def polish(self):
return self._polish
@property
def var(self):
return self._var
@property
def vars(self):
return self._vars.copy()
@property
def consts(self):
if self.is_fun() and not self.args:
return set([self])
else:
return union([arg.consts for arg in self.args])
def __hash__(self):
return self._hash
def __eq__(self, other):
assert isinstance(other, Expression), other
return self._polish == other._polish
def __str__(self):
return self._polish
def __repr__(self):
return self._polish
def is_var(self):
return signature.is_var(self.name)
def is_fun(self):
return signature.is_fun(self.name)
def is_rel(self):
return signature.is_rel(self.name)
def is_con(self):
return signature.is_con(self.name)
def substitute(self, var, defn):
assert isinstance(var, Expression)
assert isinstance(defn, Expression)
assert var.is_var()
if var not in self.vars:
return self
elif self.is_var():
return defn
else:
return Expression(
self.name,
*[arg.substitute(var, defn) for arg in self.args])
|
UTF-8
|
Python
| false | false | 2,014 |
19,499,151,559,654 |
c287d12d1d9224aca0fbdb550d154d82a67d83df
|
731556e068f7cb27053e01583a46d99630ed83fd
|
/src/pickle2db.py
|
09d96bb7bd4d8f1b48a2366bb561cf425f84912b
|
[] |
no_license
|
paegert/ebfactory
|
https://github.com/paegert/ebfactory
|
b89ff5306321f4f0d992788fbdce591bf109af9d
|
8e529d1583933c8f012c7b09305e797223942815
|
refs/heads/master
| 2021-01-25T07:39:32.642974 | 2013-12-05T18:07:56 | 2013-12-05T18:07:56 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
'''
Created on Sep 6, 2012
@package pickle2db
@author map
@version \$Revision: 1.3 $
@date \$Date: 2013/12/05 17:20:06 $
convert pickled net to database, devide --> divide
$Log: pickle2db.py,v $
Revision 1.3 2013/12/05 17:20:06 paegerm
deleting classes option (obsolete, we store the classes in the network)
Revision 1.2 2013/08/07 15:43:10 paegerm
adding select and remark to network dictionary data,
renaming dict to ndict where the dictionary of the trained network is meant
Revision 1.1 2012/09/24 21:39:34 paegerm
convert pickled network into database
Initial revision
'''
from optparse import OptionParser
import numpy as np
import os
import pickle
import sqlitetools.dbwriter as dbw
import sqlitetools.dbreader as dbr
import mlp
import dbconfig
from stopwatch import *
def writevec(netuid, writer, vector, name):
vals = []
for i in xrange(len(vector)):
vals.append([netuid, name, i, vector[i]])
writer.insert(vals, True)
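# writevec flattens a 1-d vector into one row per element, keyed by
# (netuid, name, index) - e.g. writevec(netuid, writer, net.normdivide,
# 'normdivide'), as done below for the normalization and stats vectors.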
if __name__ == '__main__':
usage = '%prog [options] [dbname]'
parser = OptionParser(usage=usage)
# parser.add_option('--classes', dest='clsnames', type='string',
# default='classes.txt',
# help='file with space separated classnames (classes.txt)')
parser.add_option('-d', dest='debug', type='int', default=1,
help='debug setting (default: 1)')
parser.add_option('--dbconfig', dest='dbconfig', type = 'string',
default='Asas',
help='name of database configuration (default = Asas')
parser.add_option('--dbname', dest='dbname', type='string',
default='asasnet.sqlite',
help='name of database file')
parser.add_option('--name', dest='name', type='string',
default=None,
help='name for the trained network (None)')
parser.add_option('--pname', dest='picklename', type='string',
default='mlp.pickle',
help='filename for the trained, pickled network (mlp.pickle)')
parser.add_option('--remark', dest='remark', type='string',
default=None,
help='remark for this run')
parser.add_option('--resdir', dest='resdir', type='string',
default='results',
help='subdirectory for results (default = results)')
parser.add_option('--rootdir', dest='rootdir', type='string',
default='./',
help='directory for database files (default = ./)')
(options, args) = parser.parse_args()
if (options.rootdir[-1] != '/'):
options.rootdir += '/'
if (len(args) == 1):
options.fitname = args[0]
# for line in open(options.rootdir + options.clsnames):
# if (len(line.strip()) == 0) or (line.startswith('#')):
# continue
# options.classes = line.split()
if (options.name == None):
pos = options.picklename.rfind('.')
options.name = options.resdir + '_' + options.picklename[:pos]
cls = getattr(dbconfig, options.dbconfig)
dbc = cls()
watch = Stopwatch()
watch.start()
pf = open(options.rootdir + options.resdir + '/' + options.picklename)
net = pickle.load(pf)
pf.close()
ndictwriter = dbw.DbWriter(options.rootdir + options.dbname, dbc.netdictcols,
dbc.netdicttname, dbc.netdicttypes,
dbc.netdictnulls)
(w1rows, w1cols) = np.shape(net.weights1)
(w2rows, w2cols) = np.shape(net.weights2)
mres = 0
    if net.multires:
mres = 1
ndictdata = [options.name, net.nin, net.nout, net.ndata, net.nhidden,
net.beta, net.momentum, net.eta, net.outtype, mres,
net.mdelta, w1rows, w1cols, w2rows, w2cols, len(net.classes),
net.trainerror, net.validerror, net.stopcount, net.allpercent,
net.comment, net.select, options.remark]
# None, None, None]
ndictwriter.insert((ndictdata,), True)
# netuid = ndictwriter.dbcurs.lastrowid
res = ndictwriter.dbcurs.execute('SELECT uid from ' + dbc.netdicttname +
" where name = '" + options.name + "';")
(netuid,) = res.fetchone()
ndictwriter.close()
# write weights
vals = []
for i in xrange(w1rows):
for j in xrange(w1cols):
vals.append([netuid, 1, i, j, net.weights1[i][j]])
writer = dbw.DbWriter(options.rootdir + options.dbname, dbc.netwcols,
dbc.netwtname, dbc.netwtypes, dbc.netwnulls)
writer.insert(vals, True)
vals = []
for i in xrange(w2rows):
for j in xrange(w2cols):
vals.append([netuid, 2, i, j, net.weights2[i][j]])
writer.insert(vals, True)
writer.close()
# write classes
vals = []
for i in xrange(len(net.classes)):
vals.append([netuid, i, net.classes[i]])
writer = dbw.DbWriter(options.rootdir + options.dbname, dbc.netclasscols,
dbc.netclasstname, dbc.netclasstypes, dbc.netclassnulls)
writer.insert(vals, True)
writer.close()
# write confusion matrix
    if net.cm is not None:   # 'is not None' avoids numpy's elementwise comparison
vals = []
(rows, cols) = np.shape(net.cm)
for i in xrange(rows):
for j in xrange(cols):
vals.append([netuid, dbc.confmatname, i, j, int(net.cm[i][j])])
writer = dbw.DbWriter(options.rootdir + options.dbname, dbc.netmatcols,
dbc.netmattname, dbc.netmattypes, dbc.netmatnulls)
writer.insert(vals, True)
writer.close()
writer = dbw.DbWriter(options.rootdir + options.dbname, dbc.netveccols,
dbc.netvectname, dbc.netvectypes, dbc.netvecnulls)
writevec(netuid, writer, net.normdivide, 'normdivide')
writevec(netuid, writer, net.normsubtract, 'normsubtract')
writevec(netuid, writer, net.trainstats, 'trainstats')
writevec(netuid, writer, net.validstats, 'validstats')
writevec(netuid, writer, net.teststats, 'teststats')
writer.close()
print 'Network written in', watch.stop(), 's'
print "Done"
|
UTF-8
|
Python
| false | false | 2,013 |
1,563,368,144,348 |
cede5a1680339fa30646710b078b9d851631c20c
|
0f1da56e21de2a66978e6fc2af27dd9ff3583562
|
/SVD.py
|
b3b9d43bf87ed32e467c43dd90c2cb2003a33466
|
[] |
no_license
|
leonxyao/Support-Vector-Machine
|
https://github.com/leonxyao/Support-Vector-Machine
|
aa989204e63efea9b9b54de2b08d94ee54273146
|
a28be2a296879e293e51a7d4a580569e60da7bdf
|
refs/heads/master
| 2020-04-16T01:01:12.740918 | 2014-03-06T05:48:20 | 2014-03-06T05:48:20 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import collections
import numpy as np
import random
import copy
import time
C=100
k=0
step = 0.0000003
epsilon = 0.25
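# Despite the filename, this script fits a linear SVM by batch (sub)gradient
# descent on the primal objective
#   f(w, b) = 0.5*||w||^2 + C * sum_i max(0, 1 - y_i*(w . x_i + b)),
# stopping once f changes by less than `epsilon` percent between iterations.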
d=122
n=6414
w = np.zeros((122))
b=0
x = np.zeros((n,d))
file = open('features.txt','r')
for index,line in enumerate(file):
for i,elem in enumerate(line.split(',')):
x[index][i] = float(elem)
y = np.zeros(n)
file = open('target.txt','r')
for index,line in enumerate(file):
y[index]=float(line)
curr_error = 10000000000
init_errors = 0
for i in range(n):
confidence = 1-y[i]*(np.dot(w,x[i])+b)
init_errors+=max(0,confidence)
prev_fk = 0.5*sum(w**2) + C*init_errors
print prev_fk
start_time = time.time()
while curr_error > epsilon:
w_prev = copy.copy(w)
for j in range(d):
gradient = 0
for i in range(n):
confidence = y[i]*(np.dot(x[i],w_prev)+b)
if confidence < 1:
gradient += -1*y[i]*x[i][j]
w[j] = w[j] - step*(w[j]+C*gradient)
gradient_b = 0
for i in range(n):
confidence = y[i]*(np.dot(x[i],w)+b)
if confidence < 1:
gradient_b += -1*y[i]
gradient_b*=C
b = b-step*gradient_b
k=k+1
errors = 0
for i in range(n):
confidence = 1-y[i]*(np.dot(w,x[i])+b)
errors+=max(0,confidence)
f_k = 0.5*sum(w**2) + C*errors
# print "ITERATION: ", k
# print f_k
curr_error = abs(((prev_fk)-f_k)/prev_fk*100)
prev_fk = f_k
print k, curr_error, f_k
print time.time()-start_time
|
UTF-8
|
Python
| false | false | 2,014 |
6,038,724,045,995 |
56ba4e0866fd189a8fd8623a4ba085493aa4b095
|
4d8366d150c7cb05d1ca09fe359cb18d37c3a4d7
|
/phillydata/violations/synchronizers.py
|
829790e11a8dc476e23cfb83cf62ed4763f6f4e0
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
fagan2888/django-phillydata
|
https://github.com/fagan2888/django-phillydata
|
9ff5b91083694e9453d786e41e189011b2cd8299
|
444a3ff9d436cd276530534642dd092402278ba5
|
refs/heads/master
| 2020-12-19T11:55:32.573183 | 2014-12-22T16:35:24 | 2014-12-22T16:35:24 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import logging
import external_data_sync
from external_data_sync.synchronizers import Synchronizer
from .adapter import find_violations
logger = logging.getLogger(__name__)
class LIViolationsSynchronizer(Synchronizer):
"""
A Synchronizer that updates L&I Violation data.
"""
# L&I says these should be the useful codes
codes = ('CP-802', 'PM-102.4/1', 'PM-302.2/4', 'PM-306.0/2', 'PM-306.0/91',
'PM-307.1/21',)
def sync(self, data_source):
logger.info('Starting to synchronize L&I Violation data.')
self.update_violation_data()
logger.info('Finished synchronizing L&I Violation data.')
def update_violation_data(self):
for code in self.codes:
find_violations(code, self.data_source.last_synchronized)
external_data_sync.register(LIViolationsSynchronizer)
|
UTF-8
|
Python
| false | false | 2,014 |
12,695,923,350,894 |
08497c52f615e590d57ca08f474ae304142296cc
|
41cd1bcff0166ed3aab28a183a2837adaa2d9a07
|
/allauth/socialaccount/providers/twitter/migrations/0004_auto__del_twitteraccount__del_twitterapp.py
|
954c475e46736e2cde1316cf74716b373f62604e
|
[
"MIT"
] |
permissive
|
thomaspurchas/django-allauth
|
https://github.com/thomaspurchas/django-allauth
|
694dde8615b90cd4768e7f9eda79fdcf6fe3cdb6
|
d7a8b9e13456180648450431057a206afa689373
|
refs/heads/master
| 2022-02-04T03:18:25.851391 | 2013-05-20T11:26:55 | 2013-05-20T11:26:55 | 7,754,028 | 1 | 0 |
MIT
| true | 2022-02-01T23:04:02 | 2013-01-22T14:44:56 | 2016-06-03T23:42:32 | 2022-02-01T23:04:01 | 918 | 1 | 0 | 13 |
Python
| false | false |
# encoding: utf-8
from south.db import db
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'TwitterAccount'
db.delete_table('twitter_twitteraccount')
# Deleting model 'TwitterApp'
db.delete_table('twitter_twitterapp')
def backwards(self, orm):
# Adding model 'TwitterAccount'
db.create_table('twitter_twitteraccount', (
('username', self.gf('django.db.models.fields.CharField')(max_length=15)),
('social_id', self.gf('django.db.models.fields.BigIntegerField')(unique=True)),
('socialaccount_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['socialaccount.SocialAccount'], unique=True, primary_key=True)),
('profile_image_url', self.gf('django.db.models.fields.URLField')(max_length=200)),
))
db.send_create_signal('twitter', ['TwitterAccount'])
# Adding model 'TwitterApp'
db.create_table('twitter_twitterapp', (
('consumer_secret', self.gf('django.db.models.fields.CharField')(max_length=80)),
('request_token_url', self.gf('django.db.models.fields.URLField')(max_length=200)),
('name', self.gf('django.db.models.fields.CharField')(max_length=40)),
('authorize_url', self.gf('django.db.models.fields.URLField')(max_length=200)),
('consumer_key', self.gf('django.db.models.fields.CharField')(max_length=80)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('access_token_url', self.gf('django.db.models.fields.URLField')(max_length=200)),
('site', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sites.Site'])),
))
db.send_create_signal('twitter', ['TwitterApp'])
models = {
}
complete_apps = ['twitter']
|
UTF-8
|
Python
| false | false | 2,013 |
9,878,424,803,150 |
4d654667fdb0e33bab03ebd296371b14c6fa9528
|
4d1fa652a81b2531422e37b59b25e51236ea873f
|
/jpegFiles.py
|
723b8e39553d08300305b4ef5304ef7d4614d7c6
|
[] |
no_license
|
lgezelius/motion
|
https://github.com/lgezelius/motion
|
63d2d4e342f5a7bcffc0ce6cd4b497c1f4b0a495
|
e26c81102bd7727c0f05da44cd3d8685112e96ed
|
refs/heads/master
| 2020-06-07T23:52:13.535577 | 2014-02-02T17:44:53 | 2014-02-02T17:44:53 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os,time,json
def get_files(directory):
file_list = []
for name in os.listdir(directory):
file_stat = os.stat(os.path.join(directory,name))
file_list.append({"name":name,"modifiedTime":file_stat.st_mtime})
return file_list
file_list = get_files("/var/www/motion")
sorted_file_list = sorted(file_list, key=lambda k: k['modifiedTime'],reverse=True)
formatted_file_list = []
for entry in sorted_file_list:   # 'entry' rather than 'file', which shadows the builtin
    formatted_file_list.append({"name":entry["name"],"modifiedTime":time.ctime(entry["modifiedTime"])})
print json.dumps(formatted_file_list)
|
UTF-8
|
Python
| false | false | 2,014 |
163,208,771,737 |
93a2c7fcceb9e8be7ad17ae05ac11283490f1eb6
|
ef187313537d520739b4918e0ef4c0b0041bf4e7
|
/C41_BodyControl/src/C41_BodyControl/Example1.py
|
bfcf29b3d41b17c455a5c0a78dd3e82dd8e4725c
|
[] |
no_license
|
robotil/robil
|
https://github.com/robotil/robil
|
e4a0a4eb92c0758101ecc1963d77142d51ed4c9c
|
257cd66266f299fd5f696cd4b5e92fa195237e47
|
refs/heads/master
| 2021-05-30T13:34:35.303549 | 2013-11-20T06:46:25 | 2013-11-20T06:46:25 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#! /usr/bin/env python
import roslib;
roslib.load_manifest('C45_PostureControl')
import rospy, math
import actionlib
import C0_RobilTask.msg
from std_msgs.msg import Float64
TASK_RESULT_REJECT=0
TASK_RESULT_OK=1
TASK_RESULT_PLAN=2
class BodyControlServer(object):
# create messages that are used to publish feedback/result
_feedback = C0_RobilTask.msg.RobilTaskFeedback()
_result = C0_RobilTask.msg.RobilTaskResult()
def __init__(self):
self._action_name = "/PostureControl"
self._as = actionlib.SimpleActionServer(self._action_name, C0_RobilTask.msg.RobilTaskAction, execute_cb=self.task)
self._as.start()
def task(self, goal):
task_success = True
task_result = TASK_RESULT_OK
task_plan = ""
# start executing the action
#### GET TASK PARAMETERS ####
rospy.loginfo("%s: Start: task name = %s",self._action_name, goal.name);
rospy.loginfo("%s: Start: task id = %s", self._action_name, goal.uid);
rospy.loginfo("%s: Start: task params = %s", self._action_name, goal.parameters);
#### HERE PROCESS TASK PARAMETERS ####
_arm = rospy.Publisher('/r_arm_ely_position_controller/command', Float64)
#### DEFINE SLEEP DURATION BETWEEN TASK LOOP ITERATIONS ####
r = rospy.Rate(100)
#### SET NUMBER OF TASK LOOP ITERATIONS ####
for i in xrange(1000):
if self._as.is_preempt_requested() or rospy.is_shutdown():
#### HERE PROICESS PREEMTION OR INTERAPT #####
rospy.loginfo('%s: Preempted' % self._action_name)
self._as.set_preempted()
task_success = False
break
#### HERE PROCESS TASK ####
t = 6 * rospy.get_time()
next_pos = 0.4 + 0.4 * math.sin(t)
_arm.publish(next_pos)
r.sleep()
if task_success:
self._result.success = task_result;
rospy.loginfo("%s: Succeeded", self._action_name);
if task_result == TASK_RESULT_PLAN:
                rospy.loginfo("%s: New plan", self._action_name);
                self._result.plan = task_plan;
self._as.set_succeeded(self._result);
else:
rospy.loginfo("%s: Aborted", self._action_name);
self._as.set_aborted(self._result);
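if __name__ == '__main__':
    # Minimal entry point (sketch; the node name is assumed) so the action
    # server defined above can actually be launched.
    rospy.init_node('body_control_server')
    BodyControlServer()
    rospy.spin()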
|
UTF-8
|
Python
| false | false | 2,013 |
11,072,425,713,672 |
7539fe564d6e00495ec1853cc7a81db20ecd2f3c
|
117ac8256b7619f88f145df18512cd7ad1e68043
|
/cloudscheduler/openstackcluster.py
|
307ef8df00d95b5404611053fbc2007219bdc56e
|
[
"LicenseRef-scancode-other-copyleft",
"Apache-2.0",
"GPL-1.0-or-later",
"LicenseRef-scancode-philippe-de-muyter",
"GPL-3.0-only"
] |
non_permissive
|
igable/cloud-scheduler
|
https://github.com/igable/cloud-scheduler
|
78f1ee5baf3e5742f8893cdad58d2e9394a5759e
|
ef9968ada09df5857f23f21e620fd0fe01235bb9
|
refs/heads/master
| 2021-01-16T18:23:15.187424 | 2014-02-12T22:04:41 | 2014-02-12T22:04:41 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
import sys
import time
import string
import shutil
import logging
import nimbus_xml
import subprocess
import cluster_tools
import cloudscheduler.config as config
import cloudscheduler.utilities as utilities
from cloudscheduler.job_management import _attr_list_to_dict
log = utilities.get_cloudscheduler_logger()
class OpenStackCluster(cluster_tools.ICluster):
VM_STATES = {
"running" : "Running",
"pending" : "Starting",
"shutting-down" : "Shutdown",
"terminated" : "Shutdown",
"error" : "Error",
}
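    # NB: the keys above are EC2-style lowercase states; the native nova API
    # reports uppercase statuses (e.g. 'ACTIVE', 'ERROR'), so status lookups
    # below fall back to the .get() default when nothing matches.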
def __init__(self, name="Dummy Cluster", host="localhost", cloud_type="Dummy",
memory=[], max_vm_mem= -1, cpu_archs=[], networks=[], vm_slots=0,
cpu_cores=0, storage=0,
access_key_id=None, secret_access_key=None, security_group=None,
username=None, password=None, tenant_name=None, auth_url=None,
hypervisor='xen', key_name=None, boot_timeout=None, secure_connection="",
regions=[], vm_domain_name="", reverse_dns_lookup=False,placement_zone=None):
# Call super class's init
cluster_tools.ICluster.__init__(self,name=name, host=host, cloud_type=cloud_type,
memory=memory, max_vm_mem=max_vm_mem, cpu_archs=cpu_archs, networks=networks,
vm_slots=vm_slots, cpu_cores=cpu_cores,
storage=storage, hypervisor=hypervisor, boot_timeout=boot_timeout)
        try:
            import novaclient.v1_1.client as nvclient
            import keystoneclient.v2_0.client as ksclient
            # keep references on the instance: the modules are imported in
            # this scope only, so module-level lookups would raise NameError
            self.nvclient = nvclient
            self.ksclient = ksclient
        except:
            print "Unable to import novaclient - cannot use native openstack cloudtypes"
            sys.exit(1)
if not security_group:
security_group = ["default"]
self.security_groups = security_group
if not access_key_id or not secret_access_key:
log.error("Cannot connect to cluster %s "
"because you haven't specified an access_key_id or "
"a secret_access_key" % self.name)
self.access_key_id = access_key_id
self.secret_access_key = secret_access_key
self.username = username
self.password = password
self.tenant_name = tenant_name
self.auth_url = auth_url
self.key_name = key_name
self.secure_connection = secure_connection in ['True', 'true', 'TRUE']
self.total_cpu_cores = -1
self.regions = regions
self.vm_domain_name = vm_domain_name if vm_domain_name != None else ""
self.reverse_dns_lookup = reverse_dns_lookup in ['True', 'true', 'TRUE']
self.placement_zone = placement_zone
def vm_create(self, vm_name, vm_type, vm_user, vm_networkassoc, vm_cpuarch,
vm_image, vm_mem, vm_cores, vm_storage, customization=None,
vm_keepalive=0, instance_type="", job_per_core=False,
securitygroup=[],key_name=""):
""" Create a VM on OpenStack."""
nova = self._get_creds_nova()
if len(key_name) > 0:
if not nova.keypairs.findall(name=key_name):
key_name = ""
try:
image = vm_image[self.name]
except:
try:
image = vm_image[self.network_address]
except:
try:
vm_default_ami = _attr_list_to_dict(config.default_VMAMI)
if self.name in vm_default_ami.keys():
image = vm_default_ami[self.name]
else:
image = vm_default_ami[self.network_address]
except:
try:
image = vm_default_ami["default"]
except:
log.exception("Can't find a suitable AMI")
return
try:
if self.name in instance_type.keys():
i_type = instance_type[self.name]
else:
i_type = instance_type[self.network_address]
except:
log.debug("No instance type for %s, trying default" % self.network_address)
try:
if self.name in self.DEFAULT_INSTANCE_TYPE_LIST.keys():
i_type = self.DEFAULT_INSTANCE_TYPE_LIST[self.name]
else:
i_type = self.DEFAULT_INSTANCE_TYPE_LIST[self.network_address]
except:
log.debug("No default instance type found for %s, trying single default" % self.network_address)
i_type = self.DEFAULT_INSTANCE_TYPE
        instance = nova.servers.create(name=vm_name, image=image, flavor=i_type, key_name=key_name)
#print instance
instance_id = instance.id
vm_mementry = self.find_mementry(vm_mem)
if (vm_mementry < 0):
#TODO: this is kind of pointless with EC2...
log.debug("Cluster memory list has no sufficient memory " +\
"entries (Not supposed to happen). Returning error.")
return self.ERROR
log.verbose("vm_create - Memory entry found in given cluster: %d" %
vm_mementry)
new_vm = cluster_tools.VM(name = vm_name, id = instance_id, vmtype = vm_type, user = vm_user,
clusteraddr = self.network_address,
cloudtype = self.cloud_type, network = vm_networkassoc,
cpuarch = vm_cpuarch, image= vm_image,
memory = vm_mem, mementry = vm_mementry,
cpucores = vm_cores, storage = vm_storage,
keep_alive = vm_keepalive, job_per_core = job_per_core)
try:
self.resource_checkout(new_vm)
except:
log.exception("Unexpected Error checking out resources when creating a VM. Programming error?")
self.vm_destroy(new_vm, reason="Failed Resource checkout")
return self.ERROR
self.vms.append(new_vm)
return 0
def vm_destroy(self, vm, return_resources=True, reason=""):
""" Destroy a VM on OpenStack."""
nova = self._get_creds_nova()
instance = nova.servers.get(vm.id)
        instance.delete()
# Delete references to this VM
if return_resources:
self.resource_return(vm)
with self.vms_lock:
self.vms.remove(vm)
return 0
def vm_poll(self, vm):
""" Query OpenStack for status information of VMs."""
nova = self._get_creds_nova()
instance = nova.servers.get(vm.id)
        with self.vms_lock:
            # novaclient servers expose .status; map it once and use the mapped
            # value consistently for both the comparison and the assignment
            new_status = self.VM_STATES.get(instance.status, "Starting")
            if vm.status != new_status:
                vm.last_state_change = int(time.time())
                log.debug("VM: %s on %s. Changed from %s to %s." % (vm.id, self.name, vm.status, new_status))
            vm.status = new_status
    def _get_creds_ks(self):
        """Get an auth token to Keystone."""
        # the imports in __init__ are local to that method and not visible
        # here, so re-import (Python caches modules, so this is cheap)
        import keystoneclient.v2_0.client as ksclient
        return ksclient.Client(username=self.username, password=self.password, auth_url=self.auth_url, tenant_name=self.tenant_name)

    def _get_creds_nova(self):
        """Get an auth token to Nova."""
        import novaclient.v1_1.client as nvclient
        return nvclient.Client(username=self.username, api_key=self.password, auth_url=self.auth_url, project_id=self.tenant_name)
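
# A minimal, self-contained usage sketch (hypothetical credentials and URL)
# showing the same novaclient call pattern that _get_creds_nova() relies on:
if __name__ == "__main__":
    import novaclient.v1_1.client as nvclient
    nova = nvclient.Client(username="demo", api_key="secret",
                           auth_url="http://keystone.example.com:5000/v2.0",
                           project_id="demo-tenant")
    for server in nova.servers.list():
        print server.id, server.status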
|
UTF-8
|
Python
| false | false | 2,014 |
7,499,012,925,519 |
0e8879d64b98b3ad7beb0189753fba2978a8e105
|
ccf72a1e34d3608cf205c752b2871d2fdc53714e
|
/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedCheetahGenerator.py
|
a75329d2a647ebf9e37958da6639b6a4e7e788ea
|
[
"BSD-2-Clause",
"mpich2",
"LicenseRef-scancode-other-permissive"
] |
non_permissive
|
ab/bcfg2
|
https://github.com/ab/bcfg2
|
2f2a78d5e42ea287bab9003d98453b4d760a6e51
|
94dc7a59c0f42fcbb4ae1e8919c1b41eae1e52ad
|
refs/heads/master
| 2021-01-15T17:41:16.775866 | 2012-09-02T17:43:31 | 2012-09-02T17:43:31 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import logging
from Bcfg2.Server.Plugins.Cfg.CfgCheetahGenerator import CfgCheetahGenerator
from Bcfg2.Server.Plugins.Cfg.CfgEncryptedGenerator import CfgEncryptedGenerator
logger = logging.getLogger(__name__)
class CfgEncryptedCheetahGenerator(CfgCheetahGenerator, CfgEncryptedGenerator):
__extensions__ = ['cheetah.crypt', 'crypt.cheetah']
def handle_event(self, event):
CfgEncryptedGenerator.handle_event(self, event)
def get_data(self, entry, metadata):
return CfgCheetahGenerator.get_data(self, entry, metadata)
|
UTF-8
|
Python
| false | false | 2,012 |
13,151,189,904,616 |
5c01d28b952a92ae9f2da222d05d401eb7773025
|
69196bfb6ee4bdac95208ea8be60d699144b8099
|
/easycare/easycare/config/development.py
|
392d7207845558d3bea25f80e6820311986ab0df
|
[] |
no_license
|
chaiyawutso/easycare
|
https://github.com/chaiyawutso/easycare
|
8fd3ba08eb612ef61a162e809319f3728e5bdefe
|
e26c93405301b8c2ad62ac2f32306f8a7d51b0cd
|
refs/heads/master
| 2020-04-11T03:52:48.877514 | 2013-05-09T03:36:17 | 2013-05-09T03:36:17 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
DATABASE_PATH = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', '..'))
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': os.path.join(DATABASE_PATH, 'easycare.db'), # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'south',
'frontend',
'djcelery',
'djcelery_email',
)
DEBUG = True
TEMPLATE_DEBUG = DEBUG
import djcelery
djcelery.setup_loader()
EMAIL_BACKEND = 'djcelery_email.backends.CeleryEmailBackend'
|
UTF-8
|
Python
| false | false | 2,013 |
7,971,459,314,763 |
0422dd4c7a10f515199a79f3c69b0b12dac55d91
|
56df4e64da800a99206b2879aabd5d3c4b2c57b7
|
/scenarios/supercars/supercars_create.py
|
a5b50c48c41ec8eb2b3cd64e31cef76380193a77
|
[
"MIT"
] |
permissive
|
kaeru-repo/kaeru
|
https://github.com/kaeru-repo/kaeru
|
7e5a63a19b3ff1d79674b70c5253317702085aee
|
b3d7fb58ea25e6c8e4f9a1c09427887d7c4cdd3f
|
refs/heads/master
| 2016-05-26T09:18:48.693038 | 2014-05-10T14:29:10 | 2014-05-10T14:29:10 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.common.by import By
from hamcrest import assert_that, equal_to, greater_than, is_, contains_string
import random
class SupercarsCreateTest():
def __init__(self, log, thinktime, url, queue, hub=None, webdriver=None):
self.log = log
self.thinktime = thinktime
self.url = url
self.hub = hub
self.webdriver = webdriver
self.queue = queue
def setup(self):
if self.webdriver:
from selenium import webdriver
self.browser = getattr(webdriver, self.webdriver)()
else:
self.browser = WebDriver(command_executor=str(self.hub),
desired_capabilities=DesiredCapabilities.PHANTOMJS)
def run(self):
# test_url
self.log('start_page', 'start')
self.browser.get(self.url)
assert_that(self.browser.title, is_('Angular Supercars'))
# check that it has more than 10 elements
entries = self.browser.find_elements(By.CSS_SELECTOR, 'div#sidebar ul li a')
assert_that(len(entries), greater_than(10))
self.log('start_page', 'end')
self.thinktime(5)
# create
self.log('enter_details_page', 'start')
self.browser.find_element(By.ID, 'newButton').click()
name = 'PerfTest_%d' % random.randint(0,99999)
self.browser.find_element(By.ID, 'nameInput').send_keys(name)
self.browser.find_element(By.ID, 'countryInput').send_keys('Italy')
self.browser.find_element(By.ID, 'topSpeedInput').send_keys('218')
self.browser.find_element(By.ID, 'powerInput').send_keys('650')
self.browser.find_element(By.ID, 'engineInput').send_keys('5998')
self.browser.find_element(By.ID, 'weightInput').send_keys('1365')
self.browser.find_element(By.ID, 'imageInput').send_keys('050.jpg')
self.browser.find_element(By.ID, 'descriptionTextarea').send_keys('created by performance test')
self.browser.find_element(By.ID, 'saveButton').click()
# verify that the new car is contained in the list
assert_that(self.browser.find_element(By.CSS_SELECTOR, 'div#sidebar').text, contains_string(name))
self.log('enter_details_page', 'end')
# add the new car to the queue
self.queue.lpush(name)
def teardown(self):
self.browser.quit()
|
UTF-8
|
Python
| false | false | 2,014 |
8,452,495,671,257 |
423e59de76d56eeefb4830a814bb508cc9c34017
|
1dac401ae4fd4bd7bf6b9a70c9e840499303a923
|
/SourceCode/Python/Gmerg/Common/lib/python/hgu/externalDataSource/imageConsortium/__init__.py
|
7ce39321faf75c29acd3e2312c211a32ffd5cd63
|
[
"CC-BY-3.0"
] |
non_permissive
|
ma-tech/Anatomy
|
https://github.com/ma-tech/Anatomy
|
c1f9695513092b43dfa42b50a554e19864cad2df
|
67d78da86a7c0e4f055427ff0afa278a381e0e3d
|
refs/heads/master
| 2021-01-10T21:39:48.228529 | 2014-05-01T12:17:49 | 2014-05-01T12:17:49 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env /usr/bin/python
# -*- coding: iso-8859-15 -*-
#-------------------------------------------------------------------
#
# Module initialisation for the IMAGE Consortium library.
#
__all__ = ["ImageClone"]
|
UTF-8
|
Python
| false | false | 2,014 |
5,746,666,245,870 |
4ac19089df2247af1e9785f7d1476d5c601c4d66
|
9d7ccc17af76678b1823ae5351ade5d4c29293e2
|
/rapid/filemodify.py
|
49a4d24fe32fbc1fa24ab9a727ccb73b13040a58
|
[
"GPL-2.0-only"
] |
non_permissive
|
gns-ank/rapid-photo-downloader
|
https://github.com/gns-ank/rapid-photo-downloader
|
5aa7ec1ae2b040ce9cb8c583d1a466e6096e603f
|
c9f7e77f9ce116e76eee44872fb70d8627acaaa2
|
refs/heads/master
| 2015-08-11T07:52:50.925766 | 2014-02-23T16:52:54 | 2014-02-23T16:52:54 | 18,941,314 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
# -*- coding: latin1 -*-
### Copyright (C) 2011-2014 Damon Lynch <[email protected]>
### This program is free software; you can redistribute it and/or modify
### it under the terms of the GNU General Public License as published by
### the Free Software Foundation; either version 2 of the License, or
### (at your option) any later version.
### This program is distributed in the hope that it will be useful,
### but WITHOUT ANY WARRANTY; without even the implied warranty of
### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
### GNU General Public License for more details.
### You should have received a copy of the GNU General Public License
### along with this program; if not, write to the Free Software
### Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
### USA
import os.path, fractions
import subprocess
import hashlib
import multiprocessing
import logging
logger = multiprocessing.get_logger()
import rpdmultiprocessing as rpdmp
import rpdfile
import metadataxmp as mxmp
import subfolderfile
import config
import problemnotification as pn
from gettext import gettext as _
WRITE_XMP_INPLACE = rpdfile.NON_RAW_IMAGE_EXTENSIONS + ['dng']
def lossless_rotate(jpeg):
"""using exiftran, performs a lossless, inplace translation of a jpeg, preserving time stamps"""
try:
logger.debug("Auto rotating %s", jpeg)
proc = subprocess.Popen(['exiftran', '-a', '-i', '-p', jpeg], stdout=subprocess.PIPE)
v = proc.communicate()[0].strip()
except OSError:
v = None
return v
class FileModify(multiprocessing.Process):
def __init__(self, auto_rotate_jpeg, focal_length, verify_file,
refresh_md5_on_file_change, results_pipe, terminate_queue,
run_event):
multiprocessing.Process.__init__(self)
self.results_pipe = results_pipe
self.terminate_queue = terminate_queue
self.run_event = run_event
self.auto_rotate_jpeg = auto_rotate_jpeg
self.focal_length = focal_length
self.verify_file = verify_file
self.refresh_md5_on_file_change = refresh_md5_on_file_change
def check_termination_request(self):
"""
        Check whether this process has been requested to terminate immediately
"""
if not self.terminate_queue.empty():
x = self.terminate_queue.get()
# terminate immediately
return True
return False
def create_rational(self, value):
return '%s/%s' % (value.numerator, value.denominator)
def run(self):
download_count = 0
copy_finished = False
while not copy_finished:
logger.debug("Finished %s. Getting next task.", download_count)
data = self.results_pipe.recv()
if len(data) > 2:
rpd_file, download_count, temp_full_file_name, thumbnail_icon, thumbnail, copy_finished = data
else:
rpd_file, copy_finished = data
if rpd_file is None:
# this is a termination signal
logger.info("Terminating file modify via pipe")
return None
# pause if instructed by the caller
self.run_event.wait()
if self.check_termination_request():
return None
copy_succeeded = True
redo_md5 = False
if self.verify_file:
logger.debug("Verifying file %s....", rpd_file.name)
            # open in binary mode so the hash is byte-accurate on all platforms
            md5 = hashlib.md5(open(temp_full_file_name, 'rb').read()).hexdigest()
            if md5 != rpd_file.md5:
logger.critical("%s file verification FAILED", rpd_file.name)
logger.critical("The %s did not download correctly!", rpd_file.title)
rpd_file.status = config.STATUS_DOWNLOAD_FAILED
rpd_file.add_problem(None, pn.FILE_VERIFICATION_FAILED,
{'filetype': rpd_file.title})
rpd_file.error_title = rpd_file.problem.get_title()
rpd_file.error_msg = _("%(problem)s\nFile: %(file)s") % \
{'problem': rpd_file.problem.get_problems(),
'file': rpd_file.full_file_name}
copy_succeeded = False
else:
logger.debug("....file %s verified", rpd_file.name)
if copy_succeeded:
if self.auto_rotate_jpeg and rpd_file.file_type == rpdfile.FILE_TYPE_PHOTO:
if rpd_file.extension in rpdfile.JPEG_EXTENSIONS:
lossless_rotate(rpd_file.temp_full_file_name)
redo_md5 = True
xmp_sidecar = None
# check to see if focal length and aperture data should be manipulated
if self.focal_length is not None and rpd_file.file_type == rpdfile.FILE_TYPE_PHOTO:
if subfolderfile.load_metadata(rpd_file, temp_file=True):
a = rpd_file.metadata.aperture()
if a == '0.0':
logger.info("Adjusting focal length and aperture for %s (%s)", rpd_file.temp_full_file_name, rpd_file.name)
new_focal_length = fractions.Fraction(self.focal_length,1)
new_aperture = fractions.Fraction(8,1)
if rpd_file.extension in WRITE_XMP_INPLACE:
try:
rpd_file.metadata["Exif.Photo.FocalLength"] = new_focal_length
rpd_file.metadata["Exif.Photo.FNumber"] = new_aperture
rpd_file.metadata.write(preserve_timestamps=True)
redo_md5 = True
logger.debug("Wrote new focal length and aperture to %s (%s)", rpd_file.temp_full_file_name, rpd_file.name)
except:
logger.error("failed to write new focal length and aperture to %s (%s)!", rpd_file.temp_full_file_name, rpd_file.name)
else:
# write to xmp sidecar
xmp_sidecar = mxmp.XmpMetadataSidecar(rpd_file.temp_full_file_name)
xmp_sidecar.set_exif_value('FocalLength', self.create_rational(new_focal_length))
xmp_sidecar.set_exif_value('FNumber', self.create_rational(new_aperture))
# store values in rpd_file, so they can be used in the subfolderfile process
rpd_file.new_focal_length = new_focal_length
rpd_file.new_aperture = new_aperture
if xmp_sidecar is not None:
# need to write out xmp sidecar
o = xmp_sidecar.write_xmp_sidecar()
logger.debug("Wrote XMP sidecar file")
logger.debug("exiv2 output: %s", o)
rpd_file.temp_xmp_full_name = rpd_file.temp_full_file_name + '.xmp'
if self.refresh_md5_on_file_change and redo_md5:
                rpd_file.md5 = hashlib.md5(open(temp_full_file_name, 'rb').read()).hexdigest()
rpd_file.metadata = None #purge metadata, as it cannot be pickled
self.results_pipe.send((rpdmp.CONN_PARTIAL,
(copy_succeeded, rpd_file, download_count,
temp_full_file_name,
thumbnail_icon, thumbnail)))
self.results_pipe.send((rpdmp.CONN_COMPLETE, None))
|
UTF-8
|
Python
| false | false | 2,014 |
19,250,043,434,546 |
ef39c1360a4ee2e3819fdc65f5df91ddbc727861
|
14e47e4ec708400db173884e0e980007389a4f0d
|
/management/views/__init__.py
|
f3d2bc8eb448960a0005a5ca1119bcf8e8108a43
|
[] |
no_license
|
Frai/semcomp-next
|
https://github.com/Frai/semcomp-next
|
974a5ab8502291145e6452252a27c1a83513a14a
|
63abd0b22b05855684c18b71c1e53a64ca4629fa
|
refs/heads/master
| 2017-11-29T13:57:49.280955 | 2014-12-03T16:44:20 | 2014-12-03T16:44:20 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.shortcuts import render
from semcomp_contact_form.models import Message
from ..decorators import staff_required
from .messages import *
from .events import *
from .places import *
from .lectures import *
from .users import *
from .courses import *
from .companies import *
from .processes import *
from .business import *
from .attendance import *
from .config import *
@staff_required
def manage_overview(request):
context = {
'active_overview': True,
'unanswered_messages': Message.objects.unanswered()
}
return render(request, 'management/overview.html', context)
|
UTF-8
|
Python
| false | false | 2,014 |
7,971,459,335,973 |
7ea6bd13587827edeaa2bbfa9a34eed6fea270a4
|
06d0821423dd3970436b888259b366afdb7f73e9
|
/encapsulation/main.py
|
7413e652074b8d63c8eeece55e723e4cc3595b89
|
[] |
no_license
|
AdrianSane/Learning-Python
|
https://github.com/AdrianSane/Learning-Python
|
795e2c6757aa39406ef3776b4df8faeed06a9d02
|
13e64facfac7e0e9b8dcad789d16210095078bcf
|
refs/heads/master
| 2015-08-11T23:10:15.811715 | 2014-09-27T19:19:02 | 2014-09-27T19:19:02 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
ENCAPSULATION:
used for restricting information
keeping things private
protects information from others and yourself
NOTE:
one of the ways I can encapsulate is by using properties
properties are special kinds of attributes
created when i need to give special access to a particular attribute
created when i need to regulate the access to that attribute
attributes are a special kind of variable
properties are a special kind of attribute
this is all done by using a special object called a decorator
@decorator - designed to tell python that this function will be treated like a variable
using decorators will make functions be treated like variables
NOTE:
the following tell python that its currently dealing with a property
DECORATORS FOR PROPERTIES
GETTER
@property - property decorator - makes getters
-let people access data
-special function designed to get data
-can be made to be READ ONLY
-if you only have a getter it will be read only
SETTER
@x.setter - setter decorator - a different function to access your property
-makes the property readable AND writeable
-require you to have a getter
-x = the name of the function or attribute that i am accessing
-special function designed to change data
-setters are WRITE ONLY
@x.deleter - for memory management
getters and setters are helper functions
decorators mark functions / methods for their different pieces of information
Why getters and setters solve the problem of privacy and encapsulation:
-you can validate information before it changes important attributes
-setters can check values
-if the value is incorrect the setter can throw an error
-or-
-the setter can correct the value
-helps to update information
-you can set information to be read only or write only
-good for information you dont want to be altered
-but-
-you want that information to be seen
-or-
-vice versa, where you want the information to be altered but not seen
-example:
credit card information
passwords
-getter with NO setter = read only
-setter with NO getter = write only (empty getter)
-you can do more when a property / attribute is accessed
-when a value is changed, it can do more lines of code
-since getters and setters are functions, they can do more lines of code when called (which is more than you can do with a simple variable)
-setter is used to change information in a class
-display is updated right away
encapsulation is the equivalent of locking a door
its not intended to keep anybody out if they insisted on entering
"""
import webapp2
class MainHandler(webapp2.RequestHandler):
def get(self):
self.response.write('Hello world!')
#getters and setters are treated like variables here
lang = LanguageGreeting() #invokes the initializing function from the LanguageGreeting() class
#the greeting being changed
lang.greeting = "oy!" #uses SETTER no parenthesies on greeting because i am not treating the getters and setters as functions, they are treated like they are variables
#the greeting being printed
print lang.greeting #uses GETTER function to return the attribute
class LanguageGreeting(object):
def __init__(self):
self.__greeting = "Hello" #this is an attribute instead of it equaling "Hello", it will equal "Oy!" because of the setter changing its value
@property #this is the getter, getters are always going to return
#this is a function that is returning the value of the attribute
#it creates an avenue of access to the attribute
#this is a function called greeting, notice there are no underscores making this a public function / method
    def greeting(self):
        # a getter takes only self; no value parameter belongs here
#this is the attribute of the function
return self.__greeting
@greeting.setter #use a piece of information to change a value
def greeting(self, value):
#recieve the value, and change the attribute
self.__greeting = value #the value is equal to the value of the call from lang.greeting = "oy!"
app = webapp2.WSGIApplication([
('/', MainHandler)
], debug=True)
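
# A minimal, self-contained sketch (separate from the webapp2 example above)
# of a read-only property: there is a getter but no setter, so assigning to
# .greeting raises an AttributeError.
class ReadOnlyGreeting(object):
    def __init__(self):
        self.__greeting = "Hello"

    @property
    def greeting(self):
        return self.__greeting

# usage sketch:
#   g = ReadOnlyGreeting()
#   print g.greeting      # "Hello"
#   g.greeting = "oy!"    # AttributeError: can't set attribute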
|
UTF-8
|
Python
| false | false | 2,014 |
11,149,735,109,995 |
0a27ddc434e5f1925d45ac05e807331f341fd182
|
a58246305f03d717abeabd7ee273978cd588e786
|
/expint/methods/ExpForwardEuler.py
|
6cbfad629d6fca6011cf8f7a9d7607973a8afb7a
|
[] |
no_license
|
raoulbq/ExpInt
|
https://github.com/raoulbq/ExpInt
|
acfc2458df04334a2291a2b94a8ea0610c629a64
|
7494addcc18e340ab248d05942fc3ad815f178c7
|
refs/heads/master
| 2021-01-17T21:54:13.546223 | 2012-05-06T12:33:45 | 2012-05-06T12:33:45 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- encoding: utf-8 -*-
from expint.methods.Method import ExponentialMethod
from numpy.linalg import solve
import numpy as np  # needed below for eye/zeros/dot/double/array
class ExpForwardEuler(ExponentialMethod):
"""
Implements the Exponential Forward Euler method:
u_{n+1} = exp(h_n*A)*u_n + h_n*phi_1(h_n*A)*g(u_n)
Uses the standard Padé approximation to the matrix exponential
"""
@classmethod
def name(self):
return "ExpForwardEuler"
def phi1(self,mat,h):
return solve(h*mat,(self.matexp(h*mat)-np.eye(mat.shape[0])))
def integrate(self, x0, t0, tend, N=100):
        """t,y = integrate(x0, t0, tend, [N])
        N: number of timesteps"""
        # flagged by the author as an incorrect implementation, so bail out
        raise Exception("incorrect implementation")
h = np.double(tend-t0)/N
t = np.zeros(N+1); t[0]=t0
x = x0; y = [x0]
for i in range(N):
# build matrix for nxn problem
if x.shape == ():
n = 1
else:
n = x.shape[0]
A = np.zeros((n,n))
for j in range(n):
e = np.zeros(n); e[j] = 1
A[:,j] = self.rhs.ApplyDf(x,e)
expAh = self.matexp(h*A)
            # phi1(mat, h) multiplies by h internally, so pass A here to
            # evaluate phi_1(h*A) rather than phi_1(h*h*A)
            x = np.dot(expAh, x) + h*np.dot(self.phi1(A, h), self.rhs.Applyg(x))
y.append(x)
t[i+1] = t[i]+h
return t,np.array(y)
|
UTF-8
|
Python
| false | false | 2,012 |
8,126,078,162,784 |
0ca14cd4c3165102381683a095dcefdf701a4beb
|
94eb67b4696e45d0f7cedff3ce6ee761e80372ce
|
/pyethereum/__init__.py
|
fd6b70f1854cf249e5a11b3e9293129496dadee8
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
non_permissive
|
ethers/pyethereum
|
https://github.com/ethers/pyethereum
|
2852f8f43ca26211ee883df558ead9f279d8f485
|
13062b52150d67bfa77c52a9cb046c8508321720
|
refs/heads/master
| 2021-01-22T10:07:50.646347 | 2014-08-24T19:24:18 | 2014-08-24T19:24:18 | 18,871,881 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import rlp
import utils
import trie
import blocks
import transactions
import processblock
import tester
|
UTF-8
|
Python
| false | false | 2,014 |
12,137,577,610,663 |
f9abf7f75a7e6b28afc0acccf78cea3be78eb6bd
|
d0728bdc15d1029bf0415ad83ecd305e72901d95
|
/ming/odm/__init__.py
|
182ad725be180e63e0116f952bd4bfa878163205
|
[] |
no_license
|
amol-/ming
|
https://github.com/amol-/ming
|
8d8256417ee9ef6f5c1ceacecbdf21d0dc942875
|
04c1e2c3a35662ed2938f49684c57ab3611c6d70
|
refs/heads/master
| 2020-05-19T21:27:15.706411 | 2013-11-28T23:31:53 | 2013-11-28T23:31:53 | 2,355,948 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from ming.odm.base import state, session
from ming.odm.mapper import mapper, Mapper, MapperExtension
from ming.odm.property import RelationProperty, ForeignIdProperty
from ming.odm.property import FieldProperty, FieldPropertyWithMissingNone
from ming.odm.odmsession import ODMSession, ThreadLocalODMSession, SessionExtension
from ming.odm.odmsession import ContextualODMSession
ORMSession=ODMSession
ThreadLocalORMSession=ThreadLocalODMSession
ContextualORMSession=ContextualODMSession
|
UTF-8
|
Python
| false | false | 2,013 |
15,728,170,282,612 |
c2092425df0ccb34a664e33fdd6871381a30cf37
|
442c20ee40468d8feca122916a3e83b338bfff5b
|
/section/section03/Section 3 - Worksheet style.py
|
593fb1d9bcc2929f651e8b691e2ed6dddf0e59cf
|
[] |
no_license
|
obnorthrup/ComputationalMethods-O
|
https://github.com/obnorthrup/ComputationalMethods-O
|
611f94ee0d5a68b601741022a737e9974e6abd8e
|
8c4d8344cd9dfa995b0f5be3167e8a8e38d7e70c
|
refs/heads/master
| 2021-01-17T22:28:45.762070 | 2013-12-04T18:00:50 | 2013-12-04T18:00:50 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <markdowncell>
# # Welcome to week 3!
#
# #### Plan
#
# - Dictionaries
# - Classes
# <markdowncell>
# ## Fun with dictionaries
#
# What does this code do?
# <codecell>
def getMobyDickWords():
mobydick = """Call me Ishmael. Some years ago -- never mind how long precisely--having
little or no money in my purse, and nothing particular to interest me on
shore, I thought I would sail about a little and see the watery part of
the world. It is a way I have of driving off the spleen and regulating
the circulation. Whenever I find myself growing grim about the mouth;
whenever it is a damp, drizzly November in my soul; whenever I find
myself involuntarily pausing before coffin warehouses, and bringing up
the rear of every funeral I meet; and especially whenever my hypos get
such an upper hand of me, that it requires a strong moral principle to
prevent me from deliberately stepping into the street, and methodically
knocking people's hats off -- then, I account it high time to get to sea
as soon as I can. This is my substitute for pistol and ball. With a
philosophical flourish Cato throws himself upon his sword; I quietly
take to the ship. There is nothing surprising in this. If they but knew
it, almost all men in their degree, some time or other, cherish very
nearly the same feelings towards the ocean with me.
There now is your insular city of the Manhattoes, belted round by
wharves as Indian isles by coral reefs -- commerce surrounds it with
her surf. Right and left, the streets take you waterward. Its extreme
downtown is the battery, where that noble mole is washed by waves, and
cooled by breezes, which a few hours previous were out of sight of land.
Look at the crowds of water-gazers there."""
normedText = mobydick.replace("\n", " ")
punctuation = ";:,.?!-"
for punct in punctuation:
normedText = normedText.replace(punct, "")
return filter(lambda x: x != "", normedText.split(" "))
#def notEmpty(string):
# return string != ""
# <codecell>
words = getMobyDickWords()
print words
# <markdowncell>
# Let's make a dictionary of word frequencies, like from lecture last Wednesday
# <codecell>
frequencies = {}
# Your code here. How do we loop through the words in the 'words' list?
print frequencies
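
# <codecell>

# One possible solution sketch, tallying with dict.get() and a default of 0:
frequencies = {}
for word in words:
    frequencies[word] = frequencies.get(word, 0) + 1
print frequencies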
# <markdowncell>
# Now let's get the frequency of the first letters. First, how do we drill down to get at words and their frequencies in the dict we just made?
# <codecell>
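
# One way to drill down (sketch): loop over the dict's key/value pairs.
for word, count in frequencies.items():
    print word, count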
# <codecell>
frequenciesLetters = {}
# Your code here. Get the words from our other dictionary and make a new one for the first letters from it.
print frequenciesLetters
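
# <codecell>

# One possible solution sketch: tally first letters, weighting each word by
# its frequency from the dictionary built above.
frequenciesLetters = {}
for word, count in frequencies.items():
    first = word[0].lower()
    frequenciesLetters[first] = frequenciesLetters.get(first, 0) + count
print frequenciesLetters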
# <markdowncell>
# We can sort frequenciesLetters by the keys (i.e., the letters) pretty easily.
# <codecell>
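
# One way (sketch): sorted() over a dict iterates its keys in sorted order.
for letter in sorted(frequenciesLetters):
    print letter, frequenciesLetters[letter]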
# <codecell>
# <markdowncell>
# But how can we sort by frequency?
# <codecell>
# <codecell>
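
# One way (sketch): sort the (letter, count) pairs by count, descending.
for letter, count in sorted(frequenciesLetters.items(), key=lambda pair: pair[1], reverse=True):
    print letter, count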
# <markdowncell>
# ## Dictionaries of dictionaries (yo dawg)
#
# The dictionary below holds verbs. Each verb is the key for a dictionary of its thematic roles. Each thematic role maps to a list of the sentence structures that the role can appear in with the verb in question.
# <codecell>
thetaMap = {}
thetaMap["destroy"] = {}
thetaMap["destroy"]["Agent"] = [("S", "NP V NP"), ("S", "NP V NP PP.INSTRUMENT")]
thetaMap["destroy"]["Patient"] = [("S", "NP V NP"), ("S", "NP V NP PP.INSTRUMENT"), ("S", "PP.INSTRUMENT V NP")]
# <codecell>
thetaMap
# <markdowncell>
# Next, let's use this for something. Let's see which syntactic structures involving the verb 'destroy' have both an Agent and a Patient.
# <codecell>
def struxOverlap(verb, role1, role2):
"""
Lists possible syntactic structures containing a given verb and two given thematic arguments
Arguments:
verb (string), a verb key in thetaMap
role1 (string), a theta role key in thetaMap[verb]
role2 (string), another theta role key in thetaMap[verb]
Returns:
overlap (list), the structures shared by thetaMap[verb][role1] and thetaMap[verb][role2]
Raises:
nothing
"""
    # structures shared by both role lists, as the docstring specifies
    overlap = [strux for strux in thetaMap[verb][role1] if strux in thetaMap[verb][role2]]
    return overlap
if __name__ == "__main__":
print struxOverlap("destroy", "Agent", "Patient")
# <markdowncell>
# ## Building classes
#
# Recalling the basics from yesterday: Let's make a simple class and give it an attribute.
# <codecell>
# <markdowncell>
# Let's give it a method or two.
# <codecell>
# <markdowncell>
# Ok, let's make better use of this. I want to make a Coin class with a weight (prob of heads) and a method to flip it that returns 'H' or 'T'
# <codecell>
# <codecell>
# <codecell>
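
# A minimal sketch of one possible Coin class: weight is the probability of
# heads, and flip() compares random.random() against it.
import random

class Coin(object):
    def __init__(self, weight=0.5):
        self.weight = weight  # probability of landing heads

    def flip(self):
        return "H" if random.random() < self.weight else "T"

biased = Coin(0.8)
print [biased.flip() for i in range(10)]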
# <markdowncell>
# Anyway, that's not linguistics. Let's make a more useful class!
# <markdowncell>
# ## Noun class
#
# Let's build a class called **Noun**. It should do the following:
#
# - Nouns have singular and plural forms, a mass/count distinction, (and something else of your choice)
# - All of these should work:
# - `apple = Noun("apple")`
# `goose = Noun("goose","geese")`
# `gohan = Noun("rice",mass=True)`
# - I.e., If we don't specify a plural, it should be set automatically, and nouns are count unless specified
# - Printing an instance of Noun should tell you everything you know about it, in a readable way.
# - The method `isCount()` should tell you whether the noun is mass or count.
# - The method `thereAre(int)` should pluralize correctly, given how many of the noun there are:
# - `apple.thereAre(3)` returns the string `"3 apples"`
# - `apple.thereAre(1)` returns the string `"1 apple"`
# - `gohan.thereAre(4)` returns the string `"'rice' is a mass noun"`
# - There should be a method that interacts with the extra attribute you made, or changes something about the noun. Maybe a quick way to regularize the plural? (One possible solution sketch appears at the end of this worksheet.)
# <codecell>
class Noun(object):
    # Your code here. You'll need to add __init__, __str__, isCount, and thereAre, at least.
    pass  # placeholder so the stub class is syntactically valid
def testCases():
'''Makes entries for "apple", "goose", and "rice"'''
apple = Noun("apple")
print apple
print apple.isCount()
print apple.thereAre(4)
print apple.thereAre(1)
print apple.thereAre(0)
print
goose = Noun("goose","geese")
print goose
print goose.isCount()
print goose.thereAre(4)
print goose.thereAre(1)
print goose.thereAre(0)
print
gohan = Noun("rice",mass=True)
print gohan
print gohan.isCount()
print gohan.thereAre(4)
print gohan.thereAre(1)
print gohan.thereAre(0)
if __name__ == "__main__":
testCases()
# <codecell>
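
# <markdowncell>

# One possible solution sketch for the Noun class specified above. The extra
# `gloss` attribute and the `regularize()` helper are invented choices for the
# open-ended parts of the exercise; yours may differ.

# <codecell>

class Noun(object):
    def __init__(self, singular, plural=None, mass=False, gloss=""):
        self.singular = singular
        # default to the regular -s plural when none is given
        self.plural = plural if plural is not None else singular + "s"
        self.mass = mass
        self.gloss = gloss  # extra attribute: a short definition

    def __str__(self):
        kind = "count" if self.isCount() else "mass"
        return "%s (plural: %s, %s noun) %s" % (self.singular, self.plural, kind, self.gloss)

    def isCount(self):
        return not self.mass

    def thereAre(self, n):
        if self.mass:
            return "'%s' is a mass noun" % self.singular
        if n == 1:
            return "1 %s" % self.singular
        return "%d %s" % (n, self.plural)

    def regularize(self):
        """Reset the plural to the regular -s form."""
        self.plural = self.singular + "s"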
|
UTF-8
|
Python
| false | false | 2,013 |
18,708,877,548,981 |
6912afd9dd8696a5f6d98353d7ae14df7f649ff7
|
dc29bb49662311c0efd7a89dbfc569fdc04fdec4
|
/gpio/testparser.py
|
110f8132dcc38b74282e3a12c4f9853b83347816
|
[] |
no_license
|
mathyomama/the-git-folder
|
https://github.com/mathyomama/the-git-folder
|
fdece3fe6702093d16963b431aa710b94276eae5
|
09829540e3ec5606eb7d376c864dcd0bf152dc86
|
refs/heads/master
| 2020-06-02T07:49:22.713116 | 2013-10-05T05:02:52 | 2013-10-05T05:02:52 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import feedparser
# feedparser's entry point is parse(); the http:// scheme is assumed here
d = feedparser.parse("http://feeds.huffingtonpost.com/HP/MostPopular")
print d['feed']['title']
|
UTF-8
|
Python
| false | false | 2,013 |
8,315,056,712,517 |
550687178594d5d42ac205bb047d61749857a5c2
|
ffa8a728f43b6de2b9a4dbfda18f3eb8518fbbbd
|
/snmp-mibs/JUNIPER-MOBILE-GATEWAY-SM-IP-POOL-MIB.py
|
4259dd49540913fd1d0a9c5fbf09b7c34666763d
|
[] |
no_license
|
oriordan/pysnmp_mibs
|
https://github.com/oriordan/pysnmp_mibs
|
60e0d80e3f50490d9e6ab29d21627fec59ab0cfc
|
92d39abf358a952e55a426e2a4658f4b0824182f
|
refs/heads/master
| 2021-01-09T23:37:59.137750 | 2014-11-26T20:07:28 | 2014-11-26T20:07:28 | 20,253,987 | 11 | 15 | null | false | 2020-07-26T02:49:32 | 2014-05-28T10:43:18 | 2019-08-04T13:55:08 | 2016-04-20T23:27:59 | 4,111 | 12 | 9 | 1 |
Python
| false | false |
# PySNMP SMI module. Autogenerated from smidump -f python JUNIPER-MOBILE-GATEWAY-SM-IP-POOL-MIB
# by libsmi2pysnmp-0.1.3 at Thu May 22 11:57:54 2014,
# Python version sys.version_info(major=2, minor=7, micro=2, releaselevel='final', serial=0)
# Imports
( Integer, ObjectIdentifier, OctetString, ) = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
( NamedValues, ) = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
( ConstraintsIntersection, ConstraintsUnion, SingleValueConstraint, ValueRangeConstraint, ValueSizeConstraint, ) = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ConstraintsUnion", "SingleValueConstraint", "ValueRangeConstraint", "ValueSizeConstraint")
( InetAddress, InetAddressPrefixLength, InetAddressType, ) = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddress", "InetAddressPrefixLength", "InetAddressType")
( jnxMbgGwIndex, jnxMbgGwName, ) = mibBuilder.importSymbols("JUNIPER-MOBILE-GATEWAYS", "jnxMbgGwIndex", "jnxMbgGwName")
( jnxMobileGatewayMibRoot, ) = mibBuilder.importSymbols("JUNIPER-SMI", "jnxMobileGatewayMibRoot")
( Bits, Counter64, Gauge32, Integer32, ModuleIdentity, MibIdentifier, NotificationType, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, Unsigned32, ) = mibBuilder.importSymbols("SNMPv2-SMI", "Bits", "Counter64", "Gauge32", "Integer32", "ModuleIdentity", "MibIdentifier", "NotificationType", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "Unsigned32")
( DisplayString, TextualConvention, ) = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
# Objects
jnxMobileGatewayPgwSMIPPoolMib = ModuleIdentity((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5)).setRevisions(("2011-11-15 12:00","2011-01-13 12:00",))
if mibBuilder.loadTexts: jnxMobileGatewayPgwSMIPPoolMib.setOrganization("Juniper Networks, Inc.")
if mibBuilder.loadTexts: jnxMobileGatewayPgwSMIPPoolMib.setContactInfo("Juniper Technical Assistance Center\nJuniper Networks, Inc.\n1194 N. Mathilda Avenue\nSunnyvale, CA 94089\nE-mail: [email protected]")
if mibBuilder.loadTexts: jnxMobileGatewayPgwSMIPPoolMib.setDescription("This module defines objects pertaining to Mobile-Edge Subscriber\nManagement IP pool Services")
jnxMbgSMIPPoolNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 0))
jnxMbgSMIPPoolObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 1))
jnxMbgSMIPPoolTable = MibTable((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 1, 1))
if mibBuilder.loadTexts: jnxMbgSMIPPoolTable.setDescription("The table lists local address pools and their statistics")
jnxMbgSMIPPoolEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 1, 1, 1)).setIndexNames((0, "JUNIPER-MOBILE-GATEWAY-SM-IP-POOL-MIB", "jnxMbgSMIPPoolLogicalSystem"), (0, "JUNIPER-MOBILE-GATEWAY-SM-IP-POOL-MIB", "jnxMbgSMIPPoolRoutingInstance"), (0, "JUNIPER-MOBILE-GATEWAY-SM-IP-POOL-MIB", "jnxMbgSMIPPoolName"))
if mibBuilder.loadTexts: jnxMbgSMIPPoolEntry.setDescription("An entry representing a single address range or prefix entry \nin the pool")
jnxMbgSMIPPoolName = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 1, 1, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 64))).setMaxAccess("noaccess")
if mibBuilder.loadTexts: jnxMbgSMIPPoolName.setDescription("A name which identifies this pool on the mobile-gateway.")
jnxMbgSMIPPoolLogicalSystem = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 1, 1, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 64))).setMaxAccess("noaccess")
if mibBuilder.loadTexts: jnxMbgSMIPPoolLogicalSystem.setDescription("A name which identifies the logical-system to which the address \npool belongs.")
jnxMbgSMIPPoolRoutingInstance = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 1, 1, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 128))).setMaxAccess("noaccess")
if mibBuilder.loadTexts: jnxMbgSMIPPoolRoutingInstance.setDescription("A name which identifies the routing instance to which the address \npool belongs.")
jnxMbgSMIPPoolType = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 1, 1, 1, 4), InetAddressType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxMbgSMIPPoolType.setDescription("Indicates whether this pool entry is of type ipv4 or ipv6.")
jnxMbgSMIPPoolFree = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 1, 1, 1, 5), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxMbgSMIPPoolFree.setDescription("Total number of free addresses in this pool entry.")
jnxMbgSMIPPoolInUse = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 1, 1, 1, 6), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxMbgSMIPPoolInUse.setDescription("Total number of used addresses in this pool entry.")
jnxMbgSMIPPoolUtil = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 1, 1, 1, 7), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxMbgSMIPPoolUtil.setDescription("Percentage utilization for this pool entry.")
jnxMbgSMIPPoolNotificationVars = MibIdentifier((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 1, 2))
jnxMbgSMIPPoolThresholdPoolName = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 1, 2, 1), DisplayString()).setMaxAccess("notifyonly")
if mibBuilder.loadTexts: jnxMbgSMIPPoolThresholdPoolName.setDescription("The name which identifies the address pool on the mobile-gateway\nfor which the threshold was exceeded.")
jnxMbgSMIPPoolThresholdLSName = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 1, 2, 2), DisplayString()).setMaxAccess("notifyonly")
if mibBuilder.loadTexts: jnxMbgSMIPPoolThresholdLSName.setDescription("The name which identifies the logical-system on the mobile-gateway\nin which the address pool threshold was exceeded.")
jnxMbgSMIPPoolThresholdRIName = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 1, 2, 3), DisplayString()).setMaxAccess("notifyonly")
if mibBuilder.loadTexts: jnxMbgSMIPPoolThresholdRIName.setDescription("The name which identifies the routing instance on the mobile-gateway\nin which the address pool threshold was exceeded.")
jnxMbgSMIPPoolConfiguredThreshold = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 1, 2, 4), Unsigned32()).setMaxAccess("notifyonly").setUnits("percent")
if mibBuilder.loadTexts: jnxMbgSMIPPoolConfiguredThreshold.setDescription("The threshold value configured for an address pool on the mobile \ngateway exceeding which a notification is generated.")
jnxMbgSMIPPoolCurrentThreshold = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 1, 2, 5), Unsigned32()).setMaxAccess("notifyonly").setUnits("percent")
if mibBuilder.loadTexts: jnxMbgSMIPPoolCurrentThreshold.setDescription("The current threshold value for an address pool on the mobile \ngateway. This can be equal to or greater than the configured\nthreshold value.")
jnxMbgSMIPPoolMMPoolName = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 1, 2, 6), DisplayString()).setMaxAccess("notifyonly")
if mibBuilder.loadTexts: jnxMbgSMIPPoolMMPoolName.setDescription("The name which identifies the address pool on the mobile-gateway\nwhich underwent a change in the maintenance-mode state.")
jnxMbgSMIPPoolMMLSName = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 1, 2, 7), DisplayString()).setMaxAccess("notifyonly")
if mibBuilder.loadTexts: jnxMbgSMIPPoolMMLSName.setDescription("The name which identifies the logical-system on the mobile-gateway\nwhich underwent a change in the maintenance-mode state.")
jnxMbgSMIPPoolMMRIName = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 1, 2, 8), DisplayString()).setMaxAccess("notifyonly")
if mibBuilder.loadTexts: jnxMbgSMIPPoolMMRIName.setDescription("The name which identifies the routing instance on the mobile-gateway\nwhich underwent a change in the maintenance-mode state.")
jnxMbgSMIPPoolPrevMMState = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 1, 2, 9), DisplayString()).setMaxAccess("notifyonly")
if mibBuilder.loadTexts: jnxMbgSMIPPoolPrevMMState.setDescription("A string that indicates the maintenance-mode state .")
jnxMbgSMIPPoolNewMMState = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 1, 2, 10), DisplayString()).setMaxAccess("notifyonly")
if mibBuilder.loadTexts: jnxMbgSMIPPoolNewMMState.setDescription("A string that indicates the maintenance-mode state .")
jnxMbgSMIPRangeHiThresRangeName = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 1, 2, 11), DisplayString()).setMaxAccess("notifyonly")
if mibBuilder.loadTexts: jnxMbgSMIPRangeHiThresRangeName.setDescription("The name which identifies the address pool's range on the mobile-gateway\nfor which the threshold was exceeded.")
jnxMbgSMIPRangeHiThresPoolName = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 1, 2, 12), DisplayString()).setMaxAccess("notifyonly")
if mibBuilder.loadTexts: jnxMbgSMIPRangeHiThresPoolName.setDescription("The name which identifies the address pool on the mobile-gateway, whose\nrange threshold was exceeded.")
jnxMbgSMIPRangeHiLSName = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 1, 2, 13), DisplayString()).setMaxAccess("notifyonly")
if mibBuilder.loadTexts: jnxMbgSMIPRangeHiLSName.setDescription("The name which identifies the logical-system on the mobile-gateway\nin which the address range threshold was exceeded.")
jnxMbgSMIPRangeHiRIName = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 1, 2, 14), DisplayString()).setMaxAccess("notifyonly")
if mibBuilder.loadTexts: jnxMbgSMIPRangeHiRIName.setDescription("The name which identifies the routing instance on the mobile-gateway\nin which the address range threshold was exceeded.")
jnxMbgSMIPRangeHiCfgThres = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 1, 2, 15), Unsigned32()).setMaxAccess("notifyonly").setUnits("percent")
if mibBuilder.loadTexts: jnxMbgSMIPRangeHiCfgThres.setDescription("The threshold value configured for an address pool range on the mobile \ngateway exceeding which a notification is generated.")
jnxMbgSMIPRangeHiCurrUtil = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 1, 2, 16), Unsigned32()).setMaxAccess("notifyonly").setUnits("percent")
if mibBuilder.loadTexts: jnxMbgSMIPRangeHiCurrUtil.setDescription("The current threshold value for an address pool range on the mobile \ngateway. This can be equal to or greater than the configured\nthreshold value.")
jnxMbgSMIPRangeLowThresRangeName = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 1, 2, 17), DisplayString()).setMaxAccess("notifyonly")
if mibBuilder.loadTexts: jnxMbgSMIPRangeLowThresRangeName.setDescription("The name which identifies the address pool's range on the mobile-gateway\nfor which the low threshold was reached.")
jnxMbgSMIPRangeLowThresPoolName = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 1, 2, 18), DisplayString()).setMaxAccess("notifyonly")
if mibBuilder.loadTexts: jnxMbgSMIPRangeLowThresPoolName.setDescription("The name which identifies the address pool on the mobile-gateway, whose\nrange low threshold was reached.")
jnxMbgSMIPRangeLowLSName = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 1, 2, 19), DisplayString()).setMaxAccess("notifyonly")
if mibBuilder.loadTexts: jnxMbgSMIPRangeLowLSName.setDescription("The name which identifies the logical-system on the mobile-gateway\nin which the address range low threshold was reached.")
jnxMbgSMIPRangeLowRIName = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 1, 2, 20), DisplayString()).setMaxAccess("notifyonly")
if mibBuilder.loadTexts: jnxMbgSMIPRangeLowRIName.setDescription("The name which identifies the routing instance on the mobile-gateway\nin which the address range low threshold was reached.")
jnxMbgSMIPRangeLowCfgThres = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 1, 2, 21), Unsigned32()).setMaxAccess("notifyonly").setUnits("percent")
if mibBuilder.loadTexts: jnxMbgSMIPRangeLowCfgThres.setDescription("The threshold value configured for an address pool range on the mobile \ngateway reaching which a notification is generated.")
jnxMbgSMIPRangeLowCurrUtil = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 1, 2, 22), Unsigned32()).setMaxAccess("notifyonly").setUnits("percent")
if mibBuilder.loadTexts: jnxMbgSMIPRangeLowCurrUtil.setDescription("The current threshold value for an address pool range on the mobile \ngateway. This can be equal to or greater than the configured\nthreshold value.")
jnxMbgSMIPPoolHTCfgThres = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 1, 2, 23), Unsigned32()).setMaxAccess("notifyonly").setUnits("percent")
if mibBuilder.loadTexts: jnxMbgSMIPPoolHTCfgThres.setDescription("The threshold value configured for an address pool on the mobile gateway \nexceeding which a notification is generated.")
jnxMbgSMIPPoolCurrUtil = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 1, 2, 24), Unsigned32()).setMaxAccess("notifyonly").setUnits("percent")
if mibBuilder.loadTexts: jnxMbgSMIPPoolCurrUtil.setDescription("The current utilization value for an address pool on the mobile \ngateway. This can be equal to or greater than the configured\nthreshold value.")
jnxMbgSMIPPoolLTCfgThres = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 1, 2, 25), Unsigned32()).setMaxAccess("notifyonly").setUnits("percent")
if mibBuilder.loadTexts: jnxMbgSMIPPoolLTCfgThres.setDescription("The threshold value configured for an address pool on the mobile gateway\nreaching which a notification is generated.")
jnxMbgIPPoolTable = MibTable((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 1, 3))
if mibBuilder.loadTexts: jnxMbgIPPoolTable.setDescription("The table exposes the local address pools attributes and\ntheir statistics.\n\nThis table contains information about local address pools only.")
jnxMbgIPPoolEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 1, 3, 1)).setIndexNames((0, "JUNIPER-MOBILE-GATEWAY-SM-IP-POOL-MIB", "jnxMbgIPPoolId"))
if mibBuilder.loadTexts: jnxMbgIPPoolEntry.setDescription("An entry representing a single address range or prefix entry \nin the pool. It is indexed by Pool Id.")
jnxMbgIPPoolId = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 1, 3, 1, 1), Unsigned32()).setMaxAccess("noaccess")
if mibBuilder.loadTexts: jnxMbgIPPoolId.setDescription("A Pool Id which identifies a pool on the mobile-gateway.")
jnxMbgIPPoolLogicalSystem = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 1, 3, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxMbgIPPoolLogicalSystem.setDescription("A name which identifies the logical-system to which the address \npool belongs on the mobile gateway.")
jnxMbgIPPoolRoutingInstance = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 1, 3, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxMbgIPPoolRoutingInstance.setDescription("A name which identifies the routing instance to which the address \npool belongs on the mobile gateway.")
jnxMbgIPPoolName = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 1, 3, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxMbgIPPoolName.setDescription("A name which identifies the pool on the mobile-gateway.")
jnxMbgIPPoolType = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 1, 3, 1, 5), InetAddressType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxMbgIPPoolType.setDescription("The type configured for this pool on the mobile gateway.\nTypes supported are Ipv4(1) or IPv6(2).")
jnxMbgIPPoolFree = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 1, 3, 1, 6), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxMbgIPPoolFree.setDescription("Total number of free addresses in this pool.")
jnxMbgIPPoolInUse = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 1, 3, 1, 7), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxMbgIPPoolInUse.setDescription("Total number of used addresses in this pool.")
jnxMbgIPPoolUtil = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 1, 3, 1, 8), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxMbgIPPoolUtil.setDescription("Percentage utilization for this pool.")
jnxMbgIPPoolRangeTable = MibTable((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 1, 4))
if mibBuilder.loadTexts: jnxMbgIPPoolRangeTable.setDescription("The table exposes the local address pool range's attributes and\ntheir statistics.\n\n This table contains information about local address pools only.")
jnxMbgIPPoolRangeEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 1, 4, 1)).setIndexNames((0, "JUNIPER-MOBILE-GATEWAY-SM-IP-POOL-MIB", "jnxMbgIPPoolId"), (0, "JUNIPER-MOBILE-GATEWAY-SM-IP-POOL-MIB", "jnxMbgIPPoolRangeName"))
if mibBuilder.loadTexts: jnxMbgIPPoolRangeEntry.setDescription("An entry representing a address ranges in the pool. It is\nindexed by the Gateway Index, Logical System Id,\nRouting Instance Id, Pool Id and Range Id.")
jnxMbgIPPoolRangeName = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 1, 4, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 64))).setMaxAccess("noaccess")
if mibBuilder.loadTexts: jnxMbgIPPoolRangeName.setDescription("The name of the local IP address pool range")
jnxMbgIPPoolRangeType = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 1, 4, 1, 2), InetAddressType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxMbgIPPoolRangeType.setDescription("The type configured for this range on the mobile gateway.\nTypes supported are Ipv4(1) or IPv6(2).")
jnxMbgIPPoolRangeFree = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 1, 4, 1, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxMbgIPPoolRangeFree.setDescription("Total number of free addresses in this range.")
jnxMbgIPPoolRangeInUse = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 1, 4, 1, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxMbgIPPoolRangeInUse.setDescription("Total number of used addresses in this range.")
jnxMbgIPPoolRangeUtil = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 1, 4, 1, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxMbgIPPoolRangeUtil.setDescription("Percentage utilization for this range.")
# Augmentions
# Notifications
jnxMbgSMIPPoolThresholdExceeded = NotificationType((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 0, 1)).setObjects(*(("JUNIPER-MOBILE-GATEWAY-SM-IP-POOL-MIB", "jnxMbgSMIPPoolCurrentThreshold"), ("JUNIPER-MOBILE-GATEWAY-SM-IP-POOL-MIB", "jnxMbgSMIPPoolThresholdLSName"), ("JUNIPER-MOBILE-GATEWAY-SM-IP-POOL-MIB", "jnxMbgSMIPPoolThresholdPoolName"), ("JUNIPER-MOBILE-GATEWAY-SM-IP-POOL-MIB", "jnxMbgSMIPPoolConfiguredThreshold"), ("JUNIPER-MOBILE-GATEWAY-SM-IP-POOL-MIB", "jnxMbgSMIPPoolThresholdRIName"), ) )
if mibBuilder.loadTexts: jnxMbgSMIPPoolThresholdExceeded.setDescription("This notification signifies that the number of addresses allocated\nfrom a given address pool has exceeded a pre-configured threshold \nvalue.")
jnxMbgSMIPPoolMMStateChange = NotificationType((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 0, 2)).setObjects(*(("JUNIPER-MOBILE-GATEWAY-SM-IP-POOL-MIB", "jnxMbgSMIPPoolMMPoolName"), ("JUNIPER-MOBILE-GATEWAY-SM-IP-POOL-MIB", "jnxMbgSMIPPoolPrevMMState"), ("JUNIPER-MOBILE-GATEWAY-SM-IP-POOL-MIB", "jnxMbgSMIPPoolMMRIName"), ("JUNIPER-MOBILE-GATEWAY-SM-IP-POOL-MIB", "jnxMbgSMIPPoolNewMMState"), ("JUNIPER-MOBILE-GATEWAY-SM-IP-POOL-MIB", "jnxMbgSMIPPoolMMLSName"), ) )
if mibBuilder.loadTexts: jnxMbgSMIPPoolMMStateChange.setDescription("This notification indicates that the pool name indicated by \nLS-name, RI-name and pool-name undergoes a change in the\nmaintenance-mode state.")
jnxMbgSMIPRangeHighThresExcd = NotificationType((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 0, 3)).setObjects(*(("JUNIPER-MOBILE-GATEWAY-SM-IP-POOL-MIB", "jnxMbgSMIPRangeHiLSName"), ("JUNIPER-MOBILE-GATEWAY-SM-IP-POOL-MIB", "jnxMbgSMIPRangeHiRIName"), ("JUNIPER-MOBILE-GATEWAY-SM-IP-POOL-MIB", "jnxMbgSMIPRangeHiCfgThres"), ("JUNIPER-MOBILE-GATEWAY-SM-IP-POOL-MIB", "jnxMbgSMIPRangeHiThresPoolName"), ("JUNIPER-MOBILE-GATEWAY-SM-IP-POOL-MIB", "jnxMbgSMIPRangeHiCurrUtil"), ("JUNIPER-MOBILE-GATEWAY-SM-IP-POOL-MIB", "jnxMbgSMIPRangeHiThresRangeName"), ) )
if mibBuilder.loadTexts: jnxMbgSMIPRangeHighThresExcd.setDescription("This notification indicates that the range name that exceeded \nhigher threshold.")
jnxMbgSMIPRangeLowThresRchd = NotificationType((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 0, 4)).setObjects(*(("JUNIPER-MOBILE-GATEWAY-SM-IP-POOL-MIB", "jnxMbgSMIPRangeLowThresPoolName"), ("JUNIPER-MOBILE-GATEWAY-SM-IP-POOL-MIB", "jnxMbgSMIPRangeLowCurrUtil"), ("JUNIPER-MOBILE-GATEWAY-SM-IP-POOL-MIB", "jnxMbgSMIPRangeLowRIName"), ("JUNIPER-MOBILE-GATEWAY-SM-IP-POOL-MIB", "jnxMbgSMIPRangeLowCfgThres"), ("JUNIPER-MOBILE-GATEWAY-SM-IP-POOL-MIB", "jnxMbgSMIPRangeLowLSName"), ("JUNIPER-MOBILE-GATEWAY-SM-IP-POOL-MIB", "jnxMbgSMIPRangeLowThresRangeName"), ) )
if mibBuilder.loadTexts: jnxMbgSMIPRangeLowThresRchd.setDescription("This notification indicates that the range name that reached \nlower threshold.")
jnxMbgSMIPPoolHighThresExcd = NotificationType((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 0, 5)).setObjects(*(("JUNIPER-MOBILE-GATEWAY-SM-IP-POOL-MIB", "jnxMbgSMIPPoolThresholdLSName"), ("JUNIPER-MOBILE-GATEWAY-SM-IP-POOL-MIB", "jnxMbgSMIPPoolThresholdPoolName"), ("JUNIPER-MOBILE-GATEWAY-SM-IP-POOL-MIB", "jnxMbgSMIPPoolCurrUtil"), ("JUNIPER-MOBILE-GATEWAY-SM-IP-POOL-MIB", "jnxMbgSMIPPoolHTCfgThres"), ("JUNIPER-MOBILE-GATEWAY-SM-IP-POOL-MIB", "jnxMbgSMIPPoolThresholdRIName"), ) )
if mibBuilder.loadTexts: jnxMbgSMIPPoolHighThresExcd.setDescription("This notification signifies that the number of addresses allocated\nfrom a given address pool has exceeded a pre-configured threshold \nvalue.")
jnxMbgSMIPPoolLowThresRchd = NotificationType((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 0, 6)).setObjects(*(("JUNIPER-MOBILE-GATEWAY-SM-IP-POOL-MIB", "jnxMbgSMIPPoolThresholdLSName"), ("JUNIPER-MOBILE-GATEWAY-SM-IP-POOL-MIB", "jnxMbgSMIPPoolThresholdPoolName"), ("JUNIPER-MOBILE-GATEWAY-SM-IP-POOL-MIB", "jnxMbgSMIPPoolCurrUtil"), ("JUNIPER-MOBILE-GATEWAY-SM-IP-POOL-MIB", "jnxMbgSMIPPoolLTCfgThres"), ("JUNIPER-MOBILE-GATEWAY-SM-IP-POOL-MIB", "jnxMbgSMIPPoolThresholdRIName"), ) )
if mibBuilder.loadTexts: jnxMbgSMIPPoolLowThresRchd.setDescription("This notification signifies that the number of addresses allocated\nfrom a given address pool has reached the lower threshold value.")
jnxMbgIPPoolExhausted = NotificationType((1, 3, 6, 1, 4, 1, 2636, 3, 66, 5, 0, 7)).setObjects(*(("JUNIPER-MOBILE-GATEWAY-SM-IP-POOL-MIB", "jnxMbgIPPoolName"), ("JUNIPER-MOBILE-GATEWAY-SM-IP-POOL-MIB", "jnxMbgIPPoolLogicalSystem"), ("JUNIPER-MOBILE-GATEWAY-SM-IP-POOL-MIB", "jnxMbgIPPoolRoutingInstance"), ) )
if mibBuilder.loadTexts: jnxMbgIPPoolExhausted.setDescription("This notification signifies that the given pool has exhausted all its\naddresses and there are no free addresses left.")
# Exports
# Module identity
mibBuilder.exportSymbols("JUNIPER-MOBILE-GATEWAY-SM-IP-POOL-MIB", PYSNMP_MODULE_ID=jnxMobileGatewayPgwSMIPPoolMib)
# Objects
mibBuilder.exportSymbols("JUNIPER-MOBILE-GATEWAY-SM-IP-POOL-MIB", jnxMobileGatewayPgwSMIPPoolMib=jnxMobileGatewayPgwSMIPPoolMib, jnxMbgSMIPPoolNotifications=jnxMbgSMIPPoolNotifications, jnxMbgSMIPPoolObjects=jnxMbgSMIPPoolObjects, jnxMbgSMIPPoolTable=jnxMbgSMIPPoolTable, jnxMbgSMIPPoolEntry=jnxMbgSMIPPoolEntry, jnxMbgSMIPPoolName=jnxMbgSMIPPoolName, jnxMbgSMIPPoolLogicalSystem=jnxMbgSMIPPoolLogicalSystem, jnxMbgSMIPPoolRoutingInstance=jnxMbgSMIPPoolRoutingInstance, jnxMbgSMIPPoolType=jnxMbgSMIPPoolType, jnxMbgSMIPPoolFree=jnxMbgSMIPPoolFree, jnxMbgSMIPPoolInUse=jnxMbgSMIPPoolInUse, jnxMbgSMIPPoolUtil=jnxMbgSMIPPoolUtil, jnxMbgSMIPPoolNotificationVars=jnxMbgSMIPPoolNotificationVars, jnxMbgSMIPPoolThresholdPoolName=jnxMbgSMIPPoolThresholdPoolName, jnxMbgSMIPPoolThresholdLSName=jnxMbgSMIPPoolThresholdLSName, jnxMbgSMIPPoolThresholdRIName=jnxMbgSMIPPoolThresholdRIName, jnxMbgSMIPPoolConfiguredThreshold=jnxMbgSMIPPoolConfiguredThreshold, jnxMbgSMIPPoolCurrentThreshold=jnxMbgSMIPPoolCurrentThreshold, jnxMbgSMIPPoolMMPoolName=jnxMbgSMIPPoolMMPoolName, jnxMbgSMIPPoolMMLSName=jnxMbgSMIPPoolMMLSName, jnxMbgSMIPPoolMMRIName=jnxMbgSMIPPoolMMRIName, jnxMbgSMIPPoolPrevMMState=jnxMbgSMIPPoolPrevMMState, jnxMbgSMIPPoolNewMMState=jnxMbgSMIPPoolNewMMState, jnxMbgSMIPRangeHiThresRangeName=jnxMbgSMIPRangeHiThresRangeName, jnxMbgSMIPRangeHiThresPoolName=jnxMbgSMIPRangeHiThresPoolName, jnxMbgSMIPRangeHiLSName=jnxMbgSMIPRangeHiLSName, jnxMbgSMIPRangeHiRIName=jnxMbgSMIPRangeHiRIName, jnxMbgSMIPRangeHiCfgThres=jnxMbgSMIPRangeHiCfgThres, jnxMbgSMIPRangeHiCurrUtil=jnxMbgSMIPRangeHiCurrUtil, jnxMbgSMIPRangeLowThresRangeName=jnxMbgSMIPRangeLowThresRangeName, jnxMbgSMIPRangeLowThresPoolName=jnxMbgSMIPRangeLowThresPoolName, jnxMbgSMIPRangeLowLSName=jnxMbgSMIPRangeLowLSName, jnxMbgSMIPRangeLowRIName=jnxMbgSMIPRangeLowRIName, jnxMbgSMIPRangeLowCfgThres=jnxMbgSMIPRangeLowCfgThres, jnxMbgSMIPRangeLowCurrUtil=jnxMbgSMIPRangeLowCurrUtil, jnxMbgSMIPPoolHTCfgThres=jnxMbgSMIPPoolHTCfgThres, jnxMbgSMIPPoolCurrUtil=jnxMbgSMIPPoolCurrUtil, jnxMbgSMIPPoolLTCfgThres=jnxMbgSMIPPoolLTCfgThres, jnxMbgIPPoolTable=jnxMbgIPPoolTable, jnxMbgIPPoolEntry=jnxMbgIPPoolEntry, jnxMbgIPPoolId=jnxMbgIPPoolId, jnxMbgIPPoolLogicalSystem=jnxMbgIPPoolLogicalSystem, jnxMbgIPPoolRoutingInstance=jnxMbgIPPoolRoutingInstance, jnxMbgIPPoolName=jnxMbgIPPoolName, jnxMbgIPPoolType=jnxMbgIPPoolType, jnxMbgIPPoolFree=jnxMbgIPPoolFree, jnxMbgIPPoolInUse=jnxMbgIPPoolInUse, jnxMbgIPPoolUtil=jnxMbgIPPoolUtil, jnxMbgIPPoolRangeTable=jnxMbgIPPoolRangeTable, jnxMbgIPPoolRangeEntry=jnxMbgIPPoolRangeEntry, jnxMbgIPPoolRangeName=jnxMbgIPPoolRangeName, jnxMbgIPPoolRangeType=jnxMbgIPPoolRangeType, jnxMbgIPPoolRangeFree=jnxMbgIPPoolRangeFree, jnxMbgIPPoolRangeInUse=jnxMbgIPPoolRangeInUse, jnxMbgIPPoolRangeUtil=jnxMbgIPPoolRangeUtil)
# Notifications
mibBuilder.exportSymbols("JUNIPER-MOBILE-GATEWAY-SM-IP-POOL-MIB", jnxMbgSMIPPoolThresholdExceeded=jnxMbgSMIPPoolThresholdExceeded, jnxMbgSMIPPoolMMStateChange=jnxMbgSMIPPoolMMStateChange, jnxMbgSMIPRangeHighThresExcd=jnxMbgSMIPRangeHighThresExcd, jnxMbgSMIPRangeLowThresRchd=jnxMbgSMIPRangeLowThresRchd, jnxMbgSMIPPoolHighThresExcd=jnxMbgSMIPPoolHighThresExcd, jnxMbgSMIPPoolLowThresRchd=jnxMbgSMIPPoolLowThresRchd, jnxMbgIPPoolExhausted=jnxMbgIPPoolExhausted)
|
UTF-8
|
Python
| false | false | 2,014 |
6,760,278,530,059 |
23afe9870173e7b082e9f9b577b6ad3ec8f06734
|
92a026bbfdaa6bab9349e0985c44681041f9c905
|
/RSGraviton/RSAnalyzer/python/Summer11/MET_Run2011A_PromptReco_v6_2011Ago26.py
|
f97bd5dbd5fbee3f78b9594d4194cbe760a9c644
|
[] |
no_license
|
trtomei/usercode
|
https://github.com/trtomei/usercode
|
63f7d912b8ac2c425348ecb1fc7fb5eda6d89179
|
9e14c1d93c9f868747e180ca7adfa3638ea52eb0
|
refs/heads/master
| 2020-05-14T21:52:08.941759 | 2012-05-14T19:16:01 | 2012-05-14T19:16:01 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import FWCore.ParameterSet.Config as cms
maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
readFiles = cms.untracked.vstring()
secFiles = cms.untracked.vstring()
source = cms.Source('PoolSource', fileNames = readFiles, secondaryFileNames = secFiles)
readFiles.extend( [
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_134_1_VSt.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_83_1_q1k.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_27_1_M6w.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_92_1_mCe.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_3_1_3BR.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_125_1_Ggq.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_63_1_1ji.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_28_1_MqI.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_43_1_QJP.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_68_1_JUL.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_126_1_tEc.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_9_1_jYA.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_111_1_7bU.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_143_1_lWc.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_110_1_i6w.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_53_1_mhp.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_44_1_JfP.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_88_1_xMc.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_30_1_rou.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_93_1_KOY.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_19_1_apV.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_142_1_SOQ.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_14_1_Na7.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_87_1_vH1.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_38_1_Vap.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_76_1_xiS.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_67_1_H3M.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_119_1_iNn.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_29_1_UX4.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_34_1_3I1.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_146_1_Gl1.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_47_1_WdF.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_117_1_qM9.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_48_1_it3.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_80_1_q8Y.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_89_1_TOy.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_6_1_Qdg.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_97_1_Lft.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_15_1_3QE.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_40_1_pJe.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_21_1_p9h.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_129_1_K1t.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_82_1_1Au.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_90_1_41g.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_77_1_kEN.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_60_1_dg8.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_102_1_ynM.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_49_1_k4D.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_133_1_3GG.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_8_1_VLg.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_136_1_Nro.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_2_1_65Y.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_1_1_5AC.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_140_1_eUh.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_73_1_e0v.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_5_2_OlY.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_31_2_wgm.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_50_2_Bii.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_59_2_1Rk.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_109_2_Xh9.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_41_2_1Ip.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_64_2_rnf.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_13_2_N1k.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_138_2_aGG.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_39_2_LFT.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_58_2_e5x.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_78_2_e70.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_135_2_28u.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_71_2_AG6.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_46_2_FDC.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_69_2_jn1.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_122_2_REi.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_98_2_7Fk.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_79_2_L12.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_104_2_8Vg.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_114_2_5D1.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_54_2_gCj.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_18_2_K1r.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_113_2_hUu.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_112_2_k4i.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_139_2_38z.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_96_2_zJz.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_20_2_Y21.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_91_2_lq3.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_132_2_Awq.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_94_2_Xws.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_24_2_KN9.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_120_2_jYx.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_130_2_rxa.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_17_2_cyM.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_65_2_2Vk.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_145_2_jLF.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_33_2_NPt.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_106_2_LDf.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_52_2_sEK.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_22_2_8FX.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_75_2_2Y0.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_16_2_J7u.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_118_2_3xU.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_123_2_tyb.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_70_2_CxH.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_11_2_SIl.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_127_2_iwh.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_23_2_nFu.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_116_2_41E.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_84_2_CD2.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_10_2_0rt.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_12_2_6nw.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_103_2_IJt.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_99_2_hxi.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_4_2_nIg.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_26_2_4kn.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_128_2_Azb.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_74_2_H7w.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_25_2_w5v.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_7_2_PIK.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_81_2_oVM.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_36_2_uNH.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_121_2_pOu.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_144_2_Xtn.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_131_2_eWd.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_105_2_fK0.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_45_2_Hnr.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_51_2_F7D.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_35_2_Zx0.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_107_2_YDh.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_101_2_5dx.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_72_2_fpI.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_115_2_NT7.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_32_2_v9B.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_42_2_jTX.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_124_2_2fO.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_108_2_uIl.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_66_2_L6n.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_62_2_8yh.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_37_2_b3l.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_85_2_HCZ.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_95_2_uYV.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_61_2_ETs.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_55_2_OPq.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_100_2_gV6.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_141_2_rqg.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_137_2_VRr.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_86_2_QIB.root",
"/store/user/tomei/MET_Run2011A-PromptReco_v6_2011Ago26/skim_57_3_hfn.root"
]);
|
UTF-8
|
Python
| false | false | 2,012 |
472,446,425,482 |
a08e3994934ca7768ac45c2c95e2cd4dc371ad94
|
5754acb4e6d1011ebf2f9f8c152a54d0813b7e33
|
/src/global_stuff.py
|
c72c12e5617d78f72a67bff4d3dcd51c39e7d50f
|
[] |
no_license
|
glareprotector/active_site
|
https://github.com/glareprotector/active_site
|
7558a12afe4c4a540b6df26c0a5adbfc6fee6fc2
|
64686472c5b635063def9e0742c776c6db8c76da
|
refs/heads/master
| 2021-01-23T15:42:14.500439 | 2012-10-27T16:54:55 | 2012-10-27T16:54:55 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
'''
Created on Mar 9, 2012
@author: glareprotector
'''
from manager import *
import string
#import Bio.PDB
import csv
import constants
import re
import math
import pdb
FILE_MANAGER_SIZE = 500
OBJ_MANAGER_SIZE = 500
MUSCLE_PATH = '/mnt/work/fultonw/muscle3.8.31_i86linux64'
DSSP_PATH = '/mnt/work/fultonw/active_site/dssp/dssp-2.0.4-linux-amd64'
to_reindex = True
recalculate = False
recalculate_nodewise_loss_f = True
metric_cutoffs = [1,2,3,4,5,6,7,8,9]
RESULTS_FOLDER = '../new_results/testing_logreg/'
RESULTS_BASE_FOLDER = '../the_results/'
NACCESS_PATH = '/mnt/work/fultonw/active_site/Naccess/naccess'
NACCESS_FOLDER = '/mnt/work/fultonw/active_site/Naccess/'
BLAST_PATH = '/mnt/work/fultonw/blast/ncbi-blast-2.2.26+/bin/psiblast'
BLASTDB_PATH = '/mnt/work/fultonw/blast/ncbi-blast-2.2.26+/bin/nr/nr'
CONSERVATION_FOLDER = '/home/fultonw/conservation_code/'
ORIG_CHAINS = '../catres_pdbs'
CSA_FILE = '../catres_sites'
success_file = 'success_catres.txt'
fail_file = 'fail_catres.txt'
proc_id = 0
|
UTF-8
|
Python
| false | false | 2,012 |
6,691,559,070,383 |
879e096a5fba39e4eddb463708221a9f81c1425e
|
b5092245bd6a5cc705437aab4a225cfc0ed8d18a
|
/jsb/lib/jsbimport.py
|
e303a0fd27a5f8aa4c3fc5776049b0172b400455
|
[
"MIT"
] |
permissive
|
melmothx/jsonbot
|
https://github.com/melmothx/jsonbot
|
66942cafa1abb300c12feb3c38fdeb9ba48e4cd2
|
4d9ba385555da03f881f6c4354c062f7f3c9949c
|
refs/heads/master
| 2020-08-05T03:24:56.096271 | 2011-03-30T13:56:06 | 2011-03-30T14:20:24 | 1,528,864 | 9 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# jsb/jsbimport.py
#
#
""" use the imp module to import modules. """
## basic imports
import time
import sys
import imp
import os
import thread
import logging
## _import function
def _import(name):
""" do a import (full). """
mods = []
mm = ""
for m in name.split('.'):
mm += m
mods.append(mm)
mm += "."
    for mod in mods: __import__(mod)  # import each parent package in turn
logging.debug("jsbimport - got module %s" % sys.modules[name])
return sys.modules[name]
## force_import function
def force_import(name):
""" force import of module <name> by replacing it in sys.modules. """
try: del sys.modules[name]
except KeyError: pass
plug = _import(name)
return plug
def _import_byfile(modname, filename):
try: return imp.load_source(modname, filename)
except NotImplementedError: return _import(filename[:-3].replace(os.sep, "."))
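
## minimal usage sketch ('logging.handlers' is a stdlib package, chosen only
## so the demo has something safe to import; any plugin name works the same):
if __name__ == '__main__':
    print _import('logging.handlers')
    print force_import('logging.handlers')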
|
UTF-8
|
Python
| false | false | 2,011 |
5,763,846,147,018 |
098d3d74fe5952b36eb6ad5a1abc868a0bcef378
|
c2d6fc4f1c0fd046750c33ba97bf0d95d1986500
|
/archspace/planets/listeners.py
|
abd918229ba2698bf0bb4dd8b87b71becccab311
|
[] |
no_license
|
iamromeo/website-archspace
|
https://github.com/iamromeo/website-archspace
|
2940389bd29fa315c0f4200d833ebc83e428cb75
|
1fbad0ec9e1d0cbe444a64437dfdfb2650069fe9
|
refs/heads/master
| 2021-01-16T00:16:59.172427 | 2010-04-23T05:44:57 | 2010-04-23T05:44:57 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from game.signals import turn
from players.signals import power
from players.models import Player
from django.db.models.signals import post_save
def on_turn(player, turn, **kwargs):
"""
do terraform
collect pp
"""
for planet in player.planets.all():
points = planet.get_point_breakdown()
planet.terraform_points += points['terraform']
planet.terraform()
player.production_points += points['pp'] + planet.get_commerce()
planet.save()
turn.connect(on_turn)
def on_power(player, **kwargs):
total_power = 0
for planet in player.planets.all():
total_power += planet.get_power()
return total_power
power.connect(on_power)
def create_home_planet(instance, created, **kwargs):
if created:
        instance.planets.create_home_planet(instance)
post_save.connect(create_home_planet, sender=Player)
|
UTF-8
|
Python
| false | false | 2,010 |
1,219,770,739,975 |
41e8a88555db2a90a7ebed9c96fb94d91d496a04
|
40066aae817abedea054eebaeef43b0630f7452f
|
/stubs/3.2/pwd.py
|
fc15f5b5f2825def988cc6e696d184b454e6be2a
|
[
"MIT",
"LicenseRef-scancode-python-cwi",
"LicenseRef-scancode-free-unknown",
"Python-2.0"
] |
non_permissive
|
silky/mypy
|
https://github.com/silky/mypy
|
281420ac2ca795a35db5c60ab300294db0e95619
|
de6a8d3710df9f49109cb682f2092e4967bfb92c
|
refs/heads/master
| 2021-01-20T19:05:38.643133 | 2013-08-22T17:53:16 | 2013-08-22T17:53:16 | 12,587,928 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Stubs for pwd
# NOTE: These are incomplete!
from typing import Any
def getpwuid(uid: int) -> Any: pass
def getpwnam(name: str) -> Any: pass
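
# pwd also exposes getpwall(); a sketch of the still-missing stub:
def getpwall() -> Any: pass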
|
UTF-8
|
Python
| false | false | 2,013 |
12,730,283,074,794 |
7a134a4bc58d70b3f9e4b09f8a9a27abf5af4c78
|
9fffb8d0539a27722695ee1bf77afda2255f4120
|
/Python Codes/CSE 201 - HW#5.py
|
3ebf069518d6a6b78d36ab067dbc98141e36aa24
|
[] |
no_license
|
mukasama/portfolio
|
https://github.com/mukasama/portfolio
|
fdde5f1b022cc3d7b5abf1c35e170ad9f5d3f401
|
92e1d231f76ad7473a2318da87e8b3817a9e4e5b
|
refs/heads/master
| 2016-09-07T15:41:07.331327 | 2014-10-19T17:53:42 | 2014-10-19T17:53:42 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# February 14, 2013
# CSE 201, Homework #5
# Anthony Rogers, Cristhofer Munoz, Martin Mukasa
def str2num(s):
    '''
    Encode the word s as a string of two-digit codes: 'a'-'z' map to
    00-25 and 'A'-'Z' map to 26-51.  We build the ordered alphabet OMEGA
    from string.ascii_lowercase plus string.ascii_uppercase, pair it with
    the parallel list num of two-digit strings, and for every letter of s
    append its matching code to digit, which is returned.
    '''
import string
alpha = string.ascii_lowercase
beta = string.ascii_uppercase
DELTA = list(alpha)
SIGMA = list(beta)
OMEGA = DELTA + SIGMA
num = ["00","01","02","03","04","05","06","07","08","09","10","11","12","13","14","15","16","17","18","19","20","21","22","23","24","25","26","27","28","29","30","31","32","33","34","35","36","37","38","39","40","41","42","43","44","45","46","47","48","49","50","51"]
digit = ""
for letter in s:
for k in range(len(OMEGA)):
if letter == OMEGA[k]:
k = num[k]
digit = digit + str(k)
return digit
print
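# A more compact sketch of the same letter -> two-digit mapping, using
# str.index instead of a parallel code list (same 00-51 scheme; shown for
# illustration, not part of the assignment):
def str2numAlt(s):
    import string
    letters = string.ascii_lowercase + string.ascii_uppercase
    return "".join("%02d" % letters.index(ch) for ch in s if ch in letters)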
def str2numsp(s):
    '''
    Same encoding as str2num, extended with two extra codes: 52 for a
    space and 53 for a period.  Every letter, space, and period in s is
    replaced by its two-digit code and the resulting digit string is
    returned.
    '''
import string
alpha = string.ascii_lowercase
beta = string.ascii_uppercase
DELTA = list(alpha)
SIGMA = list(beta)
space = " "
period = "."
Lspace = list(space)
Lperiod = list(period)
OMEGA = DELTA + SIGMA + Lspace + Lperiod
num = ["00","01","02","03","04","05","06","07","08","09","10","11","12","13","14","15","16","17","18","19","20","21","22","23","24","25","26","27","28","29","30","31","32","33","34","35","36","37","38","39","40","41","42","43","44","45","46","47","48","49","50","51","52","53"]
digit = ""
for letter in s:
for k in range(len(OMEGA)):
if letter == OMEGA[k]:
k = num[k]
digit = digit + str(k)
print
return digit
print
def num2str(s):
    '''
    The inverse of str2numsp: s is a string of two-digit codes, so we
    walk it two characters at a time, look each code up in num, and
    append the matching character from OMEGA to rebuild the original
    text, which is returned.
    '''
import string
alpha = string.ascii_lowercase
beta = string.ascii_uppercase
DELTA = list(alpha)
SIGMA = list(beta)
space = " "
period = "."
Lspace = list(space)
Lperiod = list(period)
OMEGA = DELTA + SIGMA + Lspace + Lperiod
num = ["00","01","02","03","04","05","06","07","08","09","10","11","12","13","14","15","16","17","18","19","20","21","22","23","24","25","26","27","28","29","30","31","32","33","34","35","36","37","38","39","40","41","42","43","44","45","46","47","48","49","50","51","52","53"]
letters = ""
    for i in range(0, len(s), 2):        # each code is two digits wide
        number = s[i:i+2]
        for k in range(len(num)):
            if number == num[k]:
                letters = letters + OMEGA[k]
return letters
print
def perf(n):
    '''
    Decide whether the positive integer n is perfect.  Sum every proper
    divisor p of n (1 <= p < n with n % p == 0); if the sum equals n the
    number is perfect and "Yes" is returned, otherwise "No".
    '''
sum = 0
for p in range(1,n):
        if n%p == 0:
sum = sum + p
if sum == n:
return "Yes"
if sum != n:
return "No"
print str2num('Thepriceofgreatnessisresponsibility')
print str2numsp('Growing old is inevitable but growing up is optional.')
print num2str('5014205232141952341953')
print perf(945)
|
UTF-8
|
Python
| false | false | 2,014 |
11,630,771,476,556 |
f6ccd22ea64e3ca014a3f388a83b02b8b3f9e2fa
|
c955988ef1f47d9143b38f266673bb357b13d04f
|
/IFCPythonSDK/submodel.py
|
c05030f50c8f4d7722c7f3529d790cf3ed39c4b1
|
[] |
no_license
|
quntian/BimCenter
|
https://github.com/quntian/BimCenter
|
1c3d75afe0ed79320ed503d07089560ede6f313b
|
5d67a975321d0b7c352d5e9aec2f6d440c205fa0
|
refs/heads/master
| 2020-02-20T09:16:13.958734 | 2014-05-22T11:20:57 | 2014-05-22T11:20:57 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
#coding=utf-8
#Filename:submodel.py
import log
import re
import common
from spfheader import SPFHeader
from utils import getLine,render
pattern=re.compile(r'#(\d+)')
class SubModel(object):
""""""
def __init__(self):
super(SubModel,self).__init__()
self.lines={}
self.header=SPFHeader()
def read(self,fp):
""" read lines to a dict:id->key(params)
"""
if not self.header.parse(fp):
log.error("SubModel : Can't parse header section, line %d"%common.counter)
return False
#data section
s=getLine(fp)
if not s or s!='DATA':
log.error("SubModel : Can't find DATA section, line %d"%common.counter)
return False
#id=ENTITYNAME(......)
while True:
beg=0
s=getLine(fp)
if not s:
log.error("SubModel : Unexpected End Of File, line %d"%common.counter)
return False
if s=="ENDSEC":
break
i=s.find('=')
if i==-1 or s[0]!='#':
log.error("SubModel : Syntax error on entity id, line %d"%common.counter)
return False
entityId=int(s[1:i])
beg=i+1
i=s.find('(',beg)
if i==-1 or s[-1]!=')':
log.error("SubModel : Syntax error on entity definition, line %d"%common.counter)
return False
entityName=s[beg:i]
params=s[i+1:-1]
#print "#%s=%s(%s);"%(entityId,entityName,params)
self.lines[entityId]=(entityName,params)
s=getLine(fp)
if not s or s!="END-ISO-10303-21":
log.error("SubModel : Can't find END-ISO-10303-21 token, line %d"%common.counter)
return False
return True
def getModelWithInverses(self,id):
""" get sub model with explicit ref and inverses
"""
pass
def getModel(self,ids):
""" get sub model with explicit ref
"""
currentIds=ids if isinstance(ids,list) else [ids]
idx=0
while idx<len(currentIds):
curId=currentIds[idx]
if not curId in self.lines:
log.error("SubModel : Can't find id %d, line %d"%(curId,common.counter))
return None
line=self.lines[curId]
params=line[1]
refs=pattern.findall(params)
for ref in refs:
if not int(ref) in currentIds:
currentIds.append(int(ref))
idx+=1
return sorted(currentIds)
def getGeometryModel(self,id):
""" get sub model only with geometry info
"""
currentIds=[id]
idx=0
while idx<len(currentIds):
curId=currentIds[idx]
if not curId in self.lines:
log.error("SubModel : Can't find id %d, line %d"%(curId,common.counter))
return None
line=self.lines[curId]
params=line[1]
refs=pattern.findall(params)
for ref in refs:
if not int(ref) in currentIds:
currentIds.append(int(ref))
idx+=1
return sorted(currentIds)
def nameToIds(self,name):
""""""
ids=[]
name=name.upper()
for id,value in self.lines.items():
if value[0]==name:
ids.append(id)
return ids
def generateModelFile(self,ids,to):
""""""
code=''
for i in ids:
line=self.lines[i]
code+='#%d=%s(%s);\n'%(i,line[0],line[1])
render('temp.ifc',
{'schema':','.join(self.header.fileSchema.schemaIdentifiers),'data':code},
to)
def getCategoryModel(self,categories):
""""""
ids=[]
for category in categories:
ids+=self.nameToIds(category)
return self.getModel(ids)
if __name__ == '__main__':
model=SubModel()
with open("files/example.ifc",'r') as fp:
res=model.read(fp)
categories=['IFCWALLSTANDARDCASE','IFCSLAB','IFCDOOR','IFCCOLUMN','IFCBEAM','IFCWINDOW','IFCBUILDINGELEMENTPROXY']
#model.generateModelFile(model.getModel([79,155,201,246]),'my.ifc')
model.generateModelFile(model.getCategoryModel(categories),'my.ifc')
import os
os.system('viewer.exe files\\my.ifc')
|
UTF-8
|
Python
| false | false | 2,014 |
19,516,331,432,165 |
ffd45c1159aa913dd3b082c053c05840922fb300
|
e2d8f4ed659c162c4c7aef90d121c22a90848de5
|
/src/bibim/util/tests/test_config.py
|
362d4e6b1641fbeef0daf2b5517fc4c13e2a172e
|
[
"LicenseRef-scancode-warranty-disclaimer",
"GPL-3.0-only",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft"
] |
non_permissive
|
Alex-Linhares/bibtexIndexMaker
|
https://github.com/Alex-Linhares/bibtexIndexMaker
|
e279400cf66b0275bbeb47ac91585f5063f36873
|
cec15dd552b5c1a6928fba65d4e06291f42b91ec
|
refs/heads/master
| 2020-03-27T12:28:38.595359 | 2010-07-26T09:31:38 | 2010-07-26T09:31:38 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Copyright 2010 Ramon Xuriguera
#
# This file is part of BibtexIndexMaker.
#
# BibtexIndexMaker is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BibtexIndexMaker is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BibtexIndexMaker. If not, see <http://www.gnu.org/licenses/>.
import unittest #@UnresolvedImport
from bibim.util.config import configuration
class TestBibimConfig(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_get_database(self):
self.failUnless(configuration.database == 'sqlite:///bibim.db')
def test_search_engine(self):
self.failUnless(configuration.search_engine >= 0)
def test_search_properties(self):
properties = configuration._get_search_properties()
self.failUnless(properties['min_query_length'] == 6)
def test_wrapper_properties(self):
properties = configuration._get_wrapper_properties()
self.failUnless(properties['max_wrappers'] > 0)
def test_black_list(self):
black_list = configuration.black_list
self.failUnless(len(black_list) == 2)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
UTF-8
|
Python
| false | false | 2,010 |
8,856,222,579,113 |
38bd24e3127ef984b4984d7f692833464c017182
|
b7d042a564749ee40b1505a784718411720e2968
|
/HomeAutomation/__init__.py
|
22feef47ca44f343bcaaa4acc9150709d1a48990
|
[] |
no_license
|
puiterwijk/HomeAutomation
|
https://github.com/puiterwijk/HomeAutomation
|
734b9ec27274e432c842ead101e64e9fdb463448
|
2bd3ec7da8ce34525367412ea088740eaaa1a227
|
refs/heads/master
| 2020-05-20T05:47:09.954075 | 2014-11-02T20:51:36 | 2014-11-02T21:08:47 | 26,068,746 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Copyright (c) 2014, Patrick Uiterwijk <[email protected]>
# All rights reserved.
#
# This file is part of HomeAutomation.
#
# HomeAutomation is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HomeAutomation is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HomeAutomation. If not, see <http://www.gnu.org/licenses/>.
from abc import ABCMeta, abstractmethod
import time
import stomp
import json
import logging
import argparse
logging.basicConfig()
PREFIX = 'org.puiterwijk.homeautomation'
class BaseModule(object):
__metaclass__ = ABCMeta
def __init__(self, description='A homeautomation module'):
self.args = self._get_params(description)
self.environment = self.args.environment
self.conn = stomp.Connection(
host_and_ports=[(self.args.server, self.args.port)],
use_ssl=True,
ssl_ca_certs=self.args.cacert,
ssl_cert_file=self.args.cert,
ssl_key_file=self.args.key)
self.conn.set_listener('me', self)
self.conn.start()
self.conn.connect()
def add_extra_arguments(self, parser):
pass
def _get_params(self, description):
# TODO: Read defaults from config file
parser = argparse.ArgumentParser(description=description)
parser.add_argument('--server', help='Server address',
default='queue.puiterwijk.org')
parser.add_argument('--port', help='Server port',
default=61614)
parser.add_argument('--cacert', help='CA cert file',
default='cacert.pem')
parser.add_argument('--cert', help='Certificate file',
default='testcert.pem')
parser.add_argument('--key', help='Certificate key',
default='testcert.key')
parser.add_argument('--environment', help='Environment',
default='test')
self.add_extra_arguments(parser)
return parser.parse_args()
def work(self):
while True:
time.sleep(5)
def _create_url(self, is_queue, topic):
parts = [PREFIX, self.environment, topic]
topic = '.'.join(parts)
if is_queue:
return '/queue/%s' % topic
else:
return '/topic/%s' % topic
def send(self, topic, message, is_queue=False, **kwargs):
url = self._create_url(is_queue, topic)
self.conn.send(url, json.dumps(message), **kwargs)
def subscribe(self, topic, is_queue=False, name=None, **kwargs):
url = self._create_url(is_queue, topic)
if not name:
name = topic
self.conn.subscribe(destination=url,
id=name,
**kwargs)
def on_error(self, headers, message):
        print 'ERROR! Headers: %s, message: %s' % (headers, message)
def on_message(self, headers, message):
_, msgtype, destination = headers['destination'].split('/')
if not destination.startswith(PREFIX):
# We do not care about this message
return
destination = destination[len(PREFIX)+1:]
destination = destination.split('.', 1)
if destination[0] != self.environment:
# Wrong environment
return
topic = destination[1]
try:
body = json.loads(message)
except:
body = None
self.message_received(topic, body)
@abstractmethod
def message_received(self, topic, body):
pass
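
## Example subclass -- a minimal sketch; the 'demo.in' and 'demo.echo'
## topic names are made up for illustration:
class EchoModule(BaseModule):
    """Re-publishes everything received on 'demo.in' to 'demo.echo'."""
    def message_received(self, topic, body):
        # Only react to the input topic so we never echo our own output.
        if topic == 'demo.in':
            self.send('demo.echo', body)

## Typical wiring (commented out: instantiating connects to the broker):
#   module = EchoModule(description='Echo module')
#   module.subscribe('demo.in')
#   module.work()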
|
UTF-8
|
Python
| false | false | 2,014 |
10,385,230,947,672 |
45267f47a39af572a6c570aa0469841b1121c8d2
|
082233a07f31433b3dc663b4c009e224c88e8c01
|
/glimpse/glab/cli_test.py
|
ea7ffcd8c2d479a3bab9b09a7c14762f4f80df13
|
[
"MIT"
] |
permissive
|
mthomure/glimpse-project
|
https://github.com/mthomure/glimpse-project
|
85a7e0112117ba45269478d8e7c94b92858212c3
|
8b98390850351385acfda5be3088cd4db4cc4a09
|
refs/heads/master
| 2021-01-19T10:25:58.132248 | 2014-10-25T19:39:50 | 2014-10-25T19:39:50 | 2,373,268 | 1 | 3 | null | false | 2014-06-23T05:13:08 | 2011-09-12T18:44:26 | 2014-06-23T05:13:08 | 2014-06-23T05:13:08 | 4,275 | 9 | 5 | 5 |
C++
| null | null |
from . import cli
from .cli import *
from glimpse.models.ml import Model, Params
from glimpse.pools import SinglecorePool
from glimpse.util.garray import toimage
from glimpse.util.gtest import *
import subprocess
import cPickle as pickle
# helper function
def MakeOpts():
return OptValue(MakeCliOptions())
class CliWithActivityTests(unittest.TestCase):
def testLoadParams(self):
# Set some non-default parameters
p = Params(num_scales = 52, s1_num_orientations = 13)
pool = SinglecorePool()
exp = ExperimentData()
opts = MakeOpts()
opts.extractor.no_activity = True
with TempFile() as path:
with open(path, 'w') as fh:
pickle.dump(p, fh)
opts.extractor.param_file = path
CliWithActivity(opts, exp, pool)
self.assertEqual(exp.extractor.model.params, p)
def testLoadPrototypes(self):
m = Model()
ks = [ np.random.random((10,) + kshape).astype(ACTIVATION_DTYPE)
for kshape in m.params.s2_kernel_shapes ]
exp = ExperimentData()
exp.extractor.model = m
opts = MakeOpts()
opts.extractor.no_activity = True
with TempFile() as path:
with open(path, 'w') as fh:
pickle.dump(ks, fh)
opts.extractor.prototypes.path = path
CliWithActivity(opts, exp, SinglecorePool())
self.assertEqual(len(exp.extractor.model.s2_kernels), len(ks))
for s2_k, k in zip(exp.extractor.model.s2_kernels, ks):
self.assertTrue(np.all(s2_k == k))
def test_withActivity(self):
opts = MakeOpts()
f = RecordedFunctionCall()
with MonkeyPatch(cli, 'ComputeActivation', f):
CliWithActivity(opts, ExperimentData(), SinglecorePool())
self.assertTrue(f.called)
class CliEvaluateTests(unittest.TestCase):
def test_errorOnMissingLayer(self):
opts = MakeOpts()
opts.evaluation.layer = None
with self.assertRaises(OptionError):
CliEvaluate(opts, ExperimentData())
class CliProjectTests(unittest.TestCase):
def test_loadInputExp(self):
iexp = ExperimentData()
iexp.corpus.paths = [ "a%d.png" % i for i in range(4) ]
opts = MakeOpts()
opts.extractor.no_activity = True
with TempFile() as ifname:
with open(ifname, 'wb') as fh:
pickle.dump(iexp, fh)
opts.input_path = ifname
exp = CliProject(opts)
self.assertEqual(exp, iexp)
def test_errorOnBadCorpus(self):
opts = MakeOpts()
opts.corpus.root_dir = '/does/not/exist'
with self.assertRaises(OSError):
CliProject(opts)
def testEndToEnd_evaluate_noCrossval(self):
corpus = dict(cls_a = ("11.jpg", "12.jpg"),
cls_b = ("21.jpg", "22.jpg"))
opts = MakeOpts()
opts.evaluation.evaluate = True
opts.evaluation.layer = "c1"
with TempDir() as root:
MakeCorpusWithImages(root, **corpus)
opts.corpus.root_dir = root
exp = CliProject(opts)
self.assertIsNotNone(exp)
self.assertEqual(len(exp.evaluation), 1)
self.assertIsNotNone(exp.evaluation[0].results.score)
def testEndToEnd_evaluate_withCrossval(self):
corpus = dict(cls_a = ("11.jpg", "12.jpg"),
cls_b = ("21.jpg", "22.jpg"))
opts = MakeOpts()
opts.evaluation.evaluate = True
opts.evaluation.cross_validate = True
opts.evaluation.cross_val_folds = 2
opts.evaluation.layer = "c1"
with TempDir() as root:
MakeCorpusWithImages(root, **corpus)
opts.corpus.root_dir = root
exp = CliProject(opts)
self.assertIsNotNone(exp)
self.assertEqual(len(exp.evaluation), 1)
self.assertIsNotNone(exp.evaluation[0].results.score)
def testEndToEnd_noEvaluate(self):
corpus = dict(cls_a = ("11.jpg", "12.jpg"),
cls_b = ("21.jpg", "22.jpg"))
opts = MakeOpts()
opts.evaluation.evaluate = False
opts.evaluation.layer = "c1"
with TempDir() as root:
MakeCorpusWithImages(root, **corpus)
opts.corpus.root_dir = root
exp = CliProject(opts)
self.assertIsNotNone(exp)
self.assertEqual(len(exp.evaluation), 0)
def testSetPool(self):
f = RecordedFunctionCall()
opts = MakeOpts()
opts.pool_type = 'MY-CRAZY-POOL'
opts.corpus.root_dir = '1'
with MonkeyPatch((cli, 'MakePool', f),
(cli, 'CliWithActivity', lambda *x: None),
(cli, 'SetCorpus', lambda *x,**kw: None)):
CliProject(opts)
self.assertSequenceEqual(f.args, (opts.pool_type,))
def testWriteResults(self):
opts = MakeOpts()
opts.extractor.no_activity = True
opts.corpus.root_dir = '1'
with TempFile() as fname:
opts.result_path = fname
with MonkeyPatch(cli, 'SetCorpus', lambda *args, **kw: None):
CliProject(opts)
with open(fname) as fh:
exp = pickle.load(fh)
self.assertEqual(type(exp), ExperimentData)
class ShellTests(unittest.TestCase):
def test(self):
corpus = dict(cls_a=('a1.png', 'a2.png'), cls_b=('b1.png', 'b2.png'))
with TempDir() as root:
MakeCorpusWithImages(root, **corpus)
with TempFile() as fname:
cmd = ["python", "-m", "glimpse.glab.cli", "-p", "imprint",
"-E", "-c", root, "-o", fname, "-E", "-t", "singlecore"]
subprocess.check_call(cmd)
with open(fname) as fh:
exp = pickle.load(fh)
self.assertEqual(len(exp.corpus.paths), 4)
self.assertTrue((exp.corpus.labels == (0,0,1,1)).all())
self.assertEqual(len(exp.extractor.activation), 4)
self.assertEqual(len(exp.evaluation), 1)
self.assertIsNotNone(exp.evaluation[0].results.score)
if __name__ == '__main__':
unittest.main()
|
UTF-8
|
Python
| false | false | 2,014 |
17,411,797,425,872 |
b8be38df414af5e5084a9a3e36893d5887e8eca2
|
56926612d8b193f64f977c1dfd86b863892ff3ea
|
/Account.py
|
442afa62bac4924e5c9621ff2cc0284f42ae1b11
|
[
"GPL-2.0-only"
] |
non_permissive
|
lramos15/PyControl
|
https://github.com/lramos15/PyControl
|
66c7fbc6a0b100435b85d8a8d27bb5627600d060
|
59de804f3f61a5d43aa6f19064c8c8f4195669e6
|
refs/heads/master
| 2020-05-31T02:07:25.572989 | 2014-12-26T22:00:40 | 2014-12-26T22:00:40 | 28,521,955 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from time import sleep
import MySQLdb
import AccountValidationTest
import OSChecker
db = MySQLdb.connect(host="", # your host
user="", # your username
passwd="", # your password
db="") # name of the data base
cursor = db.cursor()#Defines a cursor allowing me to execute SQL commands
name = raw_input("Enter your username:")
AccountValidationTest.LoginScript(name)
OSChecker.OSCheck()
def timer(time, _print=False):
    # Count down one second per loop; a loop (rather than recursion) keeps
    # long countdowns from hitting Python's recursion limit.
    while time > 0:
        if _print: print time
        sleep(1)
        time = time - 1
    # Query parameters go in a tuple, hence (name,) rather than (name).
    cursor.execute("SELECT * FROM Accounts WHERE Username = %s;", (name,))
    cursor.execute("UPDATE Accounts SET Credits = 0 WHERE Username = %s;", (name,))
    db.commit()
    OSChecker.LockAccount(name, time)
    return ''
cursor.execute("SELECT * FROM Accounts WHERE Username=%s;", (name))
for test in cursor.fetchall():
creds = test[4]
print "Credit Balance: " + `creds`
time_set = creds * 60
timer(time_set, True)
|
UTF-8
|
Python
| false | false | 2,014 |
15,719,580,343,694 |
97876232390fa6ca7130e5cb47538bb913a4f7fa
|
f9ad953cc547960eda33fead24d4ad29b1fc9471
|
/turbion/bits/pingback/signals.py
|
b6784c58ad5613a8514d3c13bf416f8d585c6566
|
[
"BSD-3-Clause"
] |
permissive
|
strogo/turbion
|
https://github.com/strogo/turbion
|
49c3ed454f24319463948288424e18c7541a5bb1
|
b9b9c95e1a4497e6c4b64f389713a9a16226e425
|
refs/heads/master
| 2021-01-13T02:08:05.997563 | 2011-01-04T13:29:19 | 2011-01-04T13:29:19 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.dispatch import Signal
pingback_recieved = Signal()
|
UTF-8
|
Python
| false | false | 2,011 |
8,254,927,156,035 |
89069d82ca679f49d8a9f3fe5ffa717881f35d9b
|
b72f7fa9568a7fa7ec017a31f751094dc3db4e70
|
/bae-service-demo-python/redis_demo.py
|
206d5d661fadd67822cb20cf24f28c95a7c62ebc
|
[] |
no_license
|
zhoushuai/bae-demo
|
https://github.com/zhoushuai/bae-demo
|
2592e60b954082489802f40f81988cc917fe9e7b
|
3bbbbd01e5407cb9d39d3849541d1fbd91b4ac6f
|
refs/heads/master
| 2019-07-06T01:55:14.499332 | 2014-03-13T06:22:20 | 2014-03-13T06:22:20 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#-*- coding:utf-8 -*-
def test_redis():
import redis
rd_name = "HsXNSJuTYsVJIJdeGnvY"
myauth = "%s-%s-%s"%("apikey", "secretkey", rd_name)
    ### Connect to the redis service
r = redis.Redis(host = "redis.duapp.com", port = 80, password = myauth)
    ### Perform various redis operations, e.g. set and get
r.set("foo", "bar")
return "get foo=> %s success!"%r.get("foo")
def app(environ, start_response):
status = '200 OK'
headers = [('Content-type', 'text/html')]
start_response(status, headers)
    try:
        # WSGI requires an iterable of body chunks, so wrap the string.
        return [test_redis()]
    except:
        return ['handle exception']
from bae.core.wsgi import WSGIApplication
application = WSGIApplication(app)
|
GB18030
|
Python
| false | false | 2,014 |
18,056,042,540,610 |
e3307bfafa9626a95bc4c2dd94dad80a05150076
|
6c8ded12605459790bab76dd4e91bd604479897c
|
/assign3/search.py
|
0cf87ea99d1f347786dc7ac0f864195419103215
|
[
"GPL-3.0-only",
"GPL-3.0-or-later"
] |
non_permissive
|
sulin90/Artificial-Intelligence
|
https://github.com/sulin90/Artificial-Intelligence
|
544a5c5465335e3acf348bc4edf68f05cfbcbede
|
65eda743b43525f545b70a2371de12d59804c4e1
|
refs/heads/master
| 2021-05-26T23:36:35.218689 | 2013-01-14T00:32:28 | 2013-01-14T00:32:28 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
'''
File: search.py
Author: Corey Prophitt <[email protected]>
Class: CS440, Colorado State University.
License: GPLv3, see license.txt for more details.
Description:
RBFS search algorithm and the IDS search algorithms.
'''
###############################################################################
# The IDS search algorithm (provided by Chuck Anderson) and modified by me
###############################################################################
COUNT = 0
def incCount():
global COUNT
COUNT = COUNT + 1
def getNodeCount():
global COUNT
tmp = COUNT
COUNT = 0
return tmp
def iterativeDeepeningSearch(startState, actionsF, takeActionF, goalTestF, maxDepth):
    for depth in range(maxDepth):
        result = depthLimitedSearchHelper(startState, actionsF, takeActionF, goalTestF, depth)
        if result == "failure":
            return "failure"
        if result != "cutoff":
            result.insert(0, startState)
            return result
    return "cutoff"
def depthLimitedSearchHelper(state, actionsF, takeActionF, goalTestF, depthLimit):
    if goalTestF(state):
        return []
    if depthLimit == 0:
        return "cutoff"
    cutoffOccurred = False
    for action in actionsF(state):
        childState = takeActionF(state, action)
        incCount()
        result = depthLimitedSearchHelper(childState, actionsF, takeActionF, goalTestF, depthLimit-1)
        if result == "cutoff":
            cutoffOccurred = True
        elif result != "failure":
            result.insert(0, childState)
            return result
    if cutoffOccurred:
        return "cutoff"
    return "failure"    # no cutoff anywhere below: this subtree truly failed
###############################################################################
###############################################################################
###############################################################################
# The RBFS algorithm (provided in class notes) and modified by me
###############################################################################
NCOUNT = 0
def getNCount():
global NCOUNT
tmp = NCOUNT
NCOUNT = 0
return tmp
class Node:
def __init__(self, state, f=0, g=0 ,h=0):
self.state = state
self.f = f
self.g = g
self.h = h
def __repr__(self):
return "Node(" + repr(self.state) + ", f=" + repr(self.f) + \
", g=" + repr(self.g) + ", h=" + repr(self.h) + ")"
def RBFS(startState, actionsF, takeActionF, goalTestF, hF):
h = hF(startState)
startNode = Node(state=startState, f=0+h, g=0, h=h)
return RBFSHelper(startNode, actionsF, takeActionF, goalTestF, hF, float('inf'))
def RBFSHelper(parentNode, actionsF, takeActionF, goalTestF, hF, fmax):
global NCOUNT
if goalTestF(parentNode.state):
return ([parentNode.state], parentNode.g)
## Construct list of children nodes with f, g, and h values
actions = actionsF(parentNode.state)
if not actions:
return ("failure", float('inf'))
children = []
for action in actions:
(childState,stepCost) = takeActionF(parentNode.state, action)
h = hF(childState)
NCOUNT += 1
g = parentNode.g + stepCost
f = max(h+g, parentNode.f)
childNode = Node(state=childState, f=f, g=g, h=h)
children.append(childNode)
while True:
# find best child
children.sort(key = lambda n: n.f) # sort by f value
bestChild = children[0]
if bestChild.f > fmax:
return ("failure",bestChild.f)
# next lowest f value
alternativef = children[1].f if len(children) > 1 else float('inf')
# expand best child, reassign its f value to be returned value
result,bestChild.f = RBFSHelper(bestChild, actionsF, takeActionF, goalTestF,
hF, min(fmax,alternativef))
if result is not "failure":
result.insert(0,parentNode.state)
return (result, bestChild.f)
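
# Tiny smoke test for RBFS (a sketch; the three-state chain a -> b -> c is
# made up, every step costs 1, and the heuristic is the trivial h = 0):
def _demoRBFS():
    successors = {'a': ['b'], 'b': ['c'], 'c': []}
    actionsF = lambda s: successors[s]
    takeActionF = lambda s, a: (a, 1)
    goalTestF = lambda s: s == 'c'
    hF = lambda s: 0
    return RBFS('a', actionsF, takeActionF, goalTestF, hF)
# _demoRBFS() returns (['a', 'b', 'c'], 2)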
###############################################################################
###############################################################################
###############################################################################
# Functions common between the algorithms
###############################################################################
def ebf(numberNodes, depth, precision=0.01):
'''A quick and dirty way to calculate EBF, slightly off.'''
if depth == 0:
return 0.000
return numberNodes**(1.0/depth)
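
# A closer approximation (a sketch, not part of the original assignment):
# the true effective branching factor b satisfies
#   numberNodes = b + b**2 + ... + b**depth,
# so bisection on b can match that polynomial to within `precision`.
def ebfBisect(numberNodes, depth, precision=0.01):
    if depth == 0:
        return 0.0
    lo, hi = 1.0, float(max(numberNodes, 2))
    while hi - lo > precision:
        mid = (lo + hi) / 2.0
        total = sum(mid**d for d in range(1, depth+1))
        if total < numberNodes:
            lo = mid
        else:
            hi = mid
    return (lo + hi) / 2.0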
###############################################################################
###############################################################################
if __name__ == "__main__":
pass
|
UTF-8
|
Python
| false | false | 2,013 |
9,337,258,901,777 |
d04bb6bb00b38bccd4039f75b75b569c330b422f
|
b01ffccce39624ddaba3f435695fcd5fda7aa68c
|
/github/Issue.py
|
0a6288cb2e027ec6532f0f050bb6d8a7b9337148
|
[
"Apache-2.0"
] |
permissive
|
sagarsane/abetterportfolio
|
https://github.com/sagarsane/abetterportfolio
|
90992bed0b35194acbc11f5c78b39cca0167ca51
|
f30c0076fbe2fa51b2f0e0f1a8ae5fd1f47a6a7b
|
refs/heads/master
| 2021-01-15T16:29:08.424033 | 2013-09-28T03:48:47 | 2013-09-28T03:49:40 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Copyright 2012 Vincent Jacques
# [email protected]
# This file is part of PyGithub. http://vincent-jacques.net/PyGithub
# PyGithub is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License along with PyGithub. If not, see <http://www.gnu.org/licenses/>.
import GithubObject
import PaginatedList
import Repository
import IssueEvent
import Label
import NamedUser
import Milestone
import IssueComment
import IssuePullRequest
class Issue( GithubObject.GithubObject ):
@property
def assignee( self ):
self._completeIfNotSet( self._assignee )
return self._NoneIfNotSet( self._assignee )
@property
def body( self ):
self._completeIfNotSet( self._body )
return self._NoneIfNotSet( self._body )
@property
def closed_at( self ):
self._completeIfNotSet( self._closed_at )
return self._NoneIfNotSet( self._closed_at )
@property
def closed_by( self ):
self._completeIfNotSet( self._closed_by )
return self._NoneIfNotSet( self._closed_by )
@property
def comments( self ):
self._completeIfNotSet( self._comments )
return self._NoneIfNotSet( self._comments )
@property
def created_at( self ):
self._completeIfNotSet( self._created_at )
return self._NoneIfNotSet( self._created_at )
@property
def html_url( self ):
self._completeIfNotSet( self._html_url )
return self._NoneIfNotSet( self._html_url )
@property
def id( self ):
self._completeIfNotSet( self._id )
return self._NoneIfNotSet( self._id )
@property
def labels( self ):
self._completeIfNotSet( self._labels )
return self._NoneIfNotSet( self._labels )
@property
def milestone( self ):
self._completeIfNotSet( self._milestone )
return self._NoneIfNotSet( self._milestone )
@property
def number( self ):
self._completeIfNotSet( self._number )
return self._NoneIfNotSet( self._number )
@property
def pull_request( self ):
self._completeIfNotSet( self._pull_request )
return self._NoneIfNotSet( self._pull_request )
@property
def repository( self ):
self._completeIfNotSet( self._repository )
return self._NoneIfNotSet( self._repository )
@property
def state( self ):
self._completeIfNotSet( self._state )
return self._NoneIfNotSet( self._state )
@property
def title( self ):
self._completeIfNotSet( self._title )
return self._NoneIfNotSet( self._title )
@property
def updated_at( self ):
self._completeIfNotSet( self._updated_at )
return self._NoneIfNotSet( self._updated_at )
@property
def url( self ):
self._completeIfNotSet( self._url )
return self._NoneIfNotSet( self._url )
@property
def user( self ):
self._completeIfNotSet( self._user )
return self._NoneIfNotSet( self._user )
def add_to_labels( self, *labels ):
assert all( isinstance( element, Label.Label ) for element in labels ), labels
post_parameters = [ label.name for label in labels ]
headers, data = self._requester.requestAndCheck(
"POST",
self.url + "/labels",
None,
post_parameters
)
def create_comment( self, body ):
assert isinstance( body, ( str, unicode ) ), body
post_parameters = {
"body": body,
}
headers, data = self._requester.requestAndCheck(
"POST",
self.url + "/comments",
None,
post_parameters
)
return IssueComment.IssueComment( self._requester, data, completed = True )
def delete_labels( self ):
headers, data = self._requester.requestAndCheck(
"DELETE",
self.url + "/labels",
None,
None
)
def edit( self, title = GithubObject.NotSet, body = GithubObject.NotSet, assignee = GithubObject.NotSet, state = GithubObject.NotSet, milestone = GithubObject.NotSet, labels = GithubObject.NotSet ):
assert title is GithubObject.NotSet or isinstance( title, ( str, unicode ) ), title
assert body is GithubObject.NotSet or isinstance( body, ( str, unicode ) ), body
assert assignee is GithubObject.NotSet or assignee is None or isinstance( assignee, NamedUser.NamedUser ), assignee
assert state is GithubObject.NotSet or isinstance( state, ( str, unicode ) ), state
assert milestone is GithubObject.NotSet or milestone is None or isinstance( milestone, Milestone.Milestone ), milestone
assert labels is GithubObject.NotSet or all( isinstance( element, ( str, unicode ) ) for element in labels ), labels
post_parameters = dict()
if title is not GithubObject.NotSet:
post_parameters[ "title" ] = title
if body is not GithubObject.NotSet:
post_parameters[ "body" ] = body
if assignee is not GithubObject.NotSet:
post_parameters[ "assignee" ] = assignee._identity if assignee else ''
if state is not GithubObject.NotSet:
post_parameters[ "state" ] = state
if milestone is not GithubObject.NotSet:
post_parameters[ "milestone" ] = milestone._identity if milestone else ''
if labels is not GithubObject.NotSet:
post_parameters[ "labels" ] = labels
headers, data = self._requester.requestAndCheck(
"PATCH",
self.url,
None,
post_parameters
)
self._useAttributes( data )
def get_comment( self, id ):
assert isinstance( id, int ), id
headers, data = self._requester.requestAndCheck(
"GET",
self._parentUrl( self.url ) + "/comments/" + str( id ),
None,
None
)
return IssueComment.IssueComment( self._requester, data, completed = True )
def get_comments( self ):
return PaginatedList.PaginatedList(
IssueComment.IssueComment,
self._requester,
self.url + "/comments",
None
)
def get_events( self ):
return PaginatedList.PaginatedList(
IssueEvent.IssueEvent,
self._requester,
self.url + "/events",
None
)
def get_labels( self ):
return PaginatedList.PaginatedList(
Label.Label,
self._requester,
self.url + "/labels",
None
)
def remove_from_labels( self, label ):
assert isinstance( label, Label.Label ), label
headers, data = self._requester.requestAndCheck(
"DELETE",
self.url + "/labels/" + label._identity,
None,
None
)
def set_labels( self, *labels ):
assert all( isinstance( element, Label.Label ) for element in labels ), labels
post_parameters = [ label.name for label in labels ]
headers, data = self._requester.requestAndCheck(
"PUT",
self.url + "/labels",
None,
post_parameters
)
@property
def _identity( self ):
return self.number
def _initAttributes( self ):
self._assignee = GithubObject.NotSet
self._body = GithubObject.NotSet
self._closed_at = GithubObject.NotSet
self._closed_by = GithubObject.NotSet
self._comments = GithubObject.NotSet
self._created_at = GithubObject.NotSet
self._html_url = GithubObject.NotSet
self._id = GithubObject.NotSet
self._labels = GithubObject.NotSet
self._milestone = GithubObject.NotSet
self._number = GithubObject.NotSet
self._pull_request = GithubObject.NotSet
self._repository = GithubObject.NotSet
self._state = GithubObject.NotSet
self._title = GithubObject.NotSet
self._updated_at = GithubObject.NotSet
self._url = GithubObject.NotSet
self._user = GithubObject.NotSet
def _useAttributes( self, attributes ):
if "assignee" in attributes: # pragma no branch
assert attributes[ "assignee" ] is None or isinstance( attributes[ "assignee" ], dict ), attributes[ "assignee" ]
self._assignee = None if attributes[ "assignee" ] is None else NamedUser.NamedUser( self._requester, attributes[ "assignee" ], completed = False )
if "body" in attributes: # pragma no branch
assert attributes[ "body" ] is None or isinstance( attributes[ "body" ], ( str, unicode ) ), attributes[ "body" ]
self._body = attributes[ "body" ]
if "closed_at" in attributes: # pragma no branch
assert attributes[ "closed_at" ] is None or isinstance( attributes[ "closed_at" ], ( str, unicode ) ), attributes[ "closed_at" ]
self._closed_at = self._parseDatetime( attributes[ "closed_at" ] )
if "closed_by" in attributes: # pragma no branch
assert attributes[ "closed_by" ] is None or isinstance( attributes[ "closed_by" ], dict ), attributes[ "closed_by" ]
self._closed_by = None if attributes[ "closed_by" ] is None else NamedUser.NamedUser( self._requester, attributes[ "closed_by" ], completed = False )
if "comments" in attributes: # pragma no branch
assert attributes[ "comments" ] is None or isinstance( attributes[ "comments" ], int ), attributes[ "comments" ]
self._comments = attributes[ "comments" ]
if "created_at" in attributes: # pragma no branch
assert attributes[ "created_at" ] is None or isinstance( attributes[ "created_at" ], ( str, unicode ) ), attributes[ "created_at" ]
self._created_at = self._parseDatetime( attributes[ "created_at" ] )
if "html_url" in attributes: # pragma no branch
assert attributes[ "html_url" ] is None or isinstance( attributes[ "html_url" ], ( str, unicode ) ), attributes[ "html_url" ]
self._html_url = attributes[ "html_url" ]
if "id" in attributes: # pragma no branch
assert attributes[ "id" ] is None or isinstance( attributes[ "id" ], int ), attributes[ "id" ]
self._id = attributes[ "id" ]
if "labels" in attributes: # pragma no branch
assert attributes[ "labels" ] is None or all( isinstance( element, dict ) for element in attributes[ "labels" ] ), attributes[ "labels" ]
self._labels = None if attributes[ "labels" ] is None else [
Label.Label( self._requester, element, completed = False )
for element in attributes[ "labels" ]
]
if "milestone" in attributes: # pragma no branch
assert attributes[ "milestone" ] is None or isinstance( attributes[ "milestone" ], dict ), attributes[ "milestone" ]
self._milestone = None if attributes[ "milestone" ] is None else Milestone.Milestone( self._requester, attributes[ "milestone" ], completed = False )
if "number" in attributes: # pragma no branch
assert attributes[ "number" ] is None or isinstance( attributes[ "number" ], int ), attributes[ "number" ]
self._number = attributes[ "number" ]
if "pull_request" in attributes: # pragma no branch
assert attributes[ "pull_request" ] is None or isinstance( attributes[ "pull_request" ], dict ), attributes[ "pull_request" ]
self._pull_request = None if attributes[ "pull_request" ] is None else IssuePullRequest.IssuePullRequest( self._requester, attributes[ "pull_request" ], completed = False )
if "repository" in attributes: # pragma no branch
assert attributes[ "repository" ] is None or isinstance( attributes[ "repository" ], dict ), attributes[ "repository" ]
self._repository = None if attributes[ "repository" ] is None else Repository.Repository( self._requester, attributes[ "repository" ], completed = False )
if "state" in attributes: # pragma no branch
assert attributes[ "state" ] is None or isinstance( attributes[ "state" ], ( str, unicode ) ), attributes[ "state" ]
self._state = attributes[ "state" ]
if "title" in attributes: # pragma no branch
assert attributes[ "title" ] is None or isinstance( attributes[ "title" ], ( str, unicode ) ), attributes[ "title" ]
self._title = attributes[ "title" ]
if "updated_at" in attributes: # pragma no branch
assert attributes[ "updated_at" ] is None or isinstance( attributes[ "updated_at" ], ( str, unicode ) ), attributes[ "updated_at" ]
self._updated_at = self._parseDatetime( attributes[ "updated_at" ] )
if "url" in attributes: # pragma no branch
assert attributes[ "url" ] is None or isinstance( attributes[ "url" ], ( str, unicode ) ), attributes[ "url" ]
self._url = attributes[ "url" ]
if "user" in attributes: # pragma no branch
assert attributes[ "user" ] is None or isinstance( attributes[ "user" ], dict ), attributes[ "user" ]
self._user = None if attributes[ "user" ] is None else NamedUser.NamedUser( self._requester, attributes[ "user" ], completed = False )
|
UTF-8
|
Python
| false | false | 2,013 |
12,352,325,972,738 |
510268e42a1393ca33f256b4c3dbcb9aedf4d577
|
ba9ccf7c0ab365e69ee00aa2c2b8031da4c6b548
|
/sprintboard.py
|
508ea61dbca499cb81965319bc68fa4196f42576
|
[] |
no_license
|
redhat-cip/scrum_using_trello
|
https://github.com/redhat-cip/scrum_using_trello
|
33053866c474ce8e2276fa3b417549ba2e834912
|
6ad7d28d3faff4180681d7359ac5a070e56726ad
|
refs/heads/master
| 2020-12-11T01:37:26.767138 | 2014-12-26T21:30:14 | 2014-12-26T21:30:14 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
#
# Copyright (C) 2014 eNovance SAS <[email protected]>
#
# Author: Frederic Lepied <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
'''
import sys
import trellolib
def process_board(board_name):
client = trellolib.init_client()
sprint_backlog = None
sprint_task_lists = []
    boards = trellolib.lookup_boards(client, board_name)
    if not boards:
        print('unable to find board "%s"' % board_name)
        sys.exit(1)
    (board,) = boards
for lst in board.open_lists():
if lst.name == 'User Stories':
sprint_backlog = lst
elif lst.name.find('Stories') == -1:
sprint_task_lists.append(lst)
if not sprint_backlog:
print('No "User Stories" list. Aborting.')
sys.exit(1)
stories = {}
for card in sprint_backlog.list_cards():
card.fetch()
stories[trellolib.filter_name(card.name)] = card
for lst in sprint_task_lists:
for card in lst.list_cards():
us_name = card.name.split(' ')[0]
try:
item = None
task_list = None
checked = lst.name in ('Done', 'Rejected')
task_list = trellolib.lookup_checklist('Tasks',
stories[us_name])
if task_list:
item = trellolib.lookup_item(card.url, task_list)
if not item:
print('%s not in the Tasks checklist of %s, adding it' %
(card.name, us_name))
if not task_list:
print('Creating the Tasks checklist in %s' % us_name)
task_list = stories[us_name].add_checklist(
'Tasks',
[card.url, ])
else:
task_list.add_checklist_item(card.url)
item = trellolib.lookup_item(card.url, task_list)
if item:
if item['checked'] != checked:
task_list.set_checklist_item(item['name'], checked)
print('Set the checked state of %s to %s' %
(card.name, checked))
except KeyError:
                print('Card "%s" not associated with a User Story (%s)' %
                      (card.name, card.url))
if __name__ == '__main__':
if len(sys.argv) != 2:
print('Usage: %s <board name>' % sys.argv[0])
sys.exit(1)
process_board(sys.argv[1])
# sprintboard.py ends here
|
UTF-8
|
Python
| false | false | 2,014 |
558,345,770,745 |
2bb1e583c384f6fe38c19bcc76f800e53fa346fb
|
24de5b67cb3417f65aade0e826fc17aba1ad56c0
|
/help/models.py
|
ae6434778a9b7b489e6df7047a10d699a76f7bbe
|
[] |
no_license
|
travistanderson/starfishcommunity
|
https://github.com/travistanderson/starfishcommunity
|
0f9441a813a4df311253d2d46b53d9ee8b1ec108
|
2d0d1a63e9105f9b465700db38db5e0f06d8d9e7
|
refs/heads/master
| 2017-12-07T23:40:05.475435 | 2012-10-11T18:43:54 | 2012-10-11T18:43:54 | 566,623 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# help/models.py
from django.db import models
from django.contrib.sites.models import Site
class Category(models.Model):
category = models.CharField(max_length=300)
sorter = models.IntegerField(max_length=3)
def __unicode__(self):
return self.category
class Faq(models.Model):
question = models.CharField(max_length=300)
answer = models.TextField(max_length=10000)
sorter = models.IntegerField(max_length=3)
cat = models.ForeignKey(Category)
def __unicode__(self):
return self.question
|
UTF-8
|
Python
| false | false | 2,012 |
3,822,520,934,398 |
43def1bb11c5ee08ff2e77afcdd002fa4b0672f0
|
dacfd8776c6f839376efffbf317b6337e0f5912f
|
/dbtest.py
|
a5420ce1d393856efa228a8ca871ae162fbf122b
|
[] |
no_license
|
daverobertson63/dmdfe
|
https://github.com/daverobertson63/dmdfe
|
b9e7de23eba2e96aa73156108879b62c330d2d99
|
079ba929733f7909a934cf2e2ff65f1d4fb9ba4b
|
refs/heads/master
| 2016-08-03T15:23:14.888303 | 2014-10-15T21:10:36 | 2014-10-15T21:10:36 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
################################################################################
#
#
################################################################################
import sys
from com.ziclix.python.sql import zxJDBC
################################################################################
DATABASE = "solarsys.db"
JDBC_URL = "jdbc:sqlite:%s" % DATABASE
JDBC_DRIVER = "org.sqlite.JDBC"
TABLE_NAME = "planet"
TABLE_DROPPER = "drop table if exists %s;" % TABLE_NAME
TABLE_CREATOR = "create table %s (name, size, solar_distance);" % TABLE_NAME
RECORD_INSERTER = "insert into %s values (?, ?, ?);" % TABLE_NAME
PLANET_QUERY = """
select name, size, solar_distance
from %s
order by size, solar_distance desc
""" % TABLE_NAME
PLANET_DATA = [('mercury' , 'small' , 57), # distance in million kilometers
('venus' , 'small' , 107),
('earth' , 'small' , 150),
('mars' , 'small' , 229),
('jupiter' , 'large' , 777),
('saturn' , 'large' , 888),
('uranus' , 'medium', 2871),
('neptune' , 'medium', 4496),
('pluto' , 'tiny' , 5869),
]
################################################################################
def main():
dbConn = getConnection(JDBC_URL, JDBC_DRIVER)
cursor = dbConn.cursor()
try:
cursor.execute(TABLE_DROPPER)
cursor.execute(TABLE_CREATOR)
except zxJDBC.DatabaseError, msg:
print msg
sys.exit(1)
try:
cursor.executemany(RECORD_INSERTER, PLANET_DATA)
dbConn.commit()
except zxJDBC.DatabaseError, msg:
print msg
sys.exit(2)
try:
cursor.execute(PLANET_QUERY)
for row in cursor.fetchall():
name, size, dist = row[:]
print "%-16.16s %-8.8s %4d" % (name, size, dist)
except zxJDBC.DatabaseError, msg:
print msg
sys.exit(3)
cursor.close()
dbConn.close()
sys.exit(0)
################################################################################
def getConnection(jdbc_url, driverName):
"""
Given the name of a JDBC driver class and the url to be used
to connect to a database, attempt to obtain a connection to
the database.
"""
try:
# no user/password combo needed here, hence the None, None
dbConn = zxJDBC.connect(jdbc_url, None, None, driverName)
except zxJDBC.DatabaseError, msg:
print msg
sys.exit(-1)
return dbConn
################################################################################
################################################################################
if __name__ == '__main__':
main()
|
UTF-8
|
Python
| false | false | 2,014 |
11,665,131,184,164 |
dc31dcf93b13fca763d6f8a43ea7b81753ead06f
|
91d14603728e736cc8fe9ee38efd37fe4f222765
|
/book_management.py
|
56403441e47653786e69576730eba06070de94bf
|
[] |
no_license
|
PGryllos/search_for_a_book
|
https://github.com/PGryllos/search_for_a_book
|
cf41ec8852f1f328b2b94db79c4ee1675362d7f6
|
3b1c3b2948ebe95b4a8bd24e24894b3a03334cd6
|
refs/heads/master
| 2016-08-06T18:18:21.790411 | 2013-07-01T04:43:56 | 2013-07-01T04:43:56 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
# Ceid Upatras , data structures assignment 2012-2013
# This file contains definitions of all the functions
# we are going to use for this project. The main usage
# of these functions (better called methods) will be
# to handle data about books ( store titles, add books
# to lists, handle information about the author etc )
import os
import re
import sys
import math
import subprocess
import codecs
import time
from math import sqrt
from classes import *
from sorting_methods import *
from decimal import *
# define global lists for authors and books
books = []
authors = []
number_of_books = 0
search = None
#create Trie based on title
title_trie = Trie()
#create Trie based on surname
lastname_trie = Trie()
def time_calc(arg):
# this function calculates average execution time
global books
count = 0
for i in range(0 , 80 ):
start_time = time.time()
linear_search(0 , i*books[i].id_code)
count = count + time.time() - start_time
print("Average elapsed time for linear search: " + str(count/80))
count = 0
for i in range(0 , 80 ):
start_time = time.time()
binary_search(0 , i*books[i].id_code)
count = count + time.time() - start_time
print("Average elapsed time for binary search: " + str(count/80))
count = 0
for i in range(0 , 80):
start_time = time.time()
binary_int_search(0 , i*books[i].id_code)
count = count + time.time() - start_time
print("Average elapsed time for binary interopolation search: " + str(count/80))
return 0
def linear_search(test , test_var):
# test == 0 ( search books list by id)
# test == 1 ( search books list by title)
# test == 2 ( search authors list by title)
# in success the function returns the object that
# was found, in failure returns None
global books
global authors
if test == 0:
for element in books:
if element.id_code == test_var:
return element
else:
for element in books:
if element.title == test_var:
return element
return None
# the first argument is not going to be used in binary & binary interpolation
# search since we are only going to search upon ids, but it needs to be there
# so that the search option menu works properly
def binary_search(test , test_var):
global books
first = 0
last = number_of_books - 1
count = 0
while True:
median = int(((last - first)/2)+first)
if books[median].id_code == test_var:
return books[median]
elif books[median].id_code < test_var:
first = median
else:
last = median
count += 1
if count > math.log(number_of_books,2):
return None
def binary_int_search(test , testvar):
global books
first = 0
last = number_of_books - 1
size = number_of_books - 1
if testvar < books[first].id_code or testvar > books[last].id_code:
return None
aux = (testvar - books[first].id_code)/(books[last].id_code - books[first].id_code)
Next = int(size * aux) + 1
if Next > number_of_books - 1:
Next = int(Next*0.75)
count = 0
while True:
i = 0
if Next < 0:
return None
if books[Next].id_code == testvar:
return books[Next]
elif size < 5:
return linear_search(test , testvar)
elif books[Next].id_code < testvar:
while books[int(Next + i*sqrt(size) - 1)].id_code < testvar:
try:
if(books[int(Next + (i+1)*sqrt(size))]):
i += 1
except:
break
last = int(Next + i*sqrt(size))
first = int(Next + (i-1)*sqrt(size))
else:
while books[int(Next - i*sqrt(size) + 1)].id_code > testvar:
try:
if(books[int(Next - (i+1)*sqrt(size))]):
i += 1
except:
break
first = int(Next - i*sqrt(size))
last = int(Next - (i-1)*sqrt(size))
size = last - first + 1
count += 1
aux = (testvar - books[first].id_code)/(books[last].id_code - books[first].id_code)
Next = first + int((last - first + 1)*aux)-1
while Next > number_of_books - 1:
Next = int(Next*0.75)
if count > math.log(number_of_books,2):
return None
def trie_based_search(test , test_var):
    # test == 0 - search in lastname_trie (author surnames)
    # test == 1 - search in title_trie (book titles)
    global books
    if test == 0:
        if lastname_trie.getWord(test_var) == 0:
            return 0
        else:
            return None
    else:
        if title_trie.getWord(test_var) == 0:
            return 0
        else:
            return None
search_type = {
0 : linear_search,
1 : binary_search,
2 : binary_int_search,
3 : trie_based_search
}
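# Dispatch sketch for the table above (illustrative values only): with the
# books list loaded, search_type[1](0, 42) binary-searches for the book whose
# id is 42, while search_type[3](1, "Dune") checks the title trie for a
# hypothetical title "Dune".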
def fill_authors_list(first_name , last_name , book_pointer):
global authors
global books
exists = 0
count = 0
for i in authors:
if i.firstname == first_name:
if i.lastname == last_name:
exists = 1
if exists == 1:
books[book_pointer].bauthor = i
break
count += 1
if exists == 0:
authors.append(author(first_name , last_name))
books[book_pointer].bauthor = authors[count]
def filling_tries():
global books
try:
for elm in books:
title_trie.addWord(str(elm.title))
finally:
print("digital tree: [Titles_Trie] initialized successfully")
try:
for elm in books:
lastname_trie.addWord(str(elm.bauthor.lastname))
finally:
print("digital tree: [Lastname_Trie] initialized successfully ")
# this function will be used to parse data from a specified file
def load_books( filename ):
global books
global number_of_books
global title_trie
global lastname_trie
regex = re.compile(
"\"(?P<book_id>.*?)\"\;\"(?P<book_title>.*?)\"\;\"(?P<author_f>.*?)\s+(?P<author_l>.*?)\";\"(?P<year>.*?)\"\;\"(?P<pub_house>.*?)\".*"
)
with open(filename, 'r') as data:
i = 0
for line in data:
match = regex.match(line)
if match:
# summary = " publication year: " + match.group("year") + " by : " + match.group("pub_house")
books.append( book(int(match.group("book_id")),match.group("book_title")))
fill_authors_list(match.group("author_f"),match.group("author_l"), i)
i += 1
number_of_books += 1
print(str(number_of_books) + " books were added to the list")
# sorting the books list using heap_sort algorithm
heap_sort(books)
# lets fill the Tries data structures
filling_tries()
return 0
def add_book(mode):
# mode == 0 ( for adding a book to the list )
# mode == 1 ( for deleting a book from the list )
# returns 0 for success
global books
global search
    # the user should choose which searching method to use
searching_method()
print("\nTo add or remove a book fill in the details below")
# summary = "Unavailable"
while True:
book_id = int(input("Book id: "))
if search == 3:
print("There is not an available digital tree for search by id")
print("Binary search will be used instead")
search = 1
result = search_type[search](0 , book_id)
if result == None:
if mode == 1:
print("id : " + str(book_id) + " could not be found")
else:
book_title = input("Book title: ")
books.append( book(book_id , book_title))
print("id : " + str(books[len(books)-1].id_code) +" , "+ books[len(books)-1].title +" successfully added to the list ")
author_f=input("Author's first name: ")
author_l=input("Author's last name: ")
fill_authors_list(author_f , author_l , len(books)-1)
# we need to rearrange the list
heap_sort(books)
return 0
else:
if mode == 1:
if input("Book : " + result.title + "is going to be removed (y/n) : ") == 'y':
books.remove(result)
print("id : " + str(book_id) + " successfully removed from list ")
# we need to rearrange digital trees
filling_tries()
return 0
else:
return 0
print("id : " + str(book_id) + " already registered for book : " + result.title )
def searching_method():
global search
print("Chose the search method that you would like to use (listed bellow)")
print("[0] - Linear search ")
print("[1] - Binary search (cannot be used for options 6 & 8)")
print("[2] - Binary interpolation search (cannot be used for options 6 & 8)")
print("[3] - Searching using digital trees (can only be used for options 5 & 8)")
search = int(input(": "))
def display_menu():
print("Chose one of the following options:")
print("[0] exit")
print("[1] Load books from file")
print("[2] Save books to file")
print("[3] Add a book to the list")
print("[4] Remove a book from the list")
print("[5] Display a book by id")
print("[6] Display a book by title")
print("[7] Display books")
print("[8] Display a book by surname search")
print("[9] Help")
print("[10] testing searching methods")
while True:
try:
a = input(": ")
number = int(a)
if (0 <= number <= 10):
return number
else:
                raise ValueError()
except KeyboardInterrupt:
searching_method()
except:
print("you should enter a digit [1-10]")
def display_book(mode):
    # mode == 0 , display a book by id
    # mode == 1 , display a book by title; if the selected searching method is
    # the trie, then the result can only be a true/false conclusion about
    # whether the book or the author exists in the database
    # mode == 2 , display all books
    # otherwise , display books by author surname
global title_trie
global lastname_trie
global search
# the user should chose what searching method to use
searching_method()
if mode == 0:
book_id = int(input("Book id: "))
# preventing user's mistakes
if search == 3:
print("There is not an available digital tree for search by id")
print("Binary search will be used instead")
search = 1
result = search_type[search](0 , book_id)
if result == None:
print("id : " + str(book_id) + " could not be found")
else:
print("id : "+ str(result.id_code) +" title : "+ result.title +" author : " +
result.bauthor.firstname +" "+ result.bauthor.lastname )
return 0
elif mode == 1:
if search == 1:
print("Binary search cannot be used on book titles, linear search will be used instead")
search = 0
book_title = str(input("Book title: "))
result = search_type[search](1 , book_title)
if result == None:
print( book_title + " could not be found")
else:
if search == 3:
print("Book "+ book_title +" was found in the database")
return 0
print("id : "+ str(result.id_code) +" title : "+ result.title +" author : " +
result.bauthor.firstname +" "+ result.bauthor.lastname )
return 0
elif mode == 2:
for elm in books:
print("\""+str(elm.id_code) +"\": \""+elm.title+"\" by "+elm.bauthor.firstname+" "+elm.bauthor.lastname)
return 0
else:
name = input("Authors lastname: ")
if search == 3 and trie_based_search(0 , name) == 0:
print("Author " + name + " was found in the database")
return 0
print("[Books found]")
for elm in books:
if elm.bauthor.lastname == name:
print(elm.title)
return 0
def help_text(filename):
subprocess.call(['./help.sh'])
return 0
def save_books(filename):
global books
f = codecs.open(filename , 'w' , 'utf-8')
f.write(u"number of books : "+ str(number_of_books)+"\n")
f.close()
for elm in books:
f = codecs.open(filename , 'a' , 'utf-8')
f.write(u"\""+str(elm.id_code)+"\";\""+elm.title+"\";\""+elm.bauthor.firstname+" "+elm.bauthor.lastname+"\";\n")
f.close()
return 0
# this is going to be main function to handle the book data
def main():
global search
# in case the file is not specified we are going to use a default
# value
if len(sys.argv) == 2:
file_to_use = sys.argv[1]
else:
file_to_use = 'books.txt'
options = {
1 : (load_books , file_to_use),
2 : (save_books , file_to_use),
3 : (add_book , 0),
4 : (add_book , 1),
5 : (display_book , 0),
6 : (display_book , 1),
7 : (display_book , 2),
8 : (display_book , 3),
9 : (help_text , 'help.sh'),
10 : (time_calc , 0 )
}
while True:
# TODO : implement proper exception handling
option_num = display_menu()
if option_num == 0:
break
if options[option_num][0](options[option_num][1]) != 0:
print("something went wrong")
# saving changes to file
save_books(file_to_use)
if __name__ == "__main__":
main()
|
UTF-8
|
Python
| false | false | 2,013 |
19,421,842,130,329 |
2e912eedf691a216a83a1de7b689fd51856fea6d
|
2f866c13e8c03529116e3916b4571444db76bd20
|
/scripts/utils/__init__.py
|
114e0660094579d46bb068c8a4c92a4cd6cf67c7
|
[] |
no_license
|
hjrnunes/desacordo_ortografico
|
https://github.com/hjrnunes/desacordo_ortografico
|
2bce6c806fb8c8828c65bb53ec7369f04e6f6974
|
8a0a1fb32724a1266e2e278cebc1097dc278eb89
|
refs/heads/master
| 2021-01-17T14:27:00.069868 | 2013-02-24T21:12:09 | 2013-02-24T21:12:09 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# encoding: utf-8
from utils.settings import Settings
from utils.exceptions import NoInputGiven
import sys, re
PREPOSITIONS = [
"a", "o", "as", "às", "à", "os", "ao", "aos", "na", "nas", "no", "nos",
"ante", "antes", "apó", "após", "saté", "com", "contra", "de", "des",
"da", "das", "do", "dos", "desde", "em", "entre", "perante", "por",
"sem", "sob", "sobre", "trás", "no", "nos", "anti", "auto", #"para"
]
MAIN_PREPOSITIONS = ["auto", "anti"]
def items_to_dict(items):
words = {}
for key,value in items:
if not key:
subwords = re.findall(r'(.*)[pc]([^aeiou].*)', value)
if subwords and len(subwords[0])==2:
words[ ''.join(subwords[0]) ] = value
elif '-' in value:
words[ value.replace('-', ' ') ] = value
else:
words[ key ] = value
else:
words[ key ] = value
return words
def user_input(options):
if Settings.DONT_ASK:
raise NoInputGiven()
print "\nDon't know which one is right: "
print "\n".join( ['\t%s - %s' %(i+1, options[i]) for i in range(len(options))])
print '\t0 - None of the above'
index = input("Which option is the right one? ")
if index==0 or index>len(options):
print 'No given answer'
raise NoInputGiven()
print "Given answer: %s" %options[index-1]
return options[index-1]
class verbosity(object):
def __init__(self, text=None):
self.text = text
def __call__(self, f):
def wrapper(*args, **kwargs):
if Settings.VERBOSE:
if self.text == None:
print '.',
sys.stdout.flush()
else:
print '\n%s' %self.text
return f(*args, **kwargs)
return wrapper
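# Usage sketch (illustrative, not part of the original module): when
# Settings.VERBOSE is on, the wrapped call prints the given text first,
# or a progress dot when no text is given.
#
# @verbosity('building word list')
# def build_word_list(path):
#     ...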
|
UTF-8
|
Python
| false | false | 2,013 |
8,985,071,592,446 |
72bfc0a6f496ee9c59fd9ffc76d3dd040643f09b
|
c92a3b9a9e8f6fcd2153dc8cd854788438a5c737
|
/shopping_cart/scripts/sc_main.py
|
7eb7324a1b4a35035ddef9b75f3e89b7e81731df
|
[] |
no_license
|
s9wischu/uow_Botinator
|
https://github.com/s9wischu/uow_Botinator
|
96c60c53bc9ad9c9f2c4d11cc990b85d65972544
|
83265834ef02b52880ff9063cde855201c5ddf24
|
refs/heads/master
| 2021-05-27T12:41:32.243198 | 2014-12-09T01:57:28 | 2014-12-09T01:57:28 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
import rospy
from geometry_msgs.msg import Twist
from kobuki_msgs.msg import BumperEvent
from random import randint
from sensor_msgs.msg import PointCloud2
import sensor_msgs.point_cloud2 as pc2
import sys
from beginner_tutorials.msg import Scan_User
from time import time
from std_msgs.msg import String
from sound_play.msg import SoundRequest
import structures
import server
userTime = 0.0
userTimeDelta = 2.0
userId = 0
userLost = 0
ignore = 0
angular = 0
pubVelocity = 0
subScanUser = 0
subCamDepth = 0
pubSound = 0
state = "START"
# login mechanism; after the webcam recognizes the user fiducial, it needs a
# period of userTimeDelta seconds without seeing the fiducial before the user
# can log in/log out
# when the user logs out, switch off follow mode (unsubscribe)
def customerLoggedIn(customer):
global userId, subCamDepth, userTime, userTimeDelta, pubSound, state
rospy.loginfo("user logged in")
# TODO: support more than one gender.
# Say welcome message
soundRequest = SoundRequest()
soundRequest.sound = -3 #Say text
soundRequest.command = 1 #Play once
soundRequest.arg = "Welcome, Mister " + customer + "!"
pubSound.publish(soundRequest)
def callback(data):
global pubVelocity
global ignore
global angular
# convert pointcloud to laser_scan and publish
if ignore != 0:
ignore -= 1
msg = Twist()
msg.angular.z = angular
pubVelocity.publish(msg)
return
data_out = pc2.read_points(data, field_names=None, skip_nans=True, uvs=[])
# Number of buckets to use for y-coordinates
number_buckets = 1000
# Threshold for points to be considered foreground
factor = 0.85
buckets = [[] for x in range(number_buckets)]
for point in data_out:
y = point[1]
bucket = int((y + 1.0) / (2.0 / number_buckets))
buckets[bucket].append(point)
pointlist = []
# Calculate the average for each bucket (if there are any points)
for i in range(number_buckets):
if(len(buckets[i]) == 0): continue
# Calculate the average of all the z-coordinates in the bucket
average = 0.0
for point in buckets[i]:
average += point[2]
average /= len(buckets[i])
# Add the points with z-coordinates above the threshold (dependent
# on average of z-coordinates)
for point in buckets[i]:
if point[2] < factor * average:
pointlist.append(point)
rospy.loginfo("average in bucket " + str(i) + ": " + str(average))
# Iterate over all points in pointlist and select the point that is closest
# to the camera (i.e., lowest z-coordinate).
    minz = sys.maxint
closest_point = None
for point in pointlist:
if point[2] < minz:
minz = point[2]
closest_point = point
    msg = Twist()
    if closest_point is None:
        rospy.loginfo("No point could be found.")
        angular = 0
    else:
        # use the closest point found above, not the last point of the loop
        rospy.loginfo("Closest point found: (" + str(closest_point[0]) + "," + str(closest_point[1]) \
            + ") with z-value " + str(closest_point[2]))
        if closest_point[0] < 0:
            rospy.loginfo("it is on the left")
        else:
            rospy.loginfo("it is on the right")
        if closest_point[0] < 0.5:
            # Rotate to the right
            angular = +1
            ignore = 5
        elif closest_point[0] > 0.5:
            # Rotate to the left
            angular = -1
            ignore = 5
def initialization():
global pubVelocity, subScanUser, pubSound
rospy.init_node('main', anonymous=True)
pubVelocity = rospy.Publisher('/mobile_base/commands/velocity', Twist, queue_size=10)
server.ServerData.pubSound = rospy.Publisher('/robotsound', SoundRequest, queue_size=10)
    subScanUser = rospy.Subscriber("scan_customer", String, customerLoggedIn)
# TODO: Subscriber for items
def main():
initialization()
rospy.spin()
if __name__ == '__main__':
try:
main()
except rospy.ROSInterruptException:
pass
|
UTF-8
|
Python
| false | false | 2,014 |
2,241,972,942,052 |
e8d3385c87dd5772a696e643346f52537c0ecba3
|
c5b6288b7ff57f37aff0dd6e8a966219477a394e
|
/pajarillos_analyser/db/injector_manager.py
|
f00f0ec41f9409439c56ee28078763e060f1a9f6
|
[] |
no_license
|
xcu/pajarillos_analyser
|
https://github.com/xcu/pajarillos_analyser
|
092bea0b7b3076fb8ba9637129a5c2e3f21ba283
|
aa9a947851ca49b572b6622d889855c4a0c6d6f2
|
refs/heads/master
| 2021-01-22T22:49:16.339039 | 2014-04-29T14:06:29 | 2014-04-29T14:06:29 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
class InjectorManager(object):
    def __init__(self, registered_injectors=None):
        # a mutable default argument would be shared across all instances
        self.registered_injectors = registered_injectors if registered_injectors is not None else []
def register_injector(self, injector):
self.registered_injectors.append(injector)
def to_db(self, streamer):
for message in streamer:
# each injector can inject the same message wherever it needs to
for injector in iter(self.registered_injectors):
injector.to_db(message)
# do whatever is left
for injector in iter(self.registered_injectors):
injector.last_to_db()
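# Usage sketch (illustrative; ``SomeInjector`` and ``stream`` stand in for a
# real injector object and a message iterable): every registered injector
# sees each message once, then gets a final chance to flush what it buffered.
#
# manager = InjectorManager()
# manager.register_injector(SomeInjector())
# manager.to_db(stream)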
|
UTF-8
|
Python
| false | false | 2,014 |
5,815,385,720,463 |
0c4f2b533c46f59521175a279031603fe083f745
|
13294f13a7966e2febc3a449413f1dd80b1dc539
|
/acquisition/mongo_writer.py
|
6257935dbe0138f99b7f47ae0e79fe2098706041
|
[] |
no_license
|
aismail/AmI-Platform
|
https://github.com/aismail/AmI-Platform
|
a02bef6bac09e8cb84e143c2e4f6f1cbefed2ca3
|
89b70ae336417f32fb99f17bf8eacd0bbb84e2eb
|
refs/heads/master
| 2020-12-25T04:37:09.713615 | 2013-09-02T18:01:59 | 2013-09-02T18:01:59 | 2,614,674 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from collections import defaultdict
import time
import pymongo
from pymongo.errors import OperationFailure
from core import PDU
from core import constants
class MongoWriter(PDU):
""" PDU for writing all measurements to MongoDB. """
QUEUE = 'mongo-writer'
DATABASE = 'measurements'
COLLECTION = 'docs'
TTL = constants.SECONDS_IN_DAY
BETWEEN_WRITES = 1 # seconds between writes for a sensor_type
def __init__(self):
""" After performing base class initializations, make sure that
the mongo collection where measurements are written has a TTL
index on the created_at column.
"""
super(MongoWriter, self).__init__()
self.collection.ensure_index([('created_at', pymongo.DESCENDING)],
background = True,
expireAfterSeconds = self.TTL)
        self.collection.ensure_index('context')
self.last_written_for_sensor_type = defaultdict(lambda: 0)
def process_message(self, message):
""" This PDU processes messages by writing them to MongoDB.
Measurements are written to a predefined collection, which is a
TTL collection that expires items older than 1 day. We need a TTL
collection so that our database doesn't fill up.
"""
if not self._should_be_saved(message):
return
# Add a created_at field so that we can expire items older than TTL
message['created_at'] = int(time.time())
try:
self.collection.save(message, safe = True)
# After saving the message successfully, mark it as saved
self._mark_as_saved(message)
except OperationFailure, e:
import traceback
traceback.print_exc()
@property
def collection(self):
""" Shortcut for getting the Mongo collection. """
try:
db = getattr(self.mongo_connection, self.DATABASE, None)
collection = getattr(db, self.COLLECTION, None)
return collection
except:
import traceback
traceback.print_exc()
return None
def _should_be_saved(self, message):
""" Decide whether the current message should be written or not.
        The decision is made based on the last time a write was performed
        for the given sensor type.
"""
# Messages that don't have a sensor_type will be rate-limited together
sensor_type = message.get('sensor_type', 'default')
last_written = self.last_written_for_sensor_type[sensor_type]
return time.time() - last_written >= self.BETWEEN_WRITES
def _mark_as_saved(self, message):
""" Mark a message as successfully saved in the db.
This actually updates the timestamp for the sensor_type of the message.
"""
sensor_type = message.get('sensor_type', 'default')
self.last_written_for_sensor_type[sensor_type] = time.time()
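# Rate-limiting sketch (illustrative; assumes the PDU messaging and Mongo
# infrastructure behind MongoWriter is available, and the message keys shown
# are made up):
#
# writer = MongoWriter()
# writer.process_message({'sensor_type': 'kinect', 'image': '...'})   # saved
# writer.process_message({'sensor_type': 'kinect', 'image': '...'})   # skipped, < BETWEEN_WRITES apart
# writer.process_message({'sensor_type': 'arduino', 'temp': 22})      # saved, throttled independently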
if __name__ == "__main__":
module = MongoWriter()
module.run()
|
UTF-8
|
Python
| false | false | 2,013 |
11,974,368,836,449 |
0c7ec2cd2f6f75be04f2600046b47fabc1a75c1b
|
952c34aed7b14f134fee22ec7ead50c1d1232cba
|
/carrental/car_gen.py
|
abc5ac1d009e58011de8ae5c0780ac7b25a5698c
|
[] |
no_license
|
jyzackoh/carrental
|
https://github.com/jyzackoh/carrental
|
ed6d9566dea443e3b7b57fcdeb31e15c8542feff
|
b8c80f31e318b2e19fb8150ae1c946aa15fabf6e
|
refs/heads/master
| 2021-01-18T16:32:10.711810 | 2014-11-10T05:44:24 | 2014-11-10T05:44:24 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from models import Car, CarInstance, UserDetails, Booking
from django.contrib.auth.models import User
from datetime import date, timedelta
import uuid
# AUTOMATIC = 'AU'
# MANUAL = 'MA'
# TRANSMISSION_CHOICES = (
# (AUTOMATIC, 'Automatic'),
# (MANUAL, 'Manual'),
# )
# HATCHBACK = 'ha'
# LUXURY_SEDAN = 'lu'
# MPV = 'mv'
# SEDAN = 'se'
# SPORT = 'sp'
# SUV = 'su'
#CUV = 'cu'
# TYPE_CHOICES = (
# (HATCHBACK, 'Hatchback'),
# (LUXURY_SEDAN, 'Luxury sedan'),
# (MPV, 'MPV'),
# (SEDAN, 'Sedan'),
# (SPORT, 'Sport'),
# (SUV, 'SUV'),
# (CUV, 'Crossover'),
# )
def popz():
#TOYOTA CARS
Car.objects.create(make_model="Toyota Corolla Altis", max_passengers=5, transmission="AU", aircon=True, type="se")
Car.objects.create(make_model="Toyota Corolla Axio", max_passengers=5, transmission="AU", aircon=True, type="se")
Car.objects.create(make_model="Toyota Camry", max_passengers=5, transmission="AU", aircon=True, type="se")
Car.objects.create(make_model="Toyota Corona", max_passengers=5, transmission="AU", aircon=True, type="se")
Car.objects.create(make_model="Toyota AE86", max_passengers=5, transmission="AU", aircon=True, type="sp")
Car.objects.create(make_model="Toyota Wish", max_passengers=7, transmission="AU", aircon=True, type="mv")
Car.objects.create(make_model="Toyota Picnic", max_passengers=7, transmission="AU", aircon=True, type="mv")
Car.objects.create(make_model="Toyota Alphard", max_passengers=6, transmission="AU", aircon=True, type="mv")
Car.objects.create(make_model="Toyota Vios", max_passengers=5, transmission="AU", aircon=True, type="se")
Car.objects.create(make_model="Toyota Aurius", max_passengers=5, transmission="AU", aircon=True, type="ha")
#NISSAN CARS
Car.objects.create(make_model="Nissan Note", max_passengers=5, transmission="AU", aircon=True, type="mv")
Car.objects.create(make_model="Nissan Almera", max_passengers=4, transmission="AU", aircon=True, type="se")
Car.objects.create(make_model="Nissan Sylphy", max_passengers=5, transmission="AU", aircon=True, type="se")
Car.objects.create(make_model="Nissan Teana", max_passengers=5, transmission="AU", aircon=True, type="lu")
Car.objects.create(make_model="Nissan Qashqai", max_passengers=5, transmission="MA", aircon=True, type="cu")
Car.objects.create(make_model="Nissan GT-R", max_passengers=2, transmission="MA", aircon=True, type="sp")
Car.objects.create(make_model="Nissan 370Z", max_passengers=2, transmission="AU", aircon=True, type="sp")
Car.objects.create(make_model="Nissan Elgrand", max_passengers=7, transmission="AU", aircon=True, type="mv")
Car.objects.create(make_model="Nissan Murano", max_passengers=5, transmission="MA", aircon=True, type="su")
Car.objects.create(make_model="Nissan X-Trail", max_passengers=7, transmission="AU", aircon=True, type="su")
#HONDA CARS
Car.objects.create(make_model="Honda Jazz", max_passengers=5, transmission="AU", aircon=True, type="ha")
Car.objects.create(make_model="Honda Insight", max_passengers=5, transmission="AU", aircon=True, type="ha")
Car.objects.create(make_model="Honda Accord", max_passengers=5, transmission="AU", aircon=True, type="se")
Car.objects.create(make_model="Honda City", max_passengers=5, transmission="AU", aircon=True, type="se")
Car.objects.create(make_model="Honda Civic", max_passengers=5, transmission="AU", aircon=True, type="se")
Car.objects.create(make_model="Honda Odyssey", max_passengers=7, transmission="AU", aircon=True, type="mv")
Car.objects.create(make_model="Honda CR-V", max_passengers=5, transmission="AU", aircon=True, type="cu")
#KIA CARS
Car.objects.create(make_model="Kia Forte", max_passengers=5, transmission="AU", aircon=True, type="se")
Car.objects.create(make_model="Kia Cerato", max_passengers=5, transmission="AU", aircon=True, type="se")
Car.objects.create(make_model="Kia Optima", max_passengers=5, transmission="AU", aircon=True, type="se")
Car.objects.create(make_model="Kia Sorento", max_passengers=7, transmission="AU", aircon=True, type="su")
Car.objects.create(make_model="Kia Sportage", max_passengers=5, transmission="AU", aircon=True, type="cu")
#PORSCHE CARS
Car.objects.create(make_model="Porsche Boxter", max_passengers=2, transmission="AU", aircon=True, type="sp")
Car.objects.create(make_model="Porsche Cayman", max_passengers=2, transmission="AU", aircon=True, type="sp")
Car.objects.create(make_model="Porsche 911 Carrera", max_passengers=2, transmission="MA", aircon=True, type="sp")
Car.objects.create(make_model="Porsche 918 Spyder", max_passengers=2, transmission="AU", aircon=True, type="sp")
Car.objects.create(make_model="Porsche Panamera", max_passengers=4, transmission="AU", aircon=True, type="lu")
Car.objects.create(make_model="Porsche Macan", max_passengers=5, transmission="AU", aircon=True, type="cu")
Car.objects.create(make_model="Porsche Cayenne", max_passengers=5, transmission="AU", aircon=True, type="cu")
#MERCEDES CARS
Car.objects.create(make_model="Mercedes A-Class", max_passengers=5, transmission="AU", aircon=True, type="ha")
Car.objects.create(make_model="Mercedes B-Class", max_passengers=5, transmission="AU", aircon=True, type="mv")
Car.objects.create(make_model="Mercedes C-Class", max_passengers=5, transmission="AU", aircon=True, type="se")
Car.objects.create(make_model="Mercedes CL-Class", max_passengers=5, transmission="AU", aircon=True, type="lu")
Car.objects.create(make_model="Mercedes E-Class", max_passengers=5, transmission="AU", aircon=True, type="se")
Car.objects.create(make_model="Mercedes G-Class", max_passengers=5, transmission="AU", aircon=True, type="su")
Car.objects.create(make_model="Mercedes M-Class", max_passengers=5, transmission="AU", aircon=True, type="su")
Car.objects.create(make_model="Mercedes R-Class", max_passengers=5, transmission="AU", aircon=True, type="mv")
Car.objects.create(make_model="Mercedes S-Class", max_passengers=5, transmission="AU", aircon=True, type="lu")
users = ['John', 'Bill', 'Sarah', 'Katie', 'Jim', 'Pierre', 'Claudia', 'Josh', 'Diablo', 'Horace',
'Ben', 'Quentin', 'William', 'Elrond', 'Richard', 'Tina', 'Jonathan',
'George', 'Elaine', 'Margery', 'Sam', 'Francis', 'Sylvia',
'Rhaegar', 'Tammy', 'Oprah', 'Poincare', 'Abel', 'Descarte', 'Fitzgerald',
'Goldburg', 'Henri', 'Justin', 'Kathryn', "L'hopital", 'Xavier', 'Cauchy',
'Adam', 'Madison', 'Monroe', 'Jackson', 'Van Buren', 'Harrison', 'Tyler', 'Polk',
'Taylor', 'Fillmore', 'Pierce', 'Buchanan', 'Lincoln', 'Johnson', 'Grant',
'Hayes', 'Garfield', 'Arthur', 'Cleveland', 'McKinley', 'Roosevelt', 'Taft',
'Wilson', 'Harding', 'Coolidge', 'Hoover', 'Truman', 'Eisenhower', 'Kennedy',
'Nixon', 'Carter', 'Reagan', 'Bush', 'Clinton', 'Obama', 'Hilbert', 'Ramanujan',
'Noether', 'Weyl', 'Klein', 'Galois', 'Riemann', 'Lagrange', 'Gauss', 'Euler',
'Fermat', 'Leibniz', 'Hawking', 'Kendall', 'Feynman', 'Nambu', 'Wheeler', 'Dirac',
'Fermi', 'Heisenberg', 'Pauli', 'Bose', 'de Broglie', 'Schrodinger', 'Bohr',
'Rutherford', 'Curie', 'Plank', 'Hertz', 'Tesla', 'Lorentz', 'Becqueral',
'Boltzmann', 'Maxwell', 'Faraday']
cars = Car.objects.all()
cars_length = len(cars)
    cars_start = 0
colours = ['black', 'silver', 'purple', 'pink', 'yellow', 'blue', 'red', 'orange',
'green', 'white']
colours_length = len(colours)
    colours_start = 0
prev_cars = [None, None]
for index, user in enumerate(users):
email = '%s@%s.com' % (user.lower(), user.lower())
password = user.lower() + 'password'
nric = 'S%07d%s' % (index, chr(index%26 + 65))
dob = date(1970 + index, 1, 1)
contact = 91111111 + index
license_issue = date(1990 + index, 1, 1)
address = user + ' road'
new_user = User.objects.create_user(user, email, password)
UserDetails.objects.create(user=new_user, nric=nric, dob=dob,
contact=contact, license_issue_date=license_issue,
address=address)
if index > 0:
for i in range(0,2): #the next person rents the previous person's cars
Booking.objects.create(start=date.today()+timedelta(days=i*10),
end=date.today()+timedelta(days=i*10+10),
car_instance=prev_cars[i],
borrower=new_user)
for i in range(0,2):
prev_cars[i] = CarInstance.objects.create(car=cars[cars_start % cars_length],
colour=colours[colours_start % colours_length],
owner=new_user,
price=(1000 + 50*(cars_start%cars_length)),
candrivemy=((cars_start%2) == 0),
year=(1970 + cars_start%10),
carplate='S%s%s%04d%s' % (chr(cars_start%26 + 65),
chr((cars_start+5)%26 + 65),
cars_start,
chr(((cars_start+cars_start/26)%26 + 65)))
)
cars_start += 1
colours_start += 1
|
UTF-8
|
Python
| false | false | 2,014 |
10,651,518,933,155 |
db355c9c884a60dbe11719623cbd1929a796abc9
|
25e101cb88b5af1b5de81352d83bc27c2d55f1a7
|
/maras/index/hmap.py
|
3363bba7a311689ef5fcfae3e5f8f5379c976cc8
|
[
"Apache-2.0"
] |
permissive
|
thatch45/maras
|
https://github.com/thatch45/maras
|
94dd06cb58589197e2d429adebaa4ba0a09a2e44
|
ae6a0281c15eca1b65aae48b5e0ad1b75e444812
|
refs/heads/master
| 2021-01-15T23:34:57.555791 | 2014-11-22T19:45:23 | 2014-11-22T19:45:23 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
'''
A hash based index
'''
# Import python libs
import struct
import os
# Import maras libs
import maras.utils
# Import third party libs
import msgpack
class HMapIndex(object):
'''
Hash index
'''
def __init__(
self,
name,
dbpath,
hash_limit=0xfffff,
key_hash='sha1',
header_len=1000):
self.dbpath = dbpath
self.path = os.path.join(dbpath, '{0}.index'.format(name))
self.hash_limit = hash_limit
self.key_hash, self.key_size = maras.utils.get_hash_data(key_hash)
self.header = {'hlim': hash_limit,
'keyh': key_hash,
'ksz': self.key_size,
'nsz': 8}
self.header_len = header_len
self.h_delim = '_||_||_'
self.fp = self.__open_index()
        self.h_bucket_fmt, self.h_bucket_size = self.__gen_hbucket_fmt()
def __gen_hbucket_fmt(self):
'''
Generate the hash bucket struct format based on the sizes in the
header
'''
        # Match msgpack big-endian
# hbucket format is:
# key, data_location, next
# key is length of the hash function
# data_location and next are longs
        # struct counts precede the format char, so the key is '{n}s'
        fmt = '>{0}sLL'.format(self.key_size)
        test_struct = struct.pack(fmt, maras.utils.rand_hex_str(self.key_size), 0, 0)
return fmt, len(test_struct)
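    # Illustrative arithmetic only: e.g. with key_size == 20 the format above
    # works out to '>20sLL', i.e. a 20-byte key plus two 4-byte big-endian
    # unsigned longs, 28 bytes per bucket.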
def __open_index(self):
'''
Auto create or open the index
'''
if not os.path.exists(self.path):
return self.create()
return self.open_index()
def create(self):
'''
Create a new index
'''
if os.path.exists(self.path):
raise ValueError('Index exists')
fp_ = open(self.path, 'w+b')
header = '{0}{1}'.format(msgpack.dumps(self.header), self.h_delim)
fp_.write(header)
return fp_
def open_index(self):
'''
Open an existing index
'''
if not os.path.isfile(self.path):
raise ValueError('No Index Exists')
        # open read/write so later inserts can update the index in place
        fp_ = open(self.path, 'r+b')
raw_head = fp_.read(self.header_len)
self.header = msgpack.loads(raw_head[:raw_head.index(self.h_delim)])
self.hash_limit = self.header['hlim']
self.key_hash, self.key_size = maras.utils.get_hash_data(self.header['keyh'])
fp_.seek(0)
return fp_
def _hash_position(self, key, first):
'''
Calculate the position of the hash based on the key and start location
'''
return abs(hash(key) & self.hash_limit) * self.h_bucket_size + first
def _get_h_entry(self, pos):
'''
Return the unpacked tuple if it exists, else None
'''
self.fp.seek(pos)
raw = self.fp.read(self.h_bucket_size)
try:
return struct.unpack(self.h_bucket_fmt, raw)
except Exception:
return None
def _find_h_tail(self, rec_top, entry):
'''
Use the entry to find the end of the linked list of referenced h_refs
'''
while entry[2]:
rec_top = entry[2]
            entry = self._get_h_entry(entry[2])
return rec_top, entry
def _write_h_entry(self, h_pos, key, id_, start, size, next_):
'''
Write the hash entry
'''
top = self._write_d_entry(id_, start, size)
h_entry = struct.pack(self.h_bucket_fmt, key, top, next_)
self.fp.seek(h_pos)
self.fp.write(h_entry)
def _write_collision(self, entry, h_pos, key, id_, start, size):
        '''
        Append a hash entry for a key that collided with an occupied bucket,
        linking it onto the tail of that bucket's chain
        '''
top = self._write_d_entry(id_, start, size)
# find the tail
tail_pos, tail = self._find_h_tail(h_pos, entry)
tail_entry = struct.pack(self.h_bucket_fmt, tail[0], tail[1], top)
self.fp.seek(tail_pos)
self.fp.write(tail_entry)
self.fp.seek(0, 2)
h_entry = struct.pack(self.h_bucket_fmt, key, top, 0)
self.fp.write(h_entry)
def _write_d_entry(self, id_, start, size):
'''
Write the data ref entry
'''
self.fp.seek(0, 2)
if self.fp.tell() < self.header_len:
self.fp.seek(self.header_len)
top = self.fp.tell()
self.fp.write(struct.pack(self.h_bucket_fmt, id_, start, size))
return top
def insert(self, key, id_, start, size):
'''
Insert the data into the specified location
'''
if not id_:
id_ = maras.utils.rand_hex_str(self.key_size)
h_pos = self._hash_position(key, self.header_len)
        entry = self._get_h_entry(h_pos)
if entry is None:
self._write_h_entry(h_pos, key, id_, start, size, 0)
elif key != entry[0]:
# hash_collision
self._write_collision(entry, h_pos, key, id_, start, size)
return True
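# Usage sketch (illustrative; assumes a writable dbpath directory and that
# maras.utils provides get_hash_data/rand_hex_str as used above):
#
# idx = HMapIndex('docs', '/tmp/marasdb')
# idx.insert('some-key', None, start=0, size=128)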
|
UTF-8
|
Python
| false | false | 2,014 |
10,677,288,711,897 |
6716fbb753c5d6ebf3cc004c2158db2ddeeb7713
|
9214736766cce5399cf0d178b1398438fc40357d
|
/libs/pyGPR/src/GPR/COPY/temp.py
|
eea24d4be3c31e7942602134d7dc7f44f1930d9a
|
[
"GPL-2.0-or-later"
] |
non_permissive
|
CustomComputingGroup/MLO
|
https://github.com/CustomComputingGroup/MLO
|
daaa391984a7b795354e518563733c98692b460c
|
3af52321da6a5bfb3b3cc04df714eb04250e157c
|
refs/heads/master
| 2021-01-01T19:34:15.891410 | 2013-05-21T16:23:26 | 2013-05-21T16:23:26 | 7,650,010 | 0 | 1 | null | false | 2019-01-21T19:53:47 | 2013-01-16T17:12:56 | 2014-01-23T15:19:44 | 2013-05-21T16:23:26 | 33,242 | 2 | 3 | 9 |
Python
| false | null |
import numpy as np
from solve_chol import solve_chol
import Tools
from copy import copy,deepcopy
from random import sample
def foo():
foo.counter += 1
print "Counter is %d" % foo.counter
def randperm(k):
# return a random permutation of range(k)
z = range(k)
y = []
ii = 0
while z and ii < 2*k:
n = sample(z,1)[0]
y.append(n)
z.remove(n)
ii += 1
return y
def flatten(l, ltypes=(list, tuple)):
ltype = type(l)
l = list(l)
i = 0
while i < len(l):
while isinstance(l[i], ltypes):
if not l[i]:
l.pop(i)
i -= 1
break
else:
l[i:i + 1] = l[i]
i += 1
return ltype(l)
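# e.g. flatten([3, [4, 5], [3, [4, 5], [3, 4, 5]]]) returns
# [3, 4, 5, 3, 4, 5, 3, 4, 5], mirroring the commented examples in __main__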
def convert_to_array(hyp):
y = np.concatenate((np.concatenate((hyp.mean, hyp.cov),axis=0),hyp.lik),axis=0)
return y
def convert_to_class(x,hyp):
y = deepcopy(hyp)
Lm = len(hyp.mean)
Lc = len(hyp.cov)
y.mean = x[:Lm]
y.cov = x[Lm:(Lm+Lc)]
y.lik = x[(Lm+Lc):]
return y
def tempfunc(x,y=None,z=0):
print x,y,z
'''if x == 1 and z>0:
return 6,None,None
elif x ==2 and z>0:
return 6,5,None
elif z>0:
return 6,5,4
else:
return "z is not active"
'''
def unique(x):
# First flatten x
y = [item for sublist in x for item in sublist]
if isinstance(x,np.ndarray):
n,D = x.shape
assert(D == 1)
y = np.array( list(set(x[:,0])) )
y = np.reshape(y, (len(y),1))
else:
y = list(set(y))
return y
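# e.g. unique([[2, 3], [3, 2]]) returns [2, 3] (set order not guaranteed),
# while an (n,1) numpy array comes back as an (m,1) array of distinct values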
class hyperParameters:
def __init__(self):
self.mean = []
self.cov = []
self.lik = []
if __name__ == '__main__':
hyp = hyperParameters()
hyp.mean = [3.3,3.1]
hyp.cov = [0.5]
#x = np.array([2,3,4,2])
#y = [2,3,4,2]
#print np.shape(x)[0]
#print type(x), type(unique(x))
#print type(y), type(unique(y))
#x = np.random.rand(10,1)
#x[x < 0.5] = 1
#x[x > 0] = 1
#ux = unique(x)
#ind = ( ux != 1 )
#if any( ux[ind] != -1):
# print 'You attempt classification using labels different from {+1,-1}\n'
#x = np.random.rand(10,1)
#tempfunc(1,None,3)
#tempfunc(1,None)
n = 5
x = np.random.rand(n,n)
X = np.dot(x,x.T)
L = np.linalg.cholesky(X).T
#Ltril = np.all( np.tril(L,-1) == 0 )
#print Ltril
#M = solve_chol(L,np.eye(n))
#print np.dot(M,X)
#print np.dot(X,M)
#y = convert_to_array(hyp)
#print y
#z = list(y)
#z = convert_to_class(y,hyp)
#print z.mean, z.cov, z.lik
#x = z.pop(0)
#print x,z
#z.insert(0,x)
#a = tuple(z)
#b = list(a)
#b[0] = -300
#print "a = ",a
#y = np.random.random((5,1))
#nz = range(len(y[:,0]))
#Fmu1 = np.dot(X.T,y[nz,:])
#Fmu = np.dot(X.T,y)
#print np.allclose(Fmu1,Fmu)
#y = np.reshape(x,(5,))
#A = [3,4,5]
#print A, flatten(A)
#A = [3,[4,5]]
#print A, flatten(A)
#A = [3,[4,5],[3,[4,5],[3,4,5]]]
#print A, flatten(A)
#k = 10
#y = randperm(k)
#print y
#foo.counter = 0
#foo()
#t1,t2 = np.meshgrid(np.arange(-4,4.1,0.1),np.arange(-4,4.1,0.1))
#t = [t1(:) t2(:)]; n = len(t)
#y = np.array( zip( np.reshape(t1,(np.prod(t1.shape),)), np.reshape(t2,(np.prod(t2.shape),)) ) )
#print y[0,:]
y = np.random.random((4,1))
a = copy(y[0])
print a
y[0] = 1.2
print a,1.2
del a
del y
|
UTF-8
|
Python
| false | false | 2,013 |
17,317,308,160,133 |
edeb78abaab322e9609174db01eaefd9d5828975
|
fdedb8c3a87b5f4e217b58c5aeb8050ed0370934
|
/src/SConscript
|
b1ddd405a89c97ec6e921ce41722e8fc4aeff63a
|
[] |
no_license
|
bububa/Mime
|
https://github.com/bububa/Mime
|
7e92b86c2c6a3ddd2293cc78c0481b030cdce2a8
|
d4e605d857e21bf0cd9b745a708d90245608469d
|
refs/heads/master
| 2021-01-02T22:59:07.820609 | 2010-06-16T09:30:34 | 2010-06-16T09:30:34 | 723,701 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
SConscript([
'libmime/SConscript',
'app/SConscript',
])
|
UTF-8
|
Python
| false | false | 2,010 |
7,507,602,851,280 |
997f2ff51a5e2c12b51bfb169f7a340e5749ce9f
|
fc072f9d8651a9d8c0f556b7d1497ea32ed9ee6d
|
/addon.py
|
bc86686f8fb3f55be409adf04325bf4beef193ff
|
[
"GPL-2.0-only"
] |
non_permissive
|
toodi/plugin.video.fdm.dk
|
https://github.com/toodi/plugin.video.fdm.dk
|
df19072c8c36d2b65c5c767b1d2bac528a92151e
|
cb4d81f1a3d4b1ee572c5735b34b2b6c36d90e3a
|
refs/heads/master
| 2020-12-25T22:36:29.143225 | 2014-01-05T18:11:43 | 2014-01-05T18:11:43 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#
# Copyright (C) 2014 Tommy Winther
# http://tommy.winther.nu
#
# This Program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This Program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this Program; see the file LICENSE.txt. If not, write to
# the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# http://www.gnu.org/copyleft/gpl.html
#
import os
import sys
import urllib2
import urlparse
import xml.etree.ElementTree
import buggalo
import xbmcgui
import xbmcaddon
import xbmcplugin
DATA_URL = 'http://vms.api.qbrick.com/rest/v3/getplayer/D5D17F48C5C03FBE?statusCode=xml'
# TODO improve xpath expressions once script.module.elementtree is 1.3+
def showCategories():
doc = loadXml()
for category in doc.findall('categories/category'):
if category.attrib.get('type') != 'standard':
continue
id = category.attrib.get('id')
name = category.attrib.get('name')
item = xbmcgui.ListItem(name, iconImage=ICON)
item.setProperty('Fanart_Image', FANART)
xbmcplugin.addDirectoryItem(HANDLE, PATH + '?category=' + id, item, isFolder=True)
xbmcplugin.endOfDirectory(HANDLE)
def showMedia(categoryId):
doc = loadXml()
for media in doc.findall('media/item'):
correctCategory = False
categories = media.findall('categories/category')
for category in categories:
if category.text == categoryId:
correctCategory = True
break
if not correctCategory:
continue
title = media.findtext('title')
image = media.findtext('images/image')
smilUrl = media.findtext('playlist/stream/format/substream')
infoLabels = dict()
infoLabels['studio'] = ADDON.getAddonInfo('name')
infoLabels['plot'] = media.findtext('description')
infoLabels['title'] = title
#infoLabels['date'] = date.strftime('%d.%m.%Y')
infoLabels['aired'] = media.findtext('publishdate')[0:10]
infoLabels['year'] = int(media.findtext('publishdate')[0:4])
item = xbmcgui.ListItem(title, iconImage=image, thumbnailImage=image)
item.setInfo('video', infoLabels)
item.setProperty('Fanart_Image', image)
item.setProperty('IsPlayable', 'true')
xbmcplugin.addDirectoryItem(HANDLE, PATH + '?smil=' + smilUrl.replace('&', '%26'), item)
xbmcplugin.endOfDirectory(HANDLE)
def playMedia(smilUrl):
doc = loadXml(smilUrl)
if doc is not None:
base = doc.find('head/meta').attrib.get('base')
videos = doc.findall('body/switch/video')
        video = videos[-1].attrib.get('src')
# todo quality
item = xbmcgui.ListItem(path=base)
item.setProperty('PlayPath', video)
xbmcplugin.setResolvedUrl(HANDLE, True, item)
else:
xbmcplugin.setResolvedUrl(HANDLE, False, xbmcgui.ListItem())
def loadXml(url=DATA_URL):
try:
u = urllib2.urlopen(url)
response = u.read()
u.close()
return xml.etree.ElementTree.fromstring(response)
except Exception:
return None
if __name__ == '__main__':
ADDON = xbmcaddon.Addon()
PATH = sys.argv[0]
HANDLE = int(sys.argv[1])
PARAMS = urlparse.parse_qs(sys.argv[2][1:])
ICON = os.path.join(ADDON.getAddonInfo('path'), 'icon.png')
FANART = os.path.join(ADDON.getAddonInfo('path'), 'fanart.jpg')
buggalo.SUBMIT_URL = 'http://tommy.winther.nu/exception/submit.php'
try:
if 'category' in PARAMS:
showMedia(PARAMS['category'][0])
elif 'smil' in PARAMS:
playMedia(PARAMS['smil'][0])
else:
showCategories()
except Exception:
buggalo.onExceptionRaised()
|
UTF-8
|
Python
| false | false | 2,014 |
4,681,514,385,130 |
d564a041dd7c5fa66bcad6dae8ca29e70edd431a
|
74a2d87d6f17025cc4e8ba1964ea66256d75b573
|
/altmetrics/settings_production.py
|
e068c8380ab1863c7bb13b7c3e503ab0680860af
|
[
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license"
] |
non_permissive
|
wearp/SocialSerials
|
https://github.com/wearp/SocialSerials
|
c03e01c74aae6d9521502922efa9af30659596c0
|
d5954519d9afa8b34dd53ff3c92d162f663ec5d8
|
refs/heads/master
| 2015-08-15T06:47:33.742700 | 2014-11-24T19:48:13 | 2014-11-24T19:48:13 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# import altmetrics.settings
try:
from settings import *
except ImportError as e:
pass
# disable debug for production
DEBUG = False
TEMPLATE_DEBUG = False
# Deployment on Heroku #
########################
import dj_database_url
DATABASES['default'] = dj_database_url.config()
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
|
UTF-8
|
Python
| false | false | 2,014 |
10,591,389,373,223 |
a0fc28a5fa683d519127168b9568d8352774c95b
|
dee766bad9f030c622b6a00ba5d71bacf77eeccd
|
/apps/bigbrother/urls.py
|
1534e12ef969f3a592053bb931ff702812e7d11e
|
[
"AGPL-3.0-only",
"CC-BY-SA-3.0"
] |
non_permissive
|
pombredanne/SpreadBand
|
https://github.com/pombredanne/SpreadBand
|
4edc2f30a083a984257abe57ce2215709a2522bf
|
3d99e967aaea6f750722cd7c7e7e9a569ba593aa
|
refs/heads/master
| 2021-01-17T20:31:40.780315 | 2012-08-23T14:39:52 | 2012-08-23T14:39:52 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.conf.urls.defaults import *
from django.conf import settings
urlpatterns = patterns('',
(r'^show$', 'bigbrother.views.show'),
(r'^monthly.png$', 'bigbrother.views.make_monthly_graph'),
(r'^month_data$', 'bigbrother.views.monthly_data'),
(r'^yearly.png$', 'bigbrother.views.make_yearly_graph'),
)
|
UTF-8
|
Python
| false | false | 2,012 |
18,760,417,167,926 |
763923b0c5640cabaa9948e86adc8845289f30b2
|
a941e9240a281bc1325a5159bee43a073d3ac0a9
|
/osx_notifier.py
|
bafc0339b6d3962a010980f5274bd7ca411be6cd
|
[
"GPL-2.0-or-later",
"LicenseRef-scancode-unknown-license-reference",
"GPL-2.0-only"
] |
non_permissive
|
Tiimber/terminal-notification
|
https://github.com/Tiimber/terminal-notification
|
a3e3bb0e9203b41467f2e44c8308f41ec64f7ade
|
5a13888d07e73bfe3ae07e28a9ec497742b53b4e
|
refs/heads/master
| 2016-09-11T09:15:54.601284 | 2014-11-17T07:12:13 | 2014-11-17T07:12:13 | 20,415,798 | 3 | 0 | null | false | 2014-11-13T08:04:16 | 2014-06-02T19:06:04 | 2014-06-14T17:14:19 | 2014-11-13T08:04:16 | 6,360 | 1 | 0 | 0 |
Python
| null | null |
import os
import subprocess
def notify_obj(notify_object):
title = str(notify_object['title']) if 'title' in notify_object else None
subtitle = str(notify_object['subtitle']) if 'subtitle' in notify_object else None
message = str(notify_object['message']) if 'message' in notify_object else None
sound = str(notify_object['sound']) if 'sound' in notify_object else None
group = str(notify_object['group']) if 'group' in notify_object else None
remove = str(notify_object['remove']) if 'remove' in notify_object else None
notify(title=title, subtitle=subtitle, message=message, sound=sound, group=group, remove=remove)
def notify(title=None, subtitle=None, message=None, sound=None, group=None, remove=None):
params = []
if title is not None:
t = '-title {!r}'.format(title)
params.append(t)
if subtitle is not None:
s = '-subtitle {!r}'.format(subtitle)
params.append(s)
if message is not None:
m = '-message {!r}'.format(message)
params.append(m)
if sound is not None:
so = '-sound {!r}'.format(sound)
params.append(so)
if group is not None:
g = '-group {!r}'.format(group)
params.append(g)
if remove is not None:
r = '-remove {!r}'.format(remove)
params.append(r)
    # Mute stderr via the null device, since terminal-notifier normally only
    # reports when an old notification is being removed
    fh = open(os.devnull, "w")
subprocess.Popen('terminal-notifier {}'.format(' '.join(params)), shell=True, stderr=fh).wait()
fh.close()
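# Usage example (requires the terminal-notifier binary on the PATH; the
# values shown are illustrative):
#
# notify(title='Build', subtitle='myproject', message='Tests passed',
#        sound='default', group='build-status')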
|
UTF-8
|
Python
| false | false | 2,014 |
15,221,364,137,238 |
2baa31e9fba2aa4ab6d403c1f197c9da7d52043d
|
6182080dd7323b8fe89f334088398f3351dacc80
|
/document.py
|
593c1797ab45440ecd07f04bc30633fa2756d0ab
|
[] |
no_license
|
tophsic/inkscape-extensions
|
https://github.com/tophsic/inkscape-extensions
|
893272dd000f7c4bf244e45739c68f2c9bc5e3a8
|
c9606c2c925307c29cec30b85bf5c7b001d3a25c
|
refs/heads/master
| 2020-04-06T03:33:43.316835 | 2013-06-22T18:51:09 | 2013-06-22T18:51:09 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
import inkex
import sys
import functions
import re
class Document(inkex.Effect):
def __init__(self):
inkex.Effect.__init__(self)
def effect(self):
dimensions = functions.getDimensions(sys.argv[-1], 'tag2')
x = float(dimensions[1])
widthTo = str(x + float(dimensions[3]) + x)
document = self.document.getroot()
widthFrom = document.get('width')
viewBox = document.get('viewBox').split(' ')
viewBox[2] = widthTo
document.set('viewBox', ' '.join(viewBox))
document.set('width', widthTo)
if __name__ == '__main__':
e = Document()
e.affect()
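# Usage sketch (assumptions: Inkscape passes the SVG path as the last
# argument, and functions.getDimensions(file, 'tag2') returns a sequence
# whose items 1 and 3 are the x offset and width of the tagged element):
#
# python document.py drawing.svg
#
# The document width becomes x + width + x, i.e. the tagged element gets
# equal margins on both sides.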
|
UTF-8
|
Python
| false | false | 2,013 |
6,751,688,601,884 |
50d5f6d834dca8777a90b89915e082fe7625a321
|
629567699f67cdfe54bff28688847b22bfe83e4a
|
/website/mail.py
|
bb6c5eb65a095206eab4720229ae275574489778
|
[] |
no_license
|
google-code-export/freemusic
|
https://github.com/google-code-export/freemusic
|
2c09a5de6fc6cff3f731e76c26a29858cde22f5f
|
c941ff989e4679bda6bbbed82cd7716685a2ce3b
|
refs/heads/master
| 2018-12-28T12:21:11.879435 | 2011-12-30T11:05:48 | 2011-12-30T11:05:48 | 32,133,392 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
# vim: set ts=4 sts=4 sw=4 noet:
import logging
import os
import re
from google.appengine.api import mail
from google.appengine.ext.webapp import template
def send(to, text):
# 1. Determine the subject.
subject = '(no subject)'
m = re.search('<h1>(.+)</h1>', text)
if m is not None:
subject = m.group(1)
text = text.replace(m.group(0), '').strip()
# 2. Extract the HTML part.
html = u'(empty message)'
m = re.search('<html>.*</html>', text, re.S)
if m is not None:
html = m.group(0)
text = text.replace(m.group(0), '').strip()
# http://code.google.com/intl/ru/appengine/docs/python/mail/emailmessagefields.html
mail.send_mail(sender='[email protected]', to=to, bcc='[email protected]', subject=subject, body=text, html=html)
def send2(to, template_name, data):
directory = os.path.dirname(__file__)
path = os.path.join(directory, 'templates', template_name + '.html')
return send(to, template.render(path, data))
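# Usage sketch (hypothetical template name and context; per send() above,
# templates/welcome.html would carry the subject in <h1>...</h1> and the
# rich part inside <html>...</html>, with the remainder used as plain text):
#
# send2('user@example.com', 'welcome', {'name': 'Alice'})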
|
UTF-8
|
Python
| false | false | 2,011 |
15,547,781,643,379 |
5ced267dc13cfa1f33b55741ad075d0a682203aa
|
c7c62c5de87228e3fe5ffe179bdf23cb336dcf96
|
/django_gravatar/templatetags/gravatar_tags.py
|
1280704d5237c594dcce0918781201bc849411ee
|
[
"MIT"
] |
permissive
|
chronossc/django-gravatar
|
https://github.com/chronossc/django-gravatar
|
a863fbb718c3849e96db96b1f186d845f18eb2bd
|
b179700dc333080e82dbeb76e99c8da9bf5fc4ce
|
refs/heads/master
| 2021-01-18T11:51:29.988469 | 2010-09-22T03:31:11 | 2010-09-22T03:31:11 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import re
import urllib
import hashlib
from django import template
from django.conf import settings
URL_RE = re.compile(r'^https?://([-\w\.]+)+(:\d+)?(/([\w/_\.]*(\?\S+)?)?)?',
re.IGNORECASE)
EMAIL_RE = re.compile(r'^[A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]{2,4}$',
re.IGNORECASE)
GRAVATAR_URL_PREFIX = 'http://www.gravatar.com/avatar/'
DEFAULT_PARAMS = \
{
# api_key: (gravatar_key, value),
'size': ('s', 80), # value is in [1,512]
'rating': ('r', 'g'), # 'pg', 'r', or 'x'
'default': ('d', ''), # 'identicon', 'monsterid', 'wavatar', '404', or escaped URI
}
register = template.Library()
def _build_gravatar_url(email, params):
"""Generate a Gravatar URL.
"""
# step 1: get a hex hash of the email address
email = email.strip().lower().encode('utf-8')
if not EMAIL_RE.match(email):
return ''
email_hash = hashlib.md5(email).hexdigest()
# step 2a: build a canonized parameters dictionary
if not isinstance(params, dict):
params = params.__dict__
actual_params = {}
default_keys = DEFAULT_PARAMS.keys()
for key, value in params.items():
if key in default_keys:
k, default_value = DEFAULT_PARAMS[key]
# skip parameters whose values are defaults,
# assume these values are mirroring Gravatar's defaults
if value != default_value:
actual_params[k] = value
# step 2b: validate the canonized parameters dictionary
# silently drop parameter when the value is not valid
for key, value in actual_params.items():
if key == 's':
if value < 1 or value > 512:
del actual_params[key]
elif key == 'r':
if value.lower() not in ('g', 'pg', 'r', 'x'):
del actual_params[key]
# for 'd': keep the known keywords or a valid URI, drop anything else
elif key == 'd':
if value.lower() not in ('identicon', 'monsterid', 'wavatar', '404'):
if not URL_RE.match(value): # if not a valid URI
del actual_params[key]
else: # valid URI, encode it
actual_params[key] = value # urlencode will encode it later
# step 3: encode params
params_encode = urllib.urlencode(actual_params)
# step 4: form the gravatar url
gravatar_url = GRAVATAR_URL_PREFIX + email_hash
if params_encode:
gravatar_url += '?' + params_encode
return gravatar_url
class GravatarURLNode(template.Node):
def __init__(self, email, params):
self.email = email
self.params = params
def render(self, context):
try:
if self.params:
params = template.Variable(self.params).resolve(context)
else:
params = {}
# try matching an address string literal
email_literal = self.email.strip().lower()
if EMAIL_RE.match(email_literal):
email = email_literal
# treat as a variable
else:
email = template.Variable(self.email).resolve(context)
except template.VariableDoesNotExist:
return ''
# now, we generate the gravatar url
return _build_gravatar_url(email, params)
@register.tag(name="gravatar_url")
def get_gravatar_url(parser, token):
"""For template tag: {% gravatar_url <email> <params> %}
Where <params> is an object or a dictionary (variable), and <email>
is a string object (variable) or a string (literal).
"""
try:
tag_name, email, params = token.split_contents()
except ValueError:
try:
tag_name, email = token.split_contents()
params = None
except ValueError:
raise template.TemplateSyntaxError('%r tag requires one or two arguments.' %
token.contents.split()[0])
# if email is quoted, parse as a literal string
if email[0] in ('"', "'") or email[-1] in ('"', "'"):
if email[0] == email[-1]:
email = email[1:-1]
else:
raise template.TemplateSyntaxError(
"%r tag's first argument is in unbalanced quotes." % tag_name)
return GravatarURLNode(email, params)
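# Hedged usage sketch (names are illustrative, not from this repo):
# in a template, after {% load gravatar_tags %}:
#
# <img src="{% gravatar_url user.email opts %}" alt="avatar">
#
# where the view puts e.g. opts = {'size': 64, 'rating': 'pg',
# 'default': 'identicon'} into the context; per DEFAULT_PARAMS above,
# values equal to the defaults are omitted from the generated URL.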
|
UTF-8
|
Python
| false | false | 2,010 |
17,016,660,437,928 |
b24aa8127fc131e2ef89b8e51031cadcf05696dc
|
be84495751737bbf0a8b7d8db2fb737cbd9c297c
|
/renmas3/shapes/__init__.py
|
ae850285ce28b3267efae9b6e7dbaf188ba7d8b9
|
[] |
no_license
|
mario007/renmas
|
https://github.com/mario007/renmas
|
5e38ff66cffb27b3edc59e95b7cf88906ccc03c9
|
bfb4e1defc88eb514e58bdff7082d722fc885e64
|
refs/heads/master
| 2021-01-10T21:29:35.019792 | 2014-08-17T19:11:51 | 2014-08-17T19:11:51 | 1,688,798 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from .mesh_desc import MeshDesc
from .shape import Shape
from .ray_triangle import ray_triangle_intersection
from .hit import HitPoint
from .triangle import Triangle
from .sphere import Sphere
from .rectangle import Rectangle
from .mgr import ShapeManager
from .linear import LinearIsect
from .load_meshes import load_meshes
from .bbox import BBox, random_in_bbox
from .flat_mesh import FlatMesh
from .utils import create_mesh, fetch_triangle, load_meshes_from_file
|
UTF-8
|
Python
| false | false | 2,014 |
9,199,819,970,473 |
4477dfccf57cc5c48fb62150473d3ff75672d2d8
|
0dfafea4929b6dbfadc77114ca6474dd17182d6e
|
/canary/ui/edit/study/species/__init__.py
|
d13011045810240aabc49182a0eb51a5609536e7
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
non_permissive
|
sentinelcanary/sentinelcanary
|
https://github.com/sentinelcanary/sentinelcanary
|
89b5e84de22000afe5433cdce4eb27fed4f85b61
|
849f0874d064b842147735a283840ec01ac2961e
|
refs/heads/master
| 2021-01-15T16:52:26.225608 | 2013-07-11T20:56:39 | 2013-07-11T21:12:33 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# $Id$
_q_exports = [
'_q_index',
'find',
'add',
'types',
]
from quixote.errors import TraversalError
from canary.ui.edit.study.species import species_ui
from canary.ui.edit.study.species.species_ui import SpeciesActions
_q_index = species_ui._q_index
find = species_ui.find
add = species_ui.add
types = species_ui.types
def _q_lookup (request, species_id):
try:
if species_id is not None:
return SpeciesActions(species_id)
else:
raise TraversalError
except:
return not_found('species')
|
UTF-8
|
Python
| false | false | 2,013 |
17,935,783,451,442 |
e7bc5b963137ac35ef15dfbd208632c5e2dafeb5
|
766b65940c5802602e33d55ac6563cfd64e056de
|
/prob03/small/tiny/views.py
|
386d0788587f4c4ceae4266bc8c67105e9d81297
|
[
"MIT"
] |
permissive
|
rmorison/funtimes
|
https://github.com/rmorison/funtimes
|
2c6ff0747c15cfd4f7c995f0d8cf87b84a503810
|
ad736fbee76a63424e5677902136905fbab75242
|
refs/heads/master
| 2020-04-10T07:38:20.351298 | 2014-06-06T04:56:38 | 2014-06-06T04:56:38 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.shortcuts import render
"""
I could use django-restframework or TastyPie,
but it's easier to emit dictionaries and you said
you didn't want anything fancy.
I'm forgoing a syndication library; if you want an Atom or RSS
feed, just let me know and I'll make it happen.
( https://docs.djangoproject.com/en/dev/ref/contrib/syndication/ )
"""
""" Algorithum
Create a small, working Django site that serves a simple activity feed list with 3 filters:
1. My posts
2. Me and the posts of everyone I'm tracking (note: use an asymmetric relationship, meaning, "Even if I track you, you may or may not track me".)
3. Everybody's posts
"""
from django.core.urlresolvers import reverse, NoReverseMatch
from django.db.models.query import QuerySet
from django.http import HttpResponse, HttpResponseRedirect
from django.utils.encoding import force_unicode
from django.utils.safestring import mark_safe
from tiny.models import RegisteredUser, FeedItem
import json
class FeedItemEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, QuerySet):
return [o for o in obj]
if isinstance(obj, RegisteredUser):
return {
'username': obj.user.username,
'password': 'aoeu',
'login-at': '/admin/',
}
if isinstance(obj, FeedItem):
return {
'content': mark_safe(force_unicode(obj.content).replace('&', '&').replace('<', '<').replace('>', '>').replace('"', '"').replace("'", ''')),
'author': obj.user.username,
'password': 'aoeu',
'email': obj.user.email,
}
return super(FeedItemEncoder, self).default(obj)
def feed(request):
if request.user.is_anonymous():
return HttpResponse(json.dumps({
'status': 'Failure - You should login',
'available-logins': RegisteredUser.objects.all()
}, cls=FeedItemEncoder))
# For time's sake, I'm going to skip making this pretty.
following = RegisteredUser.objects.get(user=request.user).tracking.all()
following_values = following.values_list('id', flat=True)
FILTER_TYPES = {
'e': lambda: FeedItem.objects.all(),
'm': lambda: FeedItem.objects.filter(user=request.user),
# There is probably a better way to do this.
'f': lambda: FeedItem.objects.filter(user__pk__in=[
r.user.pk for r in RegisteredUser.objects.filter(pk__in=following_values)
]),
}
filter_type = request.GET.get('filterOn', FILTER_TYPES.keys()[0])
if filter_type not in FILTER_TYPES.keys():
filter_type = FILTER_TYPES.keys()[0]
return HttpResponse(json.dumps({
'status': 'OK',
'data': {
'additional_views': [
'?filterOn='.join([reverse('tiny-feed'), key])
for key in FILTER_TYPES.keys()
],
'current-filter': filter_type,
'you-are': RegisteredUser.objects.get(user=request.user),
'following': following,
'content': FILTER_TYPES.get(filter_type)(),
}
}, cls=FeedItemEncoder), content_type="application/json")
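# Request sketch (the URL path is an assumption; the filter keys come from
# FILTER_TYPES above):
#
# GET /feed?filterOn=m -> only my posts
# GET /feed?filterOn=f -> my posts plus posts of everyone I'm tracking
# GET /feed?filterOn=e -> everybody's posts
#
# Anonymous requests get {"status": "Failure - You should login", ...}
# plus the list of available logins, as built in feed() above.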
|
UTF-8
|
Python
| false | false | 2,014 |
12,146,167,546,088 |
9250d65c33441b7df03b34ec6e8010480c693401
|
f821fea81d3d3b0ce925d80ad332b659fcc517a9
|
/python/RootObjectLoader.py
|
23830d69e8b1785a4fd36382c09b86ad36228c80
|
[
"CC-BY-NC-SA-3.0"
] |
non_permissive
|
psteinb/ZbFitter
|
https://github.com/psteinb/ZbFitter
|
d173f093e7b3249fc750436af8eedc1d0429238c
|
a99d58fe6aa9ef2553578bb409400cec939a95f3
|
refs/heads/master
| 2021-01-19T21:48:26.984694 | 2012-02-23T13:11:47 | 2012-02-23T13:11:47 | 3,525,450 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import ROOT
import os,sys,re
sys.path.append("/home/psteinb/bin/")
import setupATLASRootStyle
from ConfigParser import *
class RootObjectLoader:
"""
python class that reads a config file describing which ROOT files to open and which TNamed objects to load from them
the file names may contain placeholders __FOO__ that are replaced by the corresponding shell variable, if it exists
"""
def __init__(self, _configFile=""):
"""
Arguments:
- `_configFile`:
"""
self.cfgParser = ConfigParser()
self.tFiles = {}
self.loadedDict = {}
self.loadedCommands = {}
self.setConfigFile(_configFile)
def setConfigFile(self,_cfile):
if os.path.isfile( _cfile ):
self.__configFile = _cfile
self.cfgParser.read(_cfile)
else:
#print _cfile," not found"
self.__configFile = None
def findDirString(self,_string):
""" small function to extract a string of the format __FOO__ from the given string """
start = _string.find("__")
end = _string.find("__",start+1)+2
return _string[start:end]
def getDirFromEnvironment(self,_string):
""" return the directory given by _string environment variable if it exists """
env = os.getenv(_string)
if env and os.path.isdir(env.split(":")[0]):
return env.split(":")[0]
else:
print "no directory extracted from ", _string
return None
def replaceDirString(self,_filestring):
""" search the _filestring for __FOO__ and replace it with the directory given by $FOO """
value = _filestring
if _filestring.count("__")>1:
placeHolder = self.findDirString(_filestring)
dirname = self.getDirFromEnvironment(placeHolder[2:-2])
if dirname:
value = _filestring.replace(placeHolder,dirname)
else:
print "no directory found according to ", placeHolder
return value
else:
#print "nothing found"
return value
def close(self):
""" close all associated files """
for i in self.tFiles.values():
for o in i:
if o.__nonzero__():
o.Close()
def openFiles(self,_verbose=False):
""" open all files given """
for sec in self.cfgParser.sections():
self.loadedDict[sec] = []
self.loadedCommands[sec] = []
self.tFiles[sec] = []
commands = []
for k,v in self.cfgParser.items(sec):
print ">>",k,v
if v.count("[]."):
commands.append(v)
self.loadedCommands[sec].append(v)
continue
if not v.lower().count(".root"):
continue
fileName = self.replaceDirString(v.split(":")[0])
currFile = None
if fileName and os.path.isfile(fileName):
currFiles = [ item for item in self.tFiles[sec] if item.GetName().count(fileName.split("/")[-1]) ]
if not currFiles:
self.tFiles[sec].append(ROOT.TFile(fileName))
currFile = self.tFiles[sec][-1]
else:
currFile = currFiles[0]
else:
print fileName, " does not exist"
continue
if not currFile:
print "%s not loaded" % fileName
continue
tnamed = currFile.Get(v.split(":")[-1])
if tnamed.__nonzero__():
self.loadedDict[sec].append(tnamed)
# print ">> object %s found in %s " % (v.split(":")[0],v.split(":")[-1])
else:
print ">> object %s not found in %s " % (v.split(":")[-1],fileName)
for loaded in self.loadedDict[sec]:
if not loaded.__nonzero__():
continue
for cmd in commands:
try:
eval(cmd.replace("[]","loaded"))
except Exception as inst:
print "unable to execute %s on %s" % (cmd,loaded.GetName())
if _verbose:
self.printLoadedObjects()
def printLoadedObjects(self):
""" print out all loaded objects """
for k,v in self.loadedDict.iteritems():
print ">> loaded %s from %s" % (k,self.__configFile)
for i in v:
print "\t%s : %s" % (i.GetDirectory().GetFile().GetName(),i.GetName())
if __name__ == '__main__':
if '-h' in sys.argv or '--help' in sys.argv or len(sys.argv[1:]) != 1:
print "test usage: RootObjectLoader.py <configFile>"
sys.exit(1)
if os.path.isfile(sys.argv[1]):
loader = RootObjectLoader(sys.argv[1])
try:
loader.openFiles()
except Exception as exc:
print "RootObjectLoader failed due to\n%s(%i >> %i)\t> %s <" % (__file__,sys.exc_info()[-1].tb_lineno,sys.exc_info()[-1].tb_next.tb_lineno,str(exc))
exitCode = 1
else:
exitCode = 0
finally:
loader.close()
sys.exit(exitCode)
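# Example config (a sketch, not shipped with the project): each section
# becomes a key in loadedDict; "<file>:<object>" entries name a .root file
# and the TNamed to Get() from it, __FOO__ expands to $FOO, and values
# containing "[]." are eval'ed against every loaded object.
#
# [data]
# mass = __DATADIR__/results.root:h_mass
# style = [].SetLineColor(2)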
|
UTF-8
|
Python
| false | false | 2,012 |