__id__
int64 3.09k
19,722B
| blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
256
| content_id
stringlengths 40
40
| detected_licenses
list | license_type
stringclasses 3
values | repo_name
stringlengths 5
109
| repo_url
stringlengths 24
128
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
42
| visit_date
timestamp[ns] | revision_date
timestamp[ns] | committer_date
timestamp[ns] | github_id
int64 6.65k
581M
⌀ | star_events_count
int64 0
1.17k
| fork_events_count
int64 0
154
| gha_license_id
stringclasses 16
values | gha_fork
bool 2
classes | gha_event_created_at
timestamp[ns] | gha_created_at
timestamp[ns] | gha_updated_at
timestamp[ns] | gha_pushed_at
timestamp[ns] | gha_size
int64 0
5.76M
⌀ | gha_stargazers_count
int32 0
407
⌀ | gha_forks_count
int32 0
119
⌀ | gha_open_issues_count
int32 0
640
⌀ | gha_language
stringlengths 1
16
⌀ | gha_archived
bool 2
classes | gha_disabled
bool 1
class | content
stringlengths 9
4.53M
| src_encoding
stringclasses 18
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | year
int64 1.97k
2.01k
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
11,278,584,140,323 |
3a390664e753b95f20ee646f3a70ad76fe04e456
|
a704892d86252dde1bc0ff885ea5e7d23b45ce84
|
/addons-extra/dm_remote_edition/dm_remote_edition.py
|
09fa1d51ac86cc9306f9a64219279d89f5e21299
|
[] |
no_license
|
oneyoung/openerp
|
https://github.com/oneyoung/openerp
|
5685bf8cce09131afe9b9b270f6cfadf2e66015e
|
7ee9ec9f8236fe7c52243b5550fc87e74a1ca9d5
|
refs/heads/master
| 2016-03-31T18:22:41.917881 | 2013-05-24T06:10:53 | 2013-05-24T06:10:53 | 9,902,716 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import fields
from osv import osv
class dm_campaign_document_job(osv.osv): # {{{
    """Extend dm.campaign.document.job with the user responsible for printing it."""
    _inherit = "dm.campaign.document.job"
    _columns = {
        # User whose portal/print queue this document job belongs to.
        'user_id': fields.many2one('res.users', 'Printer User'),
    }
dm_campaign_document_job() # }}}
class dm_campaign_document_job_batch(osv.osv): # {{{
    """Extend dm.campaign.document.job.batch so that portal 'service' users
    only see batches containing at least one job assigned to them."""
    _inherit = "dm.campaign.document.job.batch"

    def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
        """Standard OSV search, except in the portal 'service' context where
        the result is restricted to the current user's batches.

        Returns a list of batch ids (the portal branch ignores args/limit).
        """
        if context is None:
            context = {}
        if context.get('portal_dm') == 'service':
            # Parameterized query: the original interpolated uid with
            # '%s' % uid, which is an SQL-injection vector.
            cr.execute(
                "select batch_id from dm_campaign_document_job "
                "where batch_id in (select id from dm_campaign_document_job_batch) "
                "and user_id = %s",
                (uid,),
            )
            batch_ids = [row[0] for row in cr.fetchall()]
            return batch_ids
        return super(dm_campaign_document_job_batch, self).search(cr, uid, args, offset, limit, order, context, count)
dm_campaign_document_job_batch() # }}}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
UTF-8
|
Python
| false | false | 2,013 |
4,552,665,347,887 |
47eba51144d0405252671c2972004365c11eb2ce
|
c3764314029f2c11a968efa6ac326a0a98596dee
|
/search_server.py
|
be81e7c602550cba2e5f4094d44713775a0a0078
|
[] |
no_license
|
prudhviy/ir_project_csc_5710
|
https://github.com/prudhviy/ir_project_csc_5710
|
539757475d32cf66d7438ca54d5a1121bd2b291e
|
8b3a2902e544bd9ceb2287d0320bd0720638f902
|
refs/heads/master
| 2021-01-17T17:07:27.050374 | 2014-12-12T02:31:09 | 2014-12-12T02:31:09 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
from flask import Flask, request, jsonify
from pyes import *
# Elasticsearch index queried by all the search helpers below.
INDEX_NAME = 'tweets_new'
# set the project root directory as the static folder, you can set others.
app = Flask(__name__, static_url_path='')
def search_all_terms(conn, query_string):
    """Return tweets containing every term of query_string (AND semantics),
    capped at 50 hits by the limit filter."""
    match_clause = {
        "tweet_text": {
            "query": query_string,
            "operator": "and"
        }
    }
    query = {
        "filtered": {
            "filter": {"limit": {"value": 50}},
            "query": {"match": match_clause}
        }
    }
    hits = conn.search(query, indices=[INDEX_NAME], doc_types=["tweet"])
    return list(hits)
def search_exact_phrase(conn, query_string):
    """Return tweets containing query_string as an exact phrase (max 50 hits)."""
    query = {
        "filtered": {
            "filter": {"limit": {"value": 50}},
            "query": {
                "match_phrase": {"tweet_text": query_string}
            }
        }
    }
    hits = conn.search(query, indices=[INDEX_NAME], doc_types=["tweet"])
    return list(hits)
def search_any_terms(conn, query_string):
    """Return tweets containing at least one term of query_string
    (OR semantics), capped at 50 hits."""
    match_clause = {
        "tweet_text": {
            "query": query_string,
            "operator": "or"
        }
    }
    query = {
        "filtered": {
            "filter": {"limit": {"value": 50}},
            "query": {"match": match_clause}
        }
    }
    hits = conn.search(query, indices=[INDEX_NAME], doc_types=["tweet"])
    return list(hits)
def search_hash_tags(conn, query_string):
    """Search each whitespace-separated token as a literal '#tag' phrase and
    concatenate the hits (up to 50 per tag)."""
    collected = []
    for tag in query_string.split():
        query = {
            "filtered": {
                "filter": {"limit": {"value": 50}},
                "query": {
                    "match_phrase": {"tweet_text": "#" + tag}
                }
            }
        }
        hits = conn.search(query, indices=[INDEX_NAME], doc_types=["tweet"])
        collected.extend(hits)
    return collected
def search_none_terms(conn, query_string, query_mandatory):
    """Return tweets matching any mandatory term while excluding those
    containing query_string."""
    should = [{"term": {"tweet_text": term}} for term in query_mandatory.split()]
    query = {
        "bool": {
            "should": should,
            "must_not": {
                "term": {"tweet_text": query_string}
            }
        }
    }
    hits = conn.search(query, indices=[INDEX_NAME], doc_types=["tweet"])
    return list(hits)
@app.route("/search/", methods=['POST'])
def search_api():
    """Advanced-search endpoint.

    Dispatches on the ``query_field`` form value to one of the search
    helpers and returns a JSON object ``{"tweets": [...]}``.
    """
    conn = ES('127.0.0.1:9200')
    # Initialize up front: the original defined `result` only inside the
    # if-branch, so a POST with query_type != 'adv' raised NameError at the
    # final return.  Now it returns an empty result instead.
    result = {'tweets': []}
    if request.method == 'POST' and request.form['query_type'] == 'adv':
        query_term = request.form['query']
        query_field = request.form['query_field']
        if query_field == 'all_terms':
            result['tweets'] = search_all_terms(conn, query_term)
        elif query_field == 'exact_phrase':
            result['tweets'] = search_exact_phrase(conn, query_term)
        elif query_field == 'any_terms':
            result['tweets'] = search_any_terms(conn, query_term)
        elif query_field == 'none_terms':
            query_mandatory = request.form['query_mandatory_terms']
            result['tweets'] = search_none_terms(conn, query_term, query_mandatory)
        elif query_field == 'hash_tags':
            result['tweets'] = search_hash_tags(conn, query_term)
    return jsonify(**result)
@app.route('/static/<path:path>')
def static_proxy(path):
    # send_static_file will guess the correct MIME type
    # NOTE(review): os.path.join uses the OS separator, so on Windows this
    # would produce 'static\<path>' — presumably only deployed on POSIX; confirm.
    return app.send_static_file(os.path.join('static', path))
if __name__ == "__main__":
    #app.run(debug=True)
    # Bind on all interfaces so the dev server is reachable from other hosts.
    app.run(host='0.0.0.0')
|
UTF-8
|
Python
| false | false | 2,014 |
1,640,677,546,025 |
fe63a9ff6374fc2c413d8cf40c75d5e4a97f53e5
|
4772f0dda8f522ef1435133fc8b2a63752f43ba1
|
/sortedcontainers/sortedlistwithkey.py
|
cb15c1c600d690c3c8d9bcd333cce0109cd080e8
|
[
"Apache-2.0"
] |
permissive
|
jonathaneunice/sorted_containers
|
https://github.com/jonathaneunice/sorted_containers
|
d4c4768597ec422c66fadb84212c2036d173f982
|
fe8092c825bc85ea2e92114cae731fd444c895a6
|
refs/heads/master
| 2020-11-30T23:32:37.663231 | 2014-07-22T19:51:09 | 2014-07-22T19:51:09 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
#
# Sorted list with key implementation.
from sys import hexversion
from .sortedlist import SortedList
from collections import MutableSequence
from itertools import chain
if hexversion < 0x03000000:
    # Python 2: use the lazy xrange so large index ranges stay O(1) in memory.
    range = xrange
class Pair:
    """A key/value pair that orders and compares by its key alone.

    Used by SortedListWithKey when stored values are not themselves
    orderable; indexing mimics a 2-tuple (position 0 is the key, any other
    position yields the value).
    """

    def __init__(self, key, value):
        self.key = key
        self.value = value

    def __eq__(self, that):
        return self.key == that.key

    def __ne__(self, that):
        return self.key != that.key

    def __lt__(self, that):
        return self.key < that.key

    def __le__(self, that):
        return self.key <= that.key

    def __gt__(self, that):
        return self.key > that.key

    def __ge__(self, that):
        return self.key >= that.key

    def __getitem__(self, index):
        # Tuple-like access: 0 -> key, anything else -> value.
        return self.value if index != 0 else self.key

    def __repr__(self):
        return 'Pair({0}, {1})'.format(repr(self.key), repr(self.value))
class SortedListWithKey(MutableSequence):
    """List kept sorted by key(value).

    Values are stored in the underlying SortedList as (key, value) pairs.
    When the values themselves cannot be ordered (value_orderable=False) a
    Pair wrapper, which compares by key only, replaces the tuple; the
    slower code paths below then compare stored values directly.
    """

    def __init__(self, iterable=None, key=lambda val: val, value_orderable=True, load=100):
        """Initialize from *iterable*; *load* is the SortedList load factor."""
        self._key = key
        self._list = SortedList(load=load)
        self._ordered = value_orderable
        if value_orderable:
            self._pair = lambda key, value: (key, value)
        else:
            self._pair = Pair
        if iterable is not None:
            self.update(iterable)

    def clear(self):
        """Remove all values."""
        self._list.clear()

    def add(self, value):
        """Insert *value*, keeping the list sorted by key."""
        pair = self._pair(self._key(value), value)
        self._list.add(pair)

    def update(self, iterable):
        """Insert every value from *iterable*."""
        _key, _pair = self._key, self._pair
        self._list.update(_pair(_key(val), val) for val in iterable)

    def _iter(self, pair):
        """Yield the start index, then each stored pair equal to *pair*.

        Helper for the value_orderable=False code paths, which must inspect
        stored values because distinct values may share a key.
        """
        _list = self._list
        start = _list.bisect_left(pair)
        end = _list.bisect_right(pair)
        yield start
        if start == end:
            return
        start_pos, start_idx = _list._pos(start)
        end_pos, end_idx = _list._pos(end - 1)
        _lists = _list._lists
        segments = (_lists[pos] for pos in range(start_pos, end_pos + 1))
        iterator = chain.from_iterable(segments)
        # Advance the iterator to the start of the items.
        for rpt in range(start_idx):
            next(iterator)
        # Yield exactly the pairs at indices [start, end).  The original
        # looped end - start + 1 times, one element past the equal-key range,
        # which could compare against a pair with a different key and could
        # raise StopIteration inside this generator (a RuntimeError on
        # Python 3.7+).
        for rpt in range(end - start):
            yield next(iterator)

    def __contains__(self, value):
        pair = self._pair(self._key(value), value)
        if self._ordered:
            return pair in self._list
        iterator = self._iter(pair)
        next(iterator)  # discard the start index
        for duo in iterator:
            if value == duo[1]:
                return True
        else:
            return False

    def discard(self, value):
        """Remove one occurrence of *value* if present; silent otherwise."""
        pair = self._pair(self._key(value), value)
        if self._ordered:
            self._list.discard(pair)
            return
        iterator = self._iter(pair)
        start = next(iterator)
        for offset, duo in enumerate(iterator):
            if value == duo[1]:
                del self._list[start + offset]
                return

    def remove(self, value):
        """Remove one occurrence of *value*; raise ValueError if absent."""
        pair = self._pair(self._key(value), value)
        if self._ordered:
            self._list.remove(pair)
            return
        iterator = self._iter(pair)
        start = next(iterator)
        for offset, duo in enumerate(iterator):
            if value == duo[1]:
                del self._list[start + offset]
                return
        else:
            # for/else: loop finished without finding the value.
            raise ValueError

    def __delitem__(self, index):
        del self._list[index]

    def __getitem__(self, index):
        if isinstance(index, slice):
            return list(tup[1] for tup in self._list[index])
        else:
            return self._list[index][1]

    def __setitem__(self, index, value):
        _key, _pair = self._key, self._pair
        if isinstance(index, slice):
            self._list[index] = list(_pair(_key(val), val) for val in value)
        else:
            self._list[index] = _pair(_key(value), value)

    def __iter__(self):
        return iter(tup[1] for tup in iter(self._list))

    def __reversed__(self):
        return iter(tup[1] for tup in reversed(self._list))

    def __len__(self):
        return len(self._list)

    def bisect_left(self, value):
        pair = self._pair(self._key(value), value)
        return self._list.bisect_left(pair)

    def bisect(self, value):
        pair = self._pair(self._key(value), value)
        return self._list.bisect(pair)

    def bisect_right(self, value):
        pair = self._pair(self._key(value), value)
        return self._list.bisect_right(pair)

    def count(self, value):
        """Return the number of occurrences of *value*."""
        pair = self._pair(self._key(value), value)
        if self._ordered:
            return self._list.count(pair)
        iterator = self._iter(pair)
        next(iterator)  # discard the start index
        return sum(1 for duo in iterator if duo[1] == value)

    def append(self, value):
        """Append; the underlying SortedList enforces the sort order."""
        pair = self._pair(self._key(value), value)
        self._list.append(pair)

    def extend(self, iterable):
        """Extend; the underlying SortedList enforces the sort order."""
        _key, _pair = self._key, self._pair
        self._list.extend(_pair(_key(val), val) for val in iterable)

    def insert(self, index, value):
        """Insert at *index*; the underlying SortedList enforces ordering."""
        pair = self._pair(self._key(value), value)
        self._list.insert(index, pair)

    def pop(self, index=-1):
        """Remove and return the value at *index* (default: last)."""
        return self._list.pop(index)[1]

    def index(self, value, start=None, stop=None):
        """Return the first index of *value* in [start, stop); ValueError if absent."""
        pair = self._pair(self._key(value), value)
        if self._ordered:
            return self._list.index(pair, start, stop)
        _len = self._list._len
        # Normalize start/stop the way list.index does ('is None' instead of
        # the original '== None' comparisons).
        if start is None:
            start = 0
        if start < 0:
            start += _len
        if start < 0:
            start = 0
        if stop is None:
            stop = _len
        if stop < 0:
            stop += _len
        if stop > _len:
            stop = _len
        if stop <= start:
            raise ValueError
        iterator = self._iter(pair)
        begin = next(iterator)
        for offset, val in enumerate(iterator):
            # Compare the stored value, pair position 1.  The original
            # indexed val[2], which only worked because Pair.__getitem__
            # returns the value for any non-zero index.
            if value == val[1] and start <= (begin + offset) < stop:
                return begin + offset
        else:
            raise ValueError

    def as_list(self):
        """Return the values as a plain list, in sorted order."""
        return list(tup[1] for tup in self._list.as_list())

    def __add__(self, that):
        result = SortedListWithKey(
            key=self._key,
            value_orderable=self._ordered,
            load=self._list._load
        )
        values = self.as_list()
        values.extend(that)
        result.update(values)
        return result

    def __iadd__(self, that):
        self.update(that)
        return self

    def __mul__(self, that):
        values = self.as_list() * that
        return SortedListWithKey(
            values,
            key=self._key,
            value_orderable=self._ordered,
            load=self._list._load
        )

    def __imul__(self, that):
        values = self.as_list() * that
        self.clear()
        self.update(values)
        return self

    def __eq__(self, that):
        return ((len(self) == len(that))
            and all(lhs == rhs for lhs, rhs in zip(self, that)))

    def __ne__(self, that):
        return ((len(self) != len(that))
            or any(lhs != rhs for lhs, rhs in zip(self, that)))

    def __lt__(self, that):
        # NOTE(review): these orderings compare element-wise under zip rather
        # than lexicographically; preserved as-is since callers may rely on it.
        return ((len(self) <= len(that))
            and all(lhs < rhs for lhs, rhs in zip(self, that)))

    def __le__(self, that):
        return ((len(self) <= len(that))
            and all(lhs <= rhs for lhs, rhs in zip(self, that)))

    def __gt__(self, that):
        return ((len(self) >= len(that))
            and all(lhs > rhs for lhs, rhs in zip(self, that)))

    def __ge__(self, that):
        return ((len(self) >= len(that))
            and all(lhs >= rhs for lhs, rhs in zip(self, that)))

    def __repr__(self):
        temp = 'SortedListWithKey({0}, key={1}, value_orderable={2}, load={3})'
        return temp.format(
            repr(self.as_list()),
            repr(self._key),
            repr(self._ordered),
            repr(self._list._load)
        )

    def _check(self):
        """Internal consistency check: every stored key matches key(value)."""
        _list, _key = self._list, self._key
        _list._check()
        assert all(pair[0] == _key(pair[1]) for pair in _list)
|
UTF-8
|
Python
| false | false | 2,014 |
12,275,016,539,609 |
9b5f10de24acf701d98ebbd7b0a48f87744857bd
|
5714865cd60f25746af73e0ad83d25e531b2a18a
|
/interAssay.py
|
4b4e5ea7132f41707fc796877557a0feadff2ece
|
[
"Apache-2.0"
] |
permissive
|
fak/globalAnalysis
|
https://github.com/fak/globalAnalysis
|
02bf69d8273ced6be233d5e3d5ec4fbd872d9c6b
|
dce16c0e86f2d7e266e3b12e7bd0c70897a96f46
|
refs/heads/master
| 2021-01-19T19:37:40.081887 | 2013-10-18T15:52:31 | 2013-10-18T15:52:31 | 7,114,109 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
Function: interAssay
This script generates a distribution of differencs observed between two experiments assessing the potency of identical compounds and targets
--------------------
Felix Kruger
[email protected]
"""
def interAssay():
    """Generate distributions of potency differences between pairs of
    experiments measuring identical compound/target combinations.

    Reads configuration from gla.yaml, then for each configured species
    pulls activities from ChEMBL, writes sampled inter-assay pairs, and
    annotates the results with molweight, target class and sequence identity.
    """
    import queryDevice
    import mkDict
    import queries
    import writePairs
    import addProperties
    import yaml
    # Read config file; the context manager closes it promptly (the original
    # left the handle open).
    with open('gla.yaml') as paramFile:
        params = yaml.safe_load(paramFile)
    species = params['species']
    # Get information for all relevant activities from ChEMBL.
    for spec in species:
        # str.replace instead of the deprecated string.replace module helper.
        specName = spec.replace(' ', '_')
        dictFile = "data/inter_compDict_%s_%s.pkl" % (specName, params['release'])
        results = "data/interAssay_%s_%s.tab" % (specName, params['release'])
        query = queries.activities(spec)
        acts = queryDevice.queryDevice(query, params)
        mkDict.activities(acts, dictFile)
        writePairs.interAssaySampled(results, dictFile)
        addProperties.addMolweight("molregno", results, params)
        addProperties.addTargetClass("L1", "accession", results, params)
        addProperties.addSeq100(results)
if __name__ == '__main__':
    import sys
    if len(sys.argv) != 1: # only the program name itself is expected
        # All runtime configuration comes from gla.yaml, not the CLI.
        sys.exit("All parameters passed through gla.yaml")
    interAssay()
|
UTF-8
|
Python
| false | false | 2,013 |
3,315,714,774,649 |
03e4cd126eaf0ea284223ecaa060635922040ba4
|
ac6743eb881c77ba8b80e19638969a032f0c5df3
|
/leo/setup.py
|
1f70613c44c8c9c64f468a4a2fc59e87767d51f3
|
[] |
no_license
|
leo-editor/leo-cvs-2006-2008
|
https://github.com/leo-editor/leo-cvs-2006-2008
|
4a983046e293d809698f11aa47eae640ad4fd07a
|
da696020bda4752700bf96f6417751346c92e3c4
|
refs/heads/master
| 2016-09-08T01:20:34.767629 | 2008-02-27T19:56:03 | 2008-02-27T19:56:03 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Must be an @nosent file.
# print '='*30,'setup.py','='*30
from distutils.core import setup
import leoGlobals as g
# Long description shown by distutils/PyPI; kept verbatim (runtime string).
long_description = \
"""Leo is an IDE, an outliner, a scripting and unit testing framework based on Python,
a literate programming tool, a data organizer and a project manager.
Leo is written in 100% pure Python and works on any platform that supports
Python 2.2.1 or above and the Tk Tk 8.4 or above.
Download Python from http://python.org/
Download tcl/Tk from http://tcl.activestate.com/software/tcltk/
"""
# long_description = g.adjustTripleString(long_description,c.tab_width)
# print repr(long_description)
version='4.4.7-final' # No spaces and no trailing comma.
if 1:
    setup (
        name='leo',
        version=version,
        author='Edward K. Ream',
        author_email='[email protected]',
        url='http://webpages.charter.net/edreamleo/front.html',
        download_url='http://sourceforge.net/project/showfiles.php?group_id=3458',
        py_modules=[], # The manifest specifies everything.
        description = 'Leo: Literate Editor with Outlines',
        license='Python', # licence [sic] changed to license in Python 2.3
        platforms=['all',],
        long_description = long_description,
        # keywords = 'outline, outliner, ide, editor, literate programming',
    )
# Python 2 print statements: emit a blank line, then a completion message.
print ; print 'setup.py done'
|
UTF-8
|
Python
| false | false | 2,008 |
8,770,323,218,442 |
01ccdf756d60d3c5a714324a87df1b90383258b7
|
a61d91fdd5c0314c8a82e5929ca02f087935444d
|
/tests/test_mipsb_instruction.py
|
42cc3de42275ace268923b95f70044d86d65268e
|
[] |
no_license
|
dongrote/pyda
|
https://github.com/dongrote/pyda
|
0408ad138960f4541a2acfbd149fd43b08ebe40c
|
f00864bd6e09e621568ffde717c811a22fc12af8
|
refs/heads/master
| 2016-09-06T17:15:08.258240 | 2013-05-06T22:16:40 | 2013-05-06T22:16:40 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from mipsb.instruction import BigEndianMipsInstruction
import unittest
class TestRInstructionOperands(unittest.TestCase):
    """Decode the three register operands of an R-type instruction.

    Word 0x00c72820 encodes `add $a1, $a2, $a3`: rs=6, rt=7, rd=5.
    """

    def setUp(self):
        self.bin = '\x00\xc7\x28\x20' # add $a1, $a2, $a3
        self.instruction = BigEndianMipsInstruction(self.bin)

    def test_getRd(self):
        self.assertEqual(self.instruction.rd, 5)

    def test_getRt(self):
        self.assertEqual(self.instruction.rt, 7)

    def test_getRs(self):
        # Fixed: the original asserted `rt == 6`, so rs was never tested
        # (and rt is actually 7 for this word).
        self.assertEqual(self.instruction.rs, 6)
class TestRInstructionShift(unittest.TestCase):
    """Shift-amount decoding; skipped until a `sll` test word is encoded."""
    def setUp(self):
        # skipTest aborts setUp immediately, so the lines below never run.
        self.skipTest('Test implementation incomplete')
        self.bin = '' # sll $a1, $a2, 4
        self.instruction = BigEndianMipsInstruction(self.bin)
    def test_getShiftAmount(self):
        self.assertEqual(self.instruction.shift,4)
class TestSyscallInstruction(unittest.TestCase):
    """Decode the mnemonic of the syscall word 0x0000000c."""
    def setUp(self):
        self.bin = '\x00\x00\x00\x0c'
        self.instruction = BigEndianMipsInstruction(self.bin)
    def test_getOpcodeMnemonic(self):
        self.assertEqual(self.instruction.mnemonic,'syscall')
if __name__ == '__main__':
    # Run every TestCase defined in this module.
    unittest.main()
|
UTF-8
|
Python
| false | false | 2,013 |
566,935,715,782 |
e2dc5c005a9e2997b62916466193de18a3c39d6f
|
41888e22a41f9e5f615e48737998a8b9ea4e4cab
|
/HostExtractor/URLLister.py
|
b73bc9288995117378c885fe9f6ee82e30831a3b
|
[] |
no_license
|
mohiulalamprince/python-scripts
|
https://github.com/mohiulalamprince/python-scripts
|
983b403e8d28addf0b56c2bc0ed9c0c5d382f2af
|
ea70a890eeaa47a4dbe6174206e13630714324a6
|
refs/heads/master
| 2021-05-27T00:38:19.083532 | 2014-04-28T09:30:46 | 2014-04-28T09:31:55 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
import sys
class URLLister:
    """Ad-hoc scraper that collects href="..." targets from raw HTML.

    Uses plain string search rather than an HTML parser, so it only finds
    double-quoted href attributes.
    """
    def __init__(self):
        # Collected URL strings, in document order.
        self.urls = []
    def reset(self):
        # Drop previously collected URLs.
        self.urls = []
    def feed(self, thisData):
        # Scan for each occurrence of href=" and capture up to the closing quote.
        lb = 0
        ub = 0
        #print thisData
        while True:
            lb = thisData.find("href=\"", ub)
            if lb == -1:
                break
            ub = thisData.find("\"", lb+6)
            #print lb, " ", ub
            print "URL: ", thisData[lb+6:ub]
            self.urls.append(thisData[lb+6:ub])
if __name__ == "__main__":
    # Smoke test: fetch smslib.org and print every href (Python 2 urllib).
    import urllib
    usock = urllib.urlopen("http://smslib.org/")
    parser = URLLister()
    parser.feed(usock.read())
    usock.close()
|
UTF-8
|
Python
| false | false | 2,014 |
1,382,979,501,601 |
268c6e10366d8eca9c13d826c00d9b0911e1a5e4
|
b874aebfa05919f8824a9c8c440ce4271490e64a
|
/datools/som.py
|
f513b9d2c0ded78b0234d558e3c111d0d9df2f96
|
[
"GPL-1.0-or-later",
"GPL-2.0-only"
] |
non_permissive
|
darribas/darribas-python-tools
|
https://github.com/darribas/darribas-python-tools
|
7b7589a215cedf4fcde16022e4b67828270799ba
|
719669a22e11793bdc6e62e8d94bfe8106ce2d70
|
refs/heads/master
| 2016-09-08T02:01:54.147141 | 2012-08-15T17:05:24 | 2012-08-15T17:05:24 | 1,405,327 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
'''
Utils to work with Self-Organizing Maps
'''
import numpy as N
import numpy.ma as ma
def load_dat(dat_link):
    '''
    Load the data of a .dat file into a numpy array

    Arguments
    ---------
    dat_link        : string
                      Path to the file

    Returns
    -------
    af              : ndarray
                      Float array with the data from the .dat file; the
                      first non-comment line (the dimension header) is
                      dropped, and columns that fail float conversion are
                      left as zeros.
    '''
    a = []
    # Context manager guarantees the handle is closed on every exit path.
    with open(dat_link) as dat:
        for line in dat:
            if line[0] != '#':
                a.append(line.strip('\n').strip('\r').split(' '))
    a = N.array(a[1:])  # drop the dimension-count header line
    af = N.zeros(a.shape)
    for col in range(a.shape[1]):
        try:
            # astype(float) converts the whole string column in one pass.
            # (The original used map(float, ...), whose iterator result no
            # longer assigns element-wise under Python 3.)
            af[:, col] = a[:, col].astype(float)
        except ValueError:
            # Narrowed from a bare except: only conversion failures are
            # expected here (columns holding names/labels).
            print('Column with strings')
    return af
def csv2dat(csv_link):
    '''
    Convert a csv file into .dat format suitable for SOM_PAK and other
    libraries

    NOTE: includes the csv header in the first line preceded by '#'
    ...

    Arguments
    ---------
    csv_link        : string
                      path of the csv (without extension) to be converted. The
                      .dat file will be created at the same location with the
                      same name but .dat extension
    '''
    # Context managers replace the manual close() calls and guarantee the
    # files are closed even if a write fails midway.
    with open(csv_link + '.csv', 'r') as fo, open(csv_link + '.dat', 'w') as ofo:
        head = fo.readline().replace(',', ' ')
        ofo.write('#' + head)
        # Dimension line: number of columns minus one (last column = labels).
        ofo.write(str(len(head.strip('#').strip('\n').split(' ')) - 1) + '\n')
        for line in fo:
            ofo.write(line.replace(',', ' '))
    return 'Done!'
def stdDat(datIN_link, names=True):
    """
    Standardize a dat file and create a new one with same link (+'Z').

    NOTE: Assumes the first line is a header (preceded by '#') and the
    second the dimension count; both are copied through unchanged.
    ...

    Arguments
    ---------
    datIN_link      : string
                      path to the .dat file (without extension) to be converted
    names           : boolean
                      If True (default) takes the last column as names.
                      NOTE(review): currently ignored — the last column is
                      always split off regardless of this flag; confirm the
                      intended False behavior before wiring it up.
    """
    # Context managers replace the manual close() calls.
    with open(datIN_link + '.dat') as fo:
        h0, h1 = fo.readline(), fo.readline()
        lines = fo.readlines()
    a = []
    for line in lines:
        a.append(line.strip('\n').split(' '))
    a = N.array(a)
    print(a.shape)  # progress/debug output kept from the original
    # `labels` replaces the original reassignment of `names`, which shadowed
    # (and silently discarded) the boolean parameter of the same name.
    data, labels = a[:, :-1], N.array([a[:, -1]]).T
    z = getZmv(data, 'x')
    with open(datIN_link + 'Z.dat', 'w') as fo:
        fo.write(h0 + h1)
        for row, name in zip(z, labels):
            line = ' '.join(row)
            line += ' %s\n' % name
            fo.write(line)
    return 'Done'
def getZmv(a,mv):
    """
    Helper for stdDat: z-score standardize a string matrix that may contain
    missing values, re-inserting a missing marker afterwards.

    Arguments:

    * a: array of strings with the input data (modified in place: missing
      cells are overwritten with 0)
    * mv: string for missing values (e.g. 'x')

    Returns:

    * z: standardized masked array of strings; missing cells hold 'x'
    """
    # Build a 0/1 mask of missing cells, zeroing them in `a` as we go.
    mascara=N.zeros(a.shape)
    for i in range(a.shape[0]):
        for j in range(a.shape[1]):
            if a[i,j]==mv:
                mascara[i,j]=1
                a[i,j]=0
    am=ma.masked_array(a,mask=mascara)
    # NOTE(review): this float conversion drops the mask, so the zeros
    # substituted for missing cells DO enter the mean/std below — confirm
    # whether that is intended.
    am=N.array(am,dtype=float)
    z=N.copy(am)
    z=(z-z.mean(axis=0))/z.std(axis=0)
    z=ma.masked_array(z,dtype=str)
    # Restore a missing-value marker in the standardized output.
    # NOTE(review): writes the literal 'x' rather than the mv argument.
    for i in range(mascara.shape[0]):
        for j in range(mascara.shape[1]):
            if mascara[i,j]==1:
                z[i,j]='x'
    return z
|
UTF-8
|
Python
| false | false | 2,012 |
2,018,634,672,971 |
55f35a62e6e25dbe999eee360de604b1ca0b09e0
|
973888c6e8d710ac80ecfaa01f02e21dccafa96e
|
/lib/rapidsms/urls.py
|
889c0448ac4c773a4c5fc8933afd0d24dcffab1f
|
[
"BSD-3-Clause"
] |
permissive
|
laughinghan/rapidsms-core-dev
|
https://github.com/laughinghan/rapidsms-core-dev
|
32a5b88e4bdf3ba99bd57a615ff9409a44e127ae
|
67ad08244b785909d420e814e0a88058623077c0
|
refs/heads/master
| 2021-01-16T20:26:50.990337 | 2010-08-14T15:44:05 | 2010-08-14T15:44:05 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
from django.conf.urls.defaults import *
from . import views
# RapidSMS web UI routes: dashboard home, widget management, and auth views.
urlpatterns = patterns('',
    url(r'^$', views.dashboard),
    url(r'^dashboard/add/widget/$', views.add_dashboard_widget),
    url(r'^dashboard/add/widget_entry/$', views.add_dashboard_widget_entry),
    url(r'^dashboard/del/$', views.delete_dashboard_widget),
    # Authentication endpoints (project-local login/logout views).
    url(r'^accounts/login/$', views.login),
    url(r'^accounts/logout/$', views.logout),
)
|
UTF-8
|
Python
| false | false | 2,010 |
11,072,425,696,392 |
4f57ea2ccb4014a1a0d74c36ca32cb03822c6d1e
|
781433ba455609489551739db12c3dab4030ab83
|
/fabfile.py
|
c4e581afbfe3a8ff9e96149f61067fa4417c2b2f
|
[] |
no_license
|
UCL-RITS/emerald_play
|
https://github.com/UCL-RITS/emerald_play
|
99026bd1f90790892b3efab38544c9fea44ff42b
|
182a1a209ca6ad1e139141477c3ca77324195970
|
refs/heads/master
| 2020-05-17T18:54:37.558065 | 2013-01-10T11:44:01 | 2013-01-10T11:44:01 | 7,539,947 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from deploy.deploy import *
|
UTF-8
|
Python
| true | false | 2,013 |
2,585,570,355,538 |
1a9f14bb40868e9284c6450e7b7c04b64a24977a
|
9d0837915f61581ee0a05fc7c17ca76b75261e52
|
/blog/models.py
|
17a31738e79450796c6a4a6e0f286d61b5fa358b
|
[] |
no_license
|
Deathridge/cloaked-octo-dubstep
|
https://github.com/Deathridge/cloaked-octo-dubstep
|
a06afe4b0fc88e60cf13eee20fc5a5ede7728fd6
|
e1864630753bcb3c3a4edbc91833bfaaf82fa0d6
|
refs/heads/master
| 2020-06-08T09:06:50.830236 | 2014-11-28T11:13:42 | 2014-11-28T11:13:42 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.db import models
# Create your models here.
class Post(models.Model):
    """A blog entry."""
    # Set once on insert; hidden from forms/admin.
    created_at = models.DateTimeField(auto_now_add=True, editable=False)
    # Refreshed on every save().
    updated_at = models.DateTimeField(auto_now=True, editable=False)
    title = models.CharField(max_length = 140)
    body = models.TextField()
    # Posts default to visible; views can filter on this flag.
    published = models.BooleanField(default=True)
    def __unicode__(self):
        # Python 2 display name (admin, shell).
        return self.title
class Comment(models.Model):
    """A reader comment attached to a Post."""
    created_at = models.DateTimeField(auto_now_add=True, editable=False)
    author = models.CharField(max_length = 60)
    body = models.TextField()
    post = models.ForeignKey(Post)
    def __unicode__(self):
        # "post title: first 60 chars of body"
        return unicode("%s: %s" % (self.post, self.body[:60]))
|
UTF-8
|
Python
| false | false | 2,014 |
18,090,402,256,935 |
16076bae7ffe37ca3e281d1e9d7deed8afc1552f
|
9624fe4a5a3eb23d8d1bdf5d2ce197ea06d00123
|
/Transcoder.py
|
b82de5fceec166ed0a6fc0e167b660053fa24195
|
[
"GPL-2.0-only"
] |
non_permissive
|
xaccc/videoapiserver
|
https://github.com/xaccc/videoapiserver
|
71312d05ea3c4e722c590342dc9871550e030a22
|
34fb1b46978bbaa30a64fc21ed521a18cab3a7b4
|
refs/heads/master
| 2021-01-13T01:44:00.515832 | 2014-05-21T09:57:47 | 2014-05-21T09:57:47 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
#coding=utf-8
import subprocess, threading, time, multiprocessing
import re, os, signal
import commands, uuid, json
from datetime import datetime
from MediaProbe import MediaProbe
from ConfigParser import ConfigParser
from MySQL import MySQL
# Global application configuration, loaded once at import time from Config.ini.
applicationConfig = ConfigParser()
applicationConfig.read('Config.ini')
def enum(**enums):
    """Create an ad-hoc enumeration type.

    Each keyword argument becomes a class attribute on a fresh class named
    'Enum', e.g. enum(A=1).A == 1.
    """
    members = dict(enums)
    return type('Enum', (), members)
# Job lifecycle states.  Values are Chinese display strings:
# queued / transcoding / finished / failed.
TaskStatus = enum(QUEUE='排队中', RUN='转码中', FINISHED='转码完成', ERROR='转码失败')
# Output presets, ordered by target resolution; bitrates are in bits/second.
# NOTE(review): these presets use the key 'audio_channel' while
# Worker.createSubProecess reads 'audio_channels' — confirm which is intended.
TranscodeTemplates = enum(
    # 160px-wide QCIF-class output, mono audio.
    QCIF = {
        'video_codec' : 'libx264 -profile:v baseline -level 12',
        'video_bitrate' : 90*1024,
        'video_width' : 160,
        'audio_codec' : 'aac -strict -2',
        'audio_channel' : 1,
        'audio_bitrate' : 16*1024,
    },
    # 320px-wide CIF-class output.
    CIF = {
        'video_codec' : 'libx264 -profile:v baseline -level 12',
        'video_bitrate' : 150*1024,
        'video_width' : 320,
        'audio_codec' : 'aac -strict -2',
        'audio_channel' : 1,
        'audio_bitrate' : 24*1024,
    },
    # 640px-wide standard definition.
    SD = {
        'video_codec' : 'libx264 -profile:v baseline -level 12',
        'video_bitrate' : 400*1024,
        'video_width' : 640,
        'audio_codec' : 'aac -strict -2',
        'audio_channel' : 1,
        'audio_bitrate' : 32*1024,
    },
    # 1280px-wide high definition.
    HD = {
        'video_codec' : 'libx264 -profile:v baseline -level 21',
        'video_bitrate' : 800*1024,
        'video_width' : 1280,
        'audio_codec' : 'aac -strict -2',
        'audio_channel' : 1,
        'audio_bitrate' : 64*1024,
    },
    # 1920px-wide full HD, stereo audio.
    HDPRO = {
        'video_codec' : 'libx264 -profile:v baseline -level 21',
        'video_bitrate' : 2*1024*1024,
        'video_width' : 1920,
        'audio_codec' : 'aac -strict -2',
        'audio_channel' : 2,
        'audio_bitrate' : 128*1024,
    },
)
class Transcoder(object):
    """Serial transcode-job queue.

    Workers are queued in a class-level list shared by ALL Transcoder
    instances; at most one Worker runs at a time, and each finished/failed
    worker starts the next one in FIFO order.
    """
    # Class attributes: shared queue and its guarding lock.
    __workers = []
    __lock = threading.Lock()
    def __init__(self, Started = None, Progress = None, Finished = None, Error = None):
        # Optional callbacks fired from the worker lifecycle hooks below.
        self.__cb_Start = Started
        self.__cb_Progress = Progress
        self.__cb_Finished = Finished
        self.__cb_Error = Error
    def addTask(self, settings, arg = None):
        # Queue a new Worker; start it immediately only if the queue was empty
        # (otherwise worker_finished/worker_error will start it later).
        worker = Worker(settings = settings, mgr = self, arg = arg)
        Transcoder.__lock.acquire()
        Transcoder.__workers.append(worker)
        if len(Transcoder.__workers) == 1:
            worker.start()
        Transcoder.__lock.release()
        pass
    def Count(self):
        # Number of queued plus running workers.
        return len(Transcoder.__workers)
    def worker_started(self, worker, arg):
        if self.__cb_Start:
            self.__cb_Start(arg)
    def worker_progress(self, worker, arg, percent, fps):
        if self.__cb_Progress:
            self.__cb_Progress(arg, percent, fps)
    def worker_finished(self, worker, arg):
        # Remove the finished worker and start the next queued one, if any.
        Transcoder.__lock.acquire()
        Transcoder.__workers.remove(worker)
        if len(Transcoder.__workers) > 0:
            Transcoder.__workers[0].start()
        Transcoder.__lock.release()
        if self.__cb_Finished:
            self.__cb_Finished(arg)
    def worker_error(self, worker, arg):
        # Same dequeue-and-advance as worker_finished, but fires the error callback.
        Transcoder.__lock.acquire()
        Transcoder.__workers.remove(worker)
        if len(Transcoder.__workers) > 0:
            Transcoder.__workers[0].start()
        Transcoder.__lock.release()
        if self.__cb_Error:
            self.__cb_Error(arg)
    @staticmethod
    def videoGeneratePoster(fileName):
        """Generate a poster JPEG next to fileName (thin wrapper).

        The duration computed here is unused; VideoPoster recomputes it.
        """
        destFileName, fileExtension = os.path.splitext(fileName)
        media = MediaProbe(fileName)
        duration = int(media.duration())
        return Transcoder.VideoPoster(fileName)
    @staticmethod
    def VideoPoster(fileName, destFileName = None, ss = None):
        """Grab a single frame as a JPEG poster; returns True on success.

        ss defaults to 1/5 of the clip duration; destFileName defaults to the
        source path with a .jpg extension.
        NOTE(review): fileName/destFileName are interpolated into a shell
        command — unsafe if paths can contain shell metacharacters.
        """
        if ss == None:
            media = MediaProbe(fileName)
            duration = int(media.duration())
            ss = float(duration) / 5
        if destFileName == None:
            destFileName, fileExtension = os.path.splitext(fileName)
            destFileName += '.jpg'
        code, text = commands.getstatusoutput('avconv -ss %s -i %s -map_metadata -1 -vframes 1 -y "%s"' % (str(ss), fileName, destFileName))
        if code != 0:
            return False
        return True
    @staticmethod
    def videoTransform(fileName, destFileName):
        """
        Move the upload to its video location, transcoding if needed.
        (Translated from the original Chinese docstring.)

        Streams already in h264/aac are stream-copied; anything else is
        re-encoded at the source bitrate.  Returns True and generates a
        poster on success.
        NOTE(review): file names are interpolated into a shell command —
        unsafe for untrusted paths.
        """
        media = MediaProbe(fileName)
        vcodec = 'copy' if media.videoCodec() == 'h264' else 'libx264 -b:v %d' % media.videoBitrate()
        acodec = 'copy' if media.audioCodec() == 'aac' else 'aac -strict -2 -b:a %d' % media.audioBitrate()
        code, text = commands.getstatusoutput('avconv -v 0 -i "%s" -map_metadata -1 -vcodec %s -acodec %s -y "%s"' % (fileName, vcodec, acodec, destFileName))
        if code != 0:
            return False
        return Transcoder.videoGeneratePoster(destFileName)
class Worker(threading.Thread):
def __init__(self, settings, mgr=None, arg=None):
    """
    One queued transcode job, executed on its own thread.

    settings: {
        'file'
        'video_codec'
        'video_bitrate'
        'video_width'
        'video_height'
        'audio_codec'
        'audio_channels'
        'audio_bitrate'
        'format'
        'output'
    }

    'file' and 'output' are mandatory; the others default in
    createSubProecess.  mgr is the owning Transcoder (receives lifecycle
    callbacks); arg is an opaque context passed back to those callbacks.
    """
    if not settings.has_key('file') or not settings.has_key('output'):
        # Python 2 raise syntax, kept as-is.
        raise ValueError, 'settings'
    threading.Thread.__init__(self)
    self._mgr = mgr
    self._arg = arg
    self._settings = settings
    # Progress counter — presumably seconds processed (it is divided by the
    # clip duration in progress()); TODO confirm against run().
    self._progress = 0
    self._fps = 0
    self._started = False
    self._isfinished = False
    self._keepaspect = True
    self._probe = MediaProbe(self._settings['file'])
def fps(self):
    # Most recent frames-per-second figure reported for this job.
    return self._fps
def settings(self):
    # The settings dict supplied at construction.
    return self._settings
def progress(self):
    # While running: fraction complete (progress / duration).  Once
    # finished: 1 if the job ever started, else -1 (error sentinel).
    return self._progress / self._probe.duration() if not self._isfinished else 1 if self._started else -1
def hasError(self):
    # Finished without ever having started successfully.
    return (not self._started) and self._isfinished
def isProcessing(self):
    return threading.Thread.isAlive(self)
def isStarted(self):
    return self._started
def isFinished(self):
    return self._isfinished
def keepAspect(self, keep=None):
    # Combined getter/setter: pass a value to set, omit it to read.
    if keep != None:
        self._keepaspect = bool(keep)
    return self._keepaspect
def createSubProecess(self):
command = []
# input
command.append('avconv')
command.append('-i "%s" -map_metadata -1' % self._settings['file'])
# video settings
if self._settings.get('video_codec') == 'copy':
command.append('-vcodec copy')
elif self._settings.get('video_codec') == 'none':
command.append('-vn')
else:
command.append('-vcodec')
command.append(str(self._settings.get('video_codec', 'libx264 -profile:v baseline -level 21')))
command.append('-b:v')
command.append(str(self._settings.get('video_bitrate', '150k')))
command.append('-s')
command.append(str(self._settings.get('video_size', '%ix%i' % (
int(self._settings.get('video_width', 320)),
int(self._settings.get('video_width', 320)/self._probe.videoAspect() if self.keepAspect() else self._settings.get('video_height', 240))
))))
# audio settings
if self._settings.get('audio_codec') == 'copy':
command.append('-acodec copy')
elif self._settings.get('audio_codec') == 'none':
command.append('-an')
else:
command.append('-acodec')
command.append(str(self._settings.get('audio_codec', 'aac -strict -2')))
command.append('-ac')
command.append(str(self._settings.get('audio_channels', '1')))
command.append('-b:a')
command.append(str(self._settings.get('audio_bitrate', '32k')))
# output settings
command.append('-f')
command.append(str(self._settings.get('format', 'mp4')))
command.append('-y "%s"' % self._settings['output'])
return subprocess.Popen(' '.join(command),
stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, close_fds=True)
def run(self):
self.subp = self.createSubProecess()
buf = ''
while True:
while True: #read output
readed = self.subp.stderr.read(1)
if not readed or len(readed) == 0:
break; # don't read anything
buf += readed
if 'bits/s' in buf:
#process line
line = buf[0:buf.index('bits/s')+6]
buf = buf[len(line):]
times = re.findall(r"time=\s*([\d+|\.]+?)\s", line)
if len(times) > 0:
if not self._started and self._mgr:
self._mgr.worker_started(self, self._arg)
self._started = True
self._progress = float(times[0])
fps = re.findall(r"fps=\s*([\d+|\.]+?)\s", line)
if len(fps) > 0:
self._fps = float(fps[0])
if self._mgr != None:
self._mgr.worker_progress(self, self._arg, self.progress(), self._fps)
if self.subp.poll() != None:
self._isfinished = True
if self._mgr != None:
if self._started:
self._mgr.worker_finished(self, self._arg)
else:
self._mgr.worker_error(self, self._arg)
break; # subprocess exit!!!
# Module-wide flag checked by the service loop; set by the signal handler
# to request a clean shutdown.
__shutdown = threading.Event()
def sig_handler(sig, frame):
    # SIGTERM/SIGINT handler installed by StartTranscodeService().
    print 'shutdown ...'
    __shutdown.set()
def __Started(transcodeId):
db = MySQL()
db.update("UPDATE `video_transcode` set `transcode_time` = now(), `progress` = 0 WHERE `id` = %s", (transcodeId))
db.end()
print "[%s] [Start] %s ..." % (datetime.now().strftime('%Y-%m-%dT%H:%M:%S'), transcodeId)
pass
def __Progress(transcodeId, percent, fps):
    """Persist the current completion fraction for the transcode task."""
    conn = MySQL()
    conn.update("UPDATE `video_transcode` set `update_time` = now(), `progress` = %s WHERE `id` = %s", (float(percent), transcodeId))
    conn.end()
def __Finished(transcodeId):
print "[%s] [Finished] %s ..." % (datetime.now().strftime('%Y-%m-%dT%H:%M:%S'), transcodeId)
db = MySQL()
db.update("UPDATE `video_transcode` set `is_ready` = 1, `progress` = 1 WHERE `id` = %s", (transcodeId))
db.end()
pass
def __Error(transcodeId):
print "[%s] [Error] %s ..." % (datetime.now().strftime('%Y-%m-%dT%H:%M:%S'), transcodeId)
pass
def StartTranscodeService():
import socket
hostname = socket.gethostname()
pid = os.getpid()
f = open('transcoder.pid', 'wb')
f.write(str(pid))
f.close()
signal.signal(signal.SIGTERM, sig_handler)
signal.signal(signal.SIGINT, sig_handler)
db = MySQL()
transcoder = Transcoder(Started = __Started, Progress = __Progress, Finished = __Finished, Error = __Error)
uploadDirectory = applicationConfig.get('Server','Upload')
videoDirectory = applicationConfig.get('Video','SavePath')
if not os.path.exists(videoDirectory):
os.makedirs(videoDirectory)
while True:
if __shutdown.wait(1):
break; # exit thread
if transcoder.Count() > 0:
continue; # wait process
taskList = db.list('SELECT * FROM `video_transcode` WHERE `transcoder` IS NULL ORDER BY `id` LIMIT 0,1 FOR UPDATE')
for task in taskList:
db.update("UPDATE `video_transcode` set `transcoder` = %s WHERE `id` = %s", (hostname, task['id']))
db2 = MySQL()
videoInstance = db2.get("SELECT * FROM `video` WHERE `id`=%s", (task['video_id']))
if videoInstance:
fileName = "%s/%s" % (uploadDirectory, videoInstance['upload_id'])
destFileName = "%s/%s" % (videoDirectory, task['file_name'])
transcoder.addTask({
'file' : fileName,
'video_codec' : task['video_codec'],
'video_bitrate' : task['video_bitrate'],
'video_width' : task['video_width'],
'video_height' : task['video_height'],
'audio_codec' : task['audio_codec'],
'audio_channels': task['audio_channels'],
'audio_bitrate' : task['audio_bitrate'],
'output' : destFileName,
}, arg = task['id'])
db.end()
while transcoder.Count() > 0:
theading.sleep(1)
print '.'
if __name__ == '__main__':
import sys
if len(sys.argv) >= 2:
def xxx(file, percent, fps):
print "[%s] complete: %s, fps: %s" % (file, percent, fps)
def xxx2(file):
print "[%s] completed!" % (file)
transcoder = Transcoder(Progress = xxx, Finished=xxx2)
transcoder.addTask({'file': sys.argv[1],
'output': sys.argv[2]}, sys.argv[1])
else:
StartTranscodeService()
|
UTF-8
|
Python
| false | false | 2,014 |
738,734,422,076 |
cd5d93814cbe0df6e858e9dfb48c0edf345ab66c
|
e7d3f94b8e0107f60ce1ae8dbf6118b30ff71cc2
|
/examples/example4.py
|
af38f750ea2a51bb1d63f2b6bbe03ec1ed31fa5f
|
[
"GPL-2.0-only"
] |
non_permissive
|
maksudc/WFDB-SWIG
|
https://github.com/maksudc/WFDB-SWIG
|
63323c3516dbb0a97f7d565e46acee7306cf2cf1
|
a3b6e91b229f5c1025b5b9b26980b8ce18200e8a
|
refs/heads/master
| 2021-01-19T06:32:16.098906 | 2014-02-19T21:36:52 | 2014-02-19T21:36:52 | 16,999,735 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
#
# File: example4.py I. Henry 30 March 2005
# last revised: 13 January 2013
#
# WFDB Example 4: Generating an R-R Interval Histogram
#
# This program reads an annotation file, determines the intervals
# between beat annotations (assumed to be the R-R intervals), and
# accumulates a histogram of them.
#
# This is a Python translation of example1.c from the WFDB
# Programmer's Guide.
#
# http://www.physionet.org/physiotools/wpg/wpg_49.htm#Example-4
#
# Copyright (C) 2013 Isaac C. Henry ([email protected])
#
# This file is part of wfdb-swig.
#
# wfdb-swig is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wfdb-swig is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with wfdb-swig. If not, see <http://www.gnu.org/licenses/>.
import wfdb, sys
def main(argv):
a = wfdb.WFDB_Anninfo()
annot = wfdb.WFDB_Annotation()
if len(argv) < 3:
print "usage:", argv[0], "annotator record"
sys.exit(1)
a.name = argv[1]
a.stat = wfdb.WFDB_READ
if wfdb.annopen(argv[2], a, 1) < 0: sys.exit(2)
rrmax = 3 * wfdb.sampfreq(argv[2])
if rrmax <= 0: sys.exit(3)
rrhist = [0] * int(rrmax+1)
while 1:
if not (wfdb.getann(0,annot) == 0 and not wfdb.wfdb_isqrs(annot.anntyp)): break
t = annot.time
while wfdb.getann(0, annot) == 0:
if wfdb.wfdb_isqrs(annot.anntyp):
rr = annot.time - t
if rr > rrmax: rr = rrmax
rrhist[rr] += 1
t = annot.time
for rr in range(1, int(rrmax)):
print '%(rr)4d %(time)s' % {'rr': rrhist[rr], 'time': wfdb.mstimstr(rr)}
rr += 1
print '%(rr)4d %(time)s (or longer)' % {'rr': rrhist[rr], 'time': wfdb.mstimstr(rr)}
wfdb.wfdbquit()
if __name__ == "__main__":
    # Entry point: expects `example4.py annotator record` on the command line.
    main(sys.argv)
|
UTF-8
|
Python
| false | false | 2,014 |
1,924,145,360,014 |
4da29f1da41da52502fb0c8086d2d503396b804b
|
de4d41c0b832e4b8731c2cc89adf100e17805f32
|
/src/x2/x2/urls.py
|
3497a32afb80c3a6bd0339dc4a470114491de2de
|
[] |
no_license
|
pmitros/x2
|
https://github.com/pmitros/x2
|
74db11b64e94ee763bd6ba2729fbc2dcdb6ee914
|
c1ea2b17568e82d72148056e465d0f9428f9de3a
|
refs/heads/master
| 2020-05-16T23:51:26.606805 | 2014-03-21T20:16:34 | 2014-03-21T20:16:34 | 11,745,201 | 1 | 0 | null | false | 2013-08-26T15:23:38 | 2013-07-29T17:21:21 | 2013-08-26T15:23:38 | 2013-08-26T15:23:38 | 910 | null | 0 | 13 |
Python
| null | null |
from django.conf.urls import patterns, include, url
from django.conf import settings
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
    # Public landing page.
    url(r'^$', 'marketing.views.index', name='home'),
    url(r'^debug_index$', 'instructor.views.index'),
    # Instructor-facing classroom layout and capture views.
    url(r'^x2/instructor/(?P<course_slug>.+)/(?P<session_slug>.+)/view-layout$', 'instructor.views.view_layout'),
    url(r'^x2/instructor/(?P<course_slug>.+)/(?P<session_slug>.+)/manage-layout$', 'instructor.views.manage_layout'),
    url(r'^x2/instructor/(?P<course_slug>.+)/(?P<session_slug>.+)/capture$', 'instructor.views.capture'),
    url(r'^x2/instructor/(?P<course_slug>.+)/(?P<session_slug>.+)/capture-iframe$', 'instructor.views.capture_iframe'),
    # Short, memorable redirect URLs.
    url(r'^teacher', 'instructor.views.instructor_shorturl'),
    url(r'^capture', 'instructor.views.capture_shorturl'),
    url(r'^interactions', 'instructor.views.interactions'),
    # AJAX endpoints used by the layout / capture pages.
    # url(r'^x2/ajax/layout/create$', 'instructor.views.create_layout'),
    url(r'^x2/ajax/layout/blocks/update$', 'instructor.views.ajax_layout_blocks_update'),
    url(r'^x2/ajax/layout/students/update$', 'instructor.views.ajax_layout_students_update'),
    url(r'^x2/ajax/layout/student/update$', 'instructor.views.ajax_layout_student_update'),
    url(r'^x2/ajax/layout/session-student/update$', 'instructor.views.ajax_layout_session_student_update'),
    url(r'^x2/ajax/layout/help-request/new$', 'instructor.views.ajax_layout_help_request_new'),
    url(r'^x2/ajax/capture/interaction/stop$', 'instructor.views.ajax_capture_interaction_stop'),
    url(r'^x2/ajax/capture/interaction/store_media$', 'instructor.views.ajax_capture_interaction_store_media'),
    url(r'^x2/ajax/capture/interaction/accept$', 'instructor.views.ajax_capture_interaction_accept'),
    url(r'^x2/ajax/layout/students/progress$', 'instructor.views.ajax_layout_students_progress'),
    # url(r'^ajax/layout/remove$', 'instructor.views.remove_layout'),
    url(r'^account/', include('account.urls')),
    # Examples:
    # url(r'^$', 'x2.views.home', name='home'),
    # url(r'^x2/', include('x2.foo.urls')),
    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    url(r'^x2/admin/', include(admin.site.urls)),
    # App-level URLconfs mounted at the root.
    url("", include('stuview.urls')),
    url("", include('django_socketio.urls')),
    url('', include('player.urls')),
)
if settings.DEBUG:
    # static files (images, css, javascript, etc.) -- served by Django
    # only during development; production serves these from the web server.
    urlpatterns += patterns('',
        (r'^x2/media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),
        (r'^captest/(?P<path>.*)$', 'django.views.static.serve', {'document_root': '/edx/x2/src/capture-widget/src'}),
    )
# staticfiles_urlpatterns() is itself a no-op when DEBUG is False.
urlpatterns += staticfiles_urlpatterns()
|
UTF-8
|
Python
| false | false | 2,014 |
4,604,204,973,799 |
723e3c02208bf6a2a97ba0cb76f74dcd6e1e4f60
|
3ffef006cd5b0e950cc92f89e689819d317b801a
|
/crm_member/migrations/0001_initial.py
|
755abe3ad6ffd26aa8306f88fa1ae790e5818650
|
[] |
no_license
|
prauscher/vpanel2
|
https://github.com/prauscher/vpanel2
|
8b48d5b0472714ab6d415bfd00e563f57e6b5541
|
2abeebf5e7df3e3811e71d9c9228aa0aed33544d
|
refs/heads/master
| 2020-12-24T16:59:25.700423 | 2013-10-04T09:27:23 | 2013-10-04T09:27:23 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: create the Membership and Member tables.

    Auto-generated by `schemamigration`; the `models` dict below is a
    frozen snapshot of the ORM state at generation time and should not
    be edited by hand.
    """
    def forwards(self, orm):
        # Adding model 'Membership'
        db.create_table(u'crm_member_membership', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('label', self.gf('django.db.models.fields.CharField')(max_length=30)),
        ))
        db.send_create_signal(u'crm_member', ['Membership'])
        # Adding model 'Member'
        db.create_table(u'crm_member_member', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('contact', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['crm_contacts.Contact'])),
            ('membership', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['crm_member.Membership'])),
            ('joinDate', self.gf('django.db.models.fields.DateField')()),
            ('resignationDate', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
        ))
        db.send_create_signal(u'crm_member', ['Member'])
    def backwards(self, orm):
        # Deleting model 'Membership'
        db.delete_table(u'crm_member_membership')
        # Deleting model 'Member'
        db.delete_table(u'crm_member_member')
    # Frozen ORM snapshot used by South to reconstruct model state.
    models = {
        u'crm.entity': {
            'Meta': {'object_name': 'Entity'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'links': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'links_rel_+'", 'blank': 'True', 'to': u"orm['crm.Entity']"}),
            'memo': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['crm.Tag']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'crm.tag': {
            'Meta': {'object_name': 'Tag'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'label': ('django.db.models.fields.CharField', [], {'max_length': '30'})
        },
        u'crm_contacts.contact': {
            'Meta': {'object_name': 'Contact', '_ormbases': [u'crm.Entity']},
            'emails': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['crm_contacts.Email']", 'symmetrical': 'False'}),
            u'entity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['crm.Entity']", 'unique': 'True', 'primary_key': 'True'}),
            'telephoneNumbers': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['crm_contacts.TelephoneNumber']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'crm_contacts.email': {
            'Meta': {'object_name': 'Email'},
            'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        u'crm_contacts.telephonenumber': {
            'Meta': {'object_name': 'TelephoneNumber'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'telephone': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'})
        },
        u'crm_member.member': {
            'Meta': {'object_name': 'Member'},
            'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['crm_contacts.Contact']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'joinDate': ('django.db.models.fields.DateField', [], {}),
            'membership': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['crm_member.Membership']"}),
            'resignationDate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
        },
        u'crm_member.membership': {
            'Meta': {'object_name': 'Membership'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'label': ('django.db.models.fields.CharField', [], {'max_length': '30'})
        }
    }
    complete_apps = ['crm_member']
|
UTF-8
|
Python
| false | false | 2,013 |
2,585,570,360,635 |
2686dcaf62f9705cc9449952916696640962ccd6
|
0d7160928364c05b3a31a5a4d9d26b734aa175b3
|
/taskhat/ngrams.py
|
7ff1e1a7925b07bfcb680528f6e82c28bcc8507c
|
[] |
no_license
|
ericl/taskhat
|
https://github.com/ericl/taskhat
|
2582a6e83c26ef07ea377c26582ea53787dc3505
|
52d898e07575afc7124bed271bb43ff45c7258d1
|
refs/heads/master
| 2021-01-25T08:28:10.543753 | 2012-01-28T05:18:41 | 2012-01-28T05:18:41 | 754,725 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import util
import re
def substitutions(token):
    """Yield `token` and, if it contains digits, a digit-masked variant.

    The variant replaces every decimal digit with the letter 'N' so that
    numerically different tokens share a single n-gram bucket.
    """
    yield token
    masked = re.sub('[0-9]', 'N', token)
    if masked != token:
        yield masked
def ngen(text, nn=[1, 2, 3, 4]):
    """
    Returns a util.Counter of character n-gram counts for `text`.

    text -- the string to analyse (lower-cased before counting)
    nn   -- iterable of n-gram sizes to extract (default 1..4)

    Besides the n-grams themselves (plus their digit-masked variants
    from substitutions()), a single integer key sqrt(len(text)) is set
    as a crude length feature.
    """
    ngrams = util.Counter()
    text = text.lower()
    # Iterate sizes in ascending order WITHOUT mutating the caller's
    # argument: the previous code called nn.sort(), which re-ordered
    # any list the caller passed in (and the shared default) in place.
    for n in sorted(nn):
        for i in range(len(text) - n + 1):
            token = text[i:i + n]
            for s in substitutions(token):
                ngrams[s] += 1
    ngrams[int(len(text) ** 0.5)] = 1
    return ngrams
if __name__ == '__main__':
    # Smoke test: shows digit masking ('237' -> 'NNN') and the length key.
    print ngen("It 'ate' 237 apples.")
|
UTF-8
|
Python
| false | false | 2,012 |
5,858,335,430,198 |
ec4b5ee56afdb080101d75b4528ed4e7daa33eac
|
f98770b849ecacaa132eb95443c054c16c29bee6
|
/utils/filters.py
|
e5661e398884925edba9a3a37a476cda181317a5
|
[] |
no_license
|
lcruz/crowd
|
https://github.com/lcruz/crowd
|
c69d4df0662a4e836198557e8238902632d96b69
|
a8e5ac8518aa94536622fe178bd754f32ea391d5
|
refs/heads/master
| 2021-01-01T19:07:26.660919 | 2011-09-14T14:55:03 | 2011-09-14T14:55:03 | 2,385,923 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from flask import Flask
from flask import render_template, redirect
app = Flask("crowd")
@app.template_filter('datetimeformat')
def datetimeformat(value, format='%H:%M / %d-%m-%Y'):
    # Jinja template filter: render a datetime via strftime.  The format
    # may be overridden in templates, e.g. {{ ts|datetimeformat('%Y-%m-%d') }}.
    return value.strftime(format)
|
UTF-8
|
Python
| false | false | 2,011 |
223,338,316,865 |
653cfe34fdc136c234bee16b002b292f0c02bf3b
|
b03621f8512a4b7598737045585fca8bd02526d1
|
/europython2012/trainings/twisted/EuroPython-2011-Twisted-Training/threadedclient.py
|
1db424b2c229fbb92afdbcbcc215d833e9ab0409
|
[] |
no_license
|
dialelo/notes
|
https://github.com/dialelo/notes
|
e91ecf76387106428682685d069d209446509013
|
c9637f8f220e9b26417d9e2bde699df7bbcdf6a9
|
refs/heads/master
| 2016-09-16T10:14:39.486326 | 2014-10-30T16:39:52 | 2014-10-30T16:39:52 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import threading
from multiclient import make_connection
def t_connection(host, port, d):
    # Thread body: open one connection, send payload `d`, print the reply.
    print 'sending', d
    print make_connection(host, port, d)
if __name__ == '__main__':
    # Usage: threadedclient.py host:port data [data ...]
    # Spawns one thread per payload so the connections run concurrently,
    # then waits for all of them to finish.
    import sys
    host, port = sys.argv[1].split(':')
    data_to_send = sys.argv[2:]
    threads = []
    for d in data_to_send:
        t = threading.Thread(target=t_connection, args=(host, int(port), d))
        t.start()
        threads.append(t)
    for t in threads:
        t.join()
    print 'finished'
|
UTF-8
|
Python
| false | false | 2,014 |
9,526,237,466,141 |
25c5cbd1dd26134a327728eeb707e6463d50ba5f
|
fba2ef3d2867d118aae52d96dbac75b4bf63a42b
|
/tests.py
|
c6b0ab405d3bc19dce449abf4d6d9888884a6d9d
|
[] |
no_license
|
bryanveloso/elophant.py
|
https://github.com/bryanveloso/elophant.py
|
e8a150db786d3ff5341845b3e6c64f8a2e57d4d6
|
a74019d6c52c53cb6d254a7b561f44eae9af540c
|
refs/heads/master
| 2021-01-10T14:19:04.871979 | 2013-03-01T08:27:08 | 2013-03-01T08:27:08 | 8,473,829 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
import py, pytest
@pytest.fixture(scope='module')
def api():
    # Shared Elophant API client for the whole test module.
    # NOTE(review): the API key is hard-coded; consider loading it from
    # the environment so it is not committed to the repository.
    from elophant import Elophant
    api = Elophant(key='KhsmDi7vcb5QM7Sh6BQY', region='na')
    return api
@pytest.fixture(scope='module')
def data():
    # Static identifiers for a known summoner and team used by the tests.
    return {
        'account_id': 32736337,
        'summoner_name': 'Crs Rhux',
        'summoner_id': 19959767,
        'team_id': 'TEAM-7362d288-7d5c-45be-a3d6-4e39bd925679',
        'team_name': 'Crs'
    }
@pytest.mark.usefixtures('api', 'data')
class TestClass:
    """Integration tests against the live Elophant API.

    Each test issues one API call and asserts the response envelope
    reports success; a few also spot-check returned fields.  These
    require network access and a valid API key to pass.
    """
    def test_get_summoner(self, api, data):
        call = api.get_summoner(data['summoner_name'])
        assert call['success']
        assert call['data']['name'] == data['summoner_name']
    def test_get_mastery_pages(self, api, data):
        call = api.get_mastery_pages(data['summoner_id'])
        assert call['success']
    def test_get_rune_pages(self, api, data):
        call = api.get_rune_pages(data['summoner_id'])
        assert call['success']
    def test_get_recent_games(self, api, data):
        call = api.get_recent_games(data['account_id'])
        assert call['success']
    def test_get_summoner_names(self, api):
        # Batch lookup: ids must come back in request order.
        call = api.get_summoner_names([19959767, 34292665])
        assert call['success']
        assert call['data'][0] == 'Crs Rhux'
        assert call['data'][1] == 'Bischu'
    def test_get_leagues(self, api, data):
        call = api.get_leagues(data['summoner_id'])
        assert call['success']
    def test_get_ranked_stats(self, api, data):
        call = api.get_ranked_stats(data['account_id'])
        assert call['success']
    def test_get_summoner_team_info(self, api, data):
        call = api.get_summoner_team_info(data['summoner_id'])
        assert call['success']
    def test_get_team(self, api, data):
        call = api.get_team(data['team_id'])
        assert call['success']
    def test_find_team(self, api, data):
        call = api.find_team(data['team_name'])
        assert call['success']
    def test_get_team_ranked_stats(self, api, data):
        call = api.get_team_ranked_stats(data['team_id'])
        assert call['success']
|
UTF-8
|
Python
| false | false | 2,013 |
12,713,103,213,501 |
52089b39d70ab100c4ec3d0112aea1992054e186
|
4ab6d43f47226599674e1c540e842ce4d21ca633
|
/rc/cms/management/commands/loadpages.py
|
839c945f5195b757fd5efca70d54f61f923f54d9
|
[] |
no_license
|
rerb/django-irc
|
https://github.com/rerb/django-irc
|
138617e3b85c75382f6f9127b21cd62c71e2f257
|
6acb6d1ca092484cda7bbb7e467083d5801708be
|
refs/heads/master
| 2021-01-26T08:18:00.001327 | 2014-05-05T18:45:18 | 2014-05-05T18:45:18 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from rc.cms.process import generate_pages
class Command(BaseCommand):
    """``manage.py loadpages`` -- (re)generate all CMS pages."""
    # The commented-out options below were a planned (never wired-up)
    # way to restrict generation to a category or a set of models.
    # option_list = BaseCommand.option_list + (
    #     make_option('--category', action='store', dest='category_name',
    #         help='Category of models to load'),
    #     make_option('--models', action='store', dest='model_names',
    #         help='Models to load') )
    def handle(self, *args, **options):
        # All work is delegated to rc.cms.process.generate_pages().
        generate_pages()
|
UTF-8
|
Python
| false | false | 2,014 |
6,777,458,433,035 |
ce6b0cb23c9aa24108a432286cc7ebd3d651fab2
|
9ced9c6a566d81c374e28a39fcdea817c43235a7
|
/main.py
|
abf44fe1f48508289a84b0cbe347202cdf9a4cb4
|
[
"MIT"
] |
permissive
|
Dryvenn/fifa
|
https://github.com/Dryvenn/fifa
|
2ca1ce15d218fece3d595ddeb095b9ca4a097165
|
df10ea3cbb56add3e7c39065b42776c7f054f5bf
|
refs/heads/master
| 2016-07-28T07:00:25.015655 | 2014-11-19T20:28:04 | 2014-11-19T20:28:04 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
FILEPATH = "./Samples/c17.v"
import time
from datetime import timedelta
from supervisor import *
class Main:
    """Interactive front end: collect parameters, then run the Supervisor."""

    def __init__(self):
        # All questions are asked at construction time, so `Main().run()`
        # is the complete program.
        self.doQueries()

    def doQueries(self):
        self.queryFile()
        self.queryModifier()
        self.queryQ()

    def queryFile(self):
        """Ask for the verilog file path (ENTER keeps the default)."""
        file = input("\nPlease specify the path of a verilog file:\n(Hit ENTER for " + FILEPATH + " )\n\n> ")
        self.file = file if file != "" else FILEPATH

    def queryModifier(self):
        """Ask for the stuck-at fault modifier: '01', '10' or none ('00')."""
        # The old prompt said "'O1'" (letter O), telling users to type a
        # value that the validation below then rejected.
        modifier = input("\nType in '01' for the stuck-at-one flaw, '10' for the stuck-at-zero, or press ENTER not to activate these.\n\n> ")
        modifier = modifier if modifier != "" else "00"
        if modifier != "00" and modifier != "01" and modifier != "10":
            raise ValueError("The modifier must be either '01', either '10' or '00'.")
        self.modifier = modifier

    def queryQ(self):
        """Ask for the gate reliability q, a float in [0, 1] (default 0.5)."""
        q = input("\nType in the reliability you want for all the gates, or hit ENTER for the default value (0.5).\n\n> ")
        q = q if q != "" else 0.5
        try:
            q = float(q)
        except ValueError:
            # The old code only printed a message and fell through, which
            # then crashed on the `q < 0` comparison with q still a str.
            raise ValueError("q must be a float")
        if q < 0 or q > 1:
            raise ValueError("q must verify 0 <= q <= 1")
        self.q = q

    def run(self):
        """Launch the supervisor on the collected settings and time it."""
        print("\nPlease wait for the tests to occur.\nIt can take up to several minutes depending on your computer.\n\n")
        supervisor = Supervisor(self.file, self.modifier, self.q)
        startTime = time.time()
        supervisor.launch()
        endTime = time.time()
        delta = timedelta(seconds=endTime-startTime)
        # Report the dominant unit only (days > seconds > sub-second).
        message = str(delta.days) + " days" if delta.days > 0 else ( str(delta.seconds) + " seconds" if delta.seconds > 0 else "less than a second" )
        print("The computation took " + message + ".")
Main().run()
|
UTF-8
|
Python
| false | false | 2,014 |
12,627,203,889,982 |
646862ff97d093c22a794f14471a1a6c334c5d3e
|
9a4abee416b2d281630287c1c36fb59856b9126c
|
/p2.py
|
a9bc87693f7dd2b114c1676c608feffac3316295
|
[
"GPL-2.0-only"
] |
non_permissive
|
tvanesse/projectEuler
|
https://github.com/tvanesse/projectEuler
|
9209178d1d741979178129f1ca9b77d5a67fff11
|
84e286b302399ce883e01fd870ada1c8e43d5e28
|
refs/heads/master
| 2016-09-06T02:14:28.800376 | 2014-07-27T19:05:41 | 2014-07-27T19:05:41 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
Project Euler - Problem 2
Copyright (C) 2014 Thomas Vanesse
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
from pylab import *
def fibo_memo(n):
    '''
    Returns the nth number in the fibonacci sequence
    (fib(0)=0, fib(1)=fib(2)=1), memoizing results for n > 2 on the
    function object itself so repeated calls are cheap.
    '''
    # Lazily create the per-function cache on first use.
    if not hasattr(fibo_memo, "memo"):
        fibo_memo.memo = {}
    cached = fibo_memo.memo.get(n)
    if cached is not None:
        return cached
    if n == 0:
        return 0
    if n <= 2:
        return 1
    value = fibo_memo(n - 1) + fibo_memo(n - 2)
    fibo_memo.memo[n] = value
    return value
def p2(N):
    '''
    Each new term in the Fibonacci sequence is generated by adding the
    previous two terms. By starting with 1 and 2, the first 10 terms
    will be:
        1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ...
    By considering the terms in the Fibonacci sequence whose values
    do not exceed `N`, find the sum of the even-valued terms.
    '''
    # Walk the sequence directly and keep the even terms.  (The even
    # terms are exactly every third Fibonacci number, which the original
    # exploited via a memoized helper; summing evens from a plain
    # iteration yields the identical result.)
    total = 0
    a, b = 1, 2
    while b <= N:
        if b % 2 == 0:
            total += b
        a, b = b, a + b
    return total
print(p2(4000000))
|
UTF-8
|
Python
| false | false | 2,014 |
11,570,641,938,264 |
93d126ec418ad134374cce17562fe8289b80232e
|
98ddb6e6e4c0154b936586c04eb256d63b0556b4
|
/ReconStruct/ManifestStr.py
|
6fb08ca0ce8175bf3694a52c05f41e5e5ff8a818
|
[
"LicenseRef-scancode-other-copyleft",
"GPL-1.0-or-later",
"GPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer"
] |
non_permissive
|
Techlord-RCE/ReconStruct
|
https://github.com/Techlord-RCE/ReconStruct
|
78f341eb1af7462bcb3a845e1c572561c0c30917
|
056ba80dd484a8569c933d3910b0321af2bd9ba7
|
refs/heads/master
| 2021-05-28T15:59:51.460048 | 2014-09-08T16:47:25 | 2014-09-08T16:47:25 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
ReconStruct
Reconstruct is a application which helps easily reverse engineer binary file
formats. It is tested to run on Python 2.7, 3.3, 3.4 and pypy.
Copyright (c) 2014 Sandy Carter
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
try:
from ReconStruct.ManifestBase import ManifestBase, ParsedBase
except ImportError:
from ManifestBase import ManifestBase, ParsedBase
class ManifestStr(ManifestBase):
    """Descriptor manifest which parses strings"""
    def __init__(self, label, size, type_name='str', parent=None):
        # Fixed-size UTF-8 string field descriptor.
        super(ManifestStr, self).__init__(label, size, type_name, parent)
    def __call__(self, data, start=0):
        # Decode `self.size` bytes of `data` beginning at `start` as
        # UTF-8.  On a decode error the error text itself becomes the
        # parsed value instead of raising, so parsing of the rest of the
        # file can continue.
        try:
            return self.parser()(
                self,
                data[start:start + self.size].decode("utf-8"),
                start,
                self.size
            )
        except UnicodeError as e:
            return self.parser()(self, str(e), start, self.size)
    @classmethod
    def type(cls):
        # Type tag used to look this descriptor up in the manifest registry.
        return 'str'
    @classmethod
    def parser(cls):
        # The parsed-value companion class for this descriptor.
        return ParsedStr
class ParsedStr(ParsedBase):
    # Parsed string value; adds no behaviour beyond the base class.
    def __init__(self, manifest, data, index, size):
        super(ParsedStr, self).__init__(manifest, data, index, size)
|
UTF-8
|
Python
| false | false | 2,014 |
18,537,078,858,438 |
5851629ba6084391311251ea49fcd5b0e8166a84
|
faa1472bbea67fe06ffb433063d077307411c303
|
/apps/mfa_articles/admin.py
|
dede70c0fcfd4dc6c1b5aeb72d3f706382d6a772
|
[] |
no_license
|
ixtel/mini-cms-for-mfa
|
https://github.com/ixtel/mini-cms-for-mfa
|
969aaf963239a1597f7a8d5a062a7620f1801a0a
|
efb569cd6ab56d781893300443d35210d40c79b6
|
refs/heads/master
| 2021-01-18T20:51:42.631322 | 2011-07-04T03:20:43 | 2011-07-04T03:20:43 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.contrib import admin
from mfa_articles.models import Article, SiteCard, Category
class ArticleAdmin(admin.ModelAdmin):
    """Admin for Article: list columns, search box and TinyMCE editor."""
    list_display = ('title', 'slug', 'description',
                    'keywords', 'pub_date', 'content')
    # Was `search_field` (singular), which Django silently ignores; the
    # ModelAdmin option that enables the admin search box is
    # `search_fields`.
    search_fields = ('title', 'slug')

    class Media:
        # Load TinyMCE so text fields get a rich-text editor.
        js = [
            '/static/tiny_mce/tiny_mce.js',
            '/media/common_js/tinymce_setup.js',
        ]
class CategoryAdmin(admin.ModelAdmin):
    """Admin for Category: list columns, search box and TinyMCE editor."""
    list_display = ('title', 'slug', 'description')
    # Was `search_field` (singular), which Django silently ignores; the
    # correct ModelAdmin option is `search_fields`.
    search_fields = ('title', 'slug')

    class Media:
        # Load TinyMCE so text fields get a rich-text editor.
        js = [
            '/static/tiny_mce/tiny_mce.js',
            '/media/common_js/tinymce_setup.js',
        ]
class SiteCardAdmin(admin.ModelAdmin):
    # Admin for the single site-description record, with TinyMCE editing.
    list_display = ('title', 'description', 'keywords', 'text')

    class Media:
        js = [
            '/static/tiny_mce/tiny_mce.js',
            '/media/common_js/tinymce_setup.js',
        ]
# Register all three models with their customised admin classes.
admin.site.register(Article, ArticleAdmin)
admin.site.register(Category, CategoryAdmin)
admin.site.register(SiteCard, SiteCardAdmin)
|
UTF-8
|
Python
| false | false | 2,011 |
5,368,709,150,025 |
c11144afb9ab15750b70278960549689cfa108e6
|
edb4e20c49f5e67654b2d8d94151fe5b9a0b2829
|
/(#131)PrimeCubeProperty.py
|
15eeeca6918591128f84b3ccb5a55a095a5010ac
|
[] |
no_license
|
bigeast/ProjectEuler
|
https://github.com/bigeast/ProjectEuler
|
c58e4ab59ce0f07126e26b5aead9a86aeca0f329
|
157ae34fc3d22ce32d4366e96dc8ef8dc453c958
|
refs/heads/master
| 2021-01-21T09:43:34.760900 | 2013-12-27T02:15:34 | 2013-12-27T02:15:34 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import time
def Primes(a):
    """Sieve of Eratosthenes.

    Returns a list of length a+1 where entry i is True iff i is prime.
    """
    sieve = [True]*(a+1)
    sieve[:2] = [False, False]
    sqrt = int(a**.5)
    # The range must include sqrt itself: with range(2, sqrt) the square
    # of the largest candidate was never crossed off (e.g. Primes(25)
    # reported 25 as prime).  `//` keeps the length arithmetic integral
    # on both Python 2 and 3.
    for x in range(2, sqrt + 1):
        if sieve[x]:
            sieve[2*x::x] = [False]*(a//x - 1)
    return sieve
def root3rd(x):
    """Integer cube root of x via a rounded Newton-style iteration."""
    prev = None
    cur = 2  # fixed starting guess; iteration converges from here
    while prev != cur:
        prev = cur
        cube = prev**3
        denom = 2*cube + x
        # Newton step with half-denominator rounding to stay integral.
        cur = (prev*(cube + 2*x) + denom//2)//denom
    return prev
def isCube(x):
    """Return True iff non-negative x is a perfect cube.

    The old version only tested int(x**(1/3)) + 1, so float truncation
    made e.g. isCube(8) and isCube(27) return False.  Testing the
    rounded root and its immediate neighbours (to absorb float error on
    large inputs) fixes that.
    """
    y = int(round(x**(1./3)))
    for cand in (y - 1, y, y + 1):
        if cand >= 0 and cand*cand*cand == x:
            return True
    return False
def main(a):
    # For each prime p below `a`, search n (in a sliding window of 30
    # starting after the previous hit) such that n**9 + n**6 * p is a
    # perfect cube, and count the hits.
    # NOTE(review): Project Euler 131 uses n**3 + n**2*p; here n appears
    # substituted by n**3 -- confirm this transformation is intended.
    p = Primes(a)
    primes = []
    count = 1
    for x in range(len(p)):
        if p[x]:
            primes.append(x)
    c = 1
    for p in primes:
        #print p
        for n in range(c, c+30):
            #print p, n, n**3+n**2*p
            if isCube(n**9+n**6*p):
                c = n+1
                count += 1
                print p,n
                break
    print count
#print root3rd(62)
# Driver: runs the search for all primes below one million.
main(1000000)
def find(n):
    """Return (squares, cubes): [1**2 .. (n-1)**2] and [1**3 .. (n-1)**3]."""
    squares = [k * k for k in range(1, n)]
    cubes = [k * k * k for k in range(1, n)]
    return squares, cubes
def isPrime(n):
    """Trial-division primality test: True iff n is prime."""
    if n < 2:
        # No primes below 2.
        return False
    if n == 2:
        # The only even prime.
        return True
    if n % 2 == 0:
        return False
    # Only odd divisors up to sqrt(n) need checking.
    limit = int(n ** 0.5) + 1
    return all(n % divisor for divisor in range(3, limit, 2))
# Alternative brute-force search over pairs of cubes (x, y), y < x:
# test whether p = (x - y) / y**(2/3) is an integer prime below `a`.
# Python 2 only (print statement, classic division semantics).
def main2(a):
    '''p=Primes(a)
    primes=[]
    count=0
    for x in range(len(p)):
        if p[x]:
            primes.append(x)'''
    # s: squares (unused here); c: cubes of 1..9999.
    s,c=find(10000)
    for x in c:
        for y in c:
            if y>=x:
                # cubes are generated in increasing order -> stop early
                break
            temp=x-y
            temp2=round(y**(2./3))  # y**(2/3) == (cube root of y) squared
            temp3=temp/temp2
            if temp3==int(temp3):
                if temp3<a:
                    if isPrime(temp3):
                        print temp/temp2, temp2**.5
                        break
|
UTF-8
|
Python
| false | false | 2,013 |
19,327,352,872,021 |
05927fc569cc4fdada33e4599cb9ec94ed27dc9a
|
5ba8678c743ef25405528ac299a04a1251d11750
|
/vumi/application/message_store.py
|
3503d3556e978850d0ec6350d137b287dda482d2
|
[
"BSD-2-Clause"
] |
permissive
|
xmbsn/vumi
|
https://github.com/xmbsn/vumi
|
801a393b27401ca25c0178bf8f0618556fc9742e
|
81e45da6224763be55ef4b94d92a09499a0b40d1
|
refs/heads/master
| 2021-01-16T20:06:44.411418 | 2012-04-16T10:10:47 | 2012-04-16T10:10:47 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- test-case-name: vumi.application.tests.test_message_store -*-
# -*- coding: utf-8 -*-
"""Message store."""
from uuid import uuid4
from datetime import datetime
from vumi.message import (TransportEvent, TransportUserMessage,
from_json, to_json, VUMI_DATE_FORMAT)
class MessageStore(object):
    """Vumi Go message store.

    HBase-like data schema:

    # [row_id] -> [family] -> [columns]

    batches:
      batch_id -> common -> ['tag']
               -> messages -> column names are message ids
               -> replies -> column names are inbound_message ids
    tags:
      tag -> common -> ['current_batch_id']
    messages:
      message_id -> body -> column names are message fields,
                            values are JSON encoded
                 -> events -> column names are event ids
                 -> batches -> column names are batch ids
    inbound_messages:
      message_id -> body -> column names are message fields,
                            values are JSON encoded
    events:
      event_id -> body -> column names are message fields,
                          values are JSON encoded

    Possible future schema tweaks for later:

    * third_party_ids table that maps third party message ids
      to vumi message ids (third_pary:third_party_id -> data
      -> message_id)
    * Consider making message_id "batch_id:current_message_id"
      (this makes retrieving batches of messages fast, it
      might be better to have "timestamp:current_message_id").
    """
    def __init__(self, r_server, r_prefix):
        # r_server: redis client exposing hmset/hgetall/hincrby;
        # r_prefix: namespace prepended to every key this store writes.
        self.r_server = r_server
        self.r_prefix = r_prefix
    def batch_start(self, tags):
        """Create a new batch bound to `tags`; return the new batch id.

        Each tag's 'current_batch_id' pointer is switched to this batch.
        """
        # NOTE(review): uuid4().get_hex() is Python-2-only API
        # (Python 3 uses the .hex attribute).
        batch_id = uuid4().get_hex()
        batch_common = {u'tags': tags}
        tag_common = {u'current_batch_id': batch_id}
        self._init_status(batch_id)
        self._put_common('batches', batch_id, 'common', batch_common)
        self._put_row('batches', batch_id, 'messages', {})
        for tag in tags:
            self._put_common('tags', self._tag_key(tag), 'common', tag_common)
        return batch_id
    def batch_done(self, batch_id):
        """Close a batch: detach its tags by nulling their batch pointer."""
        tags = self.batch_common(batch_id)['tags']
        tag_common = {u'current_batch_id': None}
        if tags is not None:
            for tag in tags:
                self._put_common('tags', self._tag_key(tag), 'common',
                                 tag_common)
    def add_outbound_message(self, msg, tag=None, batch_id=None):
        """Store an outbound message, optionally linking it to a batch.

        If no batch_id is given, the tag's current batch (if any) is used.
        """
        msg_id = msg['message_id']
        self._put_msg('messages', msg_id, 'body', msg)
        self._put_row('messages', msg_id, 'events', {})
        if batch_id is None and tag is not None:
            batch_id = self.tag_common(tag)['current_batch_id']
        if batch_id is not None:
            # Maintain the two-way message <-> batch index and counters.
            self._put_row('messages', msg_id, 'batches', {batch_id: '1'})
            self._put_row('batches', batch_id, 'messages', {msg_id: '1'})
            self._inc_status(batch_id, 'message')
            self._inc_status(batch_id, 'sent')
    def get_outbound_message(self, msg_id):
        """Return the stored outbound message as a TransportUserMessage."""
        return self._get_msg('messages', msg_id, 'body', TransportUserMessage)
    def add_event(self, event):
        """Store a delivery event and bump the counters of every batch
        containing the message it refers to."""
        event_id = event['event_id']
        self._put_msg('events', event_id, 'body', event)
        msg_id = event['user_message_id']
        self._put_row('messages', msg_id, 'events', {event_id: '1'})
        event_type = event['event_type']
        for batch_id in self._get_row('messages', msg_id, 'batches'):
            self._inc_status(batch_id, event_type)
    def get_event(self, event_id):
        """Return the stored event as a TransportEvent."""
        return self._get_msg('events', event_id, 'body',
                             TransportEvent)
    def add_inbound_message(self, msg, tag=None, batch_id=None):
        """Store an inbound message; record it as a reply on its batch."""
        msg_id = msg['message_id']
        self._put_msg('inbound_messages', msg_id, 'body', msg)
        if batch_id is None and tag is not None:
            batch_id = self.tag_common(tag)['current_batch_id']
        if batch_id is not None:
            self._put_row('batches', batch_id, 'replies', {msg_id: '1'})
    def get_inbound_message(self, msg_id):
        """Return the stored inbound message as a TransportUserMessage."""
        return self._get_msg('inbound_messages', msg_id, 'body',
                             TransportUserMessage)
    def batch_common(self, batch_id):
        """Return the batch's common family; tags come back as tuples
        (JSON round-tripping turns them into lists)."""
        common = self._get_common('batches', batch_id, 'common')
        tags = common['tags']
        if tags is not None:
            common['tags'] = [tuple(x) for x in tags]
        return common
    def batch_status(self, batch_id):
        """Return the cached per-event counters for a batch."""
        return self._get_status(batch_id)
    def tag_common(self, tag):
        """Return the tag's common family; unknown tags yield a null
        batch pointer rather than an empty dict."""
        common = self._get_common('tags', self._tag_key(tag), 'common')
        if not common:
            common = {u'current_batch_id': None}
        return common
    def batch_messages(self, batch_id):
        """List the ids of outbound messages in a batch."""
        return self._get_row('batches', batch_id, 'messages').keys()
    def batch_replies(self, batch_id):
        """List the ids of inbound replies attached to a batch."""
        return self._get_row('batches', batch_id, 'replies').keys()
    def message_batches(self, msg_id):
        """List the ids of batches an outbound message belongs to."""
        return self._get_row('messages', msg_id, 'batches').keys()
    def message_events(self, msg_id):
        """List the ids of events recorded against a message."""
        return self._get_row('messages', msg_id, 'events').keys()
    # batch status is stored in Redis as a cache of batch progress
    def _batch_key(self, batch_id):
        # Key layout: <prefix>:batches:status:<batch_id>
        return ":".join([self.r_prefix, "batches", "status", batch_id])
    def _init_status(self, batch_id):
        # Zero every counter (one per event type, plus 'message'/'sent').
        batch_key = self._batch_key(batch_id)
        events = TransportEvent.EVENT_TYPES.keys() + ['message', 'sent']
        initial_status = dict((event, '0') for event in events)
        self.r_server.hmset(batch_key, initial_status)
    def _inc_status(self, batch_id, event):
        batch_key = self._batch_key(batch_id)
        self.r_server.hincrby(batch_key, event, 1)
    def _get_status(self, batch_id):
        # Redis hashes hold strings; convert counters back to ints.
        batch_key = self._batch_key(batch_id)
        raw_statuses = self.r_server.hgetall(batch_key)
        statuses = dict((k, int(v)) for k, v in raw_statuses.items())
        return statuses
    # tag <-> batch mappings are stored in Redis
    def _tag_key(self, tag):
        # `tag` is a (pool, value) 2-tuple; flatten to "pool:value".
        return "%s:%s" % tag
    # interface to redis -- intentionally made to look
    # like a limited subset of HBase.
    def _get_msg(self, table, row_id, family, cls):
        """Load a row and rehydrate it as a vumi message of type `cls`."""
        payload = self._get_common(table, row_id, family)
        # TODO: this is a hack needed because from_json(to_json(x)) != x
        # if x is a datetime. Remove this once from_json and to_json
        # are fixed.
        payload['timestamp'] = datetime.strptime(payload['timestamp'],
                                                 VUMI_DATE_FORMAT)
        return cls(**payload)
    def _put_msg(self, table, row_id, family, msg):
        """Persist a vumi message's payload dict."""
        return self._put_common(table, row_id, family, msg.payload)
    def _get_common(self, table, row_id, family):
        """Retrieve and decode a set of JSON-encoded values."""
        data = self._get_row(table, row_id, family)
        pydata = dict((k.decode('utf-8'), from_json(v))
                      for k, v in data.items())
        return pydata
    def _put_common(self, table, row_id, family, pydata):
        """JSON-encode and update a set of values."""
        data = dict((k.encode('utf-8'), to_json(v)) for k, v
                    in pydata.items())
        return self._put_row(table, row_id, family, data)
    def _get_row(self, table, row_id, family):
        """Retreive a set of column values from storage."""
        r_key = self._row_key(table, row_id, family)
        return self.r_server.hgetall(r_key)
    def _put_row(self, table, row_id, family, data):
        """Update a set of column values in storage."""
        r_key = self._row_key(table, row_id, family)
        if data:
            # hmset with an empty dict would error; empty rows are a no-op.
            self.r_server.hmset(r_key, data)
    def _row_key(self, table, row_id, family):
        """Internal method for use by _get_row and _put_row."""
        return ":".join([self.r_prefix, table, family, row_id])
|
UTF-8
|
Python
| false | false | 2,012 |
2,199,023,271,187 |
85ae9ab33cf4f58d882c2091d9f62979c2d5ab08
|
76f99691ae4229afe280c9d02e558dc446025b18
|
/Language File Creators/compile-langs.py
|
e380fb4b5c85dc9533bc888eae28c79f0cf81841
|
[] |
no_license
|
lobenmai/Fur
|
https://github.com/lobenmai/Fur
|
ba9a04ab2f293b05da4c348d372b7d2f4464af1a
|
ba99c5cb02bb8079343a4a3947c05e50a5d8d4da
|
refs/heads/master
| 2021-05-16T02:02:10.173264 | 2014-04-22T02:55:14 | 2014-04-22T02:55:14 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import enpelt, english
import espelt, spanish
import xnpelt, xino
# Compile every language-pack module in one pass.
for language_module in (enpelt, english, espelt, spanish, xnpelt, xino):
    language_module.compile()
|
UTF-8
|
Python
| false | false | 2,014 |
12,232,066,892,170 |
2bd672ed394f693e55886164965f59f87d700ccc
|
6f96077c9fa706415030934e3c053109a673b84d
|
/gameserver/boardgame/utils/MessageProcessor.py
|
23675a00ee50c02b0c7b0298411e101a07035775
|
[
"BSD-3-Clause"
] |
permissive
|
leenmie/galaxy-poker
|
https://github.com/leenmie/galaxy-poker
|
e1a7bd00caf8a0c4be4b8ac79d7f58adee625486
|
4a8d86e9bebc3aaafb90ccd0fd647f5d78236f8a
|
refs/heads/master
| 2016-09-06T00:05:48.039463 | 2014-07-28T10:18:25 | 2014-07-28T10:18:25 | 22,338,687 | 3 | 4 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import logging
import traceback
import json
import boardgame.processors.JoinProcessor
import boardgame.processors.PartProcessor
import boardgame.processors.ReadyProcessor
import boardgame.processors.DebugDealProcessor
import boardgame.processors.AvatarProcessor
import boardgame.processors.ConvertCash2MoneyProcessor
import boardgame.processors.PingProcessor
import boardgame.processors.InfoProcessor
import boardgame.TexasPoker.processors.JoinProcessor
import boardgame.TexasPoker.processors.BetProcessor
#import boardgame.SimplePointGame.processors
LOGGER = logging.getLogger('gamedebug')
from Message import Message
from Player import Player
#from Game import Game
Processor_Mapper = {
'TexasPokerGame':{
'ping': 'boardgame.processors.PingProcessor.PingProcessor',
'join': 'boardgame.TexasPoker.processors.JoinProcessor.JoinProcessor',
'part': 'boardgame.processors.PartProcessor.PartProcessor',
'ready': 'boardgame.processors.ReadyProcessor.ReadyProcessor',
'bet': 'boardgame.TexasPoker.processors.BetProcessor.BetProcessor',
},
}
class getProcessor():
    """Resolve command names to processor classes.

    `mapper` maps command strings to dotted class paths (see
    Processor_Mapper); each path is resolved once at construction time.
    """
    def __init__(self, mapper):
        # NOTE(review): eval() on mapper values executes arbitrary code;
        # acceptable only while the mapper is the trusted in-module
        # Processor_Mapper constant, never user input.
        self._mapper = mapper
        self._processors = {}
        for command in self._mapper.keys():
            self._processors[command] = eval(self._mapper[command])
    def get_processor(self, command):
        """Return the processor registered for `command`, or None.

        Fixed: the original used ``dict.has_key``, which was removed in
        Python 3; ``dict.get`` gives the same None-on-miss behaviour on
        both Python 2 and 3 with a single lookup.
        """
        return self._processors.get(command)
class bodyParser():
    """legacy parser, delimiter is space"""
    def __init__(self, body):
        self._body = body
        self._parse()
    def _parse(self):
        # str.split(" ") always yields at least one token, so the first
        # token is the command and the remainder are the arguments.
        tokens = self._body.split(" ")
        self._command = tokens[0]
        self._arguments = tokens[1:]
    def get_command(self):
        """Return the command token (first space-delimited word)."""
        return self._command
    def get_arguments(self):
        """Return the list of argument tokens (possibly empty)."""
        return self._arguments
class JSONbodyParser(bodyParser):
    """Parser for JSON bodies: {"command": ..., "arguments": ...}."""
    def _parse(self):
        try:
            #print self._body
            message = json.loads(self._body)
            #print message
            self._command = message["command"]
            self._arguments = message["arguments"]
        except (ValueError, KeyError, TypeError):
            # Narrowed from a bare `except:` so signals such as
            # KeyboardInterrupt/SystemExit are no longer swallowed.
            # ValueError covers malformed JSON, KeyError a missing field,
            # TypeError a non-dict payload.
            raise JSONCommandException()
class JSONCommandException(Exception):
    """Raised when a message body is not a valid JSON command."""
    pass
class MessageProcessor():
    """Dispatch incoming space-delimited messages to game processors."""
    def __init__(self, game):
        # Pick the processor mapping matching the concrete game class.
        self._game = game
        class_name = self._game.__class__.__name__
        mapper = Processor_Mapper[class_name]
        self._getProcessor = getProcessor(mapper)
    def process(self, message):
        """Parse `message`, run the mapped processor, and flush output.

        Returns the processor's result; any failure (unknown command or
        processor exception) sends 'ERROR XX' back and returns False.
        """
        game = self._game
        #player = Player(self._player) ##to be working
        player = message.get_from_user()
        parser = bodyParser(message.get_body())
        command = parser.get_command()
        arguments = parser.get_arguments()
        pro = self._getProcessor.get_processor(command)
        process_result = False
        if pro:
            try:
                proc = pro(game, player, arguments)
                process_result = proc.process()
            except:
                # Deliberate best-effort: any processor failure is logged
                # and reported to the player instead of crashing the loop.
                #raise(Exception('MessageProcessor: unexpected error.'))
                #raise
                LOGGER.error('MessageProcessor: unexpected error.'+traceback.format_exc())
                process_result = False
        #else:
            #print 'Invalid command'
        #    self._game.send_output(player, 'ERROR XX')
        if not process_result:
            self._game.send_output(player, 'ERROR XX')
        self._game.commit_output()
        return process_result
class JSONMessageProcessor(MessageProcessor):
    """MessageProcessor variant for JSON bodies and JSON error replies."""
    def process(self, message):
        """Parse the JSON body, run the mapped processor, flush output.

        Returns the processor's result; a parse failure, unknown command
        or processor exception sends a JSON ERROR reply and returns False.
        """
        game = self._game
        process_result = False
        parser = None
        #player = Player(self._player) ##to be working
        player = message.get_from_user()
        try:
            parser = JSONbodyParser(message.get_body())
        except:
            # Best-effort: malformed bodies are logged, then reported
            # below via the JSON ERROR path (parser stays None).
            LOGGER.error('JSONMessageProcessor: unexpected error.'+traceback.format_exc())
            #pass
        if parser:
            command = parser.get_command()
            arguments = parser.get_arguments()
            pro = self._getProcessor.get_processor(command)
            if pro:
                try:
                    proc = pro(game, player, arguments)
                    process_result = proc.process()
                except:
                    # Same best-effort policy as MessageProcessor.process.
                    #raise(Exception('MessageProcessor: unexpected error.'))
                    #raise
                    LOGGER.error('JSONMessageProcessor: unexpected error.'+traceback.format_exc())
                    process_result = False
        #else:
            #print 'Invalid command'
        #    self._game.send_output(player, 'ERROR XX')
        if not process_result:
            self._game.send_JSON_output(player, {'command': 'ERROR', 'code':'XX'})
        self._game.commit_output()
        return process_result
|
UTF-8
|
Python
| false | false | 2,014 |
18,811,956,768,342 |
4fdc36248a217cbbf2120c8e32d0bbb07da78829
|
10315791e3780ec0dd0406289908ae6e91dc0413
|
/crunchy/src/pluginloader.py
|
6a78308aaa5fb7a1e181e2dfe6df1458ac71f841
|
[
"MIT"
] |
permissive
|
Yuffel/crunchy
|
https://github.com/Yuffel/crunchy
|
1adb3976f4dad610c7ebf37206ae9a656223ac39
|
a50b5657526d0799935965bc76f1edbcc04b15a4
|
refs/heads/master
| 2018-01-08T10:58:59.916672 | 2012-03-11T21:45:09 | 2012-03-11T21:45:09 | 52,207,480 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
pluginloader.py: Loading plugins
unit tests in test_pluginloader.rst
"""
import sys
import os
from imp import find_module
import os.path
import src.interface as interface
DEBUG = False
def gen_register_list(initial_list): # tested
    """generates a registration ordering from the dependencies.

    It could happen that some plugin would require (at loading time)
    some services provided by others.
    This function ensures that plugin will be loaded so as
    to ensure that "required" plugins are loaded before "requiring" ones.

    Note: `initial_list` is consumed in place; plugins whose requirements
    can never be satisfied are left in it.
    """
    ordered = []
    progress = True
    while initial_list and progress:
        progress = False
        # Union of everything the already-scheduled plugins provide.
        available = set()
        for scheduled in ordered:
            available.update(scheduled.provides)
        for candidate in initial_list:
            # Default missing dependency metadata to empty sets.
            if not hasattr(candidate, "requires"):
                candidate.requires = set()
            if not hasattr(candidate, "provides"):
                candidate.provides = set()
            if candidate.requires.issubset(available):
                ordered.append(candidate)
                initial_list.remove(candidate)
                progress = True
                break
    return ordered
def gen_plugin_list():
    '''looks for all python files in directory "plugins/", and assume
    that they are all "plugins".'''
    pluginpath = os.path.join(os.path.dirname(find_module("crunchy")[1]),
                              "src", "plugins/")
    try:
        pluginfiles = [x[:-3] for x in os.listdir(pluginpath) if x.endswith(".py")]
    except OSError:
        # if we get here, then pluginpath doesn't exist: try again with a slightly different one
        # (this is a fix for the .app distro, which has no "src" directory)
        pluginpath = os.path.join(os.path.dirname(find_module("crunchy")[1]), "plugins/")
        # Fixed: the original read `pluginfiles = pluginfiles = [...]`,
        # a redundant double assignment.
        pluginfiles = [x[:-3] for x in os.listdir(pluginpath) if x.endswith(".py")]
    return pluginfiles
def init_plugin_system(server):
    """load the plugins and has them self-register.

    `server` is stashed in the shared interface module for plugins to
    use; registration is skipped when called with ["testplugins"]
    (self-test mode).
    """
    plugins = gen_plugin_list()
    interface.server['server'] = server
    # In case Crunchy was not started from its root directory via
    # python crunchy.py, but instead from another directory like
    # python /this/path/to/crunchy.py
    # we need to add explictly the path to the
    sys.path.insert(0, os.path.join(interface.config['crunchy_base_dir'],
                                    "src", "plugins"))
    # another hack to make it work on a mac
    sys.path.insert(0, os.path.join(interface.config['crunchy_base_dir'], "plugins"))
    # In addition, add the same for the non-plugins files that are meant to be
    # imported by the user, such as graphics.py, etc.
    # For this, we always need the absolute path as the base path may be changed
    # by the user through some code execution.
    sys.path.insert(0, os.path.join(interface.config['crunchy_base_dir'],
                                    "src", "imports"))
    # another hack to make it work on a mac
    sys.path.insert(0, os.path.join(interface.config['crunchy_base_dir'], "imports"))
    imported_plugins = []
    if DEBUG:
        print("Importing plugins.")
    for plugin in plugins:
        try:
            mod = __import__ (plugin, globals())
            imported_plugins.append(mod)
        except:
            # Deliberate best-effort: one broken plugin must not stop the
            # rest from loading; it is reported and skipped.
            print("Could not import the following plugin:", plugin)
    # Order plugins so "required" ones register before "requiring" ones.
    register_list = gen_register_list(imported_plugins)
    if DEBUG:
        print("Registering plugins.")
    for mod in register_list:
        if hasattr(mod, "register"):
            if server != ["testplugins"]: # skip for self-testing
                mod.register()
            if DEBUG:
                print("  * Registered %s" % mod.__name__)
# Self-test entry point: load plugins verbosely without registering them.
if __name__ == "__main__":
    DEBUG = True
    init_plugin_system(["testplugins"])
|
UTF-8
|
Python
| false | false | 2,012 |
11,888,469,475,466 |
14ab4017d2fcddbd45d6ee43cef9962a061de614
|
f1f68be43eb98672a1b500373a73fd10dba07717
|
/fantasee/models.py
|
5c23e9915d946ede0c67c9d72decfccbfc12ba24
|
[] |
no_license
|
caseycrites/fantasee
|
https://github.com/caseycrites/fantasee
|
1f0dedaf16f795713f1eb77c83da32fa0f8faea6
|
1f5e2ac716e13b9c535a2b261ad3d233f41dffd4
|
refs/heads/master
| 2020-06-09T05:23:19.198797 | 2013-06-11T13:33:01 | 2013-06-11T13:33:01 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from collections import Counter
import re
from bs4 import BeautifulSoup
import requests
def sanitize_text(text):
    """Normalize scraped ESPN text: non-breaking spaces become '@'
    (the delimiter Player's regex keys on) and ', ' collapses to ','."""
    without_nbsp = text.replace(u'\xa0', u'@')
    return without_nbsp.replace(', ', ',')
class ESPNEntity(object):
    """Base class for objects scraped from ESPN fantasy pages."""
    # Root of all ESPN fantasy-games URLs.
    _BASE_URL = 'http://games.espn.go.com'
    # Sport name -> ESPN fantasy game code used in URLs.
    _SPORTS = {
        'baseball': 'flb',
        'basketball': 'fba',
        'football': 'ffl',
        'hockey': 'fhl'
    }
    # CSS selector locating this entity in a page; set by subclasses.
    dom_location = None
class Player(ESPNEntity):
    """A rostered player parsed from a team-roster table cell."""
    dom_location = 'td.playertablePlayerName'
    name = None
    team = None
    injured = False
    # NOTE(review): class-level mutable default shared by instances that
    # never go through from_soup -- from_soup always rebinds it per player.
    positions = []
    # Groups: (1) name, (2) optional '*' injury flag, (3) team code,
    # (4) '@'-separated positions (sanitize_text turns NBSP into '@').
    _soup_regex = re.compile(
        r'([^*,]+)(\*?),(\w+)@([^@]+)', flags=re.U
    )
    @classmethod
    def from_soup(cls, soup):
        """Build a Player from a roster-cell soup node."""
        player = cls()
        matched_player_info = player._soup_regex.match(sanitize_text(soup.text))
        player.name = matched_player_info.group(1)
        player.injured = matched_player_info.group(2) == '*'
        player.team = matched_player_info.group(3)
        player.positions = matched_player_info.group(4).split(',')
        return player
    def __str__(self):
        return '%s plays %s for %s' % (
            self.name, ', '.join(self.positions), self.team
        )
class FantasyTeam(ESPNEntity):
    """A fantasy team parsed from one roster table on the league page."""
    dom_location = 'table.playerTableTable'
    name = None
    players = []
    @classmethod
    def from_soup(cls, soup):
        """Build a FantasyTeam (name + players) from a roster-table node."""
        team = cls()
        team.name = soup.select('tr.playerTableBgRowHead > th > a')[0].text
        team.players = [
            Player.from_soup(markup) for markup in
            soup.select(Player.dom_location)
        ]
        return team
    def position_counts(self):
        """Return Counter of position -> number of rostered players
        (players with several positions count once per position)."""
        return Counter([val for sub in self.players for val in sub.positions])
    def injured_players(self):
        """Return the players flagged as injured ('*' on the roster)."""
        return [player for player in self.players if player.injured]
    def __str__(self):
        return '%s has %d players' % (self.name, len(self.players))
class League(ESPNEntity):
    """A fantasy league scraped from ESPN's leaguerosters page."""
    league_id = None
    sport = None
    teams = []
    @classmethod
    def from_sport_and_id(cls, sport, league_id):
        """Fetch and parse the league's rosters page.

        `sport` must be a key of ESPNEntity._SPORTS; performs an HTTP GET.
        """
        league = cls()
        league.league_id = league_id
        league.sport = sport
        resp = requests.get('%s/%s/leaguerosters?leagueId=%s' % (
            league._BASE_URL, league._SPORTS[sport], league.league_id
        ))
        soup = BeautifulSoup(resp.text)
        league.teams = [
            FantasyTeam.from_soup(markup) for markup
            in soup.select(FantasyTeam.dom_location)
        ]
        return league
    def __str__(self):
        return '%s is a %d-team %s league' % (
            self.league_id, len(self.teams), self.sport
        )
|
UTF-8
|
Python
| false | false | 2,013 |
6,021,544,195,172 |
62b0662426c324539556fd44ef7ca29283a52447
|
9ca6df194f9def6076a65237f0b89ba5fe16fc9a
|
/example_projects/test_project/settings.py
|
558dbbd2dab2375c70dadd57c0ad6233938b22f7
|
[] |
no_license
|
FlavioFalcao/marinemap
|
https://github.com/FlavioFalcao/marinemap
|
4332f80e9957cbb522e995a0b851483ff80215ae
|
c001e16615caa2178c65ca0684e1b6fd56d3f93d
|
refs/heads/master
| 2021-01-18T09:26:34.979627 | 2014-01-04T00:36:23 | 2014-01-04T00:36:23 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Django settings for oregon project.
# Builds on MarineMap's shared defaults; only project overrides live here.
from lingcod.common.default_settings import *
# NOTE(review): `os` is used here before the explicit `import os` below --
# presumably re-exported by default_settings' star import; verify.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATABASES = {
    'default': {
        'ENGINE': 'django.contrib.gis.db.backends.postgis',
        'NAME': 'test_project',
        'USER': 'postgres',
    }
}
TIME_ZONE = 'America/Vancouver'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = True
# NOTE(review): secret key committed to version control -- fine for a test
# project only; never reuse in production.
SECRET_KEY = '=knpq2es_kedoi-j1es=$o02nc*v$^=^8zs*&s@@nij@zev%m2'
WAVE_ID = 'wavesandbox.com!q43w5q3w45taesrfgs' # Your Google Wave Account - may not be needed
ROOT_URLCONF = 'test_project.urls'
TEMPLATE_DIRS = ( os.path.realpath(os.path.join(os.path.dirname(__file__), 'templates').replace('\\','/')), )
INSTALLED_APPS += ( 'lingcod.raster_stats', 'mlpa', )
# For some reason, running the raster_stats tests causes
# the xml test runner to fail to output the xml
EXCLUDE_FROM_TESTS.append('lingcod.raster_stats')
KML_EXTRUDE_HEIGHT = 700
import os
MEDIA_ROOT = os.path.realpath(os.path.join(os.path.dirname(__file__),'mediaroot'))
POSTGIS_TEMPLATE='template1'
# Optional machine-specific overrides.  Absence of settings_local is fine,
# but the original bare `except:` also hid genuine errors (syntax errors,
# failing imports) inside an existing settings_local -- now only a missing
# module is tolerated.
try:
    from settings_local import *
except ImportError:
    pass
|
UTF-8
|
Python
| false | false | 2,014 |
9,096,740,753,536 |
c362071bbf7151ddb11a1f8a8687355a24cf85b2
|
58e8aafb70a250936e638fabd9f5cbe899e39ca3
|
/src/python/blocks/apps/aggregator/models.py
|
3c81846d2f59f8417b7232d98f626a30838447c6
|
[] |
no_license
|
weijia/django-blocks
|
https://github.com/weijia/django-blocks
|
ec6d683f326ac8848041bd06c9033ff56e99402f
|
5c97c287b882d61d70483b1d0a1ccaf2b761e933
|
refs/heads/master
| 2021-01-10T02:14:28.503511 | 2014-03-04T17:06:05 | 2014-03-04T17:06:05 | 36,102,952 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.db import models
from django.contrib import admin
from django.utils.safestring import mark_safe
class Feed(models.Model):
    """An external RSS/Atom feed tracked by the aggregator."""
    title = models.CharField(max_length=300)
    # Machine-readable feed endpoint; uniqueness prevents duplicates.
    feed_url = models.URLField(unique=True, max_length=300)
    # Human-facing site the feed belongs to.
    public_url = models.URLField(max_length=300)
    class Meta:
        db_table = 'aggregator_feeds'
    def __unicode__(self):
        return self.title
class FeedAdmin(admin.ModelAdmin):
    # Admin list/search configuration for Feed.
    search_fields = ('title',)
    list_display = ('title', 'public_url',)
admin.site.register(Feed, FeedAdmin)
class FeedItem(models.Model):
    """A single entry fetched from a Feed, newest first."""
    feed = models.ForeignKey(Feed)
    title = models.CharField(max_length=300)
    link = models.URLField(max_length=300)
    summary_html = models.TextField(blank=True)
    content_html = models.TextField(blank=True)
    date_modified = models.DateTimeField()
    # Feed-supplied global id; indexed for de-duplication on refetch.
    guid = models.CharField(max_length=300, unique=True, db_index=True)
    def _get_summary(self):
        # mark_safe: HTML was stored as-is and is rendered unescaped.
        return mark_safe(self.summary_html)
    summary = property(_get_summary)
    def _get_content(self):
        # mark_safe: HTML was stored as-is and is rendered unescaped.
        return mark_safe(self.content_html)
    content = property(_get_content)
    class Meta:
        db_table = 'aggregator_feeditems'
        ordering = ("-date_modified",)
    def __unicode__(self):
        return self.title
    def get_absolute_url(self):
        # Items link out to the original article, not a local page.
        return self.link
|
UTF-8
|
Python
| false | false | 2,014 |
8,031,588,865,572 |
ef0ece581f75222b3bbc0524a2e151f90eaec9aa
|
879c90f5485f7fbdd9c65dd56eb73bca0e52ff18
|
/view/error.py
|
30ff26b7e924e6df4eb18e5424e41044f868c6b4
|
[] |
no_license
|
xi4nyu/MyMvc
|
https://github.com/xi4nyu/MyMvc
|
f54fd2e51c029a884a56e0b15ab0766344b7b844
|
b10aa709346cc74fbcaf9cb183720da836218bca
|
refs/heads/master
| 2016-09-03T06:34:19.672645 | 2013-03-07T05:28:12 | 2013-03-07T05:28:12 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# coding: utf-8
from core.web import BaseHandler
class NotFoundHandler(BaseHandler):
    """Catch-all handler that answers every request with HTTP 404."""
    def prepare(self):
        # Sending the error in prepare() (which per the BaseHandler
        # contract runs before the verb-specific handler) covers all
        # HTTP methods with one override.
        self.send_error(404)
|
UTF-8
|
Python
| false | false | 2,013 |
15,152,644,649,796 |
0c9ea1c4a3a9d6d2295c0f8ff5fc8a9e8bbb2613
|
73e02ee1e3537247f51781ce6d2bd4b0aa8f2e93
|
/dynamodb/condition.py
|
86cf8fa09a88241a0591faf75b96f2efd3b2a262
|
[] |
no_license
|
mulka/boto_mock
|
https://github.com/mulka/boto_mock
|
0f946b0844e4e4e81428abd5b2bf3b43cb7680d5
|
1c22b58fc52485c56a941764b0e1c460e35eb2f5
|
refs/heads/master
| 2020-06-06T11:29:56.164071 | 2012-06-07T03:31:29 | 2012-06-07T03:31:29 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from boto.dynamodb.types import dynamize_value
class Condition(object):
    """
    Base class for conditions. Doesn't do a darn thing but allows
    us to test if something is a Condition instance or not.
    """
    pass
class ConditionNoArgs(Condition):
    """
    Abstract class for Conditions that require no arguments, such
    as NULL or NOT_NULL.  The operator name is the subclass name.
    """
    def __repr__(self):
        return type(self).__name__
    def to_dict(self):
        # DynamoDB wire format: just the operator, no value list.
        return {'ComparisonOperator': type(self).__name__}
class ConditionOneArg(Condition):
    """
    Abstract class for Conditions that require a single argument
    such as EQ or NE.  The operator name is the subclass name.
    """
    def __init__(self, v1):
        self.v1 = v1
    def __repr__(self):
        return '%s:%s' % (type(self).__name__, self.v1)
    def to_dict(self):
        # DynamoDB wire format: one serialized value plus the operator.
        operator = type(self).__name__
        return {
            'AttributeValueList': [dynamize_value(self.v1)],
            'ComparisonOperator': operator,
        }
class ConditionTwoArgs(Condition):
    """
    Abstract class for Conditions that require two arguments.
    The only example of this currently is BETWEEN.
    """
    def __init__(self, v1, v2):
        self.v1 = v1
        self.v2 = v2
    def __repr__(self):
        return '%s(%s, %s)' % (type(self).__name__, self.v1, self.v2)
    def to_dict(self):
        # DynamoDB wire format: both serialized bounds plus the operator.
        serialized = [dynamize_value(bound) for bound in (self.v1, self.v2)]
        return {
            'AttributeValueList': serialized,
            'ComparisonOperator': type(self).__name__,
        }
"""
class EQ(ConditionOneArg):
pass
class NE(ConditionOneArg):
pass
class LE(ConditionOneArg):
pass
class LT(ConditionOneArg):
pass
class GE(ConditionOneArg):
pass
class GT(ConditionOneArg):
pass
class NULL(ConditionNoArgs):
pass
class NOT_NULL(ConditionNoArgs):
pass
class CONTAINS(ConditionOneArg):
pass
class NOT_CONTAINS(ConditionOneArg):
pass
class BEGINS_WITH(ConditionOneArg):
pass
class IN(ConditionOneArg):
pass
class BEGINS_WITH(ConditionOneArg):
pass
"""
# The only concrete operator currently enabled (the rest are commented
# out above); the class name is serialized as the ComparisonOperator.
class BETWEEN(ConditionTwoArgs):
    pass
|
UTF-8
|
Python
| false | false | 2,012 |
18,296,560,708,262 |
398c14ab5f0d28302057b193d516a89f65d6fc17
|
0b7e12394b259d7cf0afedaa2b4cd220539e1f89
|
/articles/templatetags/rss.py
|
9d5b511bcd30ffb6da79fca78695ef3d66aed131
|
[
"MIT"
] |
permissive
|
platypus-creation/django-articles
|
https://github.com/platypus-creation/django-articles
|
a4b3c61589b2a868aca4044953c1fb7357ed31fd
|
8cf768ac4e03c8a5da9e54eac142ac49b3269009
|
refs/heads/master
| 2021-01-23T20:46:45.962168 | 2012-09-03T13:38:31 | 2012-09-03T13:38:31 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django import template
from django.core.urlresolvers import reverse
from django.conf import settings
register = template.Library()

@register.inclusion_tag('articles/rss.html', takes_context=True)
def rss(context, override=None):
    """Render the RSS include tag.

    Uses `override` as the feed URL when given; otherwise builds an
    absolute URL to the 'rss_feed' view for the request's language.
    Returns an empty context when no request is available.
    """
    if 'request' not in context:
        return {}
    request = context['request']
    # Site default language, superseded by the request's language when set.
    language = settings.LANGUAGES[settings.DEFAULT_LANGUAGE - 1][0]
    if hasattr(request, 'LANGUAGE_CODE'):
        language = request.LANGUAGE_CODE
    language = language[:2]
    # Fixed idiom: `X and X or Y` is the fragile pre-2.5 conditional
    # trick; for this truthiness test it reduces to `X or Y`.
    url = override or request.build_absolute_uri(
        reverse('rss_feed', args=[language]))
    return {'url': url}
|
UTF-8
|
Python
| false | false | 2,012 |
1,915,555,436,992 |
81f712c735fa2817f4a4b44257280c25fd4aef1c
|
585ae7cc811433953f518891d745242891072b3a
|
/src/scripts/buildtest/lookup.py
|
9536c0b88a2b7e43cacd50d1fb6f65d8f06adfc6
|
[
"GPL-3.0-or-later",
"GPL-3.0-only",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-other-permissive"
] |
non_permissive
|
florence-amaddine/snac-nox
|
https://github.com/florence-amaddine/snac-nox
|
2a33939ba128f47f8ad35bf0b793e3457f51c5c0
|
e45bf1f0f35821c447f288701be7851d941b4c2e
|
refs/heads/master
| 2020-05-30T11:54:00.108330 | 2011-03-25T15:25:18 | 2011-03-25T15:25:18 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
import matplotlib
matplotlib.use('Agg')
import pickle
import pwd
import os
import info
import graph
def create_image(argv):
    """Rebuild a performance graph from command-line arguments.

    argv[1:] must supply, in order: profile (user, machine, run_date),
    build (commit, last_author, build_date), test (configuration,
    command, packets, rules, policies), result (total, user, system),
    then the independent and dependent variable names for the plot.
    """
    p = info.Profile()
    b = info.Build()
    t = info.Test()
    r = info.Result()
    values = []
    for v in argv[1:]:
        # Reconstruct typed values from their string form.
        if v == 'True':
            values.append(True)
        elif v == 'False':
            values.append(False)
        elif v == 'None':
            values.append(None)
        else:
            try:
                values.append(float(v))
            except:
                # Not numeric -- keep the raw string.
                values.append(v)
    (p.user, p.machine, p.run_date, \
     b.commit, b.last_author, b.build_date, \
     t.configuration, t.command, t.packets, t.rules, t.policies, \
     r.total, r.user, r.system, ind, dep) = values
    # When all fields of a group are identical the caller passed a single
    # wildcard value; collapse the group to that scalar for the search.
    if p.user == p.machine == p.run_date:
        p = p.user
    if b.commit == b.last_author == b.build_date:
        b = b.commit
    if t.configuration == t.command == t.packets == t.rules == t.policies:
        t = t.configuration
    if r.total == r.user == r.system:
        r = r.total
    # Load the archived datapoints for the invoking user's web directory.
    user = pwd.getpwuid(os.getuid())[0]
    input = '/var/www/buildtest/' + user +'/archive/performance.pkl'
    raw_points = pickle.load(open(input,'r'))
    g = graph.Grapher(raw_points,'librarian')
    search = info.RawData(p, b, t, r)
    print search
    g.graph(ind, dep, search)
# CLI entry point: forward the raw argv straight to create_image.
if __name__ == "__main__":
    import sys
    create_image(sys.argv)
|
UTF-8
|
Python
| false | false | 2,011 |
18,159,121,744,845 |
0c8050437ad4d290d2e989b7f751aeb1f3d05ddd
|
a23eba06ad0382d090a306e4c9a9700176b20ad3
|
/standard/reports/CustomerActivitiesReport.py
|
35525b6fcdb2c0ad1270a67ddb720cfbbb6ed87c
|
[] |
no_license
|
koapesrl/segupak
|
https://github.com/koapesrl/segupak
|
08df882b4ae4ca10845f5fc778c760bdfcbcd5ac
|
5dae4b193b8d0ff5ea6e0e1f0e6932b074f4382b
|
refs/heads/master
| 2016-06-02T15:04:01.516940 | 2013-08-17T17:04:36 | 2013-08-17T17:04:36 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#encoding: utf-8
from OpenOrange import *
from Report import Report
class CustomerActivitiesReport(Report):
    """Per-customer report listing vouchers, cases and activities in a
    date range, with a grand total of hours at the end."""
    def defaults(self):
        # No extra defaults beyond the base Report.
        Report.defaults(self)
    def run(self):
        """Render the whole report for the customer/date range held in
        the report's record (CustCode, FromDate, ToDate)."""
        self.printReportTitle("Activities for Customers")
        from Customer import Customer
        customer = Customer()
        customer.Code = self.getRecord().CustCode
        customer.load()
        self.startTable()
        self.startHeaderRow()
        self.addValue(customer.Code)
        self.addValue(customer.Name)
        self.endHeaderRow()
        self.endTable()
        self.startTable()
        # user code -> user name, for resolving activity participants.
        self.users = {}
        # Activity ids already rendered as vouchers; showActivities skips
        # them to avoid double-counting.
        self.act_to_avoid = []
        query = Query()
        query.sql = "SELECT {Code}, {Name} FROM [User]"
        # Running total across all three sections.
        self.duration = timedelta()
        if query.open():
            for rec in query:
                self.users[rec.Code] = rec.Name
        self.showVouchers()
        self.showCases()
        self.showActivities()
        self.startTable()
        # timedelta -> hours: fractional hours from seconds + whole days.
        self.header("Total Horas: ", str(round(self.duration.seconds / 3600.0 + self.duration.days * 24,2)))
        self.endTable()
    def showVouchers(self):
        """Render the vouchers section and add their hours to the total."""
        from Voucher import Voucher
        specs = self.getRecord()
        query = Query()
        query.sql = "SELECT {SerNr} FROM [Voucher]"
        query.sql += "WHERE?AND {TransDate} BETWEEN d|%s| AND d|%s|" % (specs.FromDate, specs.ToDate)
        query.sql += "WHERE?AND {CustCode} = s|%s|" % (specs.CustCode)
        query.sql += "ORDER BY {TransDate}, {FromTime}"
        self.startTable()
        if query.open():
            if (query.count()):
                self.headerB("Vouchers")
                self.headerA("Date", "Voucher Nr", "Consultant", "Start", "End", "Duration", "Details")
                for rec in query:
                    voucher = Voucher.bring(rec.SerNr)
                    self.startRow()
                    self.addValue(voucher.TransDate)
                    self.addValue(voucher.SerNr)
                    self.addValue(voucher.ConsultantName)
                    self.addValue(voucher.FromTime)
                    self.addValue(voucher.ToTime)
                    self.addValue(voucher.TotalHours)
                    self.addValue(voucher.Comment)
                    self.duration += timedelta(hours = voucher.TotalHours)
                    self.endRow()
                    if voucher.ActivityId:
                        # Remember the linked activity so showActivities
                        # does not render (and count) it again.
                        self.act_to_avoid.append(voucher.ActivityId)
        self.endTable()
    def showCases(self):
        """Render the cases section (no hours are accumulated here)."""
        from Case import Case
        from Status import Status
        specs = self.getRecord()
        query = Query()
        query.sql = "SELECT {SerNr} FROM [Case]"
        query.sql += "WHERE?AND {TransDate} BETWEEN d|%s| AND d|%s|" % (specs.FromDate, specs.ToDate)
        query.sql += "WHERE?AND {CustCode} = s|%s|" % (specs.CustCode)
        query.sql += "ORDER BY {TransDate}, {FromTime}"
        self.startTable()
        if query.open():
            if (query.count()):
                self.headerB("Cases")
                self.headerA("Opened", "Closed", "Case Nr", "Asignado","Status", "Details")
                for rec in query:
                    case = Case.bring(rec.SerNr)
                    self.startRow()
                    self.addValue(case.TransDate)
                    if case.Status: #Closed
                        self.addValue(case.FinishDate)
                    else:
                        self.addValue(tr("Open"))
                    self.addValue(case.SerNr)
                    self.addValue(case.Asignee)
                    self.addValue(Status.getComment(case.State))
                    if case.Events.count():
                        # First event's comment doubles as the case detail.
                        self.addValue(case.Events[0].Comment)
                    else:
                        self.addValue("")
                    self.endRow()
        self.endTable()
    def showActivities(self):
        """Render remaining activities (one row per participant) and add
        their durations to the total; skips voucher-linked activities."""
        from Activity import Activity
        specs = self.getRecord()
        query = Query()
        query.sql = "SELECT {SerNr} FROM [Activity]"
        query.sql += "WHERE?AND {StartDate} BETWEEN d|%s| AND d|%s|" % (specs.FromDate, specs.ToDate)
        query.sql += "WHERE?AND {CustCode} = s|%s|" % (specs.CustCode)
        query.sql += "ORDER BY {StartDate}, {StartTime}"
        self.startTable()
        if query.open():
            if (query.count()):
                self.headerB("Activities")
                self.headerA("Date", "Start", "End", "Person","Comment", "Duration", "Detail")
                for rec in query:
                    if rec.SerNr not in self.act_to_avoid:
                        act = Activity.bring(rec.SerNr)
                        for user in act.getUsersList():
                            self.startRow()
                            self.addValue(act.StartDate)
                            self.addValue(act.StartTime)
                            self.addValue(act.EndTime)
                            self.addValue(self.users.get(user, ""))
                            self.addValue(act.Comment,Width=250)
                            self.addValue(act.Duration)
                            self.addValue(act.Detail)
                            # NOTE(review): duration is added once per
                            # participant -- confirm that is intended.
                            self.duration += timeDiff(act.Duration,time(0,0,0))
                            self.endRow()
        self.endTable()
|
UTF-8
|
Python
| false | false | 2,013 |
4,157,528,379,373 |
eb934748907f03aaaf76ffd23ea4c5c7b15f39f1
|
a3faa8d2bbf7e669ce1003880b14f858d93102ce
|
/socket_server.py
|
32ab7d6f9085a90ecaa5438f9d1c7897f0029f4b
|
[] |
no_license
|
iwanjek/ROV-control-server
|
https://github.com/iwanjek/ROV-control-server
|
5018a6982e90ce8552558ff1004fdd0d74af3411
|
2f01c6fabe21de94fc41aabc9599fd0a20f4c971
|
refs/heads/master
| 2021-01-18T04:23:26.347745 | 2014-02-28T23:06:50 | 2014-02-28T23:06:50 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
'''
Basic template for the websocket portion of the control server.
To connect to the websocket, create a websocket in JavaScript
that connects to ws://<server_ip>:8888/command_ws and/or
ws://<server_ip>:8888/arm_ws and begin sending messages (the
messages print to the console for now).
This server will be run in its own thread, as a module
'''
import tornado.httpserver
import tornado.websocket
import tornado.ioloop
import tornado.web
import json
# handles data to and from command UI
class CommandWSHandler(tornado.websocket.WebSocketHandler):
    """WebSocket endpoint for the command UI (served at /command_ws).

    Parses incoming JSON command messages and echoes the raw message back."""
    def open(self):
        # New UI client connected; greet it so the client can confirm the link.
        print 'new connection'
        self.write_message("Hello World")
    def on_message(self, message):
        # print 'message received %s' % message
        self.parse_message(message)
        # Echo the raw message back to the client as an acknowledgement.
        self.write_message(message);
    def on_close(self):
        print 'connection closed'
    def parse_message(self, msg):
        #this will take the received JSON data and perform the appropriate actions
        # Expects JSON with keys: thrust, extend, claw, camera
        # (raises KeyError if any is missing).
        command_api = json.loads(msg)
        print "Thrust: ", command_api['thrust']
        print "Extend: ", command_api['extend']
        print "Grasp: ", command_api['claw']
        print "Camera: ", command_api['camera']
# handles data to and from the arm UI
class ArmWSHandler(tornado.websocket.WebSocketHandler):
    """WebSocket endpoint for the robot-arm UI (served at /arm_ws).

    Currently a stub: messages are received and logged, and parse_message
    is a placeholder for the actual arm-command dispatch."""
    def open(self):
        print('Arm command link is active.')
        # send an acknowledgement that the arm is now under user control
        #self.write_message("Hello World")
    def on_message(self, message):
        print('message received %s' % message)
        # BUG FIX: was self.parseMessage(message) -- no such method exists,
        # so every incoming arm message raised AttributeError.
        self.parse_message(message)
    def on_close(self):
        # log that the connection with the UI was lost.
        pass
    def parse_message(self, msg):
        # this will take the received JSON data and perform the appropriate actions
        pass
######## from here down will need to be modified to set up other threads #####
# Route table: both websocket endpoints live on the same application.
application = tornado.web.Application([
    (r'/command_ws', CommandWSHandler),
    (r'/arm_ws', ArmWSHandler),
])
if __name__ == "__main__":
    # Serve the websockets on port 8888 and block on the tornado IO loop.
    http_server = tornado.httpserver.HTTPServer(application)
    http_server.listen(8888)
    tornado.ioloop.IOLoop.instance().start()
|
UTF-8
|
Python
| false | false | 2,014 |
11,776,800,352,617 |
b2fdc7d330100fc565c7f3d45bdc8ebfc86daafc
|
1fbcfc5a67cd5549f78b2cdc28a12a305de95d46
|
/tasks/settings.py
|
9989383515abb91ed466c5a317c401d16dc5148d
|
[] |
no_license
|
kristjanr/tasks
|
https://github.com/kristjanr/tasks
|
7eae7bc89336742b0dc0355f3de3e19c469f90c4
|
c4a1d9633d22b0fe6564fcd328987c3c4f982515
|
refs/heads/master
| 2020-07-08T02:54:52.360647 | 2014-09-15T03:22:54 | 2014-09-15T03:22:54 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
Django settings for tasks project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))

# Uploaded-media storage location and URL prefix.
MEDIA_ROOT = BASE_DIR + "/media/"
MEDIA_URL = '/media/'

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repository; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'qk@lhmatzr$3fu=eua$urd73me&7@7louxx4v5=6p20)+_2f%m'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
TEMPLATE_DEBUG = False
TEMPLATE_DIRS = [os.path.join(BASE_DIR, 'templates')]

# NOTE(review): '*' accepts any Host header -- restrict to real hostnames
# in production.
ALLOWED_HOSTS = ['*']

# Application definition

INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'sidrun',
    'django_summernote',
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

ROOT_URLCONF = 'tasks.urls'

WSGI_APPLICATION = 'tasks.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'tasks',
        # NOTE(review): os.getlogin() requires a controlling terminal and
        # fails under many service managers -- consider getpass.getuser()
        # or an environment variable instead.
        'USER': os.getlogin(),
        'PASSWORD': '',
        'HOST': 'localhost',
        'PORT': '5432'
    }
}

# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/

LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Tallinn'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/

# NOTE(review): relative path -- resolved against the process CWD, not
# BASE_DIR; verify this is intended.
STATIC_ROOT = 'sidrun/static/'
STATIC_URL = '/static/'

# more config options: https://github.com/lqez/django-summernote#options
SUMMERNOTE_CONFIG = {
    # Change editor size
    'width': '90%',
    'height': '300',
    'toolbar': [["style", ["style"]],
                ["font", ["bold", "italic", "underline", "superscript", "subscript", "strikethrough", "clear"]],
                ["fontname", ["fontname"]], ["color", ["color"]], ["para", ["ul", "ol", "paragraph"]],
                ["height", ["height"]], ["table", ["table"]], ["insert", ["link", "video", "hr"]],
                ["view", ["fullscreen", "codeview"]], ["help", ["help"]]]
}
|
UTF-8
|
Python
| false | false | 2,014 |
13,752,485,319,296 |
0a651418de519e4baa6118965f0436f8741e586f
|
d4e74b3321bdeff2cf28762859114d47d6d8b4b2
|
/src/python/Narith/base/Protocols/Arp.py
|
3e876e7c8bd845f13c1a7fa21c38e8196fea4c79
|
[] |
no_license
|
lnxg33k/Narith
|
https://github.com/lnxg33k/Narith
|
0e761064b919e4d802acc917c1add9942de8c3f3
|
545a4d84d20f85f013701ae68adb33e3123f8b1d
|
refs/heads/master
| 2021-01-18T11:01:32.650417 | 2013-08-17T19:19:12 | 2013-08-17T19:19:12 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
'''
[Narith]
File : Arp.py
Author : Saad Talaat
Date : 19th July 2013
brief : Structure to hold ARP
'''
from Narith.base.Packet.Protocol import Protocol
class Arp(Protocol):
'''
Fields:
Hardware Type.
Protocol type.
Hardware size.
Protocol size.
Opcode.
sender mac.
sender ip.
target mac.
target ip.
'''
__htypes = {1 : 'Ethernet'}
__ptypes = { 0x800 : 'IP' }
__opcodes = {
1: 'request',
2: 'reply',
3: 'request-reserve',
4: 'reply-reserve'}
def __init__(self,y):
super(Arp, self).__init__()
self.__arp = { 'htype' : None }
self.__sarp = {'htype' : None}
self.__arp['htype'] = int(y[:2].encode('hex'),16)
self.__arp['ptype'] = int(y[2:4].encode('hex'),16)
self.__arp['hsize'] = int(y[4:5].encode('hex'),16)
self.__arp['psize'] = int(y[5:6].encode('hex'),16)
self.__arp['opcode'] = int(y[6:8].encode('hex'),16)
self.__arp['src_mac'] = y[8:14]
self.__arp['src_ip'] = y[14:18]
self.__arp['dst_mac'] = y[18:24]
self.__arp['dst_ip'] = y[24:28]
self.__sarp['htype'] = self.__htypes[self.__arp['htype']]
self.__sarp['ptype'] = self.__ptypes[self.__arp['ptype']]
self.__sarp['hsize'] = str(self.__arp['hsize'])
self.__sarp['psize'] = str(self.__arp['psize'])
self.__sarp['opcode'] = self.__opcodes[self.__arp['opcode']]
self.__sarp['src_mac'] = map(hex, map(ord, self.__arp['src_mac']))
self.__sarp['src_ip'] = ".".join( map(str, map(ord, self.__arp['src_ip'])))
self.__sarp['dst_mac'] = map(hex,map(ord,self.__arp['dst_mac']))
self.__sarp['dst_ip'] = ".".join(map(str,map(ord,self.__arp['dst_ip'])))
''' Fix string macs '''
self.__macFix('src_mac')
self.__macFix('dst_mac')
def __macFix(self,key):
tmp = []
for i in self.__sarp[key]:
if(len(i) == 3):
i = '0x0'+i[2]
tmp.append(i)
self.__sarp[key] = ":".join("".join(tmp).split("0x")[1:])
##########################
# Properties
@property
def src(self):
return self.src_mac,self.src_ip
@src.setter
def src(self,val):
if type(val) != tuple:
raise ValueError, "Malformed Value"
elif len(val) != 2:
raise ValueError, "Malformed Value"
self.src_mac = val[0]
self.src_ip = val[1]
@property
def src_mac(self):
return self.__sarp['src_mac']
@src_mac.setter
def src_mac(self,val):
if (type(val) != str) or ( len(val.split(":")) != 6):
raise ValueError, "Malformed value"
self.__sarp['src_mac'] = val
self.__arp['src_mac'] = "".join([chr(j) for j in [int(c,base=16) for c in self.__sarp['src_mac'].split(":")]])
@property
def src_ip(self):
return self.__sarp['src_ip']
@src_ip.setter
def src_ip(self,val):
if (type(val) != str) or ( len(val.split(".")) != 4):
raise ValueError, "Malformed value"
self.__sarp['src_ip'] = val
self.__arp['src_ip'] = "".join([chr(int(j)) for j in val.split(".")])
@property
def target(self):
return self.target_mac, self.target_ip
@target.setter
def target(self,val):
if type(val) != tuple:
raise ValueError, "Malformed Value"
elif len(val) != 2:
raise ValueError, "Malformed Value"
self.target_mac = val[0]
self.target_ip = val[1]
@property
def target_mac(self):
return self.__sarp['dst_mac']
@target_mac.setter
def target_mac(self,val):
if (type(val) != str) or ( len(val.split(":")) != 6):
raise ValueError, "Malformed value"
self.__sarp['dst_mac'] = val
self.__arp['dst_mac'] = "".join([chr(j) for j in [int(c,base=16) for c in self.__sarp['src_mac'].split(":")]])
@property
def target_ip(self):
return self.__sarp['dst_ip']
@target_ip.setter
def target_ip(self,val):
if (type(val) != str) or ( len(val.split(".")) != 4):
raise ValueError, "Malformed value"
self.__sarp['dst_ip'] = val
self.__arp['dst_ip'] = "".join([chr(int(j)) for j in val.split(".")])
@property
def hardware_type(self):
return self.__sarp['htype']
@property
def opcode(self):
return self.__sarp['opcode']
@opcode.setter
def opcode(self,val):
if (type(val) is not str) or val not in self.__opcodes.values():
raise ValueError, "Malformed value"
self.__sarp['opcode'] = val
for k,v in self.__opcodes.iteritems():
if v == val:
self.__arp['opcode'] = k
@property
def length(self):
return 8 + self.__arp['hsize']*2 + self.__arp['psize']*2
|
UTF-8
|
Python
| false | false | 2,013 |
8,847,632,644,355 |
9442975ba63938024743cf4e6b12fde2d76e1cda
|
19274e940388535b6c18334c03ba4b01ccde0e9a
|
/code/analysis/get_ALL_state_pops.py
|
b4e94b9b8e1f9356ec5f5e5b233be3b4289230fe
|
[
"GPL-3.0-only"
] |
non_permissive
|
kyleabeauchamp/EnsemblePaper
|
https://github.com/kyleabeauchamp/EnsemblePaper
|
332182e49763a344db1d37c9596c2fd2f7e37075
|
5ff85a6ed7df70c11933bfac64ae20993f57442a
|
refs/heads/master
| 2020-05-17T07:46:41.931857 | 2014-02-26T18:41:50 | 2014-02-26T18:41:50 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy as np
import experiment_loader
import ALA3
from fitensemble import belt
import itertools
import sys
bayesian_bootstrap_run = 1
num_threads = 3
# Worker rank (CLI arg): which slice of the grid this process handles.
rank = int(sys.argv[1])
grid = itertools.product(ALA3.ff_list, ALA3.prior_list)
# NOTE(review): the full grid above is immediately overridden with a single
# (forcefield, prior) pair -- looks like a debugging leftover; one of these
# two assignments should be removed.
grid = [("oplsaa", "maxent")]
for k, (ff, prior) in enumerate(grid):
    # Round-robin split of the grid across `num_threads` worker ranks.
    if k % num_threads == rank:
        print(ff, prior)
        regularization_strength = ALA3.regularization_strength_dict[prior][ff]
        predictions, measurements, uncertainties = experiment_loader.load(ff)
        phi, psi, ass_raw, state_ind = experiment_loader.load_rama(ff, ALA3.stride)
        pymc_filename = ALA3.data_directory + "/models/model_%s_%s_reg-%.1f-BB%d.h5" % (ff, prior, regularization_strength, bayesian_bootstrap_run)
        belt_model = belt.BELT.load(pymc_filename)
        # Project the sampled populations through the state indicator matrix.
        state_pops_trace = belt_model.trace_observable(state_ind.T)
        state_pops = state_pops_trace.mean(0)
        state_uncertainties = state_pops_trace.std(0)
        out_directory = ALA3.data_directory + "state_populations/"
        np.savez_compressed(out_directory + "state_populations_%s_%s_reg_%.1f_BB%d.npz" % (ff, prior, ALA3.regularization_strength_dict[prior][ff], bayesian_bootstrap_run), state_pops_trace)
|
UTF-8
|
Python
| false | false | 2,014 |
5,823,975,666,777 |
52b40d6e3ded1804a4da702465228a8ce71ee54a
|
270761b76e92105dbd3bbf085948707e0f7a3ea7
|
/twilionagios/twilio_nagios.py
|
75cff79f7f13bbd9276e1fb1bb9e29bf3babf524
|
[
"Apache-2.0"
] |
permissive
|
managedit/twilionagios
|
https://github.com/managedit/twilionagios
|
4c265a4001958529d37b37ab485d0b0808b05a5f
|
73b53f593c08e5afbcc62a76ff8c3c62083ca604
|
refs/heads/master
| 2021-01-15T17:02:52.628750 | 2012-03-25T19:32:13 | 2012-03-25T19:32:13 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
import re
import urllib
import sys
from syslog import syslog
from twisted.web.resource import Resource
HOST_STATE_MSG = {
0: 'up',
1: 'down',
2: 'unreachable'
}
SERVICE_STATE_MSG = {
0: 'ok',
1: 'warning',
2: 'critical',
3: 'unknown'
}
class TwilioNagios(Resource):
isLeaf = True
def __init__(self, objects_file, status_file, external_file):
self.objects = objects_file
self.status = status_file
self.external = external_file
def render(self, request):
request.setHeader('Content-Type', 'text/xml')
# Reload the nagios data files
self.status_parsed = self.parse_status()
self.objects_parsed = self.parse_objects()
# Parse the request..
try:
action, hostname, service = request.postpath
hostname = urllib.unquote_plus(hostname)
service = urllib.unquote_plus(service)
except (KeyError, ValueError):
return '<Response/>'
# Trigger the correct action
if action == 'host':
response = self.hostalert(request, hostname)
elif action == 'hostaction':
response = self.hostaction(request, hostname)
elif action == 'service':
response = self.servicealert(request, hostname, service)
elif action == 'serviceaction':
response = self.serviceaction(request, hostname, service)
return response
def hostalert(self, request, hostname):
host_data = self.objects_parsed[('host', hostname)]
status_data = self.status_parsed[('host', hostname)]
state = int(status_data['current_state'])
response = """
<Response>
<Say>ALERT! Host %s is %s, I repeat, the host %s is %s</Say>
<Gather action="/hostaction/%s/host" method="GET" numDigits="1">
<Say>Press 1 to acknowledge</Say>
<Say>Press 2 to disable alerts for this host</Say>
</Gather>
<Say>We didn't receive any input. Goodbye!</Say>
</Response> """ % (hostname,
HOST_STATE_MSG[state],
hostname,
HOST_STATE_MSG[state],
urllib.quote_plus(hostname))
return response
def hostaction(self, request, hostname):
digit = int(request.args['Digits'][0])
cid = request.args['To'][0]
if digit == 1:
# Acknowledge Service Issue
response = """
<Response>
<Say>Acknowledging this service issue. Goodbye!</Say>
</Response> """
with open(self.external, "w") as f:
f.write("[0] ACKNOWLEDGE_HOST_PROBLEM;%s;1;1;1;Twilio;Ackd via Twilio by %s\n" % (hostname, cid))
elif digit == 2:
# Disable Host Alerts
response = """
<Response>
<Say>Disabling alerts for this host. Goodbye!</Say>
</Response> """
with open(self.external, "w") as f:
f.write("[0] DISABLE_HOST_NOTIFICATIONS;%s\n" % (hostname))
else:
response = """
<Response>
<Say>Invalid choice.</Say>
<Redirect method="GET">/host/%s/host</Redirect>
</Response> """ % (urllib.quote_plus(hostname))
return response
def servicealert(self, request, hostname, service):
host_data = self.objects_parsed[('host', hostname)]
status_data = self.status_parsed[(service, hostname)]
state = int(status_data['current_state'])
response = """
<Response>
<Say>ALERT! Service %s on host %s is %s, I repeat, Service %s on host %s is %s</Say>
<Gather action="/serviceaction/%s/%s" method="GET" numDigits="1">
<Say>Press 1 to acknowledge this service issue</Say>
<Say>Press 2 to disable alerts for this service</Say>
</Gather>
<Say>We didn't receive any input. Goodbye!</Say>
</Response> """ % (service,
hostname,
SERVICE_STATE_MSG[state],
service,
hostname,
SERVICE_STATE_MSG[state],
urllib.quote_plus(hostname),
urllib.quote_plus(service))
return response
def serviceaction(self, request, hostname, service):
digit = int(request.args['Digits'][0])
cid = request.args['To'][0]
if digit == 1:
# Acknowledge Service Issue
response = """
<Response>
<Say>Acknowledging this service issue. Goodbye!</Say>
</Response> """
with open(self.external, "w") as f:
f.write("[0] ACKNOWLEDGE_SVC_PROBLEM;%s;%s;1;1;1;Twilio;Ackd via Twilio by %s\n" % (hostname, service, cid))
elif digit == 2:
# Disable Service Alerts
response = """
<Response>
<Say>Disabling alerts for this service. Goodbye!</Say>
</Response> """
with open(self.external, "w") as f:
f.write("[0] DISABLE_SVC_NOTIFICATIONS;%s;%s\n" % (hostname, service))
else:
response = """
<Response>
<Say>Invalid choice.</Say>
<Redirect method="GET">/host/%s/%s</Redirect>
</Response> """ % (urllib.quote_plus(hostname),
urllib.quote_plus(service))
return response
def parse_objects(self):
filename = self.objects
conf = []
f = open(filename, 'r')
for i in f.readlines():
if i[0] == '#': continue
matchID = re.search(r"define ([\w]+) {", i)
matchAttr = re.search(r"[ ]*([\w]+)\s+(.*)$", i)
matchEndID = re.search(r"[ ]*}", i)
if matchID:
identifier = matchID.group(1)
cur = [identifier, {}]
elif matchAttr:
attribute = matchAttr.group(1)
value = matchAttr.group(2)
cur[1][attribute] = value
elif matchEndID:
conf.append(cur)
new_conf = {}
for entry in conf:
if entry[0] == 'host':
new_conf[('host', entry[1]['host_name'])] = entry[1]
elif entry[0] == 'service':
new_conf[(entry[1]['service_description'], entry[1]['host_name'])] = entry[1]
return new_conf
def parse_status(self):
filename = self.status
conf = []
f = open(filename, 'r')
for i in f.readlines():
if i[0] == '#': continue
matchID = re.search(r"([\w]+) {", i)
matchAttr = re.search(r"[ ]*([\w]+)=(.*)", i)
matchEndID = re.search(r"[ ]*}", i)
if matchID:
identifier = matchID.group(1)
cur = [identifier, {}]
elif matchAttr:
attribute = matchAttr.group(1)
value = matchAttr.group(2)
cur[1][attribute] = value
elif matchEndID:
conf.append(cur)
new_conf = {}
for entry in conf:
if entry[0] == 'hoststatus':
new_conf[('host', entry[1]['host_name'])] = entry[1]
elif entry[0] == 'servicestatus':
new_conf[(entry[1]['service_description'], entry[1]['host_name'])] = entry[1]
return new_conf
|
UTF-8
|
Python
| false | false | 2,012 |
10,144,712,757,633 |
ddde73b589f6de8ea40a24d5b12954607b5228bf
|
68f1419501293790e8593061c12d1e3d4394a841
|
/lib/enthought/traits/tests/keyword_args_test_case.py
|
28b07eda5e3c680a429b4d6cf357a15154a8798c
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
non_permissive
|
mattfoster/matplotlib
|
https://github.com/mattfoster/matplotlib
|
91b4c0a51100ce22e70c4e438e18d991a1b4061c
|
0b47697b19b77226c633ec6a3d74a2199a153315
|
refs/heads/master
| 2021-01-18T12:03:56.775493 | 2008-12-04T20:05:40 | 2008-12-04T20:05:40 | 85,467 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from enthought.traits.api import HasTraits, Instance, Int
import unittest
class Bar(HasTraits):
    # Minimal traited class: integer trait `b` with default 3.
    b = Int(3)
class Foo(HasTraits):
    # Instance trait holding a Bar; defaults to None when not supplied.
    bar = Instance(Bar)
class KeyWordArgsTest(unittest.TestCase):
    """Checks that HasTraits subclasses accept trait values as keyword
    arguments at construction time."""
    def test_using_kw(self):
        # Keyword arguments should initialize the corresponding traits.
        bar = Bar(b=5)
        foo = Foo(bar=bar)
        self.assertEqual(foo.bar.b, 5)
    def test_not_using_kw(self):
        # An unset Instance trait defaults to None.
        foo = Foo()
        self.assertEqual(foo.bar, None)
|
UTF-8
|
Python
| false | false | 2,008 |
17,231,408,803,994 |
1242f88280934c8dcb2e5f6d8bff9d2ad42f18bf
|
9c10bdedd05a02a72f73fc3577133da2f043e0ec
|
/samurai-x2/samuraix/xcb/window.py
|
713aa917da63f42f4d8024b58902adcc8a311609
|
[] |
no_license
|
jmeireles/samurai-x
|
https://github.com/jmeireles/samurai-x
|
bc602e50813e164aad1797f9662dcf9207e2d8cc
|
3dbf8b63b1339b581ab2edd0ff3f0c8ea8c387ae
|
refs/heads/master
| 2021-01-13T01:49:10.823156 | 2008-10-16T10:52:55 | 2008-10-16T10:52:55 | 32,355,352 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import warnings
import samuraix.event
import cookie
import _xcb
import ctypes
import util
from .drawable import Drawable
from .pixmap import Pixmap
import logging
log = logging.getLogger(__name__)
def _xize_event_mask(events):
    """
    Combine an iterable of `event.Event` subclasses into a single
    xcb event mask and return it.

    :note: warns when the resulting mask lacks the exposure bit,
        which is almost always unintended.
    """
    combined = 0
    for event_cls in events:
        combined |= event_cls.event_mask
    if not (combined & _xcb.XCB_EVENT_MASK_EXPOSURE):
        warnings.warn('You did not add the exposure event to your event mask.\n'
                'Do you really want that?')
    return combined
def _xize_resource(res):
    """
    Return a `resource.Resource`'s internal xcb representation:
    its X id (`_xid`).
    """
    return res._xid
def _xize_pixmap(pixmap):
    """
    Return the internal representation of a pixmap: its xid.
    This function does call `_xize_resource`, but additionally
    asserts that `pixmap` is really a `pixmap.Pixmap` instance.
    """
    # NOTE: assert is stripped under -O; validation is best-effort only.
    assert isinstance(pixmap, Pixmap)
    return _xize_resource(pixmap)
CLASS_INPUT_OUTPUT = _xcb.XCB_WINDOW_CLASS_INPUT_OUTPUT
STACK_MODE_ABOVE = _xcb.XCB_STACK_MODE_ABOVE
STACK_MODE_BELOW = _xcb.XCB_STACK_MODE_BELOW
GRAB_MODE_ASYNC = _xcb.XCB_GRAB_MODE_ASYNC
GRAB_MODE_SYNC = _xcb.XCB_GRAB_MODE_SYNC
ATTRIBUTE_ORDER = [
('back_pixmap', _xcb.XCB_CW_BACK_PIXMAP, _xize_pixmap),
('back_pixel', _xcb.XCB_CW_BACK_PIXEL),# TODO: xizer
('border_pixmap', _xcb.XCB_CW_BORDER_PIXMAP, _xize_pixmap),
('border_pixel', _xcb.XCB_CW_BORDER_PIXEL),# TODO: xizer
('bit_gravity', _xcb.XCB_CW_BIT_GRAVITY),
('win_gravity', _xcb.XCB_CW_WIN_GRAVITY),
('backing_store', _xcb.XCB_CW_BACKING_STORE),
('backing_planes', _xcb.XCB_CW_BACKING_PLANES),
('backing_pixel', _xcb.XCB_CW_BACKING_PIXEL),
('override_redirect', _xcb.XCB_CW_OVERRIDE_REDIRECT),
('save_under', _xcb.XCB_CW_SAVE_UNDER),
('event_mask', _xcb.XCB_CW_EVENT_MASK, _xize_event_mask),
('dont_propagate', _xcb.XCB_CW_DONT_PROPAGATE),
('colormap', _xcb.XCB_CW_COLORMAP), # TODO: xizer
('cursor', _xcb.XCB_CW_CURSOR) # TODO: xizer
]
WINDOW_CONFIG= [
('x', _xcb.XCB_CONFIG_WINDOW_X),
('y', _xcb.XCB_CONFIG_WINDOW_Y),
('width', _xcb.XCB_CONFIG_WINDOW_WIDTH),
('height', _xcb.XCB_CONFIG_WINDOW_HEIGHT),
('border_width', _xcb.XCB_CONFIG_WINDOW_BORDER_WIDTH),
('sibling', _xcb.XCB_CONFIG_WINDOW_SIBLING),
('stack_mode', _xcb.XCB_CONFIG_WINDOW_STACK_MODE)
]
class Window(Drawable):
"""
a window.
"""
def __init__(self, connection, xid):
"""
instantiate a window from a known X id.
:Parameters:
`connection` : connection.Connection
The corresponding connection
`xid` : int
The X id which has to exist.
"""
super(Window, self).__init__(connection, xid)
def __repr__(self):
return '<Window object XID: %d>' % self._xid
def request_get_property(self, prop):
"""
request the property `name`
:Parameters:
`name` : str or `atom.Atom`
The property's name *or* the corresponding
`atom.Atom` object
:rtype: `cookie.PropertyRequest`
"""
return cookie.PropertyRequest(self.connection, self, \
(self.connection.get_atom_by_name(prop) if isinstance(prop, basestring) \
else prop),
)
def get_property(self, name):
"""
request a property and return its value.
:see: `Window.request_get_property`
"""
return self.request_get_property(name).value
def request_set_property(self, prop, content, format, prop_type=None):
"""
request the setting of the property `prop` to `content`
using the format `format`.
:Parameters:
`prop` : str or `atom.Atom`
The property's name *or* the corresponding
`atom.Atom` object
`content` : list
The object list the property should be set to.
(can be very much, e.g. a Window list, an Atom list, ...)
`format` : int
The format to use. Has to be one of 8, 16, 32
:rtype: `cookie.ChangePropertyRequest`
"""
return cookie.ChangePropertyRequest(self.connection, self, \
(self.connection.get_atom_by_name(prop) if isinstance(prop, basestring) \
else prop),
content, format, prop_type)
def set_property(self, name, content, format, prop_type=None):
"""
request a property change and execute it immediately.
:see: `Window.request_set_property`
"""
return self.request_set_property(name, content, format, prop_type).execute()
def request_send_event(self, event):
"""
request the sending of the event `event`.
:Parameters:
`event` : event.Event subclass instance
The event to send.
:rtype: `cookie.SendEventRequest`
"""
return cookie.SendEventRequest(self.connection, self, event)
def send_event(self, event):
"""
request an event sending and execute.
"""
self.request_send_event(event).execute()
def delete(self):
"""
delete me. TODO.
"""
# delete myself!
super(Window, self).delete()
def destroy(self):
c = _xcb.xcb_destroy_window(self.connection._connection, self._xid)
self.connection.flush()
util.check_void_cookie(self.connection._connection, c)
@classmethod
def create(cls, connection, screen, x, y, width, height, border_width=0, parent=None, class_=None, visual=None, attributes=None):
"""
create a new window and return an instance.
:Parameters:
`connection` : connection.Connection
The corresponding connection.
`screen` : screen.Screen
The corresponding screen instance.
If you specify `parent` *and* `visual`, you can
set `screen` to None.
`x` : int
The initial x coordinate.
`y` : int
The initial y coordinate.
`width` : int
The inital width (in pixels).
`height` : int
The initial height (in pixels).
`border_width` : int
The border size (in pixels).
`parent` : window.Window
The parent window instance. If this is None,
use `screen`'s root window
`class_` : int
One of CLASS_INPUT_OUTPUT (TODO: complete)
defaults to CLASS_INPUT_OUTPUT
`visual` : int
The visual ID to use. If this is None,
use `screen`'s root visual.
`attributes` : dict
a dictionary {key: attribute} containing
attributes which should be set. see `Window.attributes`.
:rtype: `window.Window`
"""
if not class_:
class_ = CLASS_INPUT_OUTPUT
if not visual:
visual = screen.root_visual
if not attributes:
attributes = {}
if not parent:
parent = screen.root
parent = parent._xid
xid = _xcb.xcb_generate_id(connection._connection) # TODO
attr, mask = util.xize_attributes(attributes, ATTRIBUTE_ORDER)
_xcb.xcb_create_window(connection._connection, # connection
_xcb.XCB_COPY_FROM_PARENT, # depth
xid, # xid
parent, # parent xid
x, y,
width, height,
border_width,
class_,
visual,
mask,
attr)
if not 'event_mask' in attributes:
warnings.warn('You did not an event mask to your window.\n'
'Do you really want that?')
connection.flush()
return cls(connection, xid)
def request_get_attributes(self):
return cookie.GetWindowAttributesRequest(self.connection, self)
def get_attributes(self):
return self.request_get_attributes().value
def set_attributes(self, attributes):
attr, mask = util.xize_attributes(attributes, ATTRIBUTE_ORDER)
_xcb.xcb_change_window_attributes_checked(self.connection._connection,
self._xid,
mask,
attr)
self.connection.flush()
attributes = property(get_attributes, set_attributes, doc="""
Change attributes. Item assignment is currently not supported.
TODO: check whether already set events survive.
Valid attributes are:
back_pixmap : pixmap.Pixmap
The background pixmap.
back_pixel
border_pixmap : pixmap.Pixmap
The pixmap used for the borders
bit_gravity
win_gravity
backing_store
backing_planes
backing_pixel
override_redirect : bool
Should be window be visible to the window manager?
save_under
event_mask : iterable of `event.Event` subclasses
The event classes which should be propagated to the window.
dont_propagate
colormap
cursor
TODO:
Not all attributes are 'pythonized' yet.
Only attribute changing is supported for now, not retrieving.
""")
def map(self):
"""
show the window.
"""
_xcb.xcb_map_window(self.connection._connection, self._xid)
self.connection.flush()
def configure(self, **config):
attr, mask = util.xize_attributes(config, WINDOW_CONFIG)
cookie = _xcb.xcb_configure_window_checked(self.connection._connection,
self._xid,
mask,
attr)
self.connection.flush()
util.check_void_cookie(self.connection._connection, cookie)
def resize(self, x, y, width, height):
geom = self.get_geometry().copy()
self.configure(x=x, y=y, width=w, height=h)
geom.x = x
geom.y = y
geom.width = width
geom.height = height
ce = _xcb.xcb_configure_notify_event_t()
ce.response_type = _xcb.XCB_CONFIGURE_NOTIFY
ce.event = self._xid
ce.window = self._xid
ce.x = geom.x
ce.y = geom.y
ce.width = geom.width
ce.height = geom.height
ce.border_width = 1
ce.above_sibling = _xcb.XCB_NONE;
ce.override_redirect = 0
_xcb.xcb_send_event(self.connection.connection,
false, self._xid, _xcb.XCB_EVENT_MASK_STRUCTURE_NOTIFY, ctypes.byref(ce))
def request_query_pointer(self):
return cookie.QueryPointerRequest(self.connection, self)
def query_pointer(self):
return self.request_query_pointer().value
def reparent(self, parent, x, y):
_xcb.xcb_reparent_window(self.connection._connection,
self._xid,
parent._xid,
x,
y)
self.connection.flush()
def request_get_geometry(self):
return cookie.GetGeometryRequest(self.connection, self)
def get_geometry(self):
return self.request_get_geometry().value
def circulate(self, direction):
cookie = _xcb.xcb_circulate_window(self.connection._connection,
self._xid,
direction)
self.connection.flush()
util.check_void_cookie(self.connection._connection, cookie)
@property #should cache? i dont think it should change...
def _tree_cookie(self):
return _xcb.xcb_query_tree_unchecked(self.connection._connection, self._xid)
@property
def children(self):
""" return a generator for all direct children of the window """
tree_r = _xcb.xcb_query_tree_reply(self.connection._connection, self._tree_cookie, None)
if not tree_r:
return False
wins = _xcb.xcb_query_tree_children(tree_r)
if not wins:
raise Exception('cant get tree children')
tree_len = _xcb.xcb_query_tree_children_length(tree_r)
return (Window(self.connection, wins[i]) for i in range(tree_len))
def grab_key(self, keycode, modifiers=0, owner_events=True, pointer_mode=GRAB_MODE_ASYNC, keyboard_mode=GRAB_MODE_ASYNC):
cookie = _xcb.xcb_grab_key(self.connection._connection,
owner_events,
self._xid,
modifiers,
keycode,
pointer_mode,
keyboard_mode)
self.connection.flush()
util.check_void_cookie(self.connection._connection, cookie)
def grab_pointer(self, cursor=None):
    """Actively grab the pointer for this window.

    Returns True when the server sends a grab reply, False otherwise.
    NOTE(review): `cursor` is resolved from self.connection.cursors but never
    passed to xcb_grab_pointer (XCB_NONE is sent) -- see TODO below.
    """
    if cursor is None:
        cursor = self.connection.cursors['Normal']
    # need to put this somewhere generic...
    MOUSEMASK = (_xcb.XCB_EVENT_MASK_BUTTON_PRESS
                 | _xcb.XCB_EVENT_MASK_BUTTON_RELEASE
                 | _xcb.XCB_EVENT_MASK_POINTER_MOTION)
    grab_ptr_c = _xcb.xcb_grab_pointer(self.connection._connection,
                                       False,
                                       self._xid,
                                       MOUSEMASK,
                                       _xcb.XCB_GRAB_MODE_ASYNC,
                                       _xcb.XCB_GRAB_MODE_ASYNC,
                                       self._xid,
                                       _xcb.XCB_NONE, # TODO: specify cursor
                                       _xcb.XCB_CURRENT_TIME)
    grab_ptr_r = _xcb.xcb_grab_pointer_reply(self.connection._connection, grab_ptr_c, None)
    if not grab_ptr_r:
        return False
    return True
def ungrab_pointer(self):
    """Release any active pointer grab held by this client."""
    ungrab_ptr_c = _xcb.xcb_ungrab_pointer(self.connection._connection, _xcb.XCB_CURRENT_TIME)
    self.connection.flush()
    # util.check_void_cookie(self.connection._connection, ungrab_ptr_c) # TODO?
|
UTF-8
|
Python
| false | false | 2,008 |
14,164,802,151,613 |
7f53bba79e15e137ad7228a99b20f9205e19e92c
|
0aeab3ba93e42d48faa3f876c87a2fe294e4d74e
|
/TriggerStudies/PATreader/test/vertexinspector_cfg.py
|
500183b1e517a78fbfe6c17aeb2cf3edecca07a0
|
[] |
no_license
|
rmanzoni/TauHlt
|
https://github.com/rmanzoni/TauHlt
|
7fe8889c97aad70ca6506ad1c9bed2449d4b3cd3
|
d2ea209e38a3520c5b3962a15b09702a3ef74bb2
|
refs/heads/master
| 2021-01-23T13:58:19.286968 | 2014-08-19T17:07:28 | 2014-08-19T17:07:28 | 16,837,224 | 0 | 2 | null | false | 2014-08-19T17:07:29 | 2014-02-14T12:58:32 | 2014-07-22T07:30:49 | 2014-08-19T17:07:28 | 377,810 | 0 | 2 | 0 |
C++
| null | null |
# CMSSW job configuration: reads a PAT tuple from EOS, runs the
# VertexInspector analyzer, and writes histograms to test_vertex.root.
import FWCore.ParameterSet.Config as cms

process = cms.Process("Demo")

process.load("FWCore.MessageService.MessageLogger_cfi")

# -1 => process every event in the input file
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )

process.source = cms.Source("PoolSource",
    fileNames = cms.untracked.vstring(
    'root://eoscms//eos/cms/store/cmst3/user/manzoni/TauPOG/mt/13TeV/VBF_HToTauTau_M-125_13TeV-powheg-pythia6/onlinePt17_PVconstraint/patTuple_10_2_XaQ.root',
    )
)

# the analyzer under study; takes no parameters
process.vertex = cms.EDAnalyzer(
    'VertexInspector' ,
    )

process.p = cms.Path(
    process.vertex
    )

# histogram/ntuple output file
process.TFileService = cms.Service(
    "TFileService"  ,
    fileName      = cms.string("test_vertex.root") ,
    closeFileFast = cms.untracked.bool(False)
    )
|
UTF-8
|
Python
| false | false | 2,014 |
19,078,244,745,912 |
8141a18295f2d94f734d68df74fa55014a33a58d
|
0f4d7c48f5f64c76559c5a22905fabc3e86daea4
|
/main.py
|
f9598dcf975396bc3b5801d61a0dacdf9001d5e6
|
[
"GPL-3.0-only"
] |
non_permissive
|
saga/parklife
|
https://github.com/saga/parklife
|
b0ea2184825dc441fb85e5b2dcc38004fd1a9569
|
3101b3683a3dc60dfafa1a4d86482e5ad3ecdec1
|
refs/heads/master
| 2017-04-30T02:43:23.992780 | 2011-12-27T21:43:04 | 2011-12-27T21:43:04 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 Oscar Renalias
#
# Licensed under GNU General Public License version 3:
# http://www.gnu.org/licenses/gpl-3.0.txt
#
import sys
import os
import logging
import webapp2
from app.models.entry import Entry
from app.models.config import Config
from app.view.view import View
from defaults import Defaults
from app.pager.pagedquery import PagedQuery
from google.appengine.ext import db
from google.appengine.ext.db import BadKeyError
from app.core import BaseHandler
from app.utils.classhelper import DynamicDispatcherMixin
class NotFoundPageHandler(BaseHandler):
    """Catch-all handler that renders the 404 error page."""
    def get(self):
        self.error(404)  # set the HTTP status before rendering the template
        self.writeResponse('error.html', {'message': 'The page could not be found'} )
#
# This request handler is reponsible for handling all kinds of entry points to the site content, instead of
# splitting each URL into its own request handler.
# It uses some dynamic method calls to route execution logic to a specific method based on the request and URL
# parameters
#
class FrontHandler(BaseHandler, DynamicDispatcherMixin):
    """Single entry point for all site content.

    The first URL path segment (captured by the route regexes) names a method
    on this class; DynamicDispatcherMixin routes the request to it with the
    remaining segments as arguments. Each action returns a
    (template_name, view_data) pair that ``get`` renders.
    """
    def get(self, *params):
        # do some basic stuff first
        self.page = self.getCurrentPage()

        if len(params) == 0:
            template, view_data = self.default()
            self.writeResponse(template, view_data)
        else:
            # dynamically route the method call (stored in params[0] to the correct method with the remaining parameters
            if self.has_method(params[0]):
                template, view_data = self.call_method(params[0], *params[1:])
                self.writeResponse(template, view_data)
                # NOTE(review): an unknown action falls through silently with
                # no response -- presumably intentional; confirm

    def default(self):
        """Front page: a paged list of all entries."""
        query = self.getEntryQuery()
        prev, entries, next = query.fetch( self.page, Defaults.POSTS_PER_PAGE )
        data = {
            'entries': entries,
            'prev': prev,
            'next': next
        }
        return 'index.html', data

    def tag(self, tag):
        """Paged entry list filtered by tag."""
        query = self.getEntryQuery({'tags = ':tag})
        prev, entries, next = query.fetch( self.page, Defaults.POSTS_PER_PAGE )
        from app.utils import StringHelper
        # strip HTML from the user-supplied tag before echoing it in the view
        view_data = { 'entries': entries, 'prev': prev, 'next': next, 'tag': StringHelper.remove_html_tags(tag) }
        return 'index.html', view_data

    def source(self, source):
        """Paged entry list filtered by source."""
        query = self.getEntryQuery({'source =':source})
        prev, entries, next = query.fetch( self.page, Defaults.POSTS_PER_PAGE )
        from app.utils import StringHelper
        view_data = { 'entries': entries, 'prev': prev, 'next': next, 'source': StringHelper.remove_html_tags(source) }
        return 'index.html', view_data

    def entry(self, entry_slug):
        """Single-entry page; resolves by slug first, then by datastore key."""
        # see if we can find the entry by slug
        entry = Entry.all().filter('slug =', entry_slug ).filter('deleted = ', False).get()
        #entry = self.getEntryQuery({'slug = ': entry_slug}).get()

        # entry not found, let's try by id
        if entry == None:
            try:
                entry = Entry.get(entry_slug)
            except BadKeyError:
                entry = None

        if entry == None:
            #self.response.out.write( View(self.request, 'error.html').render ({ 'message': 'Entry could not be found '} ))
            return 'error.html', { 'message': 'Entry could not be found ', 'error': True}

        # if found, display it
        return 'entry.html', { 'entry': entry }

    def places(self):
        # this action generates different content depending on how it is called
        view_data = {}
        if self.request.get('f') == 'json':
            # select those entries that have location data
            query = Entry.gql('WHERE lat != :lat AND deleted = :deleted', lat=None, deleted=False)
            view_data = { 'entries': query }

        return 'places.html', view_data
# module-level side effects: enable DEBUG logging and build the WSGI app
logging.getLogger().setLevel(logging.DEBUG)

# routes: the captured group names ('entry', 'tag', ...) select FrontHandler
# methods via DynamicDispatcherMixin; anything unmatched gets the 404 handler
application = webapp2.WSGIApplication([
    ('/', FrontHandler),
    ('/(entry)/(.*)', FrontHandler ),
    ('/(source)/(.*)', FrontHandler),
    ('/(tag)/(.*)', FrontHandler),
    ('/(test)/(.*)', FrontHandler),
    ('/(places)', FrontHandler),
    ('/.*', NotFoundPageHandler)
], debug=True)
|
UTF-8
|
Python
| false | false | 2,011 |
9,405,978,387,062 |
3d110347bd9c053f1552edd79f4c325b87277fd6
|
c50b442acb88e077819216dc5a237c0c8b8a8180
|
/GrammarBrain/brown_data/experiment_scripts/read_output.py
|
2f272ef72a9bfd298e6e62355a81e8efd301da76
|
[] |
no_license
|
kaeken1jp/GrammarBrain
|
https://github.com/kaeken1jp/GrammarBrain
|
974313f408f1c152c3312fe1abddf2d611e178e4
|
0f79b7fb3fa4baa1ed29f32d8559b373eb977060
|
refs/heads/master
| 2020-03-21T02:33:19.964937 | 2014-03-15T05:26:15 | 2014-03-15T05:26:15 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Convert raw experiment logs (part_N.txt) into CSV summaries (part_N.csv):
# a column of BPTT total errors, a blank row, then one
# (epoch, train, test, validation) row per epoch.
import csv
from GrammarBrain.brown_data.experiment_scripts import EXPERIMENT_RESULT_PATH

epoch, train, test, validation = None, None, None, None
for i in range(1, 5):
    input_path = EXPERIMENT_RESULT_PATH + ('First Real Experiment/part_%d.txt' % i)
    with open(input_path, 'rb') as txt:
        lines = txt.readlines()

    # get list of total errors
    total_errors = ['BPTT Errors']
    for line in lines:
        if 'Total error:' in line:
            total_errors.append(float(line.split(' ')[2]))

    # get list of (epoch, train, test, validation) errors; the VALIDATION
    # line is last in each epoch block, so it triggers the row flush
    train_errors = [('Epoch', 'Training Error', 'Test Error', 'Validation Error')]
    for line in lines:
        if 'epoch' in line:
            epoch = int(line.split(' ')[1])
        if 'TRAINING' in line:
            train = float(line.split(' ')[2])
        if 'TEST' in line:
            test = float(line.split(' ')[2])
        if 'VALIDATION' in line:
            validation = float(line.split(' ')[2])
            # bug fix: the old `assert epoch and train and ...` rejected
            # legitimate zero values (epoch 0, or a 0.0 error); compare
            # against None instead of relying on truthiness
            assert None not in (epoch, train, test, validation), 'What??'
            train_errors.append((epoch, train, test, validation))
            epoch, train, test, validation = None, None, None, None

    output_path = EXPERIMENT_RESULT_PATH + ('First Real Experiment/part_%d.csv' % i)
    with open(output_path, 'wb') as output:
        w = csv.writer(output)
        for e in total_errors:
            w.writerow([e])
        w.writerow([''])
        for e in train_errors:
            w.writerow(e)
|
UTF-8
|
Python
| false | false | 2,014 |
4,569,845,221,706 |
0c641a78755b03aded2beb1df9bb23e9c4d1bbe6
|
1e7ee5ad0069de3a5d5c22f09d5a8f43f9a5da77
|
/stringCreatorTest.py
|
20e409a9bc5e0d253ab6795846ede224360a591f
|
[
"BSD-2-Clause"
] |
permissive
|
Talos4757/NVIDIABot
|
https://github.com/Talos4757/NVIDIABot
|
1974cb83a083a648c94a4ee73dbcb2ab853cb875
|
f5f51ad8e87a0a100e66c1fd876886376e9cd9a4
|
refs/heads/master
| 2021-01-22T00:24:20.738646 | 2014-11-09T23:41:46 | 2014-11-09T23:41:46 | 28,785,183 | 1 | 0 | null | true | 2015-01-04T20:30:49 | 2015-01-04T20:30:49 | 2014-09-14T04:36:51 | 2014-11-09T23:41:47 | 404 | 0 | 0 | 0 | null | null | null |
# Python 2 script: flatten each number in `data` into comma-separated digits;
# two-digit numbers get a trailing -1 marker, three-digit numbers a plain
# comma, and anything else is emitted as-is followed by a comma.
data=[12, 133, 1]
dataS=""
for i in range(len(data)):
    if(len(str(data[i]))==2):
        # e.g. 12 -> "1,2,-1,"
        s=",".join(str(data[i]))
        s+=",-1,"
        dataS+=s
    elif(len(str(data[i]))==3):
        # e.g. 133 -> "1,3,3,"
        s=",".join(str(data[i]))
        s+=","
        dataS+=s
    else:
        # single digit (or 4+ digits): emitted whole, e.g. 1 -> "1,"
        dataS+=str(data[i])
        dataS+=","
print dataS
|
UTF-8
|
Python
| false | false | 2,014 |
13,194,139,561,048 |
e83fa96d2c022a3c79ca3d7f6bca081bf816bab8
|
4e5de397a7bac9e777074daaadb890f4315752a2
|
/athenaCL/libATH/unit.py
|
6db7da457239cbdcab1fa458aaadaeda1f2c73b0
|
[] |
no_license
|
ericahub/athenacl
|
https://github.com/ericahub/athenacl
|
60c5757e11f65f65eeb26c40125dac0e1636d86b
|
96ac2a6859dedf08e8a9aebbed4ef348a66ac707
|
refs/heads/master
| 2021-01-10T12:38:04.869889 | 2011-12-17T20:49:19 | 2011-12-17T20:49:19 | 44,901,312 | 2 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#-----------------------------------------------------------------||||||||||||--
# Name: unit.py
# Purpose: unit interval tools
#
# Authors: Christopher Ariza
#
# Copyright: (c) 2004-2010 Christopher Ariza
# License: GPL
#-----------------------------------------------------------------||||||||||||--
import copy
import unittest, doctest
from athenaCL.libATH import drawer
_MOD = 'unit.py'
#-----------------------------------------------------------------||||||||||||--
# unit interval tools
class UnitException(Exception):
    """Raised for unit-interval domain errors (value outside 0..1, malformed bounds)."""
    pass
#-----------------------------------------------------------------||||||||||||--
def seriesMinMax(series):
    """Return (min, max) of any sequence of numbers.

    Copies into a list before sorting so arrays and other sequence
    objects are accepted. Raises ValueError on an empty sequence.

    >>> seriesMinMax([3,4,5])
    (3, 5)
    """
    seriesLen = len(series)
    if seriesLen == 1:
        return series[0], series[0]
    elif seriesLen == 0:
        # fix: use the call form of raise -- `raise ValueError, msg` is
        # Python-2-only statement syntax; this form works on 2 and 3
        raise ValueError('series with no values given')
    q = list(series)
    q.sort()
    return q[0], q[-1]
def tableMinMax(table):
    """Return (min, max) across every row of a nested list of numbers."""
    extremes = []
    for row in table:
        # gather each row's own extremes, then take the global extremes
        extremes.extend(seriesMinMax(row))
    extremes.sort()
    return extremes[0], extremes[-1]
#-----------------------------------------------------------------||||||||||||--
def unitNorm(value, valueRange):
    '''Normalize value within the unit interval.

    >>> unitNorm(3, (3,10))
    0.0
    >>> unitNorm(1, (3,10)) # values are not limited here
    -0.285714...
    >>> unitNorm(6.5, (3,10))
    0.5
    >>> unitNorm(10, (3,10))
    1.0
    >>> unitNorm(17, (3,10))
    2.0
    '''
    # NOTE(review): locals shadow the min/max builtins (historical style)
    min, max = seriesMinMax(valueRange)
    span = max - min
    dif = value - min
    # cast to float so Python 2 integer division cannot truncate
    if drawer.isInt(dif):
        dif = float(dif)
    if span != 0:
        return dif / span
    else: # fill value if span is zero
        return 0
def unitNormRange(series, fixRange=None):
    """read all values from a list
    normalize values within min and maximum of series

    If *fixRange* is given, its extremes are used instead of the series'
    own. NOTE(review): fixRange is sorted in place (caller's list mutated).

    >>> unitNormRange([0,3,4])
    [0.0, 0.75, 1.0]
    """
    if fixRange != None:
        fixRange.sort()
        min = fixRange[0]    # shadows builtins, as elsewhere in this module
        max = fixRange[-1]
    else: # find max and min from values
        min, max = seriesMinMax(series)
    span = max - min
    unit = []
    if len(series) > 1:
        for val in series:
            dif = val - min
            # cast to float so Python 2 integer division cannot truncate
            if drawer.isInt(dif):
                dif = float(dif)
            if span != 0:
                unit.append(dif / span)
            else: # fill value if span is zero
                unit.append(0)
    else: # if one element, return 0 (could be 1, or .5)
        unit.append(0)
    return unit
def unitNormRangeTable(table, fixRange=None):
    """read all values from a a list
    normalize values wihtin min and maximum of series

    Like unitNormRange but over a nested list, preserving row structure.
    NOTE(review): fixRange is sorted in place (caller's list mutated).

    >>> unitNormRangeTable([[4,-2],[234,0],[3,7]])
    [[0.025423728813559324, 0.0], [1.0, 0.0084745762711864406],
    [0.021186440677966101, 0.038135593220338986]]
    """
    if fixRange != None:
        fixRange.sort()
        min = fixRange[0]
        max = fixRange[-1]
    else: # find max and min from values
        min, max = tableMinMax(table)
    span = max - min
    unit = []
    i = 0
    for row in table:
        unit.append([])
        for val in row:
            dif = val - min
            # cast to float so Python 2 integer division cannot truncate
            if drawer.isInt(dif):
                dif = float(dif)
            if span != 0:
                unit[i].append(dif / span)
            else: # fill value if span is zero
                unit[i].append(0)
        i = i + 1
    return unit
def unitNormEqual(parts):
    """Divide the unit interval into *parts* evenly spaced points.

    Returns values between 0 and 1 with both endpoints always included
    (for parts >= 2); a single part collapses to [0].

    >>> unitNormEqual(3)
    [0.0, 0.5, 1]
    """
    if parts <= 1:
        return [0]
    if parts == 2:
        return [0, 1]
    increment = 1.0 / (parts - 1)
    points = [k * increment for k in range(parts - 1)]
    points.append(1)  # append the integer 1 directly to dodge rounding error
    return points
def unitNormStep(step, a=0, b=1, normalized=True):
    """Walk from min(a, b) to max(a, b) in increments of *step*.

    Returns the visited values, or (by default) an equal division of the
    unit interval with the same number of points. An empty range (a == b)
    yields an empty list.

    >>> unitNormStep(.5, 0, 1)
    [0.0, 0.5, 1]
    >>> unitNormStep(.5, -1, 1, normalized=False)
    [-1, -0.5, 0.0, 0.5, 1.0]
    """
    if a == b:
        return []
    if a < b:
        lo, hi = a, b
    else:
        lo, hi = b, a
    # accumulate every step position within [lo, hi]
    visited = []
    x = lo
    while x <= hi:
        visited.append(x)
        x = x + step
    if normalized:
        return unitNormEqual(len(visited))
    return visited
def unitNormProportion(series):
    """Normalize non-negative values as proportions of their total (sum == 1).

    This is the same scheme used for durFraction in Path. Raises
    ValueError if any member is negative; asserts a non-zero total.

    >>> unitNormProportion([0,3,4])
    [0.0, 0.42857142857142855, 0.5714285714285714]
    >>> unitNormProportion([1,1,1])
    [0.33333333333333331, 0.33333333333333331, 0.33333333333333331]
    """
    # note: negative values should be shifted to positive region first
    total = 0   # fix: was named `sum`, shadowing the builtin
    for x in series:
        if x < 0:
            # fix: call form of raise (the old `raise ValueError, msg` is
            # Python-2-only statement syntax)
            raise ValueError('series members should be positive')
        total = total + x
    assert total != 0
    # weights on the unit interval; sum == 1
    return [x / float(total) for x in series]
def unitNormAccumulate(series):
    """Treat *series* as successive durations and return their cumulative
    positions rescaled to the unit interval.

    Starting from an implicit zero, each member advances the running total,
    so the result has one more point than the input; the final point is 1.0.

    >>> unitNormAccumulate([.5,.5,.5])
    [0.0, 0.33333333333333331, 0.66666666666666663, 1.0]
    """
    total = 0
    positions = [0]
    for increment in series:
        total = total + increment
        positions.append(total)
    # divide by the final accumulated value so the last point lands on 1.0
    return [float(p) / total for p in positions]
def denorm(value, a, b):
    """Map a unit-interval *value* onto the range spanned by a and b.

    Argument order of a/b does not matter; equal boundaries return the
    boundary itself. Raises UnitException for values outside [0, 1].

    >>> denorm(.5, 10, 20)
    15.0
    """
    if value < 0 or value > 1: # must be normalized
        raise UnitException('value (%s) must be in unit interval' % value)
    if a == b:
        return a  # degenerate range
    if a < b:
        lo, hi = a, b
    else:
        lo, hi = b, a
    # scale the value by the span, then shift up to the lower boundary
    return (float(value) * (hi - lo)) + lo
def denormList(unit, a, b):
    """Denormalize each unit-interval value in *unit* into the range a..b.

    Raises UnitException if any value falls outside [0, 1].

    >>> denormList([.2, .5], 10, 20)
    [12.0, 15.0]
    """
    for value in unit:
        if value < 0 or value > 1: # must be normalized
            raise UnitException('value (%s) must be in unit interval' % value)
    if a == b:
        # bug fix: the original returned the bare scalar boundary here,
        # breaking the list-in/list-out contract; return a matching list
        return [a] * len(unit)
    if a < b:
        lo, hi = a, b
    else:
        lo, hi = b, a
    return [((float(value) * (hi - lo)) + lo) for value in unit]
def interpolate(value, a, b):
    """Linearly mix a and b by a unit-interval weight.

    A weight of 0 returns a exactly, 1 returns b exactly; boundaries and
    ordering of a/b are irrelevant. Raises UnitException outside [0, 1].

    >>> interpolate(.5, 10, 20)
    15.0
    """
    if value < 0 or value > 1: # must be normalized
        raise UnitException('value (%s) must be in unit interval' % value)
    if value == 0:
        return a
    elif value == 1:
        return b
    # weighted sum; sign and ordering of a/b do not matter
    return (b * value) + (a * (1 - value))
def limit(value, method=None):
    """Clamp *value* into the unit interval.

    Useful alongside denorm and friends, which do not limit their output.
    The *method* argument is accepted but currently unused (reserved).
    """
    return min(max(value, 0), 1)
#-----------------------------------------------------------------||||||||||||--
def unitBoundaryEqual(parts):
    """Partition the unit interval into *parts* equal (low, mean, high) triples.

    Adjacent partitions share their boundary value. Raises UnitException
    for zero or negative part counts.

    >>> unitBoundaryEqual(2)
    [(0, 0.25, 0.5), (0.5, 0.75, 1.0)]
    """
    if parts <= 0:
        raise UnitException('cannot process 0 parts')
    width = 1.0 / parts
    triples = []
    low = 0
    for i in range(parts):
        if i == parts - 1:
            high = 1.0  # pin the final edge exactly, avoiding rounding drift
        else:
            high = width * (i + 1)
        triples.append((low, low + (width * .5), high))
        low = high
    return triples
def unitBoundaryFree(series):
    """Normalize an arbitrary series and return its adjacent-pair boundaries.

    For n series members there are n-1 (low, mean, high) triples, each
    spanning two consecutive normalized values.

    >>> unitBoundaryFree([0,3,4])
    [(0.0, 0.375, 0.75), (0.75, 0.875, 1.0)]
    """
    unit = unitNormRange(series)
    triples = []
    for lower, upper in zip(unit[:-1], unit[1:]):
        triples.append((lower, (lower + upper) * .5, upper))
    return triples
def unitBoundaryProportion(series):
    """take an series of parts of an implied sum, create unit boundaries
    for n members of a series, there will be n boundaries
    note: zero cannot be an entry (not a valid proportion)

    >>> unitBoundaryProportion([1,1,2])
    [(0, 0.125, 0.25), (0.25, 0.375, 0.5), (0.5, 0.75, 1.0)]
    """
    # series cannot have non-specified values, that is, 0
    if 0 in series:
        raise UnitException('cannot process series that contains zero')
    unit = unitNormProportion(series)
    bounds = []
    boundL = None
    boundH = None
    sum = 0  # running lower edge; shadows the builtin (historical style)
    for i in range(0, len(unit)):
        if i != len(unit) - 1: # not last
            boundL = sum
            boundH = sum + unit[i]
            sum = sum + unit[i]
            mean = (boundL + boundH) * .5
            bounds.append((boundL, mean, boundH))
        else: # last, avoid rounding errors
            # force the final upper edge to exactly 1.0
            boundL = sum
            boundH = 1.0
            mean = (boundL + boundH) * .5
            bounds.append((boundL, mean, boundH))
    return bounds
def unitBoundaryPos(val, bounds):
    """Locate the partition index (0..n-1) containing *val*.

    *bounds* must be sorted (low, mean, high) triples covering the full
    unit interval; a value of exactly 1 maps to the last partition.
    Raises UnitException for values outside [0, 1] or incomplete bounds.

    >>> unitBoundaryPos(.4, [(0, 0.125, 0.25), (0.25, 0.375, 0.5), (0.5, 0.75, 1.0)])
    1
    """
    if val < 0 or val > 1: # must be normalized
        raise UnitException('value (%s) must be in unit interval' % val)
    # the triples must span the whole unit interval
    if bounds[0][0] != 0 or bounds[-1][2] != 1:
        raise UnitException('incomplete bounds')
    if val == 1:  # 1 belongs to the final partition
        return len(bounds) - 1
    for index, (low, mean, high) in enumerate(bounds):
        # half-open test; the val == 1 case is handled above
        if low <= val < high:
            return index
#-----------------------------------------------------------------||||||||||||--
def discreteBinaryPad(series, fixRange=None):
    """take an integer series of values
    fill all spaces with zeros that are not occupied
    the result will always be sorted

    NOTE(review): fixRange, when given, is sorted in place (caller's list
    is mutated); membership uses a linear `in series` scan per position.

    >>> discreteBinaryPad([3,4,5])
    [1, 1, 1]
    >>> discreteBinaryPad([3,20,22])
    [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1]
    """
    # make sure these are ints
    for x in series:
        if not drawer.isInt(x):
            raise UnitException('non integer value found')
    discrete = []
    if fixRange != None:
        fixRange.sort() # make sure sorted
        min = fixRange[0]
        max = fixRange[-1]
    else: # find max and min from values
        seriesAlt = list(copy.deepcopy(series))
        seriesAlt.sort()
        min = seriesAlt[0]
        max = seriesAlt[-1]
    # one slot per integer from min to max inclusive
    for x in range(min, max+1):
        if x in series:
            discrete.append(1)
        else: # not in series
            discrete.append(0)
    return discrete
def discreteCompress(series):
    """Run-length encode *series* as (value, run length) pairs.

    Adjacent equal values collapse into a single pair; an empty series
    yields an empty list.

    >>> discreteCompress([3,3,3,2,2,2,8,8,8,8,8,8,8])
    [(3, 3), (2, 3), (8, 7)]
    """
    runs = []
    previous = None
    runLength = 0
    lastIndex = len(series) - 1
    for i, x in enumerate(series):
        if previous is None or x == previous:
            runLength = runLength + 1
        else:
            # value changed: flush the finished run, start a new one
            runs.append((previous, runLength))
            runLength = 1
        if i == lastIndex:
            runs.append((x, runLength))  # flush the final run
        previous = x
    return runs
#-----------------------------------------------------------------||||||||||||--
def boundaryFit(a, b, f, boundaryMethod):
    """take boundary levels a,b and place f within them
    available methods include wrap, reflect, limit
    used for mask-based parameter objects

    'limit' clamps to the nearest edge, 'wrap' shifts by whole periods,
    'reflect' mirrors off the edges until f lands inside.

    >>> boundaryFit(3, 9, 23, 'limit')
    9
    """
    if a > b:
        min = a; max = b   # NOTE(review): looks inverted, but the next
        min = b; max = a
    elif a < b:
        min = a; max = b
    else: # they are the same
        min = a; max = a
    period = abs(max - min)
    center = min + (period * .5)
    # within boundary
    if f >= min and f <= max: return f
    elif f == min or f == max: return f
    else: # out of boundary
        if max == min: return center # if the same, return boundary
        if boundaryMethod == 'limit':
            if f > max: return max
            elif f < min: return min
        elif boundaryMethod == 'wrap':
            # shift period to find wihin range
            if f > max:
                while 1:
                    f = f - period
                    if f <= max: break
            elif f < min:
                while 1:
                    f = f + period
                    if f >= min: break
            return f
        elif boundaryMethod == 'reflect':
            # bounce off whichever edge f exceeds until it falls inside
            while f > max or f < min:
                if f > max:
                    f = max - (abs(f - max))
                elif f < min:
                    f = min + (abs(f - min))
                else: break # f >= min or f <= max
            return f
def boundaryReject(a, b, f, boundaryMethod):
    """place f outside of a and b;

    Inverse of boundaryFit: values already outside the region pass
    through; values inside are projected out by the chosen method
    ('limit' to the nearest edge, 'wrap' by whole periods, 'reflect'
    by mirroring). The half of the region f falls in picks the side.

    >>> boundaryReject(3, 9, 23, 'limit')
    23
    """
    if a > b:
        min = b; max = a
    elif a < b:
        min = a; max = b
    else: # they are the same
        min = a; max = a
    period = abs(max - min)
    center = min + (period * .5)
    # outside of boundary
    if f <= min or f >= max: return f
    elif f == min or f == max: return f
    else: # w/n boundary: project outside
        if max == min: return center # if the same, return boundary
        if boundaryMethod == 'limit':
            if f < max and f >= center: return max # middle values round up
            elif f > min and f < center: return min
        elif boundaryMethod == 'wrap':
            # shift period to find wihin range
            if f < max and f >= center:
                while 1: # add values to bring upward, out of range
                    f = f + period
                    if f >= max: break
            elif f > min and f < center:
                while 1: # subtract values to bring downward, out of range
                    f = f - period
                    if f <= min: break
            return f
        elif boundaryMethod == 'reflect':
            while f < max and f > min:
                if f < max and f >= center:
                    f = max + (abs(f - max)) # add the distance from the value to lim
                elif f > min and f < center:
                    f = min - (abs(f - min))
                else: break # f >= min or f <= max
            return f
# def boundaryDouble(a, b, c, d, f, boundaryMethod):
# """place f w/n a and b but not in c and d
# """
# if a > b:
# min = b; max = a
# elif a < b:
# min = a; max = b
# else: # they are the same
# min = a; max = a
#
# if c > d:
# minNot = d; maxNot = c
# elif c < d:
# minNot = c; maxNot = d
# else: # they are the same
# minNot = c; maxNot = c
#
# post = boundaryFit(min, max, f, boundaryMethod)
#-----------------------------------------------------------------||||||||||||--
class FunnelUnitException(Exception):
    """Raised by FunnelUnit for out-of-range positions or unit-interval values."""
    pass
#-----------------------------------------------------------------||||||||||||--
class FunnelUnit:
    """Map continuous unit-interval values onto a discrete integer series.

    The series is normalized (srcSeriesUnit), padded into a 0/1 occupancy
    map over its integer range (binaryMap), and that map is partitioned
    into equal unit-interval boundaries (binaryBound). Incoming values are
    located in a partition and either rejected (findReject) or snapped to
    the nearest occupied slot (findNearest).
    """
    def __init__(self, series):
        """
        >>> a = FunnelUnit([3,4,20])
        """
        self.srcSeries = series
        self.srcSeriesUnit = unitNormRange(series)
        self.binaryMap = discreteBinaryPad(series)
        # create boundary w/n unit interval of binary rep
        self.binaryBound = unitBoundaryEqual(len(self.binaryMap))
        self.discrComp = discreteCompress(self.binaryMap)
        #print 'len series', len(series)
        #print 'len bound', len(self.binaryBound)
        #print self.binaryMap

    def _seriesPosToBinaryPos(self, pos):
        """giving a pos in series (0 start) return bin position
        if """
        # walk the binary map counting occupied (1) slots until the
        # pos-th one is reached; returns None if never reached
        count = 0 # number of 1's in range
        if pos >= len(self.srcSeries):
            raise FunnelUnitException('series position out of range')
        for i in range(0, len(self.binaryMap)):
            if self.binaryMap[i] == 1:
                if count == pos:
                    return i
                count = count + 1

    def _binaryPosToSeriesPos(self, pos):
        # inverse of _seriesPosToBinaryPos: a binary index of an occupied
        # slot maps to its ordinal among the 1s; unoccupied slots -> None
        if pos >= len(self.binaryMap):
            raise FunnelUnitException('binary position out of range')
        if self.binaryMap[pos] != 1:
            return None
        count = 0 # series position
        for i in range(0, len(self.binaryMap)):
            if self.binaryMap[i] == 1:
                if i == pos:
                    return count
                count = count + 1

    #-----------------------------------------------------------------------||--
    def findReject(self, val):
        """take the binary map, and divide the zero portions appropriate

        Returns the normalized series value whose partition contains
        *val*, or None when the partition is unoccupied.

        >>> a = FunnelUnit([0,1,2,3,4,20])
        >>> a.findReject(0)
        0.0
        >>> a.findReject(.1)
        0.1000...
        """
        if val < 0 or val > 1:
            raise FunnelUnitException('value (%s) must be in unit interval' % val)
        # get position w/n biary bound
        i = unitBoundaryPos(val, self.binaryBound)
        if self.binaryMap[i] == 1: # if 1, return that position
            pos = self._binaryPosToSeriesPos(i)
            return self.srcSeriesUnit[pos]
        else: # no series value w/n this boundary
            return None

    def _findAdjacent(self, pos):
        """given a position in the binary array, determine
        lower and upper positions that have a 1

        >>> a = FunnelUnit([0,1,2,3,4,20])
        >>> a._findAdjacent(10)
        (4, 20)
        """
        posLower = None
        posUpper = None
        # get upper
        for i in range(pos+1, len(self.binaryMap)):
            if self.binaryMap[i] == 1:
                posUpper = i
                break
        # get lower
        # NOTE(review): Python-2-only -- range() must return a list for
        # in-place reverse(); under Python 3 use reversed(range(0, pos))
        posArray = range(0, pos)
        posArray.reverse()
        for i in posArray:
            if self.binaryMap[i] == 1:
                posLower = i
                break
        # check for erros
        #print _MOD, 'pos, posLower, posUpper', pos, posLower, posUpper
        if posLower == None or posUpper == None:
            raise FunnelUnitException('neighbor positions cannot be found')
        return posLower, posUpper

    def findNearest(self, val):
        """Snap *val* to the normalized value of the nearest occupied slot.

        >>> a = FunnelUnit([0,1,2,3,4,20])
        >>> a.findNearest(.5)
        0.2000...
        >>> a.findNearest(.8)
        1.0
        """
        if val < 0 or val > 1:
            raise FunnelUnitException('value (%s) must be in unit interval' % val)
        # get position w/n biary bound
        i = unitBoundaryPos(val, self.binaryBound)
        if self.binaryMap[i] == 1: # if 1, return that position
            pos = self._binaryPosToSeriesPos(i)
            return self.srcSeriesUnit[pos]
        else: # no series value w/n this boundary
            # locate the zero-run containing i within the run-length
            # compressed map, then pick the closer occupied neighbor
            absPos = 0
            for j in range(0, len(self.discrComp)):
                x, count = self.discrComp[j]
                # do not need to worry about boudnary conditions, as this
                # value will never be the last
                relPos = 0
                midCount = None
                for k in range(0, count): # simulate index values
                    if absPos == i: # the area looking for
                        posLower, posUpper = self._findAdjacent(absPos)
                        midCount = count # store this compressed
                        break
                    relPos = relPos + 1
                    absPos = absPos + 1
                if midCount != None: # done, break
                    break
            # determine winner
            if midCount % 2 == 0: # even
                relMid = (midCount / 2) # middle-upper index w/n count
            else: #odd, there is a middle
                relMid = (midCount / 2) # middle index w/n count
            if relPos < relMid:
                pos = self._binaryPosToSeriesPos(posLower)
            else:
                pos = self._binaryPosToSeriesPos(posUpper)
            return self.srcSeriesUnit[pos]

    #     def test(self):
    #         max = 40
    #         print 'findReject'
    #         for x in range(0,max+1):
    #             print self.findReject(x/float(max))
    #         print '\nfindNearest'
    #         for x in range(0,max+1):
    #             print self.findNearest(x/float(max))
#-----------------------------------------------------------------||||||||||||--
class Test(unittest.TestCase):
    """Placeholder test case; real coverage lives in the module doctests."""

    def runTest(self):
        # no-op entry point so the case can be constructed without a method name
        pass

    def testDummy(self):
        # sanity check that the harness runs at all
        self.assertEqual(True, True)
#-----------------------------------------------------------------||||||||||||--
# run the module's doctests/unit tests through the shared athenaCL runner
if __name__ == '__main__':
    from athenaCL.test import baseTest
    baseTest.main(Test)
|
UTF-8
|
Python
| false | false | 2,011 |
7,370,163,920,508 |
4458ac5a704faf8f6bd0a9a642352ae223e0933e
|
bacfc3f6b633b6b73cbb9e40d10674d809358c75
|
/bips/workflows/workflow15.py
|
b81f7bada3ec1f14098d400abef9adb3380e80ea
|
[
"Apache-2.0"
] |
permissive
|
satra/BrainImagingPipelines
|
https://github.com/satra/BrainImagingPipelines
|
fc1c4e63fc790a1931aa421bef6b7d3814ed4e65
|
2b0da2b50814cc685f15fefbae8144624308ebfc
|
refs/heads/master
| 2021-01-18T08:26:03.118958 | 2012-08-18T15:29:57 | 2012-08-18T15:29:57 | 3,511,786 | 1 | 2 | null | true | 2012-07-11T20:43:17 | 2012-02-22T05:50:20 | 2012-06-19T05:16:11 | 2012-06-19T05:16:11 | 280 | null | null | null |
Python
| null | null |
from traits.api import HasTraits, Directory, Bool
import traits.api as traits
from .base import MetaWorkflow, load_config, register_workflow
from .workflow12 import config as pconfig
"""
Part 1: MetaWorkflow
"""
mwf = MetaWorkflow()
mwf.help = """
Diffusion tracking workflow
===========================
"""
mwf.uuid = 'fda82554a43511e1b507001e4fb1404c'
mwf.tags = ['diffusion','dti','tracking']
mwf.script_dir = 'u0a14c5b5899911e1bca80023dfa375f2'
"""
Part 2: Config
"""
class config(HasTraits):
    """Trait-backed configuration for the diffusion tracking workflow.

    Instances are edited through the traitsui view in create_view and
    serialized/deserialized by the BIPS machinery.
    """
    uuid = traits.Str(desc="UUID")
    desc = traits.Str(desc='Workflow description')
    # Directories
    working_dir = Directory(mandatory=True, desc="Location of the Nipype working directory")
    sink_dir = Directory(mandatory=True, desc="Location where the BIP will store the results")
    crash_dir = Directory(mandatory=False, desc="Location to store crash files")
    # Execution
    run_using_plugin = Bool(False, usedefault=True, desc="True to run pipeline with plugin, False to run serially")
    plugin = traits.Enum("PBS", "PBSGraph","MultiProc", "SGE", "Condor",
        usedefault=True,
        desc="plugin to use, if run_using_plugin=True")
    plugin_args = traits.Dict({"qsub_args": "-q many"},
        usedefault=True, desc='Plugin arguments.')
    test_mode = Bool(False, mandatory=False, usedefault=True,
        desc='Affects whether where and if the workflow keeps its \
                        intermediary files. True to keep intermediary files. ')
    # Subjects
    subjects= traits.List(traits.Str, mandatory=True, usedefault=True,
        desc="Subject id's. Note: These MUST match the subject id's in the \
                                Freesurfer directory. For simplicity, the subject id's should \
                                also match with the location of individual functional files.")
    # Preprocessing info
    preproc_config = traits.File(desc="preproc json file")
    #Advanced
    use_advanced_options = traits.Bool()
    advanced_script = traits.Code()
def create_config():
    """Build a config instance pre-populated with this workflow's UUID and help text."""
    c = config()
    c.uuid = mwf.uuid
    c.desc = mwf.help
    return c
mwf.config_ui = create_config
"""
Part 3: View
"""
def create_view():
    """Build the traitsui View used to edit this workflow's config in the GUI."""
    from traitsui.api import View, Item, Group, CSVListEditor, TupleEditor
    from traitsui.menu import OKButton, CancelButton
    view = View(Group(Item(name='uuid', style='readonly'),
        Item(name='desc', style='readonly'),
        label='Description', show_border=True),
        Group(Item(name='working_dir'),
            Item(name='sink_dir'),
            Item(name='crash_dir'),
            label='Directories', show_border=True),
        Group(Item(name='run_using_plugin'),
            Item(name='plugin', enabled_when="run_using_plugin"),
            Item(name='plugin_args', enabled_when="run_using_plugin"),
            Item(name='test_mode'),
            label='Execution Options', show_border=True),
        Group(Item(name='subjects', editor=CSVListEditor()),
            label='Subjects', show_border=True),
        Group(Item(name='preproc_config'),
            label='Track', show_border=True),
        Group(Item("use_advanced_options"),
            Item("advanced_script"),
            label="Advanced",show_border=True),
        buttons = [OKButton, CancelButton],
        resizable=True,
        width=1050)
    return view
mwf.config_view = create_view
"""
Part 4: Construct Workflow
"""
from .scripts.u0a14c5b5899911e1bca80023dfa375f2.diffusion_base import create_workflow
def get_dataflow(c):
    """Build a DataGrabber node fetching preprocessed diffusion files.

    Looks under ``c.sink_dir/<subject_id>/preproc/outputs/...`` for the
    dwi, mask, bvecs, bvals, bbreg registration (.dat) and mean image.
    Returns the configured nipype Node.
    """
    import nipype.pipeline.engine as pe
    import nipype.interfaces.io as nio
    datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'],
                                                   outfields=['dwi','mask','bvecs','bvals',"reg","mean"]),
                         name='datasource')
    # create a node to obtain the functional images
    datasource.inputs.base_directory = c.sink_dir
    datasource.inputs.template ='*'
    # %s is filled with subject_id via template_args below
    datasource.inputs.field_template = dict(dwi='%s/preproc/outputs/dwi/*',
        mask='%s/preproc/outputs/mask/*', bvecs='%s/preproc/outputs/bvecs/*',
        bvals='%s/preproc/outputs/bvals/*',reg='%s/preproc/outputs/bbreg/*.dat',
        mean='%s/preproc/outputs/mean/*.nii*')
    datasource.inputs.template_args = dict(dwi=[['subject_id']],
                                           mask=[['subject_id']],
                                           bvecs=[['subject_id']],
                                           bvals=[['subject_id']],
                                           mean=[["subject_id"]],
                                           reg=[["subject_id"]])
    return datasource
foo = pconfig()
def get_wf(c, prep_c=foo):
    """Assemble the tracking workflow: datagrabber -> tracking -> datasink.

    Parameters
    ----------
    c : config
        This workflow's configuration (sink/working dirs, subjects, ...).
    prep_c : preprocessing config
        Configuration of the preprocessing run whose outputs are grabbed.
        NOTE(review): the default is a shared module-level pconfig instance
        (``foo``); callers normally pass the loaded preproc config instead.
    """
    import nipype.pipeline.engine as pe
    import nipype.interfaces.io as nio
    import nipype.interfaces.utility as niu
    workflow = create_workflow()
    datagrabber = get_dataflow(prep_c)
    inputspec = workflow.get_node('inputspec')
    # wire grabbed preproc outputs into the tracking workflow's inputs
    workflow.connect(datagrabber,'mask',inputspec,'mask')
    workflow.connect(datagrabber,'dwi',inputspec,'dwi')
    workflow.connect(datagrabber,'bvecs',inputspec,'bvecs')
    workflow.connect(datagrabber,'bvals',inputspec,'bvals')
    workflow.connect(datagrabber,'reg',inputspec,'reg')
    workflow.connect(datagrabber,'mean',inputspec,'mean')
    workflow.inputs.inputspec.surf_dir=prep_c.surf_dir
    # iterate over subjects; one identity node feeds both grabber and inputs
    infosource = pe.Node(niu.IdentityInterface(fields=["subject_id"]),name='subject_names')
    workflow.connect(infosource,"subject_id",datagrabber, 'subject_id')
    workflow.connect(infosource,"subject_id",inputspec, 'subject_id')
    sinker = pe.Node(nio.DataSink(),name='sinker')
    outputspec=workflow.get_node('outputspec')
    # sink tracking results under <sink_dir>/<subject_id>/track/...
    workflow.connect(outputspec,'fdt_paths',sinker,'track.fdt_paths')
    workflow.connect(outputspec,'log',sinker,'track.log')
    workflow.connect(outputspec,'particle_files',sinker,'track.particle_files')
    workflow.connect(outputspec,'targets',sinker,'track.targets')
    workflow.connect(outputspec,'way_total',sinker,'track.way_total')
    sinker.inputs.base_directory=c.sink_dir
    workflow.connect(infosource,"subject_id",sinker,"container")
    if c.test_mode:
        # test mode: run only the first subject
        infosource.iterables=("subject_id", [c.subjects[0]])
    else:
        infosource.iterables=("subject_id", c.subjects)
    workflow.base_dir = c.working_dir
    return workflow
mwf.workflow_function = get_wf
"""
Part 5: Main
"""
def main(config_file):
    """Entry point: load configs, build the workflow, and run it.

    Parameters
    ----------
    config_file : str
        Path to this workflow's JSON config; its ``preproc_config`` field
        points at the preprocessing workflow's config.

    Returns 1 on completion.
    """
    c = load_config(config_file,config)
    prep_c = load_config(c.preproc_config, pconfig)
    workflow = get_wf(c,prep_c)
    if c.use_advanced_options:
        # SECURITY NOTE: runs arbitrary Python taken from the config file
        # (Python 2 `exec` statement) -- only use trusted configs.
        exec c.advanced_script
    if c.test_mode:
        workflow.write_graph()
    if c.run_using_plugin:
        workflow.run(plugin=c.plugin, plugin_args=c.plugin_args)
    else:
        workflow.run()
    return 1
mwf.workflow_main_function = main
"""
Part 6: Main
"""
register_workflow(mwf)
|
UTF-8
|
Python
| false | false | 2,012 |
11,278,584,155,002 |
e328637592250d928f000facf60b88846b508bfa
|
1cb65e445dcf6199d8cd78870e1b92a1c817210b
|
/vcs/testsuite/presentation_tests.py
|
eb9a1197fe0ea9fe4a45f41ee400058ed0565d2c
|
[] |
no_license
|
JoseCamino/DPresenter
|
https://github.com/JoseCamino/DPresenter
|
2d5bf4b93782fe10d215739d8abe1c59c4673e53
|
0ea2a597955f46de981d5160497a643421b3baa1
|
refs/heads/master
| 2020-03-29T13:17:01.143580 | 2014-11-26T22:24:06 | 2014-11-26T22:24:06 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
import os.path
import unittest
import vcs
from helper import remove_test_repo, get_vcs
from vcs.fragmenter import PresentationFragmenter
vcs = get_vcs()
directory = os.path.dirname(__file__)
def get_tests():
    """Return the test-case classes exported by this module."""
    return TestFragmenter, TestMerge
class PresentationTest(unittest.TestCase):
    """Base fixture: create a fresh test repository/project around each test."""

    def setUp(self):
        remove_test_repo()
        # BUG FIX: create_project takes the project name as its first
        # argument; `self` was mistakenly passed as well (compare with
        # TestMerge.setUp, which calls it correctly).
        self.project = vcs.create_project("testrepo")

    def tearDown(self):
        remove_test_repo()
class TestFragmenter(unittest.TestCase):
    """Checks that PresentationFragmenter splits a pptx into its slides."""

    def test_fragmenter(self):
        # the fixture deck is known to contain exactly 3 slides
        presentation_location = os.path.join(directory, "test_requests/testpresentation.pptx")
        slides = PresentationFragmenter().fragment_file(presentation_location)
        self.assertEqual(len(slides), 3)
class TestMerge(unittest.TestCase):
    """Round-trip tests: fragment a pptx, add the slides, re-merge, recount."""

    def setUp(self):
        # fresh repo/project per test
        remove_test_repo()
        self.project = vcs.create_project("testrepo")
        self.presentation = self.project.current_presentation

    def tearDown(self):
        remove_test_repo()

    def test_presentation_data_merges_slides(self):
        """Merging via the .data property preserves the slide count."""
        presentation_location = os.path.join(directory, "test_requests/testpresentation.pptx")
        slides = PresentationFragmenter().fragment_file(presentation_location)
        [self.presentation.add_slide(None, slide) for slide in slides]

        # save the presentation itself to a file
        presentation_data = self.presentation.data
        presentation_save_path = os.path.join(directory, "testdata/testmerge.pptx")
        with open(presentation_save_path, 'wb') as file:
            file.write(presentation_data)

        # Now test if the output presentation has the correct number of slides
        saved_slides = PresentationFragmenter().fragment_file(presentation_save_path)
        self.assertEqual(len(saved_slides), len(slides))

    def test_presentation_write_data_merges_slides(self):
        """Merging via write_data_to_file preserves the slide count."""
        presentation_location = os.path.join(directory, "test_requests/testpresentation.pptx")
        slides = PresentationFragmenter().fragment_file(presentation_location)
        [self.presentation.add_slide(None, slide) for slide in slides]

        # save the presentation itself to a file
        presentation_save_path = os.path.join(directory, "testdata/testmerge.pptx")
        # NOTE(review): return value assigned but never used --
        # write_data_to_file presumably writes the file itself; confirm.
        presentation_data = self.presentation.write_data_to_file(presentation_save_path)

        # Now test if the output presentation has the correct number of slides
        saved_slides = PresentationFragmenter().fragment_file(presentation_save_path)
        self.assertEqual(len(saved_slides), len(slides))
|
UTF-8
|
Python
| false | false | 2,014 |
16,612,933,521,745 |
3692ed4e15746bb2ce50b2666f5e9b5fe5b3dcba
|
b5d2743086ed73cde6d2cc7f18d2b35c2c23cee1
|
/src/defs.py
|
2108984d3a00e699ee862b73f1c70300195c7f82
|
[] |
no_license
|
vburenin/pystructor
|
https://github.com/vburenin/pystructor
|
4a4619d735e244a3cc769ba2a77249d7646255b3
|
1e98a0fee8fee98f6ca187497a034bd7a6f1925a
|
refs/heads/master
| 2016-03-28T04:25:33.449679 | 2013-08-26T00:06:08 | 2013-08-26T00:06:08 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# ---------------------------------------------------------------------------
# Token-type identifiers (TT_*) produced by the tokenizer.
# ---------------------------------------------------------------------------
TT_BACKSLASH = 'BACKSLASH'
TT_BOOL_VAL = 'BOOL_VAL'
TT_COMMA = 'COMMA'
TT_SEMICOLON = 'SEMICOLON'
TT_DEFAULTS = 'DEFAULTS'
TT_NS_CLOSED = 'NS_CLOSED'
TT_ENUM_BEGIN = 'ENUM_BEGIN'
TT_ENUM_LABEL = 'ENUM_LABEL'
TT_ENUM_OPTION = 'ENUM_OPTION'
TT_ASSIGN = 'ASSIGN'
TT_MSG_BEGIN = 'MSG_BEGIN'
TT_MSG_LABEL = 'MSG_LABEL'
TT_DEFAULTS_OPENED = 'DEFAULTS_OPENED'
TT_DEFAULTS_CLOSED = 'DEFAULTS_CLOSED'
TT_MSG_DEFAULTS = 'MSG_DEFAULTS'
TT_INTEGER = 'INTEGER'
TT_SINTEGER = 'SINTEGER'
TT_SINGLE_QUOTE = 'SINGLE_QUOTE'
TT_VAR_TYPE = 'VAR_TYPE'
TT_VAR_ID = 'VAR_ID'

# Field occurrence modifiers (TO_*), protobuf-style.
TO_OPTIONAL = 'optional'
TO_REQUIRED = 'required'
TO_REPEATED = 'repeated'

# Scalar data-type keywords (DT_*), mirroring the protobuf scalar types.
DT_DOUBLE = 'double'
DT_FLOAT = 'float'
DT_INT32 = 'int32'
DT_INT64 = 'int64'
DT_UINT32 = 'uint32'
DT_UINT64 = 'uint64'
DT_SINT32 = 'sint32'
DT_SINT64 = 'sint64'
DT_FIXED32 = 'fixed32'
DT_FIXED64 = 'fixed64'
DT_SFIXED32 = 'sfixed32'
DT_SFIXED64 = 'sfixed64'
DT_BOOL = 'bool'
DT_STRING = 'string'
DT_BYTES = 'bytes'

# Reserved-word sets used for fast membership checks during parsing.
RESERVED_TYPES = {
    DT_DOUBLE, DT_FLOAT, DT_INT32, DT_INT64, DT_UINT32, DT_UINT64,
    DT_SINT32, DT_SINT64, DT_FIXED32, DT_FIXED64, DT_SFIXED32, DT_SFIXED64,
    DT_BOOL, DT_STRING, DT_BYTES
}

RESERVED_MSG_LABELS = {TO_OPTIONAL, TO_REQUIRED, TO_REPEATED}

BOOL_VALS = {'true', 'false'}

ENUM_OPTIONS = {'allow_alias'}

# Canonical ordering/collection of all token types.
TOKENS = (
    TT_BACKSLASH,
    TT_BOOL_VAL,
    TT_COMMA,
    TT_SEMICOLON,
    TT_DEFAULTS,
    TT_NS_CLOSED,
    TT_ENUM_BEGIN,
    TT_ENUM_LABEL,
    TT_ENUM_OPTION,
    TT_ASSIGN,
    TT_MSG_BEGIN,
    TT_MSG_LABEL,
    TT_DEFAULTS_OPENED,
    TT_DEFAULTS_CLOSED,
    TT_INTEGER,
    TT_SINTEGER,
    TT_SINGLE_QUOTE,
    TT_VAR_TYPE,
    TT_VAR_ID
)
|
UTF-8
|
Python
| false | false | 2,013 |
5,763,846,153,201 |
b56202fe79f57b610d3e9a4b7536bacf9ef88c28
|
d96058b33f3f01523442ac0bfd13c9fec6edd69a
|
/totter/mail.py
|
dc929191edecbfeda53d7c60c56e027ba58862c1
|
[] |
no_license
|
rockingchairllc/TotterPyramid
|
https://github.com/rockingchairllc/TotterPyramid
|
2c65f259ee2129d46d67a82e755f72e522e0cdd0
|
bfeb5b8ab86ff7cf1edfeff867d3884d277bdd72
|
refs/heads/master
| 2021-01-02T22:38:50.663901 | 2011-12-15T04:25:46 | 2011-12-15T04:25:46 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import smtplib
from email.mime.text import MIMEText
def send_email(from_name, to_emails, subject, message):
    """Send a plain-text UTF-8 email through the local SMTP server.

    Parameters
    ----------
    from_name : str
        Envelope sender and ``From:`` header value.
    to_emails : str or list of str
        A single address or a list of recipient addresses.
    subject : str
        Subject header.
    message : unicode
        Body text; encoded as UTF-8 before sending.
    """
    if not isinstance(to_emails, list):
        to_emails = [to_emails]
    msg = MIMEText(message.encode('utf-8'), 'plain', 'utf-8')
    msg['Subject'] = subject
    msg['From'] = from_name
    msg['To'] = ','.join(to_emails)
    s = smtplib.SMTP('localhost')
    try:
        s.sendmail(from_name, to_emails, msg.as_string())
    finally:
        # BUG FIX: the connection was never closed, leaking the socket on
        # every call; always QUIT the SMTP session.
        s.quit()
|
UTF-8
|
Python
| false | false | 2,011 |
2,284,922,605,119 |
05c095e83390ecafd9fcac59c027a4d880aea7e1
|
980605813b4be035b856a6766230eb78e2bb8ebe
|
/simulation_simple/traffic_sim.py
|
dd3e19a06f83cf43b646355522cd6ecff1e0e1ab
|
[] |
no_license
|
atkm/reed-modeling
|
https://github.com/atkm/reed-modeling
|
40ba99262c74c5ec1c6022f197ce8dde137ceab1
|
034ca9f592768d90362ed70b7db2a8d19b350e91
|
refs/heads/master
| 2016-09-03T06:44:49.344107 | 2013-04-14T02:29:16 | 2013-04-14T02:29:16 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import sys
import scipy as sp
"""
Actual simulation. i.e. no fractional cars.
"""
class Circle:
    """Parameters of a traffic circle: lane count, entrances, radius, speed."""

    def __init__(self, lanes, entrances, radius, avg_speed):
        # store all geometry/traffic parameters verbatim
        self.lanes, self.ents = lanes, entrances
        self.radius, self.avg_speed = radius, avg_speed
        #self.buff_size = buff_size
class Car:
    """A vehicle in the simulation: physical size, speed, and goal exit."""

    def __init__(self, size, speed, goal):
        # store the car's attributes verbatim
        self.size, self.speed, self.goal = size, speed, goal
if __name__ == "__main__":
circ = Circle(2, 4, 10, 1)
car1 = Car(1,1,3)
sys.exit(print("Success."))
|
UTF-8
|
Python
| false | false | 2,013 |
9,844,065,064,164 |
e30bb6313c46556bf39d378262b765eb79bc8f86
|
6135340eb5e5af828cbdc4b873e078001a216616
|
/playground/benchmark/sr_tests.py
|
eec98d58e1891b65735e71306c8fc9f63a9b46e2
|
[
"LGPL-3.0-only",
"LGPL-2.0-or-later",
"GPL-1.0-or-later"
] |
non_permissive
|
a20r/playground
|
https://github.com/a20r/playground
|
df0ff6711308d3ee0dd78afd7424f7e959dcc732
|
1ae74280bd104984ec16d62cc74de351ddee7d5f
|
refs/heads/master
| 2021-05-28T01:40:49.023550 | 2014-02-03T15:57:00 | 2014-02-03T15:57:00 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
import math
import random
import parser
# GLOBAL VARS
data_file_extension = ".dat"
def write_test_data(fp, data):
    """Write comma-separated sample rows to *fp*, preceded by a header row.

    The header is derived from the first data row: one ``varN`` column per
    input variable plus a final ``answer`` column.

    Parameters
    ----------
    fp : str
        Output file path.
    data : list of str
        Pre-formatted lines such as ``"1.0, 2.0, 3.0"``; the last field of
        each line is the answer column.
    """
    cols = len(data[0].split()) - 1
    # create header
    variables = ["var{0}".format(i + 1) for i in range(cols)]
    variables.append("answer")
    header = ", ".join(variables)
    # BUG FIX: the file was opened in binary mode ("wb") although text
    # strings are written, and was closed manually; use text mode and a
    # context manager so the file is closed even if a write fails.
    with open(fp, "w") as data_file:
        data_file.write(header + "\n")
        # write data
        for line in data:
            data_file.write("{0}\n".format(line))
def generate_random_matrix(bounds, points, decimal_places=2):
    """Draw *points* unique random rows, one column per entry in *bounds*.

    Each value is drawn uniformly from its column's lower/upper bound and
    rounded to *decimal_places*.  Duplicate rows are re-drawn, so the
    result contains exactly *points* distinct rows.
    """
    matrix = []
    while len(matrix) < points:
        row = [round(random.uniform(b["lower"], b["upper"]), decimal_places)
               for b in bounds]
        # reject duplicates and try again
        if row not in matrix:
            matrix.append(row)
    return matrix
def generate_series_matrix(bounds, points, decimal_places=2):
    """Generate *points* evenly spaced rows from lower toward upper bounds.

    The first row is the lower bounds; each subsequent row adds a per-column
    ``step`` of (upper - lower) / points, rounded to *decimal_places*.

    NOTE(review): mutates the *bounds* dicts in place, adding ``step`` and
    ``last_number`` keys -- callers passing a reused bounds list will see
    state from prior calls.
    """
    points_generated = 0
    matrix = []
    columns = len(bounds)

    # calculate the steps
    for i in range(columns):
        step = bounds[i]["upper"] - bounds[i]["lower"]
        step = step / float(points)
        bounds[i]["step"] = round(step, decimal_places)

    while (points_generated != points):
        tmp = []
        for i in range(columns):
            if bounds[i].get("last_number") is not None:
                # advance from the previously emitted value by one step
                num = round(bounds[i]["last_number"], decimal_places)
                num += round(bounds[i]["step"], decimal_places)
                bounds[i]["last_number"] = round(num, decimal_places)
            else:
                # first row starts exactly at the lower bound
                num = bounds[i]["lower"]
                bounds[i]["last_number"] = round(num, decimal_places)
            tmp.append(num)
        matrix.append(tmp)
        points_generated += 1

    return matrix
def evaluate_test_function(equation, var_values):
    """Evaluate *equation* at every row of *var_values*; return CSV lines.

    *equation* is a Python expression string referring to the row as ``v``
    (e.g. ``"v[0] ** 2 + v[1]"``).  Each output line is
    ``"v1, v2, ..., result"``.

    SECURITY NOTE: compiles and eval()s the equation string -- never call
    with untrusted input.
    NOTE(review): relies on Python 2 semantics: the stdlib ``parser`` module
    (removed in 3.10) and ``map`` returning a list (``line.append`` below).
    """
    data = []
    points = len(var_values)

    for i in range(points):
        # eval equation
        v = var_values[i]
        code = parser.expr(equation).compile()
        result = eval(code)

        # stringify results
        line = map(str, v)  # add variable values
        line.append(str(result))  # add result
        line = ", ".join(map(str, line))  # stringfy the data line
        data.append(line)

    return data
def arabas_et_al_test_functions(data_file="arabas_et_al-f"):
t_funcs = [
"-v[0] * math.sin(10.0 * math.pi * v[0]) + 1.0",
"int(8.0 * v[0]) / 8.0",
"v[0] * math.copysign(1, v[0])",
" ".join(
"""
0.5 + (math.sin(math.sqrt(v[0] ** 2 + v[1] **2) - 0.5) ** 2)
/ (1 + 0.001 * (v[0] ** 2 + v[1] ** 2)) **2
""".split()
)
]
bounds = [
[{"lower": -2.0, "upper": 1.0}],
[{"lower": 0.0, "upper": 1.0}],
[{"lower": -1.0, "upper": 2.0}],
[{"lower": -100.0, "upper": 100.0}, {"lower": -100.0, "upper": 100.0}],
]
points = [200, 50, 50, 50]
for i in range(len(t_funcs)):
fp = "{0}{1}{2}".format(data_file, i + 1, data_file_extension)
matrix = generate_series_matrix(bounds[i], points[i])
data = evaluate_test_function(t_funcs[i], matrix)
write_test_data(fp, data)
def nguyen_et_al_test_functions(data_file="nguyen_et_al-f"):
t_funcs = [
"v[0] ** 3 + v[0] ** 2 + v[0]",
"v[0] ** 4 + v[0] ** 3 + v[0] ** 2 + v[0]",
"v[0] ** 5 + v[0] ** 4 + v[0] ** 3 + v[0] ** 2 + v[0]",
"v[0] ** 6 + v[0] ** 5 + v[0] ** 4 + v[0] ** 3 + v[0] ** 2 + v[0]",
"math.sin(v[0] ** 2) * math.cos(v[0]) - 1",
"math.sin(v[0]) + math.sin(v[0] + v[0] ** 2) - 1",
"math.log(v[0] + 1) + math.log(v[0] ** 2 + 1)",
"math.sqrt(v[0])",
"math.sin(v[0]) + math.sin(v[1] ** 2)",
"2 * math.sin(v[0]) * math.cos(v[1])"
]
bounds = [
[{"lower": -1, "upper": 1}],
[{"lower": -1, "upper": 1}],
[{"lower": -1, "upper": 1}],
[{"lower": -1, "upper": 1}],
[{"lower": -1, "upper": 1}],
[{"lower": -1, "upper": 1}],
[{"lower": 0, "upper": 2}],
[{"lower": 0, "upper": 4}],
[{"lower": -1, "upper": 1}, {"lower": -1, "upper": 1}],
[{"lower": -1, "upper": 1}, {"lower": -1, "upper": 1}]
]
points = [20, 20, 20, 20, 20, 20, 20, 20, 100, 100]
for i in range(len(t_funcs)):
fp = "{0}{1}{2}".format(data_file, i + 1, data_file_extension)
matrix = generate_random_matrix(bounds[i], points[i])
data = evaluate_test_function(t_funcs[i], matrix)
write_test_data(fp, data)
|
UTF-8
|
Python
| false | false | 2,014 |
3,410,204,043,125 |
c47ce4eec5783b5e09ea5592fd19b4ed33ea1f7b
|
048b57eebaa0f5f328d7cf09f33e56f876758731
|
/lfg_irl/urls.py
|
51422036c34c1b223e56a004010617bb6584b44d
|
[] |
no_license
|
jayluan/lfg
|
https://github.com/jayluan/lfg
|
8986c42216d47e1418aed923459a1685b3b32e30
|
186c33f4b231a3ed4eba02c231c432f1390a50b2
|
refs/heads/master
| 2021-01-13T12:58:39.333839 | 2014-09-25T05:45:49 | 2014-09-25T05:45:49 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
from registration.backends.default.views import RegistrationView
from LookingForGroupMain.forms import CustomRegistrationForm
from django.conf import settings
from django.conf.urls.static import static
# Register all ModelAdmins declared across installed apps.
admin.autodiscover()

urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'mysite.views.home', name='home'),
    # url(r'^mysite/', include('mysite.foo.urls')),

    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),

    # Uncomment the next line to enable the admin:
    url(r'^admin/', include(admin.site.urls)),
    url(r'^$', 'LookingForGroupMain.views.index'),
    url(r'^user/(?P<user>[a-zA-Z0-9]+)', 'LookingForGroupMain.views.user_detail', name='user_detail'),
    url(r'^login/$', 'django.contrib.auth.views.login'),
    url(r'^logout/$', 'LookingForGroupMain.views.logout_view'),
    #not sure why this works but if I just pass the URL to a def in views.py, it does not POST to the model
    url(r'^accounts/register/$', RegistrationView.as_view(form_class=CustomRegistrationForm), name='registration_register'),
    url(r'^accounts/', include('registration.backends.default.urls')),
    url(r'^accounts/', include('UserProfile.urls')),
    # url(r'^profile/(\w+)/$', 'LookingForGroupMain.views.profile')
    url(r'^groups/', include('BaseGroup.urls'))
# serve user-uploaded/static files through Django (development only)
) + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
UTF-8
|
Python
| false | false | 2,014 |
10,780,367,925,763 |
cd57bc883ee02007f58ab6190e4a9c9107f9f205
|
f0fa0faaa3d99bad141dfef5f53f28498662a000
|
/Lab2/divisors.py
|
93b72de78028ff6f205fb6646e3d585be6f7ef5e
|
[] |
no_license
|
jawad13-meet/MEET-YL1
|
https://github.com/jawad13-meet/MEET-YL1
|
e4c66ff361c8ad7889bace92f2777d24a5793467
|
85887c4b8b348eec1b698af4af58b1d6e1978427
|
refs/heads/master
| 2016-09-11T03:22:59.105275 | 2013-11-14T17:58:01 | 2013-11-14T17:58:01 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
i=int(raw_input(n))
for i in range (1,n+1):
print i
|
UTF-8
|
Python
| false | false | 2,013 |
14,972,256,011,778 |
ee7e045e2e4d66f544e3544fb7889101de1d09b3
|
b98cf351cbf9510b3a0eb622c2b8254fd69320a9
|
/doc/buildbot/master.cfg
|
3015e443a7951b4dee5935c0f5612b3d1efcfd59
|
[
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-unknown-license-reference"
] |
non_permissive
|
jesuscript/TopographicaSVN
|
https://github.com/jesuscript/TopographicaSVN
|
52540de8d678d1dd4468bdb9a09d31ed12d1d532
|
071aed6e29dac36403c89228554e02d8e34ae6b1
|
refs/heads/master
| 2020-12-25T03:01:02.359142 | 2011-10-22T11:36:26 | 2011-10-22T11:36:26 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- python -*-
# ex: set syntax=python:
# buildbot service on windows
# http://ascendwiki.cheme.cmu.edu/BuildBot
import os
# CEBALERT: any way to read this?
MAKEFILE_RELEASE = "0.9.7"
PRERELEASE = "0.9.8"
# the email address here must be reachable by the buildmaster
NOTIFICATION_EMAIL = "[email protected]"
MASTER = "fiver"
######################################################################
####### CONVENIENCE FUNCTIONS & VARIABLES
c = BuildmasterConfig = {} # buildmaster reads this; c for convenience
from buildbot.buildslave import BuildSlave
def add_slave(name,pwd):
    """Add name/pwd to list of slaves"""
    # Registers a BuildSlave on the module-level config `c`; the master
    # notifies NOTIFICATION_EMAIL if this slave goes missing.
    c['slaves'].append(
        BuildSlave(name,pwd,
                   notify_on_missing=NOTIFICATION_EMAIL))
def add_schedulers(*args):
    """Append every given scheduler to the buildmaster config."""
    c['schedulers'].extend(args)
bbpython = "/usr/bin/env python2.6"
bbepydoc = "/usr/bin/epydoc"
from string import Template
substitutions = {}
topographica_script = "./topographica"
runtests_script = "./topo/tests/runtests.py"
def runtestscmd(targets=None):
    """Build the runtests command tuple; `targets` defaults to ['all']."""
    selected = ['all'] if targets is None else targets
    return (topographica_script, "-p", "targets=%s" % selected, runtests_script)
#runtests_coverage
#substitutions.append(
### various make commands
make_clean = ("make","clean")
make_pyc_clean = ("make","clean-pyc")
make_doc = ("make","doc")
make = ("make")
# "PYTHON='/usr/bin/env python2.6'"
make_setup = ("make","PYTHON=/usr/bin/python","topographica-external-python")
make_lint_base = ("make","lint-base")
make_pyflakes = ("make","pyflakes")
make_slow_tests = runtestscmd()
make_tests = runtestscmd(targets=["unit"])
make_gui_tests = runtestscmd(targets=["gui"])
make_unopttrain_tests = ("make","unopt-train-tests")
make_speed_tests = ("make","all-speed-tests")
rsync = ("rsync","-av","topographica.svn.sourceforge.net::svn/topographica/*",".")
make_compare_oo_or = ("make","compare_oo_or")
analyse = ("/home/ceball/topographica/topographica/topographica","/home/ceball/topographica/topographica/doc/buildbot/bbanalysis.py","-c","update_timings();plot_timings();plot_startups();write_page()")
# CEBALERT: ideally analyse would use the buildbot slave copy of topographica and upload the file to the master
# rather than relying on another copy
make_or_comparisons = ("make","or_comparisons")
make_oo_or_comparisons = ("make","oo_or_comparisons")
make_dist = ("make","RELEASE=%s"%PRERELEASE,"dist")
make_dist_setup = ("make",'EPYDOC=%s'%bbepydoc,'PYTHON=%s'%bbpython,"RELEASE=%s"%PRERELEASE,"dist-setup.py")
make_dist_setup_sdist = ("make",'PYTHON=%s'%bbpython,"RELEASE=%s"%PRERELEASE,"dist-setup.py-sdist")
make_dist_setup_wininst = ("make",'PYTHON=%s'%bbpython,"RELEASE=%s"%PRERELEASE,"dist-setup.py-bdist_wininst")
make_dist_setup_msi = ("make",'PYTHON=%s'%bbpython,"RELEASE=%s"%PRERELEASE,"BDIST_WIN_CMD = bdist_msi","dist-setup.py-bdist_wininst")
make_dist__clean = ("rm","-rf","../distributions")
make_deb_svn = ("make",'EPYDOC=%s'%bbepydoc,'PYTHON=%s'%bbpython,"RELEASE=%s"%PRERELEASE,"deb")
make_deb_backports_svn = ("make",'PYTHON=%s'%bbpython,"RELEASE=%s"%PRERELEASE,"deb-backports")
make_deb_svn_ppa = ("make",'PYTHON=%s'%bbpython,"RELEASE=%s"%PRERELEASE,"deb-ppa")
make_rpm_svn = ("make",'PYTHON=%s'%bbpython,"RELEASE=%s"%PRERELEASE,"rpm")
make_no_gui = ("make","GUI=0")
make_reference_manual = ("make","reference-manual")
build_coverage = ("make","-C","external","coverage")
make_clean_coverage = ("make","clean-coverage-output")
make_coverage_html = ("make","coverage-html")
make_clean_coverage_html = ("make","clean-coverage-html")
untar = ("tar","--strip-components=1","-xvf","topographica-"+PRERELEASE+".tar.gz")
rm_currdir = ("rm","-rf","./*")
make_optional = ("make","-C","external","pyaudiolab") # and others if they're being used...
### descriptions of commands (for waterfall display)
descriptions = {
make_clean: ("cleaning","clean"),
make: ("building","build"),
make_setup: (["python","setup.py","install"]),
make_doc: ("make doc","documentation"),
make_lint_base: (["pylint","base"],["pylint","base"]),
make_pyflakes: (["pyflakes"],["pyflakes"]),
make_tests: (["unit","tests"],["unit","tests"]),
make_gui_tests: (["gui","tests"],["gui","tests"]),
make_slow_tests: (["slow","tests"],["slow","tests"]),
make_speed_tests: (["speed","tests"],["speed","tests"]),
make_unopttrain_tests: (["unoptimized","components","tests"],["unoptimized","components","tests"]),
rsync: (["rsync","svn"],["svn","rsync"]),
make_compare_oo_or: (["c++","'oo_or'","comparison"],["c++","'oo_or'","comparison"]),
analyse: (["analysing"],["analysis"]),
make_or_comparisons: (["c++","'or'","comparisons"],["c++","'or'","comparisons"]),
make_oo_or_comparisons: (["c++","'oo_or'","comparisons"],["c++","'oo_or'","comparisons"]),
make_pyc_clean: (["removing","pyc","files"],["clean-pyc"]),
make_dist: (["generating","source","distribution"],["source","distribution"]),
make_dist_setup: (["generating","setup.py","base"],["setup.py","base"]),
make_dist_setup_sdist: (["generating","setup.py","distribution"],["setup.py","distribution"]),
make_dist_setup_wininst: (["generating","exe","distribution"],["exe","distribution"]),
make_dist_setup_msi: (["generating","msi","distribution"],["msi","distribution"]),
make_dist__clean: (["removing","distributions","dir"],["delete","distributions","dir"]),
make_deb_svn: (["generating","deb"],["deb"]),
make_deb_backports_svn: (["backporting","deb"],["deb","backports"]),
make_deb_svn_ppa: (["uploading","to","launchpad"],["launchpad","PPA"]),
make_rpm_svn: (["rpm","spec"],["rpm","spec"]),
make_no_gui: (["building","without","gui"],["build","no","gui"]),
make_reference_manual: (["generating","reference","manual"],["reference","manual"]),
build_coverage: (["build","coverage"],["build","coverage"]),
make_clean_coverage : (["clean","coverage","output"],["clean","coverage","output"]),
make_coverage_html : (["coverage","report"],["coverage","report"]),
make_clean_coverage_html : (["clean","coverage","html"],["clean","coverage","html"]),
untar : (["untar"],["untar"]),
rm_currdir: (["clean","clean"]),
make_optional: (["build","optional","components"],["build","optional","components"])}
from buildbot.steps.source import SVN
from buildbot.steps.shell import ShellCommand
def cvs_checkout(factory_,mode="update",**args):
factory_.addStep(SVN,
svnurl=svnurl,
mode=mode,
haltOnFailure=True,
retry=(10,2),
**args)
def add_command(factory_,command_,xvfb=True,timeout=1200,
                haltOnFailure=True,flunkOnFailure=True,flunkOnWarnings=False,warnOnFailure=False,warnOnWarnings=True,
                coverage=False,descr=None,
                **args):
    """Append a ShellCommand step running command_ to factory_.

    Descriptions come from the module-level `descriptions` table unless
    `descr` overrides both. `coverage=True` injects coverage flags into
    topographica/make commands. Commands are prefixed with `nice` and/or
    `xvfb-run -a` depending on factory_._NICE / factory_._NO_XVFB flags.
    """
    if descr is None:
        happening_description,finished_description = descriptions[command_][0],descriptions[command_][1]
    else:
        happening_description,finished_description = descr,descr

    # ensure cmd is a list (to make simpler any necessary alterations to the command)
    if isinstance(command_,str):
        cmd = []
        cmd.append(command_)
    else:
        cmd = list(command_)

    if coverage:
        if cmd[0]==topographica_script:
            cmd.insert(1,"-p")
            cmd.insert(2,"coverage=True")
        elif cmd[0]=="make":
            cmd.insert(1,"COVERAGE=1")
        else:
            # BUG FIX: a bare `raise` with no active exception is itself an
            # error; raise a meaningful exception instead.
            raise ValueError("coverage is only supported for topographica "
                             "or make commands: %r" % (cmd,))

    if hasattr(factory_,'_NICE') and factory_._NICE is True:
        cmd.insert(0,"nice")

    if xvfb and not (hasattr(factory_,'_NO_XVFB') and factory_._NO_XVFB is True):
        cmd.insert(0,"xvfb-run")
        cmd.insert(1,"-a")

    factory_.addStep(ShellCommand,command=cmd,description=happening_description,timeout=timeout,
                     descriptionDone=finished_description,
                     haltOnFailure=haltOnFailure,flunkOnFailure=flunkOnFailure,flunkOnWarnings=flunkOnWarnings,
                     warnOnFailure=warnOnFailure,warnOnWarnings=warnOnWarnings,
                     **args)
def tests(build_factory):
    """Add pyc-clean and unit-test steps to build_factory."""
    # CEBALERT: won't work on Windows until pyc clean is in python script
    add_command(build_factory,make_pyc_clean)
    add_command(build_factory,make_tests,haltOnFailure=True)
def optional_tests(build_factory):
    """Build optional components, then re-run unit tests (failures only warn)."""
    add_command(build_factory,make_optional,timeout=2400,haltOnFailure=True,flunkOnFailure=False,warnOnFailure=True)
    add_command(build_factory,make_tests,flunkOnFailure=False,warnOnFailure=True)
def slow_tests(build_factory,lite=True):
    """Add pyc-clean + slow-test steps; lite=False also runs unopt-train tests."""
    # CEBALERT: as above about pyc
    add_command(build_factory,make_pyc_clean)
    add_command(build_factory,make_slow_tests,haltOnFailure=False)
    if lite is False:
        add_command(build_factory,make_unopttrain_tests)
def _localepydochack(cmdin,local):
    """Return cmdin unchanged for local builds; otherwise inject EPYDOC=... at index 1."""
    if local:
        return cmdin
    patched = list(cmdin)
    patched.insert(1, "EPYDOC=%s" % bbepydoc)
    return tuple(patched)
def builddocs(build_factory,suppress_doc_warn=False,local=True,halt_at_failure=False):
    """Add doc and reference-manual build steps to build_factory.

    suppress_doc_warn silences warnings on doc failure (used on OS X);
    local=False routes the steps through the master's epydoc binary;
    halt_at_failure makes doc failures fatal instead of advisory.
    """
    if suppress_doc_warn:
        # For OS X, til we make doc building easy
        warnOnFailure=False
    else:
        warnOnFailure=True

    # substitute the master's epydoc path when building non-locally,
    # and reuse the original commands' waterfall descriptions
    new_make_doc = _localepydochack(make_doc,local)
    descriptions[new_make_doc] = descriptions[make_doc]
    new_make_reference_manual = _localepydochack(make_reference_manual,local)
    descriptions[new_make_reference_manual] = descriptions[make_reference_manual]

    if halt_at_failure:
        flunkOnFailure=haltOnFailure=True
    else:
        flunkOnFailure=haltOnFailure=False

    add_command(build_factory,new_make_doc,warnOnFailure=warnOnFailure,flunkOnFailure=flunkOnFailure,haltOnFailure=haltOnFailure)
    add_command(build_factory,new_make_reference_manual,haltOnFailure=haltOnFailure,flunkOnFailure=flunkOnFailure,warnOnFailure=warnOnFailure)
def build(build_factory,setup=False,gui=True):
    """Add a build step: setup.py-based when setup=True, else make (GUI optional)."""
    if setup:
        add_command(build_factory,make_setup,haltOnFailure=True)
    else:
        if gui:
            mcommand = make
        else:
            mcommand = make_no_gui
        add_command(build_factory,mcommand,timeout=1*60*60,haltOnFailure=True)
##
##def wintests(build_factory):
## cvs_checkout(build_factory)
## add_command(build_factory,make_pyc_clean)
## add_command(build_factory,make_tests,haltOnFailure=False)
## add_command(build_factory,make_gui_tests,haltOnFailure=False)
### add_command(build_factory,make_slow_tests,timeout=3600,haltOnFailure=False)
### add_command(build_factory,make_snapshot_tests,haltOnFailure=False)
### add_command(build_factory,make_unopttrain_tests,timeout=3600,haltOnFailure=False)
##
######################################################################
######################################################################
######################################################################
####### GENERAL
### SVN
TOPOROOT = "https://topographica.svn.sourceforge.net/svnroot/topographica"
svnurl = TOPOROOT+"/trunk/topographica"
from buildbot.changes.svnpoller import SVNPoller
c['change_source']=SVNPoller(svnurl=svnurl,pollinterval=300)
### Connection
c['slavePortnum'] = "tcp:9989" # 9989:interface=127.0.0.1 for local only
### Status
c['projectName'] = "Topographica"
c['projectURL'] = "http://www.topographica.org/"
c['buildbotURL'] = "http://buildbot.topographica.org/"
c['status'] = []
from buildbot.status.html import WebStatus
c['status'].append(WebStatus(8010,allowForce=True))
from buildbot.status import mail
c['status'].append(mail.MailNotifier(fromaddr=NOTIFICATION_EMAIL,
# relayhost=
mode='failing',
extraRecipients=[NOTIFICATION_EMAIL],
sendToInterestedUsers=False))
#
######################################################################
######################################################################
######################################################################
####### BUILDSLAVES (i.e. available machines)
c['slaves' ] = []
for n,p in [("doozy","PASSWD"),
("lodestar","PASSWD"),
("jupiter3","PASSWD"),
("fiver","PASSWD"),
# ("temporary","PASSWD")
]:
add_slave(n,p)
# CEBALERT: this version of BB (0.7) does not expand things like ~ on
# the slave. If a future version does, should remove this and just
# use ~ in the HOME path. Or might be able to use $HOME even now; not
# sure.
slave_homes = dict(
fiver = "/home/ceball",
doozy = "/home/ceball",
lodestar = "/home/s0454615",
jupiter3 = "/home/s0454615"
)
# one build at a time on all machines
from buildbot import locks
doozy_lock = locks.SlaveLock("doozy",maxCount=1)
lodestar_lock = locks.SlaveLock("lodestar",maxCount=1)
#cloud_lock = locks.SlaveLock("cloud",maxCount=1)
fiver_lock = locks.SlaveLock("fiver",maxCount=1)
jupiter3_lock = locks.SlaveLock("jupiter3",maxCount=1)
######################################################################
######################################################################
######################################################################
####### BUILDERS
from buildbot.process.factory import BuildFactory
builders = []
def define_builder(name,slavename,locks,builddir=None,env=None):
    """Return a builder-spec dict for the given slave, with an isolated HOME.

    builddir defaults to the builder name.  HOME is pointed at a
    per-builder fake home under the slave's home directory.
    NOTE(review): mutates a caller-supplied env dict in place.
    """
    if env is None:
        env = {}
    env.update({'HOME':"%s/.buildbot_fake_home/%s"%(slave_homes[slavename],name)})
    if builddir is None:
        builddir = name
    return dict(name=name,slavename=slavename,builddir=builddir,factory=BuildFactory(),locks=locks,env=env)
backups = define_builder("backups","doozy",[doozy_lock],builddir="topographica_svnroot")
add_command(backups['factory'],rsync,xvfb=False)
##reference = {
## 'name': "reference",
## 'slavename': "doozy",
## 'builddir': "reference",
## 'factory': BuildFactory(),
## 'locks':[doozy_lock]
## }
##cvs_checkout(reference['factory'])
##reference['factory']._NICE=True
##add_command(reference['factory'],make,timeout=2400)
###add_command(reference['factory'],make_or_comparisons,timeout=2*60*60)
##add_command(reference['factory'],make_oo_or_comparisons,timeout=2*60*60)
##
x86_64_DICE_SL5 = define_builder("x86_64_DICE_SL5","jupiter3",[jupiter3_lock])
x86_64_DICE_SL5['factory']._NICE=True
cvs_checkout(x86_64_DICE_SL5['factory'],mode="clobber")
build(x86_64_DICE_SL5['factory'])
slow_tests(x86_64_DICE_SL5['factory'],lite=False)
optional_tests(x86_64_DICE_SL5['factory'])
builddocs(x86_64_DICE_SL5['factory'])
x86_DICE_FC13_setup = define_builder("x86_DICE_FC13_setup","lodestar",[lodestar_lock])
x86_DICE_FC13_setup['factory']._NICE=True
cvs_checkout(x86_DICE_FC13_setup['factory'],mode="clobber")
build(x86_DICE_FC13_setup['factory'],setup=True)
slow_tests(x86_DICE_FC13_setup['factory'])
x86_64_UbuntuLucid_setup = define_builder("x86_64_UbuntuLucid_setup","doozy",[doozy_lock])
x86_64_UbuntuLucid_setup['factory']._NICE=True
# deliberately an updating one to see pyc problems etc (cf DICE setup)
cvs_checkout(x86_64_UbuntuLucid_setup['factory'],mode="update")
build(x86_64_UbuntuLucid_setup['factory'],setup=True)
slow_tests(x86_64_UbuntuLucid_setup['factory'])
#x86_UbuntuNatty_setup = {
# 'name': "x86_UbuntuNatty_setup",
# 'slavename': "ear",
# 'builddir': "x86_UbuntuNatty_setup",
# 'factory': BuildFactory(),
# 'locks':[doozy_lock]
# }
#x86_UbuntuNatty_setup['factory']._NICE=True
#build_then_slowtests(x86_UbuntuNatty_setup['factory'],setup=True,lite=True)
##x86_64_SL55_noX = {
## 'name': "x86_64_SL55_noX",
## 'slavename': "eddiealike",
## 'builddir': "x86_64_SL5.5_noX",
## 'factory': BuildFactory(),
## 'locks':[doozy_lock]
##}
##x86_64_SL55_noX['factory']._NO_XVFB=True
##build(x86_64_SL55_noX['factory'],gui=False)
##
#build_then_slowtests(x86_64_UbuntuNatty_setup['factory'],gui=False,suppress_doc_warn=True)
##optional_tests(x86_64_UbuntuNatty_setup['factory'])
from buildbot.steps.transfer import FileDownload
from buildbot.steps.shell import WithProperties
def add_targz_download_extract(factory):
    # Wipe the slave's working dir, download the source tarball previously
    # uploaded to the master by the 'archives' builder (named by svn
    # revision), and unpack it ready for a setup.py install.
    add_command(factory,rm_currdir)
    factory.addStep(FileDownload(mastersrc=WithProperties("public_html/dist/setup.py/topographica-"+PRERELEASE+"~r%(revision)s.tar.gz"),
                                 slavedest="topographica-"+PRERELEASE+".tar.gz"))
                                 #haltOnFailure=True,flunkOnFailure=True)
    add_command(factory,untar)
    #haltOnFailure=True,flunkOnFailure=True)
def setuppy_install(factory, python_path):
    """Add a step that installs the unpacked tarball with the given Python
    interpreter into a local prefix (./local/)."""
    install_cmd = (python_path, "setup.py", "install", "--prefix=./local/")
    add_command(factory, install_cmd, descr = "install")
    #haltOnFailure=True,flunkOnFailure=True)
# CEBALERT: merge with make_tests!
# CEBALERT: merge with make_tests!
def setuppy_tests(factory,pyver="2.7"):
    # Run the slow test suite against the locally installed copy (not the
    # source tree): PYTHONPATH points at the ./local site-packages.
    # CEBALERT: ${PATH} expansion not supported until newer version of
    # buildbot (0.8?). Remove ./local/bin/ when upgrading.
    add_command(factory,("./local/bin/topographica","-c","import topo.tests.runtests as R; R.start()"),
                descr = "slow tests",
                env= {#"PATH":"./local/bin:${PATH}",
                      "PYTHONPATH":"./local/lib/python%s/site-packages/"%pyver})
# Tarball install test against the EPD 7 Python distribution (RHEL5 x86_64):
# download the released tar.gz, setup.py-install it, run the slow tests.
EPD7_rh5_x86_64 = define_builder("EPD7_rh5_x86_64","jupiter3",[jupiter3_lock])
EPD7_rh5_x86_64['factory']._NICE=True
add_targz_download_extract(EPD7_rh5_x86_64['factory'])
setuppy_install(EPD7_rh5_x86_64['factory'],"/disk/scratch/v1cball/bbsupport/epd-7.1-2-rh5-x86_64/bin/python")
setuppy_tests(EPD7_rh5_x86_64['factory'],pyver="2.7")
#PythonXY26_Win7_x86_64 = {
# 'name': "PythonXY26_Win7_x86_64",
# 'slavename': "temporary",
# 'builddir': "PythonXY26_Win7_x86_64",
# 'factory': BuildFactory(),
# 'locks':[doozy_lock,temporary_lock]
# }
#
#EPD7_Win7_x86_64 = {
# 'name': "EPD7_Win7_x86_64",
# 'slavename': "temporary",
# 'builddir': "EPD_Win7_x86_64",
# 'factory': BuildFactory(),
# 'locks':[doozy_lock]
# }
#EPD7_Win7_x86_64['factory']._NO_XVFB=True
##add_exe_download(EPD7_Win7_x86_64['factory'])
##setuppy_exe_install(EPD7_Win7_x86_64['factory'])
##setuppy_exe_tests(EPD7_Win7_x86_64['factory'])
#
# 'docs' builder: clean checkout, setup.py build, quick tests, then a full
# doc build whose output is published under the master's public_html.
docs = define_builder("docs",MASTER,[fiver_lock]) # CEBALERT: master lock
cvs_checkout(docs['factory'],mode="clobber")
build(docs['factory'],setup=True)
tests(docs['factory'])
builddocs(docs['factory'],local=False,halt_at_failure=True)
from buildbot.steps.transfer import DirectoryUpload
# Make doc available on web
docs['factory'].addStep(DirectoryUpload(slavesrc="doc",masterdest="/var/lib/buildbot/master/public_html/doc"))
# 'coverage' builder: runs the test suites under coverage three times with
# increasing scope (unit; +gui; +slow/unoptimised), publishing an HTML
# coverage report on the master after each stage.
coverage = define_builder("coverage",MASTER,[fiver_lock]) # CEBALERT: master lock
cvs_checkout(coverage['factory'],mode="copy")
add_command(coverage['factory'],make_pyc_clean)
build(coverage['factory'])
add_command(coverage['factory'],build_coverage,flunkOnFailure=True,haltOnFailure=True)
add_command(coverage['factory'],make_clean_coverage,flunkOnFailure=True,haltOnFailure=True)
# CEBALERT: use tests(halt=False)
add_command(coverage['factory'],make_tests,haltOnFailure=False,flunkOnFailure=False,coverage=True)
from buildbot.steps.master import MasterShellCommand
add_command(coverage['factory'],make_coverage_html,flunkOnFailure=True,haltOnFailure=True)
coverage['factory'].addStep(MasterShellCommand(command="""rm -rf /var/lib/buildbot/master/public_html/coverage/unittests"""))
# CEBALERT: hack to default dir; depends on allowing easy override
coverage['factory'].addStep(DirectoryUpload(slavesrc="/home/ceball/.buildbot_fake_home/coverage/topographica/tests/coverage_html",masterdest="/var/lib/buildbot/master/public_html/coverage/unittests"))
add_command(coverage['factory'],make_gui_tests,haltOnFailure=False,flunkOnFailure=False,coverage=True)
add_command(coverage['factory'],make_clean_coverage_html,haltOnFailure=True,flunkOnFailure=True)
add_command(coverage['factory'],make_coverage_html,flunkOnFailure=True,haltOnFailure=True)
coverage['factory'].addStep(MasterShellCommand(command="""rm -rf /var/lib/buildbot/master/public_html/coverage/unittests_guitests"""))
coverage['factory'].addStep(DirectoryUpload(slavesrc="/home/ceball/.buildbot_fake_home/coverage/topographica/tests/coverage_html",masterdest="/var/lib/buildbot/master/public_html/coverage/unittests_guitests"))
add_command(coverage['factory'],make_slow_tests,haltOnFailure=False,flunkOnFailure=False,coverage=True)
add_command(coverage['factory'],make_unopttrain_tests,haltOnFailure=False,flunkOnFailure=False,coverage=True)
add_command(coverage['factory'],make_clean_coverage_html,haltOnFailure=True,flunkOnFailure=True)
add_command(coverage['factory'],make_coverage_html,flunkOnFailure=True,haltOnFailure=True)
coverage['factory'].addStep(MasterShellCommand(command="""rm -rf /var/lib/buildbot/master/public_html/coverage/unittests_guitests_slowtests_unopttests"""))
coverage['factory'].addStep(DirectoryUpload(slavesrc="/home/ceball/.buildbot_fake_home/coverage/topographica/tests/coverage_html",masterdest="/var/lib/buildbot/master/public_html/coverage/unittests_guitests_slowtests_unopttests"))
#SnowLeopard = {
#       'name': "SnowLeopard",
#       'slavename': "vmac",
#       'builddir': "SnowLeopard",
#       'factory': BuildFactory(),
#       'locks':[doozy_lock]
#       }
#SnowLeopard['factory']._NO_XVFB=True # CB: would be better to set no xvfb on the slave
#build_then_slowtests(SnowLeopard['factory'],suppress_doc_warn=True)
#optional_tests(SnowLeopard['factory'])
# 'performance' builder: speed tests plus lint/pyflakes, all non-fatal
# (warnOnFailure only) so regressions show up without turning the build red.
performance = define_builder("performance",'doozy',[doozy_lock])
cvs_checkout(performance['factory'])
build(performance['factory'])
add_command(performance['factory'],make_speed_tests,haltOnFailure=False,flunkOnFailure=False,warnOnFailure=True)
##add_command(performance['factory'],analyse,xvfb=False,timeout=180,haltOnFailure=False,flunkOnFailure=False,warnOnFailure=True)
add_command(performance['factory'],make_lint_base,haltOnFailure=False,flunkOnFailure=False,warnOnFailure=True)
add_command(performance['factory'],make_pyflakes,haltOnFailure=False,flunkOnFailure=False,warnOnFailure=True)
from buildbot.steps.transfer import FileUpload
#from buildbot.process.properties import WithProperties
# 'archives' builder: produce the release artefacts (tar.gz / Windows
# installer / RPM spec) and upload them to the master, both under
# revision-stamped names and as "current" copies for the build services.
archives = define_builder("archives","fiver",[fiver_lock])
cvs_checkout(archives['factory'],mode="clobber")
# CEBALERT: want to make this work with system python, but need to
# change things like doc/Makefile to work.
#build(archives['factory'])
add_command(archives['factory'],make_dist__clean)
add_command(archives['factory'],make_dist_setup)
add_command(archives['factory'],make_dist_setup_sdist)
add_command(archives['factory'],make_dist_setup_wininst)
#add_command(archives['factory'],make_dist_setup_msi)
add_command(archives['factory'],make_rpm_svn) # CEBALERT: rename to make_dist_setup_bdistrpm
# Revision-stamped uploads (consumed by the EPD tarball-install builder).
archives['factory'].addStep(FileUpload(
    slavesrc="../distributions/topographica-%s/dist/topographica-%s.tar.gz"%(PRERELEASE,PRERELEASE),
    masterdest=WithProperties("public_html/dist/setup.py/topographica-"+PRERELEASE+"~r%(got_revision)s.tar.gz")))
archives['factory'].addStep(FileUpload(
    slavesrc="../distributions/topographica-%s/dist/topographica-%s.win.exe"%(PRERELEASE,PRERELEASE),
    masterdest=WithProperties("public_html/dist/exe/topographica-"+PRERELEASE+"~r%(got_revision)s.win.exe")))
archives['factory'].addStep(FileUpload(
    slavesrc="../distributions/topographica-%s/dist/topographica.spec"%PRERELEASE,
    masterdest=WithProperties("public_html/dist/rpm/topographica-"+PRERELEASE+"~r%(got_revision)s.spec")))
### current versions (e.g. for build services)
archives['factory'].addStep(FileUpload(
    slavesrc="../distributions/topographica-%s/dist/topographica-%s.tar.gz"%(PRERELEASE,PRERELEASE),
    masterdest=WithProperties("public_html/dist/current/topographica-"+PRERELEASE+".tar.gz")))
archives['factory'].addStep(FileUpload(
    slavesrc="../distributions/topographica-%s/dist/topographica.spec"%PRERELEASE,
    masterdest=WithProperties("public_html/dist/current/topographica-"+PRERELEASE+".spec")))
archives['factory'].addStep(FileUpload(
    slavesrc="../distributions/topographica-%s/dist/topographica-%s.win.exe"%(PRERELEASE,PRERELEASE),
    masterdest=WithProperties("public_html/dist/current/topographica-"+PRERELEASE+".win.exe")))
###
# Might need this:
# osc rebuildpac home:ceball:topographica-unstable
from buildbot.steps import trigger
# Once artefacts are uploaded, kick off the tarball-install test builder.
archives['factory'].addStep(trigger.Trigger(schedulerNames=['test-archives'],
                            waitForFinish=False))
# 'packages' builder: Debian/Ubuntu package builds (plain, backports, PPA).
packages = define_builder("packages","fiver",[fiver_lock])
packages['factory']._NO_XVFB=True # CEBALERT: necessary for gpg caching to work on Ubuntu!
#build(packages['factory'],mode="update")
cvs_checkout(packages['factory'],mode="clobber")
add_command(packages['factory'],make_dist__clean)
add_command(packages['factory'],make_deb_svn)
add_command(packages['factory'],make_deb_backports_svn)
add_command(packages['factory'],make_deb_svn_ppa)
# CEBALERT: in a future version of buildbot, should be able to upload *.rpm
#packages['factory'].addStep(
#    FileUpload(
#        slavesrc=WithProperties("../distributions/topographica-"+PRERELEASE+"~r%(got_revision)s-1.noarch.rpm"),
#        masterdest=WithProperties("public_html/dist/rpm/topographica-"+PRERELEASE+"~r%(got_revision)s-l.noarch.rpm")))
# Register all active builders with the master.
c['builders'] = [
    #### builds from source
    x86_64_DICE_SL5,
#    SnowLeopard,
    # Add some old linux without X
    #### setup.py builds
    x86_DICE_FC13_setup,
    x86_64_UbuntuLucid_setup,
#    x86_UbuntuNatty_setup,
    EPD7_rh5_x86_64,
#    EPD7_Win7_x86_64,
#    EPD7_OSX_64,
    #### others
    performance,
#    reference,
    backups,
    archives,
    packages,
    docs,
    coverage]
######################################################################
######################################################################
######################################################################
####### SCHEDULERS
from buildbot.scheduler import Nightly,Periodic,Triggerable
c['schedulers'] = []
# Nightly cadence: docs+performance every night, backups at 2am, the full
# from-source build three nights a week, and the heavyweight package/
# coverage runs on Sundays.  'test-archives' is fired by the archives
# builder once new artefacts have been uploaded.
# NOTE(review): the leading-zero literals (03, 02, 01, 00) are Python 2
# octal syntax and would be a SyntaxError under Python 3.
add_schedulers(
#    Nightly('workday', ['performance'],hour=[15], minute=00),
    Nightly("nightly-build",["docs","performance"],hour=03,minute=00),
    Nightly("nightly-backup",["backups"],hour=02,minute=00),
    Nightly("one-in-3-nightly",["x86_64_DICE_SL5"],dayOfWeek=[0,3,5],hour=03,minute=00),
    Nightly("sunday-nightly",["packages","archives","x86_DICE_FC13_setup","coverage","x86_64_UbuntuLucid_setup"],dayOfWeek=6,hour=01,minute=00),
    Triggerable(name="test-archives",
                builderNames=["EPD7_rh5_x86_64"])
    )
######################################################################
######################################################################
|
UTF-8
|
Python
| false | false | 2,011 |
3,410,204,064,139 |
3ad403408600149d578d5643198c9ab6c9246d71
|
b9c066d0ef8a49770dcbfb520617a81b5b042286
|
/apps/dnevniktools/dbutils.py
|
50982e0154edea3e5d1f8b29530ef13d0af22186
|
[] |
no_license
|
ZhSulta/dnevnik
|
https://github.com/ZhSulta/dnevnik
|
5fbea2d853fed76909564e3c56a952d1104ee13d
|
df7c6b892e1f3f0baac31af91444e2f93bf90a53
|
refs/heads/master
| 2020-04-28T00:31:53.204168 | 2012-02-23T07:29:05 | 2012-02-23T07:29:05 | 3,445,603 | 3 | 3 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.db import connection
def get_nextautoincrement( mymodel ):
    """Return the next AUTO_INCREMENT value for ``mymodel``'s table,
    or None if the table is not found.

    NOTE(review): the query reads from ``temporary.tables``; for MySQL the
    canonical source of Auto_increment is ``information_schema.TABLES`` --
    confirm the schema name against the deployed database.
    """
    cursor = connection.cursor() #@UndefinedVariable
    try:
        # Parameterised query: the original interpolated the table name
        # with %-formatting, which is an SQL-injection hazard.
        cursor.execute(
            "SELECT Auto_increment FROM temporary.tables WHERE table_name = %s",
            [mymodel._meta.db_table])
        row = cursor.fetchone()
    finally:
        # Close the cursor even if execute() raises (the original leaked it).
        cursor.close()
    return row[0] if row else None
|
UTF-8
|
Python
| false | false | 2,012 |
6,914,897,354,821 |
e319ad7d3d938988c717f2c1a9868445027b5b8d
|
62825974637b17f8eeadb7fbabb716b9c39c2360
|
/playerlist.py
|
742b8456dc3a1ccf1d13778cbfad1f5249df172a
|
[] |
no_license
|
TheBB/simul
|
https://github.com/TheBB/simul
|
059600fc71f52a5ed9e8b3380237052a9713c713
|
3446118870eba86ba9e9b2cac0734372a70fcfa0
|
refs/heads/master
| 2021-01-22T19:30:53.633812 | 2012-12-19T20:24:37 | 2012-12-19T20:24:37 | 6,141,990 | 5 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
try:
import pyreadline as readline
except ImportError:
import readline
from scipy.stats import norm
from math import sqrt
import simul
debug = False
def get_elo(s=''):
    """Prompt for an Elo rating (optionally labelled by ``s``, e.g. 'vT').

    Returns the rating as a float, or False if the user enters nothing.
    Re-prompts until a valid number is given.
    """
    elo = None
    while elo is None:
        try:
            raw = simul.better_input('Elo' + (' ' if s != '' else '') + s + ': ',
                                     swipe=True)
            if raw.strip().lower() == '':
                return False
            elo = float(raw)
        except ValueError:
            # Not a number -- ask again.  (The original used a bare
            # ``except:``, which also swallowed KeyboardInterrupt, and a
            # ``-1`` sentinel, which made a legitimate rating of -1
            # impossible to enter.)
            elo = None
    return elo
def get_dev(s=''):
    """Prompt for a rating deviation (optionally labelled by ``s``).

    Returns the deviation as a float, or False if the user enters nothing.
    Re-prompts until a valid number is given.
    """
    dev = None
    while dev is None:
        try:
            raw = simul.better_input('Dev' + (' ' if s != '' else '') + s + ': ',
                                     swipe=True)
            if raw.strip().lower() == '':
                return False
            dev = float(raw)
        except ValueError:
            # Not a number -- ask again.  (The original's bare ``except:``
            # and ``-1`` sentinel had the same problems as get_elo's.)
            dev = None
    return dev
def get_player(i, finder=None):
    """Interactively build Player number ``i``.

    ``finder``, if given, maps a name to a list of database records (dicts
    with name/race/team/elo*/dev* keys); matches are offered for
    confirmation.  Entering '-' as the name returns a BYE placeholder.
    Without a database match, race and ratings are entered manually.
    """
    print('Entering player ' + str(i))
    result = None
    while result is None:
        name = simul.better_input('Name: ')
        if name == '-':
            print('')
            # Placeholder opponent with a rating low enough to always lose.
            return Player('BYE', 'T', -10000, 0, 0, 0)
        results = []
        if finder is not None:
            results = finder(name)
        if results is not None and len(results) > 0:
            pl = len(results) > 1
            print('Possible match' + ('es' if pl else '') + ':')
            i = 1
            for res in results:
                print((str(i) + ': ' if pl else '') + res['name'] + ' ('\
                        + res['race'] + ') from '\
                        + res['team'] + ' (' + ('%.2f'%res['elo']) + ', '\
                        + ('%.2f'%res['elo_vt']) + ', '\
                        + ('%.2f'%res['elo_vz']) + ', '\
                        + ('%.2f'%res['elo_vp']) + ')')
                i += 1
            if pl:
                s = 'Which is correct? (1-' + str(len(results)) + ', 0 for none) '
                choice = simul.better_input(s, swipe=True)
                if choice == 'y':
                    result = results[0]
                elif int(choice) > 0:
                    result = results[int(choice)-1]
            else:
                choice = simul.better_input('Accept? (y/n) ', swipe=True)
                if choice.lower() == 'y':
                    result = results[0]
        elif finder is not None:
            # finder ran but produced nothing usable.
            if results == []:
                print('No matches for \'' + name + '\' in database.')
            elif results is None:
                print('Unable to consult database.')
        elif finder is None:
            break
    if result is not None:
        name = result['name']
        race = result['race']
        elo = result['elo']
        elo_vt = result['elo_vt']
        elo_vz = result['elo_vz']
        elo_vp = result['elo_vp']
        dev = result['dev']
        dev_vp = result['dev_vp']
        dev_vt = result['dev_vt']
        dev_vz = result['dev_vz']
    else:
        race = ''
        while race not in ['P', 'Z', 'T']:
            race = simul.better_input('Race: ', swipe=True).upper()
        elo = get_elo()
        # get_elo() returns False for a blank entry.  Must compare with
        # ``is``: the original's ``elo == False`` was also true for a
        # legitimate rating of 0.
        if elo is False:
            elo = 0
            elo_vt = 0
            elo_vz = 0
            elo_vp = 0
            dev = 0.6
            dev_vt = 0.6
            dev_vp = 0.6
            dev_vz = 0.6
        else:
            elo_vt = get_elo('vT')
            elo_vz = get_elo('vZ')
            elo_vp = get_elo('vP')
            dev = get_dev()
            # BUG FIX: these three lines previously called the nonexistent
            # helpers get_dev_vp/get_dev_vt/get_dev_vz, raising NameError
            # as soon as a rating was entered manually.
            dev_vt = get_dev('vT')
            dev_vz = get_dev('vZ')
            dev_vp = get_dev('vP')
    print('')
    return Player(name, race, elo, elo_vp, elo_vt, elo_vz, dev, dev_vp, dev_vt, dev_vz)
class Player:
    """A competitor with an overall Elo rating plus per-opponent-race offsets.

    ``elo`` is the global rating; ``elo_race[r]`` is added when facing an
    opponent of race ``r`` ('P', 'T' or 'Z').  ``dev`` and ``dev_race`` are
    the matching rating deviations.  ``flag`` starts at -1; its use is up to
    callers (not visible in this module).
    """

    def __init__(self, name='', race='', elo=0, elo_vp=0, elo_vt=0, elo_vz=0,
                 dev=0.6, dev_vp=0.6, dev_vt=0.6, dev_vz=0.6, copy=None):
        if copy == None:
            self.name = name
            self.race = race
            self.elo = elo
            self.elo_race = {'P': elo_vp, 'T': elo_vt, 'Z': elo_vz}
            self.dev = dev
            self.dev_race = {'P': dev_vp, 'T': dev_vt, 'Z': dev_vz}
            self.flag = -1
        else:
            # NOTE: the rating dicts are shared with the source object --
            # copy() is a shallow copy, matching the original behaviour.
            self.name = copy.name
            self.race = copy.race
            self.elo = copy.elo
            self.elo_race = copy.elo_race
            self.dev = copy.dev
            self.dev_race = copy.dev_race
            self.flag = copy.flag

    def prob_of_winning(self, opponent):
        """Return P(self beats opponent) under a normal model on the
        race-adjusted Elo difference, with variance 1 plus both players'
        (squared) deviations."""
        # (Removed an unused local ``mix = 0.3`` from the original.)
        my_elo = self.elo + self.elo_race[opponent.race]
        op_elo = opponent.elo + opponent.elo_race[self.race]
        my_dev = self.dev**2 + self.dev_race[opponent.race]**2
        op_dev = opponent.dev**2 + opponent.dev_race[self.race]**2
        return norm.cdf(my_elo - op_elo, scale=sqrt(1+my_dev+op_dev))

    def copy(self):
        """Return a new Player mirroring this one (shallow copy)."""
        return Player(copy=self)
class PlayerList:
    """Collect ``num`` Player objects, either interactively via get_player()
    or, when the module-level ``debug`` flag is set, by generating synthetic
    players with steadily increasing ratings."""

    def __init__(self, num, finder=None):
        self.players = []
        counter = 1
        while len(self.players) < num:
            if debug:
                fake = Player('player' + str(counter), 'T',
                              0.1*counter, 0.150*counter,
                              0.1*counter, 0.1*counter)
                self.players.append(fake)
            else:
                position = len(self.players) + 1
                self.players.append(get_player(position, finder))
            counter += 1
|
UTF-8
|
Python
| false | false | 2,012 |
4,947,802,342,956 |
4ffc549c5ef6ea668bd07307ba0a152a73a65926
|
7f19441231e92586b3eaa9a06ee8d26ebf87afeb
|
/NemPyTracker/eggbot_scanlinux.py
|
02be23470eb89bb844b35630fab7a10fbbf386f1
|
[] |
no_license
|
vsimonis/gui-tracker
|
https://github.com/vsimonis/gui-tracker
|
4bd99ac972fefd2447cf034131fcc6b19a130ba3
|
03558722b6828abdfa64d07680e0824b9d154693
|
refs/heads/master
| 2016-09-05T22:26:43.741712 | 2014-08-24T14:44:30 | 2014-08-24T14:44:30 | 21,278,501 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
import logging
# Module-level logger for the device-scanning helpers below.
logger = logging.getLogger('scan')
#logger.setLevel(logging.DEBUG)
# Roots of the Linux device tree and the sysfs USB device tree.
DEV_TREE = '/dev'
USB_DEVICE_TREE = '/sys/bus/usb/devices'
def findEiBotBoards():
    """Find only those USB devices that declare themselves to be EiBotBoards"""
    logger.debug('Starting find EBB')
    # fgrep -l prints (one per line) the sysfs 'product' files that contain
    # the EiBotBoard product string; each file's directory is one device.
    grep_cmd = 'fgrep -l EiBotBoard %s/*/product' % USB_DEVICE_TREE
    with os.popen( grep_cmd ) as pipe:
        device_dirs = [os.path.split(line)[0] for line in pipe.readlines()]
    for device_dir in device_dirs:
        device = os.path.split( device_dir )[1]
        # Walk this device's endpoint directories looking for tty support.
        for entry in os.listdir( device_dir ):
            if not entry.startswith( device ):
                continue
            ttydir = os.path.join( USB_DEVICE_TREE, device, entry, 'tty' )
            logger.debug('checking endpoint: %s' % ttydir)
            if os.path.exists( ttydir ):
                # Emit the /dev path of each tty interface found.
                for ttyname in os.listdir( ttydir ):
                    logger.debug('Checking ttyname: %s' % ttyname)
                    yield os.path.join( DEV_TREE, ttyname )
def findPorts():
    """Yield the full /dev path of every ttyACM* serial device."""
    for entry in os.listdir( DEV_TREE ):
        if entry.startswith( 'ttyACM' ):
            yield os.path.join( DEV_TREE , entry )
# Ad-hoc self-test: log any EiBotBoards and ttyACM ports found on this host.
if __name__ == '__main__':
    logger.info("Looking for EiBotBoards")
    for port in findEiBotBoards():
        logger.info(port)
    logger.info("Looking for COM ports")
    for port in findPorts():
        logger.info(port)
|
UTF-8
|
Python
| false | false | 2,014 |
19,301,583,062,895 |
05c1f89faffc9567d2ffecc67ae324e023219c99
|
c64f25ee81a77581de6579bc648931e87e670090
|
/horizon/virtualenv/lib/python2.6/site-packages/turbogears/tests/__init__.py
|
f0b03bcd1fb0ea5295d4b780bd593fe836f3edb1
|
[
"Apache-2.0"
] |
permissive
|
ziziwu/openstack
|
https://github.com/ziziwu/openstack
|
1577e626742d877f1aa1aeeeb5f9140cdfa64c3e
|
42184f6d40aa00afb82d52f34363d106aec1fa2c
|
refs/heads/master
| 2016-09-11T13:45:13.618427 | 2014-09-09T08:12:14 | 2014-09-09T08:12:14 | 23,571,303 | 0 | 1 | null | false | 2020-07-24T06:33:24 | 2014-09-02T08:42:03 | 2014-09-02T09:11:58 | 2014-09-09T08:13:20 | 72,824 | 0 | 1 | 1 |
Python
| false | false |
# tests
from warnings import filterwarnings, simplefilter, resetwarnings
def setup():
    """Install the warning filters the test run needs.

    Cheetah emits a UserWarning when its NameMapper C extension is missing;
    that noise is suppressed.  DeprecationWarnings, which Python 2.7+
    ignores by default, are forced on so the tests surface them.
    """
    filterwarnings('ignore', r'\n?.*\bNameMapper\b', UserWarning, 'Cheetah')
    simplefilter('always', DeprecationWarning)
def teardown():
    """Discard every warning filter installed by setup() (or anyone else)."""
    resetwarnings()
|
UTF-8
|
Python
| false | false | 2,014 |
13,108,240,195,567 |
d633d6e9cf7111517e8537d9f49d182f9b3e98cb
|
df6cb6cb5a44af7fb5354770ac6ed360dae89fb6
|
/pure-python/lucid/io.py
|
936587d97bf667decc5b257a697851a31e8b46a6
|
[] |
no_license
|
theojulienne/lucid
|
https://github.com/theojulienne/lucid
|
c0c9532b5b201329741726d1da6caaa902bdad03
|
c79b81334dbffa6ec04fd8e52e328d8a2071e474
|
refs/heads/master
| 2020-11-26T19:35:50.366893 | 2008-05-07T07:35:53 | 2008-05-07T07:35:53 | 7,659,769 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from zope.interface import Interface, implements
from enum import Enum, Flags
# Public API of this module.
__all__ = ("WatchEventKind", "HandleKind", "WatchEvent", "IEventLoop")
### Interfaces
#FIXME: This actually needs to be some kind of 'bitflags' object..
# Event bits a watch can report (uses the pre-stdlib 'enum' package API).
WatchEventKind = Flags("None", "In", "Out", "Err", "Hup")
# What kind of OS object an OSHandle wraps (only FDs so far).
HandleKind = Enum("Invalid", "FD")
class OSHandle(object):
    """A raw OS handle value tagged with its HandleKind.

    int(handle) yields the underlying numeric value, so instances can be
    passed where a file descriptor number is expected.
    """
    __slots__ = ("handle", "kind")

    def __init__(self, handle = -1, kind = HandleKind.Invalid):
        self.handle = handle
        self.kind = kind

    def __int__(self):
        return self.handle
class WatchEvent(object):
    """An event delivered for a watched OSHandle: the handle plus the
    WatchEventKind bits that fired."""
    __slots__ = ("handle", "events")

    def __init__(self, handle, events):
        assert isinstance(handle, OSHandle)
        #FIXME: check events here
        self.handle = handle
        self.events = events
class IEventLoop(Interface):
    """Event-loop contract: watch OS handles for events, schedule timeouts,
    and run/quit the loop.  zope.interface declarations -- the empty bodies
    are intentional; implementations provide the behaviour."""
    def add_watch(handle, events, handler, args): pass
    def update_watch(source, events): pass
    def add_timeout(timeout, handler, args): pass
    def remove_source(source): pass
    def run(): pass
    def run_iteration(block): pass
    def quit(): pass
    def has_pending(): pass
### Implementation Loading
# NOTE(review): re-raising immediately makes this try/except a no-op; it
# reads like a placeholder for falling back to other backends later.
# (``except ImportError, ex`` is Python 2 syntax.)
try:
    from glib_impl import get_loop
except ImportError, ex:
    raise ex
|
UTF-8
|
Python
| false | false | 2,008 |
5,909,875,022,741 |
0ae53137c68054f2d427b2f6dd7b1935b85522b9
|
98c6ea9c884152e8340605a706efefbea6170be5
|
/examples/data/Assignment_5/hlncec001/question2.py
|
0ecacc2efd79f9c3a4254454f0630b89b5b5e0b9
|
[] |
no_license
|
MrHamdulay/csc3-capstone
|
https://github.com/MrHamdulay/csc3-capstone
|
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
|
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
|
refs/heads/master
| 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#question2.py
#Cecil Hlungwana
#HLNCEC001
#16 April 2014
# Vending-machine change calculator: read a cost, accept deposits until the
# cost is covered, then print the change as $1 / 25c / 10c / 5c / 1c.
# NOTE(review): eval(input(...)) executes arbitrary Python typed by the
# user; int(input(...)) would be safer.  Left unchanged (doc-only edit).
cost = eval(input("Enter the cost (in cents):\n")) #This is the first message if prompts. It asks for a cost.
if cost !=0: # If the cost inputted is not zero then the function should run.
    coin = eval(input("Deposit a coin or note (in cents):\n"))
    num = 0
    while coin < cost: #The 'While loop' should run when coin is less than cost.
        coins = eval(input("Deposit a coin or note (in cents):\n"))
        coin =coins + coin # It should keep on asking for coins if coin is still less than cost.
    change = coin - cost # After the loop is done running, it should calculate the change.
    len_change = len(str(change)) #This calculates the length of the change.
    if change != 0:
        # Three or more digits: at least 100c of change, so dollars are due.
        if len_change >=3 :
            print("Your change is:")
            # Everything except the last two digits is the dollar count.
            print(str(change)[:-2],"x $1")
            # Strip $1 units off until only cents (< 100) remain.
            while len_change >= 3:
                change-=100
                len_change = len(str(change))
            while 25<= change < 100:
                change -= 25
                num+=1
            if num !=0:
                print(num,"x 25c")
            num = 0
            while 10 <= change < 25:
                change -= 10
                num+=1
            if num!=0:
                print(num,"x 10c")
            num = 0
            while 5 <= change < 10:
                change -= 5
                num +=1
            if num!=0:
                print(num,"x 5c")
            # NOTE(review): unlike the two-digit branch below, this branch
            # never prints the remaining 1c coins -- looks like a bug.
        # Two digits: coins only (25c down to 1c).
        elif len_change == 2:
            print("Your change is:")
            while 25<= change < 100:
                change -= 25
                num+=1
            if num != 0:
                print(num,"x 25c")
            num = 0
            while 10 <= change < 25:
                change -= 10
                num+=1
            if num!=0:
                print(num,"x 10c")
            num = 0
            while 5 <= change < 10:
                change -= 5
                num +=1
            if num!=0:
                print(num,"x 5c")
            if 1<= change < 5:
                print(change,'x 1c')
        # One digit: at most 9c of change.
        # NOTE(review): this branch never prints the "Your change is:"
        # header that the other branches print.
        elif len_change == 1:
            num = 0
            while 5 <= change < 10:
                change -= 5
                num +=1
            if num != 0:
                print(num,"x 5c")
            if 1 <= change < 5:
                print(change,'x 1c')
|
UTF-8
|
Python
| false | false | 2,014 |
10,703,058,521,079 |
48744e72cb4622f7e5cd78ed1fd4e47af64ac9a0
|
e0816ca87cc6e8df3851935d8b683aec02ae5e17
|
/6.py
|
bfcb5ac0a25a820a5088bd92ba8607f61024b937
|
[] |
no_license
|
dmilad/project-euler
|
https://github.com/dmilad/project-euler
|
ad564da88c5b6697c2b2e7ade6c19ebb53eb6d7e
|
5adeb53057e73f90acd1c437c994cd732c3bda14
|
refs/heads/master
| 2016-09-05T14:20:38.277184 | 2014-07-10T06:43:51 | 2014-07-10T06:43:51 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def sum_square_difference(n=100):
    """Return (1 + 2 + ... + n)**2 - (1**2 + 2**2 + ... + n**2).

    Project Euler problem 6, generalized from the original's hard-coded
    n=100 (the magic constant 10100/2 was sum(1..100)).
    """
    total = n * (n + 1) // 2          # closed form for 1 + ... + n
    sum_sq = sum(i * i for i in range(1, n + 1))
    return total * total - sum_sq

# The original used a Python 2 print statement ("print (10100/2)**2 - sum_sq"),
# which mis-parses under Python 3 (** binds to print's return value) and
# relied on integer '/'.  Print the same answer portably.
print(sum_square_difference(100))
|
UTF-8
|
Python
| false | false | 2,014 |
15,925,738,781,784 |
7a2b20255f6bc8fb3106250c692c4b8f0e8dc79e
|
2a41601f58028f1bc203270c11bc50bccc3c6d25
|
/LanConnect/settings.py
|
e46011c0ad6b1ff9dafb14fc12bf2ec67c64d338
|
[] |
no_license
|
LanConnect/LanConnect
|
https://github.com/LanConnect/LanConnect
|
50384a00d7daf22344ac614ddef1390c397f611e
|
8ad9a8c8d53237bab115e1af653255ef41e7fcc5
|
refs/heads/master
| 2020-08-05T21:56:37.102845 | 2011-12-08T00:38:18 | 2011-12-08T00:38:18 | 2,849,774 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Django settings for LanConnect project.
import os, sys
# All paths below are built relative to this settings file's directory.
ROOT_PATH = os.path.dirname(os.path.abspath(__file__))
sys.path.append(ROOT_PATH)
# NOTE(review): DEBUG must be False in production deployments.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    # ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
# NOTE(review): ENGINE/NAME are left blank here; presumably filled in by
# settings_local (imported at the bottom of this file) -- verify.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': '', # Or path to database file if using sqlite3.
        'USER': '', # Not used with sqlite3.
        'PASSWORD': '', # Not used with sqlite3.
        'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '', # Set to empty string for default. Not used with sqlite3.
    }
}
#email Settings
EMAIL_HOST = ( 'mail.internode.on.net' )
DEFAULT_FROM_EMAIL = ( '[email protected]' )
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Australia/Adelaide'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-au'
LANGUAGES = (
#    ('de', 'German'),
    ('en', 'English'),
#    ('fr', 'French'),
#    ('pl', 'Polish'),
#    ('ko', 'Korean'),
#    ('ru', 'Russian'),
)
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(ROOT_PATH,'..','static')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/static/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/admin-media/'
# Make this unique, and don't share it with anybody.
# NOTE(review): a hard-coded SECRET_KEY committed to version control should
# be rotated and moved into settings_local.
SECRET_KEY = 'y6bz5dlng7(5tvw+tzrqi!cx^wn7k*nx0e%4%y#z-!4xo1q7f4'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
##    'sphene.community.groupaware_templateloader.load_template_source',
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
#     'django.template.loaders.eggs.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.core.context_processors.auth',
    'django.core.context_processors.debug',
    'django.core.context_processors.i18n',
    'django.core.context_processors.request',
    'django.core.context_processors.media',
##    'sphene.community.context_processors.navigation',
)
MIDDLEWARE_CLASSES = (
    'debug_toolbar.middleware.DebugToolbarMiddleware',
#    'sphene.sphboard.middleware.PerformanceMiddleware',
#    'sphene.community.middleware.PsycoMiddleware',
    'sphene.community.middleware.ThreadLocals',
    'sphene.community.middleware.GroupMiddleware',
    'sphene.community.middleware.MultiHostMiddleware',
#    'sphene.community.middleware.StatsMiddleware',
    'sphene.community.middleware.LastModified',
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
#    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
#    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.locale.LocaleMiddleware',
    'django.middleware.doc.XViewMiddleware',
    'sphene.community.middleware.PermissionDeniedMiddleware',
)
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    os.path.join(ROOT_PATH, 'sitetemplates'),
)
INSTALLED_APPS = (
    #must be at the top for maximum awesome (also to intercept admin templates)
    #'fumi',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.humanize',
    'django.contrib.flatpages',
    'django.contrib.admin',
    # NOTE(review): 'django.contrib.sites' is listed twice (also above);
    # newer Django versions reject duplicate app names.
    'django.contrib.sites',
    'django.contrib.sitemaps',
    #'rosetta',
    'debug_toolbar',
    'south',
    'sorl.thumbnail',
    #'sphene.community',
    #'sphene.sphboard',
    #'sphene.sphwiki',
    #'sphene.sphblog',
    #'joust',
    #'achievements',
    #'events',
)
INTERNAL_IPS = ("127.0.0.1",) #for ddt and sorl
CACHE_BACKEND = 'dummy://'
LOGIN_REDIRECT_URL = '/'
ROOT_URLCONF = 'LanConnect.urls'
# Sphene community tools configuration.
SPH_SETTINGS = {'wiki_rss_url' : '/feeds/wiki/',}
SPH_SETTINGS['community_show_languageswitcher'] = False
# Customize wikilink regex - by default CamelCase would be replaced..
# with this regex only words within [brackets] are replaced.
SPH_SETTINGS['wikilink_regexp'] = r'''((?P<urls><a .*?>.*?</a)|(?P<escape>\\|\b)?(?P<wholeexpression>(\[(?P<snipname>[A-Za-z\-_/0-9]+)(\|(?P<sniplabel>.+?))?\])))'''
DJAPIAN_DATABASE_PATH = os.path.join(ROOT_PATH,'..','cache')
# You can configure this to make every subdomain refer to it's own community 'Group'
SPH_HOST_MIDDLEWARE_URLCONF_MAP = {
    r'^(?P<groupName>\w+).localhost.*$': { 'urlconf': 'urlconfs.community_urls', },
    '.*': { 'urlconf': 'urlconfs.community_urls',
            'params': { 'groupName': 'example' } },
}
#Since LDAP code uses group,dn, to enable our groups to work across many DNs we need to "modify" our code. All groups are prependded by cn=, so we can forget that
AUTHENTICATION_BACKENDS = ('fumi.auth.LDAPBackend',)
AUTH_PROFILE_MODULE = 'community.CommunityUserProfile'
# Connection details are expected to be overridden in settings_local.
LDAP_AUTH_SETTINGS = ({
    'url' : None,
    'bindname': None,
    'bindpw': None,
    'app_name' : None,
    'realm' : None,
    'memberOf_overlay' : False,
},)
ACHIEVEMENT_IMAGES_DIRECTORY = 'achievement-images/'
# NOTE(review): the guard around this import is commented out, so a missing
# or broken settings_local now raises instead of printing a warning --
# confirm that is intentional.
#try:
# settings_local overwrites a few settings from here, and has to define SECRET_KEY
from settings_local import *
#except Exception as e:
#    print "Warning - Error importing settings_local:", e
|
UTF-8
|
Python
| false | false | 2,011 |
14,611,478,774,916 |
5acd00d814cf56ca702907a7edc6208bf1e6a9bf
|
9aa7e3be8a930ad15b6c102594238b3fc42574a9
|
/vSphere.py
|
bb261f3cb9054ac2d460cddf60946450c1806725
|
[
"MIT"
] |
permissive
|
s1l0uk/vSPLAT
|
https://github.com/s1l0uk/vSPLAT
|
76148aee9f9c873fc6e4e4a48c15601c69a9f567
|
ad67a38ad57591ab4f3a8f1b8a6ab14a8718484e
|
refs/heads/master
| 2021-01-14T10:18:25.650919 | 2014-10-30T14:41:56 | 2014-10-30T14:41:56 | 25,646,984 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
#If not standard should be in requirements.txt
import yaml
import sys
import tempfile
import re
import json
import getpass
import random
import atexit
import time
import requests
import os
import pyVmomi
import pyVmomi.Iso8601 as Iso8601
from pyVmomi import vim, vmodl
from pyVim.connect import Disconnect, SmartConnect
from cli.log import LoggingApp
from prettytable import PrettyTable
from threading import Thread
# Avoid littering .pyc files next to the sources when run as a tool.
sys.dont_write_bytecode = True
__author__ = ['Paul.Hardy']
class VsphereTool(LoggingApp):
def get_connection(self, config):
try:
connection = SmartConnect(host=config['hostname'], port=int(config['port']), user=config['username'], pwd=config['password'])
except Exception as e:
self.log.debug(e)
quit("Connection: borked!")
atexit.register(Disconnect, connection)
return connection
def get_config(self):
try:
config = yaml.load(open(self.params.config))['vsphere'][self.params.farm]
except IOError as e:
self.log.debug(e)
quit("No Configuration file found at " + self.params.config)
if config['password'] == "":
config['password'] = str(getpass.getpass(prompt='Enter password for %s@%s: ' % (config['username'],config['hostname'])))
return config
def pretty_print_hosts(self,vm):
summary = vm.summary
annotation = summary.config.annotation
ipAddr = summary.guest.ipAddress
if annotation == None or annotation == "":
annotation = "None"
if ipAddr == None:
if summary.config.template == True:
ipAddr = "Template"
else:
ipAddr = "Not Assigned"
question = summary.runtime.question
if question == None:
question == "None"
row = [str(summary.config.name), str(summary.config.vmPathName),str(summary.config.guestFullName),annotation,str(summary.runtime.powerState),ipAddr,question]
return row
def pretty_print_ds(self,ds):
space = str(ds.info.freeSpace/1024) + "/" + str(ds.info.maxFileSize/1024)
vms = []
for vm in ds.vm:
vms.append(vm.name)
vms = "\n".join(vms)
row = [ds.name, ds.info.url, space,vms]
return row
def pretty_print_rp(self,rp):
vms = []
for vm in rp.vm:
vms.append(vm.name)
vms = "\n".join(vms)
row = [rp.name, rp.overallStatus,vms]
return row
def pretty_print_fold(self,fold):
children = []
for child in fold.childEntity:
children.append(child.name)
row = [fold.name,fold.parent.name,fold.overallStatus,"\n".join(children)]
return row
def pretty_print_dc(self,dc):
row = [dc.name,dc.parent.name,dc.overallStatus]
return row
def pretty_print_vlan(self,vlan):
row = [vlan.name,str(vlan).split(':')[-1].replace('\'','')]
return row
def pretty_print_dvport(self,dv):
row = [dv.name, str(dv).split(':')[-1].replace('\'','')]
return row
def pretty_print_hs(self,hs):
stores = []
for store in hs.datastore:
stores.append(store.name)
cpuinfo = "\n".join(["hz: " + str(hs.hardware.cpuInfo.hz), "Cores: " + str(hs.hardware.cpuInfo.numCpuCores), "Packages: " + str(hs.hardware.cpuInfo.numCpuPackages), "Threads: " + str(hs.hardware.cpuInfo.numCpuThreads)])
meminfo = "\n".join(["Denominator: " + str(hs.hardware.memorySize.denominator), "Imag: " + str(hs.hardware.memorySize.imag), "Numerator: " + str(hs.hardware.memorySize.numerator), "Real: " + str(hs.hardware.memorySize.real)])
sysinfo = "\n".join(["uuid: " + str(hs.hardware.systemInfo.uuid), "Model: " + str(hs.hardware.systemInfo.model), "Vendor: " + str(hs.hardware.systemInfo.vendor)])
biosinfo = "\n".join(["Version: " + str(hs.hardware.biosInfo.biosVersion), "Release: " + str(hs.hardware.biosInfo.releaseDate)])
row = [hs.name, hs.overallStatus,cpuinfo,meminfo,sysinfo,biosinfo,"\n".join(stores)]
return row
def index_instances(self, conn, subject=None, regex=None):
row = []
if subject == None:
subject = self.params.detail
template = False
if subject == "vms":
title = ["Name", "Path", "Guest", "Annotation", "State", "IP", "Questions"]
recordtype = vim.VirtualMachine
elif subject == "datastore":
title = ["Name", "URL", "Size", "VMs"]
recordtype = vim.Datastore
elif subject == "resourcepools":
title = ["Name","Status","VMs"]
recordtype = vim.ResourcePool
elif subject == "datacentres":
title = ['Name', "Parent", "Status"]
recordtype = vim.Datacenter
elif subject == "folders":
title = ["Name", "Parent","Status","Children"]
recordtype = vim.Folder
elif subject == "hostsystems":
title = ["Name", "Status","CPU","Memory","System","BIOS","Datastores"]
recordtype = vim.HostSystem
elif subject == "vlans":
title = ["Name","vSphere ID"]
recordtype = vim.Network
elif subject == "templates":
title = ["Name", "Path", "Guest", "Annotation", "State", "IP", "Questions"]
recordtype = vim.VirtualMachine
template = True
elif subject == "dvports":
title = ["Name","vSphere ID"]
recordtype = vim.dvs.DistributedVirtualPortgroup
else:
quit("This function has not been created yet...")
if regex != None:
hosts = self.get_elements_regexed(conn,recordtype,regex,template)
else:
hosts = self.get_elements(conn,recordtype,template)
table = PrettyTable(title)
for host in hosts:
if subject == "templates" and self.params.mode == "index":
table.add_row(self.pretty_print_hosts(host))
elif subject == "vms" and self.params.mode == "index":
table.add_row(self.pretty_print_hosts(host))
elif subject == "datastore" and self.params.mode == "index":
table.add_row(self.pretty_print_ds(host))
elif subject == "resourcepools" and self.params.mode == "index":
table.add_row(self.pretty_print_rp(host))
elif subject == "folders" and self.params.mode == "index":
table.add_row(self.pretty_print_fold(host))
elif subject == "datacentres" and self.params.mode == "index":
table.add_row(self.pretty_print_dc(host))
elif subject == "vlans" and self.params.mode == "index":
table.add_row(self.pretty_print_vlan(host))
elif subject == "hostsystems" and self.params.mode == "index":
table.add_row(self.pretty_print_hs(host))
elif subject == "dvports" and self.params.mode == "index":
table.add_row(self.pretty_print_dvport(host))
else:
if self.params.mode == "index":
self.log.error(host.summary)
print table
self.log.error("Number of " + subject + ": " + str(len(hosts)))
return hosts
def get_folder_by_path(self,conn,datacentre,path):
folders = path.split('/')[1:]
content = conn.RetrieveContent()
obj = []
dc_obj = self.get_elements_regexed(conn,vim.Datacenter,datacentre)[0]
first_tier = content.viewManager.CreateContainerView(dc_obj, [vim.Folder], False).view
for possible_root in first_tier:
if possible_root.name == "vm":
root = possible_root
obj.append(root)
for section in folders:
obj.append(self.narrow_down(content,obj[-1],section))
return obj
def narrow_down(self,content,parent,child):
for item in content.viewManager.CreateContainerView(parent, [vim.Folder], False).view:
if item.name == child:
return item
def detokenize_scripts(self, script):
try:
from definitions import definitions
except Exception as e:
self.log.debug(e)
self.log.error("No definitions file found! - Not detokenizing!")
return open(script,"r")
config = definitions(self)
try:
with open(script, "r") as f:
script = f.read()
#Create regex
regex= re.compile('@.*@')
while regex.search(script) != None:
for i, j in config.iteritems():
script = script.replace(i,j)
return script
except:
quit('Could not find script - please check and try again')
def create(self,conn,extra_data, creds=None):
self.log.error("Getting " + str(extra_data['template']) + " Template for " + str(extra_data['name']))
templates = self.index_instances(conn, "templates", extra_data["template"])
if len(templates) < 1:
quit(str(extra_data['name']) + ": I could not find the template you were looking for... please check and try again!")
elif len(templates) > 1:
self.log.info(str(extra_data['name']) + ": Found more than one template matching " + str(extra_data['template']))
self.log.error(str(extra_data['name']) + ": " + str(templates))
self.log.error(str(extra_data['name']) + ": Going to use " + templates[0].config.name)
template_vm = templates[0]
else:
template_vm = templates[0]
self.log.error(str(extra_data['name']) + ": I found your template - " + str(template_vm.name))
self.log.error(str(extra_data['name']) + " Getting Datastore")
datastores = self.get_elements_regexed(conn,vim.ResourcePool,str(extra_data['resourcepool']))
if len(datastores) < 1:
quit(str(extra_data['name']) + ": I could not find the host you were looking for... please check and try again!")
elif len(datastores) > 1:
self.log.info(str(extra_data['name']) + ": Found more than one Datastore matching " + str(extra_data['resourcepool']))
self.log.error(str(extra_data['name']) + ": " + str(datastores))
self.log.error(str(extra_data['name']) + ": Going to use " + str(datastores[0].config.name))
esx_host = datastores[0]
else:
self.log.error(str(extra_data['name']) + ": I Found the Datastore")
esx_host = datastores[0]
vm_name = str(extra_data['name'])
mem = extra_data['memory']
cpu = extra_data['cpu']
vlan = extra_data['vlan']
self.log.debug(str(vm_name) + " - Memory: " + str(mem) + " CPU: " + str(cpu) + " VLAN: " + str(vlan) )
devices = []
vlan_type = vlan.split(':')[0]
vlan = vlan.split(':')[1]
if vlan_type == "pcnet":
vlan_type = vim.vm.device.VirtualPCNet32()
elif vlan_type == "e1000":
vlan_type = vim.vm.device.VirtualE1000()
elif vlan_type == "vmxnet2":
vlan_type = vim.vm.device.VirtualVmxnet2()
elif vlan_type == "vmxnet3":
vlan_type = vim.vm.device.VirtualVmxnet3()
else:
self.log.error(vm_name + ": Do not what to do with nic type: " + str(vlan_type))
nicspec = vim.vm.device.VirtualDeviceSpec()
nicspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
nicspec.device = vlan_type
nicspec.device.wakeOnLanEnabled = True
nicspec.device.addressType = 'assigned'
nicspec.device.deviceInfo = vim.Description()
pg_obj = self.get_elements_regexed(conn, vim.dvs.DistributedVirtualPortgroup, vlan)[0]
dvs_port_connection = vim.dvs.PortConnection()
dvs_port_connection.portgroupKey= pg_obj.key
dvs_port_connection.switchUuid= pg_obj.config.distributedVirtualSwitch.uuid
nicspec.device.backing = vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo()
nicspec.device.backing.port = dvs_port_connection
nicspec.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
nicspec.device.connectable.startConnected = True
nicspec.device.connectable.allowGuestControl = True
devices.append(nicspec)
adaptermap = vim.vm.customization.AdapterMapping()
domain = str(extra_data['domain'])
self.log.debug(domain)
ip = str(extra_data['ip'])
self.log.debug(ip)
subnet = str(extra_data['subnet'])
self.log.debug(subnet)
folder = str(extra_data['folder'])
self.log.debug(folder)
gateway = str(extra_data['gw'])
self.log.debug(gateway)
adaptermap.adapter = vim.vm.customization.IPSettings(ip=vim.vm.customization.FixedIp(ipAddress=ip),subnetMask=subnet, gateway=gateway, dnsDomain=domain)
globalip = vim.vm.customization.GlobalIPSettings()
ident = vim.vm.customization.LinuxPrep(domain=domain, hostName=vim.vm.customization.FixedName(name=vm_name))
customspec = vim.vm.customization.Specification(nicSettingMap=[adaptermap], globalIPSettings=globalip, identity=ident)
vmconf = vim.vm.ConfigSpec(numCPUs=cpu, memoryMB=mem, annotation=extra_data['notes'], deviceChange=devices)
relocateSpec = vim.vm.RelocateSpec(pool=esx_host ,datastore=self.get_elements_regexed(conn, vim.Datastore ,extra_data['datastore'])[0])
cloneSpec = vim.vm.CloneSpec(powerOn=True, template=False,customization=customspec,location=relocateSpec, config=vmconf)
vmFolder = self.get_folder_by_path(conn,extra_data['datacentre'],extra_data['folder'])[-1]
clone = template_vm.Clone(name=vm_name, folder=vmFolder, spec=cloneSpec)
self.log.info(vm_name + ": Waiting for VM creation!")
state = clone.info.state
while state == "running":
time.sleep(30)
state = clone.info.state
self.log.info(vm_name + ": " + str(state))
try:
vm = self.get_elements_regexed(conn,vim.VirtualMachine,vm_name)[0]
except IndexError as e:
self.log.debug(vm_name + ": " + str(e))
self.log.error(vm_name + ": Could not find VM after creation - Assumed it did not create and continuing")
return "error"
if state == "error":
self.log.error("Whoops! Your vSphere seems to be having a funny moment")
try:
self.log.error(clone.info.error.msg)
except:
self.log.error("No Error - Must be underlying host issues... will try again!")
self.create(conn,extra_data, creds)
return
if clone.info.error.msg == "Cannot connect to host.":
vm.Destroy()
self.log.error("Delete the VM and try again?")
time.sleep(10)
self.create(conn, extra_data, creds)
return
try:
if clone.info.error.msg == "The name '" + vm_name + "' already exists.":
self.log.error("The instance already exists")
if vm.runtime.powerState == "poweredOn":
self.log.info(vm_name + ": Powering on - Assumed configured and moving on to the next! - please delete instance and rerun if this is not the case!")
vm.PowerOnVM_Task()
return "exists"
except Exception as e:
self.log.debug(vm_name + str(e))
self.log.info(vm_name + ": Woohoo! No Errors during the cloning phase!")
self.log.info(vm_name + ": VM was created on the vSphere")
time.sleep(10)
state = vm.runtime.powerState
self.log.info(vm_name + ": Current state of VM is - " + str(state))
if state != "poweredOn":
self.log.warn("There has been a problem... trying to work out what it is and get this VM up!")
self.log.debug(vm_name + ": " + str(clone.info))
self.log.debug(vm_name + ": Clone Error Message - " + str(clone.info.error.msg))
result = vm.runtime.powerState
while result != 'poweredOn':
self.log.info("VMX Error - Not enough resources... aka Silly vSphere Syndrome!!")
self.log.warn("Attempting Migration!")
all_hosts = self.index_instances(conn, subject="hostsystems", regex=None)
esx_host = random.choice(all_hosts)
self.log.debug(vm_name + ": Migrating to - " + str(esx_host.name))
relocate_spec = vim.vm.RelocateSpec(host=esx_host)
reloc = vm.Relocate(relocate_spec)
while reloc.info.state == "running":
self.log.info("Waiting for relocation to complete!")
time.sleep(10)
self.log.debug(vm_name + ": Powering On - Host:" + str(esx_host.name))
vm.PowerOnVM_Task()
time.sleep(10)
result = vm.runtime.powerState
if vm.runtime.powerState != "poweredOn":
vm.PowerOnVM_Task()
result = vm.runtime.powerState
while result != 'poweredOn':
self.log.info(vm_name + ": Waiting for VM to power up!")
if result == 'poweredOff':
test = vm.PowerOnVM_Task()
result = vm.runtime.powerState
time.sleep(15)
self.log.info(vm_name + ": Looks good! - Let's wait for the OS to be ready!")
ipAddr = vm.summary.guest.ipAddress
while ipAddr == None:
self.log.info(vm_name + ": Waiting for OS to start up!")
time.sleep(30)
ipAddr = vm.summary.guest.ipAddress
self.log.info(vm_name + ": w00t! w00t! We are now ready for configuration!")
self.log.debug(vm_name + ": VM details are: " + str(extra_data))
self.log.debug(vm_name + " current State is: " + str(vm.runtime.powerState))
if extra_data['artifacts'] and extra_data['artifacts'] is not None:
self.drop_a_file(conn,creds,regex=vm_name,artifacts=extra_data['artifacts'])
if extra_data['scripts'] and extra_data['scripts'] is not None:
for script in extra_data['scripts']:
self.run_a_script(conn,creds,regex=vm_name,script=script)
if extra_data['commands'] and extra_data['commands'] is not None:
for command in extra_data['commands']:
self.run_a_command(conn,creds,regex=vm_name,command=command)
time.sleep(10)
def verify_process(self, content, vm, creds, pid):
pids = []
processes = content.guestOperationsManager.processManager.ListProcessesInGuest(vm=vm, auth=creds)
for process in processes:
if process.pid == pid:
if process.exitCode == None:
self.log.debug("still running process!")
reply = content.guestOperationsManager.fileManager.InitiateFileTransferFromGuest(guestFilePath="/tmp/output.txt",vm=vm,auth=creds)
entry = requests.get(reply.url, verify=False).text
print chr(27) + "[2J" #Clear Screen
self.log.error("running on " + vm.name)
self.log.error(entry)
exit_code = 1
time.sleep(10)
else:
self.log.info("finished process!")
exit_code = 2
return exit_code
def exec_command(self, content, vm, creds, args, program_path):
cmdspec = vim.vm.guest.ProcessManager.ProgramSpec(arguments=args, programPath=program_path)
output = content.guestOperationsManager.processManager.StartProgramInGuest(vm=vm, auth=creds, spec=cmdspec)
return output
def snapshot(conn,regex=None):
if regex == None:
regex = self.params.regex
vms = get_elements_regexed(conn, vim.VirtualMachine ,regex)
if len(vms) == 0:
quit("I am sorry - These are not the VMs you are looking for...")
elif len(vms) == 1:
self.log.error("Snapshotting - " + str(vms[0].config.name))
try:
vms[0].CreateSnapshot() #Needs testing!
except Exception as e:
self.log.error("Something went wrong...")
quit(e)
elif len(vms) > 1:
if self.param.auto == True:
self.log.error("This script is running in automatic mode - All VMs found will be snapshotted")
for vm in vms:
if self.param.auto == True:
try:
vm.CreateSnapshot() #Needs testing!
except Exception as e:
self.log.error("Something went wrong with " + str(vm.config.name))
else:
self.log.error("Snapshotting - " + str(vm.config.name))
resp = raw_input("\nWould you like to continue: [y/n]")
if resp in ['y','ye','yes','Y','Ye','Yes','YES', 'YE']:
vm.CreateSnapshot() #Needs testing!
else:
self.log.error("Not creating a snapshot of - " + str(vm.config.name))
continue
def get_elements(self, conn, recordType, template=False):
content = conn.RetrieveContent()
obj = []
container = content.viewManager.CreateContainerView(content.rootFolder, [recordType], True)
for c in container.view:
if template == False:
obj.append(c)
else:
if c.summary.config.template == True:
obj.append(c)
return obj
def get_elements_regexed(self, conn, recordType ,regex, template=False):
content = conn.RetrieveContent()
obj = []
container = content.viewManager.CreateContainerView(content.rootFolder, [recordType], True)
for c in container.view:
if recordType != vim.Folder:
if c.name == regex or regex in str(c.summary):
if template == False:
obj.append(c)
else:
if c.summary.config.template == True:
obj.append(c)
else:
if c.name == regex or regex in str(c.overallStatus):
if template == False:
obj.append(c)
else:
if c.summary.config.template == True:
obj.append(c)
return obj
def drop_and_run(self, content, vm, program_path, args ,creds=None):
self.log.error("Running script on - " + str(vm.config.name))
process = self.exec_command(content, vm, creds, args, program_path)
exists = self.verify_process(content, vm, creds, process)
if exists == 2:
quit("The command did not take... try again?")
else:
while exists == 1:
exists = self.verify_process(content, vm, creds, process)
reply = content.guestOperationsManager.fileManager.InitiateFileTransferFromGuest(guestFilePath="/tmp/output.txt",vm=vm,auth=creds)
self.log.error(requests.get(reply.url, verify=False).text)
self.log.info("Removing output now it was been viewed...")
self.exec_command(content, vm, creds, "/tmp/output.txt", "/bin/rm")
def run_a_command(self,conn,creds=None,regex=None, command=None):
if command == None:
command = self.params.Command
if regex == None:
regex = self.params.regex
hosts = self.get_elements_regexed(conn, vim.VirtualMachine,regex)
if creds == None:
user = raw_input("Please enter the username to make alterations to the system: ")
passwd = getpass.getpass(prompt='Enter password for the host: ')
creds = vim.vm.guest.NamePasswordAuthentication(username=user, password=passwd)
content = conn.RetrieveContent()
self.log.info("Preparing Command")
command = command + str(" > /tmp/output.txt 2>&1")
command = command.split(' ',1)
program_path = command[0]
args = command[1]
if len(hosts) == 0:
quit("Failed - These are not the VMs you are looking for...")
elif len(hosts) == 1:
vm = hosts[0]
self.drop_and_run(content, vm, program_path, args ,creds)
elif len(hosts) > 1:
if self.params.auto == True:
self.log.error("This script is running in automatic mode - All VMs found will have the command run on")
for vm in hosts:
if self.params.auto == True:
self.drop_and_run(content, vm, program_path, args ,creds)
else:
self.log.error("Command running on - " + str(vm.config.name))
resp = raw_input("\nWould you like to continue: [y/n]")
if resp in ['y','ye','yes','Y','Ye','Yes','YES', 'YE']:
self.drop_and_run(content, vm, program_path, args ,creds)
else:
self.log.error("Not running command on - " + str(vm.config.name))
continue
else:
quit("Defensive quit! - You Should NEVER get here!")
def drop_a_file(self,conn,creds=None,regex=None,artifacts=None):
if artifacts == None:
artifacts = self.params.artifact
if regex == None:
regex = self.params.regex
hosts = self.get_elements_regexed(conn, vim.VirtualMachine,regex)
if creds == None:
user = raw_input("Please enter the username to own the dropped files: ")
passwd = getpass.getpass(prompt='Enter password for the host: ')
creds = vim.vm.guest.NamePasswordAuthentication(username=user, password=passwd)
content = conn.RetrieveContent()
if len(hosts) == 0:
quit("I am sorry - These are not the VMs you are looking for...")
elif len(hosts) == 1:
for artifact in artifacts:
vm = hosts[0]
self.log.error("Dropping files on - " + str(vm.config.name))
attrib = vim.vm.guest.FileManager.FileAttributes()
theFile = artifact.split("/")[-1]
url = "/tmp/" + theFile
try:
gateway = content.guestOperationsManager.fileManager.InitiateFileTransferToGuest(overwrite=True,fileSize=os.path.getsize(artifact),fileAttributes=attrib,guestFilePath=url, vm=vm,auth=creds)
except:
self.log.error("There was a problem - trying again...")
self.drop_a_file(creds,regex,artifacts)
self.log.debug(gateway)
headers = {'Content-Type': 'application/octet-stream'}
with open(artifact, "r") as f:
r = requests.put(gateway,data=f,headers=headers,verify=False)
elif len(hosts) > 1:
for artifact in artifacts:
if self.params.auto == True:
self.log.error("This script is running in automatic mode - All VMs found will have the artifacts dropped on them")
for vm in hosts:
if self.params.auto == True:
self.log.error("Running script on - " + str(vm.config.name))
attrib = vim.vm.guest.FileManager.FileAttributes()
theFile = artifact("/")[-1]
url = "/tmp/" + theFile
gateway = content.guestOperationsManager.fileManager.InitiateFileTransferToGuest(overwrite=True,fileSize=os.path.getsize(artifact),fileAttributes=attrib,guestFilePath=url, vm=vm,auth=creds)
self.log.debug(gateway)
headers = {'Content-Type': 'application/octet-stream'}
with open(artifact, "r") as f:
r = requests.put(gateway,data=f,headers=headers,verify=False)
else:
self.log.error("Artifacts being dropped on - " + str(vm.config.name))
resp = raw_input("\nWould you like to continue: [y/n]")
if resp in ['y','ye','yes','Y','Ye','Yes','YES', 'YE']:
self.log.error("Dropping artifacts on - " + str(vm.config.name))
attrib = vim.vm.guest.FileManager.FileAttributes()
theFile = theFile("/")[-1]
url = "/tmp/" + theFile.split("/")[-1]
gateway = content.guestOperationsManager.fileManager.InitiateFileTransferToGuest(overwrite=True,fileSize=os.path.getsize(theFile),fileAttributes=attrib,guestFilePath=url, vm=vm,auth=creds)
self.log.debug(gateway)
headers = {'Content-Type': 'application/octet-stream'}
with open(theFile, "r") as f:
r = requests.put(gateway,data=f,headers=headers,verify=False)
else:
self.log.error("Not running command on - " + str(vm.config.name))
continue
else:
quit("Defensive quit! - You should NEVER get to this bit!")
def drop_the_script(self,theFile, attrib, url, creds,content, vm, args):
self.log.error("Running script on - " + str(vm.config.name))
gateway = content.guestOperationsManager.fileManager.InitiateFileTransferToGuest(overwrite=True,fileSize=os.path.getsize(theFile),fileAttributes=attrib,guestFilePath=url, vm=vm,auth=creds)
self.log.debug(gateway)
headers = {'Content-Type': 'application/octet-stream'}
with open(theFile, "r") as f:
r = requests.put(gateway,data=f,headers=headers,verify=False)
self.exec_command(content, vm, creds, "u+x " + url, "/bin/chmod")
process = self.exec_command(content, vm, creds, url + " " + args + " >> /tmp/output.txt 2>&1", "/bin/bash")
exists = self.verify_process(content, vm, creds, process)
if exists == 2:
quit("The command did not take... try again?")
else:
while exists == 1:
exists = self.verify_process(content, vm, creds, process)
reply = content.guestOperationsManager.fileManager.InitiateFileTransferFromGuest(guestFilePath="/tmp/output.txt",vm=vm,auth=creds)
self.log.error(requests.get(reply.url, verify=False).text)
self.log.info("Removing output now it was been viewed...")
self.exec_command(content, vm, creds, "/tmp/output.txt", "/bin/rm")
def drop_the_token_script(self,script, attrib, url, creds,content, vm, args):
self.log.error("Running script on - " + str(vm.config.name))
with tempfile.NamedTemporaryFile() as theFile:
theFile.write(script)
theFile.flush()
gateway = content.guestOperationsManager.fileManager.InitiateFileTransferToGuest(overwrite=True,fileSize=os.path.getsize(theFile.name),fileAttributes=attrib,guestFilePath=url, vm=vm,auth=creds)
self.log.debug(gateway)
headers = {'Content-Type': 'application/octet-stream'}
with open(theFile.name, "r") as f:
r = requests.put(gateway,data=f,headers=headers,verify=False)
self.exec_command(content, vm, creds, "u+x " + url, "/bin/chmod")
process = self.exec_command(content, vm, creds, url + " " + args + " >> /tmp/output.txt 2>&1", "/bin/bash")
exists = self.verify_process(content, vm, creds, process)
if exists == 2:
quit("The command did not take... try again?")
else:
while exists == 1:
exists = self.verify_process(content, vm, creds, process)
reply = content.guestOperationsManager.fileManager.InitiateFileTransferFromGuest(guestFilePath="/tmp/output.txt",vm=vm,auth=creds)
self.log.error(requests.get(reply.url, verify=False).text)
self.log.info("Removing output now it was been viewed...")
self.exec_command(content, vm, creds, "/tmp/output.txt", "/bin/rm")
def run_a_script(self,conn,creds=None,regex=None,script=None):
if script == None:
script = self.params.script
if regex == None:
regex = self.params.regex
hosts = self.get_elements_regexed(conn, vim.VirtualMachine,regex)
if creds == None:
user = raw_input("Please enter the username to make alterations to the system: ")
passwd = getpass.getpass(prompt='Enter password for the host: ')
creds = vim.vm.guest.NamePasswordAuthentication(username=user, password=passwd)
content = conn.RetrieveContent()
url = "/tmp/script.sh"
attrib = vim.vm.guest.FileManager.FileAttributes()
theFile,space,args = script.partition(' ')
if len(hosts) == 0:
quit("I am sorry - These are not the VMs you are looking for...")
elif len(hosts) == 1:
vm = hosts[0]
if self.params.tokenize == True:
script = self.detokenize_scripts(theFile)
self.log.debug(str(vm.name) + ": " +str(script))
self.drop_the_token_script(script, attrib, url, creds,content, vm, args)
else:
self.drop_the_script(theFile, attrib, url, creds,content, vm, args)
elif len(hosts) > 1:
if self.params.auto == True:
self.log.error("This script is running in automatic mode - All VMs found will have the command run on")
for vm in hosts:
if self.params.auto == True:
if self.params.tokenize == True:
script = self.detokenize_scripts(theFile)
self.drop_the_token_script(script, attrib, url, creds,content, vm, args)
else:
self.drop_the_script(theFile, attrib, url, creds, content, vm, args)
else:
self.log.error("Command running on - " + str(vm.config.name))
resp = raw_input("\nWould you like to continue: [y/n]")
if resp in ['y','ye','yes','Y','Ye','Yes','YES', 'YE']:
if self.params.tokenize == True:
script = self.detokenize_scripts(theFile)
self.drop_the_token_script(script, attrib, url, creds,content, vm, args)
else:
self.drop_the_script(theFile, attrib, url, creds,content, vm, args)
else:
self.log.error("Not running command on - " + str(vm.config.name))
continue
else:
quit("Defensive quit! - You should NEVER get to this bit!")
def powercycle(self, conn, regex=None):
if regex == None:
regex = self.params.regex
hosts = self.get_elements_regexed(conn, vim.VirtualMachine,regex)
self.log.error("Power cycle operations")
if len(hosts) == 0:
quit("Could not find the VM specified please try again!")
elif len(hosts) == 1:
if self.params.auto != True:
self.log.error("Found " + str(hosts[0].config.name))
resp = raw_input("\nWould you like to PowerCycle?: [y/n]")
if resp in ['y','ye','yes','Y','Ye','Yes','YES', 'YE']:
hosts[0].ResetVM_Task()
else:
self.log.error("Found " + str(hosts[0].config.name) + " PowerCycling!")
hosts[0].ResetVM_Task()
else:
self.log.error("Found " + str(len(hosts)) + " Powercycling them all!")
for host in hosts:
if self.params.auto != True:
self.log.error("Found " + str(host.config.name))
resp = raw_input("\nWould you like to PowerCycle?: [y/n]")
if resp in ['y','ye','yes','Y','Ye','Yes','YES', 'YE']:
host.ResetVM_Task()
else:
self.log.error("Found " + str(host.config.name) + " PowerCycling!")
host.ResetVM_Task()
def todo(self,conn):
subject = self.params.detail
if subject == "vms":
recordtype = vim.VirtualMachine
elif subject == "datastore":
recordtype = vim.Datastore
elif subject == "resourcepools":
recordtype = vim.ResourcePool
elif subject == "folders":
recordtype = vim.Folder
elif subject == "hostsystems":
recordtype = vim.HostSystem
elif subject == "dvports":
recordtype = vim.dvs.DistributedVirtualPortgroup
else:
quit("This function has not been created yet...")
elements = self.get_elements(conn,recordtype)
for ele in elements:
print str(ele.name)
print dir(ele)
self.log.error("VM")
self.log.error(elements[0].config.name)
self.log.error(dir(elements[0]))
#===========================MAIN===========================#
def main(self):
self.log.debug("Starting")
mode = self.params.mode
self.log.debug("Mode Selected: " + mode)
self.log.debug("Getting Config")
config = self.get_config()
self.log.debug("Config: " + str(config['hostname']) + " as " + str(config['username']))
self.log.debug("Connecting to vSphere")
if self.params.Developer == True:
connection = "test-mode"
else:
connection = self.get_connection(config)
self.log.debug("Connection Test...")
if not connection:
message = "No connection could be established - aborting!"
self.log.debug(message)
quit(message)
self.log.debug("...Passed!")
if mode == "index":
if self.params.detail:
subject = self.params.detail
regex = self.params.regex
self.index_instances(connection,subject,regex)
else:
quit("the index arguement requires the detail (-d) argument to select element to view")
elif mode == "todo":
self.todo(connection)
elif mode == "powercycle":
if self.params.regex is None:
quit("Please include a regex (-r) to know which VM to power cycle")
self.powercycle(connection)
elif mode == "filedrop":
if self.params.regex == None:
quit("Please provide a regex (-r) to match the host to drop your files on")
if self.params.artifact == None:
quit("Please include which files should be dropped on the host (-a [ artifact1, artifact2, artifact3 ])")
self.drop_a_file(Connection)
elif mode == "command":
if self.params.Command == None:
quit("Please provide a command (-C) to run on the GuestOS")
if self.params.regex == None:
quit("Please provide a regex (-r) to match the host to run your command on")
self.run_a_command(connection)
elif mode == "script":
if self.params.script == None:
quit("Please provide a script to be copied to the host (-S)")
if self.params.regex == None:
quit("Please provide a regex (-r) to match the host to run your script on")
self.run_a_script(connection)
elif mode == "snapshot":
if not self.params.regex:
quit("Please express (-r) which VM you would like to be snapshotted")
if self.params.regex and self.params.regex is not None:
self.snapshot(connection)
else:
quit("snapshot arguement requires the regex (-r) argument to narrow down which VMs to snapshot")
elif mode == "create":
if not self.params.extra or self.params.extra is None:
quit('Please supply a yaml runlist to create the instances with using the extra flag (-e)')
else:
if self.params.tokenize_config == True:
theYaml = yaml.load(self.detokenize_scripts(self.params.extra))
else:
try:
theYaml = yaml.load(open(self.params.extra))
except:
quit('There was a problem loading the run list yaml data... please check and try again')
user = raw_input("Please enter the username to make alterations to the system: ")
passwd = getpass.getpass(prompt='Enter password for the host: ')
creds = vim.vm.guest.NamePasswordAuthentication(username=user, password=passwd)
threads = []
for extra_data in theYaml:
if "notes" not in extra_data:
extra_data['notes'] = ""
try:
order = extra_data['order']
except KeyError:
self.log.info("No order found - Treating as Serial build!")
order = "serial"
if order == "serial":
self.log.info("Serial boot of " + extra_data['name'])
self.create(connection, extra_data, creds)
if len(t) > 0:
self.log.info("Waiting for previous Parallel threads to complete")
t.join()
elif order == "parallel":
self.log.info("Parallel boot of " + extra_data['name'])
t = Thread(target=self.create, args=[connection, extra_data, creds])
t.daemon = True
t.name = extra_data['name']
threads.append(t)
t.start()
else:
self.log.error("Did not recognise option " + str(order) + " - Skipping!")
continue
for t in threads:
self.log.info("Completing background threads: " + str(t.name))
t.join()
else:
message = "Please choose a Valid Mode! - You have selected %s" % mode
self.log.debug(message)
quit(message)
self.log.debug("Finished")
#===========================MAGIC==============================#
if __name__ == "__main__":
vsphere=VsphereTool()
vsphere.add_param("-f", "--farm", help="Which vSphere you want to connect to in your config", default=None, required=True, action="store")
vsphere.add_param("-m", "--mode", help="What to make foreman do. Excepted Values: ['index','create','command','script','snapshot','powercycle','filedrop',todo']", default=None, required=True, action="store")
vsphere.add_param("-d", "--detail", help="What to search for: ['datastore','vms','hostsystems','dvport','templates','resourcepool','datacentres','folders','vlans']", default=None, required=False, action="store")
vsphere.add_param("-e", "--extra", help="Extra detail to add to a given mode", default=None, required=False, action="store")
vsphere.add_param("-c", "--config", help="Change the Configuration file to use", default="./config/config.yaml", required=False, action="store")
vsphere.add_param("-D", "--Developer", help="A mode for a developer to use for testing that does not form a connection... not much functionality either ;-)", default=False, required=False, action="store_true")
vsphere.add_param("-r", "--regex", help="Changes the script behaviour to search for instances", default=None, required=False, action="store")
vsphere.add_param("-C", "--Command", help="Which command to run with arguements separated by <SOMETHING>", default=None, required=False, action="store")
vsphere.add_param("-S", "--script", help="Which script to be run on the GuestOS on the VM", default=None, required=False, action="store")
vsphere.add_param("-A", "--auto", help="Changes script to not prompt for instruction and take best fit where possible!", default=False, required=False, action="store_true")
vsphere.add_param("-a", "--artifact", help="A List of files to be dropped on the server (Does not work recursivly! You may want to tar/zip files!) e.g. -a artifact1 -a artifact2", default=None, required=False, action="append")
vsphere.add_param("-T", "--tokenize", help="Action to decide if a script that you are running on the Virtual Instance needs to be de-tokenized - Requires a definitions.py file", default=None, required=False, action="store_true")
vsphere.add_param("-TC", "--tokenize-config", help="Action to decide if a run list being processed should be de-tokenized - Requires a definitions.py file", default=None, required=False, action="store_true")
vsphere.run()
|
UTF-8
|
Python
| false | false | 2,014 |
13,314,398,650,748 |
3b1965fa606b7dddf9aff19b6f114674e6702597
|
61e153c55b1a9534782dfd8b08d6e0d11cfd4d0c
|
/15.py
|
4e04753fd0d61c84c844c3067191b77b24932b79
|
[
"GPL-2.0-only"
] |
non_permissive
|
generica/euler
|
https://github.com/generica/euler
|
5a746d84b50826f98bc92ec268b2bad02679a615
|
8f6934848643720cf4cfa11be605ee6e9e010c2b
|
refs/heads/master
| 2021-01-23T13:35:42.613131 | 2014-03-28T01:13:58 | 2014-03-28T01:13:58 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
# Project Euler 15: count the monotone lattice paths from `start` to `end`,
# moving only in the +x or +y direction.
#
# NOTE(review): the original file did not run at all -- `move_right` was
# missing its colon and referenced an undefined `end_x`, and the main block
# contained an unterminated expression (`next_a = (`).  This is a working
# implementation of the intent documented below.
start = (0, 0)
end = (2, 2)
'''
(0,0), (0,1), (0,2), (1,2), (2,2)
(0,0), (0,1), (1,1), (1,2), (2,2)
(0,0), (0,1), (1,1), (2,1), (2,2)
(0,0), (1,0), (1,1), (1,2), (2,2)
(0,0), (1,0), (1,1), (2,1), (2,2)
(0,0), (1,0), (2,0), (2,1), (2,2)
'''

def count_paths(pos, goal):
    """Return the number of monotone (right/down only) paths from pos to goal."""
    x, y = pos
    goal_x, goal_y = goal
    if x == goal_x and y == goal_y:
        # Reached the corner: exactly one (empty) path.
        return 1
    total = 0
    if x < goal_x:
        total += count_paths((x + 1, y), goal)
    if y < goal_y:
        total += count_paths((x, y + 1), goal)
    return total

if __name__ == "__main__":
    # Parenthesised print works under both Python 2 and 3.
    print(count_paths(start, end))
|
UTF-8
|
Python
| false | false | 2,014 |
14,302,241,135,467 |
8916008a288b3bbaeb829ace54885cbb13ab6237
|
153ecce57c94724d2fb16712c216fb15adef0bc4
|
/z3c.testing/tags/0.1.1b1/src/z3c/testing/ftests.py
|
501f2a494fd33a80faf63b769bb2c2223bea387c
|
[] |
no_license
|
pombredanne/zope
|
https://github.com/pombredanne/zope
|
10572830ba01cbfbad08b4e31451acc9c0653b39
|
c53f5dc4321d5a392ede428ed8d4ecf090aab8d2
|
refs/heads/master
| 2018-03-12T10:53:50.618672 | 2012-11-20T21:47:22 | 2012-11-20T21:47:22 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import layer
from zope.app.testing import functional
import unittest
def appSetUp(app):
    """Layer application-setup hook: sanity-check the app object handed in."""
    # just some stupid assertion
    assert(app.__name__ is None)
# Dynamically create a functional-test layer named ``MyLayer`` backed by
# test.zcml; ``clean=True`` tears the layer down between uses.
layer.defineLayer('MyLayer', zcml='test.zcml',
    appSetUp=appSetUp, clean=True)
def test_suite():
    """Assemble the functional doctest suite for this layer.

    BROWSER.txt is added twice on purpose so the layer's setup/teardown
    machinery is exercised across two runs.
    """
    browser_suites = [
        functional.FunctionalDocFileSuite('BROWSER.txt'),
        # test setup/teardown by calling it twice
        functional.FunctionalDocFileSuite('BROWSER.txt'),
    ]
    combined = unittest.TestSuite()
    for browser_suite in browser_suites:
        browser_suite.layer = MyLayer
        combined.addTest(browser_suite)
    return combined
if __name__ == '__main__':
    # Allow running this module directly with the unittest runner.
    unittest.main(defaultTest='test_suite')
|
UTF-8
|
Python
| false | false | 2,012 |
4,690,104,308,987 |
ed8edc8513e004acda7a12bf46c341edace6dee9
|
aea5b07141fc8eaeb7f07326d466ccb7e1775622
|
/games/sots/start.py
|
7db2a449f7d8dbd538cc903d350f18174da6d35a
|
[
"MIT",
"ISC",
"Apache-2.0"
] |
permissive
|
madberry/x84
|
https://github.com/madberry/x84
|
ca698a8a75e302f19fd6b1d6f8f73698318c8c89
|
730fe0e0dcbfba9bc818b665d86ebf1f0cc269fb
|
refs/heads/master
| 2021-01-18T07:41:13.261772 | 2012-10-22T16:00:06 | 2012-10-22T16:00:06 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# sword of the samurai bbs door clone
import db
deps = ['bbs',
'games/sots/data_province',
'games/sots/data_text',
'games/sots/gamedb',
'games/sots/events']
import random
debugKill=0
def main():
    """Door entry point: run the Sword of the Samurai event loop for this caller.

    NOTE(review): ``session``, ``handle``, ``events``, ``Event``, ``echo``,
    ``cls`` and ``color`` are injected by the BBS runtime / the ``deps``
    modules above -- they are not defined in this file.
    """
    session.activity = 'playing Sword of the Samurai'
    # First visit: create a per-user event handler record.
    if not handle() in events.keys():
        print '-'*80
        print 'create new event in db'
        events[handle()] = Event(handle())
    eventHandler = events[handle()]
    eventHandler.joinGame (handle())
    echo (cls() + color())
    while True:
        # Each queue entry is (callable, args) -- pop and dispatch.
        nextEvent = eventHandler.pop ()
        callEvent = nextEvent[0]
        args = nextEvent[1:][0]
        print 'next event:', callEvent
        print 'arguments:', args
        retvalue = callEvent(eventHandler, *args)
        if callEvent == Event.newSamurai and not retvalue:
            # failed to create a new samurai
            return retvalue
        elif callEvent == Event.quit:
            # user quit
            return retvalue
    return
|
UTF-8
|
Python
| false | false | 2,012 |
11,321,533,822,339 |
b5bbb208b0c135d2070009284ce872fca1d7ae28
|
644b019a4792b6c7d9e5352e6330069850cc07e7
|
/dentexchange/apps/libs/tests/auth/test_user_factory.py
|
2c381ee028422b18fa8a206b32e56ecd06a6101f
|
[
"BSD-3-Clause"
] |
permissive
|
jpchauvel/dentexchange
|
https://github.com/jpchauvel/dentexchange
|
db0611c8c45365db30bdc15e3005c6eeac104c73
|
58ae303e842404fc9e1860f294ec8044a332bef3
|
refs/heads/master
| 2021-10-10T12:19:00.985034 | 2014-09-24T03:42:20 | 2014-09-24T03:42:20 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding:utf-8 -*-
import mock
import unittest
from ...auth.builder import UserFactory
class UserFactoryTestCase(unittest.TestCase):
    """Unit tests for ``libs.auth.builder.UserFactory`` with all collaborators mocked."""

    def setUp(self):
        # Save the real classmethod so individual tests may replace it freely.
        self.get_username_from_email = UserFactory.get_username_from_email
    def tearDown(self):
        # Restore whatever a test monkey-patched away.
        UserFactory.get_username_from_email = self.get_username_from_email
    @mock.patch('libs.auth.builder.User.objects.filter')
    def test_user_exists_should_return_true_if_hashed_email_exists(
            self, user_filter):
        # user_exists() must hash the email and report True when a matching
        # username row is found.
        # setup
        email = '[email protected]'
        user_filter.return_value.count.return_value = 1
        UserFactory.get_username_from_email = mock.Mock()
        # action
        returned_value = UserFactory.user_exists(email)
        # assert
        self.assertTupleEqual((email,),
            UserFactory.get_username_from_email.call_args[0])
        self.assertDictEqual(
            dict(username=UserFactory.get_username_from_email(email)),
            user_filter.call_args[1])
        self.assertTrue(returned_value)
    @mock.patch('libs.auth.builder.User.objects.filter')
    def test_user_exists_should_return_false_if_hashed_email_doesnt_exists(
            self, user_filter):
        # Same flow, but a zero row count must yield False.
        # setup
        email = '[email protected]'
        user_filter.return_value.count.return_value = 0
        UserFactory.get_username_from_email = mock.Mock()
        # action
        returned_value = UserFactory.user_exists(email)
        # assert
        self.assertTupleEqual((email,),
            UserFactory.get_username_from_email.call_args[0])
        self.assertDictEqual(
            dict(username=UserFactory.get_username_from_email(email)),
            user_filter.call_args[1])
        self.assertFalse(returned_value)
    @mock.patch('libs.auth.builder.User')
    def test_create_user_should_hash_email_use_it_as_username_set_password_and_returned_saved_user(
            self, user_class):
        # create_user() must build a User with the hashed-email username,
        # set the password, save it and return the same instance.
        # setup
        email = '[email protected]'
        password = 'password'
        UserFactory.get_username_from_email = mock.Mock()
        # action
        returned_value = UserFactory.create_user(email, password)
        # assert
        self.assertTupleEqual((email,),
            UserFactory.get_username_from_email.call_args[0])
        self.assertDictEqual(
            dict(username=UserFactory.get_username_from_email.return_value,
                email=email),
            user_class.call_args[1])
        self.assertTupleEqual((password,),
            user_class.return_value.set_password.call_args[0])
        self.assertEqual(1, user_class.return_value.save.call_count)
        self.assertEqual(id(user_class.return_value), id(returned_value))
    @mock.patch('libs.auth.builder.random.randint')
    @mock.patch('libs.auth.builder.hashlib.sha1')
    @mock.patch('libs.auth.builder.salt_b64encode')
    @mock.patch('libs.auth.builder.sha1_b64encode')
    def test_get_username_from_email_should_hash_and_normalize_email(
            self, sha1_b64encode, salt_b64encode, sha1, randint):
        # Pins the exact username construction: sha1+b64 of the email, a
        # random 64-bit salt, then a 30-char slice of the combined string.
        # setup
        email = '[email protected]'
        normalized_email = 'a_verylargeemailexample.com'
        randint.return_value = 10
        salt_b64encode.return_value = 'abcdefghijklmnopqrstuvwxyz0123456789ABCD'
        sha1_b64encode.return_value = 'abcdefghijklmnopqrstuvwxyz0123456789ABCD'
        # action
        returned_value = UserFactory.get_username_from_email(email)
        # assert
        self.assertTupleEqual((email,), sha1.call_args[0])
        self.assertTupleEqual((sha1.return_value.digest.return_value,),
            sha1_b64encode.call_args[0])
        self.assertTupleEqual((0, 2**64,), randint.call_args[0])
        self.assertTupleEqual(('10',), salt_b64encode.call_args[0])
        self.assertEqual(
            (normalized_email[:10] + salt_b64encode.return_value[:-10] \
                + salt_b64encode.return_value)[:30], returned_value)
|
UTF-8
|
Python
| false | false | 2,014 |
2,619,930,086,375 |
448f38fa9be454d8b3a9bdfe95330f7ad1077fd4
|
45a581fed7d117d77872039bc22c071ed118730c
|
/test/init_test_db.py
|
78bb2ee5caa1698df1fb752a45e209cb452c709e
|
[] |
no_license
|
vavan/eia
|
https://github.com/vavan/eia
|
866229eb4c8954266cc371500c7d09645efb9841
|
fd54ed51302dd0a698e226ee5ac957fbfb9d16d9
|
refs/heads/master
| 2020-05-17T18:01:07.416154 | 2014-11-09T06:15:09 | 2014-11-09T06:15:09 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import sys, os
sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), '../'))
from bin.sql_db import *
# Remove any stale database so each test run starts from a clean schema.
# BUG FIX: os.remove raises OSError when the file is missing (e.g. on the
# very first run), so only delete it when it actually exists.
if os.path.exists('test_database.lite'):
    os.remove('test_database.lite')
db = DataBase('test_database.lite', debug_mode = True, sql = SqlTest)
db.sql.load('database.sql')
|
UTF-8
|
Python
| false | false | 2,014 |
7,335,804,184,750 |
be0c30059c9839d3244e097ba06a4ab0fdda81be
|
65b6d9aa730fd5df09117082bfd31cd6ffa415aa
|
/flask_cloudinary.py
|
73451b6fddde6f98de958d7805cdf0796838cf2c
|
[] |
no_license
|
yoavram/volver
|
https://github.com/yoavram/volver
|
5411f464fb0bef985f15731b9910ecf0d225dd6a
|
c22def197c6168741c4245bf6718d3c0b3709628
|
refs/heads/master
| 2020-05-17T18:01:41.328198 | 2014-05-19T06:04:21 | 2014-05-19T06:04:54 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from cloudinary import uploader #pip install git+https://github.com/cloudinary/pycloudinary/
class Cloudinary(object):
    """Thin Flask helper around the Cloudinary upload API.

    Credentials come from ``app.config['CLOUDINARY_URL']``, which has the
    form ``cloudinary://api_key:api_secret@cloud_name``.
    """

    def __init__(self, app):
        # Drop the scheme, then turn "key:secret@name" into "key:secret:name"
        # so one split yields all three credential fields.
        credentials = app.config['CLOUDINARY_URL'].split('://')[1]
        self.api_key, self.api_secret, self.name = (
            credentials.replace("@", ":").split(":"))

    def upload(self, image):
        """Upload a file-like object's stream; return Cloudinary's response dict."""
        params = uploader.build_upload_params()
        return uploader.call_api(
            "upload",
            params,
            api_key=self.api_key,
            api_secret=self.api_secret,
            cloud_name=self.name,
            file=image.stream,
        )
''' res example:
{
u'secure_url': u'https://d3jpl91pxevbkh.cloudfront.net/huouwlpzr/image/upload/v1358978552/1001.png',
u'public_id': u'1001',
u'format': u'png',
u'url': u'http://res.cloudinary.com/huouwlpzr/image/upload/v1358978552/1001.png',
u'bytes': 4487,
u'height': 512,
u'width': 512,
u'version': 1358978552,
u'signature': u'6064602083ccb2fa86d73f979d8c70ea4bff731d',
u'resource_type': u'image'
}
'''
|
UTF-8
|
Python
| false | false | 2,014 |
14,559,939,143,577 |
021328651db601ac94419b3e37a75f3c85c9a4b4
|
9bbb1d240dc89f2567d8f0e8fcd264023a40d62a
|
/soulightrd/scripts/13_generate_project_activity.py
|
4044d0f6fa4d4dbf99a8514da8711b1f5eaa108d
|
[] |
no_license
|
vunhatminh241191/SoulightRd
|
https://github.com/vunhatminh241191/SoulightRd
|
256b19ab8797d3ef1e3613fb7a7fc6cc58806846
|
7727aaab2990fe6174d3601c470198324f7faf3f
|
refs/heads/master
| 2021-01-23T15:29:30.807748 | 2014-11-27T09:08:26 | 2014-11-27T09:08:26 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os, sys,random, datetime
SETTING_PATH = os.path.abspath(__file__ + "/../../")
PROJECT_PATH = os.path.abspath(__file__ + "/../../../")
sys.path.append(SETTING_PATH)
sys.path.append(PROJECT_PATH)
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
from django.contrib.auth.models import User
from soulightrd.apps.main.models import ProjectActivity, Project, Photo
from soulightrd.apps.main.models import OrganizationBoardMember
from soulightrd.apps.app_helper import get_any_admin_object, generate_unique_id
from soulightrd.apps.app_settings import DEFAULT_IMAGE_PATH_MAPPING, DEFAULT_IMAGE_UNIQUE_ID
from dummy_database import NAMES
from django import db
def main():
    """Seed the database with one dummy ProjectActivity per Project.

    Idempotent: does nothing when any ProjectActivity rows already exist.
    """
    print "... RUNNING GENERATE PROJECT ACTIVITY ..."
    project_activities = ProjectActivity.objects.all()
    if len(project_activities) == 0:
        admin = get_any_admin_object()
        project_activity_picture = None
        try:
            # Reuse the shared placeholder image if a previous run created it.
            project_activity_picture = Photo.objects.get(
                unique_id=DEFAULT_IMAGE_UNIQUE_ID['default_project_activity_picture'])
        except Photo.DoesNotExist:
            project_activity_picture = Photo.objects.create(caption="default_project_activity_picture"
                ,user_post=admin,image=DEFAULT_IMAGE_PATH_MAPPING['default_project_activity_picture']
                ,unique_id=generate_unique_id("photo"))
        try:
            projects = Project.objects.all()
            for project in projects:
                # Random plausible date; day capped at 28 to stay valid in
                # every month.
                year = random.choice(range(2005, 2014))
                month = random.choice(range(1, 12))
                day = random.choice(range(1, 28))
                # First board member of the owning organization becomes the
                # responsible user (IndexError if the org has no members).
                responsible_member = OrganizationBoardMember.objects.filter(
                    organization=project.organization)[0].user
                project_activity = ProjectActivity.objects.create(
                    title='abcdef',
                    project=project,
                    description='done',
                    responsible_member=responsible_member,
                    date=datetime.datetime(year,month,day))
                project_activity.image_proof.add(project_activity_picture)
                project_activity.save()
            print "Generate Project Activity Successfully"
        except:
            print "Generate Project Activity Failed"
            raise
        db.close_connection()
    else:
        print "Project Activity dummy data already created"
if __name__ == '__main__':
    # Seed script entry point: expects the deployment stage as argv[1] and
    # refuses to generate dummy data against production.
    # BUG FIX: the original crashed with IndexError when no argument was given.
    if len(sys.argv) < 2:
        sys.exit("Usage: %s <stage>" % sys.argv[0])
    stage = sys.argv[1]
    if stage != "prod":
        main()
|
UTF-8
|
Python
| false | false | 2,014 |
8,495,445,320,118 |
18b8bf732e78e4c4d9ab9e5e43c35bbd46c8728e
|
6126bbd2ab6cf2cfd562d600d79a5069adcefa4d
|
/mysite/polls/urls.py
|
af5cf99ed5dd85694d8d9e47a617913926308bcd
|
[] |
no_license
|
huhugravity/djangotest
|
https://github.com/huhugravity/djangotest
|
a64ef938f9d3290472250afc0152f931a40c7a2d
|
e201dfd22bc7ac93b89e66a8a9fcf3208bcc2a54
|
refs/heads/master
| 2019-06-17T09:34:16.006049 | 2014-09-02T00:16:03 | 2014-09-02T00:16:03 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
'''
Created on Jul 12, 2014
@author: Hu
'''
from django.conf.urls import patterns, url
from polls import views
# URL routing for the polls app (old-style Django patterns()).
urlpatterns = patterns('',
    # ex: /polls/
    url(r'^$', views.index, name='index'),
    # ex: /polls/5/
    url(r'^(?P<poll_id>\d+)/$', views.detail, name='detail'),
    # ex: /polls/5/results/
    url(r'^(?P<poll_id>\d+)/results/$', views.results, name='results'),
    # ex: /polls/5/vote/
    url(r'^(?P<poll_id>\d+)/vote/$', views.vote, name='vote'),
    # NOTE(review): the two patterns below are unanchored at the end (no
    # trailing '/$'), so e.g. /polls/addpollanything also matches -- confirm
    # this is intended.
    url(r'^addpoll', views.addPoll, name = 'addpoll'),
    url(r'^thanks', views.thanks, name='thanks')
)
|
UTF-8
|
Python
| false | false | 2,014 |
7,730,941,162,582 |
f904515cf8452212bde33c6d1dabe7be839def6d
|
b39d9ef9175077ac6f03b66d97b073d85b6bc4d0
|
/Creon_gastro-resistant_capsule,_hard_SmPC.py
|
48e1f78e2658d1c6782b0589506df62f82f17c1f
|
[] |
no_license
|
urudaro/data-ue
|
https://github.com/urudaro/data-ue
|
2d840fdce8ba7e759b5551cb3ee277d046464fe0
|
176c57533b66754ee05a96a7429c3e610188e4aa
|
refs/heads/master
| 2021-01-22T12:02:16.931087 | 2013-07-16T14:05:41 | 2013-07-16T14:05:41 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# NOTE(review): this file was committed with an unresolved git merge conflict
# (<<<<<<< HEAD ... >>>>>>> eb0dbf7cfbd3e1c8a568eedcf6ca5658233104cc), which
# made it a syntax error.  Both conflicting variants are preserved below as
# expression statements so no data is lost; pick the correct one and delete
# the other.
# --- variant from HEAD ---
{'_data': [[u'Unknown',
            [['GI', u'Buksm\xe4rta* Illam\xe5ende, Strikturer i'],
             ['Skin', u'Utslag Urtikaria, kl\xe5da'],
             ['Immune system', u'Allergiska']]]],
 '_pages': [3, 4],
 u'_rank': 3,
 u'_type': u'LFSU'}
# --- variant from commit eb0dbf7cfbd3e1c8a568eedcf6ca5658233104cc ---
{'_data': [['Unknown',
            [['Skin',
              u'subkutan v\xe4vnad Immunsystemet Allergiska reaktioner, inklusive anafylaktisk chock *Rubbningar i magtarmkanalen \xe4r huvudsakligen associerade med den underliggande sjukdomen. Samma eller l\xe4gre incidens \xe4n placebo rapporterades f\xf6r diarr\xe9 och buksm\xe4rta. ** Baserat p\xe5 erfarenhet fr\xe5n spontanrapportering har strikturer lokaliserade till ileo-caecum och tjocktarmen (fibrotiserande kolopati) rapporterats hos patienter med cystisk fibros som behandlats med h\xf6ga doser av pankreatinpreparat, se avsnitt 4.4 Varningar och f\xf6rsiktighet.']]]],
 '_pages': [3, 4],
 u'_rank': 1,
 u'_type': u'LSFU'}
|
UTF-8
|
Python
| false | false | 2,013 |
5,377,299,095,216 |
fcc63016f7d2ece761955ff8e0bad14986944e25
|
2c0da62e5b24445ad243e66a4478a9a905690e62
|
/pymatgen/phasediagram/tests/test_pdmaker.py
|
5f739ed852835fa0d9631ac2df4ce5a333a607c7
|
[
"LicenseRef-scancode-unknown"
] |
non_permissive
|
chenweis/pymatgen
|
https://github.com/chenweis/pymatgen
|
cfd20c29d06d5e7c73f3b1f049f4ddba242e436f
|
29c33fe1fc589c64a21eda9e3505072b5cf43983
|
refs/heads/master
| 2021-01-18T05:08:26.004823 | 2012-04-28T22:13:01 | 2012-04-28T22:13:01 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import unittest
import os
from pymatgen.core.periodic_table import Element
from pymatgen.phasediagram.entries import PDEntryIO
from pymatgen.phasediagram.pdmaker import PhaseDiagram, GrandPotentialPhaseDiagram
class PhaseDiagramTest(unittest.TestCase):
    """Tests a PhaseDiagram built from the Li-Fe-O entries in pdentries_test.csv."""

    def setUp(self):
        module_dir = os.path.dirname(os.path.abspath(__file__))
        (self.elements, self.entries) = PDEntryIO.from_csv(os.path.join(module_dir,"pdentries_test.csv"))
        self.pd = PhaseDiagram(self.entries)
    def test_stable_entries(self):
        # Every expected phase must appear on the convex hull.
        stable_formulas = [ent.composition.reduced_formula for ent in self.pd.stable_entries]
        expected_stable = ["Fe2O3", "Li5FeO4", "LiFeO2", "Fe3O4", "Li", "Fe", "Li2O", "O2", "FeO"]
        for formula in expected_stable:
            self.assertTrue(formula in stable_formulas, formula + " not in stable entries!")
    def test_get_formation_energy(self):
        # Formation energies of the stable entries against reference values.
        stable_formation_energies = {ent.composition.reduced_formula:self.pd.get_form_energy(ent) for ent in self.pd.stable_entries}
        expected_formation_energies = {'Li5FeO4': -164.8117344866667, 'Li2O2': -14.119232793333332, 'Fe2O3': -16.574164339999996, 'FeO': -5.7141519966666685, 'Li': 0.0, 'LiFeO2': -7.732752316666666, 'Li2O': -6.229303868333332, 'Fe': 0.0, 'Fe3O4': -22.565714456666683, 'Li2FeO3': -45.67166036000002, 'O2': 0.0}
        for formula, energy in expected_formation_energies.items():
            self.assertAlmostEqual(energy, stable_formation_energies[formula], 7, "Calculated formation for " + formula + " is not correct!")
class GrandPotentialPhaseDiagramTest(unittest.TestCase):
    """Same entry set, but with oxygen held at a chemical potential of -5 eV."""

    def setUp(self):
        module_dir = os.path.dirname(os.path.abspath(__file__))
        (self.elements, self.entries) = PDEntryIO.from_csv(os.path.join(module_dir,"pdentries_test.csv"))
        self.pd = GrandPotentialPhaseDiagram(self.entries, {Element("O"): -5}, self.elements)
    def test_stable_entries(self):
        # Stable set changes under the oxygen grand potential.
        stable_formulas = [ent.original_entry.composition.reduced_formula for ent in self.pd.stable_entries]
        expected_stable = ['Li5FeO4', 'Li2FeO3', 'LiFeO2', 'Fe2O3', 'Li2O2']
        for formula in expected_stable:
            self.assertTrue(formula in stable_formulas, formula + " not in stable entries!")
    def test_get_formation_energy(self):
        stable_formation_energies = {ent.original_entry.composition.reduced_formula:self.pd.get_form_energy(ent) for ent in self.pd.stable_entries}
        expected_formation_energies = {'Fe2O3': 0.0, 'Li5FeO4': -5.305515040000046, 'Li2FeO3': -2.3424741500000152, 'LiFeO2': -0.43026396250000154, 'Li2O2': 0.0}
        for formula, energy in expected_formation_energies.items():
            self.assertAlmostEqual(energy, stable_formation_energies[formula], 7, "Calculated formation for " + formula + " is not correct!")
if __name__ == '__main__':
    unittest.main()
|
UTF-8
|
Python
| false | false | 2,012 |
15,908,558,894,682 |
063c62cd78a3e92775c8406c8af1a2dfbc9b3865
|
64ca06d152444a0dc6bef54ea3caa74866e1c8e5
|
/omniture_python/json_csv_converter.py
|
3da8dcdfda64fcb39c46d7083210ab573232f9c2
|
[
"MIT"
] |
permissive
|
DeStars/omniture_python
|
https://github.com/DeStars/omniture_python
|
c5bed21e013fa35618a3b2f8530c1220e13629a4
|
3e216474f58aa75a9ec6a208451e85808c4cb38e
|
refs/heads/master
| 2020-04-28T00:57:54.594878 | 2014-10-19T21:24:32 | 2014-10-19T21:24:32 | 25,134,272 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from datetime import datetime
__author__ = 'DeStars'
class JsonToCsvConverter:
    """Flatten an Omniture JSON report into CSV-ready rows.

    Expected input shape (subset of the Omniture report API response):
    ``{'reportSuite': {'name': ...}, 'elements': [{'name': ...}],
    'metrics': [{'name': ...}], 'data': [{'name': '<Day. DD Mon. YYYY>',
    'breakdown': [{'name': ..., 'counts': [...]}]}]}``
    """
    def __init__(self):
        pass

    def convert_to_csv(self, data):
        """Return a header row followed by one row per breakdown element."""
        csv_data = [self.__get_header_row(data)]
        # Extend directly from the generator -- the original wrapped both
        # extend() calls in redundant list comprehensions.
        csv_data.extend(self.__get_rows(data))
        return csv_data

    @staticmethod
    def __get_header_row(data):
        # Fixed columns first, then one column per element and per metric.
        header_row = ['Date', 'Report Suite']
        header_row.extend(element['name'] for element in data['elements'])
        header_row.extend(metric['name'] for metric in data['metrics'])
        return header_row

    def __get_rows(self, data):
        """Yield one row per breakdown element across all report days."""
        report_suite = data['reportSuite']['name']
        for breakdown in data['data']:
            # Day labels look like "Mon. 19 May. 2014"; normalize to ISO.
            date_string = datetime.strptime(breakdown['name'], '%a. %d %b. %Y').strftime('%Y-%m-%d')
            for breakdown_elements in breakdown['breakdown']:
                yield self.__get_row(breakdown_elements, report_suite, date_string)

    @staticmethod
    def __get_row(row_data, report_suite, date):
        row_elements = [date, report_suite, row_data['name']]
        row_elements.extend(row_data['counts'])
        return row_elements
|
UTF-8
|
Python
| false | false | 2,014 |
1,709,397,014,217 |
161fa1d862b7cccca2b2e838a9340b04940755d0
|
95225385ac25ca048578aa38d166641b4f61dc5d
|
/data_download/account/base.py
|
ab57f586045a6fb9d8e9e217e872b6c8e6c9bb59
|
[] |
no_license
|
cnnrdltn/data-download
|
https://github.com/cnnrdltn/data-download
|
b77fd2371dc150541c3dee3602d7a86684546213
|
ba1b427308141d8fca9304446ac433d7dae7c17c
|
refs/heads/master
| 2017-04-27T12:06:51.937912 | 2013-12-08T20:15:05 | 2013-12-08T20:15:05 | 15,008,605 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from abc import ABCMeta, abstractmethod
class Account(object):
    """Abstract base for a downloadable third-party account.

    Subclasses supply the service-specific ``connect``, ``_verify`` and
    ``download`` logic; callers should always go through :meth:`verify`
    rather than calling ``_verify`` directly, to get consistent state and
    error handling.
    """
    __metaclass__ = ABCMeta

    def __init__(self):
        super(Account, self).__init__()
        # Only ever set True by a successful verify().
        self.is_verified = False

    @abstractmethod
    def download(self):
        """Download user data associated with the account according to
        download preferences.  Cannot download unless ``is_verified``;
        verification must be performed beforehand.

        BUG FIX: the abstract methods were declared without ``self``
        (``def download():``), which would raise TypeError if ever invoked
        through an instance; the stray class-level docstring-string that
        described this method has been folded in here as well.
        """
        pass

    @abstractmethod
    def _verify(self):
        """Account-specific verification; returns a numeric status code."""
        pass

    def verify(self):
        """Run account verification, update ``is_verified`` and return it.

        Raises VerificationFailedException on failure.
        NOTE(review): a *non-zero* status code is treated as success here,
        matching the original logic -- confirm that convention against the
        ``_verify`` implementations.
        """
        status_code = self._verify()
        if status_code != 0:
            self.is_verified = True
        else:
            self.is_verified = False
            raise VerificationFailedException(self, status_code)
        return self.is_verified

    @abstractmethod
    def connect(self):
        """Establish the account-specific connection."""
        pass
class VerificationFailedException(Exception):
    """Raised by Account.verify() when account verification fails."""

    def __init__(self, account, status_code):
        # Feed args to Exception so repr()/args behave conventionally.
        super(VerificationFailedException, self).__init__(account, status_code)
        self.account = account
        self.status_code = status_code

    def __str__(self):
        # BUG FIX: the original concatenated the (typically int) status_code
        # onto a str, so rendering the exception itself raised TypeError.
        return "Verification failure for %r with status code %s" % (
            self.account, self.status_code)
|
UTF-8
|
Python
| false | false | 2,013 |
8,684,423,905,493 |
03bd2ff09da4a12491f35bcdec5b0970613b5b94
|
a81befcf0e81144156ae10a01791cb268c5fec49
|
/Implementations/Discussions/find-minimum-k-elements.py
|
588b0771e32b51c9226256aa97a6a8fba7d33c6d
|
[
"GPL-2.0-only"
] |
non_permissive
|
rsivapr/art-of-coding-for-interviews
|
https://github.com/rsivapr/art-of-coding-for-interviews
|
acbc58a7c1c580a4d2ed603a9427d30811f98508
|
4ddbb3ac7bbba3b7243d102d9873273b5fea3e36
|
refs/heads/master
| 2021-01-01T19:52:03.515973 | 2013-11-29T00:02:35 | 2013-11-29T00:02:35 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
Question :: In an integer array with N elements (N is large),
find the minimum k elements (k<<N).
Given: A large array, say `origin`.
"""
def find_minimum_sort(k, origin):
    """Return the k smallest elements of origin, in ascending order.

    Sorting the whole array costs O(N log N) time and O(N) extra space,
    which is wasteful when k << N -- kept here as the baseline approach.
    """
    return sorted(origin)[:k]
def find_minimum_pick(k, origin):
    """Return the k smallest elements of origin (order unspecified).

    Keeps a k-element working set and scans the remaining elements once:
    O(N*k) time and O(k) extra space -- better than full sorting when k << N.

    BUG FIX: the original scanned *all* of origin, including the k elements
    already seeded into the working set, so a seed element could evict
    itself and appear twice (e.g. k=2, origin=[5, 1, 3] returned [1, 1]).
    Only the tail origin[k:] is scanned now.
    """
    if k <= 0:
        return []
    smalls = list(origin[:k])
    for number in origin[k:]:
        current_max = max(smalls)
        if current_max > number:
            smalls[smalls.index(current_max)] = number
    return smalls
def find_minimum(k, origin):
    """Return the k smallest elements of origin (order unspecified).

    Maintains a k-element max-heap (simulated by negating values on top of
    heapq's min-heap): O(N log k) time, O(k) space.  Peeking the root is
    O(1); each replacement is O(log k).

    Note: a min-heap over all of `origin` is not used here because keeping
    only k elements reduces space from O(N) to O(k).

    NOTE(review): the original body was pseudocode referencing an undefined
    ``heap`` object and returned nothing; this is a working implementation
    of the algorithm its docstring described.
    """
    import heapq
    if k <= 0:
        return []
    # Negate so the largest of the current k candidates sits at the root.
    max_heap = [-value for value in origin[:k]]
    heapq.heapify(max_heap)
    for number in origin[k:]:
        if number < -max_heap[0]:
            heapq.heapreplace(max_heap, -number)
    return [-value for value in max_heap]
def fin_minimum_optimized(k, origin):
    """Return the k smallest elements of origin in ascending order.

    Builds a min-heap with Floyd's bottom-up algorithm (heapq.heapify, O(N)),
    then pops the minimum k times at O(log N) each.  Total O(N + k log N),
    which is O(N) whenever k < O(N / log N).

    NOTE(review): the original body called an undefined ``heapify_floyd``
    and never returned ``mink``; heapq.heapify implements the same bottom-up
    construction.  A copy of origin is heapified so the caller's list is
    not mutated.
    """
    import heapq
    minheap = list(origin)
    heapq.heapify(minheap)
    return [heapq.heappop(minheap) for _ in range(min(max(k, 0), len(minheap)))]
|
UTF-8
|
Python
| false | false | 2,013 |
11,905,649,371,029 |
ab48fd4ea3ed8afe920952e17730d54b7913996b
|
0b078a8ce2ef8a77562618a7aa1c50d5cc61da3e
|
/encurtador_url/views.py
|
87f8798796d5cbf0a7258c323e5b58409538edae
|
[] |
no_license
|
hedleygois/encurtador-url
|
https://github.com/hedleygois/encurtador-url
|
39c7b8269a6d7967356ea43162bb8a518b55ed6e
|
4d11cdb71ad905b2613a3225b7eb956da3c26d8f
|
refs/heads/master
| 2016-08-06T15:16:30.054254 | 2014-12-02T12:54:53 | 2014-12-02T12:54:53 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.shortcuts import render
from django.template import RequestContext, loader
from django.core.context_processors import csrf
from django.http import HttpResponse, HttpResponsePermanentRedirect
from django.shortcuts import render_to_response
from encurtador_url.models import EncodedURL
from encurtador_url.encurtador import Encurtador
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.contrib import messages
import logging
logger = logging.getLogger(__name__)
# Create your views here.
def login_user(request):
    """Authenticate the POSTed credentials and render the index page.

    On success the user id is cached in the session and the user's URLs are
    listed (ascending click count, as in the original query).  On a bad or
    inactive account an error message is flashed and the plain index page is
    rendered.
    """
    username = request.POST.get('username')
    password = request.POST.get('password')
    user = authenticate(username=username, password=password)
    if user is not None and user.is_active:
        login(request, user)
        request.session['user'] = user.id
        template = loader.get_template('encurtador_url/index.html')
        context = RequestContext(request, {
            'user': user,
            'urls': EncodedURL.objects.filter(user=user).order_by('clicks')
        })
        return HttpResponse(template.render(context))
    # BUG FIX: the original referenced an undefined variable ``c`` here
    # (NameError on every failed login) and returned None (an HTTP 500 in
    # Django) for authenticated-but-inactive users.
    c = {}
    c.update(csrf(request))
    messages.add_message(request, messages.ERROR, 'User Do Not Exists')
    return render(request, 'encurtador_url/index.html', c)
def logout_user(request):
    """Log the current user out, drop the cached user id, and render index."""
    c = {}
    c.update(csrf(request))
    # Remove our own session key before Django's logout() flushes auth state.
    # NOTE(review): raises KeyError if 'user' was never stored -- confirm this
    # view is only reachable after login_user().
    del request.session['user']
    logout(request)
    return render_to_response('encurtador_url/index.html',c)
def registrar(request):
    """Create a new auth user from the POSTed credentials and render index.

    NOTE(review): duplicate usernames are not handled here; the database
    IntegrityError would propagate -- confirm whether the form prevents it.
    """
    username = request.POST.get('username')
    password = request.POST.get('password')
    user = User.objects.create_user(username=username,password=password)
    template = loader.get_template('encurtador_url/index.html')
    context = RequestContext(request, {
        'user': user
    })
    return HttpResponse(template.render(context))
def index(request):
    """Landing page: list the user's short URLs when authenticated."""
    if request.user.is_authenticated():
        template = loader.get_template('encurtador_url/index.html')
        # NOTE(review): assumes login_user() stored 'user' in the session; an
        # authenticated session without that key raises KeyError -- confirm.
        user = User.objects.get(pk=request.session['user'])
        context = RequestContext(request, {
            'user': user,
            'urls': carregar_urls_do_usuario_ordenadas_pelos_clicks(user)
        })
        return HttpResponse(template.render(context))
    else:
        # Anonymous visitor: plain page with a CSRF token for the form.
        c = {}
        c.update(csrf(request))
        return render_to_response('encurtador_url/index.html', c)
def encurtar_anonima(request):
    """Shorten the POSTed URL for either an authenticated or anonymous user
    and render the result page.

    The form posts a bare host/path, so the http:// scheme is prepended here.
    (Removed the original's dead local ``url_encurtada = ""``.)
    """
    url = request.POST.get('url', "")
    url_a_ser_encurtada = 'http://' + url
    template = loader.get_template('encurtador_url/resultado.html')
    if request.user.is_authenticated():
        context = tratar_usuario_autenticado(url_a_ser_encurtada, request)
    else:
        context = tratar_usuario_anonimo(url_a_ser_encurtada, request)
    return HttpResponse(template.render(context))
def ir_para_url(request, url_encurtada):
    """Resolve a short code and issue a 301 redirect to the original URL.

    Click counting only happens for authenticated users, and only on the row
    owned by that user; anonymous hits are not counted.
    """
    if request.user.is_authenticated():
        url_original = EncodedURL.objects.get(encoded_url_text=url_encurtada, user=request.user)
        url_original.clicks += 1
        url_original.save()
    else:
        url_original = EncodedURL.objects.get(encoded_url_text=url_encurtada)
    return HttpResponsePermanentRedirect(url_original.url_text.__str__())
def tratar_usuario_autenticado(url_a_ser_encurtada, request):
    """Build the result-page context for a logged-in user.

    Reuses the user's existing short URL when one exists; otherwise shortens
    the URL, persists it linked to the user, and returns the new short form.
    """
    url_ja_encurtada = checar_url_ja_encurtada_pelo_usuario(url_a_ser_encurtada, request.user)
    if url_ja_encurtada is not None:
        context = RequestContext(request, {
            'url_encurtada': url_ja_encurtada.encoded_url_text,
            'user': request.user
        })
    else:
        encurtador = Encurtador()
        url_encurtada = encurtador.encurtar(url_a_ser_encurtada)
        EncodedURL.objects.create(url_text=url_a_ser_encurtada, encoded_url_text=url_encurtada, clicks=0, user=request.user)
        # BUG FIX: the original put the literal string 'user' in the context
        # instead of the authenticated user object (cf. the branch above), so
        # the template saw a bogus user value on first shortening.
        context = RequestContext(request, {
            'url_encurtada': url_encurtada,
            'user': request.user,
        })
    return context
def tratar_usuario_anonimo(url_a_ser_encurtada, request):
    """Build the result-page context for an anonymous visitor.

    NOTE(review): a new EncodedURL row is inserted on every call, even when
    an identical anonymous short URL already exists (the existing one is
    only used for the displayed text) -- confirm whether the duplicate
    insert is intended.
    """
    encurtador = Encurtador()
    url_encurtada = encurtador.encurtar(url_a_ser_encurtada)
    url_encurtada_existente = checar_url_ja_encurtada(url_encurtada)
    if(url_encurtada_existente is not None):
        context = RequestContext(request, {
            'url_encurtada': url_encurtada_existente.encoded_url_text
        })
    else:
        context = RequestContext(request, {
            'url_encurtada': url_encurtada
        })
    url_anonima = EncodedURL.objects.create(url_text = url_a_ser_encurtada, encoded_url_text = url_encurtada, clicks = 0)
    return context
def checar_url_ja_encurtada(url):
    """Return the anonymous EncodedURL with short form ``url``, or None.

    BUG FIX: the original bare ``except`` silenced every exception,
    including programming errors.  "Not found" and "duplicate rows" (which
    tratar_usuario_anonimo can create) are the only cases that legitimately
    mean "no usable row", so only those are caught now.
    """
    try:
        return EncodedURL.objects.get(encoded_url_text=url, user=None)
    except (EncodedURL.DoesNotExist, EncodedURL.MultipleObjectsReturned):
        return None
def checar_url_ja_encurtada_pelo_usuario(url, user_logado):
    """Return the given user's EncodedURL for original URL ``url``, or None.

    BUG FIX: replaced the original bare ``except`` (which hid programming
    errors) with the two ORM exceptions that actually mean "no usable row".
    """
    try:
        return EncodedURL.objects.get(url_text=url, user=user_logado)
    except (EncodedURL.DoesNotExist, EncodedURL.MultipleObjectsReturned):
        return None
def carregar_urls_do_usuario_ordenadas_pelos_clicks(user):
    """Return the user's EncodedURLs ordered by click count, most-clicked first."""
    return EncodedURL.objects.filter(user=user).order_by('-clicks')
|
UTF-8
|
Python
| false | false | 2,014 |
3,564,822,902,746 |
5ab6083fb5b395666e02542790f95c032a51e9d1
|
a1df0e7530fda073ef56e5275a1e6c21e49dfc76
|
/bin/stalker-client
|
8a0be7f4199793d459c856ee777dba864adb58da
|
[
"Apache-2.0"
] |
permissive
|
redbo/stalker
|
https://github.com/redbo/stalker
|
366a490e401244459f61e0511750d26789e84b7d
|
81a6ccbd2ab7b1eb0c926976e0ce9ba56eedafab
|
refs/heads/master
| 2018-01-15T13:55:21.208008 | 2014-02-24T18:24:04 | 2014-02-24T18:24:04 | 19,992,142 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
import json
import urllib2
import os
import sys
import optparse
from datetime import datetime
def _requeue(config, cid):
    """Ask stalkerweb to schedule check ``cid`` to run again immediately.

    POSTs {'next': 'now'} to /checks/id/<cid>/next and prints the outcome.
    """
    target = config['url'] + '/checks/id/' + cid + '/next'
    headers = {'X-API-KEY': config['key'], 'Content-Type': 'application/json'}
    data = {'next': 'now'}
    try:
        req = urllib2.Request(target, json.dumps(data), headers=headers)
        res = urllib2.urlopen(req)
        content = res.read()
        print "%s set for recheck." % cid
    except urllib2.HTTPError as err:
        if err.code == 404:
            print "Error. No check with id %s" % cid
        else:
            print "Failed. Status: %s Msg: %s" % (err.code, err.reason)
def _state_log(config, target_check):
    """Print the up/down state history for a ``host:check_name`` target."""
    # Split only on the first ':' so check names may themselves contain ':'.
    host, check = target_check.split(':', 1)
    endpoint = '/state_log/%s/%s' % (host, check)
    url = config['url'] + endpoint
    headers = {'X-API-KEY': config['key'], 'Content-Type': 'application/json'}
    try:
        req = urllib2.Request(url, headers=headers)
        res = urllib2.urlopen(req)
        content = res.read()
        result = json.loads(content)
        print "## State Log for %s on %s ##" % (check, host)
        if len(result['state_log']) > 0:
            for entry in result['state_log']:
                # 'last' is a unix timestamp; rendered in UTC.
                ts = datetime.utcfromtimestamp(entry['last'])
                if entry['status']:
                    state = "UP "
                else:
                    state = "DOWN"
                print "[%s] %s - %s" % (ts, state, entry['out'])
        else:
            print "No history yet..."
    except urllib2.HTTPError as err:
        if err.code == 404:
            print "Error. No check or check history for %s %s" % (host, check)
        else:
            print "Failed. Status: %s Msg: %s" % (err.code, err.reason)
def _remove_host(config, pattern):
    """DELETE the host (by name or ip) from stalkerweb and print the outcome."""
    target = config['url'] + '/hosts/' + pattern
    headers = {'X-API-KEY': config['key'], 'Content-Type': 'application/json'}
    try:
        req = urllib2.Request(target, headers=headers)
        # urllib2 has no native DELETE support; override the method hook.
        req.get_method = lambda: 'DELETE'
        res = urllib2.urlopen(req)
        content = res.read()
        print "%s deleted." % pattern
    except urllib2.HTTPError as err:
        if err.code == 404:
            print "Error. No host matching %s" % pattern
        else:
            print "Failed. Status: %s Msg: %s" % (err.code, err.reason)
def _remove_check(config, cid, name=None):
    """DELETE check ``cid``; ``name`` is only used to pretty-print the result."""
    target = config['url'] + '/checks/id/' + cid
    headers = {'X-API-KEY': config['key'], 'Content-Type': 'application/json'}
    try:
        req = urllib2.Request(target, headers=headers)
        # urllib2 has no native DELETE support; override the method hook.
        req.get_method = lambda: 'DELETE'
        res = urllib2.urlopen(req)
        content = res.read()
        print "%s deleted." % (name or cid)
    except urllib2.HTTPError as err:
        if err.code == 404:
            print "Error. No check with id %s" % cid
        else:
            print "Failed. Status: %s Msg: %s" % (err.code, err.reason)
def _remove(config, pattern):
    """Interactively delete a host and all of its checks after confirmation.

    NOTE(review): the host itself is only removed when the lookup result has
    a 'checks' key; a host with no checks is never deleted -- confirm whether
    that is intended.
    """
    print "Looking up checks for %s..." % pattern
    host = _get_checks_by_host(config, pattern)
    if host:
        if 'checks' in host:
            print [i['check'] for i in host['checks']]
            _ok = raw_input('Ok to remove all these checks? [y/n] ').strip().lower()
            if _ok != 'y':
                print "Aborting."
                return
            else:
                for check in host['checks']:
                    _remove_check(config, check['_id'], check['check'])
                print "Removing host..."
                _remove_host(config, pattern)
def _get_checks_by_host(config, pattern):
    """GET and parse the check list for a host; returns the decoded dict.

    On HTTP errors an error message is printed and None is returned
    implicitly -- callers must handle a falsy result.
    """
    target = config['url'] + '/checks/host/' + pattern
    headers = {'X-API-KEY': config['key'], 'Content-Type': 'application/json'}
    try:
        req = urllib2.Request(target, headers=headers)
        res = urllib2.urlopen(req)
        return json.loads(res.read())
    except urllib2.HTTPError as err:
        if err.code == 404:
            print "Error. No checks for %s" % pattern
        else:
            print "Failed. Status: %s Msg: %s" % (err.code, err.reason)
def _pjson(content):
    """Pretty-print a JSON string; syntax-highlighted when pygments is installed."""
    try:
        import pygments.lexers
        lexer = pygments.lexers.get_lexer_by_name(
            'javascript') # just use this incase its an old version
        from pygments.formatters import TerminalFormatter
        from pygments import highlight
        print(highlight(content, lexer, TerminalFormatter()))
    except ImportError:
        # Fallback without pygments: load and then dump to print
        output = json.loads(content)
        print json.dumps(output, sort_keys=False, indent=4)
def _request(config, rtype):
    """GET one of the check-state endpoints; return (raw body, parsed json)."""
    paths = {'alerting': '/checks/state/alerting',
             'pending': '/checks/state/pending',
             'suspended': '/checks/state/suspended'}
    request = urllib2.Request(config['url'] + paths[rtype],
                              headers={'X-API-KEY': config['key']})
    body = urllib2.urlopen(request).read()
    return body, json.loads(body)
def main():
    """stalkerweb cli: parse options, resolve config from file/env, then
    run the requested queries/actions against the stalkerweb API."""
    usage = '''%prog -a -p -s -v'''
    args = optparse.OptionParser(usage)
    args.add_option('--alerting', '-a', action="store_true",
                    help="Get alerting")
    args.add_option('--pending', '-p', action="store_true",
                    help="Get pending")
    args.add_option('--suspended', '-s', action="store_true",
                    help="Get suspended")
    args.add_option('--verbose', '-v', action="store_true",
                    help="Print out json (fancy if pygments is present")
    args.add_option('--recheck', dest='recheck_id',
                    help="Recheck check with given id")
    args.add_option('--remove', dest='remove_host',
                    help="Remove all checks for given host or ip")
    args.add_option('--remove-check', dest='remove_check',
                    help="Remove check with given id")
    args.add_option('--list', '-l', dest='list_host',
                    help="List all checks for given host or ip")
    args.add_option('--state-log', dest="state_log_target",
                    help="Show the state log for a given target ex: --state-log=hostname:check_name")
    options, arguments = args.parse_args()
    # Config file path may be overridden through the environment.
    conf_file = os.environ.get('stalker-client-conf',
                               '/etc/stalker/stalker-client.conf')
    config = {'url': None, 'key': None}
    if os.path.exists(conf_file):
        with open(conf_file) as f:
            for line in f:
                if line.startswith('stalkerweb_url'):
                    config['url'] = line.split('=')[1].strip().rstrip('/')
                elif line.startswith('stalkerweb_api_key'):
                    config['key'] = line.split('=')[1].strip()
    # Environment variables take precedence over the config file.
    if os.environ.get('stalkerweb_url'):
        config['url'] = os.environ.get('stalkerweb_url').rstrip('/')
    if os.environ.get('stalkerweb_api_key'):
        config['key'] = os.environ.get('stalkerweb_api_key')
    if not config['url']:
        print "No stalkerweb_url found in env nor in %s" % conf_file
        sys.exit(1)
    if not config['key']:
        print "No stalkerweb_api_key found in env nor in %s" % conf_file
        sys.exit(1)
    # With no arguments at all, default to showing alerting checks.
    if len(sys.argv) == 1:
        options.alerting = True
    if options.state_log_target:
        _state_log(config, options.state_log_target)
    if options.recheck_id:
        _requeue(config, options.recheck_id)
    if options.remove_check:
        _remove_check(config, options.remove_check)
    if options.remove_host:
        _remove(config, options.remove_host)
    if options.list_host:
        host = _get_checks_by_host(config, options.list_host)
        if host:
            print json.dumps(host, sort_keys=False, indent=4)
    if options.alerting:
        failed_hosts = set()
        content, parsed = _request(config, 'alerting')
        if options.verbose:
            _pjson(content)
        else:
            print "=== Alerting ==="
            # Group the alert entries by hostname before printing.
            sorted_alerts = {}
            for i in parsed['alerting']:
                if i['hostname'] in sorted_alerts:
                    sorted_alerts[i['hostname']].append(i)
                else:
                    sorted_alerts[i['hostname']] = []
                    sorted_alerts[i['hostname']].append(i)
            for host in sorted_alerts:
                for i in sorted_alerts[host]:
                    clean_out = " ".join([x for x in i['out'].split('\n')])
                    # urlopen errors mean the agent itself was unreachable;
                    # collect those hosts separately instead of printing.
                    if clean_out.startswith('<urlopen error [Errno'):
                        failed_hosts.add(i['hostname'])
                    else:
                        print '(%s) %s on %s is alerting because "%s"' % (i['_id'], i['check'],
                                                                          i['hostname'],
                                                                          clean_out)
            print "=== Alerting - unreachable ==="
            print " ".join([x for x in failed_hosts])
    if options.pending:
        content, parsed = _request(config, 'pending')
        if options.verbose:
            _pjson(content)
        else:
            print "=== Pending ==="
            for i in parsed['pending']:
                clean_out = " ".join([x for x in i['out'].split('\n')])
                print '%s on %s' % (i['check'], i['hostname'])
    if options.suspended:
        content, parsed = _request(config, 'suspended')
        if options.verbose:
            _pjson(content)
        else:
            print "=== Suspended ==="
            for i in parsed['suspended']:
                clean_out = " ".join([x for x in i['out'].split('\n')])
                print '%s on %s is suspended. last output: "%s"' % (i['check'], i['hostname'], clean_out)


if __name__ == '__main__':
    main()
|
UTF-8
|
Python
| false | false | 2,014 |
19,499,151,538,878 |
465e23003f6ce748ddd7907261b4e8b40e45714c
|
b26ee3e0d7553efd893c2122bccb61cb152e36b8
|
/tiddlywebplugins/ibuilder.py
|
96d528951ab2a6ac58a5a95c56db3f22d6a9230e
|
[] |
no_license
|
tiddlyweb/tiddlywebplugins.ibuilder
|
https://github.com/tiddlyweb/tiddlywebplugins.ibuilder
|
815e250083043963df796424d03a8b92a4a5909b
|
d038dc6e3edab2742ff028805ddb2947c95f5ad1
|
refs/heads/master
| 2021-01-01T18:22:32.214254 | 2013-08-01T15:57:59 | 2013-08-01T15:57:59 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
A collection of tools and functions for building tiddlywebplugins
packages come with pre-determined tiddlers, bags and recipes.
Use tiddlywebplugins.pkgstore to keep those entities in the
target package.
"""
from tiddlyweb.model.bag import Bag
from tiddlyweb.store import Store
from tiddlyweb.util import std_error_message
from tiddlywebplugins.twimport import recipe_to_urls, url_to_tiddler
__version__ = '0.1.4'
def cacher(args):
    """
    twibuilder entry point: cache tiddler information for the package
    named as the first command-line argument.
    """
    cache_tiddlers(args[0])
def cache_tiddlers(package_name):
    """
    Store instance tiddlers in the package.

    Reads store_contents from <package>.instance; tiddler files are
    written to <package>/resources/store via tiddlywebplugins.pkgstore.
    """
    instance_module = __import__('%s.instance' % package_name, None, None,
            ['instance'])
    store_contents = instance_module.store_contents
    target_store = Store('tiddlywebplugins.pkgstore',
            {'package': package_name, 'read_only': False}, {})

    # Expand every .recipe entry into its constituent tiddler URLs so
    # each bag maps to a flat list of retrievable URIs.
    sources = {}
    for bag_name, uris in store_contents.items():
        expanded = []
        for uri in uris:
            if uri.endswith('.recipe'):
                expanded.extend(recipe_to_urls(uri))
            else:
                expanded.append(uri)
        sources[bag_name] = expanded

    # Create each bag and store every tiddler fetched from its URIs.
    for bag_name, uris in sources.items():
        bag = Bag(bag_name)
        target_store.put(bag)
        for uri in uris:
            std_error_message('retrieving %s' % uri)
            tiddler = url_to_tiddler(uri)
            tiddler.bag = bag.name
            target_store.put(tiddler)
|
UTF-8
|
Python
| false | false | 2,013 |
4,535,485,493,678 |
23f30b07e9320799328c4aec344283388090f656
|
c7b4021fb97ef9d4a36b6a6e6abe2bd5e067c7ee
|
/apps/smeuhoverride/views.py
|
59bef6bcbfd5a492e501c5f1ce7bffe10015565d
|
[
"MIT"
] |
permissive
|
joskid/smeuhsocial
|
https://github.com/joskid/smeuhsocial
|
3dbe5f0ac976ab08e2ba97ad75fbe4ba15958fc0
|
647b776521be0033ef5bfc8c42ada02d3fb5632e
|
refs/heads/master
| 2020-04-07T03:12:10.835685 | 2013-03-02T15:29:26 | 2013-03-02T15:29:26 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Create your views here.
from django.core.exceptions import ObjectDoesNotExist
from django.db import connection
from django.template import RequestContext
from django.shortcuts import render_to_response, get_object_or_404
from django.contrib.auth.models import User
from django.http import Http404, get_host, HttpResponse
from django.contrib.auth.decorators import login_required
from tagging.models import Tag
from tagging.utils import calculate_cloud, LOGARITHMIC
from pinax.apps.blog.models import Post
from pinax.apps.photos.views import group_and_bridge, group_context
from pinax.apps.photos.models import Image
def tag_index(request, template_name="tagging_ext/index.html", min_size=0, limit=100):
    """Render a tag cloud of all tags used more than *min_size* times.

    Tags containing spaces are excluded; counts are aggregated in SQL
    for efficiency, then weighted with a logarithmic distribution.
    """
    query = """
    SELECT tag_item.tag_id as tag_id, COUNT(tag_item.tag_id) as counter
    FROM tagging_taggeditem as tag_item
    INNER JOIN tagging_tag as tag ON (tag.id = tag_item.tag_id)
    GROUP BY tag.name,tag_id
    HAVING COUNT(tag.name) > %s
    ORDER BY tag.name
    LIMIT %s
    """
    cursor = connection.cursor()
    cursor.execute(query, [min_size, limit])
    cloud_tags = []
    for tag_id, counter in cursor.fetchall():
        try:
            tag = Tag.objects.get(id=tag_id)
        except ObjectDoesNotExist:
            continue
        # Multi-word tags are not shown in the cloud.
        if ' ' in tag.name:
            continue
        tag.count = counter
        cloud_tags.append(tag)
    cloud_tags = calculate_cloud(cloud_tags, steps=5, distribution=LOGARITHMIC)
    return render_to_response(template_name, {'tags': cloud_tags},
                              context_instance=RequestContext(request))
def user_blog_index(request, username, template_name="blog/user_blog.html"):
    """List published posts (status=2), optionally filtered to one author."""
    posts = Post.objects.filter(status=2).select_related(depth=1).order_by("-publish")
    if username is not None:
        author = get_object_or_404(User, username=username.lower())
        posts = posts.filter(author=author)
    context = {
        "blogs": posts,
        "username": username,
    }
    return render_to_response(template_name, context,
                              context_instance=RequestContext(request))
def blog_post_source(request, username, slug):
    """Serve a post's raw body as plain text; drafts only to their author."""
    post = get_object_or_404(Post, slug=slug, author__username=username)
    is_draft = post.status == 1
    if is_draft and post.author != request.user:
        raise Http404
    return HttpResponse(post.body, mimetype="text/plain; charset=utf-8")
def get_first_id_or_none(objects):
    """Return the id of the first element of *objects*, or None if empty."""
    try:
        first = objects[0]
    except IndexError:
        return None
    return first.id
@login_required
def photo_details(request, id, template_name="photos/details.html"):
    """
    show the photo details

    Restricts the queryset to the current group's pool (or the global,
    group-less pool), computes previous/next navigation ids among public
    photos, and 404s on private photos not owned by the requester.
    """
    group, bridge = group_and_bridge(request)
    photos = Image.objects.all()
    if group:
        photos = group.content_objects(photos, join="pool", gfk_field="content_object")
    else:
        # No group: only photos that are not attached to any pool object.
        photos = photos.filter(pool__object_id=None)
    photo = get_object_or_404(photos, id=id)
    # Neighbouring public photos for prev/next navigation links.
    previous_photo_id = get_first_id_or_none(
        photos.filter(id__lt=photo.id, is_public=True).order_by('-id'))
    next_photo_id = get_first_id_or_none(photos.filter(id__gt=photo.id,
                                         is_public=True).order_by('id'))
    # @@@: test
    # Private photos are visible only to their owner.
    if not photo.is_public and request.user != photo.member:
        raise Http404
    photo_url = photo.get_display_url()
    host = "http://%s" % get_host(request)
    # Whether the viewer owns this photo (controls edit/delete UI).
    if photo.member == request.user:
        is_me = True
    else:
        is_me = False
    ctx = group_context(group, bridge)
    ctx.update({
        "host": host,
        "photo": photo,
        "photo_url": photo_url,
        "is_me": is_me,
        "previous_photo_id": previous_photo_id,
        "next_photo_id": next_photo_id,
    })
    return render_to_response(template_name, RequestContext(request, ctx))
|
UTF-8
|
Python
| false | false | 2,013 |
4,088,808,878,820 |
bee05cb90d13a329dbbf51ea7e1902377284352b
|
af91af409fde0a4aadf99cb7a4718d535f3c6f4c
|
/main.py
|
e79454ab205ab4b29d1ec808d0a18baff5333b81
|
[] |
no_license
|
thenoodle68/itembalancer
|
https://github.com/thenoodle68/itembalancer
|
c486184a161974d4d15fd78ff7d0876cfe253de4
|
e5c34971cec92c9b92a51b7a44b956088b58f6c2
|
refs/heads/master
| 2021-01-10T22:22:21.639195 | 2014-05-13T02:30:57 | 2014-05-13T02:30:57 | 19,540,365 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
import ctypes
import sys
nope = ["System Volume Information","$RECYCLE.BIN"]
dirs = ["d","e","f","g"]
insert_dirs = ["H:\\downloads\\Complete\\..done\\"]
def getFolderSize(folder):
    """Recursively total the size in bytes of *folder* and its contents."""
    size = os.path.getsize(folder)
    for name in os.listdir(folder):
        full = os.path.join(folder, name)
        if os.path.isdir(full):
            size += getFolderSize(full)
        elif os.path.isfile(full):
            size += os.path.getsize(full)
    return size
def disk_usage(path):
    """Return (total, used, free) bytes for the drive holding *path*.

    Windows-only: calls GetDiskFreeSpaceEx via ctypes (W variant for
    unicode paths / Python 3, A variant otherwise).
    """
    _unused = ctypes.c_ulonglong()
    total = ctypes.c_ulonglong()
    free = ctypes.c_ulonglong()
    if sys.version_info >= (3,) or isinstance(path, unicode):
        api = ctypes.windll.kernel32.GetDiskFreeSpaceExW
    else:
        api = ctypes.windll.kernel32.GetDiskFreeSpaceExA
    ok = api(path, ctypes.byref(_unused), ctypes.byref(total),
             ctypes.byref(free))
    if ok == 0:
        raise ctypes.WinError()
    return (total.value, total.value - free.value, free.value)
def constructSection(target_directory):
    """Scan one directory root, skipping entries listed in `nope`.

    Returns [sizes, names]: a {entry: total_bytes} dict and the list of
    entry names, both in os.listdir order.
    """
    sizes = {}
    names = []
    for entry in os.listdir(target_directory):
        if entry in nope:
            continue
        sizes[entry] = getFolderSize(target_directory + entry)
        names.append(entry)
    return [sizes, names]
# Gather per-folder sizes from the root of every data drive (in reverse
# drive-letter order) plus the extra staging directories.
# series: {folder_name: size_bytes}; alpha_series: names, reverse-sorted.
series = {}
alpha_series = []
dirs.sort()
dirs.reverse()
for x in dirs:
    x += ":\\"
    b = constructSection(x)
    series.update(b[0])
    alpha_series+=b[1]
for x in insert_dirs:
    b = constructSection(x)
    series.update(b[0])
    alpha_series+=b[1]
alpha_series.sort()
alpha_series.reverse()
# Total bytes currently stored across all scanned folders.
total_stored = 0
for x in series:
    total_stored += series[x]
total_storage = 0
def sizeOfDirs(dirs):
    """Sum the recorded sizes (from the module-level `series`) of *dirs*."""
    return sum(series[name] for name in dirs)
# Total capacity across all drives (GetDiskFreeSpaceEx per drive).
for x in dirs:
    total_storage += disk_usage(x+":\\")[0]
finished = {}
# Target bytes per drive for an even spread of the stored data.
total_per_drive = total_stored/len(dirs)
alpha_series_cur = alpha_series
# Greedily assign folders (in alpha_series order) to each drive until the
# drive reaches the per-drive target or would exceed its capacity.
for x in dirs:
    this_drive = []
    cont = 1
    while sizeOfDirs(this_drive) < total_per_drive and cont == 1 and len(alpha_series_cur) > 0:
        this_drive.append(alpha_series_cur[0])
        if sizeOfDirs(this_drive) > disk_usage(x+":\\")[0]:
            # Over capacity: stop filling this drive (last entry stays in
            # this_drive but is not consumed from alpha_series_cur).
            cont = 0
        else:
            alpha_series_cur = alpha_series_cur[1:]
    finished[x] = this_drive
sizes = {}
for x in dirs:
    sizes[x] = 0
# Folders assigned to a drive where they do not already exist are moves.
operations = []
for x in finished:
    for y in finished[x]:
        if not os.path.isdir(x+":\\"+y):
            operations.append((y,x))
            sizes[x] += series[y]
# make this do stuff instead
for x in operations:
    print x
print "To move:"
total = 0
for x in sizes:
    print x,str(sizes[x]/1073741824.0)+"GB"
    total += sizes[x]
# NOTE(review): '//' floors the total GB here, unlike the '/' used for the
# per-drive values above — possibly unintended; confirm before changing.
print "Total:",str(total//1073741824.0)+"GB"
|
UTF-8
|
Python
| false | false | 2,014 |
11,802,570,171,052 |
afee8d1f3567438d568d95afb7bb26c458f30da2
|
771cef7e49157dd530d65f2a208132d616fc4f3d
|
/bookshub/books/migrations/0002_auto__del_image__del_field_requested_user__del_field_requested_isbn10_.py
|
102b7cdd20e2dda210ca7c8456edb46e8e0e272a
|
[] |
no_license
|
eluciano11/bookshub
|
https://github.com/eluciano11/bookshub
|
5a9ae11ea8c13f5894aeb06ced8357c4d89b7e0f
|
cff1dc8bdd6f3bf6a42e0f18eeaf0854e9633ebe
|
refs/heads/master
| 2020-12-24T13:44:39.339884 | 2014-11-15T15:26:47 | 2014-11-15T15:26:47 | 18,911,951 | 4 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: drop the Image model, rework Requested
    (isbn10/isbn13 -> isbn_10/isbn_13, single user FK -> M2M, add count)
    and slim Book down to catalog data (add score/image, drop listing
    fields such as owner, price, condition, quantity)."""

    def forwards(self, orm):
        """Apply the schema changes."""
        # Deleting model 'Image'
        db.delete_table(u'books_image')

        # Deleting field 'Requested.user'
        db.delete_column(u'books_requested', 'user_id')

        # Deleting field 'Requested.isbn10'
        db.delete_column(u'books_requested', 'isbn10')

        # Deleting field 'Requested.isbn13'
        db.delete_column(u'books_requested', 'isbn13')

        # Adding field 'Requested.isbn_10'
        db.add_column(u'books_requested', 'isbn_10',
                      self.gf('django.db.models.fields.CharField')(default='', max_length=10, blank=True),
                      keep_default=False)

        # Adding field 'Requested.isbn_13'
        db.add_column(u'books_requested', 'isbn_13',
                      self.gf('django.db.models.fields.CharField')(default='', max_length=13, blank=True),
                      keep_default=False)

        # Adding field 'Requested.count'
        db.add_column(u'books_requested', 'count',
                      self.gf('django.db.models.fields.SmallIntegerField')(default=1),
                      keep_default=False)

        # Adding M2M table for field user on 'Requested'
        m2m_table_name = db.shorten_name(u'books_requested_user')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('requested', models.ForeignKey(orm[u'books.requested'], null=False)),
            ('user', models.ForeignKey(orm[u'users.user'], null=False))
        ))
        db.create_unique(m2m_table_name, ['requested_id', 'user_id'])

        # Deleting field 'Book.description'
        db.delete_column(u'books_book', 'description')

        # Deleting field 'Book.end_date'
        db.delete_column(u'books_book', 'end_date')

        # Deleting field 'Book.start_date'
        db.delete_column(u'books_book', 'start_date')

        # Deleting field 'Book.owner'
        db.delete_column(u'books_book', 'owner_id')

        # Deleting field 'Book.price'
        db.delete_column(u'books_book', 'price')

        # Deleting field 'Book.condition'
        db.delete_column(u'books_book', 'condition')

        # Deleting field 'Book.quantity'
        db.delete_column(u'books_book', 'quantity')

        # Adding field 'Book.score'
        db.add_column(u'books_book', 'score',
                      self.gf('django.db.models.fields.FloatField')(default=0.0, null=True),
                      keep_default=False)

        # Adding field 'Book.image'
        db.add_column(u'books_book', 'image',
                      self.gf('django.db.models.fields.files.ImageField')(default='hi.png', max_length=100, blank=True),
                      keep_default=False)

    def backwards(self, orm):
        """Revert the schema changes applied by forwards()."""
        # Adding model 'Image'
        db.create_table(u'books_image', (
            ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('modified_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('image', self.gf('django.db.models.fields.files.ImageField')(max_length=100)),
            ('book', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['books.Book'])),
        ))
        db.send_create_signal(u'books', ['Image'])

        # Adding field 'Requested.user'
        db.add_column(u'books_requested', 'user',
                      self.gf('django.db.models.fields.related.ForeignKey')(default=1, to=orm['users.User']),
                      keep_default=False)

        # Adding field 'Requested.isbn10'
        db.add_column(u'books_requested', 'isbn10',
                      self.gf('django.db.models.fields.CharField')(default='', max_length=10, blank=True),
                      keep_default=False)

        # Adding field 'Requested.isbn13'
        db.add_column(u'books_requested', 'isbn13',
                      self.gf('django.db.models.fields.CharField')(default='', max_length=10, blank=True),
                      keep_default=False)

        # Deleting field 'Requested.isbn_10'
        db.delete_column(u'books_requested', 'isbn_10')

        # Deleting field 'Requested.isbn_13'
        db.delete_column(u'books_requested', 'isbn_13')

        # Deleting field 'Requested.count'
        db.delete_column(u'books_requested', 'count')

        # Removing M2M table for field user on 'Requested'
        db.delete_table(db.shorten_name(u'books_requested_user'))

        # Adding field 'Book.description'
        db.add_column(u'books_book', 'description',
                      self.gf('django.db.models.fields.CharField')(default='test', max_length=140),
                      keep_default=False)

        # Adding field 'Book.end_date'
        db.add_column(u'books_book', 'end_date',
                      self.gf('django.db.models.fields.DateTimeField')(null=True),
                      keep_default=False)

        # Adding field 'Book.start_date'
        db.add_column(u'books_book', 'start_date',
                      self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, default=datetime.datetime(2014, 10, 22, 0, 0), blank=True),
                      keep_default=False)

        # Adding field 'Book.owner'
        db.add_column(u'books_book', 'owner',
                      self.gf('django.db.models.fields.related.ForeignKey')(default=1, to=orm['users.User']),
                      keep_default=False)

        # Adding field 'Book.price'
        db.add_column(u'books_book', 'price',
                      self.gf('django.db.models.fields.DecimalField')(default=1.1, max_digits=5, decimal_places=2),
                      keep_default=False)

        # Adding field 'Book.condition'
        db.add_column(u'books_book', 'condition',
                      self.gf('django.db.models.fields.CharField')(default='new', max_length=10),
                      keep_default=False)

        # Adding field 'Book.quantity'
        db.add_column(u'books_book', 'quantity',
                      self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=1),
                      keep_default=False)

        # Deleting field 'Book.score'
        db.delete_column(u'books_book', 'score')

        # Deleting field 'Book.image'
        db.delete_column(u'books_book', 'image')

    # Frozen ORM snapshot used by South to build the `orm` object above.
    models = {
        u'books.book': {
            'Meta': {'ordering': "('-modified_at', '-created_at')", 'object_name': 'Book'},
            'author': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['books.Category']"}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'edition': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
            'isbn_10': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
            'isbn_13': ('django.db.models.fields.CharField', [], {'max_length': '13', 'blank': 'True'}),
            'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'publisher': ('django.db.models.fields.CharField', [], {'max_length': '75'}),
            'score': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'null': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '75'})
        },
        u'books.category': {
            'Meta': {'ordering': "('-modified_at', '-created_at')", 'object_name': 'Category'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'primary_key': 'True'})
        },
        u'books.requested': {
            'Meta': {'ordering': "('-modified_at', '-created_at')", 'object_name': 'Requested'},
            'author': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'count': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'extra_data': ('jsonfield.fields.JSONField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'isbn_10': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
            'isbn_13': ('django.db.models.fields.CharField', [], {'max_length': '13', 'blank': 'True'}),
            'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'requested'", 'max_length': '10'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '75', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['users.User']", 'symmetrical': 'False'})
        },
        u'books.review': {
            'Meta': {'ordering': "('-modified_at', '-created_at')", 'object_name': 'Review'},
            'book': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['books.Book']"}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'review': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            'score': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['users.User']"})
        },
        u'books.viewed': {
            'Meta': {'ordering': "('-modified_at', '-created_at')", 'object_name': 'Viewed'},
            'book': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['books.Book']"}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['users.User']"})
        },
        u'users.user': {
            'Meta': {'ordering': "('-modified_at', '-created_at')", 'object_name': 'User'},
            'address_1': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'address_2': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'city': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'company_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'department': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '254'}),
            'facebook_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'google_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'gravatar_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'institution': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
            'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'phone': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
            'state': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'normal'", 'max_length': '20'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'token_version': ('django.db.models.fields.CharField', [], {'default': "'bf335f95-6b7e-41cc-91a4-24d6c023e881'", 'unique': 'True', 'max_length': '36', 'db_index': 'True'}),
            'twitter_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30', 'blank': 'True'}),
            'zip': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'})
        }
    }

    complete_apps = ['books']
|
UTF-8
|
Python
| false | false | 2,014 |
15,685,220,572,980 |
f8aa2dca93041c197eadc91595a3acb0d7b5d174
|
fc5ddbe104a000062c6939d6c876aea115325f41
|
/objects/LossOfHeterocigosity_region.py
|
7d334687a9ea03f6be2e3cc14d714c3a663518fb
|
[] |
no_license
|
Antonior26/CVA
|
https://github.com/Antonior26/CVA
|
ad1eb0a5242ce522a6db830cb5f692ba47549194
|
bcd1938a9b89a58b99afea439b9d05de0ad7296e
|
refs/heads/master
| 2021-01-13T01:44:04.879564 | 2013-10-14T15:21:23 | 2013-10-14T15:21:23 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
'''
Created on 08/10/2013
@author: antonior
'''
import requests
import json
class loh_region:
    """A genomic region of loss of heterozygosity (LOH).

    Holds the region coordinates, the LOH variant sites it contains and,
    after calling ``cellbase_annotation``, the genes overlapping it.
    """

    def __init__(self, chrom, start, end, loh_sites):
        """
        chrom: chromosome [string]
        start: start position, 0-based [int]
        end: end position, 0-based [int]
        loh_sites: LOH sites in the region [list]
        """
        self.chrom = chrom
        self.start = start
        self.end = end
        self.loh_sites = loh_sites
        self.length = self.end - self.start  # length of the region
        self.ids = []  # Ensembl ids of genes overlapping this region
        self.names = []  # common names of genes overlapping this region
        self.description = []  # descriptions of those genes

    def __str__(self):
        # Tab-separated summary: chrom, start, end, length, site count,
        # then comma-joined ids / names / descriptions.
        ids = ",".join(self.ids)
        names = ",".join(self.names)
        description = ",".join(self.description)
        return self.chrom + '\t' + str(self.start) + '\t' + str(self.end) + '\t' + str(self.end - self.start) + '\t' + str(len(self.loh_sites)) + "\t" + ids + "\t" + names + "\t" + description

    def __len__(self):
        return self.end - self.start

    def cellbase_annotation(self, type="gene"):
        """Annotate the region by querying the cellbase REST service.

        type: type of feature to annotate, e.g. gene, snp...
        Fills self.ids / self.names / self.description in place.
        """
        url = "http://ws.bioinfo.cipf.es/cellbase/rest/latest/hsa/genomic/region/" + str(self.chrom) + ":" + str(self.start) + "-" + str(self.end) + "/" + type + "?of=json"
        query = requests.get(url)
        annotation = json.loads(query.content)
        for row in annotation[0]:
            # Each key is optional: features may lack any of these fields.
            # Narrowed from a bare `except:` to KeyError so real errors
            # (network, malformed JSON) are no longer swallowed.
            try:
                self.ids.append(row["stableId"])
            except KeyError:
                print("Found feature without id")
            try:
                self.names.append(row["externalName"])
            except KeyError:
                print("Found feature without external Name")
            try:
                self.description.append(row["description"])
            except KeyError:
                # Fixed message: this branch is about the description,
                # not the external name.
                print("Found feature without description")

    def get_variants(self):
        """Return the variants (LossOfHeterocigosity_Variant) in the region."""
        return self.loh_sites

    def check_variant_cluster(self, variant, limit=1000):
        """Return True if *variant* clusters (within *limit*) with ANY
        site in the region.

        Bug fix: the original returned after inspecting only the first
        site (an `else: return False` inside the loop), so a
        non-clustering first site hid matches at later sites. Also
        returns an explicit False (not None) for an empty region.
        """
        for site in self.get_variants():
            if site.check_cluster(variant, limit):
                return True
        return False

    def add_varinat(self, variant, limit=1000):
        """Add a variant to the region, extending the bounds if needed.

        Only variants clustering with an existing site (within *limit*)
        are added; raises Exception when the chromosomes differ.
        """
        if self.check_variant_cluster(variant, limit):
            if self.chrom != variant.chrom:
                raise Exception("Dfferent chromosomes")
            elif self.start < variant.pos and self.end > variant.pos:
                self.loh_sites.append(variant)
            elif self.start > variant.pos:
                self.loh_sites.append(variant)
                self.start = variant.pos
            elif self.end < variant.pos:
                self.loh_sites.append(variant)
                self.end = variant.pos

    # Backward-compatible, correctly-spelled alias for the typo'd method.
    add_variant = add_varinat
|
UTF-8
|
Python
| false | false | 2,013 |
10,926,396,839,526 |
c4a37195a0f7b065600ec7daa79b6f395774f7f6
|
99e96901763a492065865a5a0c98f8501ff92f4b
|
/p3gomorgen.py
|
f048e63540e0d12e6000804869f45d4658e033af
|
[] |
no_license
|
jonaskaempf/public-service
|
https://github.com/jonaskaempf/public-service
|
f455fc19e585890533a8c2b74e29d9d4272663c1
|
82bf9f12d5fe863828c6320fb6283b897ebbf55b
|
refs/heads/master
| 2020-05-19T23:34:38.055283 | 2014-10-31T12:28:14 | 2014-10-31T12:28:14 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
from datetime import datetime
import urllib2
import json
import locale
import cmd
import sys
import vlc
FEED_URL = "http://www.dr.dk/mu/Feed/urn:dr:mu:bundle:4f3b8a2a860d9a33ccfdb3a6"
API_URL = "http://www.dr.dk/mu/programcard/expanded?id=go-morgen-p3-"
# change locale for parsing RSS feed dates
#locale.setlocale(locale.LC_TIME, ('en_US', 'UFT-8'))
def parse_date(s):
    """Parse 'd', 'd-m' or 'd-m-y' into a datetime; blanks default to today.

    Missing components fall back to the current day/month/year.
    """
    today = datetime.now()
    parts = [today.day, today.month, today.year]
    if s.strip():
        for idx, piece in enumerate(s.split('-')):
            parts[idx] = int(piece)
    return datetime(parts[2], parts[1], parts[0])
def get_number(date):
    """Find the broadcast number for *date* by scraping DR's RSS feed.

    Exits the process when the date cannot be found in the feed.
    """
    global FEED_URL
    feed = urllib2.urlopen(FEED_URL).read()
    # Coarse, non-XML parsing to, maybe, avoid XML attacks:
    # drop everything before the first <item>, then find the date string.
    feed = feed[feed.find("<item>"):]
    date_idx = feed.find(date.strftime("%d %b %Y"))
    if date_idx < 0:
        print("Could not find date in DRs feed. Maybe its still live? Or try later. Quitting now")
        sys.exit(0)
    # The episode link is the <link> element just before the date.
    link_start = feed.rfind("<link>", 0, date_idx)
    link = feed[link_start + 6:feed.find("</link>", link_start, date_idx)]
    print("Extracted this DR P3 Live Radio Player URL: %s" % link)
    # The broadcast number is the trailing '-'-separated token of the URL.
    return link.split('-')[-1]
def retrieve_url(date):
    """Resolve (broadcast start time, stream URL) for the given date."""
    global API_URL
    query_url = API_URL + str(get_number(date))
    print("Querying the DR MU: {}".format(query_url))
    payload = json.loads(urllib2.urlopen(query_url).read())
    entry = payload['Data'][0]
    # The last link of the first asset is the playable stream URI.
    return (entry["PrimaryBroadcastStartTime"],
            entry["Assets"][0]['Links'][-1]['Uri'])
def get_player(url):
    """Create and return a VLC media player for the given stream URL."""
    return vlc.MediaPlayer(url)
class CLI(cmd.Cmd):
    """Minimal interactive shell wrapping a vlc media player."""

    prompt = "(stopped) > "

    def __init__(self, mp):
        cmd.Cmd.__init__(self)
        self._player = mp

    def do_play(self, line):
        # Start (or resume) playback and reflect the state in the prompt.
        self._player.play()
        self.prompt = "(playing) > "

    def do_pause(self, line):
        # Toggle pause; the prompt shows the paused state.
        self._player.pause()
        self.prompt = "(paused) > "

    def do_seek(self, line):
        # Seek to a percentage (0-100) of the stream.
        self._player.set_position(int(line) / 100.0)

    def do_quit(self, line):
        # Stop playback and terminate the process.
        self._player.stop()
        sys.exit(0)

    def do_EOF(self, line):
        # Ctrl-D ends the command loop.
        return True
if __name__ == "__main__":
    import sys
    usage = """
    usage: ./p3gomorgen.py [date]
    data - ex: '29' (automagic this month), '29-10' (automagic this year), etc.
    Defaults to 'most recent' broadcast.
    Player commands:
        play
        pause
        quit
        seek [pct: 0-100]
    """
    # Optional first CLI argument selects the broadcast date; an empty
    # string makes parse_date fall back to today.
    arg = ''
    if len(sys.argv) > 1:
        arg = sys.argv[1]
    date = parse_date(arg)
    # Resolve the date to a stream URL via DR's feed + MU API.
    streamDate, media_url = retrieve_url(date)
    mp = get_player(media_url)
    cli = CLI(mp)
    # Start playing immediately, then hand control to the shell.
    cli.onecmd("play")
    cli.cmdloop("Loaded %s from \n %s\n\n%s" % (media_url, streamDate, usage))
|
UTF-8
|
Python
| false | false | 2,014 |
16,398,185,166,917 |
054cd8e993cadd512a1477e2c6bf6c811cbd76bf
|
6aebe4c65344c1767087e85d859ba103d7937bdf
|
/literature/views.py
|
86271e8b72465d9fbdf5ea1f6ea4444dd5e8ceaf
|
[
"MIT"
] |
permissive
|
SpaceFox/textes
|
https://github.com/SpaceFox/textes
|
6d7ab151c445ce9c40cd6f8c71fd8de6bc0ce5db
|
dee9bd19dcdd49bcde40b152cbfd4c70a5e9a11c
|
refs/heads/master
| 2016-09-05T14:49:37.540398 | 2014-10-28T20:07:31 | 2014-10-28T20:07:31 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.shortcuts import render, get_object_or_404
from models import Chapter, ShortStory, Novel
def short_story(request, slug):
    """Render a single short-story page (404 when the slug is unknown)."""
    text = get_object_or_404(ShortStory, slug=slug)
    # Licence block for the template: the story's own title and URL.
    context = {
        u'text': text,
        u'licence': {u'title': text.title, u'url': text.get_absolute_url()},
    }
    return render(request, u'literature/short_story.html', context)
def chapter(request, slug_novel, slug_chapter):
    """Render one chapter of a novel, with prev/next navigation.

    Bug fix: the original looked up the chapter by ``slug_chapter``
    alone and ignored ``slug_novel``, so a chapter was reachable under
    any novel's URL (and clashing chapter slugs across novels were
    ambiguous).  The lookup is now constrained to the novel as well.
    """
    text = get_object_or_404(Chapter, slug=slug_chapter, novel__slug=slug_novel)
    chapters = text.novel.chapter_set.all().order_by(u'sequence')
    # Nearest chapters by sequence number; first() yields None at either end.
    previous_chapter = text.novel.chapter_set.filter(
        sequence__lt=text.sequence).order_by(u'-sequence').first()
    next_chapter = text.novel.chapter_set.filter(
        sequence__gt=text.sequence).order_by(u'sequence').first()
    # Licence block for the template: the enclosing novel's title/URL.
    licence = {
        u'title': text.novel.title,
        u'url': text.novel.get_absolute_url()
    }
    return render(request, u'literature/chapter.html', {
        u'text': text,
        u'licence': licence,
        u'chapters': chapters,
        u'previous_chapter': previous_chapter,
        u'next_chapter': next_chapter,
    })
def novel(request, slug):
    """Render a novel's overview page with its ordered chapter list."""
    novel = get_object_or_404(Novel, slug=slug)
    # Licence block for the template: the novel's own title and URL.
    context = {
        u'novel': novel,
        u'licence': {u'title': novel.title, u'url': novel.get_absolute_url()},
        u'chapters': novel.chapter_set.all().order_by(u'sequence'),
    }
    return render(request, u'literature/novel.html', context)
def home(request):
    """Render the static literature landing page."""
    return render(request, u'literature/home.html')
|
UTF-8
|
Python
| false | false | 2,014 |
13,254,269,095,695 |
6b2db9d916e6254321aabeee18e38937faeb6718
|
a8e0b86c485f53ae317a51b6bb81af062bfdf8af
|
/luhnybin.py
|
f6090b3512621ea7502d4368d7e8fc6aa9c90122
|
[
"Apache-2.0"
] |
permissive
|
jimmyislive/luhnybin
|
https://github.com/jimmyislive/luhnybin
|
d0d1b99fd9d9f3f70e440f00eb2012d0ed328a1f
|
d035250159122de71e43de564cee4b1b22016668
|
refs/heads/master
| 2020-12-25T06:16:17.199270 | 2012-10-30T18:23:00 | 2012-10-30T18:23:00 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
'''
Jimmy John - 10/30/2012
[email protected]
'''
import re
import sys
def luhn_check(num):
    """Return True when *num* passes the Luhn checksum.

    *num* is a string of digits, optionally interspersed with spaces or
    dashes, which are stripped before checking.

    Fixes: use ``//`` for the tens digit (the original ``/`` relied on
    Python 2 integer division and would break under Python 3's true
    division), stop shadowing the builtin ``sum``, and collapse the
    trailing if/else into a direct boolean return.
    """
    # Strip the separators credit-card numbers are usually written with.
    num = num.replace(' ', '').replace('-', '')
    total = 0
    iteration = 0
    # Walk right-to-left; every second digit is doubled and the decimal
    # digits of the doubled value are summed (d -> d*2//10 + d*2%10).
    for i in range(len(num) - 1, -1, -1):
        digit = int(num[i])
        if iteration % 2:
            doubled = digit * 2
            total += doubled // 10 + doubled % 10
        else:
            total += digit
        iteration += 1
    return total % 10 == 0
def format(s):
    """Return *s* with every digit replaced by 'X' (masks card numbers).

    Fix: the parameter was named ``str``, shadowing the builtin; renamed
    to ``s`` (all callers pass it positionally).  A generator expression
    feeds join directly, avoiding the intermediate list that map()
    builds under Python 2.
    """
    return ''.join('X' if c.isdigit() else c for c in s)
def main():
    """Read stdin line by line, masking Luhn-valid 14-16 digit runs.

    Every maximal-ish run of 14, 15 or 16 digits (optionally separated
    by spaces/dashes) that passes the Luhn check has its digits replaced
    with 'X'; everything else is echoed unchanged.
    """
    # create regex objects for the patters of interest
    re_pattern14 = re.compile('([0-9]( )*(-)*){14}')
    re_pattern15 = re.compile('([0-9]( )*(-)*){15}')
    re_pattern16 = re.compile('([0-9]( )*(-)*){16}')
    for line in sys.stdin:
        match_data = {}
        # get rid of leading/trailing newlines etc
        line = line.strip()
        # parse the line for each of the patters of interest
        # note the starting/ending points of each pattern
        for re_pattern in [re_pattern14, re_pattern15, re_pattern16]:
            start_index = 0
            while start_index < len(line):
                match = re_pattern.search(line, start_index)
                if match:
                    match_data[(match.start(0), match.end(0))] = match.group(0)
                    # Advance only one character so overlapping candidate
                    # runs are found as well.
                    start_index = match.start(0) + 1
                else:
                    break
        # create a copy of the original line as strings are immutable in python
        line_copy = list(line)
        # for all the pattern matches, check which patterns pass the Luhn Check
        # for the ones that do, replace the corresponding character positions with 'X'
        for k,v in match_data.items():
            if luhn_check(v):
                formatted_string = format(v)
                index = 0
                while index < len(formatted_string):
                    line_copy[k[0] + index] = formatted_string[index]
                    index += 1
        # convert the list into a flat string
        print ''.join(line_copy)
if __name__ == '__main__':
    main()
|
UTF-8
|
Python
| false | false | 2,012 |
18,854,906,442,589 |
617b8be2120e8e58230c919511dd0a94c9ebb3d0
|
e9319f998c78d2a337d31dabfd02a9a608a29069
|
/screenshot.py
|
1516383339f3838fa5cddfae5afd74f42c6d4c0a
|
[] |
no_license
|
lavelle/ClipCloud
|
https://github.com/lavelle/ClipCloud
|
ea1bd9b342e6985402da99700bc15db62ae37e34
|
054c201f41ff70aafc49caaaa470d0dd72d8e191
|
refs/heads/master
| 2019-04-28T13:17:04.611255 | 2013-02-10T20:57:40 | 2013-02-10T20:57:40 | 3,892,974 | 1 | 1 | null | false | 2017-05-18T06:59:54 | 2012-04-01T14:24:36 | 2014-08-03T21:33:29 | 2013-02-10T20:57:47 | 1,100 | 4 | 1 | 8 |
Python
| null | null |
import os
from settings import *
from time import time
import subprocess
class Screenshot:
    """Thin wrapper around OS X's ``screencapture`` command-line tool."""
    def __init__(self, mode):
        # *mode* == 'screen' captures the whole display; any other value
        # starts interactive region selection.  Aborts on non-Darwin
        # platforms because screencapture only exists on OS X.
        if PLATFORM != 'Darwin':
            print 'Screenshot tool only works on OS X'
            exit(1)
        self.flags = self.build_flags(mode)
        self.filename = self.build_filename()
        # Destination path under the configured screenshot directory.
        self.path = os.path.join(SCREENSHOT_PATH, self.filename)
    def build_flags(self, mode):
        """Assemble the single combined flag string for screencapture."""
        flags = '-'
        # Don't play camera shutter sound effect
        flags += 'x'
        # 'm' = main display only; 'i' = interactive selection mode.
        flags += 'm' if mode == 'screen' else 'i'
        return flags
    def build_filename(self):
        """Return a unique-ish name based on the current unix timestamp."""
        return 'screenshot_%s.png' % str(int(time()))
    def capture(self):
        """Run screencapture, writing the image to ``self.path``."""
        subprocess.call(['screencapture', self.flags, self.path])
|
UTF-8
|
Python
| false | false | 2,013 |
17,248,588,673,043 |
cf29c2bd42660798d87e1216a4d7be8b2405524d
|
b39d9ef9175077ac6f03b66d97b073d85b6bc4d0
|
/Ibandronat_Sandoz_2_mgml_concentrate_for_solution_for_infusion_SmPC.py
|
3f6ced0688185cb2d2b1a3cd1b571d5f64b8c3b7
|
[] |
no_license
|
urudaro/data-ue
|
https://github.com/urudaro/data-ue
|
2d840fdce8ba7e759b5551cb3ee277d046464fe0
|
176c57533b66754ee05a96a7429c3e610188e4aa
|
refs/heads/master
| 2021-01-22T12:02:16.931087 | 2013-07-16T14:05:41 | 2013-07-16T14:05:41 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
{'_data': [['Unknown',
[['GI',
u'Diarrhoea 1 (0.6) 8 (5.3) Dyspepsia 5 (3.2) 6 (3.9) Vomiting 2 (1.3) 5 (3.3) Gastrointestinal pain 2 (1.3) 4 (2.6) Tooth disorder 0 (0.0) 3 (2.0) Skin and subcutaneous tissue disorders: Skin disorder 0 (0.0) 2 (1.3) Ecchymosis 0 (0.0) 2 (1.3) Musculoskeletal and connective tissue disorders: Myalgia 6 (3.8) 8 (5.3) Arthralgia 1 (0.6) 2 (1.3) Joint disorder 0 (0.0) 2 (1.3) Osteoarthritis 0 (0.0) 2 (1.3) General disorders: Asthenia 8 (5.1) 10 (6.6) Influenza-like illness 2 (1.3) 8 (5.3) Oedema peripheral 2 (1.3) 3 (2.0) Thirst 0 (0.0) 2 (1.3) Investigations: Gamma-GT increased 1 (0.6) 4 (2.6) Creatinine increased 1 (0.6) 3 (2.0)'],
['GI',
u'cheilitis Hepato-biliary disorders: cholelithiasis Skin and subcutaneous tissue disorders: rash, alopecia Renal and urinary disorders: urinary retention, renal cyst Reproductive system and breast disorders: pelvic pain General disorders and administration site conditions: hypothermia Investigations: blood alkaline phosphatase increase, weight decrease Injury, poisoning and procedural complications: injury, injection site pain Osteonecrosis of the jaw has been reported in patients treated by bisphosphonates. The majority of the reports refer to cancer patients, but such cases have also been reported in patients treated for osteoporosis. Osteonecrosis of the jaw is generally associated with tooth extraction and / or local infection (including osteomyelitis). Diagnosis of cancer, chemotherapy, radiotherapy, corticosteroids and poor oral hygiene are also deemed as risk factors (see section 4.4).']]]],
'_pages': [6, 9],
u'_rank': 2,
u'_type': u'LSFU'}
|
UTF-8
|
Python
| false | false | 2,013 |
8,504,035,284,502 |
aa90ab24708a5c88a7de7c40e50220abb9ec7fe2
|
adf9faef22a7a9ad5910b0743901fe98ded8b601
|
/activityRecognition/dijkstras_setup.py
|
d93b57da500898478859e794e0400de25a687cec
|
[] |
no_license
|
colincsl/Kinect-Projects
|
https://github.com/colincsl/Kinect-Projects
|
2d97d65730733f8335c2217a01694258eb423d39
|
f208729c2c7a4bff6c0a73054a579f0d7f3b1995
|
refs/heads/master
| 2021-01-01T05:48:02.845304 | 2012-06-23T10:58:30 | 2012-06-23T10:58:30 | 2,713,517 | 5 | 3 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#python dijkstras_setup.py build_ext --inplace
# Build script: compiles the Cython source "dijkstras.pyx" into the
# importable extension module "dijkstrasGraph".
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import numpy as np
ext_modules = [Extension("dijkstrasGraph", ["dijkstras.pyx"])]
# for e in ext_modules:
#     e.pyrex_directives = {"boundscheck": False}
#     e.pyrex_directives = {"wraparound": False}
setup(
    name = 'dijkstrasGraph',
    cmdclass = {'build_ext': build_ext},
    # NumPy headers are needed because the .pyx file uses the NumPy C API.
    include_dirs = [np.get_include()],
    ext_modules = ext_modules
)
|
UTF-8
|
Python
| false | false | 2,012 |
9,259,949,529,350 |
28dc5658ef781209e90170ed1cddf803f4bc6b9e
|
579c9cfe2245528ea20a8912e09783210ffa4152
|
/POSTagging.py
|
75970ad7181683485532f035e8b7a94e164d9e1b
|
[] |
no_license
|
sudarshan1754/NLP_POStagging
|
https://github.com/sudarshan1754/NLP_POStagging
|
c05ba6b6c7145e2006756655012abc16c31b0293
|
8a47719b80dd7c1c9dde3e1d09b7f5b33c5510d5
|
refs/heads/master
| 2021-05-29T10:40:43.192585 | 2014-10-12T20:26:55 | 2014-10-12T20:26:55 | 24,878,669 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
###********************************************************************************###
# __author__ = 'sid' #
# This program is written as part of the Natural Language Processing Home Work 3 #
# @copyright: Sudarshan Sudarshan (Sid) #
###********************************************************************************###
import nltk
import math
import time
import operator
from itertools import izip
import re
class pos_training:
    """Collects counts and MLE probabilities for an HMM POS tagger."""
    @staticmethod
    def tokenization(fpath):
        """Read a word/TAG training file and gather raw counts.

        Each line is whitespace-tokenized and wrapped in dummy <s>/<e>
        markers.  Returns [pos, transition, word_tag, no_of_tags]: tag
        counts, tag-bigram counts, "word tag" pair counts, and the total
        number of tags seen (dummy markers included).
        """
        pos = {}
        no_of_tags = 0
        word_tag = {}
        transition = {}
        starttags = ["<s>/<s>"]  # Dummy start symbol
        endtags = ["<e>/<e>"]  # Dummy end symbol
        file_content = open(fpath)
        for line in file_content.readlines():
            tokens = starttags + nltk.WhitespaceTokenizer().tokenize(line) + endtags
            for index, token in enumerate(tokens):  # Create the dictionary
                # Increment the No_of_tags by 1
                no_of_tags += 1
                # Add the <word tag: count> to dictionary
                word = token.split("/")[0]
                tag = token.split("/")[1]
                if word + " " + tag in word_tag:
                    word_tag[word + " " + tag] += 1
                else:
                    word_tag[word + " " + tag] = 1
                # Add the pos occurrence to dictionary
                if tag in pos:
                    pos[tag] += 1
                else:
                    pos[tag] = 1
                # Get the transition tags (bigram of adjacent tags)
                if index < len(tokens) - 1:
                    tag1 = tokens[index].split("/")[1]
                    tag2 = tokens[index + 1].split("/")[1]
                    if (tag1 + " " + tag2) in transition:
                        transition[tag1 + " " + tag2] += 1
                    else:
                        transition[tag1 + " " + tag2] = 1
        # tags dictionary, transition dictionary, word_tag dictionary, no of tags in the file
        token_results = [pos, transition, word_tag, no_of_tags]
        return token_results
    # Function to calculate the Unigram probability of tags
    @staticmethod
    def Unigram_Probability(pos, No_of_tags):
        """MLE prior: P(tag) = count(tag) / total number of tags."""
        # A dictionary to store the <unigram: probability>
        pos_probability = {}
        for word, count in pos.items():
            # Use MLE Estimation
            pos_probability[word] = (count / float(No_of_tags))
        return pos_probability
    # Function to calculate tag_tag probability
    @staticmethod
    def tagtag_probability(tagtag, pos, pos_prob):
        """MLE transition: P(tag2|tag1) = count(tag1 tag2) / count(tag1).

        The interpolation-smoothed variant is kept commented out below;
        alpha/beta are currently unused by the live code path.
        """
        # For transition probability we must use the Interpolation smoothing technique
        alpha = 0.99
        beta = 1 - alpha
        tagtag_prob = {}
        for word, count in tagtag.items():
            # tagtag_prob[word] = (alpha * (count / float(pos[word.split(" ")[1]]))) + (
            #     beta * float(pos_prob[word.split(" ")[0]]))
            tagtag_prob[word] = (count / float(pos[word.split(" ")[0]]))
        return tagtag_prob
    # Function to calculate word_tag probability
    @staticmethod
    def wordtag_probability(wordtag, pos):
        """MLE emission: P(word|tag) = count(word, tag) / count(tag)."""
        wordtag_prob = {}
        for word, count in wordtag.items():
            wordtag_prob[word] = (count / float(pos[word.split(" ")[1]]))
        return wordtag_prob
class pos_testing:
    """Loads a saved language model and Viterbi-tags a test file."""
    @staticmethod
    def read_lmfile(lmfile):
        """Parse a model file written by the training menu option.

        The file has three tab-separated sections headed by the literal
        lines 'pos:', 'transition:' and 'observation:'; all stored
        values are log2 probabilities.  Returns [tags, transition probs,
        observation probs, initial viterbi column (from <s>), set of
        seen words].
        """
        # To get pos, transition probability and observation probability
        tagtag_probability = {}
        wordtag_probability = {}
        tags = {}
        # First pass: record the line numbers of the section headers.
        lm_content = open(lmfile)
        for lNo, line in enumerate(lm_content.readlines()):
            if line == "pos:\n":
                posData = lNo
            if line == "transition:\n":
                transData = lNo
            elif line == "observation:\n":
                obsData = lNo
        lm_content.close()
        init_viterbi = {}
        seenWords = []
        # Second pass: load each section into its dictionary.
        lm_content = open(lmfile)
        for lNo, line in enumerate(lm_content.readlines()):
            # read tags
            if posData < lNo < transData:
                tags[line.split("\t")[0]] = float(line.split("\t")[1].rstrip('\n'))
            # read tagtag
            elif transData < lNo < obsData:
                tagtag_probability[line.split("\t")[0]] = float(line.split("\t")[1].rstrip('\n'))
                # get the initial viterbi (transitions out of <s>)
                if "<s>" == (line.split("\t")[0]).split(" ")[0]:
                    init_viterbi[line.split("\t")[0]] = float(line.split("\t")[1].rstrip('\n')) + tags[
                        line.split("\t")[0].split(" ")[0]]
            # read wordtag
            elif lNo > obsData:
                wordtag_probability[line.split("\t")[0]] = float(line.split("\t")[1].rstrip('\n'))
                seenWords.append(line.split("\t")[0].split(" ")[0])
        lm_model = [tags, tagtag_probability, wordtag_probability, init_viterbi, set(seenWords)]
        return lm_model
    @staticmethod
    def tag_testfile(testfile, tags, tran_prob, obs_prob, init_viterbi, testtagfile, seenWords):
        """Viterbi-tag *testfile* and write word/TAG output to *testtagfile*.

        Seen words use the stored emission probabilities; unseen words
        get a uniform emission distribution, boosted by crude
        morphological heuristics (capitalized -ed/-ing forms lean verb,
        capitalized -s forms lean plural noun, digits lean CD).  Tags
        guessed for unseen words are suffixed with '>>' so the evaluator
        can score them separately.  All arithmetic is in log2 space, so
        probabilities are added, not multiplied.
        """
        test_content = open(testfile)
        LM_file = open(testtagfile, "w")
        new_obs_dist = {}
        p_w = 0.01
        # assign uniform distribution for all unseen <word, tag_i>
        for pos, prob in tags.iteritems():
            new_obs_dist[pos] = math.log(((p_w * (1/float(len(seenWords)))) / (2 ** prob)), 2)
        vTags = ["VBP", "VBG", "VB", "VBN", "VBD"]  # ends with 'ed' or 'ing'
        nTags = ["NNS"]  # ends with 's' letter
        ncTags = ["NNP", "NNS", "NN"]  # capital letter
        cdTags = ["CD"]  # numeric
        for lNo, line in enumerate(test_content.readlines()):
            obs = nltk.WhitespaceTokenizer().tokenize(line)
            resultant_tag = []
            for ob in obs:
                # get all seen tags
                if ob not in seenWords:
                    # Unseen word: score every tag with the uniform
                    # emission, scaled by the morphology heuristics.
                    new_viterbi = {}
                    for iVit in init_viterbi:
                        # for each tag of the test word
                        vitval = []  # For each cell
                        vitval_index = []  # For each cell
                        for wordtag, prob in new_obs_dist.iteritems():
                            # tag = wordtag.split(" ")[1]
                            # Unseen transitions are smoothed as 0.01 * P(tag).
                            if (iVit.split(" ")[1] + " " + wordtag) in tran_prob:
                                tp = tran_prob[iVit.split(" ")[1] + " " + wordtag]
                            else:
                                tp = 0.01 * tags[wordtag]
                            # Dividing the (negative) log emission boosts the
                            # favoured tag classes.
                            if re.search(r'[A-Z][a-z]+ed|[A-Z][a-z]+ing', ob) is not None and wordtag in vTags:
                                vitval.append(init_viterbi[iVit] + tp +
                                              (new_obs_dist[wordtag] / 3))
                            elif re.search(r'[A-Z][a-z]+s', ob) is not None and wordtag in nTags:
                                vitval.append(init_viterbi[iVit] + tp +
                                              (new_obs_dist[wordtag] / 10))
                            elif re.search(r'[A-Z][a-z]+', ob) is not None and wordtag in ncTags:
                                vitval.append(init_viterbi[iVit] + tp +
                                              (new_obs_dist[wordtag] / 3))
                            elif re.search(r'\d', ob) is not None and wordtag in cdTags:
                                vitval.append(init_viterbi[iVit] + tp +
                                              (new_obs_dist[wordtag] / 10))
                            else:
                                vitval.append(init_viterbi[iVit] + tp +
                                              new_obs_dist[wordtag])
                            vitval_index.append(iVit.split(" ")[1] + " " + wordtag)
                        new_viterbi[vitval_index[vitval.index(max(vitval))]] = max(vitval)  # Get the maximum in the cell
                    # '>>' marks the tag as an unseen-word guess.
                    resultant_tag.append(str(max(new_viterbi.iteritems(), key=operator.itemgetter(1))[0].split(" ")[1] + ">>"))  # Get the maximum in the column
                    init_viterbi = new_viterbi.copy()
                else:
                    # Seen word: only tags observed with this word compete.
                    new_viterbi = {}
                    for iVit in init_viterbi:
                        # for each tag of the test word
                        vitval = []  # For each cell
                        vitval_index = []  # For each cell
                        for wordtag, prob in obs_prob.iteritems():
                            if ob == wordtag.split(" ")[0]:
                                # get seen tag
                                tag = wordtag.split(" ")[1]
                                if (iVit.split(" ")[1] + " " + tag) in tran_prob:
                                    tp = tran_prob[iVit.split(" ")[1] + " " + tag]
                                else:
                                    tp = 0.01 * tags[tag]
                                vitval.append(init_viterbi[iVit] + tp +
                                              obs_prob[ob + " " + tag])
                                vitval_index.append(iVit.split(" ")[1] + " " + tag)
                        new_viterbi[vitval_index[vitval.index(max(vitval))]] = max(vitval)  # Get the maximum in the cell
                    resultant_tag.append(max(new_viterbi.iteritems(), key=operator.itemgetter(1))[0].split(" ")[1])  # Get the maximum in the column
                    init_viterbi = new_viterbi.copy()
            # Emit the tagged line: word/TAG pairs separated by spaces.
            for i in range(0, len(obs)):
                LM_file.write(obs[i] + "/" + resultant_tag[i] + " ")
            LM_file.write("\n")
        LM_file.close()
        test_content.close()
class pos_evaluation:
    """Scores a tagged file against a gold-standard reference."""
    @staticmethod
    def evaulate(taggedfile, reffile):
        """Print overall / known-word / unknown-word tagging accuracies.

        Tags that the tagger suffixed with '>>' are counted as
        unknown-word guesses; the marker is stripped before comparing
        against the reference tag.
        """
        tagged_content = open(taggedfile)
        reffile_content = open(reffile)
        totaltokens = 0
        totalKnowns = 0
        totalUnknowns = 0
        unknownCorrect = 0
        knownCorrect = 0
        delimiter = [">>"]
        # izip walks both files line-by-line in lockstep without
        # materialising either one in memory.
        for taggedline, refline in izip(tagged_content, reffile_content):
            taggedtoken = nltk.WhitespaceTokenizer().tokenize(taggedline)
            reftoken = nltk.WhitespaceTokenizer().tokenize(refline)
            totaltokens += len(taggedtoken)  # get the total number of tokens
            for index, token in enumerate(taggedtoken):
                taggedtag = token.split("/")[1]
                # if unknown tag (carries the '>>' marker)
                if ">>" in [delimit for delimit in delimiter if delimit in taggedtag]:
                    taggedtag = taggedtag.rstrip(">>")
                    totalUnknowns += 1
                    if taggedtag == reftoken[index].split("/")[1]:
                        unknownCorrect += 1
                else:
                    totalKnowns += 1
                    if taggedtag == reftoken[index].split("/")[1]:
                        knownCorrect += 1
        print "\n----------Results----------"
        print "Overall Accuracy: " + str((knownCorrect + unknownCorrect) / float(totaltokens))
        print "Known Accuracy: " + str(knownCorrect / float(totalKnowns))
        print "Unknown Accuracy: " + str(unknownCorrect / float(totalUnknowns))
        print "\n"
if __name__ == "__main__":
    print "\n-------------------------Welcome-------------------------\n"
    # Interactive menu loop: train a model, tag a file, evaluate, or exit.
    while True:
        option = raw_input('1. Train the Model\n2. Test the Language Model on a file\n3. Evaluate\n4. Exit\n\nEnter your choice:')
        if int(option) == 1:
            # get the training file name from the user
            trainfile = raw_input('Enter the training file:')
            # to get the lm file name from the user
            lmpath = raw_input('Enter the LM file name: ')
            # Timer to get the execution time
            start_time = time.time()
            # get the training results
            train = pos_training()
            # order: tags dictionary, transition dictionary, word_tag dictionary, no of tags in the file
            token_results = train.tokenization(trainfile)
            # get the ml of tags
            pos_prob = train.Unigram_Probability(token_results[0], token_results[3])
            print "\nFinished Calculating priors of tags........"
            # to get the transition probabilities
            tran_results = train.tagtag_probability(token_results[1], token_results[0], pos_prob)
            print "\nFinished Calculating transition probability........"
            # to get the and observation probabilities
            obs_results = train.wordtag_probability(token_results[2], token_results[0])
            print "\nFinished Calculating observation probability........"
            print "\nWriting Results to Language Model file........"
            # store the language model file; every value is written as a
            # log2 probability, matching what read_lmfile expects back.
            LM_file = open(lmpath, "w")
            # to store pos_prob
            LM_file.write("pos:\n")
            for tag in pos_prob:
                LM_file.write(str(tag) + "\t" + str(math.log(pos_prob[tag], 2)) + "\n")
            # to store transition probs
            LM_file.write("transition:\n")
            for tag_tag in tran_results:
                LM_file.write(str(tag_tag) + "\t" + str(math.log(tran_results[tag_tag], 2)) + "\n")
            # to store observation probs
            LM_file.write("observation:\n")
            for word_tag in obs_results:
                LM_file.write(str(word_tag) + "\t" + str(math.log(obs_results[word_tag], 2)) + "\n")
            LM_file.close()
            print "\n--- %s seconds ---\n" % (time.time() - start_time)
        elif int(option) == 2:
            # to get the lm file name from the user
            lmfile = raw_input('Enter the LM file name: ')
            # get the test file name from the user
            testfile = raw_input('Enter the test file:')
            # get the testtag file name from the user
            testtagfile = raw_input('Enter the test tag file name:')
            # tags, tagtag_probability, wordtag_probability, init_viterbi, seenWords
            lmData = pos_testing.read_lmfile(lmfile)
            print "\nFinished Reading Model file........"
            # Timer to get the execution time
            teststart_time = time.time()
            print "\nTesting........"
            pos_testing.tag_testfile(testfile, lmData[0], lmData[1], lmData[2], lmData[3], testtagfile, lmData[4])
            print "\n--- %s seconds ---\n" % (time.time() - teststart_time)
        elif int(option) == 3:
            # to get the tagged file name from the user
            taggedfile = raw_input('Enter the tagged file name: ')
            # get the ref file name from the user
            reffile = raw_input('Enter the ref test file name:')
            pos_evaluation.evaulate(taggedfile, reffile)
        elif int(option) == 4:
            print "-------------------------Good Bye-------------------------"
            break
        else:
            print "------Your choice is not valid. Enter a valid choice!------\n"
|
UTF-8
|
Python
| false | false | 2,014 |
128,849,024,872 |
8fc02dc2f4c0a55da1aa6b5900e4475c873edab4
|
79c46d53acdc133536f227977b649a655c6f0111
|
/Input.py
|
268550a9915403f84e8d66dda50f00702135f82a
|
[] |
no_license
|
PLIH/Input.py---development
|
https://github.com/PLIH/Input.py---development
|
f0974f293c75ae9f5639b45a6f29bee8d0a8de87
|
cbc877008041104e0712fdc2e571ce3d5fee4a00
|
refs/heads/master
| 2016-09-11T03:09:40.972622 | 2013-06-11T23:35:15 | 2013-06-11T23:35:15 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#Source File Name: Input.py
#Author's Name: Paige Harvey
#Last Modified By: Paige Harvey
#Last Modified On: 2012-06-10
#Program Description: An importable class to create input boxes in pygame
#Revision History:
#
import pygame
pygame.init()
class Textbox:
    """A minimal pygame text-input box (digits-only prototype)."""
    # output the content
    def label(self, toPrint):
        """Render *toPrint* as a small black text surface."""
        font = pygame.font.SysFont("None", 10)
        myText = font.render(toPrint, 1, (0, 0, 0))
        return myText
    # draw/create
    def create(self, surface, colour, length, height, x, y):
        """Draw the box (white fill, black border) and remember its rect.

        NOTE(review): *colour* is currently unused — the fill is
        hard-coded white; the parameter is kept for compatibility.
        """
        pygame.draw.rect(surface, (255, 255, 255), (x, y, length, height), 0)
        pygame.draw.rect(surface, (0, 0, 0), (x, y, length, height), 1)
        self.rect = pygame.Rect(x, y, length, height)
    # Take input
    def takeInput(self, surface):
        """Block until Return is pressed; echo typed text onto *surface*.

        Only the '0' key is handled so far (prototype).  Returns the
        collected text.
        """
        keepGoing = True
        text = ""
        x = self.rect.topleft[0]
        y = self.rect.topleft[1]
        while keepGoing:
            for event in pygame.event.get():
                if event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_RETURN:
                        keepGoing = False
                    elif event.key == pygame.K_0:
                        text += "0"
            myText = self.label(text)
            surface.blit(myText, (x, y))
        return text
    # problem: cannot see input inside textbox being written, because the
    # caller's own blit keeps overwriting it each frame; needs a way to
    # suspend the caller's blit while input is active.
    def onClick(self, mouse):
        """Return True when *mouse* (x, y) lies strictly inside the box.

        Fix: the original was a four-level nested if/else pyramid; this
        is the same strict-inequality test as one boolean expression.
        """
        left, top = self.rect.topleft
        right, bottom = self.rect.bottomright
        return left < mouse[0] < right and top < mouse[1] < bottom
|
UTF-8
|
Python
| false | false | 2,013 |
6,674,379,194,629 |
d3d1b42a5648dcc303a0defcc15d7c154a9e121c
|
9033971d873728ed3f9ea16fafb82b474e4c8ba5
|
/magic/__init__.py
|
67a75c653d456431c332182fb1f8b78a67d122bf
|
[] |
no_license
|
dtgillis/MAGiC
|
https://github.com/dtgillis/MAGiC
|
a2c6ef75b06635eb91fc2d4e4ff767d64eaceb1c
|
6fc37d277ffab96603583f208de7339902d2370f
|
refs/heads/master
| 2016-09-10T01:34:52.713618 | 2014-12-10T02:22:24 | 2014-12-10T02:22:24 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
__author__ = 'dtgillis'
|
UTF-8
|
Python
| false | false | 2,014 |
7,060,926,278,418 |
76f1940921bdb3d41fef1e87ba3abc0215d105c9
|
f204469098936116ab04f4e016af6bd903bf0364
|
/git_deploy/configuration.py
|
8e51b8d885e7e1e25a15e1079f2533b992ecd845
|
[] |
no_license
|
cbenz/git-deploy
|
https://github.com/cbenz/git-deploy
|
b2bed115711c97d9b5d28d2fdf3ba519f1f1da00
|
cf1667de828e9108386cd4d1b3b30b1accaa0f8d
|
refs/heads/master
| 2020-05-31T23:09:24.740419 | 2014-04-04T17:42:50 | 2014-04-04T17:42:50 | 9,747,421 | 1 | 0 | null | false | 2014-04-04T17:42:50 | 2013-04-29T12:17:04 | 2014-04-04T17:42:50 | 2014-04-04T17:42:50 | 216 | 1 | 1 | 0 |
Python
| null | null |
# -*- coding: utf-8 -*-
# GitDeploy -- Deploy git repositories to multiple targets.
# By: Christophe Benz <[email protected]>
#
# Copyright (C) 2013 Christophe Benz, Easter-eggs
# https://github.com/cbenz/git-deploy
#
# This file is part of GitDeploy.
#
# GitDeploy is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# GitDeploy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import glob
import json
import logging
import os
from . import conv
log = logging.getLogger(__name__)
def get_conf(config_dir_path):
    """Load and merge every ``*.json`` configuration file in a directory.

    Returns the merged configuration dict, or None when the directory
    does not exist or any file fails JSON decoding / schema validation
    (the failure is logged before returning).
    """
    if not os.path.isdir(config_dir_path):
        return None
    conf_dicts = []
    for config_file_path in glob.iglob(os.path.join(config_dir_path, '*.json')):
        log.debug(u'config_file_path = {path}'.format(path=config_file_path))
        with open(config_file_path) as config_file:
            config_file_str = config_file.read()
            try:
                conf_dict = json.loads(config_file_str)
            except ValueError, exc:
                log.error(u'Failed to decode repository JSON configuration for file "{path}": {exc}'.format(
                    exc=unicode(exc), path=config_file_path))
                return None
            # Validate/convert the raw JSON into the internal conf shape.
            conf_data, errors = conv.json_values_to_conf(conf_dict)
            if errors is not None:
                log.error(u'Repository configuration errors for file "{path}": {errors}'.format(
                    errors=errors, path=config_file_path))
                return None
            conf_dicts.append(conf_data)
    conf = merge_conf_dicts(conf_dicts)
    log.debug(u'conf = {conf}'.format(conf=conf))
    return conf
def get_repo_alias_and_conf(conf, repo_url):
    """Look up the configured repository whose 'url' equals *repo_url*.

    Returns ``(alias, repo_conf)`` for the first match, or
    ``(None, None)`` when no repository matches (or none is configured).
    """
    repositories = conf['repositories']
    if not repositories:
        return None, None
    for alias, repository in repositories.iteritems():
        if repository['url'] != repo_url:
            continue
        log.debug(u'repo_alias = {repo}, repo_conf = {conf}'.format(conf=repository, repo=alias))
        return alias, repository
    return None, None
def merge_conf_dicts(conf_dicts):
    """Merge a sequence of conf dicts into a single one.

    Within the 'hooks' and 'repositories' sections, later dicts win on
    key collisions; missing or empty sections are skipped.
    """
    merged = {'hooks': {}, 'repositories': {}}
    for conf_dict in conf_dicts:
        for section in ('hooks', 'repositories'):
            extra = conf_dict.get(section)
            if extra:
                merged[section].update(extra)
    return merged
|
UTF-8
|
Python
| false | false | 2,014 |
1,297,080,153,006 |
4d750d39a58908248da98da52ca8c189ca96f160
|
98d8c31bc8d082007d1711abcf807ecf56a652cd
|
/ktope/cli/__init__.py
|
4c4afe8d158336f5e6fe39fa4071fa01e61e75c7
|
[
"AGPL-3.0-only",
"AGPL-3.0-or-later"
] |
non_permissive
|
marwinxxii/ktope
|
https://github.com/marwinxxii/ktope
|
87f6ed74d965c5ce69e58eb888f9636244f0a81b
|
4c341d31eb53341a2923e0f3c1bf3023b80ccfcc
|
refs/heads/master
| 2021-03-12T21:56:26.873497 | 2011-05-28T18:51:45 | 2011-05-28T18:51:45 | 1,501,305 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
__all__=['hw5']
|
UTF-8
|
Python
| false | false | 2,011 |
12,463,995,142,296 |
7bc9f1f5f540ce85a1e886240f2c89bb8e29a824
|
9347d62bc1c523543844def4aa747faecce1bfa4
|
/setup.py
|
121138a2cb2c462514dd76133d294f567f03aa07
|
[
"MIT"
] |
permissive
|
leepro/panatomy
|
https://github.com/leepro/panatomy
|
0d1b2b24b76d4cec582d8493c2812c1a8a2c072f
|
c9fa0cb1ce3ea018d3116da70c4a32a9a1b928f4
|
refs/heads/master
| 2016-09-10T17:53:54.354012 | 2014-01-21T23:26:08 | 2014-01-21T23:26:08 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
from setuptools import setup, find_packages
def read(fname):
    """Return the text of *fname*, resolved relative to this file's dir.

    Fix: the original called ``open(...).read()`` without ever closing
    the handle; a ``with`` block closes it deterministically.
    """
    with open(os.path.join(os.path.dirname(__file__), fname)) as fh:
        return fh.read()
# Package metadata for the "panatomy" distribution; the long description
# is pulled from README.md via the read() helper above.
setup(
    version = "0.0.1",
    name = "panatomy",
    author = "DongWoo Lee",
    author_email = "[email protected]",
    description = ("Python Anatomy Tool"),
    license = "MIT",
    keywords = "python anatomy autopsy",
    url = "http://packages.python.org/panatomy",
    packages=find_packages(),
    test_suite='tests',
    long_description=read('README.md'),
    classifiers=[
        "Development Status :: 1 - Alpha",
        "Topic :: Utilities",
        "License :: OSI Approved :: MIT License",
    ],
)
|
UTF-8
|
Python
| false | false | 2,014 |
4,458,176,100,829 |
58ac20404d3dd34fe4193f03b0230e18886e0bab
|
5b3c1c4cddcae976caadd1aa1147637c176fe9a2
|
/02/babynames/06.py
|
22d105e9c9db5d7d7528cde45f5e5ea3696330ae
|
[
"GPL-2.0-only",
"LicenseRef-scancode-public-domain",
"MIT"
] |
non_permissive
|
lucascolusso/python_workshop
|
https://github.com/lucascolusso/python_workshop
|
f74804efa7d487af01a2e133e85410eb26fb4fac
|
bcce92242f3750e831318064a5a330707f86f2df
|
refs/heads/master
| 2020-05-18T17:42:24.102288 | 2014-11-22T23:42:29 | 2014-11-22T23:42:29 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# number of names that are subsets of other names?
import ssadata
# Pool both name lists; names used for boys and girls appear twice.
largedataset = ssadata.boys.keys() + ssadata.girls.keys();
counter = 0;  # NOTE(review): unused — superseded by subsets_list below
subsets_list = []
# NOTE(review): every name is a substring of itself, so each name is
# counted at least once; the printed total includes these self-matches.
for metaname in largedataset:
    for name in largedataset:
        if metaname in name:
            subsets_list.append(metaname);
print "The number of names that are subsets of other name is: " , len(subsets_list)
|
UTF-8
|
Python
| false | false | 2,014 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.