__id__
int64 3.09k
19,722B
| blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
256
| content_id
stringlengths 40
40
| detected_licenses
list | license_type
stringclasses 3
values | repo_name
stringlengths 5
109
| repo_url
stringlengths 24
128
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
42
| visit_date
timestamp[ns] | revision_date
timestamp[ns] | committer_date
timestamp[ns] | github_id
int64 6.65k
581M
⌀ | star_events_count
int64 0
1.17k
| fork_events_count
int64 0
154
| gha_license_id
stringclasses 16
values | gha_fork
bool 2
classes | gha_event_created_at
timestamp[ns] | gha_created_at
timestamp[ns] | gha_updated_at
timestamp[ns] | gha_pushed_at
timestamp[ns] | gha_size
int64 0
5.76M
⌀ | gha_stargazers_count
int32 0
407
⌀ | gha_forks_count
int32 0
119
⌀ | gha_open_issues_count
int32 0
640
⌀ | gha_language
stringlengths 1
16
⌀ | gha_archived
bool 2
classes | gha_disabled
bool 1
class | content
stringlengths 9
4.53M
| src_encoding
stringclasses 18
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | year
int64 1.97k
2.01k
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
15,144,054,716,647 |
97b71c249d9759a356896c1e6fa87ae87bb7261b
|
2d5cc2212ddd6ba380663513ad4d7fa88e413721
|
/dj_test/src/tests/parser/bunch_test.py
|
3a216e9d46779a36ff3035f1ff420ff8b86e8ac7
|
[] |
no_license
|
ekondrashev/makler
|
https://github.com/ekondrashev/makler
|
36d15184bd37196f19b0c9c35806a5cce483a1fc
|
f25f733ee2368ef1e7f1daf5462269c0d1856714
|
refs/heads/master
| 2016-09-11T02:25:59.643977 | 2010-07-30T16:31:20 | 2010-07-30T16:31:20 | 639,821 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
'''
Created on 07.05.2010
@author: ekondrashev
'''
import os
ENVIRONMENT_VARIABLE = "DJANGO_SETTINGS_MODULE"
os.environ[ENVIRONMENT_VARIABLE] = 'dj_test.settings'
import unittest
import codecs
import json
import logging
from tests.parser.base_test_case import BaseTestCase
from dj_test.makler import findRoomCount, findCoast, findPhoneNumbers, findAddress
logging.basicConfig(level=logging.DEBUG)
TEST_ENTITIES = {
'roomCount' : findRoomCount,
'cost' : findCoast,
'phoneNumbers' : findPhoneNumbers,
'address' : findAddress,
}
INPUT = "input.json"
class Test(BaseTestCase):
    """Data-driven test replaying recorded classified-ad parses from input.json."""

    def testName(self):
        """Run every recorded advertisement through each entity finder.

        The JSON fixture maps a test number to a dict holding the original
        advertisement text plus one expected value per entity listed in
        TEST_ENTITIES.  Fails on the first mismatch via assertEqual.
        """
        # Close the file handle deterministically (the original leaked it),
        # and avoid shadowing the `input` builtin.
        fixture = codecs.open(INPUT, "r", "utf-8")
        try:
            raw = fixture.read()
        finally:
            fixture.close()
        cases = json.loads(raw)
        # .items() instead of the Python-2-only .iteritems(): behaves the same
        # here and keeps the test runnable under Python 3 as well.
        for number, case in cases.items():
            originalInput = case['originalInput']
            logging.info("Test #%s" % number)
            logging.info("Original advertisment text:\n%s" % originalInput)
            for testEntity, finder in TEST_ENTITIES.items():
                originalValue = case[testEntity]
                foundValue = finder(originalInput)
                self.assertEqual(originalValue, foundValue)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    #import sys;sys.argv = ['', 'Test.testName']
    unittest.main()
|
UTF-8
|
Python
| false | false | 2,010 |
4,329,327,044,695 |
315459341b4c6011ac349d21bfd5ec7df9cc3c49
|
863efecc646b2c92145643f4f51ebe5dd0e6e598
|
/btds/urls.py
|
37f887cc0dfe19280af0d263906668369404ed95
|
[] |
no_license
|
Baka-Tsuki/BTDS
|
https://github.com/Baka-Tsuki/BTDS
|
9bab6c5495fa1b902f3204fbae397d426503de35
|
42e8e9f2eee370a8bd24755b6ed54eebbcd1aed0
|
refs/heads/master
| 2017-04-30T23:37:55.870780 | 2013-03-24T02:56:17 | 2013-03-24T02:56:17 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.conf.urls import patterns, include, url
from btds.feeds import VolumeFeed, LinkFeed
# URL routes for the btds app, using the string-view prefix form of patterns().
# NOTE(review): django.conf.urls.patterns() was deprecated in Django 1.8 and
# removed in 1.10 -- this module targets an older Django; confirm before any
# framework upgrade.
urlpatterns = patterns("btds.views",
    url(r'^$', 'index', name="btds_index"),
    url(r'^updates/$', 'updates', name='btds_updates'),
    url(r'^(?P<sid>\d+)/$', 'series', name='btds_series'),
    url(r'^book/(?P<vid>\d+)/$', 'volume', name='btds_volume'),
)
# Feed routes dispatch to class instances, hence the empty view prefix.
urlpatterns += patterns("",
    (r'^feed/$', VolumeFeed()),
    (r'^feedl/$', LinkFeed()),
)
|
UTF-8
|
Python
| false | false | 2,013 |
16,338,055,611,325 |
4e504eb33903d7dc885a7bb4b424e7198ce9b1b6
|
f3c3b8999f414b1fb147e3341d31f6aa98975e79
|
/cocosgui/layouts.py
|
ecaf5be32b3f25671a69a8c0b150f738b15fc7e4
|
[
"MIT"
] |
permissive
|
Saluev/cocos2d-gui
|
https://github.com/Saluev/cocos2d-gui
|
27c1e087c85b7a5115e9a405a9fa2c633524ff85
|
c800be24dd852b7ed9cee58c499b77da79e0aacb
|
refs/heads/master
| 2016-09-06T16:13:06.267126 | 2014-04-15T17:12:24 | 2014-04-15T17:12:24 | 18,759,656 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from .node import GUINode
class GUILayout(GUINode):
    """Base class for linear layouts.

    Keeps an ordered list of child nodes and maintains the
    'first-child'/'last-child' pseudo-states on them as children are added.
    """

    def __init__(self, spacing=5):
        super(GUILayout, self).__init__()
        self.spacing = spacing
        # TODO move this whole system to GUINode
        self.__nodes = []

    def __len__(self):
        return len(self.children)

    def get_children(self):
        return self.__nodes

    def add(self, child, *args, **kwargs):
        super(GUILayout, self).add(child, *args, **kwargs)
        self.__nodes.append(child)
        # Refresh the first/last pseudo-states on every node after insertion.
        # NOTE(review): get_nodes() is not defined in this class -- presumably
        # provided by GUINode; confirm.
        nodes = self.get_nodes()
        last = len(nodes) - 1
        for index, node in enumerate(nodes):
            mark_first = node.add_state if index == 0 else node.remove_state
            mark_first('first-child')
            mark_last = node.add_state if index == last else node.remove_state
            mark_last('last-child')
class VerticalLayout(GUILayout):
    """Stacks children vertically, separated by ``spacing`` pixels."""

    def get_content_size(self):
        kids = self.get_children()
        if not kids:
            return (0, 0)
        widest = max(kid.width for kid in kids)
        total_height = sum(kid.height for kid in kids)
        return (widest, total_height + self.spacing * (len(kids) - 1))

    def apply_style(self, **options):
        super(VerticalLayout, self).apply_style(**options)
        # Place children in reversed order, advancing the y cursor by each
        # child's margin-box height plus the configured spacing.  The box is
        # read BEFORE set_position, matching the original evaluation order.
        cursor_x, cursor_y = self.content_box[:2]
        for kid in reversed(self.get_children()):
            box = kid.margin_box
            kid.set_position(cursor_x, cursor_y)
            cursor_y += box[3] + self.spacing
class HorizontalLayout(GUILayout):
    """Lays children out left-to-right, separated by ``spacing`` pixels."""

    def get_content_size(self):
        kids = self.get_children()
        if not kids:
            return (0, 0)
        total_width = sum(kid.width for kid in kids)
        tallest = max(kid.height for kid in kids)
        return (total_width + self.spacing * (len(kids) - 1), tallest)

    def apply_style(self, **options):
        super(HorizontalLayout, self).apply_style(**options)
        # Place children in order, advancing the x cursor by each child's
        # margin-box width plus the configured spacing.  The box is read
        # BEFORE set_position, matching the original evaluation order.
        cursor_x, cursor_y = self.content_box[:2]
        for kid in self.get_children():
            box = kid.margin_box
            kid.set_position(cursor_x, cursor_y)
            cursor_x += box[2] + self.spacing
|
UTF-8
|
Python
| false | false | 2,014 |
18,365,280,184,436 |
d15599385149107909f311c90d799b99fa17f192
|
b64ef84aa51d372ea606a1ac38d96e1c79ee7eca
|
/src/memit.py
|
6fdada3415d29335e9e19f8c10e511d986fc381b
|
[] |
no_license
|
cetoli/memit
|
https://github.com/cetoli/memit
|
d2027e2096e89e398479e5c7eb4512b582b8bb29
|
74e5754d22cbdef0dd6b2e485f5505cf83a1e633
|
refs/heads/master
| 2021-01-22T00:59:05.647717 | 2013-06-11T02:42:58 | 2013-06-11T02:42:58 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
############################################################
Memit - All
############################################################
:Author: *Carlo E. T. Oliveira*
:Contact: [email protected]
:Date: $Date: 2013/03/17 $
:Status: This is a "work in progress"
:Revision: $Revision: 0.1 $
:Home: `Labase <http://labase.selfip.org/>`__
:Copyright: 2011, `GPL <http://is.gd/3Udt>`__.
Serious Game in cavalier projection for memetics.
__author__ = "Carlo E. T. Oliveira ([email protected]) $Author: carlo $"
__version__ = "0.2 $Revision$"[10:-1]
__date__ = "2013/03/17 $Date$"
"""
REPO = '/studio/%s'
def _logger(*a):
print(a)
# Environment switch: under Brython in the browser the module namespace lacks
# '__package__', so the browser-side modules (svg, html, time) are imported and
# the console ``log`` builtin becomes the logger.  Otherwise (regular package
# import, e.g. server-side) the print-based _logger is used and setinterval is
# a no-op.  NOTE(review): ``log`` and ``setinterval`` are assumed to be
# provided by the Brython runtime -- confirm.
if not '__package__' in dir():
    log ('if not __package__')
    import svg
    from html import TEXTAREA
    #from time import setinterval
    import time
    logger = log
    pass
else:
    logger = _logger
    setinterval = lambda a,b:None
    pass
"""
def noop(nop=''):
pass
HANDLER = {"_NOOP_":'noop()'}
VKHANDLER = dict([(k,noop) for k in range(32,40)])
def uuid():
r = jsptrand()
return '%i'%(JSObject(jsptdate).getTime()*1000+r)
def jshandler(event):
code = event.keyCode
if code in VKHANDLER:
VKHANDLER[code]()
#alert(event.keyCode)
if not '__package__' in dir():
doc.onkeypress=jshandler
def eventify(owner):
#alert('owner :'+owner)
HANDLER[owner]()
"""
TRANS = "translate rotate scale skewX skewY matrix".split()
EVENT = ("onfocusin onfocusout onactivate onload onclick onkeydown onkeyup" + \
" onmousedown onmouseup onmouseover onmousemove onmouseout").split()
class Dialog:
    """ Floating panel holding an editable text area. :ref:`dialog`
    """

    def __init__(self, gui, img = REPO%'paje.png', text = '', act = lambda x:None):
        # Dark translucent backdrop, the editable area and a clickable icon
        # that dismisses the dialog and fires the user callback.
        backdrop_style = {'fillOpacity': '0.7', 'fill': 'black'}
        self._rect = gui.rect(0, 100, 800, 440, style=backdrop_style)
        self._area = gui.textarea(text, 80, 130, 700, 400)
        self._imag = gui.image(img, 2, 80, 32, 32)
        self._imag.addEventListener('click', self.action)
        self.act = act

    def _set_visibility(self, state):
        # Toggle every visual part of the dialog at once.
        for part in (self._rect, self._area, self._imag):
            part.style.visibility = state

    def hide(self):
        self._set_visibility('hidden')

    def show(self):
        self._set_visibility('visible')

    def get_text(self):
        return self._area.value

    def set_text(self, text):
        self._area.value = text

    def action(self, event):
        # Icon click: dismiss the dialog, then run the user callback with
        # this dialog so it can read the edited text.
        self.hide()
        self.act(self)
class GUI:
    """ Factory creating SVG elements, unpacking extra arguments. :ref:`gui`
    """
    def __init__(self,panel,data):
        # ``panel`` is the current SVG insertion target; ``_panel`` remembers
        # the root so clear() can restore it.  ``data`` is the HTML container
        # that receives non-SVG widgets (textareas).
        global SVG
        SVG = svg
        logger('GUI __init__')
        self.args = {}
        self.panel = self._panel = panel
        self.data = data
        for child in panel: # iteration on child nodes
            panel.remove(child)
    def get_args(self):
        # Turn pending event registrations (self.args) into a source fragment
        # of extra keyword arguments, e.g. ', onClick = "eventify(\"id\")"',
        # for the exec-built SVG constructor calls below.  Note ``args`` is an
        # alias of self.args: the loop rewrites its values in place before
        # self.args is rebound to a fresh dict.
        args = self.args
        for key, value in self.args.items():
            args[key]= 'eventify(\\"%s\\")'%value
        self.args = {}
        p='"'
        if len(args) != 0:
            args = ', '+','.join(['%s = %s%s%s'%(k,p,v,p)
                for k, v in args.items()])
        else:
            args = ''
        return args
    def _get_kwargs(self,kw):
        # Build an SVG ``transform`` attribute from any TRANS keys in ``kw``;
        # tuple values are assumed to already be parenthesised by str().
        trans =' '.join(
            [key + ['(%s)','%s'][isinstance(value, tuple)]%str(value)
                for key, value in kw.items() if key in TRANS])
        return trans and ', transform="%s"'%trans or ''
    def request(self, url = '/rest/studio/jeppeto?type=2', action = None, data=''):
        # Fire-and-forget ajax GET with an 8s timeout; ``action`` is the
        # completion callback.  ``data`` is currently unused.
        req = ajax()
        req.on_complete = action
        req.set_timeout(8,self._err_msg)
        req.open('GET',url,True)
        req.set_header("Content-Type","text/plain; charset=utf-8")
        req.send()
        pass
    def _err_msg(self, url = None, data = '', action = None, error = None):
        # Silent timeout handler for request().
        pass
    def textarea(self,text,x,y,w,h,style= {}):
        # Creates an absolutely positioned HTML textarea inside self.data.
        def dpx(d):
            return '%spx'%d
        # NOTE(review): this first ``attrs`` (and the attrs['top'] tweak) is
        # dead code -- it is overwritten by the literal dict below; confirm
        # before removing.
        attrs = dict (position = 'absolute', top=dpx(y), left=dpx(x) ,
            width=dpx(w) , height=dpx(h), color = 'navajowhite', border= 1,
            resize = 'none', background = 'transparent')
        attrs['top']= y
        attrs = {'position' : 'absolute', 'top':dpx(y), 'left':dpx(x),
            'width':dpx(w) , 'height':dpx(h), 'resize' : 'none','borderColor': 'darkslategrey',
            'color': 'navajowhite', 'border': 1, 'background' : 'transparent' }
        #t = TEXTAREA(text, style = {'position' : 'absolute', 'top':'100px', 'left':'40px'})#attrs)
        t = TEXTAREA(text, style = attrs)
        # Brython's ``<=`` operator appends a child node to the DOM element.
        #d_rect=gui.rect(10,100, 540, 240, style= {'fill-opacity':'0.2', 'fill':'black'})
        self.data <= t
        return t
    def dialog(self, text, img = REPO%'paje.png', act = lambda x:None):
        # Convenience wrapper around the Dialog widget.
        t = Dialog(self,text=text, img=img, act=act)
        #t.setStyleAttribute('border',0)
        return t
    def set(self, element):
        # Make ``element`` the current insertion target.
        self.panel = element
    def up(self, element):
        # Append ``element`` to the current insertion target.
        self.panel <= element
    def cling(self,level, element):
        # Append ``element`` to an arbitrary container.
        level <= element
    def clear(self):
        # Restore the root panel as the insertion target.
        self.panel = self._panel
    def remove(self, element):
        self.panel.removeChild(element)
    def text(self, text,x=150,y=25, font_size=22,text_anchor="middle",
            style= {}):
        element = SVG.text(text,x=x,y=y,
            font_size=font_size,text_anchor=text_anchor,
            style=style)
        self.panel <= element
        return element
    def group(self, group= None, layer=0):
        # Create (or adopt) an SVG <g>; a truthy ``layer`` makes the new group
        # the current insertion target, otherwise the root is restored.
        element = group or SVG.g()
        self.panel <= element
        layer and self.set(element) or self.clear()
        return element
    def path(self, d,style={}, onMouseOver="noop", onMouseOut="noop"):
        # The constructor call is assembled as source text and exec'd so the
        # event attributes from get_args() can be spliced in.
        # NOTE(review): relies on exec() binding ``element`` in the local
        # scope -- Python-2/Brython behavior; this would fail on CPython 3.
        exec('element = SVG.path(d=%s,style=%s%s)'%(
            str(d),str(style),self.get_args()))
        self.panel <= element
        return element
    def image(self, href, x=0, y=0, width=100, height=50, **kw):
        # Same exec-splicing technique, with transform kwargs from _get_kwargs.
        exec('element = SVG.image(href="%s", x=%i, y=%i, width=%i, height=%i%s)'%(
            href, x, y, width, height,self._get_kwargs(kw)))
        self.panel <= element
        return element
    def ellipse(self, href, cx=0, cy=0, rx=100, ry=50, style= {}, **kw):
        # NOTE(review): the ``href`` parameter is unused here.
        exec('element = SVG.ellipse(cx=%i, cy=%i, rx=%i, ry=%i,style=%s%s)'%(
            cx, cy, rx, ry,str(style),self.get_args()))
        self.panel <= element
        return element
    def rect(self, x=0, y=0, width=100, height=50,style={}):
        exec('element = SVG.rect(x=%i, y=%i, width=%i, height=%i,style=%s%s)'%(
            x, y, width, height,str(style),self.get_args()))
        self.panel <= element
        return element
    def handler(self, key, handle):
        # NOTE(review): VKHANDLER (like HANDLER, uuid and Avatar below) only
        # appears in the commented-out block earlier in this module -- confirm
        # these are defined at runtime before relying on them.
        VKHANDLER[key] = handle
    def avatar(self):
        return Avatar(self)
    def _decorate(self, handler, **kw):
        # Register ``handler`` under a fresh id for each event name in ``kw``;
        # the ids are later serialized into eventify() calls by get_args().
        self.args = {} #kw #dict(kw)
        #alert(' '.join([k for k,v in kw.items()]))
        for key, value in kw.items():
            handler_id = uuid()
            HANDLER[handler_id] = handler
            self.args[key] = handler_id
            #alert(key+':'+ self.args[key])
        x =self.args
        #alert(' ,'.join([k+':'+v for k,v in x.items()]))
        return self
    def click(self,handler):
        # Fluent: schedule an onClick handler for the next created element.
        self._decorate(handler, onClick=handler)
        return self
    def over(self,handler):
        # NOTE(review): returns None, unlike click() which returns self --
        # probably an oversight if fluent chaining was intended.
        self._decorate(handler, onMouseOver=handler)
        return
class Marker:
    """ Colored shadow on the walls helping the user to deploy a piece in 3D. :ref:`marker`
    """

    def __init__(self, gui, x, y, fill, face):
        cls = 'red green blue'.split()
        self.face = face
        k, j, i = face
        # Wall-specific pixel offsets for the cavalier projection.
        OA, OB, OC = 345, 172, 125
        #skew
        self.avatar = gui.ellipse(cx=x + 35, cy=y + 35, rx=35, ry=35,
            style=dict(fill=fill, fillOpacity= 0.5))
        # Base offset of this marker's wall, selected by the face mask.
        self._off = (k * j * OA + k * i * OB + j * i * OC,
                     k * j * OA + k * i * OC + j * i * OB)
        self.hide()

    def show(self, x, y):
        # Hide while repositioning so intermediate frames are not painted.
        self.avatar.setAttribute("visibility", 'hidden')
        self.avatar.setAttribute('cx', x)
        self.avatar.setAttribute('cy', y)
        self.avatar.setAttribute("visibility", 'visible')

    def hide(self):
        self.avatar.setAttribute("visibility", 'hidden')

    def on_over(self, ev, i, j, k):
        """Project the shadow onto this marker's wall for cube cell (i, j, k)."""
        off_x, off_y = self._off
        #x, y, z = [c*f for c,f in zip(self.face,[i,j,k])]
        mask = self.face
        px, py, pz = k * mask[0], j * mask[1], i * mask[2]
        shadow_x = off_x + px * 100 + 71 * pz
        shadow_y = off_y + py * 100 + 71 * pz
        self.show(shadow_x, shadow_y)
class Piece(Marker):
    """ Represents the user choice when deployed inside the 3D open cube. :ref:`piece`

    Uses swapped bound methods (do_markers, next_jig) as a small state
    machine: reset() and place() rebind them to enable/disable behavior.
    NOTE(review): Marker.__init__ is intentionally NOT called -- this
    __init__ builds its own avatar.
    """
    def __init__(self,gui, x, y, fill, r, g, b, board, pid):
        SIDE = 70
        self.board, self.fill, self.pid = board, fill, pid
        # The three wall Markers shared with the other pieces.
        self.red, self.green, self.blue = r,g,b
        #self.avatar = gui.ellipse(cx= x+35, cy = y+35, rx=20, ry=20,
        #    style=dict(fill=fill, fillOpacity= 0.5))
        #self.avatex = gui.text(pid, x= x+35, y = y+35+10,
        #    style=dict(fill='navajowhite', fillOpacity= 0.7))
        self.avatar = gui.image(href=REPO%fill,
            x=x ,y=y, width=SIDE,height=SIDE)
        #self.ascore= gui.text(pid, x= 45, y = 300+ pid*30,
        #    style=dict(fill='black', fillOpacity= 0.7))
        self.avatar.addEventListener('mouseover', self.on_over)
        #self.avatex.addEventListener('mouseover', self.on_over)
        self.avatar.addEventListener('mouseout', self.on_out)
        self.avatar.addEventListener('click', self.on_click)
        #self.avatex.addEventListener('click', self.on_click)
        # A piece's "house" is whatever currently holds it; the board acts as
        # the default null house.
        self.house = board
        #self.hide()
    def show(self, x, y):
        # Hide while repositioning so intermediate frames are not painted.
        self.avatar.setAttribute("visibility",'hidden')
        self.avatar.setAttribute('x', x)
        self.avatar.setAttribute('y', y)
        self.avatar.setAttribute("visibility",'visible')
    def do_markers(self, *a):
        # Default marker behavior: disabled until the piece is placed.
        pass
    def _busy(self, *a):
        # Null state method used to disable do_markers/next_jig.
        pass
    def on_over(self, ev):
        self.do_markers(ev)
    def _on_over(self, ev):
        # Active marker behavior: project the wall shadows for this cell.
        i, j, k = self._ijk
        self.red.on_over(ev, i, j, k)
        self.green.on_over(ev, i, j, k)
        self.blue.on_over(ev, i, j, k)
    def next_jig(self):
        """Remove the next piece from the puzzle. """
        # One-shot: disables itself after the first call.
        self.board.next_jig()
        self.next_jig = self._busy
    def _next_jig(self):
        """Remove the next piece from the puzzle. """
        # Re-armed copy of next_jig, restored by reset().
        self.board.next_jig()
        self.next_jig = self._busy
    def on_click(self, ev):
        # Start dragging this piece.
        self.board.drag(self)
    def reset(self, x, y):
        """Return the piece to the tray at (x, y) and re-arm its state."""
        self.house.remove(self)
        self.house = self.board
        self.avatar.setAttribute("opacity", 1.0)
        self.show(x, y)
        self.do_markers = self._busy
        self.next_jig = self._next_jig
    def place(self, z, y, x, house):
        """Deploy the piece into the cube cell (z, y, x) owned by ``house``."""
        self.house.remove(self)
        self.house = house
        # Deeper cells are drawn more transparent.
        self.avatar.setAttribute("opacity", 0.4+z*0.3)
        self._ijk = (z, y, x)
        OFFX, OFFY = 170-35, 170-35
        # Cavalier projection: 100px per cell plus 71px per depth step.
        ax = OFFX+x*100+71*z
        ay = OFFY+y*100+71*z
        self.show(ax, ay)
        #self.avatex.setAttribute("x",ax)
        #self.avatex.setAttribute("y",ay+10)
        #self.ascore.text = '%d=%d.%d.%d'%(self.pid,z, y, x)
        self.do_markers = self._on_over
        self.next_jig()
    def on_out(self, ev):
        self.red.hide()
        self.green.hide()
        self.blue.hide()
class House:
    """ Marks a 3D location inside the cube where a piece can be deployed. :ref:`house`

    ``place`` is a swapped bound method: _place while the house is free,
    _busy once occupied; remove() re-arms it.
    """
    def __init__(self,gui, i, j, k, fill, r, g, b, board):
        OFF =170
        SIDE = 99
        RDX = 30
        self.board = board
        self.place = self._place
        # SVG layer for this depth level, owned by the board.
        self.level = board.house_layers[k]
        # Clickable rect at the projected cell position; shallower cells
        # (smaller i) are drawn smaller.
        self.avatar = gui.rect(x= OFF+k*100+71*i, y = OFF+j*100+71*i,
            width=SIDE-(2-i)*RDX, height=SIDE-(2-i)*RDX,
            style=dict(fill=fill, fillOpacity= 0.2))
        self.avatar.addEventListener('mouseover', self.on_over)
        self.avatar.addEventListener('mouseout', self.on_out)
        self.avatar.addEventListener('click', self.on_click)
        self.red, self.green, self.blue = r,g,b
        self._ijk = (i, j, k)
    def on_over(self, ev):
        """Projects three guiding shadows on the orthogonal cube walls"""
        i, j, k = self._ijk
        self.red.on_over(ev, i, j, k)
        self.green.on_over(ev, i, j, k)
        self.blue.on_over(ev, i, j, k)
    def _busy(self, *a):
        """State method associated with a busy occupied house."""
        pass
    def _place(self, ev):
        """State method of a house that can receive a piece, register it to the
        board and disable new deployments here"""
        self.board.place(self._ijk, self)
        self.place = self._busy
    def remove(self,piece):
        """Remove a piece from the house and set state to receive a new piece"""
        self.place = self._place
    def on_click(self, ev):
        self.place(ev)
    def on_out(self, ev):
        self.red.hide()
        self.green.hide()
        self.blue.hide()
class Cube:
    """ A 3D game memetic space represented in a cavalier projection. :ref:`cube`
    """

    def __init__(self, gui, bottom_image, rear_image, side_image):
        cls = 'red green blue'.split()
        OFF = 123
        SIDE = 300
        RDX = 30
        # Three textured faces fake the open cube: a rotated floor, a rear
        # wall skewed on X and a left wall skewed on Y.
        bottom = gui.image(href=REPO % bottom_image,
            x=SIDE, y=-2 * SIDE, width=SIDE, height=SIDE, rotate=90)
        rear = gui.image(href=REPO % rear_image,
            x=0, y=OFF, width=SIDE, height=SIDE, skewX=45, scale=(1, 0.71))
        left = gui.image(href=REPO % side_image,
            x=OFF, y=0, width=SIDE, height=SIDE, skewY=45, scale=(0.71, 1))
        self.parts = [bottom, rear, left]

    def hide(self):
        for face in self.parts:
            face.hide()

    def show(self):
        for face in self.parts:
            face.show()
class Form:
    """ Collects demographic info and send results to the server. :ref:`form`
    """
    def __init__(self,gui=None):
        self._build_form(gui)
    def _build_form(self, gui):
        # Placeholder panel; clicking it currently just hides the form.
        self.form = gui.rect(x=100,y=100, width=600,height=400,
            style=dict(fill='navajowhite', fillOpacity= 0.8))
        logger('b form a')
        self.form.addEventListener('click', self._submmit)
    def _request_form(self, gui):
        # Fetch the form page (presumably to scrape the XSRF token) -- not
        # wired into __init__ at the moment.
        logger('b form a')
        req = ajax()
        logger('b form')
        req.on_complete = self._on_complete
        req.set_timeout(8,self._err_msg)
        req.open('GET','/api/',True)
        #req.set_header('content-type', 'application/x-www-form-urlencoded')
        req.set_header("Content-Type","text/plain; charset=utf-8")
        req.send()
    def _on_complete(self,req):
        if req.status==200 or req.status==0:
            logger('req %s req text %s'%(dir(req),req.header))
            return
            # NOTE(review): unreachable debug leftovers below (after the
            # return) -- the XSRF extraction never runs; confirm intent.
            ids = req.text.split('name="_xsrf"')[1][:200].split('"')
            logger('xsrf %s'%(ids))#,ids[7]))
        else:
            logger('error %s'%req.text)
    def _err_msg(self):
        logger('timeout after 8s')
    def _submmit(self,ev):
        # Note: method name spelling is referenced by _build_form; keep as-is.
        self.form.setAttribute("visibility",'hidden')
        logger('submmit')
class Phase:
    """ A game stage with a particular scenario and pieces. :ref:`phase`
    """
    def __init__(self, gui, back_layer, puzzle, component):
        # NOTE(review): back and jigs are unpacked but unused here; the
        # ``back`` attribute below is actually built from ``puzzle``.
        back, jigs, faces, pieces = component
        gui.set(back_layer)
        self.group = gui.group(layer=1)
        # One puzzle image per jig step, all hidden until revealed.
        self.back = [gui.image(REPO%bk, 550, 150,200,100) for bk in puzzle]
        for jig in self.back[:]:
            jig.setAttribute("visibility",'hidden')
        self.current_jig = -1
        #: The 3D cube for this phase.
        self.cube = Cube(gui, *faces)
        gui.clear()
        Z2TW, TW2Z = [0, 1, 2], [2, 1, 0]
        #P_PLC = [[i+j*3, 350 + 50 * i,610 + 50*j] for j in Z2TW for i in TW2Z]
        ## Brython failure FIX
        def ij(i,j):
            # Maps the 3x3 tray slot (i, j) to [piece id, x, y] on screen.
            k = i%2
            l = i//2
            return [i+j*3, 10 + 610 * k,150 + 210* k +210* l + 70*j]
        P_PLC = [ij(i,j) for j in Z2TW for i in TW2Z]
        #: Original placement of pieces at phase startup.
        self.piece_places = P_PLC
        #: Set of pieces to play in this phase.
        self.pieces = pieces
    def next_jig(self):
        """Remove the next piece from the puzzle. """
        # Hide the current reveal step (if any) and show the next one.
        print(self.back, self.current_jig, self.current_jig +1)
        if self.current_jig >= 0:
            self.back[self.current_jig].setAttribute("visibility",'hidden')
        self.current_jig += 1
        self.back[self.current_jig].setAttribute("visibility",'visible')
        pass
    def reset(self):
        """Rearrange all pieces into original placement. """
        self.group.setAttribute('visibility','visible')
        [self.pieces[fid].reset(x, y) for fid, x, y in self.piece_places]
        pass
    def hide(self):
        self.group.setAttribute('visibility','hidden')
        [piece.hide() for piece in self.pieces]
    def show(self):
        self.group.setAttribute('visibility','visible')
        [self.pieces[fid].show(x, y) for fid, x, y in self.piece_places]
class Board:
    """ A meme game board with a 3D cube, some pieces, score and puzzle. :ref:`board`

    Central controller: asynchronously loads the image inventory from the
    server, builds the SVG layers/markers/houses/pieces, and mediates the
    drag & place state machine between Piece and House instances.
    """
    def remove(self,piece):
        "acts as a default null house"
        pass
    def _parse_response(self, response, prefices):
        # Group image names by prefix ('piece', 'jigs', ...) and by the digit
        # infix 0..9 found in each file name.
        def _figures(response = response):
            #workaround for brython bug
            # SECURITY NOTE(review): eval() on server response text -- safe
            # only if the REST endpoint is fully trusted; prefer json.loads.
            resp = eval(response.text)
            return resp['result']
            #figures = resp['result']
        return [
            [[img for img in _figures()
                if ('%s0%d'%(prefix, infix)) in img] for infix in range(10)
            ]
            for prefix in prefices.split()]
    def _load_scenes(self, response):
        logger('loading from memit type 2, ')
        self.face_imgs, self.back_imgs =self._parse_response(response, 'face backs')
        self._build_phases()
    def _load_figures(self, response):
        self.piece_imgs, self.jig_imgs, self.puzzle_imgs = self._parse_response(
            response, 'piece jigs puzzle_')
        logger('loading from memit type 1 pieces :%s'%self.piece_imgs)
        # Chain the second inventory request once the first one is parsed.
        self.gui.request('/rest/studio/memit?type=2', self._load_scenes)
    def _load_inventory_from_server(self):
        self.gui.request('/rest/studio/memit?type=1', self._load_figures)
    def _build_markers(self, gui):
        # One colored shadow per cube wall (see Marker); the face masks pick
        # which two axes each wall projects.
        self.red = Marker(gui, 300,300,'red',(0,1,1))
        self.green = Marker(gui, 300,300,'green',(1,0,1))
        self.blue = Marker(gui, 300,300,'blue',(1,1,0))
    def place(self, *a):
        """Placement state method. Assumes _place (active) or _busy states"""
        pass
    def _place(self, position = None, house = None):
        # Deploy the currently dragged piece and disable further placement
        # until the next drag().
        self.piece.place(*position, house = house)
        logger('_place %s %s %s'%(house.level,self.piece.avatar,self.piece))
        #self.gui.set(house.level)
        #self.gui.up(self.piece.avatar)
        #self.gui.clear()
        #self.gui.cling(house.level,self.piece.avatar)
        self.place = self._busy
        return self.piece
    def _busy(self, *a):
        pass
    def _drag(self, p =None):
        # NOTE(review): apparently dead code -- Board defines no _ijk
        # attribute, so this would raise if called; confirm before removing.
        i, j, k = self._ijk
    def next_jig(self):
        """Remove the next piece from the puzzle. """
        self.value = 0
        self.phases[0].next_jig()
        print(self.phases[0].current_jig)
        if self.phases[0].current_jig >= 9:
            # Puzzle complete: stop the pump counter and hide the drops.
            self.inc = 0
            self.value = 0
            __ = [drop.setAttribute('visibility', 'hidden') for drop in self.drops]
            time.clear_interval()
            #time.set_interval(self.tick,3000)
    def drag(self, p =None):
        """Enable placement of pieces. Arg p is the piece being dragged """
        self.piece = p
        self.place = self._place
    def __init__(self,gui):
        logger('Board __init__ %s'%gui)
        self.gui = gui
        self.houses = []
        self.phases = []
        self.piece = None
        self.phase = 0
        # Kicks off the async load chain; _build_phases runs on completion.
        self._load_inventory_from_server()
    def _build_layers(self,gui):
        self.back_layer = gui.group(layer=0)
        self.marker_layer = gui.group()
        gui.clear()
        gui.set(self.marker_layer)
        self._build_markers(gui)
        # One SVG group per cube depth level, used by the houses.
        self.house_layers = [gui.group(None,0) for ly in range(3)]
        gui.clear()
        self.pieces_layer = gui.group(layer=0)
    def _build_inventory_locally(self):
        # not being used currently, should be a fallback for server failure
        PHASES = 7
        PIECES = 9
        #self.faces = ['valor.png','beleza.png','conforto.png'] * PHASES
        self.back_imgs = ['back%02d.jpg'%(phase)
            for phase in range(PHASES)]
        self.puzzle_img = ['puzzle%02d.jpg'%(phase)
            for phase in range(PHASES)]
        self.jig_imgs = [
            ['jigs%02d_%02d.jpg'%(phase,face) for face in range(PIECES)]
            for phase in range(PHASES)]
        self.face_imgs = [['face%02d_%02d.jpg'%(phase,face) for face in [0,1,2]]
            for phase in range(PHASES)]
        self.piece_imgs = ['piece%02d_%02d.png'%(kind,piece)
            for piece in range(PIECES) for kind in [0,1]]
    def _build_phases(self):
        # Builds the whole scene once both inventory responses have arrived.
        gui= self.gui
        CLS='red green blue'.split()
        Z2TW, TW2Z = [0, 1, 2], [2, 1, 0]
        KINDS = [0,1]#[0]*2+[1]*5
        self.avatar = gui.image(href=REPO%'memit/background_base.png', x= 0, y = 0,
            width=800, height=600, style=dict(opacity= 1))
        self.pump = gui.image(href=REPO%'memit/bomba.png', x= 390, y = 5,
            width=400, height=112, style=dict(opacity= 1))
        self.drops = [gui.image(REPO%'memit/gota.png',750, 50 + 100*i,
            40, 52) for i in range(6)]
        self.puzzle = gui.image(href=REPO%'memit/puzzle00_00.png', x= 550, y = 150,
            width=200, height=100, style=dict(fill='blue', fillOpacity= 1))
        self._build_layers(gui)
        r, g, b = RGB = [self.red, self.green, self.blue]
        #piece_places = [[[350 + 50 * i,610 + 50*j]
        #    for j in Z2TW] for i in TW2Z]
        piece_places = [[i+j*3, 350 + 50 * i,610 + 50*j]
            for j in Z2TW for i in TW2Z]
        #print (piece_places)
        gui.set(self.pieces_layer)
        def create_hidden(fid, x, y, kind):
            # Pieces start hidden; Phase.reset()/show() reveal them.
            piece = Piece(gui, x, y, self.piece_imgs[kind][fid], r, g, b, self, fid)
            piece.hide()
            return piece
        pc = self.pieces = [
            [create_hidden(fid, x, y , kind)
                for fid, y, x in piece_places] for kind in KINDS]
        logger('to b form')
        gui.clear()
        # One piece set per phase (currently always kind 1, seven phases).
        piece_imgs = [pc[i] for i in [1,1,1,1,1,1,1]]
        gui.set(self.back_layer)
        self.puzzle_imgs = [puz[0] for puz in self.puzzle_imgs]
        self.phases = [Phase(gui, self.back_layer, self.puzzle_imgs, components)
            for components in zip(self.back_imgs,
                self.jig_imgs, self.face_imgs, piece_imgs)]
        gui.clear()
        gui.set(self.marker_layer)
        # 27 houses: one per cell of the 3x3x3 cube.
        self.houses = [House(gui, i, j, k, CLS[i], r, g, b, self)
            for k in Z2TW for j in Z2TW for i in TW2Z]
        gui.clear()
        gui.up(self.back_layer)
        gui.clear()
        gui.up(self.marker_layer)
        gui.clear()
        gui.up(self.pieces_layer)
        gui.clear()
        self.phases[0].reset()
        self.next_jig()
        #: initialize pump digits, pump value and start 100ms timer
        self.digits = [
            gui.text('0', 425 +50*i, 65, 48, 'middle', {'fill': 'white'})
            for i in range(4)]
        self.value =0
        self.inc =1
        # NOTE(review): time.set_interval/clear_interval look like Brython
        # timer APIs, not CPython's time module -- confirm.
        time.set_interval(self.tick,100)
    def tick(self):
        """Time tick updates pump display value and makes the drops fall"""
        value = self.value //10
        for i, drop in enumerate(self.drops):
            y = 50 + (i * 100 + (10 * self.value) % 100) % 500
            drop.setAttribute('y' , y)
        #print ('tick', value, value %10, value //10)
        # Render ``value`` into the four pump digits, least significant last.
        for i in range(4)[::-1]:
            self.digits[i].text = str(value % 10)
            value //= 10
        self.value += self.inc
def main(dc,pn, gui, repo):
    """ Starting point """
    # dc, pn and repo are placeholders from the host page and are currently
    # unused (the REPO override is commented out).
    logger('Starting point')
    global REPO
    #REPO = repo
    return Board(gui)
"""
#for phase in phases:
# Phase(gui, self.back_layer)
#self.phases= [
# [Cube(gui, *faces),None] for faces in self.face_imgs[phase]]
#self._build_markers(gui)
#for i in [2,1,0]:
# for j in [0,1,2]:
# for k in [0,1,2]:
# self.houses.append( House(gui, i, j, k, cls[i],
# self.red, self.green, self.blue, self))
#for i in [2,1,0]:
# for j in [0,1,2]:
# Piece(gui,610 + 50*j, 350 + 50 * i, 'black',
# self.red, self.green, self.blue, self,3*i+j+1)
#self._build_form(gui)
"""
|
UTF-8
|
Python
| false | false | 2,013 |
8,684,423,889,831 |
b7adf6889658556e5819a869428de6829752c5b2
|
75f04580ddc8fcf90ab48ff4d017a90d2733520d
|
/py/Board.py
|
162aa108d411465099f6ff5becd3ccc669351177
|
[] |
no_license
|
joserc87/angry-solver
|
https://github.com/joserc87/angry-solver
|
af681d290bd4378d8ffc2ead4c761191ce5020a4
|
240834c59c193f2c48a5449a2ab85b248a383381
|
refs/heads/master
| 2021-01-16T21:48:36.277556 | 2014-08-08T08:12:53 | 2014-08-08T08:12:53 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
from Dictionary import Dictionary
from Move import Move
# See: http://fbpedia.com/cuantas-letras-tiene-apalabrados-angry-words.html
# Score: http://www.palabras-apalabrados.info/trucos
class Board:
    """15x15 Angry Words / Apalabrados board: tiles, row-major multiplier
    layouts and the per-letter scoring tables (Spanish letter set)."""
    BOARD_HEIGHT = 15
    BOARD_WIDTH = 15
    # Tile value representing an empty cell.
    EMPTY = u' '
    # Row-major 15x15 grid of word multipliers, one digit per cell (1..3).
    wordMultipliers = [int(c) for c in \
        "113111111111311" + \
        "111112111211111" + \
        "311111111111113" + \
        "111111121111111" + \
        "111111111111111" + \
        "121111111111121" + \
        "111111111111111" + \
        "111211111112111" + \
        "111111111111111" + \
        "121111111111121" + \
        "111111111111111" + \
        "111111121111111" + \
        "311111111111113" + \
        "111112111211111" + \
        "113111111111311" ]
    # Row-major 15x15 grid of letter multipliers, one digit per cell (1..3).
    letterMultipliers = [int(c) for c in \
        "111131111131111" + \
        "131111111111131" + \
        "112111313111211" + \
        "111311111113111" + \
        "311111212111113" + \
        "111113111311111" + \
        "113121111121311" + \
        "111111111111111" + \
        "113121111121311" + \
        "111113111311111" + \
        "311111212111113" + \
        "111311111113111" + \
        "112111313111211" + \
        "131111111111131" + \
        "111131111131111" ]
    # This is for spanish
    # Base score of each letter (no board multipliers applied).
    scorePerLetter = {
        u'a' : 1,
        u'b' : 3,
        u'c' : 3,
        u'd' : 2,
        u'e' : 1,
        u'f' : 4,
        u'g' : 2,
        u'h' : 4,
        u'i' : 1,
        u'j' : 8,
        u'k' : 0,
        u'l' : 1,
        u'm' : 3,
        u'n' : 1,
        u'ñ' : 8,
        u'o' : 1,
        u'p' : 3,
        u'q' : 5,
        u'r' : 1,
        u's' : 1,
        u't' : 1,
        u'u' : 1,
        u'v' : 4,
        u'w' : 0,
        u'x' : 8,
        u'y' : 4,
        u'z' : 10
    }
    # How many copies of each letter exist in a match.
    numLettersPerMatch = {
        u'a' : 12,
        u'b' : 2,
        u'c' : 4,
        u'd' : 5,
        u'e' : 12,
        u'f' : 1,
        u'g' : 2,
        u'h' : 2,
        u'i' : 6,
        u'j' : 1,
        u'k' : 0,
        u'l' : 4,
        u'm' : 2,
        u'n' : 5,
        u'ñ' : 1,
        u'o' : 9,
        u'p' : 2,
        u'q' : 1,
        u'r' : 5,
        u's' : 6,
        u't' : 4,
        u'u' : 5,
        u'v' : 1,
        u'w' : 0,
        u'x' : 1,
        u'y' : 1,
        u'z' : 1
    }
def __init__(self):
"""
Constructor
"""
self.multipliers = None
self.BOARD_HEIGHT = Board.BOARD_HEIGHT
self.BOARD_WIDTH = Board.BOARD_WIDTH
self.tiles = [' ' for _ in range(self.BOARD_WIDTH * self.BOARD_HEIGHT)]
# TODO: Change multipliers
#self.multipliers = [1 for _ in range (self.BOARD_WIDTH * self.BOARD_HEIGHT)]
    def setTestData(self):
        # For testing purposes: a fixed 15x15 position with the crossing
        # words "afeo" (horizontal), "ujier" (horizontal) and "crucio"
        # (vertical).  Each literal below is exactly one 15-char board row.
        self.tiles = [c for c in u"               " + \
            u"               " + \
            u"     afeo      " + \
            u"     c         " + \
            u"     r         " + \
            u"     ujier     " + \
            u"     c         " + \
            u"     i         " + \
            u"     o         " + \
            u"               " + \
            u"               " + \
            u"               " + \
            u"               " + \
            u"               " + \
            u"               " ]
def getTile(self, row, col):
"""
Getter to access the content of the board
Args:
row (int): The row of the tile to retrieve
col (int): The column of the tile to retrieve
Returns:
The tile at (row, col)
"""
assert(self.validPos(row, col))
return self.tiles[row*15 + col]
def getLetterMultiplier(self, row, col):
"""
Getter to access the multipliers
Args:
row (int): The row of the multiplier to retrieve
col (int): The column of the multiplier to retrieve
Returns:
The multiplier at (row, col)
"""
assert(self.validPos(row, col))
return self.letterMultipliers[row*15 + col] if self.getTile(row, col) == Board.EMPTY else 1
def getWordMultiplier(self, row, col):
"""
Getter to access the multipliers
Args:
row (int): The row of the multiplier to retrieve
col (int): The column of the multiplier to retrieve
Returns:
The multiplier at (row, col)
"""
assert(self.validPos(row, col))
return self.wordMultipliers[row*15 + col] if self.getTile(row, col) == Board.EMPTY else 1
def getScoreForLetter(self, letter):
"""
The score for each letter [a-z], without any kind of multipliers
Args:
letter (str): A single char (a-z)
Returns:
a number
"""
return Board.scorePerLetter[letter]
def setTile(self, row, col, val):
"""
Setter to change the content of the board
Args:
row (int): The row of the tile to change
col (int): The column of the tile to change
val (str): The new value of the tile
"""
assert(self.validPos(row, col))
self.tiles[row*15+col] = val
def initFromTileString(self, tilestring):
"""
Initialize the object (tiles) from a string
Args:
tilestring (str): A string with the format "C1|P1,C2|P2,...,CN|PN" where C1..N are just characters, the value of the tile, and P1..N are the position (int [0..15*15-1])
"""
moves = [x.split('|') for x in tilestring.split(',')]
for move in moves:
val = move[0]
pos = int(move[1])
if pos >= 0 and pos < self.BOARD_WIDTH * self.BOARD_HEIGHT:
self.tiles[pos] = val
def toString(self):
    """
    Build a human-readable text rendering of the board.

    Each tile is rendered as "| x " and a newline is emitted after
    every complete row of Board.BOARD_WIDTH tiles.

    Bug fixed: the original tested `i % BOARD_WIDTH == 0` BEFORE
    incrementing, which broke the line right after the first tile and
    then every 15 tiles off by one, so the first "row" held a single
    tile. The string is also assembled with join instead of repeated
    concatenation.

    Returns:
        A multi-line string representation of the board.
    """
    parts = []
    for i, tile in enumerate(self.tiles):
        parts.append('| ' + tile + ' ')
        # End the line once a full row has been written.
        if (i + 1) % Board.BOARD_WIDTH == 0:
            parts.append('\n')
    return ''.join(parts)
def findBestMove(self, dictionary, letters):
    """
    Find the highest-scoring move on the board using the available letters.

    Every board position is tried as a starting point, in the DOWN and
    RIGHT directions only.

    Args:
        dictionary (Dictionary): The dictionary used to look up words
        letters (str): The available letters

    Returns:
        The single best Move found, or None if no valid move exists
        (note: not a list, despite returning the best of many candidates)
    """
    # For each position
    bestMove = None
    moves = []
    for i in range(self.BOARD_HEIGHT):
        for j in range(self.BOARD_WIDTH):
            # For each direction
            for direction in [Move.DOWN, Move.RIGHT]: # LEFT and UP are not allowed
                #moves[len(moves):] = self.findAllMovesIn(i, j, direction, letters, dictionary)
                moves = self.findAllMovesIn(i, j, direction, letters, dictionary)
                for move in moves: # Take only the best move
                    bestMove = self.maxMove(bestMove, move)
    return bestMove
def findAllMovesIn(self, row, col, direction, letters, dictionary):
    """
    Enumerate the legal moves that start at (row, col) in one direction.

    Args:
        row (int): Starting row of the candidate words
        col (int): Starting column of the candidate words
        direction ([x,y]): One of Move.UP, Move.DOWN, Move.LEFT, Move.RIGHT
        letters (str): The letters available to build the word
        dictionary (Dictionary): The dictionary the words must belong to

    Returns:
        A list of Move objects describing the possible words
    """
    # Fast rejection: if no existing tile lies close enough to this
    # position, no word placed here could connect to the board.
    if not self.checkNeighbours(row, col, direction, len(letters)):
        return []
    # Extract the row/column content under the candidate as a pattern.
    pattern = self.getStringFromBoard(row, col, direction)
    # Core of the algorithm: ask the dictionary which words fit the
    # pattern using the available letters.
    candidates = dictionary.findWordsInPattern(pattern, letters)
    result = []
    for candidate in candidates:
        move = Move()
        word = candidate.strip()
        move.valid = True
        move.row = row
        move.column = col
        move.direction = direction
        move.score = self.calcScore(row, col, direction, word)
        move.lettersUsed = self.getLettersUsed(word, pattern)
        move.word = word
        # Keep only moves that place at least one new letter but also
        # reuse at least one board letter.
        used = len(move.lettersUsed)
        if 0 < used < len(move.word):
            result.append(move)
    return result
def getStringFromBoard(self, row, col, direction):
    """
    Read the board content from (row, col) towards `direction` as a string.

    The reading runs until the edge of the board, so the result may
    contain EMPTY tiles.

    Args:
        row (int): Row where the reading starts
        col (int): Column where the reading starts
        direction ([dx, dy]): One of Move.UP, Move.DOWN, Move.LEFT, Move.RIGHT

    Returns:
        The tiles along that line, concatenated into a single string.
    """
    di, dj = direction
    i, j = row, col
    chars = []
    while self.validPos(i, j):
        chars.append(self.getTile(i, j))
        i += di
        j += dj
    return ''.join(chars)
def getBeginingOfWordAt(self, row, col, direction):
    """
    Walk from (row, col) towards `direction` to the end of the word.

    Starting from a tile that belongs to a word, advance while the NEXT
    position is still on the board and still holds a letter; the last
    in-word position reached is returned. Callers pass the reversed
    direction to locate the beginning of a word.

    Bug fixed: the original called `self.tiles(i, j)` — `tiles` is a
    list, so every call raised TypeError — and it also tested the
    current tile for emptiness instead of looking ahead, which would
    have walked one step past the word.

    Args:
        row (int): Row of a tile inside the word
        col (int): Column of a tile inside the word
        direction ([dx, dy]): The direction to walk towards

    Returns:
        [i, j], the last occupied in-word position in that direction.
    """
    di = direction[0]
    dj = direction[1]
    i = row
    j = col
    while self.validPos(i + di, j + dj) and self.getTile(i + di, j + dj) != Board.EMPTY:
        i += di
        j += dj
    return [i, j]
def getWordAtPos(self, row, col, direction):
    """
    Retrieve the word (without spaces) that lays on (row, col).

    The word can actually start before (row, col): the real start is
    located first, then letters are collected along `direction` until
    an empty tile or the board edge is reached.

    Bugs fixed: the original inverted the validPos test (`if
    self.validPos(i, j): end = True`), ending the scan on the very
    first valid position, and it discarded the computed word start by
    overwriting `end` and scanning from (row, col) instead.

    Args:
        row (int): Row of a position holding a letter of the word
        col (int): Column of a position holding a letter of the word
        direction ([dx, dy]): The direction in which the word is written

    Returns:
        '' if the position is empty, a 1-char string if the tile is
        isolated, otherwise the full word covering the position.
    """
    assert(self.validPos(row, col))
    di = direction[0]
    dj = direction[1]
    if self.getTile(row, col) == Board.EMPTY:
        return ''
    # Locate the first letter of the word by walking backwards.
    start = self.getBeginingOfWordAt(row, col, [-di, -dj])
    i, j = start
    boardString = ''
    while self.validPos(i, j) and self.getTile(i, j) != Board.EMPTY:
        boardString = boardString + self.getTile(i, j)
        i += di
        j += dj
    return boardString
def calcScore(self, row, col, direction, word):
    """
    Compute the score of `word` placed at (row, col) in `direction`.

    Letter and word multipliers of the crossed cells are applied
    (occupied cells contribute a multiplier of 1, see
    getLetterMultiplier/getWordMultiplier).

    Bug fixed: the original loop condition read `if self.validPos(i, j)
    or cnt >= len(word): end = True`, missing a `not` — any valid
    starting position terminated the loop immediately, so every score
    came out as 0.

    Args:
        row (int): The row where the word starts
        col (int): The column where the word starts
        direction ([di, dj]): One of Move.UP, Move.DOWN, Move.RIGHT, Move.LEFT
        word (str): The word to score

    Returns:
        The score of that word on the board, as an integer.
    """
    score = 0
    di = direction[0]
    dj = direction[1]
    i = row
    j = col
    wordMultiplier = 1
    cnt = 0
    # Walk the word letter by letter while it stays on the board.
    while self.validPos(i, j) and cnt < len(word):
        wordMultiplier *= self.getWordMultiplier(i, j)
        score += self.getScoreForLetter(word[cnt]) * self.getLetterMultiplier(i, j)
        i += di
        j += dj
        cnt += 1
    return score * wordMultiplier
def validPos(self, row, col):
    """
    Tell whether (row, col) lies inside the board boundaries.

    Args:
        row (int): Row index to check
        col (int): Column index to check

    Returns:
        True when 0 <= row < BOARD_HEIGHT and 0 <= col < BOARD_WIDTH,
        False otherwise.
    """
    return 0 <= row < self.BOARD_HEIGHT and 0 <= col < self.BOARD_WIDTH
def checkNeighbours(self, row, col, direction, length):
    """
    Decide whether a word of `length` tiles placed at (row, col) in
    `direction` would touch at least one existing tile.

    A new word must connect with what is already on the board, so for
    each of the `length` steps along the path the cell itself and its
    two perpendicular neighbours are inspected.

    Args:
        row (int): Row where the hypothetical word starts
        col (int): Column where the hypothetical word starts
        direction ([di, dj]): One of Move.UP, Move.DOWN, Move.RIGHT, Move.LEFT
        length (int): The length of the hypothetical word

    Returns:
        True if an occupied tile lies on or beside the path, else False.
    """
    di, dj = direction
    i, j = row, col
    step = 0
    while step < length and self.validPos(i, j):
        # The path cell itself plus the two cells perpendicular to it.
        for ci, cj in ((i, j), (i + dj, j + di), (i - dj, j - di)):
            if self.validPos(ci, cj) and self.getTile(ci, cj) != Board.EMPTY:
                return True
        i += di
        j += dj
        step += 1
    return False
def maxMove(self, move1, move2):
    """
    Pick the better of two candidate moves.

    Args:
        move1 (Move): The first candidate, possibly None
        move2 (Move): The second candidate, possibly None

    Returns:
        move1 when move2 is missing or invalid; move2 when it beats a
        missing, invalid or lower-scoring move1; move1 otherwise.
    """
    if move2 == None or (move1 != None and not move2.valid):
        return move1
    if move1 == None or not move1.valid or move2.score > move1.score:
        return move2
    return move1
def getLettersUsed(self, word, pattern):
    """
    Extract the letters of `word` that the player had to supply.

    Positions where `pattern` already holds a letter came from the
    board; only the positions that were EMPTY required a letter from
    the player's rack.

    Args:
        word (str): The complete word (e.g. "python")
        pattern (str): The board pattern it was built on (e.g. " tho  ")

    Returns:
        The letters placed by the player (e.g. "pyn").
    """
    placed = [word[i] for i in range(len(word)) if pattern[i] == Board.EMPTY]
    return ''.join(placed)
if __name__ == '__main__':
tiles = 'A|156,R|187,E|126,N|141,E|112,O|142,O|172,L|157,D|113,I|114,D|115,O|116,C|111'
vec = [x.split('|') for x in tiles.split(',')]
board = Board()
board.initFromTileString(tiles)
print board.toString()
|
UTF-8
|
Python
| false | false | 2,014 |
11,295,764,004,071 |
1404211ada8aefc5972d773eb5e6a6b0d5330631
|
f355660eab43d67cfe82b45dcd5f9700a5a0b90d
|
/groups/urls.py
|
0ed1f3e031754a22e60978cc8aca3059b112946e
|
[] |
no_license
|
350dotorg/localpower
|
https://github.com/350dotorg/localpower
|
ff3def139e3da3ef92c10a3096167768ae03f030
|
973ba511428d0f7fc77b033cfcd293017900237b
|
refs/heads/master
| 2021-01-16T20:33:19.166788 | 2013-03-02T03:05:44 | 2013-03-02T03:05:44 | 2,094,385 | 2 | 0 | null | true | 2012-12-11T16:58:31 | 2011-07-23T20:11:50 | 2012-12-11T16:58:31 | 2012-12-11T16:58:31 | 228 | null | 2 | 59 |
JavaScript
| null | null |
from django.conf.urls.defaults import *
from search_widget.views import search_list
from models import Group
from feeds import GroupActivityFeed
# Configuration passed to the generic search_list view below: newest
# groups first, five results per page, matching against the group's
# name and description fields.
group_search_info = {
    'queryset': Group.objects.all().order_by("-created"),
    'paginate_by': 5,
    'search_fields': ['name', 'description',],
    'template_name': 'groups/_search_listing',
    'object_rendering_template': 'groups/_group_search_result.html',
}
# URL routes for the groups app (legacy Django patterns() style; view
# names are resolved relative to the 'groups.views' prefix).
urlpatterns = patterns(
    'groups.views',
    # Listing and creation
    url(r'^$', 'group_list', name='group_list'),
    url(r'^create/$', 'group_create', name='group_create'),
    url(r'^create/external/$', 'group_external_link_only_create',
        name='group_external_link_only_create'),
    # External (Facebook/Twitter) link management
    url(r'^(?P<group_id>\d+)/facebook/edit/$', 'group_external_link_form',
        {'link_type': "facebook"},
        name='group_facebook_link_form'),
    url(r'^(?P<group_id>\d+)/twitter/edit/$', 'group_external_link_form',
        {'link_type': "twitter"},
        name='group_twitter_link_form'),
    # Membership management
    url(r'^(?P<group_id>\d+)/leave/$', 'group_leave', name='group_leave'),
    url(r'^(?P<group_id>\d+)/join/$', 'group_join', name='group_join'),
    url(r'^(?P<group_id>\d+)/approve/(?P<user_id>\d+)/$', 'group_membership_request', {'action': 'approve'}, name='group_approve'),
    url(r'^(?P<group_id>\d+)/deny/(?P<user_id>\d+)/$', 'group_membership_request', {'action': 'deny'}, name='group_deny'),
    # Search and group detail (slug-based routes must come after the
    # fixed-prefix routes above so they do not shadow them)
    url(r'^search/$', search_list, group_search_info, name='group_search'),
    url(r'^(?P<group_slug>[a-z0-9-]+)/$', 'group_detail', name='group_detail'),
    url(r'^(?P<group_slug>[a-z0-9-]+)/edit/$', 'group_edit', name='group_edit'),
    url(r'^(?P<group_slug>[a-z0-9-]+)/feed/$', GroupActivityFeed(), name='group_activity_feed'),
    # Discussions
    url(r'^(?P<group_slug>[a-z0-9-]+)/discussions/$', 'group_disc_list', name='group_disc_list'),
    url(r'^(?P<group_slug>[a-z0-9-]+)/discussions/subscribe/$', 'group_disc_subscribe',
        name='group_disc_subscribe'),
    url(r'^(?P<group_slug>[a-z0-9-]+)/discussions/unsubscribe/$', 'group_disc_unsubscribe',
        name='group_disc_unsubscribe'),
    url(r'^(?P<group_slug>[a-z0-9-]+)/discussions/create/$', 'group_disc_create', name='group_disc_create'),
    url(r'^(?P<group_slug>[a-z0-9-]+)/discussions/(?P<disc_id>\d+)/$', 'group_disc_detail', name='group_disc_detail'),
    url(r'^(?P<group_slug>[a-z0-9-]+)/discussions/(?P<disc_id>\d+)/remove/$', 'group_disc_remove', name='group_disc_remove'),
    url(r'^(?P<group_slug>[a-z0-9-]+)/discussions/(?P<disc_id>\d+)/approve/$', 'group_disc_approve', name='group_disc_approve'),
    url(r'^(?P<group_slug>[a-z0-9-]+)/contact/$', 'group_contact_admins',
        name='group_contact_admins'),
    # Approve/deny requests to associate events, challenges and actions
    # with a group (all dispatched to group_association_request)
    url(r'^(?P<group_id>\d+)/event_approve/(?P<object_id>\d+)/$',
        'group_association_request',
        {'action': 'approve', 'content_type': 'events.event'},
        name='group_event_approve'),
    url(r'^(?P<group_id>\d+)/event_deny/(?P<object_id>\d+)/$',
        'group_association_request',
        {'action': 'deny', 'content_type': 'events.event'},
        name='group_event_deny'),
    url(r'^(?P<group_id>\d+)/challenge_approve/(?P<object_id>\d+)/$',
        'group_association_request',
        {'action': 'approve', 'content_type': 'challenges.challenge'},
        name='group_challenge_approve'),
    url(r'^(?P<group_id>\d+)/challenge_deny/(?P<object_id>\d+)/$',
        'group_association_request',
        {'action': 'deny', 'content_type': 'challenges.challenge'},
        name='group_challenge_deny'),
    url(r'^(?P<group_id>\d+)/action_approve/(?P<object_id>\d+)/$',
        'group_association_request',
        {'action': 'approve', 'content_type': 'actions.action'},
        name='group_action_approve'),
    url(r'^(?P<group_id>\d+)/action_deny/(?P<object_id>\d+)/$',
        'group_association_request',
        {'action': 'deny', 'content_type': 'actions.action'},
        name='group_action_deny'),
)
|
UTF-8
|
Python
| false | false | 2,013 |
11,742,440,597,690 |
4768752dc489fa052e35eca6b26a14b954579f83
|
bc5c7ba0958870d31279a3464ad4fb2358fa6ad4
|
/problem.py
|
3d27a6833d09e46ca91e0aa2e0942818c2c81c43
|
[] |
no_license
|
felixrehfeldt/pymor-elasticity
|
https://github.com/felixrehfeldt/pymor-elasticity
|
669328ce65d8813a56f971eb1d85a66bdf186a2a
|
264d37d4e0791804c278d8d22fc3ac9727cecebb
|
refs/heads/master
| 2021-01-20T10:42:12.986436 | 2014-06-17T13:31:30 | 2014-06-17T13:31:30 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright Holders: Felix Albrecht, Rene Milk, Stephan Rave
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
from __future__ import absolute_import, division, print_function
import numpy as np
from pymor.core import ImmutableInterface
from pymor.domaindescriptions import RectDomain
from pymor.functions import ConstantFunction
from pymor.tools import Named
class ElasticityProblem(ImmutableInterface, Named):
    """Description of a linear elasticity problem on a rectangular domain.

    Plain data holder read by a discretizer elsewhere in the project.

    Parameters
    ----------
    domain
        The domain the problem is posed on.
    rhs
        Right-hand side function of the problem.
    volume_force
        Volume force acting on the body; defaults to a constant unit
        force in negative y-direction.
    name
        Optional name of the problem.
    """

    # NOTE(review): the defaults below are evaluated once at import time
    # and shared by all instances — fine if RectDomain/ConstantFunction
    # are immutable (pymor convention), but verify.
    def __init__(self, domain=RectDomain(), rhs=ConstantFunction(dim_domain=2),
                 volume_force=ConstantFunction(np.array([0., -1.]), dim_domain=2),
                 name=None):
        self.domain = domain
        self.rhs = rhs
        self.volume_force = volume_force
        self.name = name
|
UTF-8
|
Python
| false | false | 2,014 |
7,722,351,207,623 |
61e3ae77921c95cb2a0cb40bd3e733e6db6b7946
|
df0cdec9cd5957faf705bd437f4358e369fba080
|
/wordtrainer/wordtrainer/jqm_utils/templatetags/jqm_utils.py
|
eec637d73db47f1527c13c389f32b91ca0a04c17
|
[] |
no_license
|
davidpgero/django-wordtrainer
|
https://github.com/davidpgero/django-wordtrainer
|
f75ef1a320df4c508446f0fa6a0525ec4d0b34ff
|
75dc62b40913148ce3769046a00dcc9adb81e5c2
|
refs/heads/master
| 2016-09-06T09:05:23.318975 | 2012-08-19T21:56:00 | 2012-08-19T21:56:00 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from classytags.core import Tag, Options
from classytags.arguments import Argument
from django import template
from django.template import loader
register = template.Library()
class JQMPage(Tag):
    """classytags template tag rendering a jQuery Mobile page snippet.

    Usage: {% jqm_page "some/template.html" "page-id" %} — renders the
    given template with ``page_id`` added to the caller's context.
    """
    name = 'jqm_page'
    options = Options(
        Argument('template_name', required=True),
        Argument('page_id', required=True)
    )
    def render_tag(self, context, template_name, page_id):
        # Expose the page id to the included template, then render the
        # template with the (mutated) calling context.
        context.update(dict(page_id=page_id))
        template = loader.get_template(template_name)
        html = template.render(context)
        return html
# Make the tag available to templates loading this library.
register.tag(JQMPage)
|
UTF-8
|
Python
| false | false | 2,012 |
15,702,400,461,058 |
9c46caf66ef47659ec34f7b845098bc9d77a48ec
|
188c3d06d2b957bb0e4c4e6ae3a47bc8225c6dc6
|
/p67_MaxPathSum2.py
|
9f91ad99a30f749abfdeadf7b451845d0175f2f3
|
[] |
no_license
|
dihuynh/projectEuler
|
https://github.com/dihuynh/projectEuler
|
00eec51232160d1cf808153c6a1f359dfa12994e
|
637372e2d9be1dae3705ee66bd3159d984ac4ca8
|
refs/heads/master
| 2021-01-22T01:57:58.916327 | 2013-12-25T03:16:05 | 2013-12-25T03:16:05 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
def strip_newlines(lists):
    """Parse raw text lines into rows of integers.

    Args:
        lists: iterable of strings, each a whitespace-separated row of
            numbers; trailing newlines are tolerated.

    Returns:
        A list of lists of ints, one inner list per input line.

    Note: str.split() with no arguments already ignores leading and
    trailing whitespace (including the newline), so the original
    two-pass rstrip-then-split loops collapse into one comprehension.
    """
    return [[int(token) for token in line.split()] for line in lists]
# Bottom-up dynamic programming: replace each element with the sum of
# itself and the larger of its two neighbours in the row below,
# collapsing the triangle row by row until the apex holds the answer:
#    2            2            15
#   3 4   --->   8 13   --->
#  3 5 9
def get_max_path(lists):
    """Return the maximum top-to-bottom path sum of a number triangle.

    Works bottom-up: each element is replaced, IN PLACE, by the sum of
    itself and the larger of its two children, so `lists` is mutated
    and lists[0][0] ends up holding the answer.

    Args:
        lists: the triangle as a list of rows, row i having i+1 ints.

    Returns:
        The maximum path sum, as an int.

    Note: `range` replaces the Python-2-only `xrange`; behaviour is
    identical here and keeps the function portable to Python 3.
    """
    for i in range(len(lists) - 2, -1, -1):
        for j in range(i + 1):
            lists[i][j] += max(lists[i + 1][j], lists[i + 1][j + 1])
    return lists[0][0]
# Load the triangle file shipped with Project Euler problem 67 and
# print the best top-to-bottom path sum.
triangle = open("p67_triangle.txt").readlines()
# triangle = open("small_test.txt").readlines()
lists = strip_newlines(triangle)
print get_max_path(lists)
|
UTF-8
|
Python
| false | false | 2,013 |
19,069,654,810,108 |
d63c3f8a5a33062cf69adebcc7e96a56775771e3
|
82b3ad84af26ff55c982b4f7aa20ac1e3f540cde
|
/Fakes_Insert.py
|
ad3b620506ceced532b48996b05a09740f2b7645
|
[] |
no_license
|
chrisfrohmaier/California
|
https://github.com/chrisfrohmaier/California
|
c0abfb810fea867ea815c9fbce2d781365cf0052
|
88464533939d2cd55cbc0d1840e6628ef5fd2389
|
refs/heads/master
| 2020-04-05T09:48:50.185552 | 2014-11-24T21:51:30 | 2014-11-24T21:51:30 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#2014_11_14
#Before you run this make sure you have the input images (both science and mask) in this directory along with the following files:
#default.conv
#default.nnw
#Pipe_sexfile_Peter.sex
#PTF_Transform_Param.param
#!!!!!!!!!!!!!!!!IMPORTANT!!!!!!!!!!!!!--- YOU NEED THE MODULES BELOW
#!/Library/Frameworks/Python.framework/Versions/2.7/bin/python
import numpy, os, random, glob, shutil, time, subprocess, math
import multiprocessing
from multiprocessing import Pool
from astropy.io import fits
from astropy import wcs
import warnings
warnings.filterwarnings('ignore', category=UserWarning, append=True) #Ignores UserWarnings otherwise Astropy spits out loads when it overwrites files
warnings.filterwarnings('ignore', category=Warning, append=True)
from astropy.io.fits import getheader
import sys
# Version number for this run, taken from the first command-line
# argument; it tags every output directory and file name below.
# NOTE(review): `global` at module level is a no-op — kept unchanged.
global vnum
vnum=int(sys.argv[1])
#vnum=1
print 'THIS VNUM IS: ', vnum
#!!!!!!!!!!!!!!!!IMPORTANT!!!!!!!!!!!!!
def file_structure(): #This definition creates the file structure for results to be added into
    """Create the versioned output directory tree for this run.

    Builds Results_V<vnum>/ together with its Catalog,
    Fake_Star_Catalog, Fakes_added and Galaxies sub-directories.
    Existing directories are left untouched. Replaces five copy-pasted
    exists/makedirs pairs with a single loop.
    """
    base = 'Results_V' + str(vnum)
    for sub in ('', 'Catalog', 'Fake_Star_Catalog', 'Fakes_added', 'Galaxies'):
        path = os.path.join(base, sub) if sub else base
        if not os.path.exists(path):
            os.makedirs(path)
def Sextract(science_image,zeropoint,seeing,saturation,gain): #Runs Sextractor and creates a catalog of all the stars
    """Run SExtractor on a science image and write its object catalog.

    `science_image` is a (directory, basename) pair; the catalog is
    written to Results_V<vnum>/Catalog/<basename>_Catalog_V<vnum>.cat
    and the matching <basename>.weight.fits image is used as the
    weight map. Photometric parameters come from the FITS header
    values passed in by the caller.
    """
    # NOTE(review): the command line is built by string concatenation and
    # executed with shell=True — acceptable for trusted, internally
    # generated file names, but unsafe if science_image ever comes from
    # untrusted input; prefer subprocess.call([...], shell=False) then.
    subprocess.call('sex -c Pipe_sexfile_Peter.sex '+science_image[0]+science_image[1]+'.fits -PARAMETERS_NAME PTF_Transform_Param.param -FILTER_NAME default.conv -CATALOG_NAME Results_V'+str(vnum)+'/Catalog/'+science_image[1]+'_Catalog_V'+str(vnum)+'.cat -WEIGHT_IMAGE '+science_image[0]+science_image[1]+'.weight.fits -MAG_ZEROPOINT'+' '+str(zeropoint)+' -SEEING_FWHM '+str(seeing)+' -SATUR_LEVEL '+str(saturation)+' -GAIN '+str(gain)+' -PHOT_FLUXFRAC 0.2,0.5,0.9 -VERBOSE_TYPE QUIET',shell=True)
def Enough_Objects(science_image): #Checks that sextractor has found a suitable number of objects
    """Check that SExtractor found a usable number of objects.

    Counts the lines of the catalog written for this image (via
    `wc -l`) and requires at least 300.

    Bug fixed: the original set a local `enough=True` it never used and
    fell off the end of the function for the success case, returning
    None (falsy) instead of True — so the check could never pass.

    Returns:
        True when the catalog has >= 300 lines, False otherwise.
    """
    test = os.popen('wc -l Results_V'+str(vnum)+'/Catalog/'+science_image[1]+'_Catalog_V'+str(vnum)+'.cat').read()
    rows = test.split()
    return float(rows[0]) >= 300
def Selecting_Bright(science_image): #Selected the top 20 brightest stars in the catalog
    """Select the 20 brightest clean, isolated stars from the catalog.

    Reads Results_V<vnum>/Catalog/<basename>_Catalog_V<vnum>.cat and
    keeps objects that are round (col 7 < 0.3), star-like (col 9 > 0.5),
    unflagged (col 8 == 0), away from the CCD edges and fainter than
    magnitude 12, then sorts all the parallel lists by magnitude and
    truncates them to the 20 brightest entries.

    Returns:
        (xcord, ycord, mag_array, flux_array, background_array,
         mag_best_array, Alpha_Sky_array, Delta_Sky_array) —
        parallel lists describing the selected stars.
    """
    f=open('Results_V'+str(vnum)+'/Catalog/'+science_image[1]+'_Catalog_V'+str(vnum)+'.cat') #CHANGE THIS SO THAT IT CAN TAKE ANY CATALOG INPUT
    fin=f.readline()
    xcord=[]
    ycord=[]
    flux_array=[]
    mag_array=[]
    background_array=[]
    mag_best_array=[]
    Alpha_Sky_array=[]
    Delta_Sky_array=[]
    while fin:
        # Skip SExtractor header lines.
        if fin.startswith('#'):
            #h.write(fin)
            #print fin
            fin=f.readline()
            continue
        ln=fin.split()
        #print ln[15]
        # Column layout (0-based) assumed from the indices used below:
        # 1=flux, 2=mag, 3=x, 4=y, 5=background, 7=ellipticity, 8=flags,
        # 9=CLASS_STAR, 17=mag_best, 18=RA, 19=Dec — verify against
        # PTF_Transform_Param.param.
        mag=float(ln[2]) #magnitude of star
        x=float(ln[3])
        y=float(ln[4])
        flux=float(ln[1])
        background=float(ln[5])
        mag_best=float(ln[17])
        alpha=float(ln[18])
        delta=float(ln[19])
        if float(ln[7])<0.3: #Not Elliptical
            if float(ln[9])>0.5: #Considered a good star
                if x>100.0 and x<1948.0 and y>100.0 and y<3996.0: #No Edge Stars
                    if int(ln[8])==0:
                        if mag>12:
                            xcord.append(x); ycord.append(y); mag_array.append(mag); flux_array.append(flux); background_array.append(background); mag_best_array.append(mag_best); Alpha_Sky_array.append(alpha); Delta_Sky_array.append(delta)
        fin=f.readline()
    f.close()
    # Sort all parallel lists together, keyed on magnitude (brightest first).
    mag_array, xcord, ycord,flux_array, background_array, mag_best_array, Alpha_Sky_array, Delta_Sky_array= (list(x) for x in zip(*sorted(zip(mag_array ,xcord, ycord, flux_array, background_array, mag_best_array, Alpha_Sky_array, Delta_Sky_array))))
    # Keep only the 20 brightest entries in every list.
    mag_array=mag_array[:20]
    xcord=xcord[:20]
    ycord=ycord[:20]
    flux_array=flux_array[:20]
    background_array=background_array[:20]
    mag_best_array=mag_best_array[:20]
    Alpha_Sky_array=Alpha_Sky_array[:20]
    Delta_Sky_array=Delta_Sky_array[:20]
    return xcord, ycord, mag_array, flux_array, background_array, mag_best_array, Alpha_Sky_array, Delta_Sky_array
def selecting_galaxies(science_image,): #Finds and creates a catalog of Galxies
    """Find galaxies in the SExtractor catalog and build a galaxy mask.

    Writes a galaxy-only catalog and a region file under
    Results_V<vnum>/Galaxies/, and builds a boolean array the size of
    the science image in which pixels inside each galaxy's ellipse
    (CXX/CYY/CXY isophotal parameters, scaled to 3) are marked False.

    Returns:
        A 2-D numpy bool array (image shape); False marks galaxy pixels.
    """
    science_image=science_image
    #print 'Doing Galaxy Stuff for: ', science_image
    f=open('Results_V'+str(vnum)+'/Catalog/'+science_image[1]+'_Catalog_V'+str(vnum)+'.cat')
    g=open('Results_V'+str(vnum)+'/Galaxies/'+science_image[1]+'_Galaxy_Catalog_V'+str(vnum)+'.cat','w')
    l=open('Results_V'+str(vnum)+'/Galaxies/'+science_image[1]+'_Galaxy_regions_V'+str(vnum)+'.reg','w')
    fin=f.readline()
    counts=0
    #Creating the Numpy Grid of Galaxies
    hdulist_sci= fits.open(science_image[0]+science_image[1]+'.fits',ignore_missing_end=True) #THE SCIENCE DATA THAT WILL OPENED AND MODIFIED
    science_data= hdulist_sci[0].data
    header_data= hdulist_sci[0].header
    resy=science_data.shape[0]
    resx=science_data.shape[1]
    galaxy_area=numpy.ones((resy,resx),dtype=bool)
    galtest=numpy.ones((resy,resx),dtype=bool)
    while fin:
        # Skip SExtractor header lines.
        if fin.startswith('#'):
            #h.write(fin)
            #print fin
            fin=f.readline()
            continue
        ln=fin.split()
        # Same 0-based column layout as in Selecting_Bright; 10-12 are
        # the CXX/CYY/CXY ellipse parameters, 22 the 0.9 flux radius.
        class_s=float(ln[9])
        gal_mag=float(ln[2])
        xcord=float(ln[3])
        ycord=float(ln[4])
        X2=float(ln[13])
        Y2=float(ln[14])
        CXX=float(ln[10])
        CYY=float(ln[11])
        CXY=float(ln[12])
        mesrad=int(float((ln[22])))*2
        FWHM=float(ln[16])
        if int(ln[8])==0:
            # CLASS_STAR < 0.5 => galaxy-like; keep a sane magnitude range.
            if class_s<0.5 and gal_mag>14 and gal_mag<30:
                #if FWHM<15:
                if xcord>40.0 and 2048-xcord>40.0 and ycord>40.0 and 4096-ycord>40.0: #No Edge Galaxies
                    g.write(fin)
                    #g.write(str((ln[0]))+' '+str((ln[1]))+' '+str((ln[2]))+' '+str((ln[3]))+' '+str((ln[4]))+' '+str(ln[5])+' '+str((ln[6]))+' '+str((ln[7]))+' '+str((ln[8]))+' '+str((ln[9]))+' '+str((ln[10]))+' '+str((ln[11]))+' '+str(ln[12])+' '+str((ln[13]))+' '+str((ln[14]))+' '+str((ln[15]))+' '+str((ln[16]))+'\n')
                    l.write(str(xcord)+' '+str(ycord)+'\n')
                    counts+=1
                    # Mark the pixels inside the galaxy's ellipse
                    # (72x72 window around the centroid) as False.
                    gy, gx= numpy.mgrid[(int(ycord)-36):(int(ycord)+36) ,(int(xcord)-36):(int(xcord)+36)]
                    #print gy, gx
                    #gx=gxo+(xcord-20)
                    #gy=gyo+(ycord-20)
                    a_galaxy=numpy.where(((CXX*((gx-xcord)*(gx-xcord)))+(CYY*((gy-ycord)*(gy-ycord)))+(CXY*((gx-xcord)*(gy-ycord))) <= 3))
                    #print a_galaxy
                    galaxy_area[a_galaxy[0]-36+int(ycord),a_galaxy[1]-36+int(xcord)]=False
        fin=f.readline()
    '''
    galaxies_int=galaxy_area.astype(float)
    hdu_gals=fits.PrimaryHDU(data=galaxies_int,header=header_data)
    hdu_gals.scale(type='int16')
    hdulist_gals=fits.HDUList([hdu_gals])
    #print hdulist.info()
    hdulist_gals.writeto('Results_V'+str(vnum)+'/Galaxies/'+science_image[1]+'_GALAXIES_V'+str(vnum)+'.fits', clobber=True, output_verify='ignore')
    hdulist_gals.close()
    #numpy.savetxt('Results_V'+str(vnum)+'/Galaxies/'+science_image[1]+'_Galaxy_Grids.dat', galaxy_area, delimiter=' ',fmt='%d')
    #print 'Finished Doing Galaxy Stuff: ', science_image
    '''
    f.close()
    g.close()
    l.close()
    return galaxy_area
def Scaling(science_image ,xcord, ycord, mag_array, flux_array, background_array, zpt, fake_stars, CCD_Num, magnitude_best,alpha_sky, delta_sky):
    """Build the description of `fake_stars` fake stars from real ones.

    Each fake star pairs a randomly chosen real source star with a
    random target magnitude between one magnitude fainter than the
    faintest source star and 22.5; the scaling factor converts the
    source star's measured flux into the target flux via the zeropoint
    `zpt`.

    Returns:
        Parallel lists: source x/y, placeholder new x/y (overwritten
        later when the fake is placed on a galaxy), source mag/flux,
        target mag/flux, source background, scaling factor, CCD number,
        the faint magnitude limit, source mag_best, source RA and Dec.
    """
    ranmagarray=[]
    xcord_star=[]
    ycord_star=[]
    newx_star=[]
    newy_star=[]
    mag_array_star=[]
    flux_array_star=[]
    ran_mag_star=[]
    ran_flux_star=[]
    background_array_star=[]
    scaling_factor_star=[]
    CCD_Num_star=[]
    faint_fake=max(mag_array)+1.0
    best_mag_array=[]
    alpha_array=[]
    delta_array=[]
    #print 'faint_fake', faint_fake
    for i in range(0,fake_stars):
        ran_mag=random.uniform(faint_fake, 22.5) #The fake stars will be in this range of magnitudes
        # Convert the target magnitude to a flux using the image zeropoint.
        ran_flux=10.0**((ran_mag-zpt)/(-2.5))
        ranmagarray.append(ran_mag)
        star=int(random.uniform(0,len(xcord)-1))
        scaling_factor=((ran_flux)/flux_array[star])
        newX=random.uniform(100.0,1948.0) #This lines don't actually do anything anymore! The new x and y co-ordinates are based on galaxy locations and the hostless parameters later on.
        newY=random.uniform(100.0, 3996.0)
        xcord_star.append(xcord[star]); ycord_star.append(ycord[star]); newx_star.append(newX); newy_star.append(newY); mag_array_star.append(mag_array[star]); flux_array_star.append(flux_array[star]); ran_mag_star.append(ran_mag); ran_flux_star.append(ran_flux); background_array_star.append(background_array[star]); scaling_factor_star.append(scaling_factor); CCD_Num_star.append(CCD_Num); best_mag_array.append(magnitude_best[star]); alpha_array.append(alpha_sky[star]); delta_array.append(delta_sky[star])
        i+=1
    return xcord_star, ycord_star, newx_star, newy_star, mag_array_star, flux_array_star, ran_mag_star, ran_flux_star, background_array_star, scaling_factor_star, CCD_Num_star, faint_fake, best_mag_array, alpha_array, delta_array
def add_fakes_2galaxy(science_image,boxsize, xcord_star, ycord_star, newx_star, newy_star, mag_array_star, flux_array_star, ran_mag_star, ran_flux_star, background_array_star, scaling_factor_star, CCD_Num_star, mag_best_star, alpha_array, delta_array, zeropoint, seeing, saturation, gain, readnoise, MOONILLF, AIRMASS, ELLIP, MEDSKY, SKYSIG,LMT_MG, MJD, MoonRA, MoonDec, PTFFIELD, galareas):
#This step finds the galaxies and adds fake stars to them
h=open('Results_V'+str(vnum)+'/Galaxies/'+science_image[1]+'_Galaxy_Catalog_V'+str(vnum)+'.cat') #Opens the Galaxy catalog
f=open('Results_V'+str(vnum)+'/Fake_Star_Catalog/'+science_image[1]+'_Fake_Star_Catalog_V'+str(vnum)+'.dat','w') #Opens the fake star catalog
reg=open('Results_V'+str(vnum)+'/Fake_Star_Catalog/'+science_image[1]+'_Fakes_Star_Regions_V'+str(vnum)+'.reg','w') #creates region file
ch=open('Results_V'+str(vnum)+'/Fake_Star_Catalog/'+science_image[1]+'_add_F2G_progress_V'+str(vnum)+'.dat','a') #Debugging file
hin=h.readline() #reads first line
fake_star_array=[] #prepares and array for fake stars
for stars in range(0,len(xcord_star)):
fake_star_array.append(stars) #creates an array [0,1,2,3, etc]
gal_line_array=[] #prepares and array for galaxies
while hin: #Adds all galxies into an array
gal_line_array.append(hin)
hin=h.readline()
#while fin: #adds all stars into an array
# fake_star_array.append(fin)
# fin=f.readline()
h.close()
#f.close()
hdulist_sci= fits.open(science_image[0]+science_image[1]+'.fits',ignore_missing_end=True) #THE SCIENCE DATA THAT WILL OPENED AND MODIFIED
science_data= hdulist_sci[0].data
resy=science_data.shape[0]
resx=science_data.shape[1]
ch.write(str('Resolution:')+' '+str(resy)+' '+str(resx)+'\n')
#print len(fake_star_array), ' Fake Stars have been added to ', len(fake_star_array), ' Galaxies'
#print gal_line_array[1]
#print 'Number of Fakes to Be added to hosts: ', int(len(xcord_star)*0.9)
num_of_fakes_all=0
#j=open('Results_V'+str(vnum)+'/Fakes_added/'+science_image[1]+'_Flux_Boxes_V'+str(vnum)+'.dat','w')
galaxy_mask=numpy.ones((resy,resx),dtype=bool)
for i in range(0,int(len(xcord_star)*0.9)): #Will only add n*0.9 fake stars to n Galaxies
#host_galaxy=gal_line_array.pop(random.randrange(0,len(gal_line_array))) #selecting a random host galaxy. Used .pop() so that the same galaxy isnt chosen twice
source_star=fake_star_array.pop(random.randrange(0,len(fake_star_array))) #selecting a random source star. Used .pop() so that the same star isnt chosen twice
ch.write(str('!!!!!')+' '+str(num_of_fakes_all)+' '+str('!!!!!')+'\n')
#print y
#print 'len: ',len(gal_line_array)
#ln=host_galaxy.split()
#x=float(ln[3])
#y=float(ln[4])
#print 'Lenth of Possible Galaxies: ', len(gal_line_array)
while len(gal_line_array)>0: #and num_of_fakes_all<len(xcord_star):
#host_galaxy=random.choice(gal_line_array)
#print 'Host Galaxy: ', host_galaxy
host_galaxy=gal_line_array.pop(random.randrange(0,len(gal_line_array))) #selecting a random host galaxy. Used .pop() so that the same galaxy isnt chosen twice
ln=host_galaxy.split()
x=float(ln[3]) #Galaxy X-cord
y=float(ln[4]) #Galaxy Y-cord
galaxy_mag_auto=float(ln[2])
galaxy_mag_best=float(ln[17])
galaxy_flux=float(ln[1])
galaxy_background=float(ln[5])
galaxy_mask[0:40,0:2048]=False
galaxy_mask[4056:4096,0:2048]=False
galaxy_mask[0:4096,0:40]=False
galaxy_mask[0:4096,2008:2048]=False
ch.write(str('Galaxy Mask part 1 done')+'\n')
if galaxy_mask[y,x]==False:
#print 'Cant Go there'
continue
else:
r=40 #Radius of Mask
ym,xm = numpy.ogrid[-y:resy-y, -x:resx-x] #Some clever numpy stuff
mask = xm*xm + ym*ym <= r*r
galaxy_mask[mask]=False
'Doing Usual Business'
#
CXX=float(ln[10])
CYY=float(ln[11])
CXY=float(ln[12])
FR_02=float(ln[20])
FR_05=float(ln[21])
FR_09=float(ln[22])
Host_Elongation=float(ln[6])
Host_Ellipticity=float(ln[7])
Host_Alpha=float(ln[18])
Host_Dec=float(ln[19])
R=3.0
#----------------THIS IS A GRID USED FOR CALCULATING STAR POSITIONS !!NOT!! FOR SCALING STARS
#Draw a large grid around the galaxy of say 20,20 pixels. Run through that grid for every x and y and if it satisfies the equation on page 32 of the sextractor manual then append it to an
#array. Then randomly choose a coordinate and insert a fake star there.
gyo,gxo= numpy.indices((40,40))
gx=gxo+(x-20)
gy=gyo+(y-20)
#print 'GX GY: ', gx, gy
#print 'Done gx gy'
goodcords=numpy.where(((CXX*((gx-x)*(gx-x)))+(CYY*((gy-y)*(gy-y)))+(CXY*((gx-x)*(gy-y))) <= 3)==True)
ch.write(str('Good Cords section CXX CYY: Done')+'\n')
if len(goodcords[0])-1<1:
continue
#print 'Done Good Cords'
#print 'Length of goodcords xy: ', len(goodcords[0]), len(goodcords[1])
#print 'Good Cords: ', goodcords
gc=random.randint(0,len(goodcords[0])-1)
ch.write(str('Choosing gc: Done')+'\n')
#print 'Done gc'
newy=(goodcords[0][gc])+(y-20)
newx=(goodcords[1][gc])+(x-20)
#print 'DOne newx newy'
sourcex=xcord_star[source_star] #stars current x location
sourcey=ycord_star[source_star] #stars current y location
ch.write(str('Newy and Newx: Done')+'\n')
##Creating the fboxes
fbox1=numpy.sum(science_data[newy,newx])
fbox2=numpy.sum(science_data[newy,newx]) + numpy.sum(science_data[newy-1.0,newx]) + numpy.sum(science_data[newy+1.0,newx]) + numpy.sum(science_data[newy,newx-1.0]) + numpy.sum(science_data[newy,newx+1.0])
fbox3=numpy.sum(science_data[newy-1.0:newy+2.0, newx-1.0:newx+2.0])
fbox4=numpy.sum(science_data[newy-1.0:newy+2.0, newx-1.0:newx+2.0]) + numpy.sum(science_data[newy-2.0,newx]) + numpy.sum(science_data[newy+2.0,newx]) + numpy.sum(science_data[newy, newx-2.0]) + numpy.sum(science_data[newy, newx+2.0])
fbox5=numpy.sum(science_data[newy-2.0:newy+3.0, newx-2.0:newx+3.0])
fbox6=numpy.sum(science_data[newy-5.0:newy+6.0, newx-5.0:newx+6.0])
ch.write(str('Fboxes: Done')+'\n')
reg.write(str(newx)+' '+str(newy)+'\n') #fake star region file
scale_fac=scaling_factor_star[source_star] #scale factor
back=background_array_star[source_star] #background
#---Old area to be scaled---
startx=int(sourcex-10.0)
starty=int(sourcey-10.0)
finx=int(sourcex+10.0)
finy=int(sourcey+10.0)
#---New area to have flux added---
Nstartx=newx-10.0
Nstarty=newy-10.0
Nfinx=newx+10.0
Nfiny=newy+10.0
newdata=numpy.ones((20,20)) #Preparing a blank gird for scaled objects
newdata[0:20,0:20]=(((science_data[starty:finy,startx:finx]))-back)*scale_fac #inserting scaled object
ch.write(str('New scaled Data: Added')+'\n')
#print x,y
#print 'New Data Shape: ', newdata.shape
#print 'Science Shape: ', science_data[starty:finy,startx:finx].shape
science_data[Nstarty:Nfiny, Nstartx:Nfinx]= (science_data[Nstarty:Nfiny, Nstartx:Nfinx]) + newdata #Modifying the science image
f.write(str(xcord_star[source_star])+' '+str(ycord_star[source_star])+' '+str(alpha_array[source_star])+' '+str(delta_array[source_star])+' '+str(newx)+' '+str(newy)+' '+str(mag_array_star[source_star])+' '+str(mag_best_star[source_star])+' '+str(flux_array_star[source_star])+' '+str(ran_mag_star[source_star])+' '+str(ran_flux_star[source_star])+' '+str(background_array_star[source_star])+' '+str(scaling_factor_star[source_star])+' '+str(int(PTFFIELD))+' '+str(CCD_Num_star[source_star])+' '+str(x)+' '+str(y)+' '+str(Host_Alpha)+' '+str(Host_Dec)+' '+str(galaxy_mag_auto)+' '+str(galaxy_mag_best)+' '+str(galaxy_flux)+' '+str(galaxy_background)+' '+str(CXX)+' '+str(CYY)+' '+str(CXY)+' '+str(Host_Elongation)+' '+str(Host_Ellipticity)+' '+str(FR_02)+' '+str(FR_05)+' '+str(FR_09)+' '+str(fbox1)+' '+str(fbox2)+' '+str(fbox3)+' '+str(fbox4)+' '+str(fbox5)+' '+str(fbox6)+' '+str(gain)+' '+str(readnoise)+' '+str(MOONILLF)+' '+str(MoonRA)+' '+str(MoonDec)+' '+str(AIRMASS)+' '+str(seeing)+' '+str(ELLIP)+' '+str(MEDSKY)+' '+str(SKYSIG)+' '+str(zeropoint)+' '+str(LMT_MG)+' '+str(MJD)+'\n')
num_of_fakes_all+=1
ch.write(str('Host Galaxy: Done')+'\n')
break
for g in range(0,int(len(xcord_star)-int(len(xcord_star)*0.9))):
Star_Location=True
#print 'How Many Hostless: ',
#print len(fake_star_array)
source_star=fake_star_array.pop(random.randrange(0,len(fake_star_array)))
ch.write(str('Hostless Source Star: Chosen')+'\n')
while Star_Location==True:
hostlessx=int(random.uniform(40.0,2008.0))
hostlessy=int(random.uniform(40.0,4056.0))
sourcex=xcord_star[source_star] #stars current x location
sourcey=ycord_star[source_star] #stars current y location
reg.write(str(hostlessx)+' '+str(hostlessy)+'\n') #fake star region file
scale_fac=scaling_factor_star[source_star] #scale factor
back=background_array_star[source_star] #background
ch.write(str('Hostless Location: Chosen')+'\n')
if galaxy_mask[hostlessy,hostlessx]==False and galareas[hostlessy,hostlessx]==False:
#print 'Cant Go there<-- Hostless'
continue
else:
r=40 #Radius of Mask
ym,xm = numpy.ogrid[-hostlessy:resy-hostlessy, -hostlessx:resx-hostlessx] #Some clever numpy stuff
mask = xm*xm + ym*ym <= r*r
galaxy_mask[mask]=False
ch.write(str('Hostless r and Mask: Done')+'\n')
#---Old area to be scaled---
startx=int(sourcex-10.0)
starty=int(sourcey-10.0)
finx=int(sourcex+10.0)
finy=int(sourcey+10.0)
#---New area to have flux added---
Nstartx=hostlessx-10.0
Nstarty=hostlessy-10.0
Nfinx=hostlessx+10.0
Nfiny=hostlessy+10.0
fbox1=numpy.sum(science_data[hostlessy,hostlessx])
fbox2=numpy.sum(science_data[hostlessy,hostlessx]) + numpy.sum(science_data[hostlessy-1.0,hostlessx]) + numpy.sum(science_data[hostlessy+1.0,hostlessx]) + numpy.sum(science_data[hostlessy,hostlessx-1.0]) + numpy.sum(science_data[hostlessy,hostlessx+1.0])
fbox3=numpy.sum(science_data[hostlessy-1.0:hostlessy+2.0, hostlessx-1.0:hostlessx+2.0])
fbox4=numpy.sum(science_data[hostlessy-1.0:hostlessy+2.0, hostlessx-1.0:hostlessx+2.0]) + numpy.sum(science_data[hostlessy-2.0,hostlessx]) + numpy.sum(science_data[hostlessy+2.0,hostlessx]) + numpy.sum(science_data[hostlessy, hostlessx-2.0]) + numpy.sum(science_data[hostlessy, hostlessx+2.0])
fbox5=numpy.sum(science_data[hostlessy-2.0:hostlessy+3.0, hostlessx-2.0:hostlessx+3.0])
fbox6=numpy.sum(science_data[hostlessy-5.0:hostlessy+6.0, hostlessx-5.0:hostlessx+6.0])
ch.write(str('Hostless Fbox: Done')+'\n')
newdata=numpy.ones((20,20)) #Preparing a blank gird for scaled objects
newdata[0:20,0:20]=(((science_data[starty:finy,startx:finx]))-back)*scale_fac #inserting scaled object
#print x,y
#print 'New Data Shape: ', newdata.shape
#print 'Science Shape: ', science_data[starty:finy,startx:finx].shape
science_data[Nstarty:Nfiny, Nstartx:Nfinx]= (science_data[Nstarty:Nfiny, Nstartx:Nfinx]) + newdata #Modifying the science image
f.write(str(xcord_star[source_star])+' '+str(ycord_star[source_star])+' '+str(alpha_array[source_star])+' '+str(delta_array[source_star])+' '+str(hostlessx)+' '+str(hostlessy)+' '+str(mag_array_star[source_star])+' '+str(mag_best_star[source_star])+' '+str(flux_array_star[source_star])+' '+str(ran_mag_star[source_star])+' '+str(ran_flux_star[source_star])+' '+str(background_array_star[source_star])+' '+str(scaling_factor_star[source_star])+' '+str(int(PTFFIELD))+' '+str(CCD_Num_star[source_star])+' '+str(99999.99)+' '+str(99999.99)+' '+str(99999.99)+' '+str(99999.99)+' '+str(99999.99)+' '+str(99999.99)+' '+str(99999.99)+' '+str(99999.99)+' '+str(99999.99)+' '+str(99999.99)+' '+str(99999.99)+' '+str(99999.99)+' '+str(99999.99)+' '+str(99999.99)+' '+str(99999.99)+' '+str(99999.99)+' '+str(fbox1)+' '+str(fbox2)+' '+str(fbox3)+' '+str(fbox4)+' '+str(fbox5)+' '+str(fbox6)+' '+str(gain)+' '+str(readnoise)+' '+str(MOONILLF)+' '+str(MoonRA)+' '+str(MoonDec)+' '+str(AIRMASS)+' '+str(seeing)+' '+str(ELLIP)+' '+str(MEDSKY)+' '+str(SKYSIG)+' '+str(zeropoint)+' '+str(LMT_MG)+' '+str(MJD)+'\n')
num_of_fakes_all+=1
Star_Location=False
ch.write(str('All Hostless Done')+'\n')
hdulist_sci.writeto(science_image[0]+science_image[1]+'_fakesV'+str(vnum)+'.fits', output_verify='ignore', clobber=True) #Saving image after loop of 200 Stars is complete
#j.close()
reg.close()
f.close()
hdulist_sci.close()
#print num_of_fakes_all, 'fake Stars Added to Galaxies and hostless in the Image: ', science_image[1]
ch.write(str('Num of Fakes Added:')+' '+str(num_of_fakes_all)+'\n')
ch.close()
#Creating a Galaxy Mask Fits file
'''
galaxy_mask_float=galaxy_mask.astype(int)
hdu=fits.PrimaryHDU(galaxy_mask_float)
hdu.scale(type='int16')
hdulist=fits.HDUList([hdu])
#print hdulist.info()
hdulist.writeto('Results_V'+str(vnum)+'/Fake_Star_Catalog/'+science_image[1]+'_GMask_V'+str(vnum)+'.fits', clobber=True, output_verify='ignore')
hdulist.close()
'''
def Execute(run):
    """
    Run the fake-star injection pipeline on one science image.

    run: [directory, basename] pair (basename without the '.fits' extension).
    Reads calibration keywords from the image header, runs SExtractor,
    selects and scales bright stars, then plants them on galaxies and in
    empty sky.  Failures are appended to Bad_Images_V<vnum>.dat and the
    image is skipped; successes (with wall-clock time) go to
    Good_Images_V<vnum>.dat.  Returns None.
    """
    christ = open('Results_V'+str(vnum)+'/Images_Doing'+str(vnum)+'.dat', 'a')
    science_image = run
    tstart = time.time()
    sci_fil = science_image[0]+science_image[1]+'.fits'
    try:
        hdulist_multi_sci = fits.open(science_image[0]+science_image[1]+'.fits')
    # BUG FIX: 'except IOError or Warning or UnboundLocalError' only caught
    # IOError, because 'or' evaluates to its first truthy operand.  A tuple
    # is required to catch all three exception types.
    except (IOError, Warning, UnboundLocalError):
        bad_images = open('Results_V'+str(vnum)+'/Bad_Images_V'+str(vnum)+'.dat', 'a')
        bad_images.write(str(science_image[0])+str(science_image[1])+str('.fits')+' '+str('Reason: Astropy Could not Open the .fits file')+'\n')
        bad_images.close()
        christ.close()  # BUG FIX: don't leak the progress-log handle on early return
        return
    try:
        hdulist_weight = fits.open(science_image[0]+science_image[1]+'.weight.fits')
    except (IOError, Warning, UnboundLocalError):  # see BUG FIX note above
        bad_images = open('Results_V'+str(vnum)+'/Bad_Images_V'+str(vnum)+'.dat', 'a')
        bad_images.write(str(science_image[0])+str(science_image[1])+str('.fits')+' '+str('Reason: Astropy Could not Open the weight file')+'\n')
        bad_images.close()
        christ.close()
        return
    christ.write(str(science_image[0]+science_image[1])+'.fits'+'\n')
    hdulist_multi_sci.verify('fix')
    # Calibration / observing-condition keywords consumed downstream.
    zeropoint = float(hdulist_multi_sci[0].header['UB1_ZP'])
    seeing = float(hdulist_multi_sci[0].header['SEEING'])
    saturation = 55000.0  # hard-coded; the header 'SATURATE' value is deliberately unused
    gain = float(hdulist_multi_sci[0].header['GAIN'])
    CCD_Num = float(hdulist_multi_sci[0].header['CCDID'])
    PTFFIELD = int(hdulist_multi_sci[0].header['PTFFIELD'])
    readnoise = float(hdulist_multi_sci[0].header['READNOI'])
    MOONILLF = float(hdulist_multi_sci[0].header['MOONILLF'])
    AIRMASS = float(hdulist_multi_sci[0].header['AIRMASS'])
    ELLIP = (hdulist_multi_sci[0].header['ELLIP'])
    if ELLIP == 'NAN.0':
        # Some headers carry the literal string 'NAN.0'; such images are unusable.
        bad_images = open('Results_V'+str(vnum)+'/Bad_Images_V'+str(vnum)+'.dat', 'a')
        bad_images.write(str(science_image[0])+str(science_image[1])+str('.fits')+' '+str('Reason: ELLIP has a NAN')+'\n')
        bad_images.close()
        christ.close()
        return
    else:
        ELLIP = float(hdulist_multi_sci[0].header['ELLIP'])
    MEDSKY = float(hdulist_multi_sci[0].header['MEDSKY'])
    SKYSIG = float(hdulist_multi_sci[0].header['SKYSIG'])
    LMT_MG = float(hdulist_multi_sci[0].header['LMT_MG'])
    MJD = float(hdulist_multi_sci[0].header['OBSMJD'])
    MoonRA = float(hdulist_multi_sci[0].header['MOONRA'])
    MoonDec = float(hdulist_multi_sci[0].header['MOONDEC'])
    fake_stars = 60  # number of fake stars per image (integer please!)
    hdulist_multi_sci.close()
    Sextract(science_image, zeropoint, seeing, saturation, gain)
    catsize = Enough_Objects(science_image)
    if catsize == False:
        # Not enough detections (<300): log it, remove the half-built catalog, skip.
        bad_images = open('Results_V'+str(vnum)+'/Bad_Images_V'+str(vnum)+'.dat', 'a')
        bad_images.write(str(science_image[0])+str(science_image[1])+str('.fits')+' '+str('Reason: Sextractor did not detect enough objects (<300)')+'\n')
        bad_images.close()  # BUG FIX: handle was previously leaked on this path
        os.remove('Results_V'+str(vnum)+'/Catalog/'+science_image[1]+'_Catalog_V'+str(vnum)+'.cat')
        christ.close()
        return
    x, y, mag, flux, back, magnitude_best, alpha_sky, delta_sky = Selecting_Bright(science_image)
    christ.write(str('Selecting Bright Done: ')+str(science_image[1])+'\n')
    xcord_star, ycord_star, newx_star, newy_star, mag_array_star, flux_array_star, ran_mag_star, ran_flux_star, background_array_star, scaling_factor_star, CCD_Num_star, faint_fake, mag_best_star, alpha_array, delta_array = Scaling(science_image, x, y, mag, flux, back, zeropoint, fake_stars, CCD_Num, magnitude_best, alpha_sky, delta_sky)
    christ.write(str('Scaling Done: ')+str(science_image[1])+'\n')
    mag_log = open('Results_V'+str(vnum)+'/Magnitude_Log_File.dat', 'a')
    mag_log.write(str(science_image[0])+str(science_image[1])+str('.fits')+' '+str(mag[0])+' '+str(mag[-1])+' '+str(faint_fake)+' '+str('22.5')+'\n')
    mag_log.close()  # BUG FIX: handle was previously never closed
    christ.write(str('Trying to Find Galaxies: ')+str(science_image[1])+'\n')
    galareas = selecting_galaxies(science_image)
    christ.write(str('Found Galaxies: ')+str(science_image[1])+'\n')
    boxsize = [3, 5, 7]  # This is redundant now, please do not consider this useful.
    add_fakes_2galaxy(science_image, boxsize, xcord_star, ycord_star, newx_star, newy_star, mag_array_star, flux_array_star, ran_mag_star, ran_flux_star, background_array_star, scaling_factor_star, CCD_Num_star, mag_best_star, alpha_array, delta_array, zeropoint, seeing, saturation, gain, readnoise, MOONILLF, AIRMASS, ELLIP, MEDSKY, SKYSIG, LMT_MG, MJD, MoonRA, MoonDec, PTFFIELD, galareas)
    t_total = time.time()-tstart
    good_images = open('Results_V'+str(vnum)+'/Good_Images_V'+str(vnum)+'.dat', 'a')
    good_images.write(str(science_image[0])+str(science_image[1])+str('.fits')+' '+str(t_total)+'\n')
    good_images.close()  # BUG FIX: handle was previously never closed
    christ.write(str('All Good: ')+str(science_image[1])+'\n')
    christ.close()
#-----------------------------------RUN PIPELINE------------------------------------------
def Run_All(masterlist):
file_structure()
'''
all_fits=[] #Establishing an array to find the files
#path=[]
#fnames=[]
for dirpath,dirname,filenames in os.walk(os.path.abspath('../../fakes')): #Traverses through a directory tree
for file in filenames:
fileex=os.path.splitext(file)[-1] #Splits the file name, [-1] means it will look at the extension
if fileex== '.fits': #wanted all .fits files
all_fits.append([dirpath, file])
#print all_fits
science_fits=[]
for i in range(len(all_fits)):
#fname=all_fits[1]
ln=all_fits[i]
fname=ln[1].split('.')
#print fname
if fname[-2]=='w':
science_fits.append([ln[0]+str('/'), (os.path.splitext(ln[1])[0])])
'''
science_fits=[]
k=open(masterlist)
for line in k:
koo=line.strip('.fits\n')
kn=koo.split(' ')
science_fits.append([kn[0]+str('/'),kn[1]])
#print 'Science_Fits', science_fits
bad_images=open('Results_V'+str(vnum)+'/Bad_Images_V'+str(vnum)+'.dat','a')
bad_images.close()
good_images=open('Results_V'+str(vnum)+'/Good_Images_V'+str(vnum)+'.dat','a')
good_images.close()
mag_log=open('Results_V'+str(vnum)+'/Magnitude_Log_File.dat','a')
mag_log.close()
mag_log_col=open('Results_V'+str(vnum)+'/Magnitude_Log_File.columns','w')
mag_log_col.write(str('1. Path to Image')+'\n'+str('2. Brightest Source')+'\n'+str('3. Faintest Source Mag')+'\n'+str('4. Brightest Fake')+'\n'+str('5. Faintest Fake'))
mag_log_col.close()
t0=time.time()
processors=multiprocessing.cpu_count()
pool=Pool(processors)
pool.map(Execute,science_fits)
pool.close()
'''
#Single Core Test, comment out the above Multistuff
for run in science_fits:
Execute(run)
'''
print 'V'+str(vnum)+' took: ', time.time()-t0, 'seconds'
# Entry point: process every image listed in the master list.
Run_All('Master.list')
#Run_All('Nersc_test_List.txt')
|
UTF-8
|
Python
| false | false | 2,014 |
12,807,592,510,869 |
450552dd5d76dc6c39549e7b57d2070e05ec8d28
|
c3d0e1a6a3529c04d6daa689da7c24a38e7aeb44
|
/build/src/nox/netapps/tests/pyunittests/vlan_parse_test.py
|
a47f2792842bae450e385c02062970dbbbf2eecf
|
[
"GPL-3.0-or-later",
"GPL-3.0-only"
] |
non_permissive
|
esy2k/openflowq
|
https://github.com/esy2k/openflowq
|
22931d59ab409e1ce65f4c4dbee199985bd1fd29
|
635cbfd9da19f242f7d1f5ae5413e85ba4e5bd3b
|
refs/heads/master
| 2021-01-10T19:24:32.908051 | 2011-07-14T07:14:51 | 2011-07-14T07:14:51 | 2,040,499 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
../../../../../../src/nox/netapps/tests/pyunittests/vlan_parse_test.py
|
UTF-8
|
Python
| false | false | 2,011 |
10,746,008,189,128 |
9993a57c5bd99a110e844a69404857d9ddadf7c1
|
5ab7a742c8a23a69f3100f744cd19a5a88252c60
|
/homeworks/hw2/GridFunction.py
|
89784bd0803db1ddd9568ea451f7e34ac69a178c
|
[
"MIT"
] |
permissive
|
thibaudatl/SoftwareDesign
|
https://github.com/thibaudatl/SoftwareDesign
|
33cab3c4f4a0b1f292e6c1e01e367664c0dd5090
|
6f9a02d68c44f18128cb88fba002101e316bbf05
|
refs/heads/master
| 2021-01-22T00:24:27.940508 | 2014-10-17T18:17:17 | 2014-10-17T18:17:17 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 16 00:29:18 2014
@author: leo
grid program
"""
def fourtime(d):
print d,
print d,
print d,
print d,
def across():
    # One horizontal border segment: a corner '+' followed by four dashes.
    print '+',
    fourtime("-")
def mid3():
print '|',
fourtime(' ')
print '|',
fourtime(' ')
print '|'
def mid4():
print '|',
fourtime(' ')
print '|',
fourtime(' ')
print '|',
fourtime(' ')
print '|'
def grid3x3():
across()
across()
print '+'
mid3()
mid3()
mid3()
mid3()
across()
across()
print '+'
mid3()
mid3()
mid3()
mid3()
across()
across()
print '+'
def grid4x4():
across()
across()
across()
print '+'
mid4()
mid4()
mid4()
mid4()
across()
across()
across()
print '+'
mid4()
mid4()
mid4()
mid4()
across()
across()
across()
print '+'
mid4()
mid4()
mid4()
mid4()
across()
across()
across()
print '+'
# Demo: draw both grids when the script is run.
grid3x3()
grid4x4()
|
UTF-8
|
Python
| false | false | 2,014 |
7,138,235,677,475 |
7d62033a85400e1a37d2c17cb06b5927ad654a1d
|
85d36df79f5d56986b9985c9dcc827f73be8744e
|
/data_analysis/job_profiling/create_pbs_dump.py
|
7ade01e78d5e8396b1947d1215a110753b9d67c2
|
[] |
no_license
|
AndreaBorghesi/EuroraPersonal_aborghesi
|
https://github.com/AndreaBorghesi/EuroraPersonal_aborghesi
|
5c98f53b2bbdc20aeb3d87013618f0f8324e84e0
|
80f7554adfa68bbdc2fe5c8a7d51360fdd91da15
|
refs/heads/master
| 2021-01-10T19:44:44.167862 | 2014-08-28T10:55:16 | 2014-08-28T10:55:16 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
'''
@Author: Andrea Borghesi
Job Profiling script - Create pbs dump, useful for further uses
'''
import os
from subprocess import Popen, PIPE
import datetime
import numpy as np
import pickle
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
LOGNAME = "jobs.log"
DATESFILE="date.tmp"
# ORIGINAL THOMAS MODELS
# 31 Marzo
#DIR="/media/sda4/eurora/data_analysis/job_profiling/dataPBS_31Marzo/"
#PLOTDIR="/media/sda4/eurora/data_analysis/job_profiling/thomasResults/31_Marzo_corretto/plots_pbs/"
#STATSDIR="/media/sda4/eurora/data_analysis/job_profiling/thomasResults/31_Marzo_corretto/stats_pbs/"
#DUMPDIR="/media/sda4/eurora/data_analysis/job_profiling/thomasResults/31_Marzo_corretto/pbs_dump/"
# 1 Aprile 1400 1600
#DIR="/media/sda4/eurora/data_analysis/job_profiling/dataPBS_1Aprile_1400_1600/"
#PLOTDIR="/media/sda4/eurora/data_analysis/job_profiling/thomasResults/1_Aprile_1400_1600/plots_pbs/"
#STATSDIR="/media/sda4/eurora/data_analysis/job_profiling/thomasResults/1_Aprile_1400_1600/stats_pbs/"
#DUMPDIR="/media/sda4/eurora/data_analysis/job_profiling/thomasResults/1_Aprile_1400_1600/pbs_dump/"
# 1 Aprile 1730 1730
#DIR="/media/sda4/eurora/data_analysis/job_profiling/dataPBS_1Aprile_1730_1930/"
#PLOTDIR="/media/sda4/eurora/data_analysis/job_profiling/thomasResults/1_Aprile_1730_1930/plots_pbs/"
#STATSDIR="/media/sda4/eurora/data_analysis/job_profiling/thomasResults/1_Aprile_1730_1930/stats_pbs/"
#DUMPDIR="/media/sda4/eurora/data_analysis/job_profiling/thomasResults/1_Aprile_1730_1930/pbs_dump/"
# 2 Aprile
#DIR="/media/sda4/eurora/data_analysis/job_profiling/dataPBS_2Aprile_1310_1510/"
#PLOTDIR="/media/sda4/eurora/data_analysis/job_profiling/thomasResults/2_Aprile_1310_1510/plots_pbs/"
#STATSDIR="/media/sda4/eurora/data_analysis/job_profiling/thomasResults/2_Aprile_1310_1510/stats_pbs/"
#DUMPDIR="/media/sda4/eurora/data_analysis/job_profiling/thomasResults/2_Aprile_1310_1510/pbs_dump/"
# 3 Aprile
#DIR="/media/sda4/eurora/data_analysis/job_profiling/dataPBS_3Aprile_1600_1800/"
#PLOTDIR="/media/sda4/eurora/data_analysis/job_profiling/thomasResults/3_Aprile_1600_1800/plots_pbs/"
#STATSDIR="/media/sda4/eurora/data_analysis/job_profiling/thomasResults/3_Aprile_1600_1800/stats_pbs/"
#DUMPDIR="/media/sda5/eurora/data_analysis/job_profiling/thomasResults/3_Aprile_1600_1800/pbs_dump/"
# ER MODELS
#AVERAGE ENERGY (CPU AND MEM APPS)
# 31 Marzo
#DIR="/media/sda4/eurora/data_analysis/job_profiling/dataPBS_31Marzo/"
#PLOTDIR="/media/sda4/eurora/data_analysis/job_profiling/thomasResults/ER_31_Marzo_corretto/plots/"
#STATSDIR="/media/sda4/eurora/data_analysis/job_profiling/thomasResults/ER_31_Marzo_corretto/stats/"
#DUMPDIR="/media/sda4/eurora/data_analysis/job_profiling/thomasResults/ER_31_Marzo_corretto/pbs_dump/"
# 2 Aprile 13:10-15:10
#DIR="/media/sda4/eurora/data_analysis/job_profiling/dataPBS_2Aprile_1310_1510/"
#PLOTDIR="/media/sda4/eurora/data_analysis/job_profiling/thomasResults/ER_2_Aprile_1310_1510/plots/"
#STATSDIR="/media/sda4/eurora/data_analysis/job_profiling/thomasResults/ER_2_Aprile_1310_1510/stats/"
#DUMPDIR="/media/sda4/eurora/data_analysis/job_profiling/thomasResults/ER_2_Aprile_1310_1510/pbs_dump/"
# 3 Aprile 13:10-15:10
#DIR="/media/sda4/eurora/data_analysis/job_profiling/dataPBS_3Aprile_1600_1800/"
#PLOTDIR="/media/sda4/eurora/data_analysis/job_profiling/thomasResults/ER_3_Aprile_1600_1800/plots/"
#STATSDIR="/media/sda4/eurora/data_analysis/job_profiling/thomasResults/ER_3_Aprile_1600_1800/stats/"
#DUMPDIR="/media/sda4/eurora/data_analysis/job_profiling/thomasResults/ER_3_Aprile_1600_1800/pbs_dump/"
#WORST CASE
#DIR="/media/sda4/eurora/data_analysis/job_profiling/dataPBS_31Marzo/"
#PLOTDIR="/media/sda4/eurora/data_analysis/job_profiling/thomasResults/ER_31_WC_Marzo_corretto/plots/"
#STATSDIR="/media/sda4/eurora/data_analysis/job_profiling/thomasResults/ER_WC_31_Marzo_corretto/stats/"
#DUMPDIR="/media/sda4/eurora/data_analysis/job_profiling/thomasResults/ER_WC_31_Marzo_corretto/pbs_dump/"
# 2 Aprile 13:10-15:10
#DIR="/media/sda4/eurora/data_analysis/job_profiling/dataPBS_2Aprile_1310_1510/"
#PLOTDIR="/media/sda4/eurora/data_analysis/job_profiling/thomasResults/ER_WC_2_Aprile_1310_1510/plots/"
#STATSDIR="/media/sda4/eurora/data_analysis/job_profiling/thomasResults/ER_WC_2_Aprile_1310_1510/stats/"
#DUMPDIR="/media/sda4/eurora/data_analysis/job_profiling/thomasResults/ER_WC_2_Aprile_1310_1510/pbs_dump/"
# 3 Aprile 13:10-15:10
#DIR="/media/sda4/eurora/data_analysis/job_profiling/dataPBS_3Aprile_1600_1800/"
#PLOTDIR="/media/sda4/eurora/data_analysis/job_profiling/thomasResults/ER_WC_3_Aprile_1600_1800/plots/"
#STATSDIR="/media/sda4/eurora/data_analysis/job_profiling/thomasResults/ER_WC_3_Aprile_1600_1800/stats/"
#DUMPDIR="/media/sda4/eurora/data_analysis/job_profiling/thomasResults/ER_WC_3_Aprile_1600_1800/pbs_dump/"
DIR="/media/sda4/eurora/data_analysis/job_profiling/temp/"
PLOTDIR="/media/sda4/eurora/data_analysis/job_profiling/temp/plots/"
STATSDIR="/media/sda4/eurora/data_analysis/job_profiling/temp/stats/"
DUMPDIR="/media/sda4/eurora/data_analysis/job_profiling/temp/pbs_dump/"
STEP=15 # defines the granularity of the sampling for the job profile (in seconds)
QUEUE_CUTOFF=0 # defines after how many second a job is considered to actually be in queue
NCORES=976
# average queue times in seconds
AVG_QTIME_DEBUG = 1800
AVG_QTIME_LOGPAR = 86400
AVG_QTIME_PAR = 21600
tot_time_in_q = 0 # sum the time spent in queue by jobs
tot_time_in_q_weighted = 0
avg_time_in_q = 0 # average time spent in queue by jobs
avg_time_in_q_weighted = 0
tot_time_in_q_DEBUG = 0
tot_time_in_q_weighted_DEBUG = 0
avg_time_in_q_DEBUG = 0
avg_time_in_q_weighted_DEBUG = 0
tot_time_in_q_PAR = 0
tot_time_in_q_weighted_PAR = 0
avg_time_in_q_PAR = 0
avg_time_in_q_weighted_PAR = 0
tot_time_in_q_LONGPAR = 0
tot_time_in_q_weighted_LONGPAR = 0
avg_time_in_q_LONGPAR = 0
avg_time_in_q_weighted_LONGPAR = 0
tot_cores_used = 0 # sum of the number of cores used by all jobs
tot_gpu_used = 0
tot_mic_used = 0
tot_running_jobs = 0
tot_perc_cores_used = 0
avg_cores_used = 0 # sum of tot_cores_used divided by the number instant of times considered
avg_gpu_used = 0
avg_mic_used = 0
avg_perc_cores_used = 0
avg_running_jobs = 0
# read interval datetimes from file
datefile = DIR + DATESFILE
try:
# read start_datetime and end_datetime from file
with open(datefile,'r') as df:
line = df.read().rstrip("\n")
except:
print "Can't read " + datesfile + "\n"
dates = line.split(";")
start_datetime = datetime.datetime.strptime(dates[0],"%Y-%m-%d %H:%M:%S")
end_datetime = datetime.datetime.strptime(dates[1].rstrip(),"%Y-%m-%d %H:%M:%S")
jobs_log = DIR + LOGNAME
cmd = 'cat ' + jobs_log
status = Popen(cmd, shell=True, stdin=open(os.devnull), stdout=PIPE, stderr=PIPE)
PBS_job_exec_time_series = []
node_profile = [] # contains the number of node used on eurora (a value for each STEP seconds)
cpu_profile = [] # contains the number of cores used on eurora (a value for each STEP seconds)
dates = [] # contains the dates
nodes = [] # contains the number of used nodes
cpus = [] # contains the number of used cpus
queue_time = []
job_exec = []
for line in iter(status.stdout.readline,''):
#print "Before split.."
data = line.split('__')
#print "Before parsing.."
# parsing the data from job.log
job_info = data[0].split(';')
job_id_string = job_info[0]
job_name = job_info[1]
user = job_info[2]
queue = job_info[3]
st = job_info[4]
start_time = datetime.datetime.strptime(st,"%Y-%m-%d %H:%M:%S")
job_resources = data[1].split('#')
resources_temp = []
for job_res in job_resources:
job_res_split = job_res.split(';')
resources_temp.append((job_res_split))
resources = [x for x in resources_temp if x!=[""]]
gpu_req = 0
mic_req = 0
for r in resources:
gpu_req += int(r[2])
mic_req += int(r[3])
job_times_req = data[2].split(';')
rt = job_times_req[0]
run_start_time = datetime.datetime.strptime(rt,"%Y-%m-%d %H:%M:%S")
et = job_times_req[1]
if(et!="None"): # it could happen that some jobs are still running when parsed and insertered into the DB
end_time = datetime.datetime.strptime(et,"%Y-%m-%d %H:%M:%S")
node_req = int(job_times_req[2])
cpu_req = int(job_times_req[3])
mem_req = int(job_times_req[4])
time_req = job_times_req[5].rstrip()
hhmm = time_req.split(':')
time_req_as_delta = datetime.timedelta(hours=int(hhmm[0]),minutes=int(hhmm[1]))
if(run_start_time + time_req_as_delta < end_time):
end_time = run_start_time + time_req_as_delta
#print "Putting value in list.."
#PBS_job_exec_time_series.append((start_time,run_start_time,end_time,node_req,cpu_req,time_req_as_delta,queue))
PBS_job_exec_time_series.append((job_id_string,start_time,run_start_time,end_time,node_req,cpu_req,mem_req,time_req_as_delta,gpu_req,mic_req,queue))
#print PBS_job_exec_time_series
# now in PBS_job_exec_time_series we have the running start time and the end time for each job, together with the related node and cpu requested
current_time = start_datetime
step = datetime.timedelta(seconds=STEP)
min_queue = datetime.timedelta(seconds=QUEUE_CUTOFF)
while current_time < end_datetime: # for every STEP seconds in the time interval we chose our job from
used_nodes = 0 # number of nodes currently used
used_cpus = 0 # number of cores currently used
in_queue = 0
gpus_req_jobs_in_queue = 0
gpus_req_all_jobs = 0
mics_req_jobs_in_queue = 0
mem_req_jobs_in_queue = 0
mics_req_all_jobs = 0
mem_req_all_jobs = 0
exec_jobs = 0
for (jid,st,rt,et,nnodes,cores,mem_req,treq,gpu_req,mic_req,q) in PBS_job_exec_time_series:
#for (st,rt,et,n,c,treq,q) in PBS_job_exec_time_series:
if (current_time >= rt and current_time <= et): # PBS consider occupied resources only during REAL execution time
#if (current_time >= rt and current_time <= (st + treq)): # PBS consider occupied resources for the whole estimated execution time
# used_nodes += n
# used_cpus +=c
# exec_jobs += 1
used_nodes += nnodes
used_cpus +=cores
exec_jobs += 1
gpus_req_all_jobs += gpu_req
mics_req_all_jobs += mic_req
mem_req_all_jobs += mem_req
tot_cores_used+=cores
tot_gpu_used+=gpu_req
tot_mic_used+=mic_req
tot_running_jobs+=1
if (st <= current_time <= rt and (rt-st)>=min_queue):
in_queue += 1
dates.append(current_time)
# print current_time
# print used_nodes
# print used_cpus
nodes.append(used_nodes)
cpus.append(used_cpus)
queue_time.append(in_queue)
job_exec.append(exec_jobs)
node_profile.append((current_time,used_nodes))
cpu_profile.append((current_time,used_cpus))
current_time += step
times_in_queue_weighted = []
times_in_queue = []
# Per-job time spent in queue: rt (run start) minus st (submission time).
# The "weighted" variant divides by the queue's historical average wait so
# values are comparable across queues with very different expectations.
for (jid,st,rt,et,nnodes,cores,mem_req,treq,gpu_req,mic_req,q) in PBS_job_exec_time_series:
    # for a fair comparison with PBS
    #if(rt>end_datetime):
    #    rt=end_datetime
    tt = rt-st
    #print rt
    if(q=="debug"):
        avg_q=AVG_QTIME_DEBUG
    if(q=="parallel"):
        avg_q=AVG_QTIME_PAR
    if(q=="longpar" or q=="np_longpar"):
        avg_q=AVG_QTIME_LOGPAR
    if(q=="reservation"):
        avg_q=AVG_QTIME_DEBUG
    # NOTE(review): if q matches none of the queues above, avg_q keeps the
    # value from the previous iteration (or is undefined on the very first
    # one, raising NameError) -- confirm all queue names are covered upstream.
    ttw = (rt-st)/avg_q
    #if(0<tt.total_seconds()<1000):
    if(0<tt.total_seconds()):
        # print tt.total_seconds()
        tot_time_in_q+=tt.total_seconds()
        tot_time_in_q_weighted+=ttw.total_seconds()
        times_in_queue.append(tt.total_seconds())
        times_in_queue_weighted.append(ttw.total_seconds())
        if(q=="debug"):
            tot_time_in_q_DEBUG+=tt.total_seconds()
            tot_time_in_q_weighted_DEBUG+=ttw.total_seconds()
        if(q=="parallel"):
            tot_time_in_q_PAR+=tt.total_seconds()
            tot_time_in_q_weighted_PAR+=ttw.total_seconds()
        if(q=="longpar" or q=="np_longpar"):
            tot_time_in_q_LONGPAR+=tt.total_seconds()
            tot_time_in_q_weighted_LONGPAR+=ttw.total_seconds()
avg_time_in_q=float(tot_time_in_q)/len(PBS_job_exec_time_series)
avg_time_in_q_weighted=float(tot_time_in_q_weighted)/len(PBS_job_exec_time_series)
avg_time_in_q_DEBUG=float(tot_time_in_q_DEBUG)/len(PBS_job_exec_time_series)
avg_time_in_q_weighted_DEBUG=float(tot_time_in_q_weighted_DEBUG)/len(PBS_job_exec_time_series)
avg_time_in_q_PAR=float(tot_time_in_q_PAR)/len(PBS_job_exec_time_series)
avg_time_in_q_weighted_PAR=float(tot_time_in_q_weighted_PAR)/len(PBS_job_exec_time_series)
avg_time_in_q_LONGPAR=float(tot_time_in_q_LONGPAR)/len(PBS_job_exec_time_series)
avg_time_in_q_weighted_LOGNPAR=float(tot_time_in_q_weighted_LONGPAR)/len(PBS_job_exec_time_series)
#print node_profile
#print cpu_profile
#for x in node_profile:
# print x
#print dates
# convert dates to matplotlib format
graph_dates = mdates.date2num(dates)
dates_as_int = np.arange(len(graph_dates)) # nummber if instants of time used
avg_cores_used=float(tot_cores_used)/len(graph_dates)
avg_gpu_used=float(tot_gpu_used)/len(graph_dates)
avg_mic_used=float(tot_mic_used)/len(graph_dates)
avg_running_jobs=float(tot_running_jobs)/len(graph_dates)
# calc avg load of system
for core in cpus:
perc_cores_used_per_istant = float(core)/NCORES
tot_perc_cores_used+=perc_cores_used_per_istant
avg_perc_cores_used=tot_perc_cores_used/len(cpus)
print PBS_job_exec_time_series
print cpus
dumpName = DUMPDIR + "PBS_dump.p"
pickle.dump(PBS_job_exec_time_series,open(dumpName,"wb"))
|
UTF-8
|
Python
| false | false | 2,014 |
13,855,564,533,820 |
22801671e5d23c93bd621d65abd38b65c7dbcbe7
|
009138cbef5db6de83edcc5b97d7408cc70056e8
|
/smart_selects/urls.py
|
0614cecfa986b2947d4b7c1f9cbd89783d6eec5d
|
[
"BSD-3-Clause"
] |
permissive
|
gearheart/django-smart-selects
|
https://github.com/gearheart/django-smart-selects
|
00a184634b5ca2a929be9324c85b6a95c582e0ae
|
99f51d297aa46d246ddbf579c382f8b3690d6914
|
refs/heads/master
| 2020-12-25T13:12:52.885038 | 2009-10-01T16:40:37 | 2009-10-01T16:40:37 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.conf.urls.defaults import *
# Single AJAX endpoint for chained selects: given app/model/field/value,
# 'filterchain' returns the filtered choice list.  Uses the old Django 1.x
# string-view 'patterns(...)' style (removed in Django 1.10).
urlpatterns = patterns('smart_selects.views',
    url(r'^(?P<app>[\w\-]+)/(?P<model>[\w\-]+)/(?P<field>[\w\-]+)/(?P<value>[\w\-]+)/$', 'filterchain', name='chained_filter'),
)
|
UTF-8
|
Python
| false | false | 2,009 |
910,533,067,202 |
a8788b7d86846a82fe7035e0db63a9c8d89591e1
|
f086bec89dfcb6a47888299577cc5ca7e6e1a637
|
/plot_rasterplots.py
|
800c399669b369a54f6dfb92af4f6bbeb7e2b568
|
[] |
no_license
|
laurentperrinet/bcpnn-mt
|
https://github.com/laurentperrinet/bcpnn-mt
|
7cbc5bf698e6c8d7c9ba387cac21e112cc91ff0b
|
ec9d6e5050db94ba7b062b52ec767356abc7a3ef
|
refs/heads/master
| 2021-05-26T15:38:00.576587 | 2013-11-22T13:53:09 | 2013-11-22T13:53:09 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
This script plots the input spike trains in the top panel
and the output rasterplots in the middle and lower panel
"""
import sys
import matplotlib
#matplotlib.use('Agg')
import pylab
import numpy as np
import re
import utils
import os
rcP= { 'axes.labelsize' : 24,
'label.fontsize': 24,
'xtick.labelsize' : 24,
'ytick.labelsize' : 24,
'axes.titlesize' : 32,
'legend.fontsize': 9}
if len(sys.argv) > 1:
param_fn = sys.argv[1]
if os.path.isdir(param_fn):
param_fn += '/Parameters/simulation_parameters.json'
f = file(param_fn, 'r')
print 'Loading parameters from', param_fn
params = json.load(f)
else:
print '\nPlotting the default parameters given in simulation_parameters.py\n'
import simulation_parameters
network_params = simulation_parameters.parameter_storage() # network_params class containing the simulation parameters
params = network_params.load_params() # params stores cell numbers, etc as a dictionary
def plot_input_spikes(ax, shift=0, m='o', c='k', ms=2):
    """
    Plot every excitatory cell's input spike train, one row per cell.

    shift offsets the row index, e.g. when drawing into the same axis as
    the output spikes.
    """
    for gid in xrange(params['n_exc']):
        spiketimes = np.load(params['input_st_fn_base'] + str(gid) + '.npy')
        rows = gid * np.ones(len(spiketimes)) + shift
        ax.plot(spiketimes, rows, m, color=c, alpha=.1, markersize=ms)
tp = np.loadtxt(params['tuning_prop_means_fn'])
def plot_input_spikes_sorted_in_space(ax, shift=0., m='o', c='g', sort_idx=0, ms=2):
    """
    Plot input spike trains with each cell's row given by one tuning value.

    sort_idx selects the tuning-property column of the global 'tp' array:
    0 = x-position, 1 = y-position, 2 = v_x, 3 = v_y.  Positions are wrapped
    into [0, 1); velocities use a cropped data range.  shift offsets all
    rows (useful when sharing an axis with output spikes).

    Raises ValueError if sort_idx is not 0-3.
    """
    n_cells = params['n_exc']
    sorted_idx = tp[:, sort_idx].argsort()
    if sort_idx == 0 or sort_idx == 1:
        ylim = (0, 1)
    else:  # it's a velocity --> adjust the range to plot
        crop = .8
        ylim = (crop * tp[:, sort_idx].min(), crop * tp[:, sort_idx].max())
    ylen = (abs(ylim[0] - ylim[1]))
    for i in xrange(n_cells):
        cell = sorted_idx[i]
        fn = params['input_st_fn_base'] + str(cell) + '.npy'
        if os.path.exists(fn):
            spiketimes = np.load(fn)
            nspikes = len(spiketimes)
            if sort_idx == 0:
                y_pos = (tp[cell, sort_idx] % 1.) / ylen * (abs(ylim[0] - ylim[1]))
            else:
                y_pos = (tp[cell, sort_idx]) / ylen * (abs(ylim[0] - ylim[1]))
            # BUG FIX: marker size was hard-coded to 2, silently ignoring the
            # ms parameter (the sibling output-spike plotter honours it).
            ax.plot(spiketimes, y_pos * np.ones(nspikes) + shift, m, color=c, alpha=.1, markersize=ms)
        # cells without an input file get no input (not well tuned) and are skipped
    if sort_idx == 0:
        ylabel_txt = 'Neurons sorted by $x$-pos'
    elif sort_idx == 1:
        ylabel_txt = 'Neurons sorted by $y$-pos'
    elif sort_idx == 2:
        ylabel_txt = 'Neurons sorted by $v_x$, '
    elif sort_idx == 3:
        ylabel_txt = 'Neurons sorted by $v_y$'
    else:
        # BUG FIX: any other sort_idx previously raised UnboundLocalError here.
        raise ValueError('sort_idx must be 0, 1, 2 or 3, got %r' % (sort_idx,))
    ax.set_ylabel(ylabel_txt)
def plot_output_spikes_sorted_in_space(ax, cell_type, shift=0., m='o', c='g', sort_idx=0, ms=2):
    """
    Raster-plot the recorded output spikes of one population ('exc' or 'inh'),
    rows placed by one tuning dimension (sort_idx: 0 = x-pos, else velocity).

    NOTE(review): the shift, m and c parameters are accepted but ignored --
    the plot call below hard-codes 'o' markers in black with no shift.
    """
    n_cells = params['n_%s' % cell_type]
    fn = params['%s_spiketimes_fn_merged' % cell_type] + '.ras'
    nspikes, spiketimes = utils.get_nspikes(fn, n_cells, get_spiketrains=True)
    sorted_idx = tp[:, sort_idx].argsort()
    if sort_idx == 0:
        ylim = (0, 1)
    else:
        # Velocity dimension: crop the extremes of the tuning range.
        crop = .8
        ylim = (crop * tp[:, sort_idx].min(), crop * tp[:, sort_idx].max())
    ylen = (abs(ylim[0] - ylim[1]))
    print '\n', 'sort_idx', sort_idx, ylim,
    for i in xrange(n_cells):
        cell = sorted_idx[i]
        if sort_idx == 0:
            # Wrap x-positions into [0, 1) before mapping onto the axis.
            y_pos = (tp[cell, sort_idx] % 1.) / ylen * (abs(ylim[0] - ylim[1]))
        else:
            y_pos = (tp[cell, sort_idx]) / ylen * (abs(ylim[0] - ylim[1]))
        ax.plot(spiketimes[cell], y_pos * np.ones(nspikes[cell]), 'o', color='k', markersize=ms)
#    n_yticks = 6
#    y_tick_idx = np.linspace(0, n_cells, n_yticks)
#    y_ticks = np.linspace(tp[:, sort_idx].min(), tp[:, sort_idx].max(), n_yticks)
#    y_ticklabels = []
#    for i in xrange(n_yticks):
#        y_ticklabels.append('%.2f' % y_ticks[i])
#    ax.set_yticks(y_tick_idx)
#    ax.set_yticklabels(y_ticklabels)
def plot_spikes(ax, fn, n_cells):
    """Raster-plot every cell's recorded spike train from file fn onto ax."""
    spike_counts, trains = utils.get_nspikes(fn, n_cells, get_spiketrains=True)
    for gid in xrange(int(len(trains))):
        row = gid * np.ones(spike_counts[gid])
        ax.plot(trains[gid], row, 'o', color='k', markersize=2)
# File names of the merged spike recordings for both populations.
fn_exc = params['exc_spiketimes_fn_merged'] + '.ras'
fn_inh = params['inh_spiketimes_fn_merged'] + '.ras'
pylab.rcParams['lines.markeredgewidth'] = 0
pylab.rcParams.update(rcP)
# ax1 is if input spikes shall be plotted in a seperate axis (from the output spikes)
#fig = pylab.figure(figsize=(14, 12))
#ax1 = fig.add_subplot(411)
#ax2 = fig.add_subplot(412)
#ax3 = fig.add_subplot(413)
#ax4 = fig.add_subplot(414)
# Single-axis figure: input and output rasters are overlaid in ax1.
fig = pylab.figure()#figsize=(14, 12))
pylab.subplots_adjust(bottom=.15, left=.15)#hspace=.03)
ax1 = fig.add_subplot(111)
#fig2 = pylab.figure(figsize=(14, 12))
#ax2 = fig2.add_subplot(111)
#ax3 = fig.add_subplot(413)
#ax4 = fig.add_subplot(414)
#ax1.set_title('Input spikes')
#ax3.set_ylabel('Exc ID')
#ax4.set_ylabel('Inh ID')
# x-position
# Overlay input spikes (blue) and excitatory output spikes (black),
# both sorted by the cells' preferred x-position.
plot_input_spikes_sorted_in_space(ax1, c='b', sort_idx=0, ms=3)
plot_output_spikes_sorted_in_space(ax1, 'exc', c='k', sort_idx=0, ms=3)
# sorted by velocity in direction x / y
#plot_input_spikes_sorted_in_space(ax2, c='b', sort_idx=2, ms=3)
#plot_output_spikes_sorted_in_space(ax2, 'exc', c='k', sort_idx=2, ms=3)
#plot_spikes(ax3, fn_exc, params['n_exc'])
#plot_spikes(ax4, fn_inh, params['n_inh'])
# Axis cosmetics: fixed tick positions/labels for the publication figure.
xticks = [0, 500, 1000, 1500]
ax1.set_xticks(xticks)
ax1.set_xticklabels(['%d' % i for i in xticks])
ax1.set_yticklabels(['', '.2', '.4', '.6', '.8', '1.0'])
ax1.set_xlabel('Time [ms]')
#ax2.set_xlabel('Time [ms]')
#ax3.set_xlabel('Time [ms]')
#ax4.set_xlabel('Time [ms]')
#ax1.set_ylim((0, params['n_exc'] + 1))
#ax2.set_ylim((0, params['n_exc'] + 1))
#ax3.set_ylim((0, params['n_exc'] + 1))
#ax4.set_ylim((0, params['n_inh'] + 1))
ax1.set_xlim((0, params['t_sim']))
#ax2.set_xlim((0, params['t_sim']))
#ax2.set_ylim((-3, 3))
#ax3.set_xlim((0, params['t_sim']))
#ax4.set_xlim((0, params['t_sim']))
# Save the figure into the run's figures folder, then show it interactively.
output_fn = params['figures_folder'] + 'rasterplot_sorted_by_tp.png'
print "Saving to", output_fn
pylab.savefig(output_fn, dpi=200)
#output_fn = params['figures_folder'] + 'rasterplot_sorted_by_tp.pdf'
#print "Saving to", output_fn
#pylab.savefig(output_fn, dpi=200)
#output_fn = params['figures_folder'] + 'rasterplot_sorted_by_tp.eps'
#print "Saving to", output_fn
#pylab.savefig(output_fn, dpi=200)
pylab.show()
|
UTF-8
|
Python
| false | false | 2,013 |
17,669,495,485,994 |
80cc04a281a9f4730d2353a7c5200423a33f92e8
|
7c2164ad65db606cec4a30d060e8417e2ae9ff95
|
/rules/management/commands/ruleadmin.py
|
d6ad033c5afc1bee89c4562c183e5863926fd11a
|
[] |
no_license
|
amaschas/rules
|
https://github.com/amaschas/rules
|
572f9dbfbf545dfb03e1abf90e6dad55b0430675
|
7c220f5203fa9f050fb6f5a194536364a5b78352
|
refs/heads/master
| 2021-01-25T03:19:45.005848 | 2013-05-25T04:44:22 | 2013-05-25T04:44:22 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import cProfile
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from rules.models import *
from rules.tasks import *
import logging
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
class Command(BaseCommand):
    """Admin command for rule maintenance.

    Supports deleting all rules/nicks, resetting scores, and (re)scoring a
    whole channel or a single rule, optionally under cProfile.
    """
    option_list = BaseCommand.option_list + (
        make_option('--delete-rules', action='store_true', default=False, help='Delete all rules'),
        make_option('--delete-nicks', action='store_true', default=False, help='Delete all nicks'),
        make_option('--reset-scores', action='store_true', default=False, help='Deletes all scores and resets score meta'),
        make_option('--score-channel', help='Score all rules for a single channel slug'),
        make_option('--score-rule', help='Score a single rule by id'),
        make_option('--batch-size', default=5000, help='The number of lines to batch in scoring tasks'),
        make_option('--profile', action='store_true', default=False, help='Enable profiling with cProfile'),
        make_option('--profile-output', default='/tmp/score_profile', help='Path to store profile output, defaults to /tmp/score_profile'),
    )

    def handle(self, *args, **options):
        """Dispatch on the given options; several actions may run in one call."""
        if options.get('profile'):
            # Created lazily; only the scoring branches below actually use it.
            prof = cProfile.Profile()
        if options.get('delete_rules'):
            self.stdout.write('Deleting rules')
            Rule.objects.all().delete()
            self.stdout.write('Rules successfully deleted')
        if options.get('delete_nicks'):
            self.stdout.write('Deleting nicks')
            Nick.objects.all().delete()
            self.stdout.write('Nicks successfully deleted')
        if options.get('reset_scores'):
            self.stdout.write('Deleting scores')
            Score.objects.all().delete()
            ScoreMeta.objects.all().delete()
            self.stdout.write('Scores successfully deleted')
        if options.get('score_channel'):
            try:
                channel = Channel.objects.get(slug=options['score_channel'])
                # Reset the line counter so the channel is rescored from scratch.
                channel.line_count = 0
                channel.save()
                self.stdout.write('Scoring channel "%s"' % channel.title)
                if options['profile']:
                    prof.runcall(update_channel, channel=channel)
                    # Bug fix: dump to the configured output path; previously the
                    # boolean value of --profile was (mis)used as the file name.
                    prof.dump_stats(options['profile_output'])
                else:
                    update_channel(channel)
                self.stdout.write('Finished scoring channel "%s"' % channel.title)
            except ObjectDoesNotExist:
                self.stdout.write('No channel with slug: "%s"' % options['score_channel'])
        if options.get('score_rule'):
            try:
                rule = Rule.objects.get(id=options['score_rule'])
                self.stdout.write('Scoring rule "%s"' % rule.name)
                if options['profile']:
                    # prof.runcall(update_rule, rule=rule, batch_size=options['batch_size'])
                    prof.runcall(update_rule, rule=rule)
                    prof.dump_stats(options['profile_output'])
                else:
                    update_rule(rule)
                self.stdout.write('Finished scoring rule "%s"' % rule.name)
            except ObjectDoesNotExist:
                self.stdout.write('No rule with id: "%s"' % options['score_rule'])
|
UTF-8
|
Python
| false | false | 2,013 |
10,488,310,138,413 |
f905e28040573042a5eae13b2521262321869b5a
|
ec2fe6118a8da66e3e6cfdd8c3d2eadb0eb1e6e2
|
/ex08/halton.py
|
d1eb52213ef2b184ca8f96ccfa2177c7fbed5c8e
|
[] |
no_license
|
jonasferoliveira/Computational-Science-I
|
https://github.com/jonasferoliveira/Computational-Science-I
|
d36d5a4865ae852e467e9f0f09eec5b72e972743
|
cbddceb3e1c7ed120100d74725e2fb8359efddc6
|
refs/heads/master
| 2023-03-20T01:26:01.014651 | 2012-01-09T04:17:58 | 2012-01-09T04:17:58 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
"""
-------------------------------------------------------------------------------
Ex08: Halton sequence
-------------------------------------------------------------------------------
Explanation:
halton sequence produces quasi random numbers
compared to pseudo rng, these are more evenly distributed
and some problems arise with higher primes, ex. 17, 19:
the first 16 points have perfect linear correlation...
How this code works:
to produce N q-rand-nr:
- select a prime base p
- for each int(i) in range(N)
- rewrite in base p
- reoder the digits (abcdef. -> .fedcba)
- convert this nr back to dec.sys
Notes / Convention:
-------------------------------------------------------------------------------
@author: Rafael Kueng
-------------------------------------------------------------------------------
HISTORY:
v1 2011-11-18 basic implementation
v2 2012-01-09 added real random nr
BUGS / TODO:
LICENSE:
none
-------------------------------------------------------------------------------
"""
from os import sys
from numpy import *
import matplotlib as mp
import pylab as pl
from random import random
def halton(index, base):
    """Return element *index* of the Halton sequence for the given *base*.

    The integer index is written in base *base* and its digits are mirrored
    around the radix point, e.g. (abc) -> 0.cba, which yields a quasi-random
    number in [0, 1).

    Bug fix: the original used ``i = i / base`` which is float division under
    Python 3 (the loop then never terminates correctly); integer division via
    divmod works identically under Python 2 and 3. The redundant ``int(i)``
    in the loop condition is gone as well.
    """
    result = 0
    f = 1.0 / base
    i = index
    while i > 0:
        # Peel off the least significant digit and place it behind the radix point.
        i, digit = divmod(i, base)
        result += f * digit
        f /= base
    return result
def main():
    """Ask for two bases, then compare Halton vs. pseudo-random 2D points."""
    N=1000
    # NOTE(review): Python 2 input() evaluates the typed text as code --
    # raw_input() with int() would be safer; confirm before changing.
    print 'Input base 1:'
    base1 = input()
    print 'input base 2:'
    base2 = input()
    # Quasi-random (Halton) coordinates, one base per axis.
    halt_x = [halton(x,base1) for x in range(N)]
    halt_y = [halton(x,base2) for x in range(N)]
    # Pseudo-random coordinates for comparison.
    rand_x = [random() for _ in range(N)]
    rand_y = [random() for _ in range(N)]
    # Top-left: both point sets overlaid.
    pl.subplot(221)
    pl.plot(halt_x,halt_y,'rx')
    #pl.plot(halt_x[0:512],halt_y[0:512],'rx')
    pl.plot(rand_x,rand_y,'gx')
    pl.title("Halton (quasi)[red] vs. Pseudo[green] Random numbers\n(use red/gren 3d glasses :) )")
    # Bottom row: each set on its own for easier comparison.
    pl.subplot(223)
    pl.plot(halt_x,halt_y,'rx')
    pl.title("Halton Numbers / Quasi Random")
    pl.subplot(224)
    pl.plot(rand_x,rand_y,'gx')
    pl.title("Pseudo Random Nr")
    pl.show()
def cmdmain(*args):
    """Command-line entry point: run main() and return 0 on success.

    Any exception raised by main() propagates unchanged to the caller
    (the original try/except merely re-raised it).
    """
    main()
    return 0  # exit errorlessly
def classmain():
    """Hook executed when this file is imported as a module (see below)."""
    print 'call main()'

if __name__ == '__main__':
    # Run as a script: delegate to cmdmain and propagate its exit status.
    sys.exit(cmdmain(*sys.argv))
else:
    # Imported as a module: just announce; main() must be called explicitly.
    classmain()
|
UTF-8
|
Python
| false | false | 2,012 |
10,067,403,371,015 |
5398245b7795b22ba6629fce9d7344cd56f45ec7
|
67e23336f04a2409c4600932c656de0d65340b9d
|
/migration/versions/001_initial_schema.py
|
d0e282d274575e75af28ba3948c0bc6df96315b3
|
[] |
no_license
|
rwilcox/turbogears2_setup_and_testing
|
https://github.com/rwilcox/turbogears2_setup_and_testing
|
7ce8e47ac5dffe31b7c1fb6077dc0cde2bf62ff3
|
0e0a4303fb5a029af5d44e936b954ce4e146d2d0
|
refs/heads/master
| 2016-08-03T00:00:45.480768 | 2010-02-21T04:55:13 | 2010-02-21T04:55:13 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from sqlalchemy import *
from migrate import *
# Bind the table metadata to the engine supplied by sqlalchemy-migrate.
metadata = MetaData(migrate_engine)

# Contact book table: one row per person.
# NOTE(review): unique=True on first_name looks unintentional (two contacts
# could legitimately share a first name) -- verify before relying on it.
contacts_table = Table("contacts", metadata,
    Column("id", Integer, primary_key=True),
    Column("first_name", Text, unique=True),
    Column("last_name", Text),
    Column("email", Text, unique=True)
)
def upgrade():
    """Create the contacts table (forward migration step 001)."""
    # Upgrade operations go here. Don't create your own engine; use the engine
    # named 'migrate_engine' imported from migrate.
    contacts_table.create()
def downgrade():
    """Drop the contacts table, reversing upgrade()."""
    # Operations to reverse the above upgrade go here.
    contacts_table.drop()
|
UTF-8
|
Python
| false | false | 2,010 |
12,240,656,798,238 |
bbe65a0f982f5271a9978b94097ca5e39d42ad80
|
c3deec2c9ed48f6acb289231ab3c9ceccb6eaf92
|
/util.py
|
313f3b9bfbc9321af3dbe0e0357840a794d973ce
|
[] |
no_license
|
chazu/dew_drop
|
https://github.com/chazu/dew_drop
|
9ad9eda4508b10c43632e8110b1e4d11934b44be
|
ec9efbe67406da7ecec7924f95eb29c4f56477f5
|
refs/heads/master
| 2021-01-19T11:25:59.131531 | 2014-01-09T17:49:04 | 2014-01-09T17:49:04 | 11,478,268 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def listify(gen):
    """Decorator: convert a generator function into one that returns a list.

    The wrapped function is called with the same arguments and its generator
    is fully consumed into a list.
    """
    import functools  # local import keeps the module header untouched
    @functools.wraps(gen)  # fix: preserve the wrapped function's name/docstring
    def patched(*args, **kwargs):
        return list(gen(*args, **kwargs))
    return patched


@listify
def chunks(l, n):
    """Return successive n-sized chunks from l (the last may be shorter)."""
    # range instead of xrange: behaviorally identical here and Python 3 safe.
    for i in range(0, len(l), n):
        yield l[i:i+n]
def multiIndex(the_object, index_array):
    """Drill into nested containers, applying each key/index in turn.

    Example: multiIndex(obj, ['a', 0]) is equivalent to obj['a'][0].
    An empty index_array returns the_object unchanged.
    """
    from functools import reduce  # Python 3 compat: reduce is no longer a builtin
    return reduce(lambda obj, key: obj[key], index_array, the_object)
def isDict(thing):
    """Return True if *thing* is a dict (including dict subclasses).

    Uses isinstance instead of the original ``type(thing) == dict`` so that
    dict subclasses (e.g. OrderedDict, defaultdict) are also recognized.
    """
    return isinstance(thing, dict)
|
UTF-8
|
Python
| false | false | 2,014 |
3,547,642,988,894 |
062a2e6987374312372d1610642edea7d7739b21
|
868718d89d06b620f84f4a0125f0fff989c8e967
|
/examples/to_dec.py
|
53394e152ba11dc0c9cd647ecb3b21d500d6272a
|
[] |
no_license
|
kamillys/cucs_demo
|
https://github.com/kamillys/cucs_demo
|
c2fde375d80366ae856cc80b92b9cb179bcc0491
|
dabbd6f613fa35ffa8ddd6092ef4e60aa3c04dc1
|
refs/heads/master
| 2021-01-23T07:27:14.555461 | 2014-12-18T12:35:46 | 2014-12-18T12:38:26 | 21,289,060 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
"""Read binary-number strings (one per line, from stdin or file args)
and print each one in decimal."""
import os,sys
import fileinput

for line in fileinput.input():
    # int(s, 2) parses the line as a base-2 literal; surrounding whitespace is tolerated.
    cur = int(line, 2)
    # print() with a single argument behaves identically under Python 2 and 3.
    print(cur)
|
UTF-8
|
Python
| false | false | 2,014 |
14,980,845,969,888 |
a0e7fb6ed0c1396490d90de5bc9546efac44abee
|
1d4204c6766449214544a6511e138782712c0806
|
/Src/Module/Behavior/python/VisionDef.py
|
a18a13bcfac659ed07ed111817a77819d335fcee
|
[] |
no_license
|
spyfree/tjNaoBackup
|
https://github.com/spyfree/tjNaoBackup
|
2d0291a83f8473346546567ae7856634f6c5a292
|
0c7dd47f65e23be6e9d82a31f15ef69a3ce386cb
|
refs/heads/master
| 2016-09-06T08:20:52.068351 | 2014-01-31T03:33:19 | 2014-01-31T03:33:19 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
import time
import Tools.Constants as Constants
import math
'''Class MyInfo is unused for now; it will be used once the localization module is added.'''
class MyInfo:
    """
    Class just to store important information about me
    """
    def __init__(self):
        # Pose estimate; set to the field origin until localization updates it.
        self.x = 0.0
        self.y = 0.0
        self.h = 0.0  # heading
        self.penalized = False
        self.kicking = False

    def updateLoc(self, loc):
        """Mirror the localization pose into team-relative coordinates.

        NOTE(review): self.teamColor is never initialized in __init__, and
        sub180Angle is not defined or imported in this module -- both would
        raise at runtime; confirm where they are meant to come from.
        """
        if self.teamColor == Constants.TEAM_BLUE:
            self.x = loc.x
            self.y = loc.y
            self.h = loc.h
        else:
            # Red team: mirror the pose through the field center.
            self.x = Constants.FIELD_GREEN_WIDTH - loc.x
            self.y = Constants.FIELD_GREEN_HEIGHT - loc.y
            self.h = sub180Angle(loc.h - 180)
class Ball:
    """
    Class for holding all current Ball information
    """
    def __init__(self, visionBall):
        self.ballCenterX = 0 # ball x coordinate in the image
        self.ballCenterY = 0 # ball y coordinate in the image
        self.radius = 0      # ball radius in the image
        # NOTE(review): self.ballPer is initialized here but updateVision
        # writes self.ballper (lower case) -- likely a naming bug.
        self.ballPer = 0
        # Ball position relative to the robot (camera frame).
        self.localX = 0
        self.localY = 0
        self.localZ = 0
        self.confidence = 0
        self.dist = 0        # planar distance derived from localX/localY
        self.deg = 0         # bearing angle to the ball
        self.framesOn = 0    # consecutive frames with the ball visible
        self.framesOff = 0   # consecutive frames without the ball
        # Global-field estimates, filled by updateLoc().
        self.x = 0
        self.y = 0
        self.velX = 0
        self.velY = 0
        # History of the last confirmed sighting, reused while the ball
        # briefly drops out of vision.
        self.lastVisionDist = 0
        self.lastVisionDeg = 0
        self.lastVisionCenterX = 0
        self.lastVisionCenterY = 0
        self.lastLocalX = 0
        self.lastLocalY = 0
        self.lastLocalZ = 0
        self.lastradius = 0
        self.lastballper= 0
        self.lastTimeSeen = 0
        self.ballseen = 0    # countdown of frames to keep treating the ball as seen
        self.updateVision(visionBall)

    def updateVision(self,visionBall):
        '''update method gets list of vision updated information'''
        self.ballcf = visionBall.ballCf()
        if self.ballcf == 1 :
            '''For ballseen frames after a sighting the ball is still treated as seen'''
            self.ballseen = 2
            # Now update to the new stuff
            self.ballCenterX = visionBall.ballCenterX()
            self.ballCenterY = visionBall.ballCenterY()
            self.localX = visionBall.ballX()
            self.localY = visionBall.ballY()
            self.localZ = visionBall.ballZ()
            # Planar distance from the robot to the ball.
            self.dist = math.sqrt(self.localX*self.localX + self.localY*self.localY)
            self.ballper = visionBall.ballPer()
            self.radius = visionBall.radius()
            # NOTE(review): both non-zero branches compute the same expression;
            # math.atan2(localY, localX) would also avoid the ZeroDivisionError
            # that occurs when localX == 0 and localY != 0 -- confirm intent.
            if self.localY == 0:
                self.deg = 0
            elif self.localY > 0:
                self.deg = math.atan(self.localY / self.localX)
            else:
                self.deg = math.atan(self.localY / self.localX)
            # Hold our history
            self.lastVisionDist = self.dist
            self.lastVisionDeg = self.deg
            self.lastVisionCenterX = self.ballCenterX
            self.lastVisionCenterY = self.ballCenterY
            self.lastLocalX = self.localX
            self.lastLocalY = self.localY
            self.lastLocalZ = self.localZ
            self.lastradius = self.radius
            self.lastballper = self.ballper
            self.confidence = 1
        else:
            if self.ballseen > 0 :
                # Recently seen: keep reporting the last confirmed values
                # while counting the grace period down.
                self.ballseen = self.ballseen - 1
                self.ballCenterX = self.lastVisionCenterX
                self.ballCenterY = self.lastVisionCenterY
                self.localX = self.lastLocalX
                self.localY = self.lastLocalY
                self.localZ = self.lastLocalZ
                self.deg = self.lastVisionDeg
                self.dist = self.lastVisionDist
                self.ballper = self.lastballper
                self.radius = self.lastradius
                self.confidence = 1
            else:
                self.confidence = 0
        # Update the visible/invisible streak counters.
        if self.confidence > 0:
            self.framesOn += 1
            self.framesOff =0
        else:
            self.framesOff += 1
            self.framesOn =0
        # NOTE(review): called unconditionally, so lastTimeSeen is refreshed
        # even on frames where the ball is not seen -- verify this is intended.
        self.reportBallSeen()

    def reportBallSeen(self):
        """
        Reset the time since seen. Happens when we see a ball or when
        a teammate tells us he did.
        """
        self.lastTimeSeen = time.time()

    def timeSinceSeen(self):
        """
        Update the time since we last saw a ball
        """
        return time.time() - self.lastTimeSeen

    '''To be used once the localization module is added'''
    def updateLoc(self, loc, teamColor):
        """
        Update all of our inforamtion pased on the newest localization info
        """
        # Get latest estimates
        if teamColor == Constants.TEAM_BLUE:
            self.x = loc.ballX
            self.y = loc.ballY
            self.velX = loc.ballVelX
            self.velY = loc.ballVelY
        else:
            # Red team: mirror position and velocity through the field center.
            self.x = Constants.FIELD_WIDTH - loc.ballX
            self.y = Constants.FIELD_HEIGHT - loc.ballY
            self.velX = -loc.ballVelX
            self.velY = -loc.ballVelY
class YGoal:
def __init__(self, visionGoal):
'''initialization all values for FieldObject() class'''
self.goalcolor = None
self.dist = 0
self.distCert = 0
self.leftDeg = 0
self.rightDeg = 0
self.confidence = 0
self.IdCert = 0
self.framesOn = 0
self.framesOff = 0
# Setup the data from vision
self.updateVision(visionGoal)
def updateVision(self, visionGoal):
'''updates class variables with new vision information'''
self.dist = visionGoal.ygdist()
self.leftDeg = visionGoal.ygldeg()
self.rightDeg = visionGoal.ygrdeg()
self.confidence = visionGoal.ygoalcf()
self.goalcolor = visionGoal.ygoalColor()
self.distCert = visionGoal.ygdistCert()
self.IdCert = visionGoal.yIdCert()
# obj is in this frame
if self.confidence > 0:
self.framesOn += 1
self.framesOff = 0
# obj not in this frame
else:
self.framesOff += 1
self.framesOn = 0
class BGoal:
def __init__(self, visionGoal):
'''initialization all values for FieldObject() class'''
self.goalcolor = None
self.dist = 0
self.distCert = 0
self.leftDeg = 0
self.rightDeg = 0
self.confidence = 0
self.IdCert = 0
self.framesOn = 0
self.framesOff = 0
# Setup the data from vision
self.updateVision(visionGoal)
def updateVision(self, visionGoal):
'''updates class variables with new vision information'''
self.dist = visionGoal.bgdist()
self.leftDeg = visionGoal.bgldeg()
self.rightDeg = visionGoal.bgrdeg()
self.confidence = visionGoal.bgoalcf()
self.goalcolor = visionGoal.bgoalColor()
self.distCert = visionGoal.bgdistCert()
self.IdCert = visionGoal.bIdCert()
# obj is in this frame
if self.confidence > 0:
self.framesOn += 1
self.framesOff = 0
# obj not in this frame
else:
self.framesOff += 1
self.framesOn = 0
|
UTF-8
|
Python
| false | false | 2,014 |
19,628,000,580,931 |
2bfa95a47c1b8b9910f0e0c1beb54dc00d413e93
|
2ca72fa7867a266990e4a930164bd8bd91b48de5
|
/libagnos/cpp/SConstruct
|
da7ee136ac1f926891b341ce7d04452cc8d33804
|
[
"Apache-2.0"
] |
permissive
|
dmgolembiowski/agnos
|
https://github.com/dmgolembiowski/agnos
|
61d31e8a713ea506e4db618b8f245869e873bb9a
|
1e5a5109fa67a04733bd03736a63fc56f538b838
|
refs/heads/master
| 2021-05-28T20:19:46.846435 | 2013-08-15T05:50:55 | 2013-08-15T05:50:55 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
##############################################################################
# Part of the Agnos RPC Framework
# http://agnos.sourceforge.net
#
# Copyright 2011, International Business Machines Corp.
# Author: Tomer Filiba ([email protected])
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from SCons.Errors import UserError
# Use MD5 content signatures (not timestamps) to decide when to rebuild.
Decider('MD5')

# When built as part of a larger project an environment is exported to us;
# otherwise (stand-alone build) fall back to a default environment.
try:
    Import("env")
except UserError:
    env = DefaultEnvironment()

env["CPPDEFINES"] = {}
# A value of None emits a bare -DNAME (no =value) preprocessor define.
if int(ARGUMENTS.get('AGNOS_DEBUG', 0)):
    env["CPPDEFINES"]["AGNOS_DEBUG"] = None
if int(ARGUMENTS.get('BOOST_PROCESS_SUPPORTED', 0)):
    env["CPPDEFINES"]["BOOST_PROCESS_SUPPORTED"] = None
else:
    # Not forced on the command line: autodetect boost.process.
    conf = Configure(env)
    if conf.CheckCXXHeader('boost/process.hpp'):
        conf.env["CPPDEFINES"]["BOOST_PROCESS_SUPPORTED"] = None
    env = conf.Finish()
if int(ARGUMENTS.get('AGNOS_USE_WSTRING', 0)):
    env["CPPDEFINES"]["AGNOS_USE_WSTRING"] = None

# Build the agnos static library from all C++ sources under src/.
env.Library('agnos', Glob('src/*.cpp'))
|
UTF-8
|
Python
| false | false | 2,013 |
15,281,493,657,601 |
18226dd7790187b80a6bf2f4c7af7d7a6b365336
|
59f5cf1fd610d26d9c101cc0a2234ec8680c8dbf
|
/doc/templates/SConsProject
|
17048bed45b0814ff7ace290909d7dc9c4d71f1c
|
[
"GPL-2.0-only",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] |
non_permissive
|
paeschli/scons-builder
|
https://github.com/paeschli/scons-builder
|
0eff12be114281f2adc98c71ef773f62ea48d3b6
|
ed5b3562b841f048fc6512e7bc042456c9607d1d
|
refs/heads/master
| 2016-09-06T11:40:55.914994 | 2014-08-14T16:56:24 | 2014-08-14T16:56:24 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#-*- Python -*-
# SConsBuilder Project File
#
# This file will be executed after the initial build environment is created
# but before it is finalized.
#
# - Available variables for import -
#
# init_env - Initial build environment
#
# - Available environment variables -
#
# init_env['BUILD_DIR'] - building directory
# init_env['OBJ_DIR'] - building subdirectory for object files, etc.
#
# You should at least declare following init_env variables:
#
# INCLUDE_DIR - Include directory
# SOURCE_DIRS - All source directories
# LIB_DIR - Destination directory for libraries
# BIN_DIR - Destination directory for executables
# CONFIG_HEADER - Used for autogenerated configuration header
# use None or ConfigHeader()
#
# WIN_DEBUG_SUFFIX - Used on Windows platform for specification of
# the debug suffix for libraries which are compiled
# with debug version of the MSVC runtime library.
#
# PROJECT_PREFIX - Used for auto-configuration
# SYSTEM_PREFIX - Used for auto-configuration
# COMPILER_PREFIX - Used for auto-configuration
# AUTOCONF_PREFIX - Used for auto-configuration
import sys
import os
import os.path
import datetime
from builder.btools import *
Import('init_env')
AddCustomVariables(
BoolVariable('makebindings', 'create bindings with SWIG', 1),
BoolVariable('python', 'compile python bindings', 0),
('INIMAGE_DIST_DIR', 'Path to inImage distribution'),
BoolVariable('enable_rtfact',
'enable building of RTfact rendering module', 1)
)
BUILD_DIR = init_env['BUILD_DIR']
SOURCE_DIRS = Split("""
#src
""")
INCLUDE_DIR = BUILD_DIR + os.sep + 'include'
LIB_DIR = BUILD_DIR + os.sep + 'lib'
BIN_DIR = BUILD_DIR + os.sep + 'bin'
CONFIG_HEADER = ConfigHeader()
# define symbol prefixes
PROJECT_PREFIX = 'RTSG_'
SYSTEM_PREFIX = ''
COMPILER_PREFIX = 'COMPILER_'
AUTOCONF_PREFIX = 'RTSG_'
init_env.Replace(CXXFILESUFFIX='.cpp',
INCLUDE_DIR=INCLUDE_DIR,
SOURCE_DIRS=SOURCE_DIRS,
LIB_DIR=LIB_DIR,
BIN_DIR=BIN_DIR,
CONFIG_HEADER=CONFIG_HEADER,
WIN_DEBUG_SUFFIX="D",
PROJECT_PREFIX=PROJECT_PREFIX,
SYSTEM_PREFIX=SYSTEM_PREFIX,
COMPILER_PREFIX=COMPILER_PREFIX,
AUTOCONF_PREFIX=AUTOCONF_PREFIX,
CPPPATH=SOURCE_DIRS,
LIBPATH=[LIB_DIR])
# Setup configuration, conf is created by env.Configure()
# Setup configuration, conf is created by env.Configure()
def customizeConfiguration(conf):
    """Autoconf-style feature detection: hard-fail on required tools,
    record optional dependencies as HAVE_* flags on the environment."""
    # in conf.env is the environment that can be changed
    isWin32 = conf.env['PLATFORM'] == 'win32'

    # On Windows append win32tools/include to CPPPATH
    # win32tools/lib to LIBPATH
    # win32tools/bin to PATH
    if isWin32:
        win32toolsDir = os.path.join(conf.env.Dir('#').abspath, 'win32tools')
        if os.path.exists(win32toolsDir):
            conf.env.Append(CPPPATH = [os.path.join(win32toolsDir, 'include')])
            conf.env.Append(LIBPATH = [os.path.join(win32toolsDir, 'lib')])
            conf.env.AppendENVPath('PATH', os.path.join(win32toolsDir, 'bin'))

    # init lex tool
    conf.env.Tool('lex')
    # check lex tool
    pathToLex = conf.env.WhereIs(conf.env.get('LEX'))
    if not pathToLex:
        print >>sys.stderr, 'Error: Could not find lex/flex tool'
        Exit(1)
    else:
        print '* Use lexer tool from %s' % pathToLex

    # Hard requirements: working compiler and basic platform facts.
    if not conf.CheckCCompiler():
        print >>sys.stderr, 'Error: Could not run C/C++ compiler'
        Exit(1)
    if not conf.CheckTypeSizes():
        print >>sys.stderr, 'Error: Could not determine type sizes'
        Exit(1)
    if not conf.CheckEndianness():
        print >>sys.stderr, 'Error: Could not determine platform endianness'
        Exit(1)

    # Optional dependencies: record availability as HAVE_* environment flags.
    # write_config_h=False keeps the detection macro out of the generated
    # config header (flag lives only in the build environment).
    if conf.CheckTclTk(write_config_h=False):
        conf.env.Replace(HAVE_TCL=1)
    else:
        conf.env.Replace(HAVE_TCL=0)
    if conf.CheckPython():
        conf.env.Replace(HAVE_PYTHON=1)
    else:
        conf.env.Replace(HAVE_PYTHON=0)
    if conf.CheckSwig():
        conf.env.Replace(HAVE_SWIG=1)
    else:
        conf.env.Replace(HAVE_SWIG=0)
    if conf.CheckPNG():
        conf.env.Replace(HAVE_PNG=1)
    else:
        conf.env.Replace(HAVE_PNG=0)
        print >>sys.stderr, 'Warning: Could not find png library.'
    if conf.CheckJPEG():
        conf.env.Replace(HAVE_JPEG=1)
    else:
        conf.env.Replace(HAVE_JPEG=0)
        print >>sys.stderr, 'Warning: Could not find jpeg library.'
    if conf.CheckInImage(write_config_h=False):
        conf.env.Replace(HAVE_INIMAGE=1)
    else:
        conf.env.Replace(HAVE_INIMAGE=0)
    # because NMM is not a part of RTSG core we do not write
    # detection macro RTSG_HAVE_NMM to the autogenerated config file
    if conf.CheckNMM(write_config_h=False):
        conf.env.Replace(HAVE_NMM=1)
    else:
        conf.env.Replace(HAVE_NMM=0)
    if conf.CheckPthreads():
        conf.env.Replace(HAVE_PTHREADS=1)
    else:
        conf.env.Replace(HAVE_PTHREADS=0)
    if conf.CheckOpenGL(write_config_h=False):
        conf.env.Replace(HAVE_GL=1)
    else:
        conf.env.Replace(HAVE_GL=0)
    # NOTE(review): the GLUT/GLEW checks below overwrite HAVE_GL with their
    # own result, discarding the OpenGL check above -- confirm intended.
    if conf.CheckGLUT(write_config_h=False):
        conf.env.Replace(HAVE_GLUT=1)
        conf.env.Replace(HAVE_GL=1)
    else:
        conf.env.Replace(HAVE_GLUT=0)
        conf.env.Replace(HAVE_GL=0)
    if conf.CheckGLEW(write_config_h=False):
        conf.env.Replace(HAVE_GLEW=1)
        conf.env.Replace(HAVE_GL=1)
    else:
        conf.env.Replace(HAVE_GLEW=0)
        conf.env.Replace(HAVE_GL=0)
    if conf.CheckSDL(write_config_h=False):
        conf.env.Replace(HAVE_SDL=1)
        # SDL_ttf is only meaningful when SDL itself is available.
        if conf.CheckSDLTTF(write_config_h=False):
            conf.env.Replace(HAVE_SDLTTF=1)
        else:
            conf.env.Replace(HAVE_SDLTTF=0)
    else:
        conf.env.Replace(HAVE_SDL=0)
        conf.env.Replace(HAVE_SDLTTF=0)
    if conf.CheckOgre(write_config_h=False):
        conf.env.Replace(HAVE_OGRE=1)
    else:
        conf.env.Replace(HAVE_OGRE=0)
    if conf.CheckOIS(write_config_h=False):
        conf.env.Replace(HAVE_OIS=1)
    else:
        conf.env.Replace(HAVE_OIS=0)

    # check boost headers (required)
    if not conf.CheckCXXHeader('boost/bind.hpp') or \
       not conf.CheckCXXHeader('boost/function.hpp') or \
       not conf.CheckCXXHeader('boost/variant.hpp') or \
       not conf.CheckCXXHeader('boost/lexical_cast.hpp') or \
       not conf.CheckCXXHeader('boost/algorithm/string/predicate.hpp') or \
       not conf.CheckCXXHeader('boost/foreach.hpp'):
        print >>sys.stderr, 'Error: Could not find boost libraries'
        Exit(1)
    if conf.CheckBoostFileSystem():
        conf.env.Replace(HAVE_BOOST_FILESYSTEM=1)
    else:
        conf.env.Replace(HAVE_BOOST_FILESYSTEM=0)
    if conf.CheckBoostRegex():
        conf.env.Replace(HAVE_BOOST_REGEX=1)
    else:
        conf.env.Replace(HAVE_BOOST_REGEX=0)
    if conf.CheckRTfactRemote():
        conf.env.Replace(HAVE_RTFACT_REMOTE=1)
    else:
        conf.env.Replace(HAVE_RTFACT_REMOTE=0)

# Hook the detection routine into the build framework's configure step.
RegisterConfigurationCustomizer(customizeConfiguration)
# Customize final environment
# Customize final environment
def finalizeEnvironment(env):
    """Wire up install paths, per-directory SConscripts, documentation,
    default targets and release archives on the finished environment."""
    # Evaluate building hierarchy
    Export('env')

    # setup install directories
    INSTALL_DIR = env['prefix']
    INSTALL_INCLUDE_DIR = os.path.join(INSTALL_DIR, 'include')
    INSTALL_LIB_DIR = os.path.join(INSTALL_DIR, 'lib')
    INSTALL_BIN_DIR = os.path.join(INSTALL_DIR, 'bin')
    INSTALL_DIRS = [INSTALL_INCLUDE_DIR,
                    INSTALL_LIB_DIR,
                    INSTALL_BIN_DIR]
    env.Replace(INSTALL_INCLUDE_DIR = INSTALL_INCLUDE_DIR,
                INSTALL_LIB_DIR = INSTALL_LIB_DIR,
                INSTALL_BIN_DIR = INSTALL_BIN_DIR,
                INSTALL_DIRS = INSTALL_DIRS)

    # process all source directories
    for srcDir in SOURCE_DIRS:
        # Map '#src' -> <OBJ_DIR>/src so object files land outside the tree.
        buildDir = srcDir
        if buildDir.startswith('#'):
            buildDir = buildDir[1:]
        buildDir = os.path.join(env['OBJ_DIR'], buildDir)
        VariantDir(buildDir, srcDir, duplicate=0)
        SConscript(os.path.join(buildDir, 'SConscript'))

    # Alternate method to setup build directory :
    # SConscript('src' + os.sep + 'SConscript',
    #            build_dir=env['BUILD_DIR'],
    #            src_dir=env['SRC_DIR'],
    #            duplicate=0)

    # build documentation (target 'doc' always regenerates the doxygen tree)
    DOXYGEN_OUTPUT_DIR = '#doc/doxygen'
    DOXYGEN_CONFIG_FILE = '#doc/Doxyfile'
    env.Command(env.Dir(DOXYGEN_OUTPUT_DIR), DOXYGEN_CONFIG_FILE,
                "doxygen $SOURCES",
                ENV = {'DOXYGEN_OUTPUT_DIR' : env.Dir(DOXYGEN_OUTPUT_DIR).abspath,
                       'DOXYGEN_INPUT_DIR' : env.Dir('#src').abspath
                       })
    env.Alias('doc', env.Dir(DOXYGEN_OUTPUT_DIR))
    env.AlwaysBuild(env.Dir(DOXYGEN_OUTPUT_DIR))
    Help("""doc: Generate doxygen documentation""")

    # default targets
    Default(env['BUILD_DIR'])
    Default(env['INCLUDE_DIR'])

    # install targets
    env.Alias('install', INSTALL_DIR)

    # release and full-release target: date-stamped source/binary archives
    curdate = datetime.date.today().isoformat()
    MAJOR_VERSION = 0
    MINOR_VERSION = 4
    PATCH_VERSION = 0
    VERSION = "%i-%i-%i" % (MAJOR_VERSION, MINOR_VERSION, PATCH_VERSION)
    TARGET_ARCH_SUFFIX = env.get('TARGET_ARCH_SUFFIX', 'IA32')
    RELEASE_NAME = 'rtsg-Release-Version-%s-%s-%s' % \
                   (VERSION, TARGET_ARCH_SUFFIX, curdate)
    FULL_RELEASE_NAME = 'rtsg-Full-Release-Version-%s-%s-%s' % \
                        (VERSION, TARGET_ARCH_SUFFIX, curdate)
    release = env.CreateDist('#/'+RELEASE_NAME,
                             Split('bin lib bin-openrt lib-openrt'),
                             'rtsg',
                             excludeExts=['.cvsignore', '.sconsign'],
                             excludeDirs=['CVS','.svn','.sconf_temp'])
    env.Alias('release', release)
    # full-release additionally ships headers and the engines directory.
    full_release = env.CreateDist('#/'+FULL_RELEASE_NAME,
                                  Split('bin lib bin-openrt lib-openrt include include-openrt engines'),
                                  'rtsg',
                                  excludeExts=['.cvsignore', '.sconsign'],
                                  excludeDirs=['CVS','.svn','.sconf_temp'])
    env.Alias('full-release', full_release)

# Hook the finalization routine into the build framework.
RegisterEnvironmentFinalizer(finalizeEnvironment)
|
UTF-8
|
Python
| false | false | 2,014 |
1,563,368,137,500 |
d52888d4a824ac32a8e6412a797e22da4f468067
|
d7180f64c075aaea69a3cab6528112e6c1b37110
|
/Homework3B.py
|
ccd6afb6ac007f198291d46b3f7bca4467cb92ff
|
[] |
no_license
|
burkytek/PFAB
|
https://github.com/burkytek/PFAB
|
5ccc0de9a22626d85fd7cf99fd3ef67e4acaf9c9
|
762b3d22deaa19110230d2a6b4619329029722d7
|
refs/heads/master
| 2021-01-22T05:05:45.663617 | 2014-08-06T20:56:50 | 2014-08-06T20:56:50 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Print each student's GPA, the class average, and a ranking (highest first).
gpas = {"Lassoff": 3.12, "Johnson": 2.22, "Reich": 3.59, "Honeychurch": 2.98,
        "Maini": 3.11, "Levin": 2.88, "Marcus": 2.77, "Banks": 3.71}

total = 0.0  # running sum of all GPAs; named to avoid shadowing the builtin sum()
for name, gpa in gpas.items():  # items() instead of Py2-only iteritems()
    print("Last Name:", name, " ", "GPA:", gpa)
    total += gpa

average_gpa = total / len(gpas)
print("Average GPA is", average_gpa)

# Rank students from highest to lowest GPA; enumerate replaces the manual counter.
for rank, (name, gpa) in enumerate(
        sorted(gpas.items(), key=lambda kv: kv[1], reverse=True), start=1):
    print("Rank #", rank, " ", name)
|
UTF-8
|
Python
| false | false | 2,014 |
11,931,419,161,006 |
89a1b1a709479b26f663a10139eee9120e14e34d
|
84721ed008c94f2c7351c63a6dd23d4107efcce0
|
/vimlite/VimLite/BuilderManager.py
|
98907df8763c462fb8e0aaf2625529db54ceb2ed
|
[] |
no_license
|
vim-scripts/VimLite
|
https://github.com/vim-scripts/VimLite
|
17277b09a208d437d0b3a5e514b483bc5301ee4f
|
cd00cb17d98efed12e1e364dae8676101394f135
|
refs/heads/master
| 2016-09-07T18:50:25.672256 | 2013-01-25T00:00:00 | 2013-01-28T02:41:31 | 1,970,791 | 8 | 3 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from BuildSettings import BuildSettingsST
import BuilderGnuMake
class BuilderManager(object):
    '''Registry that manages all abstract Builder instances.'''

    def __init__(self):
        # Maps builder name -> builder instance.
        self.builders = {}

    def AddBuilder(self, builder):
        """Register *builder* under its own name."""
        self.builders[builder.name] = builder

    def GetBuilderByName(self, name):
        """Return the builder registered as *name*, or None."""
        return self.builders.get(name)

    def GetActiveBuilderInstance(self):
        '''Build a concrete builder from the active configuration file.'''
        active = BuildSettingsST.Get().GetActiveBuilderInstance()
        if not active:
            print("No active Builder")
            return None
        # The base-class settings are assumed to apply to every subclass.
        return BuilderGnuMake.BuilderGnuMake(active.ToDict())
class BuilderManagerST:
    """Lazily-created process-wide singleton wrapper around BuilderManager."""

    # The single shared BuilderManager instance (None until first Get()).
    __ins = None

    @staticmethod
    def Get():
        """Return the singleton, creating and populating it on first use."""
        if not BuilderManagerST.__ins:
            manager = BuilderManager()
            # Register the available abstract Builder classes.  They start
            # with default settings; real values come from the config file.
            manager.AddBuilder(BuilderGnuMake.BuilderGnuMake())
            BuilderManagerST.__ins = manager
        return BuilderManagerST.__ins

    @staticmethod
    def Free():
        """Drop the singleton so the next Get() rebuilds it."""
        BuilderManagerST.__ins = None
|
UTF-8
|
Python
| false | false | 2,013 |
661,424,991,570 |
7aca4030eb35ccaaeb54b85413f5da92981c32a7
|
adbdfb0399f98c2a3b96255caa85520fb0123a85
|
/viewer/model/plotParser.py
|
695c97da36b419f7d6fde08593b44363c0425e84
|
[] |
no_license
|
cmollare/pyViewer
|
https://github.com/cmollare/pyViewer
|
6bf62780e295947d4ef51e0cf3b8dc0491049cd0
|
a3e865fe7897c829d732b03358f9a09bdfee6e83
|
refs/heads/master
| 2020-04-11T05:15:07.420057 | 2013-12-04T10:40:50 | 2013-12-04T10:40:50 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python2.7
# -*-coding:Latin-1 -*
from __future__ import print_function #for using print(string, end='') (remove \n at the end)
class PlotParser(object):
    """Parse one line of the plotting protocol into a PlotData record.

    NOTE(review): PlotData is not defined in this module -- presumably
    provided elsewhere; confirm.  The parse helpers eval() fields taken
    straight from the input line, which executes arbitrary code if the
    input is untrusted.
    """

    def __init__(self, string):
        self.string = string
        self.parseLine(string)

    def getPlotData(self):
        """Return the PlotData produced by parsing, or None."""
        return self.plotData

    def parseLine(self, string):
        """Dispatch on the leading "py..." command keyword."""
        if string.startswith("pySimplePlot"):
            self.plotData = self.parseSimplePlot(string)
        elif string.startswith("pySubPlot"):
            self.plotData = self.parseSubPlot(string)
        elif string.startswith("pyMultiPlot"):
            self.plotData = self.parseMultiPlot(string)
        else:
            # Unknown command: echo the original text (without newline) and
            # record that no plot data was produced.
            if len(string) != 0:
                print(string, end=' ')
            self.plotData = None

    ################## parsing fuctions ###########################################################

    def parseSimplePlot(self, string):
        """Parse "pySimplePlot <name> <y> [<x>]"; None on a malformed line."""
        fields = string.split(" ")
        if len(fields) == 5:
            return PlotData(plotType="simple", name=fields[1], y=eval(fields[2]), x=eval(fields[3]))
        if len(fields) == 4:
            return PlotData(plotType="simple", name=fields[1], y=eval(fields[2]), x=None)
        return None

    def parseSubPlot(self, string):
        """Parse "pySubPlot <parent> <name> <y> [<x>]"; None if malformed."""
        fields = string.split(" ")
        if len(fields) == 6:
            return PlotData(plotType="sub", parentName=fields[1], name=fields[2], y=eval(fields[3]), x=eval(fields[4]))
        if len(fields) == 5:
            return PlotData(plotType="sub", parentName=fields[1], name=fields[2], y=eval(fields[3]), x=None)
        return None

    def parseMultiPlot(self, string):
        """Parse "pyMultiPlot <parent> <name> <y> [<x>]"; None if malformed."""
        fields = string.split(" ")
        if len(fields) == 6:
            return PlotData(plotType="multi", parentName=fields[1], name=fields[2], y=eval(fields[3]), x=eval(fields[4]))
        if len(fields) == 5:
            return PlotData(plotType="multi", parentName=fields[1], name=fields[2], y=eval(fields[3]), x=None)
        return None
|
UTF-8
|
Python
| false | false | 2,013 |
7,395,933,716,712 |
7c0e25c861b812cc2cfa588ceff0e3a10ecb4455
|
75a81ca6b3afab63fd692e6fc1cb10d8287768c3
|
/SplitSecretTest.py
|
f9577d7972b6e0460a4ff071c3c20f960b09a461
|
[] |
no_license
|
AndyOfiesh/BitcoinArmoryTest
|
https://github.com/AndyOfiesh/BitcoinArmoryTest
|
164417fdf843dd3c3bec9d1afb75351bfdd65a3a
|
999d1e00caefdb9f8a9406a9d69c26cfac76ab43
|
refs/heads/master
| 2021-01-01T05:43:18.745830 | 2014-05-23T23:28:23 | 2014-05-23T23:28:23 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
'''
Created on Aug 4, 2013
@author: Andy
'''
from test.Tiab import TiabTest
from random import shuffle
import sys
import unittest
from armoryengine.ArmoryUtils import FiniteField, FiniteFieldError, SplitSecret, \
hex_to_binary, RightNow, binary_to_hex, ReconstructSecret
sys.argv.append('--nologging')
sys.argv.append('--nologging')
TEST_A = 200
TEST_B = 100
TEST_ADD_RESULT = 49
TEST_SUB_RESULT = 100
TEST_MULT_RESULT = 171
TEST_DIV_RESULT = 2
TEST_MTRX = [[1, 2, 3], [3,4,5], [6,7,8] ]
TEST_VECTER = [1, 2, 3]
TEST_3_BY_2_MTRX = [[1, 2, 3], [3,4,5]]
TEST_2_BY_3_MTRX = [[1, 2], [3,4], [5, 6]]
TEST_RMROW1CO1L_RESULT = [[1, 3], [6, 8]]
TEST_DET_RESULT = 0
TEST_MULT_VECT_RESULT = [14, 26, 44]
TEST_MULT_VECT_RESULT2 = [5, 11, 17]
TEST_MULT_VECT_RESULT3 = [[7, 10], [15, 22], [23, 34]]
TEST_MULT_VECT_RESULT4 = [[248, 5, 249], [6, 241, 4], [248, 5, 249]]
TEST_MULT_VECT_RESULT5 = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
class SplitSecretTest(TiabTest):
    # Exercises armoryengine's finite-field arithmetic and the
    # Shamir-style SplitSecret/ReconstructSecret helpers.

    def testFiniteFieldTest(self):
        """Verify field arithmetic and the matrix helpers of FiniteField."""
        ff1 = FiniteField(1)
        # 257 is not a supported field size.
        self.assertRaises(FiniteFieldError, FiniteField, 257)
        self.assertEqual(ff1.add(TEST_A, TEST_B), TEST_ADD_RESULT)
        self.assertEqual(ff1.subtract(TEST_A, TEST_B), TEST_SUB_RESULT)
        self.assertEqual(ff1.mult(TEST_A, TEST_B), TEST_MULT_RESULT)
        self.assertEqual(ff1.divide(TEST_A, TEST_B), TEST_DIV_RESULT)
        self.assertEqual(ff1.mtrxrmrowcol(TEST_MTRX, 1, 1), TEST_RMROW1CO1L_RESULT)
        # Removing a row/col from a non-square matrix yields an empty matrix.
        self.assertEqual(ff1.mtrxrmrowcol(TEST_3_BY_2_MTRX, 1, 1), [])
        self.assertEqual(ff1.mtrxdet([[1]]), 1)
        # Determinant of a non-square matrix is flagged with -1.
        self.assertEqual(ff1.mtrxdet(TEST_3_BY_2_MTRX), -1)
        self.assertEqual(ff1.mtrxdet(TEST_MTRX), TEST_DET_RESULT)
        self.assertEqual(ff1.mtrxmultvect(TEST_MTRX, TEST_VECTER), TEST_MULT_VECT_RESULT)
        self.assertEqual(ff1.mtrxmultvect(TEST_3_BY_2_MTRX, TEST_VECTER), TEST_MULT_VECT_RESULT[:2])
        self.assertEqual(ff1.mtrxmultvect(TEST_2_BY_3_MTRX, TEST_VECTER), TEST_MULT_VECT_RESULT2)
        self.assertEqual(ff1.mtrxmult(TEST_2_BY_3_MTRX, TEST_3_BY_2_MTRX), TEST_MULT_VECT_RESULT3)
        self.assertEqual(ff1.mtrxmult(TEST_2_BY_3_MTRX, TEST_2_BY_3_MTRX), TEST_MULT_VECT_RESULT3)
        self.assertEqual(ff1.mtrxadjoint(TEST_MTRX), TEST_MULT_VECT_RESULT4)
        self.assertEqual(ff1.mtrxinv(TEST_MTRX), TEST_MULT_VECT_RESULT5)

    def testSplitSecret(self):
        """Round-trip split/reconstruct for several M-of-N configurations."""
        self.callSplitSecret('9f', 2,3)
        self.callSplitSecret('9f', 3,5)
        self.callSplitSecret('9f', 4,7)
        self.callSplitSecret('9f', 5,9)
        self.callSplitSecret('9f', 6,7)
        self.callSplitSecret('9f'*16, 3,5, 16)
        self.callSplitSecret('9f'*16, 7,10, 16)
        # Invalid: fragment size smaller than the secret, N < M, and M == 1.
        self.assertRaises(FiniteFieldError, SplitSecret, '9f'*16, 3, 5, 8)
        self.assertRaises(FiniteFieldError, SplitSecret, '9f', 5,4)
        self.assertRaises(FiniteFieldError, SplitSecret, '9f', 1,1)

    def callSplitSecret(self, secretHex, M, N, nbytes=1):
        """Split secretHex M-of-N, then reconstruct from shuffled fragments.

        Asserts the reconstruction equals the original for 10 random
        fragment orderings, and prints rough timing figures.
        """
        secret = hex_to_binary(secretHex)
        print '\nSplitting secret into %d-of-%d: secret=%s' % (M,N,secretHex)
        tstart = RightNow()
        out = SplitSecret(secret, M, N)
        tsplit = RightNow() - tstart
        print 'Fragments:'
        for i in range(len(out)):
            x = binary_to_hex(out[i][0])
            y = binary_to_hex(out[i][1])
            print ' Fragment %d: [%s, %s]' % (i+1,x,y)
        trecon = 0
        print 'Reconstructing secret from various subsets of fragments...'
        for i in range(10):
            # Shuffle so each round reconstructs from a different ordering.
            shuffle(out)
            tstart = RightNow()
            reconstruct = ReconstructSecret(out, M, nbytes)
            trecon += RightNow() - tstart
            print ' The reconstructed secret is:', binary_to_hex(reconstruct)
            self.assertEqual(binary_to_hex(reconstruct), secretHex)
        print 'Splitting secret took: %0.5f sec' % tsplit
        print 'Reconstructing takes: %0.5f sec' % (trecon/10)
if __name__ == "__main__":
    #import sys;sys.argv = ['', 'Test.testName']
    # Run the whole suite when executed directly.
    unittest.main()
|
UTF-8
|
Python
| false | false | 2,014 |
18,846,316,496,919 |
a60c1642c97e222ed2014427e33084591335d898
|
ee8bdc10af43f2d9b75e40c231dfe785b4048928
|
/apps/feedback/tests/test_feedback_views.py
|
289f67d483d51f7cb80ede86c72d2b38a257bf66
|
[
"MPL-1.1",
"GPL-2.0-or-later",
"LGPL-2.1-or-later"
] |
non_permissive
|
fox2mike/input.mozilla.org
|
https://github.com/fox2mike/input.mozilla.org
|
a92e0c4af7b1c4cf2b1950da167b860a08638561
|
bbef34bf28ca6141283ef286515274c5e83d6121
|
refs/heads/master
| 2021-01-18T11:41:44.591034 | 2011-10-20T17:06:54 | 2011-10-20T17:06:54 | 2,634,949 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from datetime import datetime
from django.conf import settings
from nose.tools import eq_
from pyquery import PyQuery as pq
from input import FIREFOX, OPINION_PRAISE, OPINION_ISSUE
from input.tests import ViewTestCase, enforce_ua
from input.urlresolvers import reverse
from feedback.models import Opinion
class BetaViewTests(ViewTestCase):
    """Tests for our beta feedback submissions."""
    # Opinions preloaded into the test database.
    fixtures = ['feedback/opinions']
    # Firefox user-agent template; %s receives the version under test.
    FX_UA = ('Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; '
        'de; rv:1.9.2.3) Gecko/20100401 Firefox/%s')

    def _get_page(self, ver=None):
        """Request beta feedback page."""
        extra = dict(HTTP_USER_AGENT=self.FX_UA % ver) if ver else {}
        return self.client.get(reverse('feedback'), **extra)

    @enforce_ua
    def test_no_ua(self):
        """No UA: Redirect to beta download."""
        r = self._get_page()
        eq_(r.status_code, 302)
        assert r['Location'].endswith(
            reverse('feedback.download'))

    @enforce_ua
    def test_release(self):
        """Release version: page served directly (no redirect)."""
        # NOTE(review): Version and LATEST_BETAS are not imported anywhere
        # in this module, so this line raises NameError when the getattr
        # fallback is taken -- confirm the intended import.
        version = (getattr(FIREFOX, 'default_version', None) or
            Version(LATEST_BETAS[FIREFOX]).simplified)
        r = self._get_page(version)
        eq_(r.status_code, 200)

    @enforce_ua
    def test_old_beta(self):
        """Old beta: redirect."""
        r = self._get_page('3.6b2')
        eq_(r.status_code, 302)
        assert r['Location'].endswith(reverse('feedback.download'))

    @enforce_ua
    def test_newer_beta(self):
        """Beta version newer than current: no redirect."""
        r = self._get_page('20.0b2')
        eq_(r.status_code, 200)

    @enforce_ua
    def test_nightly(self):
        """Nightly version: should be able to submit."""
        r = self._get_page('20.0a1')
        eq_(r.status_code, 200)
        r = self._get_page('20.0a2')
        eq_(r.status_code, 200)

    @enforce_ua
    def test_recent_version(self):
        """Recent release version: page served (docstring said 'redirect'
        but the assertion expects 200 -- comment corrected here)."""
        r = self._get_page('5.0')
        eq_(r.status_code, 200)

    def test_give_feedback(self):
        """POST without a User-Agent header is rejected with a message."""
        r = self.client.post(reverse('feedback'))
        eq_(r.content, 'User-Agent request header must be set.')

    def test_opinion_detail(self):
        """Detail page for a fixture opinion renders."""
        r = self.client.get(reverse('opinion.detail', args=(29,)))
        eq_(r.status_code, 200)

    def test_url_submission(self):
        """Valid URL schemes are accepted; invalid ones produce a form error."""
        def submit_url(url, valid=True):
            """Submit feedback with a given URL, check if it's accepted."""
            data = {
                # Need to vary text so we don't cause duplicates warnings.
                'description': 'Hello %d' % datetime.now().microsecond,
                'add_url': 'on',
                '_type': OPINION_PRAISE.id,
            }
            if url:
                data['url'] = url
            r = self.client.post(reverse('feedback'), data,
                                 HTTP_USER_AGENT=(self.FX_UA % '20.0b2'),
                                 follow=True)
            # Neither valid nor invalid URLs cause anything but a 200 response.
            eq_(r.status_code, 200)
            if valid:
                assert r.content.find('Thanks') >= 0
                assert r.content.find('Enter a valid URL') == -1
            else:
                assert r.content.find('Thanks') == -1
                assert r.content.find('Enter a valid URL') >= 0

        # Valid URL types
        submit_url('http://example.com')
        submit_url('https://example.com')
        submit_url('about:me')
        submit_url('chrome://mozapps/content/extensions/extensions.xul')

        # Invalid URL types
        submit_url('gopher://something', valid=False)
        submit_url('zomg', valid=False)

        # Try submitting add_url=on with no URL. Bug 613549.
        submit_url(None)

    def test_submissions_without_url(self):
        """Ensure feedback without URL can be submitted. Bug 610023."""
        req = lambda: self.client.post(
            reverse('feedback'), {
                'description': 'Hello!',
                '_type': OPINION_ISSUE.id,
            }, HTTP_USER_AGENT=(self.FX_UA % '20.0b2'), follow=True)
        # No matter what you submit in the URL field, there must be a 200
        # response code.
        r = req()
        eq_(r.status_code, 200)
        assert r.content.find('Thanks for') >= 0

        # Resubmit, should not work due to duplicate submission.
        r2 = req()
        eq_(r2.status_code, 200)
        assert r2.content.find('We already got your feedback') >= 0

    def test_submission_autocomplete_off(self):
        """
        Ensure both mobile and desktop submission pages have autocomplete off.
        """
        def with_site(site_id):
            r = self.client.get(reverse('feedback'), HTTP_USER_AGENT=(
                self.FX_UA % '20.0b2'), SITE_ID=site_id, follow=True)
            d = pq(r.content)
            forms = d('article form')
            assert forms
            for form in forms:
                eq_(pq(form).attr('autocomplete'), 'off')

        with_site(settings.DESKTOP_SITE_ID)
        with_site(settings.MOBILE_SITE_ID)

    def test_submission_with_device_info(self):
        """Ensure mobile device info can be submitted."""
        r = self.client.post(
            reverse('feedback'), {
                'description': 'Hello!',
                '_type': OPINION_ISSUE.id,
                'manufacturer': 'FancyBrand',
                'device': 'FancyPhone 2.0',
            }, HTTP_USER_AGENT=(self.FX_UA % '20.0b2'), follow=True)
        eq_(r.status_code, 200)
        assert r.content.find('Thanks') >= 0

        # Fetch row from model and check data made it there.
        latest = Opinion.objects.no_cache().order_by('-id')[0]
        eq_(latest.manufacturer, 'FancyBrand')
        eq_(latest.device, 'FancyPhone 2.0')

    def test_feedback_index(self):
        """Test feedback index page for Betas."""
        r = self.client.get(reverse('feedback'),
                            HTTP_USER_AGENT=(self.FX_UA % '20.0b2'),
                            follow=True)
        eq_(r.status_code, 200)

    def test_mobile_sumo_url(self):
        """Test the sumo url in the mobile version."""
        r = self.client.get(reverse('feedback'),
                            HTTP_USER_AGENT=(self.FX_UA % '20.0b2'),
                            SITE_ID=settings.MOBILE_SITE_ID)
        doc = pq(r.content)
        sumo_url = doc('#sumo-url')
        eq_(sumo_url.attr('href'), 'http://support.mozilla.com/en-US/')
|
UTF-8
|
Python
| false | false | 2,011 |
17,016,660,429,994 |
f97a6f9b96b4bea0cf92b5608373a8fcafea4242
|
ecd74f283963aadb4c8609566cc56b24426eadaa
|
/twitter_saver.py
|
e4c28157b9037a3b2ae76ba710bc117a2b675fe2
|
[] |
no_license
|
nst/Twitter-Saver
|
https://github.com/nst/Twitter-Saver
|
51064eb64c51c5b4ff313f758d79af6f0f18435b
|
c1a1e1dd2a6ffe28b473fc72d5ef8469f3a9f2f2
|
refs/heads/master
| 2021-01-10T18:38:35.895926 | 2010-01-02T02:13:45 | 2010-01-02T02:13:45 | 455,568 | 1 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
# Nicolas Seriot
# 2010-01-01
# http://github.com/nst/Twitter-Saver
import sys
import urllib2
import json
def install_authentication_handler(url, login, password):
    """Register HTTP Basic Auth credentials for *url* on the global opener."""
    credentials = urllib2.HTTPPasswordMgrWithDefaultRealm()
    credentials.add_password(None, url, login, password)
    auth_handler = urllib2.HTTPBasicAuthHandler(credentials)
    urllib2.install_opener(urllib2.build_opener(auth_handler))
def user_statuses_count(username):
    """Return *username*'s total tweet count, or 0 on any fetch error."""
    url = 'http://twitter.com/users/show.json?screen_name=%s' % username
    try:
        body = urllib2.urlopen(url).readlines()
    except Exception as e:
        print(e)
        return 0
    return int(json.loads(''.join(body))['statuses_count'])
def user_timeline_statuses(username, page=1):
    """Fetch one 200-status page of *username*'s timeline ([] on error)."""
    url = 'http://twitter.com/statuses/user_timeline/%s.json?count=200&page=%d' % (username, page)
    try:
        body = urllib2.urlopen(url).readlines()
    except Exception as e:
        print(e)
        return []
    return json.loads(''.join(body))
def print_statuses(username):
    """Dump every status of *username*, 200 per page, as UTF-8 TSV lines."""
    total = user_statuses_count(username)
    pages, remainder = divmod(total, 200)
    if remainder:
        pages += 1
    for page in range(1, pages + 1):
        statuses = user_timeline_statuses(username, page)
        if not statuses:
            break
        for status in statuses:
            print(unicode("%(id)s\t%(created_at)s\t%(text)s" % status).encode('utf-8'))
# CLI entry point: twitter_saver.py <username> [<login> <password>]
if len(sys.argv) not in [2, 4]:
    print "USAGE: %s <username> [<login> <password>]" % sys.argv[0]
    sys.exit(1)
username = sys.argv[1]
# Optional HTTP Basic credentials (both or neither).
login = sys.argv[2] if len(sys.argv) == 4 else None
password = sys.argv[3] if len(sys.argv) == 4 else None
if login and password:
    install_authentication_handler('http://twitter.com/statuses', login, password)
print_statuses(username)
|
UTF-8
|
Python
| false | false | 2,010 |
13,838,384,639,025 |
fd68c80e84a16b9a0cfa237318d5ace711b1e06f
|
4fa0a68bd498682851d7c3b5c6d7d10157e1a2bc
|
/pmem.py
|
ff404f0d092d54e3a97f58085a8ce1a1e26a6c45
|
[] |
no_license
|
JuneJulyAugust/pmem
|
https://github.com/JuneJulyAugust/pmem
|
8bbab86a01a8928f9cfbf0970c5411035c763ced
|
fd837943bfdfc782261d3fc87d34f647cc4457d7
|
refs/heads/master
| 2021-04-14T15:58:18.690363 | 2014-10-23T08:17:17 | 2014-10-23T08:17:17 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#! /usr/bin/env python
import re
#import getopt, sys
import argparse
import os
import time
# Graphviz / HTML-like-label fragments used to assemble the output .dot file.
NEW_LINE="\n"
FILE_NAME = "tmp.dot"  # NOTE(review): appears unused in this file
GRAPH_HEADER = "graph i {\nnode [shape=\"none\"]\nrankdir=LR\n"
GRAPH_FOOTER = "}\n"
# Outer table holding one row per memory segment.
TABLE_MAIN_HEADER = "Tab [label =<<TABLE BORDER=\"0\" CELLBORDER=\"0\" CELLSPACING=\"0\" CELLPADDING=\"0\">"
TABLE_MAIN_FOOTER = "</TABLE>>]"
# Inner helper table showing the begin/end addresses of a segment.
TABLE_HELP_BEGIN="<TABLE BORDER=\"0\" CELLBORDER=\"0\" CELLSPACING=\"1\" CELLPADDING=\"0\">"
TABLE_HELP_END="</TABLE>"
TR_BEGIN="<TR>"
TR_END="</TR>"
TD_BEGIN="<TD>"
TD_END="</TD>"
class ProcMapElement:
    """One mapping line parsed from /proc/<pid>/maps."""

    def __init__(self, addrB_, addrE_, perm_, offset_, dev_, inode_, name_):
        # Begin/end addresses arrive as bare hex strings (no 0x prefix).
        self.addrB, self.addrE = int(addrB_, 16), int(addrE_, 16)
        self.perm, self.offset, self.dev = perm_, offset_, dev_
        self.inode, self.name = inode_, name_
def generateTdTop(addr):
    """Top address-label cell; strips Python 2's trailing 'L' from hex()."""
    label = hex(addr).rstrip("L")
    return ('<TR><TD VALIGN="TOP" ALIGN="LEFT"><FONT POINT-SIZE="4">'
            + label + '</FONT></TD></TR>')
def generateTdMiddle(height, data=str()):
    """Middle spacer cell whose HEIGHT scales with the segment size."""
    return '<TR><TD ALIGN="LEFT" HEIGHT="%s">%s</TD></TR>' % (height, data)
def generateTdBottom(addr):
    """Bottom address-label cell; strips Python 2's trailing 'L' from hex()."""
    label = hex(addr).rstrip("L")
    return ('<TR><TD VALIGN="BOTTOM" ALIGN="LEFT"><FONT POINT-SIZE="4">'
            + label + '</FONT></TD></TR>')
def generateSegment(data=str()):
    """Bordered cell holding the segment's name and permissions."""
    return '<TD BORDER="1">%s</TD>' % (data,)
def generateRow(begin, end, data=str(), height=0):
    """Emit one outer-table row for the address range [begin, end)."""
    # Cap very tall segments so the rendered graph stays readable.
    height = min(height, 100)
    addr_column = (TABLE_HELP_BEGIN + generateTdTop(end) + generateTdMiddle(height)
                   + generateTdBottom(begin) + TABLE_HELP_END)
    return (NEW_LINE + TR_BEGIN + NEW_LINE + generateSegment(data) + NEW_LINE
            + TD_BEGIN + NEW_LINE + addr_column + NEW_LINE + TD_END + TR_END + NEW_LINE)
def generateUnused(addrB, addrE):
    # Synthesize a pseudo-segment ("----" permissions, name "unused")
    # covering an unmapped gap between two real mappings.
    return ProcMapElement(addrB, addrE, "----", 0, 0, 0, "unused")
def fillHoles(elements):
    # Insert synthetic "unused" segments wherever consecutive mappings are
    # not contiguous.  The list is expected in descending address order
    # (callers reverse it first).
    # NOTE: `copy` merely aliases `elements`, so the insertions mutate the
    # caller's list in place as well as being returned.
    copy = elements
    # Step is 2 because every insert shifts the following indices by one.
    # NOTE(review): hex(...)[2:-1] strips the "0x" prefix plus a trailing
    # character -- the 'L' suffix of Python 2 longs.  Under Python 3 (or for
    # small ints on py2) this would chop the last hex digit; confirm inputs.
    for i in range(1, len(elements)+1, 2):
        if elements[i-1].addrB != elements[i].addrE:
            copy.insert(i, generateUnused(hex(elements[i].addrE)[2:-1], hex(elements[i-1].addrB)[2:-1]))
    return copy
def getSegmentsList(procFile):
    """Parse a /proc/<pid>/maps file into a list of ProcMapElement records.

    Exits the process with status 1 if the file cannot be read or yields
    no mappings.
    """
    segments = list()
    try:
        maps_file = open(procFile, "r")
        for line in maps_file:
            fields = line.split()
            # Field layout: address perms offset dev inode [pathname]
            addrBegin, addrEnd = fields[0].split("-")
            perms = fields[1]
            offset = fields[2]
            dev = fields[3]
            inode = fields[4]
            pathname = fields[5] if len(fields) > 5 else str()
            segments.append(ProcMapElement(addrBegin, addrEnd, perms,
                                           offset, dev, inode, pathname))
        maps_file.close()
    except IOError:
        print("Problem with ("+procFile+") file. Is the process still running??")
        exit(1)
    if not segments:
        print("Problem with ("+procFile+") file. It seems to be empty (or wrong format)!!!")
        exit(1)
    return segments
def formSegments(stack):
    #order in maps file is reversed
    stack.reverse();
    # fillHoles mutates `stack` in place (it aliases its argument), so the
    # rebinding below is redundant but harmless: callers observe the
    # inserted gap segments through the in-place inserts, not this name.
    stack = fillHoles(stack)
def generateDot(stack):
    """Render the segment list as a complete Graphviz .dot document."""
    # One outer-table row per segment; row height is the size in 4 KiB pages.
    rows = [generateRow(seg.addrB, seg.addrE,
                        seg.name + "<BR/>" + seg.perm,
                        (seg.addrE - seg.addrB) / 4096)
            for seg in stack]
    return GRAPH_HEADER + TABLE_MAIN_HEADER + "".join(rows) + TABLE_MAIN_FOOTER + GRAPH_FOOTER
def saveOutputFile(outFile, content):
    """Write *content* to *outFile*, truncating any existing file.

    Uses a context manager so the handle is closed even when write()
    raises (the original leaked the file object on a write error).
    """
    with open(outFile, "w") as out:
        out.write(content)
def main():
    """Parse CLI args, read /proc/<pid>/maps and write a Graphviz .dot file."""
    # Default to the current process's own map.
    pid="self"
    parser = argparse.ArgumentParser()
    #group1 = parser.add_mutually_exclusive_group()
    #group1.add_argument("-p", "--pid", type=int, help="process id nr")
    #group1.add_argument("-n", "--name", help="process name")
    parser.add_argument("-p", "--pid", type=int, help="process id nr")
    args = parser.parse_args()
    if args.pid:
        pid=str(args.pid)
    #if args.name:
    #    pid=args.name
    procFile="/proc/"+pid+"/maps"
    segments = getSegmentsList(procFile)
    # Reverses the list and fills unmapped gaps, all in place.
    formSegments(segments)
    dotContent = generateDot(segments)
    # Output name: "<pid>_<unix time>.dot" in the current directory.
    outFile = pid+"_"+str((time.time()))+".dot"
    saveOutputFile(outFile, dotContent)
    print "Out saved to", outFile

if __name__ == "__main__":
    main()
|
UTF-8
|
Python
| false | false | 2,014 |
3,169,685,901,724 |
3312f7f2bee6c2e2097a7d239af75d80aca0d1c1
|
74d8510e4c06a015fcd10b4024e8a04cb918fce3
|
/database.py
|
8e1b0c7ea0356be230606589dbf6f76d5ccf9ff6
|
[] |
no_license
|
liuzhida/video_search
|
https://github.com/liuzhida/video_search
|
d4b0e8092300ef70058034ab9082fbef314639c5
|
d0449caf4f2e2d4808118f6ef164ccb6b74a6d7a
|
refs/heads/master
| 2021-01-16T18:31:06.247940 | 2013-05-10T11:21:13 | 2013-05-10T11:21:13 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#import tornado.httpserver
#import tornado.ioloop
#import tornado.web
#import tornado.options
import tornado.database
import time
#from tornado import gen
#from tornado import httpclient
#from tornado.options import define, options
#from tornado.escape import json_encode
class MysqlHander: # Database access helper; uses tornado's bundled database module
    """Fetch the current day's query keywords from MySQL."""

    def __init__(self):
        # NOTE(review): hard-coded credentials -- consider moving to config.
        self.db = tornado.database.Connection(
            "192.168.0.195", "videoSearch", "videosearch", "gr2JZjhB")

    def getword(self):
        """Return today's distinct query strings (order not preserved)."""
        today = time.strftime("%Y-%m-%d", time.localtime())
        rows = self.db.query("select query from DailyQueryRank where date = '%s 00:00:00'" % today)
        words = [row['query'] for row in rows]
        return list(set(words))
if __name__ == '__main__':
    # Manual smoke test: print today's keywords.
    db = MysqlHander()
    title = db.getword()
    for i in title:
        print i
|
UTF-8
|
Python
| false | false | 2,013 |
12,652,973,688,469 |
44a9ce7494894836021f4f8c31e5a6fec9a6dbe4
|
a6291c3748a44709bd9943f7bd6a3d3ec5fd6820
|
/renamep.py
|
1b891041b4bf0003ea7d607cec55b71dacea6f14
|
[] |
no_license
|
changchengdecao/python
|
https://github.com/changchengdecao/python
|
cac14d08c2a20c584e0d07ce15e6d0eb897e1bd6
|
0c6570db836f6f98015d2887984d258da5174327
|
refs/heads/master
| 2021-01-25T12:14:21.077793 | 2014-11-23T14:57:23 | 2014-11-23T14:57:23 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
for path,dirs,files in os.walk("./image"):
for items in files:
name = items[-7:-5]+".jpg"
#print name
#print path
os.rename(path+"/"+items,path+"/"+name)
|
UTF-8
|
Python
| false | false | 2,014 |
9,354,438,781,168 |
40b3d620f57c6a6ecb11eac606890e6a47c63d62
|
9b8a1b043bb8f1877031737eb536b45483ceeedb
|
/LDRA_glh2csv_dc.py
|
b91f469d6910124f5b11bab9c07df7d7a16f872c
|
[] |
no_license
|
vfinotti/LDRA_glh2csv
|
https://github.com/vfinotti/LDRA_glh2csv
|
7e3539dcb16bd714fea17d744f603baac9e4b317
|
30a0d7d6561d2ef5f4c25a1b9e32ea24165fb633
|
refs/heads/master
| 2023-03-16T01:57:52.888223 | 2014-01-03T10:32:13 | 2014-01-03T10:32:13 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# 2014-01-03 ppiazi Create - v0.1
# ex) python LDRA_glh2csv_dc.py
import sys # imports the sys module
import csv # imports the csv module
import getopt
import os
import datetime
import time
import LDRA_glh2csv_core
class LDRA_DynamicCoverageParser(LDRA_glh2csv_core.LDRA_GlhParser):
    """Parser for LDRA dynamic-coverage .glh reports.

    Fix over the original: the "constructor" was a plain method named after
    the class (Python never calls it on instantiation) and it referenced the
    nonexistent attribute ``self.LDRA_glh2csv_code``.  It is now a real
    ``__init__`` that delegates to the base class, which is what ran
    implicitly before, so construction behavior is unchanged.
    """

    def __init__(self):
        LDRA_glh2csv_core.LDRA_GlhParser.__init__(self)

    def startParse(self):
        """Begin parsing; returns False when no input file is attached."""
        # txt_file_fo is presumably set by the base class when a report file
        # is opened -- confirm in LDRA_glh2csv_core.
        if self.txt_file_fo is None:
            print("File Object is not ready.")
            return False
if __name__ == "__main__":
    # Smoke test: construct a parser instance.
    ins = LDRA_DynamicCoverageParser()
|
UTF-8
|
Python
| false | false | 2,014 |
12,257,836,711,709 |
2c09e15c8cda23adb096951ab887ab61af7e9cdd
|
bf84f0b6d785391ede7703ef835c54d25d05836b
|
/zgexplorer/monitorviewer.py
|
e4c9da6084c69f9c279d17aae208436d566f2b08
|
[] |
no_license
|
dhananjaysathe/zeitgeist-explorer
|
https://github.com/dhananjaysathe/zeitgeist-explorer
|
bce791a760ba342de7277c425d63ef2081142aec
|
1743f7bceee36b2a4a180831ceb6810c4c1d5885
|
refs/heads/master
| 2020-04-10T04:01:00.604530 | 2013-03-22T17:52:48 | 2013-03-22T17:52:48 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#! /usr/bin/env python
# -.- coding: utf-8 -.-
#
# Zeitgeist Explorer
#
# Copyright © 2012 Manish Sinha <[email protected]>.
# Copyright © 2011-2012 Collabora Ltd.
# By Siegfried-A. Gevatter Pujals <[email protected]>
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import Gtk, Gdk, Pango
from datetime import datetime
from templates import BuiltInFilters
from eventwidgets import EventDetailsViewer, EventsTreeView, EventsViewer
from remote import get_zeitgeist
from zeitgeist.datamodel import Event, Subject, Interpretation, \
Manifestation, StorageState, ResultType, Symbol
class MonitorViewer(Gtk.VBox):
    """GTK box that installs a Zeitgeist monitor and displays live events."""

    # Zeitgeist client connection (re-assigned per instance in __init__).
    _client = None
    # True while a monitor is installed.
    _is_running = False

    def __init__(self):
        super(MonitorViewer, self).__init__()
        self.ids = []
        self._client = get_zeitgeist()
        self.monitor = None
        # The Entry for this MonitorViewer
        self.entry = None
        self.events = {}
        self.spacing = 6
        self.margin = 12
        # Description label at the top.
        self.desc_entry = Gtk.Label(xalign=0,yalign=0,wrap=True)
        self.pack_start(self.desc_entry, False, False, 6)

        # ButtonBox
        self.hbox = Gtk.HBox(True)
        self.button_box = Gtk.HButtonBox(False)
        self.hbox.pack_start(Gtk.Label(), False, False, 6)
        self.hbox.pack_start(self.button_box, False, False, 6)
        self.hbox.pack_start(Gtk.Label(), False, False, 6)
        self.button_box.set_layout(Gtk.ButtonBoxStyle.START)
        self.pack_start(self.hbox, False, False, 6)

        # Start / Stop / Clear buttons; Stop begins disabled.
        self.start_button = Gtk.Button(image=Gtk.Image.new_from_stock(
            Gtk.STOCK_MEDIA_PLAY,Gtk.IconSize.BUTTON))
        self.start_button.connect("clicked", self.start_monitor)
        self.button_box.pack_start(self.start_button, False, False, 6)
        self.stop_button = Gtk.Button(image= Gtk.Image.new_from_stock(
            Gtk.STOCK_MEDIA_STOP,Gtk.IconSize.BUTTON))
        self.stop_button.connect("clicked", self.stop_monitor)
        self.stop_button.set_sensitive(False)
        self.button_box.pack_start(self.stop_button, False, False, 6)
        self.clear = Gtk.Button(image=Gtk.Image.new_from_stock(
            Gtk.STOCK_CLEAR,Gtk.IconSize.BUTTON))
        self.clear.connect("clicked", self.clear_events)
        self.button_box.pack_start(self.clear, False, False, 6)

        # The event list itself.
        self.viewer = EventsViewer()
        self.pack_start(self.viewer, True, True, 6)
        self.show_all()

    def map(self, template):
        # Bind this viewer to a template entry.  NOTE(review): the entry is
        # indexed as entry[2]/entry[3] in start_monitor -- presumably
        # (..., ..., event template, time range); confirm against the
        # templates module.
        self.entry = template

    def monitor_insert(self, time_range, events):
        """Monitor callback: append newly inserted events to the viewer."""
        self.viewer.insert(events)

    def monitor_delete(self, time_range, event_ids):
        # FIXME: change row background to red or something
        pass

    def clear_events(self, button):
        """Clear-button handler: empty the event list."""
        self.viewer.clear()

    def start(self):
        """Programmatic equivalent of pressing Start."""
        self.start_monitor(None)

    def start_monitor(self, button):
        """Install the Zeitgeist monitor and flip button sensitivity."""
        self.start_button.set_sensitive(False)
        self.stop_button.set_sensitive(True)
        self._is_running = True
        self.monitor = self._client.install_monitor(self.entry[3], \
            [self.entry[2]], self.monitor_insert, self.monitor_delete)

    def stop_monitor(self, button):
        """Remove the monitor, reset the buttons and clear the list."""
        self.start_button.set_sensitive(True)
        self.stop_button.set_sensitive(False)
        self._is_running = False
        self._client.remove_monitor(self.monitor)
        self.viewer.clear()

    def monitor_clear(self, button):
        pass

    def is_monitor_running(self):
        """Return True while the monitor is installed."""
        return self._is_running

    def monitor_stop(self):
        # NOTE(review): self.stop is never defined on this class (the button
        # is self.stop_button); stop_monitor ignores its argument, so this
        # raises AttributeError before the call -- confirm and fix upstream.
        self.stop_monitor(self.stop)
|
UTF-8
|
Python
| false | false | 2,013 |
7,645,041,788,703 |
0e9ace58054d6e9973c2cc4b510baddf217cb0e0
|
48aec4e6a0507b36252578ecc64a75ebf7b4ab6b
|
/sts/traffic_generator.py
|
57e6e6177b84d433088dabf73ab2f514826e65a3
|
[] |
no_license
|
colin-scott/sts
|
https://github.com/colin-scott/sts
|
63ff7218a4ef1f297883120f065015ee4fcadab1
|
736a3a68b62ce37a566fea8ed41c8190b85d55bb
|
refs/heads/master
| 2020-05-30T22:11:34.635718 | 2012-12-14T19:17:46 | 2012-12-14T19:17:46 | 7,135,013 | 2 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from pox.lib.packet.ethernet import *
from pox.lib.packet.ipv4 import *
from pox.lib.packet.icmp import *
from sts.dataplane_traces.trace import DataplaneEvent
import random
class TrafficGenerator (object):
    """
    Generate sensible randomly generated (openflow) events
    """

    def __init__(self, random=random.Random()):
        # NOTE(review): the default Random() is evaluated once at class
        # definition time, so all default-constructed generators share one
        # RNG instance -- confirm that sharing is intended.
        self.random = random
        # Maps packet-type name -> generator bound method.
        self._packet_generators = {
            "icmp_ping" : self.icmp_ping
        }

    def generate(self, packet_type, host):
        """Build a *packet_type* packet, send it out a random interface of
        *host*, and return a DataplaneEvent describing the injection.

        Raises AttributeError for unknown packet types and RuntimeError when
        the host has no interfaces.
        """
        if packet_type not in self._packet_generators:
            raise AttributeError("Unknown event type %s" % str(packet_type))
        # Inject the packet through one of the hosts' interfaces
        if len(host.interfaces) < 1:
            raise RuntimeError("No interfaces to choose from on host %s!" %
                               (str(host)))
        interface = self.random.choice(host.interfaces)
        packet = self._packet_generators[packet_type](interface)
        host.send(interface, packet)
        return DataplaneEvent(interface, packet)

    # Generates an ICMP ping, and injects it through the interface
    def icmp_ping(self, interface):
        """Return an ethernet frame carrying an ICMP echo request sourced at
        *interface* with a random destination MAC and IPs."""
        # randomly choose an in_port.
        e = ethernet()
        e.src = interface.hw_addr
        # TODO(cs): need a better way to create random MAC addresses
        # TODO(cs): allow the user to specify a non-random dst address
        # NOTE(review): `struct` is not imported in this module; presumably
        # it arrives via one of the star imports above -- confirm.
        e.dst = EthAddr(struct.pack("Q",self.random.randint(1,0xFF))[:6])
        e.type = ethernet.IP_TYPE
        ipp = ipv4()
        ipp.protocol = ipv4.ICMP_PROTOCOL
        # Prefer one of the interface's own IPs when it declares any.
        if hasattr(interface, 'ips'):
            ipp.srcip = self.random.choice(interface.ips)
        else:
            ipp.srcip = IPAddr(self.random.randint(0,0xFFFFFFFF))
        ipp.dstip = IPAddr(self.random.randint(0,0xFFFFFFFF))
        ping = icmp()
        ping.type = TYPE_ECHO_REQUEST
        ping.payload = "PingPing" * 6
        ipp.payload = ping
        e.payload = ipp
        return e
|
UTF-8
|
Python
| false | false | 2,012 |
6,150,393,192,928 |
9dbfd737a0bcccdd4c794438fb8ef03fa3ac0a42
|
4569d707a4942d3451f3bbcfebaa8011cc5a128d
|
/tractweakuiplugin/0.11/tractweakui/web_ui.py
|
f3a0bdec1e237461ff5ce776e62762bc8d93c581
|
[] |
no_license
|
woochica/trachacks
|
https://github.com/woochica/trachacks
|
28749b924c897747faa411876a3739edaed4cff4
|
4fcd4aeba81d734654f5d9ec524218b91d54a0e1
|
refs/heads/master
| 2021-05-30T02:27:50.209657 | 2013-05-24T17:31:23 | 2013-05-24T17:31:23 | 13,418,837 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
#----------------------------------------------------------------------------
# Name: web_ui.py
# Purpose: The TracTweakUI Trac plugin handler module
#
# Author: Richard Liao <[email protected]>
#
#----------------------------------------------------------------------------
from trac.core import *
from trac.db import DatabaseManager
from trac.web.chrome import *
from trac.util.html import html
from trac.web import IRequestHandler
from trac.web.api import RequestDone, HTTPException
from trac.web.api import ITemplateStreamFilter
from trac.admin import IAdminPanelProvider
from trac.perm import IPermissionRequestor
from trac.web.chrome import add_stylesheet, add_script
from trac.util.text import to_unicode
from genshi.filters.transform import Transformer
from trac.util import Markup
from pkg_resources import resource_filename
import sys, os
import re
import time
import inspect
import textwrap
from utils import *
from model import schema, schema_version, TracTweakUIModel
__all__ = ['TracTweakUIModule']
class TracTweakUIModule(Component):
implements(ITemplateProvider,
IRequestHandler,
ITemplateStreamFilter,
IAdminPanelProvider,
IEnvironmentSetupParticipant,
IPermissionRequestor,
#INavigationContributor,
)
# ITemplateProvider
def get_templates_dirs(self):
return [resource_filename(__name__, 'templates')]
def get_htdocs_dirs(self):
return [('tractweakui', resource_filename(__name__, 'htdocs'))]
# IPermissionRequestor methods
def get_permission_actions(self):
actions = ['TRACTWEAKUI_VIEW', 'TRACTWEAKUI_ADMIN', ]
return actions
# IEnvironmentSetupParticipant methods
    def environment_created(self):
        """IEnvironmentSetupParticipant: create tables and version marker."""
        # Create the required tables
        db = self.env.get_db_cnx()
        connector, _ = DatabaseManager(self.env)._get_connector()
        cursor = db.cursor()
        # `schema` is the table list imported from the model module.
        for table in schema:
            for stmt in connector.to_sql(table):
                cursor.execute(stmt)

        # Insert a global version flag
        cursor.execute("INSERT INTO system (name,value) VALUES ('tractweakui_version',%s)", (schema_version,))
        db.commit()
def environment_needs_upgrade(self, db):
cursor = db.cursor()
cursor.execute("SELECT value FROM system WHERE name='tractweakui_version'")
row = cursor.fetchone()
if not row or int(row[0]) < schema_version:
return True
def upgrade_environment(self, db):
cursor = db.cursor()
cursor.execute("SELECT value FROM system WHERE name='tractweakui_version'")
row = cursor.fetchone()
if not row:
self.environment_created()
current_version = 0
else:
current_version = int(row[0])
from tractweakui import upgrades
for version in range(current_version + 1, schema_version + 1):
for function in upgrades.map.get(version):
print textwrap.fill(inspect.getdoc(function))
function(self.env, db)
print 'Done.'
cursor.execute("UPDATE system SET value=%s WHERE name='tractweakui_version'", (schema_version,))
self.log.info('Upgraded TracTweakUI tables from version %d to %d',
current_version, schema_version)
# IAdminPanelProvider methods
def get_admin_panels(self, req):
if 'TRACTWEAKUI_ADMIN' in req.perm:
yield ('ticket', 'Ticket System', 'tractweakui_admin', 'TracTweakUI Admin')
def render_admin_panel(self, req, cat, page, path_info):
req.perm.assert_permission('TRACTWEAKUI_ADMIN')
data = {}
data["page"] = page
data["encode_url"] = encode_url
#print cat, page, path_info
# Analyze url
action = ""
if path_info:
try:
action, args = path_info.split('?', 1)
action = action.strip("/")
except:
action = path_info.strip("/")
args = None
if action:
if action == "edit_path_pattern":
# edit path_pattern
if req.method == 'POST':
# TODO
if 'save' in req.args:
# save filter
path_pattern = req.args.get("path_pattern", "").strip()
path_pattern_orig = req.args.get("path_pattern_orig", "").strip()
self._save_path_pattern(req)
req.redirect(req.abs_href.admin(cat, page))
elif 'delete' in req.args:
# delete filter
path_pattern = req.args.get("path_pattern", "").strip()
self._del_path_pattern(req)
req.redirect(req.abs_href.admin(cat, page))
else:
# list filters
path_pattern = req.args.get("path_pattern", "").strip()
data["filter_names"] = self._get_filters()
data["path_pattern"] = req.args.get("path_pattern", "").strip()
#print data
return 'tractweakui_admin_list_filter.html', data
elif action.startswith("edit_filter_script"):
# edit script
if req.method == 'POST':
if 'save' in req.args:
# save filter
self._save_tweak_script(req)
#req.redirect(req.abs_href.admin(cat, page, path_info))
path_pattern = req.args.get("path_pattern", "").strip()
data["filter_names"] = self._get_filters()
data["path_pattern"] = req.args.get("path_pattern", "").strip()
#print data
return 'tractweakui_admin_list_filter.html', data
elif 'load_default' in req.args:
# load_default js script
data['path_pattern'] = req.args.get("path_pattern", "").strip()
data['filter_name'] = req.args.get("filter_name", "").strip()
data['tweak_script'] = self._load_default_script(req)
#print data
return 'tractweakui_admin_edit_filter.html', data
else:
# display filter details
path_pattern = req.args.get("path_pattern", "").strip()
filter_name = req.args.get("filter_name", "").strip()
tweak_script = TracTweakUIModel.get_tweak_script(self.env, path_pattern, filter_name)
data['tweak_script'] = tweak_script
data['path_pattern'] = path_pattern
data['filter_name'] = filter_name
return 'tractweakui_admin_edit_filter.html', data
elif action == "add_path_pattern":
# add path pattern
if req.method == 'POST':
if 'add' in req.args:
self._add_path_pattern(req)
req.redirect(req.abs_href.admin(cat, page))
else:
# list all path patterns
data["path_patterns"] = TracTweakUIModel.get_path_patterns(self.env)
return 'tractweakui_admin_list_path.html', data
# ITemplateStreamFilter
def filter_stream(self, req, method, filename, stream, data):
try:
# get all path patterns
path_patterns = TracTweakUIModel.get_path_patterns(self.env)
# try to match pattern
for path_pattern in path_patterns:
if re.match(path_pattern, req.path_info):
break
else:
return stream
filter_names = TracTweakUIModel.get_path_filters(self.env, path_pattern)
for filter_name in filter_names:
self._apply_filter(req, path_pattern, filter_name)
js_files = TracTweakUIModel.get_path_scripts(self.env, path_pattern)
if js_files:
script = ";\n".join(js_files)
else:
script = ""
stream = stream | Transformer('head').append(tag.script(Markup(script), type="text/javascript")())
return stream
except:
return stream
# IRequestHandler methods
def match_request(self, req):
return False
def process_request(self, req):
filter_base_path = os.path.normpath(os.path.join(self.env.path, "htdocs", "tractweakui"))
if not os.path.exists(filter_base_path):
return self._send_response(req, "")
tweakui_js_path = '/tractweakui/tweakui_js'
if req.path_info.startswith(tweakui_js_path):
path_pattern = urllib.unquote(req.path_info[len(tweakui_js_path) + 1: -3])
js_files = TracTweakUIModel.get_path_scripts(self.env, path_pattern)
if js_files:
script = ";\n".join(js_files)
else:
script = ""
self._send_response(req, script)
# internal methods
def _apply_filter(self, req, path_pattern, filter_name):
# get filter path
filter_path = os.path.normpath(os.path.join(self.env.path, "htdocs", "tractweakui", filter_name))
if not os.path.exists(filter_path):
return
css_files = self._find_filter_files(filter_path, ".css")
js_files = self._find_filter_files(filter_path, ".js")
for css_file in css_files:
add_stylesheet(req, 'site/tractweakui/' + filter_name + "/" + css_file)
for js_file in js_files:
if js_file != "__template__.js":
add_script(req, 'site/tractweakui/' + filter_name + "/" + js_file)
def _find_filter_files(self, filter_path, file_type):
if not os.path.exists(filter_path):
return []
return [file for file in os.listdir(filter_path) if file.endswith(file_type)]
def _get_filters(self):
filter_base_path = os.path.normpath(os.path.join(self.env.path, "htdocs", "tractweakui"))
if not os.path.exists(filter_base_path):
return []
return [file for file in os.listdir(filter_base_path)]
def _send_response(self, req, message):
"""
"""
req.send_response(200)
req.send_header('Cache-control', 'no-cache')
req.send_header('Expires', 'Fri, 01 Jan 1999 00:00:00 GMT')
req.send_header('Content-Type', 'text/x-javascript')
# req.send_header('Content-Length', len(message))
req.end_headers()
if req.method != 'HEAD':
req.write(message)
raise RequestDone
def _add_path_pattern(self, req):
""" add filter
"""
path_pattern = req.args.get("path_pattern", "").strip()
# add to db
TracTweakUIModel.insert_path_pattern(self.env, path_pattern)
def _save_path_pattern(self, req):
""" add filter
"""
path_pattern = req.args.get("path_pattern", "").strip()
path_pattern_orig = req.args.get("path_pattern_orig", "").strip()
# add to db
TracTweakUIModel.save_path_pattern(self.env, path_pattern, path_pattern_orig)
def _del_path_pattern(self, req):
""" del filter
"""
path_pattern = req.args.get("path_pattern", "").strip()
# add to db
TracTweakUIModel.del_path_pattern(self.env, path_pattern)
def _save_tweak_script(self, req):
""" save tweak_script
"""
filter_name = req.args.get("filter_name", "").strip()
path_pattern = req.args.get("path_pattern", "").strip()
tweak_script = req.args.get("tweak_script", "").strip()
# add to db
TracTweakUIModel.save_tweak_script(self.env, path_pattern, filter_name, tweak_script)
def _load_default_script(self, req):
"""
"""
filter_name = req.args.get("filter_name", "").strip()
path_pattern = req.args.get("path_pattern", "").strip()
template_path = os.path.normpath(os.path.join(self.env.path, "htdocs", "tractweakui", filter_name, "__template__.js"))
if not os.path.exists(template_path):
return ""
try:
return to_unicode(open(template_path).read())
except Exception, e:
self.log.error("Load js template failed.", exc_info=True)
return ""
|
UTF-8
|
Python
| false | false | 2,013 |
15,487,652,092,656 |
5577bc6871a818f9eee3141b8d1b568f0f8c4a0b
|
86236a778c2d022cbdee11017c5a8cf7d01111d0
|
/adm_rec/lojas/views.py
|
23985c724d3fdd4910f72733fdc2d6dce63e8651
|
[] |
no_license
|
luckybiason/sist_rec_enter
|
https://github.com/luckybiason/sist_rec_enter
|
dee1b92f48c0f9476a8a1becedb1227222a49816
|
bf68c2bdc7d3c7c87642c309224fd03b0511b5c0
|
refs/heads/master
| 2021-01-23T19:34:51.084015 | 2013-10-30T20:46:48 | 2013-10-30T20:46:48 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#-*- coding: utf-8 -*-
from basiccrud.views import *
from models import *
from django.contrib.auth.decorators import login_required
|
UTF-8
|
Python
| false | false | 2,013 |
6,914,897,349,370 |
ab2bf3f666e0c63141eebb42f8d15d0486127b86
|
469a1cbf3f1a8aa7b9041063481021c0e7d12a4e
|
/simics-3.0-install/simics-3.0.31/amd64-linux/lib/python/mod_gdb_remote_gcommands.py
|
77dbc7eb5f0fff4ad73d5f7a45398865c163e125
|
[
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later",
"GPL-2.0-only"
] |
non_permissive
|
h4ck3rm1k3/RnR-LLC
|
https://github.com/h4ck3rm1k3/RnR-LLC
|
5a42d6c23767a99dba1fd51a1ddbe7ff9086272d
|
eac6de26d8e59c28a64a9d1dcc51751cb6bf144b
|
refs/heads/master
| 2020-12-11T05:38:05.080933 | 2013-08-27T21:21:27 | 2013-08-27T21:21:27 | 29,173,767 | 1 | 0 | null | true | 2015-01-13T05:07:27 | 2015-01-13T05:07:27 | 2013-12-17T23:46:32 | 2013-08-27T21:21:40 | 173,440 | 0 | 0 | 0 | null | null | null |
## Copyright 2004-2007 Virtutech AB
##
## The contents herein are Source Code which are a subset of Licensed
## Software pursuant to the terms of the Virtutech Simics Software
## License Agreement (the "Agreement"), and are being distributed under
## the Agreement. You should have received a copy of the Agreement with
## this Licensed Software; if not, please contact Virtutech for a copy
## of the Agreement prior to using this Licensed Software.
##
## By using this Source Code, you agree to be bound by all of the terms
## of the Agreement, and use of this Source Code is subject to the terms
## the Agreement.
##
## This Source Code and any derivatives thereof are provided on an "as
## is" basis. Virtutech makes no warranties with respect to the Source
## Code or any derivatives thereof and disclaims all implied warranties,
## including, without limitation, warranties of merchantability and
## fitness for a particular purpose and non-infringement.
from cli import *
def gdb_remote(port, cpu):
    """CLI handler: create a ``gdb-remote`` object listening on *port*,
    attached to *cpu* (falls back to the currently selected processor).

    Errors are reported to stdout instead of raised, matching the Simics
    CLI convention.  (Python 2 ``except X, msg`` syntax.)
    """
    if not cpu:
        try:
            cpu = current_processor()
        except Exception, msg:
            print msg
            SIM_command_has_problem()
            return
    try:
        SIM_create_object("gdb-remote", "gdb-remote-%d" % port,
                          [["processor", cpu], ["listen", port]])
    except LookupError, msg:
        # Most likely the gdb-remote module is not loaded/available.
        print "Failed creating a gdb-remote object: %s" % msg
        print "Make sure the gdb-remote module is available."
    except Exception, msg:
        print "Could not create a gdb-remote object: %s" % msg
# Register the legacy `gdb-remote` CLI command.  The `doc` string is
# user-visible help markup and is kept byte-for-byte as written.
new_command("gdb-remote", gdb_remote,
            args = [arg(uint32_t, "port", "?", 9123),
                    arg(obj_t("processor", "processor"), "cpu", "?", "")],
            type = ["Symbolic Debugging", "Debugging"],
            short = "start gdb-remote",
            doc = """
Starts listening to incoming connection requests from GDB sessions (provided
that a configuration has been loaded). Simics will listen to TCP/IP requests on
port <arg>port</arg>, or 9123 by default. The GDB session will operate on the
specified <arg>cpu</arg>, or the currently selected cpu by default. Use the gdb
command <b>target remote <i>host</i></b><b>:<i>port</i></b> to connect to
Simics.
""", filename="/mp/simics-3.0/src/extensions/gdb-remote/gcommands.py", linenumber="40")
def new_gdb_remote(name, port, cpu, architecture):
    """CLI handler: create a named gdb session object.

    Defaults: *cpu* is the currently selected processor; *architecture*
    is taken from the cpu; *name* is the first free ``gdb<N>`` (N < 100).
    (Python 2 ``except X, msg`` syntax.)
    """
    if not cpu:
        try:
            cpu = current_processor()
        except Exception, msg:
            print msg
            SIM_command_has_problem()
            return
    if not architecture:
        architecture = cpu.architecture
    if not name:
        # Probe gdb0..gdb99 until SIM_get_object fails (name unused).
        for i in range(100):
            name = "gdb%d" % i
            try:
                SIM_get_object(name)
            except:
                break
    try:
        SIM_create_object("gdb-remote", name,
                          [["processor", cpu], ["architecture", architecture],
                           ["listen", port]])
    except LookupError, msg:
        print "Failed creating a gdb-remote object: %s" % msg
        print "Make sure the gdb-remote module is available."
    except Exception, msg:
        print "Could not create a gdb-remote object: %s" % msg
# Register the newer `new-gdb-remote` CLI command (named session objects,
# optional architecture override).  Help text kept byte-for-byte.
new_command("new-gdb-remote", new_gdb_remote,
            args = [arg(str_t, "name", "?", None),
                    arg(uint32_t, "port", "?", 9123),
                    arg(obj_t("processor", "processor"), "cpu", "?", ""),
                    arg(str_t, "architecture", "?", None)],
            type = ["Symbolic Debugging", "Debugging"],
            short = "create a gdb session",
            doc = """
Starts listening to incoming connection requests from GDB sessions
(provided that a configuration has been loaded). Simics will listen to
TCP/IP requests on port specified by <arg>port</arg>, or 9123 by default.
If <arg>port</arg> is given as zero, a random port will be selected. The
GDB session will operate on the specified <arg>cpu</arg>, or the currently
selected cpu by default.
The <arg>architecture</arg> argument can be used to specify a particular
architecture for the GDB session. It should be the architecture name used
by Simics and not the GDB architecture name. For example, if you are
debugging a 32-bit program on a 64-bit x86 processor, you may want to
specify <tt>x86</tt> as <arg>architecture</arg> and run <tt>set architecture i386</tt>
in GDB before connecting. If not given, the architecture of the CPU will
be used.
Use the GDB command <b>target remote <i>host</i></b><b>:<i>port</i></b> to
connect to Simics. """, filename="/mp/simics-3.0/src/extensions/gdb-remote/gcommands.py", linenumber="81")
|
UTF-8
|
Python
| false | false | 2,013 |
4,080,218,965,865 |
78f47d0a7c3561d2aecd9c268edade93f2ae7e9b
|
6155bc63ad5b5de57cd1c23206d73c4e94c6e119
|
/django_server/server/nationbrowse/graphs/views.py
|
f995201b6bd3a3ef3a56352ea24881739e95fad7
|
[] |
no_license
|
Goldenbarqs/cs4970_capstone
|
https://github.com/Goldenbarqs/cs4970_capstone
|
14b9c6fd8a429d36b653cf782719f57b3e1f78c3
|
c39581b12ada2f8482ac5e6ce5f7626610bdde83
|
refs/heads/master
| 2020-12-30T18:57:13.535291 | 2009-12-01T21:57:02 | 2009-12-01T21:57:02 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# coding=utf-8
from cacheutil import safe_get_cache,safe_set_cache
from django.shortcuts import get_object_or_404,render_to_response
from django.http import HttpResponse,Http404
from django.template import RequestContext
from django.db.models.loading import get_model
from mpl_render import histogram
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
def scatterhist_test(request):
    """Render (and cache for a day) a PNG histogram of county population
    vs. violent-crime counts.

    Fixes: ``dict.has_key()`` is deprecated (removed in Python 3) — replaced
    with an ``in`` membership test; ``iteritems()`` replaced with ``items()``
    (identical behaviour here, portable).  A large block of slower dead code
    that was parked in a triple-quoted string has been removed.
    """
    cache_key = "scatterhist_test"
    response = safe_get_cache(cache_key)
    if not response:
        CrimeData = get_model("demographics", "crimedata")
        DemographicData = get_model("demographics", "placepopulation")
        # { place_id: total_population }
        total_pops = dict(DemographicData.objects.filter(place_type__name="county").order_by("place_id").values_list("place_id", "total"))
        # { place_id: violent_crime_count }
        violent_crimes = dict(CrimeData.objects.filter(place_type__name="county").order_by("place_id").values_list("place_id", "violent_crime"))
        values = []
        for place_id, population in total_pops.items():
            # Skip counties with no population data, no crime record, or
            # a zero crime count — same filtering as the original.
            if population == 0:
                continue
            if place_id not in violent_crimes:
                continue
            crime = violent_crimes[place_id]
            if crime == 0:
                continue
            values.append(
                (population, crime)
            )
        fig = histogram(values, "Total Population", "Violent Crimes")
        canvas = FigureCanvas(fig)
        response = HttpResponse(content_type='image/png')
        canvas.print_png(response)
        safe_set_cache(cache_key, response, 86400)
    return response
def scatterhist_test1(request, place_type, slug, source_id=None):
    """Render (and cache for a day) a male-vs-female age-bracket histogram
    PNG for one place, identified by *place_type* and *slug*."""
    cache_key = "scatterhist_test place_type=%s slug=%s source_id=%s" % (place_type, slug, source_id)
    cached = safe_get_cache(cache_key)
    if cached:
        return cached
    model_cls = get_model("places", place_type)
    if not model_cls:
        raise Http404
    place = get_object_or_404(model_cls, slug=slug)
    demo = place.population_demographics
    # Pair up the male/female counterparts of every age field.
    values = [
        (getattr(demo, field[0].replace('age', 'male')),
         getattr(demo, field[0].replace('age', 'female')))
        for field in demo.age_fields
    ]
    fig = histogram(values, "Male", "Female")
    canvas = FigureCanvas(fig)
    response = HttpResponse(content_type='image/png')
    canvas.print_png(response)
    safe_set_cache(cache_key, response, 86400)
    return response
|
UTF-8
|
Python
| false | false | 2,009 |
17,428,977,296,977 |
cf550afa5747b133e71b9d1010390ae34536ed15
|
149df7dee4b00a65fd7edd143e6d5a8791b0f05f
|
/blocks.py
|
2f7e0ef8b2d6fa0579b98f4fa499205c8f50bc7b
|
[] |
no_license
|
PiotrDabkowski/Py3D
|
https://github.com/PiotrDabkowski/Py3D
|
4a65c4344884255996ea4c02cda1af8b25bc8f54
|
cd383f47d22de28171be59690defe66e838d08cb
|
refs/heads/master
| 2020-04-06T06:46:41.424810 | 2014-11-09T20:20:39 | 2014-11-09T20:20:39 | 26,407,756 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import fgeo
from scipy.spatial import ConvexHull as sciConvexHull
from itertools import combinations, izip
from math import pi
from sorting import *
class ConvexHull:
    """Convex hull of a set of fgeo vertices, with per-face Plane objects.

    Faces are oriented so that no hull vertex lies on a face plane's
    "visible" side (outward-facing planes).
    """
    def __init__(self, vertices):
        self.vertices = vertices #reduce them!
        scich=sciConvexHull([v.coords() for v in vertices])
        # Map scipy's index triangles back onto our vertex objects.
        self.faces = [[self.vertices[i] for i in f] for f in scich.simplices]
        self._recalculate_planes()
        for f, p in izip(self.faces, self.planes):
            # Find some hull vertex NOT on this face (v stays bound after
            # the break); if the face plane "sees" it, the winding is
            # inside-out and the face is reversed.
            for v in self.vertices:
                if v not in f:
                    break
            if p.is_visible(v):
                f.reverse()
        self._recalculate_planes()
    def _recalculate_planes(self):
        """Rebuild one Plane per face; reapply the cached color if set."""
        self.planes = [fgeo.Plane(*f) for f in self.faces]
        if hasattr(self, '_color'):
            for p in self.planes:
                p.set_color(self._color)
    def move(self, vec):
        """Translate every vertex by *vec* and refresh the face planes."""
        for v in self.vertices:
            v.move(vec)
        self._recalculate_planes()
    def set_color(self, color):
        # Color is stored and pushed onto planes on every recalculation.
        self._color = color
        self._recalculate_planes()
    def rotate(self, pvec, dvec, angle):
        """Rotate all vertices about the axis (point *pvec*, direction
        *dvec*) by *angle*, then refresh the face planes."""
        for v in self.vertices:
            v.rotate(pvec, dvec, angle)
        self._recalculate_planes()
def separating_plane(ch1, ch2):
    """Locate a face plane of one hull that has the entire other hull on
    its visible side.

    Returns ``(0, i)`` when plane ``i`` of *ch1* separates the hulls, or
    ``(1, i)`` when plane ``i`` of *ch2* does.  Raises when no face plane
    separates them (the hulls appear to intersect).
    """
    for idx, plane in enumerate(ch1.planes):
        if all(plane.is_visible(vert) for vert in ch2.vertices):
            return 0, idx
    for idx, plane in enumerate(ch2.planes):
        if all(plane.is_visible(vert) for vert in ch1.vertices):
            return 1, idx
    raise Exception('Hulls seem to intersect!')
class Polyhedron:
    """A rigid group of convex hulls with precomputed pairwise separating
    planes, used to depth-sort its members for painter's-algorithm drawing."""
    def __init__(self, convex_hulls):
        self.convex_hulls = convex_hulls
        # seps[(a, b)] -> (hull owning the separating plane, plane index);
        # stored under both key orders for symmetric lookup.
        self.seps = {}
        for a, b in combinations(self.convex_hulls, 2):
            ob, i = separating_plane(a, b)
            if ob:
                self.seps[a,b] = b, i
                self.seps[b,a] = b, i
            else:
                self.seps[a,b] = a, i
                self.seps[b,a] = a, i
    def move(self, vec):
        """Translate every member hull by *vec*.

        NOTE(review): ``_recalculate_seps`` is not defined anywhere in this
        file, so calling move() will raise AttributeError — confirm against
        the full source.
        """
        for c in self.convex_hulls:
            c.move(vec)
        self._recalculate_seps()
    def rotate(self, line, angle):
        # Not implemented in the original.
        pass
    def set_color(self, color):
        """Propagate *color* to every member hull."""
        for c in self.convex_hulls:
            c.set_color(color)
    def _is_greater(self, c1, c2):
        # Depth comparison between two member hulls relative to the view
        # point self._point, using the cached separating plane.
        ch, i = self.seps[c1,c2]
        if ch.planes[i].is_visible(self._point):
            if ch is c1:
                return False
            return True
        else:
            if ch is c1:
                return True
            return False
    def return_sorted(self, point):
        '''point is a view point'''
        self._point = point
        return msort(self.convex_hulls, is_greater=self._is_greater)
def sphere(radius, num=21):
    """Approximate a sphere of *radius* as the convex hull of points on
    latitude rings.

    *num* points per ring; the ring count uses ``(num-1)/2`` with Python 2
    integer division, so *num* is expected to be odd.  Vertex append order
    matters to downstream hull construction and is preserved as written.
    """
    vs = []
    p = fgeo.Vector(radius,0,0)
    step = 2*pi/num
    # Rotation axes as (point, direction) pairs.
    y = fgeo.Vector(0,0,0), fgeo.Vector(0,1,0)
    z = fgeo.Vector(0,0,0), fgeo.Vector(0,0,1)
    # Rings tilted "north" of the equator...
    for n in xrange(1, (num-1)/2):
        pd = p.rotated(y[0], y[1], step*n)
        for s in xrange(num):
            vs.append(pd.rotated(z[0], z[1], step*s))
    # ...then the mirrored "south" rings...
    for n in xrange(1, (num-1)/2):
        pd = p.rotated(y[0], y[1], -step*n)
        for s in xrange(num):
            vs.append(pd.rotated(z[0], z[1], step*s))
    # ...the equator ring, and finally the two poles.
    for n in xrange(num):
        vs.append(p.rotated(z[0], z[1], step*n))
    vs.append(p.rotated(y[0],y[1], pi/2))
    vs.append(p.rotated(y[0], y[1], -pi/2))
    return ConvexHull(vs)
def cuboid(x, y, z):
    """Axis-aligned box with one corner at the origin and the opposite
    corner at (x, y, z), built as the convex hull of its eight corners.
    Corner order matches the original construction."""
    corners = [
        fgeo.Vector(0, 0, 0),
        fgeo.Vector(x, 0, 0),
        fgeo.Vector(0, y, 0),
        fgeo.Vector(0, 0, z),
        fgeo.Vector(x, 0, z),
        fgeo.Vector(x, y, 0),
        fgeo.Vector(0, y, z),
        fgeo.Vector(x, y, z),
    ]
    return ConvexHull(corners)
class World:
    """Top-level scene: several Polyhedron objects drawn in a global
    painter's-algorithm order."""
    def __init__(self, polys):
        self.polys = polys
    def _is_greater(self, c1, c2):
        # Depth comparison between hulls from different polyhedra; unlike
        # Polyhedron._is_greater this recomputes the separating plane on
        # every call (no cross-polyhedron cache).
        r, nr = separating_plane(c1, c2)
        if not r:
            if c1.planes[nr].is_visible(self._point):
                return False
            return True
        if c2.planes[nr].is_visible(self._point):
            return True
        return False
    def get_draw_order(self, point):
        """Return all hulls ordered back-to-front for view *point*: each
        polyhedron is sorted internally, then the groups are merged."""
        self._point = point
        return sort_groups([poly.return_sorted(point) for poly in self.polys], is_greater=self._is_greater)
import random
def ranp(n, xr=1.5, yr=1.5, zr=1.5):
return [fgeo.Vector(xr*(random.random()-0.5), yr*(random.random()-0.5), yr*(random.random()-0.5)) for e in xrange(n)]
|
UTF-8
|
Python
| false | false | 2,014 |
3,994,319,632,670 |
432efacd2bbdff8320bd4065a8028f3639ffd485
|
75d7b327f47c362e0a93083b7ed34d536a35a631
|
/Card Shuffle/shuffle_smart.py
|
2a1a69645f10dcd0d97f4eaf66864e28d4adb150
|
[] |
no_license
|
terryli/Solutions
|
https://github.com/terryli/Solutions
|
51a5c0cc4236b7f8e435b1f09622f38749cc39f7
|
b33e026a56debcfe2511f15b2ee65608ae98a5dd
|
refs/heads/master
| 2021-01-01T20:17:21.996316 | 2013-08-11T18:52:22 | 2013-08-11T18:52:22 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
# ---------------
# Given a deck of size N and a cut of size C every time, how many perfect
# shuffles do you need to cut before you get back to the original deck?
#
# A perfect shuffle is defined as taking two stacks of cards and sorting them
# in such a way that you interleave the cards one from each stack.
#
# SMART SOLUTION: Works for any N and C efficiently
#
# Difficulty: 5/5
# ---------------
import sys
def main():
size = int(sys.argv[1])
diff = int(sys.argv[2])
table = {}
completed = 0
count = 0
start = [i for i in xrange(size)]
deck = start
while (True):
count = count + 1
merged = shuffle(deck, size, diff)
for i in range(size):
if (table.has_key(i) == False):
if (start[i] == merged[i]):
table[i] = count
completed = completed + 1
if (completed == size):
print lcmm(table.values())
return
else:
deck = merged
def shuffle(deck, size, diff):
    """One perfect shuffle: cut *diff* cards off the bottom and interleave
    them with the remaining top stack, one card from each in turn."""
    side = deck[0:size-diff]
    main = deck[size-diff:]
    # Python 2 idiom: map(None, a, b) zips with None-padding (like
    # itertools.izip_longest); the comprehension interleaves the two
    # stacks and drops the padding when the stacks differ in length.
    merged = [y for x in map(None,side,main) for y in x if y is not None]
    return merged
def gcd(a, b):
    """Greatest common divisor of *a* and *b* (iterative Euclid)."""
    while b != 0:
        a, b = b, a % b
    return a
def lcm(a, b):
    """Least common multiple of *a* and *b* (Euclid's gcd inlined)."""
    g, r = a, b
    while r:
        g, r = r, g % r
    return a * b // g
def lcmm(counts):
    """Fold lcm over *counts*, giving the LCM of the whole collection."""
    return reduce(lambda acc, value: lcm(acc, value), counts)
# Runs unconditionally at import time; the original has no __main__ guard.
main()
|
UTF-8
|
Python
| false | false | 2,013 |
13,331,578,490,269 |
fdf7494dbb61846be7dbd20fe205268c3b6b64dc
|
f23192f8da0d44dd433f50e00334601093964d73
|
/SimpleCalculator/SimpleCalculator.py
|
d10acd95479305fc41a80bad2dcb325957c0ea5d
|
[] |
no_license
|
bennyhuynh301/Code-Challenge
|
https://github.com/bennyhuynh301/Code-Challenge
|
25256498197c2ea8af6e94fb8a838b3bfb14ab16
|
2c0740affa31e7c07f1ba241736866eea8fe6822
|
refs/heads/master
| 2021-01-01T16:44:57.717966 | 2013-03-15T07:05:29 | 2013-03-15T07:05:29 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import sys
# Every token the tokenizer treats as a one-character symbol.
OPERATORS = ["+", "-", "*", "/", "(", ")"]

def isOperator(token):
    """Return True when *token* is an arithmetic operator or parenthesis."""
    return token in OPERATORS
def isLessPrecedence(operator_1, operator_2):
    """Shift/reduce test used by the parser.

    Returns False exactly when the stacked operator_1 should be reduced
    before shifting operator_2: the two are *different* operators of the
    same level (+/- or */), or operator_1 is multiplicative while
    operator_2 is additive.  Everything else — equal operators and any
    parenthesis — returns True.
    """
    additive = ("+", "-")
    multiplicative = ("*", "/")
    if operator_1 != operator_2:
        if operator_1 in additive and operator_2 in additive:
            return False
        if operator_1 in multiplicative and operator_2 in multiplicative:
            return False
    if operator_1 in multiplicative and operator_2 in additive:
        return False
    return True
def isLeftParen(operator):
    """Return True when the token is an opening parenthesis."""
    return operator in ("(",)
def isRightParen(operator):
    """Return True when the token is a closing parenthesis."""
    return operator in (")",)
def getTokens(exp_string):
    """Split an infix expression string into operand and operator tokens.

    Whitespace is skipped, multi-character operands are accumulated, and a
    sentinel token ``"end"`` is appended for the parser.
    """
    tokens = []
    pending = ""
    for ch in exp_string:
        if ch == " ":
            continue
        if isOperator(ch):
            # Flush any operand collected so far before the operator.
            if pending:
                tokens.append(pending)
                pending = ""
            tokens.append(ch)
        else:
            pending += ch
    if pending:
        tokens.append(pending)
    tokens.append("end")
    return tokens
class TreeNode:
    """Binary expression-tree node.

    Internal nodes hold an operator string with operand subtrees in
    ``left``/``right``; leaves hold a numeric string with both children None.
    """
    def __init__(self, value, left=None, right=None):
        self.value = value
        self.left = left
        self.right = right
class SimpleCalculator:
    """Infix-expression evaluator.

    A shift/reduce pass over the token stream builds a binary expression
    tree, which is then evaluated recursively.

    NOTE(review): evaluation applies ``/`` to ints — floor division under
    Python 2 (this file uses Python 2 ``print`` syntax elsewhere).
    """
    def __init__(self, exp_string):
        expressionTree = self.buildExpressionTree(exp_string)
        self.value = self.evalExpression(expressionTree)
    def buildExpressionTree(self, exp_string):
        """Shift/reduce parse of the tokenized expression into a TreeNode
        tree; returns the root node."""
        operator_stack = list()
        operand_stack = list()
        tokens = getTokens(exp_string)
        for token in tokens:
            if (token == "end"):
                # Input exhausted: reduce everything that remains.
                while (len(operator_stack) != 0):
                    operator = operator_stack.pop()
                    operand_2 = operand_stack.pop()
                    operand_1 = operand_stack.pop()
                    node = TreeNode(operator, operand_1, operand_2)
                    operand_stack.append(node)
            elif isOperator(token):
                if len(operator_stack) == 0:
                    operator_stack.append(token)
                else:
                    if isLeftParen(token):
                        operator_stack.append(token)
                    elif isRightParen(token):
                        # Reduce until the matching "(" is popped.
                        operator = operator_stack.pop()
                        operand_2 = operand_stack.pop()
                        while (not isLeftParen(operator)):
                            operand_1 = operand_stack.pop()
                            node = TreeNode(operator, operand_1, operand_2)
                            operand_2 = node
                            operator = operator_stack.pop()
                        operand_stack.append(operand_2)
                        #print "OPERATOR STACK: " + str(operator_stack)
                        #print "OPERAND STACK: " + str([op.value for op in operand_stack])
                    elif isLessPrecedence(operator_stack[-1], token):
                        # Incoming operator binds tighter (or is "new"):
                        # shift it.
                        operator_stack.append(token)
                    elif not isLessPrecedence(operator_stack[-1], token):
                        # Incoming operator binds no tighter: reduce the
                        # stack down to the nearest "(" first, then shift.
                        operator = operator_stack[-1]
                        while (not isLeftParen(operator)):
                            operand_2 = operand_stack.pop()
                            operand_1 = operand_stack.pop()
                            node = TreeNode(operator, operand_1, operand_2)
                            operand_stack.append(node)
                            operator_stack.pop()
                            if len(operator_stack) != 0:
                                operator = operator_stack[-1]
                            else:
                                break
                        #print "OPERATOR STACK: " + str(operator_stack)
                        #print "OPERAND STACK: " + str([op.value for op in operand_stack])
                        operator_stack.append(token)
            else:
                # Operand token: push a leaf node.
                node = TreeNode(token)
                operand_stack.append(node)
                #print "OPERAND STACK: " + str([op.value for op in operand_stack])
        #print "OPERATOR STACK: " + str(operator_stack)
        return operand_stack[0]
    def evalExpression(self, root):
        """Recursively evaluate the expression tree rooted at *root*."""
        op = root.value
        if not isOperator(op): return int(op)
        else:
            if op == "+": return self.evalExpression(root.left) + self.evalExpression(root.right)
            elif op == "-": return self.evalExpression(root.left) - self.evalExpression(root.right)
            elif op == "*": return self.evalExpression(root.left) * self.evalExpression(root.right)
            elif op == "/": return self.evalExpression(root.left) / self.evalExpression(root.right)
    def getValue(self):
        """Result computed at construction time."""
        return self.value
def main(exp_string):
    """Evaluate *exp_string* and print the result (Python 2 prints)."""
    try:
        cal = SimpleCalculator(exp_string)
        print "RESULT: " + str(cal.getValue())
    except:
        # NOTE(review): bare except also swallows real bugs (e.g.
        # ZeroDivisionError) behind the "not well-formed" message.
        print "ERROR: The expression is not well-formed"
# CLI entry point: the expression is passed as the first argument.
if __name__ == '__main__':
    main(sys.argv[1])
|
UTF-8
|
Python
| false | false | 2,013 |
6,279,242,187,208 |
430899216cbb14b5dac45a9c2626d618830693c3
|
678aa93b553ba7e30cb96a80c3a431b40354b0e9
|
/euler3.py
|
1ea8a957ed5bf3c84e847656ebd5779f89ae9657
|
[] |
no_license
|
michalczaplinski/python-euler
|
https://github.com/michalczaplinski/python-euler
|
2c2fbcf75f15faadee146ccad7d9e7ad25a8ff8d
|
5f0c34e585766ed4154639e300f5fee5e9d928ab
|
refs/heads/master
| 2021-01-10T22:05:55.128101 | 2014-06-16T10:10:49 | 2014-06-16T10:10:49 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def isPrime(n):
    """Return True when *n* is prime.

    Fixes two defects in the original:
    - a stray ``w`` before ``def`` made the module a syntax error;
    - ``else: return True`` sat *inside* the loop, so any odd number was
      reported prime after surviving only the first trial division, and
      n=2 fell through returning None.
    Trial division only needs to run up to sqrt(n); a while loop avoids
    materialising a range (the file otherwise targets Python 2).
    """
    if n < 2:
        return False
    i = 2
    while i * i <= n:
        if n % i == 0:
            return False
        i += 1
    return True
def findFactors(n, factors):
    """Append the prime factors of *n* (with multiplicity) to *factors*.

    Recursive trial division: the smallest prime divisor is appended and
    the function recurses on the quotient; when no divisor is found the
    accumulated list is returned.  Relies on the sibling isPrime().
    (Python 2: uses xrange.)
    """
    for f in xrange(2, n+1):
        if isPrime(f) and n % f == 0:
            factors.append(f)
            return findFactors((n/f), factors)
    return factors
# Driver for Project Euler #3: collect the prime factors of the target
# number and report the largest one.  (Python 2 `print` statement.)
factors = []
factors = findFactors(600851475143, factors)
print max(factors)
|
UTF-8
|
Python
| false | false | 2,014 |
7,335,804,180,938 |
68c5b2662af3c1aaac62c2cf39e4df19a4c96c20
|
39c735f30340d153748e4ad3aa4b0baedbd1e824
|
/desuma4/Python3.py
|
bcb648f75b0f37199bc5853af546a7a294327411
|
[] |
no_license
|
sakaitaka/codeiq
|
https://github.com/sakaitaka/codeiq
|
605dcae9b77d7c801c6ab05383b269de4cb148a5
|
f3898044e7d1523ba0b8e15a61cd3b8b022f0b02
|
refs/heads/master
| 2021-05-28T10:07:40.297916 | 2014-10-04T16:47:03 | 2014-10-04T16:47:03 | 13,526,945 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Code-golf one-liner expanded for readability: for each stride 2..16
# (x*2), print the characters at code points 36, 36+stride, ... below 118
# on a single line.  The `v = range` alias is kept from the original.
v = range
for x in v(1, 9):
    for z in v(36, 118, x * 2):
        print(chr(z), end='')
|
UTF-8
|
Python
| false | false | 2,014 |
7,275,674,622,885 |
e573dca23f6d29ef622c2ee4ac99e1573f2b955e
|
1e344a3cb1b20a6f8fdeaf98c860b3a55975a976
|
/dungeon/dictionary.py
|
bba6d6e234fb6423624d0d638b4017d4b261b563
|
[] |
no_license
|
joshuamarch/Helvetica
|
https://github.com/joshuamarch/Helvetica
|
1ec98ea0b6a51310a35d25fdb02747c592efe475
|
ddcd24f92a6554255ed7ed613c2e65711c3a0fed
|
refs/heads/master
| 2021-01-01T15:40:48.437290 | 2011-06-30T00:03:29 | 2011-06-30T00:03:29 | 1,926,381 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Synonym lists used to map player-typed verbs onto game commands.
navigate = ['go', 'walk', 'head']                                 # movement
pickup = ['pickup', 'pick', 'grab', 'take', 'grasp', 'get']       # item pickup
attack = ['attack', 'kill', 'hurt', 'hit', 'strike']              # combat
|
UTF-8
|
Python
| false | false | 2,011 |
3,530,463,151,102 |
eaf82df750d34e660959781d7b99473b751df2da
|
3fe40f66eba8597a0dcd1c24857f81504c8e015a
|
/app/python/index_html.py
|
13cc87159a70b0a86e1f36f1ebf893cb2840d457
|
[
"MIT"
] |
permissive
|
hagifoo/gae-todo
|
https://github.com/hagifoo/gae-todo
|
c885c5126a71663bc8892780726b1d6d35d78af1
|
e6484f3928f69410be32fd278aa82e8c53a78582
|
refs/heads/master
| 2016-09-06T17:22:51.423947 | 2013-12-23T22:19:41 | 2013-12-23T22:19:41 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
class Handler(webapp.RequestHandler):
def get(self):
self.response.out.write(template.render('html/index.html', {}))
|
UTF-8
|
Python
| false | false | 2,013 |
14,405,320,311,154 |
31e8844b1b45626cdc23175af2c0e0b3b6c79511
|
c320805c80a13af9a9887d78c9584ae16706fbc3
|
/pmlib_server/tests/pm-daemon4-sin-men
|
d34a628a926609e2f974a5edce4e223607394b61
|
[] |
no_license
|
figual/pmlib
|
https://github.com/figual/pmlib
|
c4d06de55ee1c25923db30ebd4c63bec92e9a762
|
f47b99dd7fe69fdff361fc8e34c5f4dcb5823b5c
|
refs/heads/master
| 2021-01-24T06:07:48.376478 | 2013-11-19T14:16:33 | 2013-11-19T14:16:33 | 14,526,510 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
from threading import Thread
import sys, socket, signal, select, struct, os, time, select, exceptions #, daemonize
class PMDevice(Thread):
    """Reader thread for one power-measurement device.

    Continuously unpacks (watts, watt-hours) float pairs from a named pipe
    and accumulates watt samples while at least one attached counter is
    active.  NOTE(review): ``states`` is a module-level mapping defined
    elsewhere in this file (not visible in this chunk) — confirm its keys.
    """
    def __init__(self, devname, pipename, samplessec, verbose= False):
        Thread.__init__(self)
        self.devname= devname
        self.pipename= pipename
        self.samplessec= int(samplessec)
        # Blocking open: requires a writer on the FIFO at some point.
        self.fifo = os.open(self.pipename, os.O_RDONLY)
        self.watts= []
        self.wattshour= 0
        self.running= True
        self.counters= {}
        self.timeout= 10
    def run(self):
        """Read fixed-size samples until a timeout or read error stops us."""
        size= struct.calcsize("ff")
        while self.running:
            try:
                # Wait up to self.timeout seconds for data; treat a timeout
                # the same as an error and shut the thread down.
                ready,_,_ = select.select([self.fifo],[],[], self.timeout)
                if not ready: raise Exception
                else:
                    recv= os.read(self.fifo, size)
                    w, wh= struct.unpack("ff", recv)
                    self.wattshour= wh
                    self.updatepowerdata(w)
            except:
                self.running= False
    def updatepowerdata(self, w):
        """Record sample *w*: reset the series when no counters exist,
        append only while some attached counter is ACTIVE."""
        if not len(self.counters):
            self.watts= [w]
        elif [1 for pc_id, c in self.counters.iteritems() if c.status == states["ACTIVE"]]:
            self.watts.append(w)
    def stop(self):
        # Cooperative shutdown; run() checks the flag each iteration.
        self.running= False
class ClientRequest(Thread):
    """Per-connection worker: speaks a small binary protocol over *client*.

    Each message is an int opcode followed by opcode-specific arguments
    (struct-packed); replies are struct-packed status/values.
    NOTE(review): ``operations``, ``states``, ``counters`` and ``pmdevices``
    are module-level globals defined elsewhere in this file (not visible in
    this chunk) — confirm their shapes.
    """
    def __init__(self, client, address, verbose):
        Thread.__init__(self)
        self.client = client
        self.address = address
        self.verbose= verbose
        self.running= True
    def create_counter(self, devname):
        """Create a Counter on device *devname*; reply with its id, or -1
        (packed as a long) on any failure."""
        try:
            #if self.verbose: sys.stdout.write("Creating new power counter of %s\n" % devname)
            c= Counter(pmdevices[devname]); idc= abs(id(c))
            counters[idc]= c
            pmdevices[devname].counters[idc]= c
            #if self.verbose: sys.stdout.write("Sending new id counter %s\n" % idc)
            return struct.pack("q", idc)
        except:
            return struct.pack("q", long(-1))
    def destroy_counter(self, pc_id):
        """Remove counter *pc_id* from both registries; only INACTIVE
        counters may be destroyed.  Replies packed 0 on success, -1 else."""
        if counters[pc_id].status == states["INACTIVE"]:
            try:
                devname= counters[pc_id].pmdev.devname
                del pmdevices[devname].counters[pc_id]
                del counters[pc_id]
                return struct.pack("i", 0)
            except:
                return struct.pack("i", -1)
        else: return struct.pack("i", -1)
    def receivedata(self, datatype):
        """Read exactly one struct value of format *datatype* from the
        socket and return it."""
        return struct.unpack(datatype, self.client.recv(struct.calcsize(datatype)))[0]
    def receive(self):
        """Read one protocol message; returns (opcode, arg1, arg2).

        Malformed input or an unknown counter id is normalised to the
        ERROR opcode rather than raised.
        """
        try:
            msg_type, arg1, arg2 = self.receivedata("i"), None, None
            if msg_type == operations["CREATE"]:
                # CREATE carries a length-prefixed device-name string.
                lenght = self.receivedata("i")
                arg1= self.client.recv(lenght)
            elif msg_type == operations["SETINTERVAL"]:
                arg1 = self.receivedata("q")
                arg2 = self.receivedata("f")
            elif msg_type == operations["EXIT"]:
                pass
            elif [k for k, v in operations.iteritems() if v == msg_type]:
                # Any other known opcode carries just a counter id.
                arg1 = self.receivedata("q")
            else: raise Exception
            if msg_type not in [operations["CREATE"],operations["EXIT"],operations["ERROR"]] and arg1 not in counters:
                return operations["ERROR"], None, None
            return msg_type, arg1, arg2
        except:
            return operations["ERROR"], None, None
    def run(self):
        """Dispatch loop: one request -> one reply, until EXIT is seen."""
        while self.running:
            msg_type, arg1, arg2= self.receive()
            #print [k for k, r in operations.iteritems() if r == msg_type][0], arg1, arg2
            if msg_type == operations["CREATE"]:
                v= self.create_counter(arg1)
            elif msg_type == operations["START"]:
                v= counters[arg1].start()
            elif msg_type == operations["RESTART"]:
                v= counters[arg1].restart()
            elif msg_type == operations["STOP"]:
                v= counters[arg1].stop()
            elif msg_type == operations["GET"]:
                v= counters[arg1].getvalues()
            elif msg_type == operations["DESTROY"]:
                v= self.destroy_counter(arg1)
            elif msg_type == operations["SETINTERVAL"]:
                v= counters[arg1].setinterval(arg2)
            elif msg_type == operations["ERROR"]:
                v= struct.pack("i", -1)
            elif msg_type == operations["EXIT"]:
                self.running= False
                v= struct.pack("i", 0)
            #self.client.sendall(v)
            self.sendallpacket(v)
        self.client.close()
    def sendallpacket(self, msg):
        """Send *msg* fully, looping over partial sends; raises RuntimeError
        when the peer has closed the connection (send returned 0)."""
        totalsent = 0
        while totalsent < len(msg):
            sent = self.client.send(msg[totalsent:])
            if sent == 0:
                raise RuntimeError
            totalsent = totalsent + sent
class Counter:
    """Per-client energy counter attached to one PMDevice.

    Each start()/restart()..stop() cycle records one "set":
        [start_index, end_index, samples_per_sec, Wh_reference]
    where the indices point into pmdev.watts and Wh_reference is the
    device's cumulative watt-hours when the set began (replaced by the
    set's own consumption in stop()).  Uses the module-level `states`
    table for its life-cycle.
    """
    def __init__(self, pmdev):
        self.pmdev= pmdev
        self.status= states["INACTIVE"]
        self.sets= []                            # recorded measurement sets, see class docstring
        self.samplessec= self.pmdev.samplessec   # requested sampling rate
        self.watts= []
        #self.wattshour= 0
        self.totalwattshour= 0                   # Wh accumulated over all closed sets
    def retval(self, r):
        # All simple protocol replies are a single packed int32.
        return struct.pack("i", int(r))
    def setinterval(self, samplessec):
        # Change the sampling rate; only allowed while INACTIVE.  Requests
        # at/above the device's native rate, or <= 0, fall back to native.
        if self.status == states["INACTIVE"]:
            if samplessec >= self.pmdev.samplessec or samplessec <= 0:
                self.samplessec= self.pmdev.samplessec
            else:
                self.samplessec= samplessec
            return self.retval(0)
        else: return self.retval(-1)
    def start(self):
        # Begin measuring, discarding any previously recorded sets
        # (contrast with restart(), which keeps them).
        if self.status == states["INACTIVE"]:
            self.status= states["ACTIVE"]
            #self.wattshour= self.pmdev.wattshour
            self.sets= [[len(self.pmdev.watts)-1, -1, self.samplessec, self.pmdev.wattshour]]
            self.totalwattshour= 0
            return self.retval(0)
        else: return self.retval(-1)
    def restart(self):
        # Open a new set while keeping the previously recorded ones.
        if self.status == states["INACTIVE"]:
            self.status= states["ACTIVE"]
            #self.wattshour= self.pmdev.wattshour
            self.sets.append([len(self.pmdev.watts)-1, -1, self.samplessec, self.pmdev.wattshour])
            return self.retval(0)
        else: return self.retval(-1)
    def stop(self):
        # Close the current set: fix its end index and turn its Wh
        # reference (slot 3) into the Wh consumed during the set.
        if self.status == states["ACTIVE"]:
            self.status= states["INACTIVE"]
            self.sets[-1][1]= len(self.pmdev.watts)
            #self.totalwattshour+= (self.pmdev.wattshour - self.wattshour)
            #print "CONSUMED SO FAR: ", self.totalwattshour
            wattshour=self.pmdev.wattshour
            self.totalwattshour+= (wattshour - self.sets[-1][3])
            self.sets[-1][3]= wattshour - self.sets[-1][3]
            return self.retval(0)
        else: return self.retval(-1)
    def getvalues(self):
        # Serialise all recorded sets, downsampled to each set's rate:
        #   int32 n_watts, int32 n_offsets, int32[n_offsets] offsets,
        #   float[n_watts] watts, float[n_sets] Wh per set, float total Wh.
        # Only allowed while INACTIVE; otherwise replies int32 -6.
        if self.status == states["INACTIVE"]:
            watts_sets= [0]      # cumulative offsets of each set inside `watts`
            watts= []
            wh_sets= []
            #print self.sets
            for s,e,i,wh in self.sets:
                if e >= -1:  # NOTE(review): always true; presumably meant `e > -1` -- confirm
                    interval= int(round(self.pmdev.samplessec/i))  # downsampling step
                    watts_set= [self.pmdev.watts[w] for w in xrange(s, e, interval)]
                    watts_sets.append(len(watts_set) + watts_sets[-1])
                    watts.extend(watts_set)
                    wh_sets.append(wh)
            msg= struct.pack("i", len(watts) )
            msg+= struct.pack("i", len(watts_sets) )
            msg+= struct.pack("i" * len(watts_sets), *watts_sets )
            msg+= struct.pack("f" * len(watts), *watts )
            msg+= struct.pack("f" * len(wh_sets), *wh_sets )
            msg+= struct.pack("f", self.totalwattshour )
            return msg
        else: return self.retval(-6)
class PMServer:
    """TCP front-end for the power-meter devices.

    Loads the device table from a tab-separated config file, opens a
    listening socket and spawns one ClientRequest thread per connection.
    Populates the module-level `pmdevices` registry.
    """
    def __init__(self, host='192.168.1.1', clientport=6526, pmdevicesfile="./pm-devices.conf", verbose=False):
        # SIGALRM guards the device probe in initdevices(); SIGINT stops cleanly.
        signal.signal(signal.SIGALRM, self.timeouthandler)
        signal.signal(signal.SIGINT, self.sighandler)
        self.host= host
        self.clientport= clientport
        self.verbose= verbose
        self.initdevices(pmdevicesfile)
        self.init_socket(self.host, self.clientport)
        self.running = True
    def stop(self):
        # Stop every device sampler thread, then the accept loop.
        for devname,dev in pmdevices.iteritems():
            dev.stop()
        self.running= False
        #self.clientsocket.close()
    def sighandler(self, signum, frame):
        # SIGINT: orderly shutdown.
        self.stop()
    def timeouthandler(self, signum, frame):
        # SIGALRM fired while probing a device in initdevices().
        # (Spanish: "Cannot read from some of the devices!")
        sys.stderr.write("No se puede leer de algunos de los dispositivos!\n")
        sys.exit(-1)
    def initdevices(self, pmdevicesfile):
        # Each config line is tab-separated PMDevice constructor arguments;
        # field [1] must be an existing device node.  A 1-second alarm
        # bounds each device probe.  Any failure aborts the whole server.
        try:
            f= open(pmdevicesfile, "r")
            lines= f.readlines()
            for l in lines:
                l= l[:-1]
                dev= l.split("\t")
                print dev
                if not os.path.exists(dev[1]):
                    sys.stderr.write("Device %s not found\n" % dev[0])
                    raise Exception
                signal.alarm(1)
                pmdevices[dev[0]]= PMDevice(*dev)
                signal.alarm(0)
        except:
            # NOTE(review): bare except silently stops on any config error.
            self.stop()
    def init_socket(self, host, port):
        # Standard listening TCP socket with address reuse.
        self.clientsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.clientsocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        #if self.verbose: sys.stdout.write("Server listening at ('%s', %s)\n" % (host, port))
        self.clientsocket.bind((host, port))
        self.clientsocket.listen(5)
    def run(self):
        # Start every device sampler, then accept clients until stopped.
        for devname,dev in pmdevices.iteritems(): dev.start()
        while self.running:
            # try:
            client, address = self.clientsocket.accept()
            #if self.verbose: sys.stdout.write("Clientsocket: got connection %d from %s\n" % (client.fileno(), address))
            client_request = ClientRequest(client, address, self.verbose)
            client_request.start()
            # except socket.error, e:
            #     continue
def main(verbose= True):
    """Initialise the shared module state and run the power-meter server.

    Sets up the wire-protocol opcode table, the counter life-cycle states
    and the (initially empty) counter/device registries, then blocks in
    the server's accept loop.
    """
    global operations, states
    global counters, pmdevices
    # Wire-protocol message codes shared with the clients.
    operations = {
        "CREATE": 0,
        "START": 1,
        "RESTART": 2,
        "STOP": 3,
        "GET": 4,
        "DESTROY": 5,
        "SETINTERVAL": 6,
        "EXIT": 7,
        "ERROR": -1,
    }
    # Counter life-cycle states.
    states = {"INACTIVE": 0, "ACTIVE": 1}
    counters = {}
    pmdevices = {}
    pmdevicesconf = "./pm-devices.conf"
    host = "192.168.1.1"
    clientport = 6526
    server = PMServer(host, clientport, pmdevicesconf, verbose)
    server.run()
if __name__ == "__main__":
    # Daemonisation is disabled; the server runs in the foreground.
    #daemonize.deamonize(stderr="/var/log/pm-daemon.err",pidfile="/var/run/pm-daemon.pid",startmsg="")
    main()
|
UTF-8
|
Python
| false | false | 2,013 |
14,628,658,614,191 |
82a2e3603e646fd0f46bb0be4ef9e8c1aa6c7f50
|
a063ac4bdaee983d4111f4660402affe263624c2
|
/cparser.py
|
acde22eedfa8dae2f5832660b35d8cacc27df763
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
non_permissive
|
MilesQLi/Python-C-Parser
|
https://github.com/MilesQLi/Python-C-Parser
|
69ed125369d03402ad7898a01104eafc38d8f5b1
|
392b13710e1b78cc6ac7fb676c90fef2ab000f8a
|
refs/heads/master
| 2021-01-04T15:03:31.355370 | 2012-06-17T22:12:23 | 2012-06-17T22:12:23 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import string
import sys
# C Keywords
# ------------------------------------------------------------------------
# C keyword and operator tables used by the lexer and parser.
types = ["short", "int", "long", "float", "double", "char", "void", "bool", "FILE"]
containers = ["enum", "struct", "union", "typedef"]
modifiers = [ "const", "volatile", "extern", "static", "register", "signed", "unsigned"]
flow = [ "if", "else",
         "goto",
         "case", "default",
         "continue", "break", ]
# BUGFIX: `"while" "switch"` was missing a comma, so Python concatenated
# the two literals into the single bogus keyword "whileswitch" and
# neither `while` nor `switch` was recognised as a loop keyword.
loops = ["for", "do", "while", "switch", ]
keywords = types + containers + modifiers + flow + loops + [ "return", "sizeof" ]
prefix_operations = ["-","+","*","&","~","!","++","--"]
postfix_operations = ["++", "--"]
selection_operations = [".","->"]                    # Left-to-Right
multiplication_operations = ["*","/","%"]            # Left-to-Right
addition_operations = ["+","-"]                      # Left-to-Right
bitshift_operations = ["<<",">>"]                    # Left-to-Right
relation_operations = ["<","<=",">",">="]            # Left-to-Right
equality_operations = ["==","!="]                    # Left-to-Right
bitwise_operations = ["&", "^", "|"]                 # Left-to-Right
logical_operations = ["&&","||"]
ternary_operations = ["?",":"]
# Ternary () ? () : ()
# BUGFIX: `"%="` was missing its trailing comma, so "%=" and "<<=" were
# concatenated into the bogus operator "%=<<=" and neither could ever
# be lexed or parsed.
assignment_operations = ["=",                        # Right-to-Left
    "+=","-=",
    "/=","*=","%=",
    "<<=",">>=",
    "&=","^=","|=",
    ]
binary_operations = multiplication_operations + \
                    addition_operations + \
                    bitshift_operations + \
                    relation_operations + \
                    equality_operations + \
                    bitwise_operations + \
                    logical_operations + \
                    assignment_operations + selection_operations
operators = prefix_operations + binary_operations + ternary_operations
# Binding order used by parse_expression(), tightest first.
precedence = [
    selection_operations,
    multiplication_operations,
    addition_operations,
    bitshift_operations,
    relation_operations,
    equality_operations,
    ["&"],["^"],["|"],
    logical_operations,
    ternary_operations,
    assignment_operations,
    ]
# Utitlity Functions
# ------------------------------------------------------------------------
def is_keyword(token):
    """Return True when `token` is one of the reserved C keywords."""
    return token in keywords
def isonly(s, chars):
    """True when `s` is non-empty and made up solely of characters in `chars`."""
    if not s:
        return False
    for ch in s:
        if ch not in chars:
            return False
    return True
def intersection(list1, list2):
    """Return True when the two iterables share at least one element.

    If the elements are unhashable (set() raises TypeError) the offending
    inputs are reported and parsing is aborted, matching this file's
    assert-on-parse-error convention.
    """
    try:
        return len(set(list1) & set(list2)) > 0
    except TypeError:
        # Diagnostics belong on stderr; write() also keeps this error
        # path portable to Python 3, unlike the former `print` statements.
        sys.stderr.write("Can't find the intersection of these:\n")
        sys.stderr.write("%s\n%s\n" % (list1, list2))
        assert(0)
def first_instance(haystack, needles):
    """Index of the first element of `haystack` found in `needles`.

    Raises ValueError when no element matches.
    """
    hits = (idx for idx, item in enumerate(haystack) if item in needles)
    try:
        return next(hits)
    except StopIteration:
        raise ValueError("%s does not contain one of %s"%(str(haystack),str(needles)))
def len_type(tokens):
    """Count the leading tokens of `tokens` that spell out a C type.

    Skips any qualifier/storage modifiers, counts the base type itself,
    and includes one trailing '*' when the type is a pointer.
    """
    consumed = 0
    while tokens[consumed] in modifiers:
        consumed += 1           # a modifier such as const/static/...
    consumed += 1               # the base type
    if tokens[consumed] == "*":
        consumed += 1           # the pointer star
    return consumed
# Token Lexer
# ------------------------------------------------------------------------
class Token(str):
    """A string subclass annotated with the (line, pos) where it was lexed.

    The position always refers to where the token *started*; derived
    tokens produced by trim() and '+' keep the original position.
    """
    def set(self, line=0, pos=0):
        # Attach the source location after construction (str is immutable).
        self.line = line
        self.pos = pos
    def position(self):
        return (self.line, self.pos)
    def trim(self):
        # A copy with the final character removed, same location.
        clone = Token(str(self)[:-1])
        clone.set(self.line, self.pos)
        return clone
    def __add__(self, other):
        # Concatenation yields a Token carrying the left operand's location.
        combined = Token(str(self) + str(other))
        combined.set(self.line, self.pos)
        return combined
def escape_character( c, line, pos ):
    """Translate the character following a backslash into its escaped form.

    Returns a Token carrying the translated character and the given
    source location.  An unknown escape is reported and the character is
    passed through literally.
    """
    # Table replaces the former 7-way if/elif chain of identical bodies.
    _escapes = {
        "n": "\n",
        "f": "\f",   # Form Feed
        "t": "\t",
        "'": "\'",
        '"': "\"",
        "\\": "\\",
        "0": "\0",
    }
    if c in _escapes:
        curtoken = Token(_escapes[c])
    else:
        # Diagnostics go to stderr (formerly a Python 2-only `print`
        # to stdout); the literal character is used as a fallback.
        sys.stderr.write("Lex Error at Line %d / Char %d - Character '%c' cannot be escaped\n" % (line, pos, c))
        curtoken = Token(c)
    curtoken.set(line, pos)
    return curtoken
def tokenize( s ):
    """Generate Token objects from C source text `s`.

    A character-at-a-time state machine tracks whether the lexer is
    inside a string, a character literal, a comment or a preprocessor
    line.  `#define`d identifiers are expanded (one token per whole
    expansion) through the `definitions` table maintained by
    evaluate_pragma() and applied by _token().
    """
    symbols = string.punctuation.replace("_","")   # '_' belongs to identifiers
    digital = string.digits
    floating = string.digits + "."
    hexal = string.hexdigits
    line = 1
    pos = 0
    curtoken = Token("")
    curtoken.set(line,pos)
    # Lexer state flags.
    in_string = False
    in_char = False
    in_comment = False
    in_pragma = False
    in_define= False   # NOTE(review): set but never used
    definitions = {}   # macro name -> replacement text
    def evaluate_pragma( pragma ):
        # Record #define / #undef lines; any other pragma is ignored.
        print pragma
        #define
        if pragma.startswith("#define "):
            pragma = pragma.lstrip("#define ")
            identifier = pragma[0:pragma.index(" ")]
            expansion = pragma[pragma.index(" ")+1:]
            definitions[identifier] = expansion
        elif pragma.startswith("#undef "):
            identifier = pragma.lstrip("#undef ")
            if definitions.has_key(identifier):
                del definitions[ identifier ]
        else:
            # Unknown pragma
            pass
    def _token():
        # Yield helper: expand the current token if it names a macro.
        if definitions.has_key(curtoken):
            redefined = Token(definitions[curtoken])
            redefined.set( *curtoken.position() )
            return redefined
        else:
            return curtoken
    for c in s:
        pos += 1
        #print (c,curtoken)
        if in_comment or in_pragma:
            # Comments and pragmas swallow everything up to their terminator.
            if c=="\n":
                if in_pragma:
                    evaluate_pragma( curtoken )
                if curtoken.startswith("//") or curtoken.startswith("#") :
                    curtoken = Token("")
                    curtoken.set(line,pos)
                    in_comment = False
                    in_pragma = False
                line += 1
                pos = 0
            elif c=='/' and curtoken.endswith("*"):
                # Closing */ of a block comment.
                curtoken = Token("")
                curtoken.set(line,pos)
                in_comment = False
            else:
                curtoken += c
        elif c == '"' and not in_char:
            if not in_string:
                # Start of new String
                if curtoken != "":
                    yield _token()
                in_string = True
                curtoken = Token('"')
                curtoken.set(line,pos)
            elif len(curtoken) and curtoken[-1] == '\\':
                # Escaped quote inside the string.
                curtoken = Token(curtoken[:-1] + "\"")
                curtoken.set(line,pos)
            else:
                # End of String
                in_string = False
                curtoken += c
                yield _token()
                curtoken = Token("")
                curtoken.set(line,pos)
        elif in_string:
            if curtoken.endswith('\\'):
                # Char Symbols
                #curtoken = curtoken.trim()
                #curtoken += escape_character( c, line, pos )
                curtoken += Token(c)
            else:
                curtoken += Token(c)
        elif in_char:
            if curtoken.endswith("\\"):
                # Escape this Character
                #curtoken = curtoken.trim()
                #curtoken += escape_character(c, line, pos)
                curtoken += c
            elif c == "'":
                # End of Character:
                curtoken += c
                if len(curtoken) != 3:
                    print "Lex Error at Line %d / Char %d - Character '%s' is too long." % (curtoken.line, curtoken.pos, c)
                yield _token()
                in_char = False
                curtoken = Token("")
                curtoken.set(line,pos)
            else:
                curtoken += c
        elif c == "'" and not in_string:
            # Start of Character:
            if curtoken != "":
                yield _token()
            curtoken = Token("'")
            curtoken.set(line,pos)
            in_char = True
        elif c == "#":
            # Start of a preprocessor line.
            if curtoken != "":
                yield _token()
            curtoken = Token("#")
            curtoken.set(line,pos)
            in_pragma = True
        elif curtoken=="/" and c=="*":
            curtoken += Token(c)
            in_comment = True
        elif c == "/" and curtoken == "/":
            curtoken += Token(c)
            in_comment = True
        elif c in symbols:
            if (curtoken+c) in operators:
                # Grow a multi-character operator (e.g. '<' + '=' -> '<=').
                curtoken = Token((curtoken+c))
                curtoken.set(line,pos)
            elif c=='.' and isonly(curtoken, floating):
                # Decimal point of a float literal.
                curtoken += Token(c)
            else:
                if curtoken != "":
                    yield _token()
                curtoken = Token(c)
                curtoken.set(line,pos)
        else:
            # Non-Symbols
            if isonly(curtoken, symbols):
                # An operator token ends as soon as a non-symbol begins.
                yield _token()
                curtoken = Token("")
                curtoken.set(line,pos)
            if c in string.whitespace:
                if curtoken != "":
                    yield _token()
                if c == "\n":
                    #yield c
                    line += 1
                    pos = 0
                curtoken = Token("")
                curtoken.set(line,pos)
            # Int
            elif c in digital and isonly(curtoken,digital):
                curtoken += Token(c)
            # Float
            elif c in floating and isonly(curtoken, floating):
                curtoken += Token(c)
            # Hex
            elif curtoken.startswith("0x") and c in hexal and isonly(curtoken[2:], hexal):
                curtoken += Token(c)
            elif curtoken == "0" and c in "xX":
                curtoken += Token(c)
            else:
                # Identifier / keyword characters accumulate here.
                curtoken += Token(c)
    if curtoken not in string.whitespace:
        # Flush the final token at end of input.
        yield _token()
# Token Parser
# ------------------------------------------------------------------------
def parse_value(tokens):
    """Parse a primary value: prefix ops, literals, identifiers, and any
    trailing call `( ... )` / index `[ ... ]` / postfix `++`/`--` suffixes.

    Returns (ast_node, tokens); `tokens` is consumed in place.
    """
    if tokens[0] in prefix_operations:
        # Unary prefix: operand is either a parenthesised expression or
        # another (recursive) value.
        unary = tokens.pop(0)
        if tokens[0] == "(":
            tokens.pop(0)
            value,tokens = parse_expression( tokens )
            if tokens[0]!=")":
                print >>sys.stderr, "Parse Error at Line %d / Char %d - ( arguments must end with ')', found %s instead" % (tokens[0].line, tokens[0].pos, tokens[0])
                assert(0)
            tokens.pop(0)
        else:
            value,tokens = parse_value( tokens )
        inner = ('Prefix',(unary,value))
    elif is_keyword(tokens[0]):
        print >>sys.stderr, "Parse Error at Line %d / Char %d - Value Expected at '%s', found keyword" % (tokens[0].line, tokens[0].pos, tokens[0])
        assert(0)
    elif tokens[0] in string.punctuation:
        print >>sys.stderr, "Parse Error at Line %d / Char %d - Value Expected at '%s', found punctuation" % (tokens[0].line, tokens[0].pos, tokens[0])
        assert(0)
    elif tokens[0][0] == '"':
        # Adjacent string literals concatenate, as in C.
        name = tokens.pop(0)
        str = name[1:-1]
        while len(tokens) and tokens[0] and tokens[0][0] == '"':
            name = tokens.pop(0)
            str += name[1:-1]
        inner = ('String',str)
    else:
        # Identifier or numeric literal.
        name = tokens.pop(0)
        inner = ('Value',name)
        #print "Value",name
    # Wrap the value in Call/Index nodes for every trailing suffix.
    while len(tokens) and tokens[0] in "([":
        if tokens[0] == "(":
            tokens.pop(0)
            # Get the Arguements
            arguments = []
            while len(tokens):
                # Reached end of Argument List
                if tokens[0]==")":
                    break
                arg,tokens = parse_expression( tokens )
                arguments.append( arg )
                if tokens[0]!=",":
                    break
                else:
                    tokens.pop(0)
            if tokens[0]!=")":
                print >>sys.stderr, "Parse Error at Line %d / Char %d - Function must have ')', found %s instead" % (tokens[0].line, tokens[0].pos, tokens[0])
                assert(0)
            tokens.pop(0)
            inner = ('Call',(inner,arguments))
        elif tokens[0] == "[":
            tokens.pop(0)
            index,tokens = parse_expression( tokens )
            if tokens[0]!="]":
                print >>sys.stderr, "Parse Error at Line %d / Char %d - Array Accessor must have ']', found %s instead" % (tokens[0].line, tokens[0].pos, tokens[0])
                assert(0)
            tokens.pop(0)
            inner = ('Index',(inner, index) )
        else:
            #So how did you get here?
            assert(0)
    # Check for postfix unaray operations
    if tokens[0] in postfix_operations:
        unary = tokens.pop(0)
        #print "Value",unary,name
        inner = ('Postfix',(inner,unary))
    return inner,tokens
def parse_if( tokens ):
    """Parse `if (test) stmt [else stmt]`.

    Returns (("If", (test, action, alternative)), tokens) where
    `alternative` is None when there is no `else` branch.
    """
    if tokens[0] not in ["if"]:
        print >>sys.stderr, "Parse Error at Line %d / Char %d - if must start with 'if', found %s instead" % (tokens[0].line, tokens[0].pos, tokens[0])
        assert(0)
    tokens.pop(0)
    if tokens[0]!="(":
        print >>sys.stderr, "Parse Error at Line %d / Char %d - if must have '(', found %s instead" % (tokens[0].line, tokens[0].pos, tokens[0])
        assert(0)
    tokens.pop(0)
    test,tokens = parse_expression( tokens )
    if tokens[0]!=")":
        print >>sys.stderr, "Parse Error at Line %d / Char %d - if must have ')', found %s instead" % (tokens[0].line, tokens[0].pos, tokens[0])
        assert(0)
    tokens.pop(0)
    action,tokens = parse_statement_or_block(tokens)
    alternative = None
    if tokens[0]=="else":
        tokens.pop(0)
        alternative,tokens = parse_statement_or_block(tokens)
    #print "If",test,action
    return ("If",(test,action,alternative)), tokens
def parse_while( tokens ):
    """Parse `while (test) stmt`; returns (("While", (test, action)), tokens)."""
    if tokens[0] not in ["while"]:
        print >>sys.stderr, "Parse Error at Line %d / Char %d - while must start with 'while', found %s instead" % (tokens[0].line, tokens[0].pos, tokens[0])
        assert(0)
    tokens.pop(0)
    if tokens[0]!="(":
        print >>sys.stderr, "Parse Error at Line %d / Char %d - while must have '(', found %s instead" % (tokens[0].line, tokens[0].pos, tokens[0])
        assert(0)
    tokens.pop(0)
    test,tokens = parse_expression( tokens )
    if tokens[0]!=")":
        print >>sys.stderr, "Parse Error at Line %d / Char %d - if must have ')', found %s instead" % (tokens[0].line, tokens[0].pos, tokens[0])
        assert(0)
    tokens.pop(0)
    action,tokens = parse_statement_or_block(tokens)
    #print "While",test,action
    return ("While",(test,action)), tokens
def parse_for( tokens ):
    """Parse `for (init; test; step) stmt`.

    Returns (("For", (init, test, step, action)), tokens).
    """
    if tokens[0] not in ["for"]:
        print >>sys.stderr, "Parse Error at Line %d / Char %d - for must start with 'for', found %s instead" % (tokens[0].line, tokens[0].pos, tokens[0])
        assert(0)
    tokens.pop(0)
    if tokens[0]!="(":
        print >>sys.stderr, "Parse Error at Line %d / Char %d - for must have '(', found %s instead" % (tokens[0].line, tokens[0].pos, tokens[0])
        assert(0)
    tokens.pop(0)
    init,tokens = parse_expression( tokens )
    if tokens[0]!=";":
        print >>sys.stderr, "Parse Error at Line %d / Char %d - for must have first ';', found %s instead" % (tokens[0].line, tokens[0].pos, tokens[0])
        assert(0)
    tokens.pop(0)
    test,tokens = parse_expression( tokens )
    if tokens[0]!=";":
        print >>sys.stderr, "Parse Error at Line %d / Char %d - for must have second ';', found %s instead" % (tokens[0].line, tokens[0].pos, tokens[0])
        assert(0)
    tokens.pop(0)
    step,tokens = parse_expression( tokens )
    if tokens[0]!=")":
        print >>sys.stderr, "Parse Error at Line %d / Char %d - if must have ')', found %s instead" % (tokens[0].line, tokens[0].pos, tokens[0])
        assert(0)
    tokens.pop(0)
    action,tokens = parse_statement_or_block(tokens)
    #print "For",init,test,step,action
    return ("For",(init,test,step,action)), tokens
def parse_cast( tokens ):
    """Parse a C-style cast `(type)value` or `(type)(expression)`.

    Returns (("Cast", (cast_type, cast_value)), tokens).
    """
    # This enforces (int)x or (int)(x), rather than int(x), that's not quite right
    if tokens[0]!="(":
        print >>sys.stderr, "Parse Error at Line %d / Char %d - cast must start with '(', found %s instead" % (tokens[0].line, tokens[0].pos, tokens[0])
        assert(0)
    tokens.pop(0)
    # Get the Cast Type
    cast_type,tokens = parse_type(tokens)
    if tokens[0] != ")":
        # BUGFIX: this error path referenced the undefined names
        # `expression` and `inner` and crashed with a NameError instead
        # of reporting the actual parse error.
        print >>sys.stderr, "Parse Error at Line %d / Char %d - ')' expected after cast type %s" % (tokens[0].line, tokens[0].pos, str(cast_type))
        assert(0)
    tokens.pop(0)
    # Get the Casted Value
    if tokens[0] == "(":
        tokens.pop(0)
        cast_value,tokens = parse_expression(tokens)
        if tokens[0] != ")":
            # BUGFIX: same undefined-name problem (`inner`) as above.
            print >>sys.stderr, "Parse Error at Line %d / Char %d - ')' expected after expression %s" % (tokens[0].line, tokens[0].pos, str(cast_value))
            assert(0)
        tokens.pop(0)
    else:
        cast_value,tokens = parse_value( tokens )
    return ("Cast",(cast_type,cast_value)), tokens
def parse_expression( tokens ):
    """Parse one expression up to `;`, `,`, `)` or `]`.

    First collects a flat alternating [value, ("Math", op), value, ...]
    list, then folds it into a tree ("Binary"/"Ternary"/"Prefix"/
    "Postfix" nodes) following the global `precedence` table.
    Returns (ast_node, tokens).
    """
    # This should be a tree not a list
    expression = []
    while len(tokens):
        #TODO: Add Ternary Operator "?:"
        #TODO: Add Comma
        #TODO: Symbol Symbol should be illegal
        if tokens[0] == ";":
            break
        elif tokens[0] == ",":
            break
        elif tokens[0] == ")":
            break
        elif tokens[0] == "]":
            break
        # Get a value
        else:
            if tokens[0] == "(":
                # Is this an inner expression or a cast?
                if tokens[1] in types+modifiers:
                    inner,tokens = parse_cast( tokens )
                else:
                    tokens.pop(0)
                    inner,tokens = parse_expression( tokens )
                    if tokens[0] != ")":
                        for e in expression:
                            print e
                        print >>sys.stderr, "Parse Error at Line %d / Char %d - ')' expected after expression %s" % (tokens[0].line, tokens[0].pos, str(inner))
                        assert(0)
                    tokens.pop(0)
                    #break
            else:
                inner,tokens = parse_value( tokens )
            expression.append( inner )
            # TODO: Add Right/Left Associations
            if tokens[0] in binary_operations + ternary_operations:
                symbol = tokens.pop(0)
                expression.append( ("Math", (symbol) ) )
            else:
                #print "Didn't find an operator, found",str(tokens[0]),"instead"
                pass
    # Fix precedence
    if len(expression) > 2:
        # Repeatedly fold the highest-precedence operator until only a
        # single tree node remains.
        while len(expression) > 2:
            # The expressions should always be of the form:
            # Value Math Value Math Value
            symbols = [ sym[1] for sym in expression[1::2] ]
            for ops in precedence:
                if "?" in ops and "?" in symbols:
                    # Fold `test ? yes : no` into one Ternary node.
                    i = (2 * symbols.index("?")) + 1
                    j = (2 * symbols.index(":")) + 1
                    before,after = expression[:i-1],expression[j+2:]
                    test,yes,no = expression[i-1],expression[i+1],expression[j+1]
                    math = ("Ternary",(test,yes,no))
                    #print math
                    expression = before + [math] + after
                elif intersection( symbols, ops):
                    # Fold the leftmost operator of this precedence level.
                    i = (2 * first_instance( symbols, ops )) + 1
                    symbol = expression[i][1]
                    before,after = expression[:i-1],expression[i+2:]
                    right,left = expression[i-1],expression[i+1]
                    math = ("Binary",(symbol,right,left))
                    #print math
                    expression = before + [math] + after
                    break
                else:
                    # Nothing to see here, move along
                    pass
    elif len(expression) == 2:
        # A lone value preceded/followed by a unary operator.
        if expression[0][0] == "Math" and expression[0][1] in prefix_operations:
            return ("Prefix",(expression[0][1],expression[1])),tokens
        elif expression[1][0] == "Math" and expression[0][1] in postfix_operations:
            return ("Postfix",(expression[1][1],expression[0])),tokens
    #
    if len(expression) == 1:
        return expression[0],tokens
    elif len(expression) == 0:
        return ("Expression",[]),tokens
    else:
        print >>sys.stderr, "Parse Error at Line %d / Char %d - Couldn't compress expression into tree" % (tokens[0].line, tokens[0].pos)
        for e in expression:
            print >>sys.stderr, e
        assert(0)
def parse_struct( tokens ):
    """Parse `struct { ... }` or `union { ... }` (anonymous, no tag name).

    Members may be declarations or nested structs/unions, each terminated
    by ';'.  Returns (("Struct"|"Union", [members]), tokens).
    """
    struct = []
    if tokens[0] not in ["struct","union"]:
        print >>sys.stderr, "Parse Error at Line %d / Char %d - struct must start with 'struct' or 'union', found %s instead" % (tokens[0].line, tokens[0].pos, tokens[0])
        assert(0)
    kind = "Struct" if (tokens.pop(0) == "struct") else "Union"
    if tokens[0]!="{":
        print >>sys.stderr, "Parse Error at Line %d / Char %d - Blocks must start with 'struct {', found %s instead" % (tokens[0].line, tokens[0].pos, tokens[0])
        assert(0)
    tokens.pop(0)
    while len(tokens):
        if tokens[0]=="}":
            break
        if tokens[0]=="struct" or tokens[0]=="union":
            inner,tokens = parse_struct(tokens)
            struct.append(inner)
        else:
            declaration,tokens = parse_declaration(tokens)
            struct.append(declaration)
        if tokens[0]!=";":
            print >>sys.stderr, "Parse Error at Line %d / Char %d - struct values must end in ';', found %s instead" % (tokens[0].line, tokens[0].pos, tokens[0])
            assert(0)
        tokens.pop(0)
    if tokens[0]!="}":
        print >>sys.stderr, "Parse Error at Line %d / Char %d - Blocks must start with 'struct {', found %s instead" % (tokens[0].line, tokens[0].pos, tokens[0])
        assert(0)
    tokens.pop(0)
    #print kind, struct
    return (kind, struct), tokens
def parse_switch(tokens):
    """Parse `switch (test) { ... }`; returns (("Switch", (test, block)), tokens).

    `case`/`default` labels inside the body are handled by parse_statement().
    """
    if tokens[0] not in ["switch"]:
        print >>sys.stderr, "Parse Error at Line %d / Char %d - switch must start with 'switch', found %s instead" % (tokens[0].line, tokens[0].pos, tokens[0])
        assert(0)
    tokens.pop(0)
    if tokens[0]!="(":
        print >>sys.stderr, "Parse Error at Line %d / Char %d - for must have '(', found %s instead" % (tokens[0].line, tokens[0].pos, tokens[0])
        assert(0)
    tokens.pop(0)
    test,tokens = parse_expression( tokens )
    if tokens[0]!=")":
        print >>sys.stderr, "Parse Error at Line %d / Char %d - functions arguments must have ')', found %s instead" % (tokens[0].line, tokens[0].pos, tokens[0])
        assert(0)
    tokens.pop(0)
    block,tokens = parse_block( tokens )
    return ( "Switch", (test,block) ), tokens
def parse_type(tokens):
    """Consume zero or more modifiers, a base type, and an optional '*'.

    Returns (("Type", (mods, type, isPointer)), tokens).
    """
    mods = []
    while tokens[0] in modifiers:
        mods.append( tokens.pop(0) )
    if not ( tokens[0] in types ):
        print >>sys.stderr, "Parse Error at Line %d / Char %d - expected type but found %s instead" % (tokens[0].line, tokens[0].pos, tokens[0])
        assert( tokens[0] in types )
    type = tokens.pop(0)
    isPointer = False
    if tokens[0] == "*":
        isPointer = True
        tokens.pop(0)
    #print "Type %s" % (" ".join(mods) + type + ("*" if isPointer else ""))
    return ("Type", (mods, type, isPointer)), tokens
def parse_declaration( tokens ):
    """Parse a (possibly comma-separated) variable declaration list.

    Returns (("Declaration", [(type, name, length, init), ...]), tokens)
    where `length` is the array-size expression (or None) and `init` is
    the `= ...` initialiser expression (or None).  Stops at ';' without
    consuming it.
    """
    assignments = []
    type, tokens = parse_type( tokens )
    while len(tokens):
        # Check if it's a pointer
        if tokens[0] == "*":
            type = ("Type", (type[1][0], type[1][1], True))
            tokens.pop(0)
        name = tokens.pop(0)
        #print "Name %s" % name
        length = None
        if tokens[0]=="[":
            tokens.pop(0)
            length,tokens = parse_expression( tokens )
            if tokens[0]!="]":
                print >>sys.stderr, "Parse Error at Line %d / Char %d - Array Definition must end with ']', found %s instead" % (tokens[0].line, tokens[0].pos, tokens[0])
                assert(0)
            tokens.pop(0)
            if tokens[0]=="[":
                # Get Multi Dimensional Arrays
                print >>sys.stderr, "Parse Error at Line %d / Char %d - Multi Dimensional Arrays don't work yet" %(tokens[0].line, tokens[0].pos)
                assert(0)
        if not is_keyword(name):
            if tokens[0]=="=":
                # Declaration value
                tokens.pop(0)
                expression,tokens = parse_expression( tokens )
                assignments.append((type,name,length,expression))
            else:
                # Non-Assignmed value
                assignments.append((type,name,length,None))
        if tokens[0]==",":
            tokens.pop(0)
            # Subsequent declarators revert to the non-pointer base type.
            type = ("Type", (type[1][0], type[1][1], False))
            continue
        elif tokens[0]==";":
            break
        if len(tokens):
            print >>sys.stderr, "Parse Error at Line %d / Char %d - unknown token encountered at '%s'" % (tokens[0].line, tokens[0].pos, tokens[0])
            assert(0)
    return ("Declaration", assignments), tokens
def parse_function( tokens ):
    """Parse a function header `type name(args)` followed by either a
    body block or ';' (a bare prototype).

    Returns (("Function", (returntype, name, arguments, block)), tokens)
    where `block` is None for a prototype and `arguments` is a list of
    (type, argname) pairs.
    """
    returntype,tokens = parse_type(tokens)
    name = tokens.pop(0)
    if tokens[0]!="(":
        print >>sys.stderr, "Parse Error at Line %d / Char %d - Function must have '(', found %s instead" % (tokens[0].line, tokens[0].pos, tokens[0])
        assert(0)
    tokens.pop(0)
    # Arguments: `type name` pairs separated by ','; a lone `void` means none.
    arguments = []
    while len(tokens):
        # Reached end of Argument List
        if tokens[0]==")":
            break
        if tokens[0]== "void" and tokens[1]==")":
            tokens.pop(0)
            break
        type,tokens = parse_type(tokens)
        argname = tokens.pop(0)
        # BUGFIX: this used to test the *function* name (`name`) instead of
        # the argument name, and its format string had four placeholders
        # but only two arguments (a guaranteed TypeError when triggered).
        if is_keyword(argname):
            print >>sys.stderr, "Parse Error at Line %d / Char %d - Function argument #%d's name '%s' cannot be a keyword" % (tokens[0].line, tokens[0].pos, len(arguments)+1, argname)
            assert(0)
        arguments.append( (type,argname) )
        if tokens[0]!=",":
            break
        else:
            tokens.pop(0)
    if tokens[0]!=")":
        print >>sys.stderr, "Parse Error at Line %d / Char %d - Functions arguments must have ')', found %s instead" % (tokens[0].line, tokens[0].pos, tokens[0])
        assert(0)
    tokens.pop(0)
    if tokens[0]=="{":
        block,tokens = parse_block( tokens )
    elif tokens[0]==";":
        tokens.pop(0)
        block = None
    else:
        print >>sys.stderr, "Parse Error at Line %d / Char %d - Functions must have '{', found %s instead" % (tokens[0].line, tokens[0].pos, tokens[0])
        assert(0)
    return ("Function",(returntype,name,arguments,block)), tokens
def parse_statement( tokens ):
    """Parse one statement, dispatching on the first token.

    Control-flow constructs (if/while/for/switch, labels, case/default)
    carry their own terminators; everything else must end with ';' (or
    ',' inside declaration lists).  Returns (ast_node, tokens).
    """
    statement = []
    needsemicolon = True
    if tokens[0] == "if":
        statement,tokens = parse_if( tokens )
        needsemicolon = False
    elif tokens[0] == "while":
        statement,tokens = parse_while( tokens )
        needsemicolon = False
    elif tokens[0] == "for":
        statement,tokens = parse_for( tokens )
        needsemicolon = False
    elif tokens[0] in types + modifiers:
        statement,tokens = parse_declaration( tokens )
    elif tokens[0]=="struct" or tokens[0]=="union":
        statement,tokens = parse_struct(tokens)
    elif tokens[0] == "switch":
        statement,tokens = parse_switch(tokens)
        needsemicolon = False
    elif tokens[0] == "break":
        statement = ("Break",None)
        tokens.pop(0)
    elif tokens[0] == "continue":
        statement = ("Continue",None)
        tokens.pop(0)
    elif tokens[0] == "case":
        tokens.pop(0)
        literal,tokens = parse_value(tokens)
        statement = ("Case",literal)
        if tokens[0]!=":":
            print >>sys.stderr, "Parse Error at Line %d / Char %d - case must end in a colon: found %s instead" % (tokens[0].line, tokens[0].pos, tokens[0])
            assert(tokens[0] == ":")
        tokens.pop(0)
        needsemicolon = False
    elif tokens[0] == "default":
        tokens.pop(0)
        statement = ("default",None)
        if tokens[0]!=":":
            print >>sys.stderr, "Parse Error at Line %d / Char %d - default must end in a colon: found %s instead" % (tokens[0].line, tokens[0].pos, tokens[0])
            assert(tokens[0] == ":")
        tokens.pop(0)
        needsemicolon = False
    elif tokens[1] == ":":
        # `identifier :` is a goto label.
        label = tokens.pop(0)
        statement = ("Label",label)
        assert(tokens[0] == ":")
        tokens.pop(0)
        needsemicolon = False
    elif tokens[0] == "goto":
        tokens.pop(0)
        label = tokens.pop(0)
        statement = ("Goto",label)
    elif tokens[0] == "return":
        tokens.pop(0)
        expression,tokens = parse_expression( tokens );
        statement = ("Return",expression)
    else:
        # Fallback: a bare expression statement.
        expression,tokens = parse_expression(tokens)
        statement = ("Statement",expression)
    if needsemicolon:
        if tokens[0]==";" or tokens[0]==",":
            tokens.pop(0)
        else:
            print >>sys.stderr, "Parse Error at Line %d / Char %d - Statements must end in a semicolon: found %s instead" % (tokens[0].line, tokens[0].pos, tokens[0])
            assert(tokens[0]==";")
    #print "Statement",statement,"\n"
    return statement, tokens
def parse_block( tokens ):
    """Parse `{ statement* }`; returns (("Block", [statements]), tokens)."""
    if tokens[0]!="{":
        print >>sys.stderr, "Parse Error at Line %d / Char %d - Blocks must start with a {, found %s instead" % (tokens[0].line, tokens[0].pos, tokens[0])
        assert(0)
    tokens.pop(0)
    block = []
    while len(tokens) and tokens[0] != "}":
        statement,tokens = parse_statement_or_block(tokens)
        block.append( statement )
    if tokens[0]!="}":
        print >>sys.stderr, "Parse Error at Line %d / Char %d - Blocks must end with a }, found %s instead" % (tokens[0].line, tokens[0].pos, tokens[0])
        assert(0)
    tokens.pop(0)
    #print "Block", block
    return ("Block",block), tokens
def parse_statement_or_block( tokens ):
    """Dispatch on the lookahead: '{' opens a block, otherwise one statement."""
    if tokens[0] == "{":
        return parse_block( tokens )
    return parse_statement( tokens )
def parse_root( tokens ):
    """Parse one top-level construct: a function if '(' follows the
    type-and-name prefix, otherwise a declaration ending in ';'.
    """
    if tokens[ len_type(tokens) + 1 ] == "(":
        return parse_function( tokens )
    else:
        declaration = parse_declaration( tokens )
        # parse_declaration() leaves the trailing ';' unconsumed.
        if tokens[0]==";":
            tokens.pop(0)
        else:
            print >>sys.stderr, "Parse Error at Line %d / Char %d - Non-Function Declarations must end in a semicolon: found %s instead" % (tokens[0].line, tokens[0].pos, tokens[0])
            assert(tokens[0]==";")
        return declaration
# Print Abstract Syntax Tree (AST)
# ------------------------------------------------------------------------
def print_thing( thing, depth=0 ):
    """Pretty-print one AST node as an indented, human-readable tree.

    *thing* is a (tag, payload) 2-tuple; *depth* is the current
    indentation level (one tab per level).  Recursion adds depth as it
    descends into child nodes.  (Python 2 print statements.)
    """
    def p(str,d=0):
        # Emit one line indented to the current depth plus d extra tabs.
        print "\t"*(depth+d)+ str
    try:
        name,value = thing
    except ValueError:
        # Every AST node is expected to be a (tag, payload) 2-tuple.
        print "Can't Unpack this variable:"
        print thing
        assert(0)
    #p("THING:", name,value)
    if name == "Block":
        p("Block")
        for num,statement in enumerate(value):
            print "\t"*(depth)+ "Statement %d" %(num+1)
            print_thing(statement,depth+1)
    elif name == "Statement":
        # Statements are transparent wrappers; print the inner node.
        print_thing(value,depth)
    elif name == "Math":
        # A bare "Math" node should never survive parsing; dump and abort.
        symbol = value
        p("Math")
        p(symbol)
        assert(0)
    elif name == "Cast":
        type,expression = value
        p("Cast")
        print_thing(expression,depth+1)
        p("To")
        print_thing(type,depth+1)
    elif name == "Prefix":
        p("Prefix")
        symbol, expression = value
        p(symbol)
        print_thing(expression,depth+1)
    elif name == "Postfix":
        p("Postfix")
        expression, symbol = value
        print_thing(expression,depth+1)
        p(symbol)
    elif name == "Binary":
        symbol,left,right = value
        p("(")
        p("Math '%s'" % symbol)
        print_thing(left,depth+1)
        p(symbol)
        print_thing(right,depth+1)
        p(")")
    elif name == "String":
        p("String")
        p('"%s"'%value)
    elif name == "Value":
        p("Value")
        p(value)
    elif name == "Index":
        p("Index")
        var, expression = value
        print_thing(var,depth+1)
        p("[")
        print_thing(expression,depth+1)
        p("]")
    elif name == "Type":
        p("Type")
        # mods = storage/qualifier keywords, isPointer = trailing '*'.
        mods, type, isPointer = value
        if len(mods):
            type = " ".join(mods) + " " + type
        if isPointer:
            type = "Pointer to " + type
        p(type,1)
    elif name == "Declaration":
        p(name)
        for declaration in value:
            # NOTE: 'name' is rebound here, shadowing the node tag.
            type,name,length,assignment = declaration
            if length:
                p("Array of length",1)
                print_thing(length,depth+2)
            print_thing(type,depth+1)
            p("Name",1)
            p(name,2)
            if assignment:
                p("Assigned the value",1)
                print_thing(assignment,depth+2)
    elif name == "Expression":
        p(name)
        p("(")
        if value:
            print_thing(value,depth+1)
        p(")")
    elif name=="Struct" or name=="Union":
        p(name)
        p("{")
        for expression in value:
            print_thing(expression,depth+1)
        p("}")
    elif name=="If":
        test,action,alternative = value
        p(name)
        p("TEST",1)
        print_thing(test,depth+2)
        p("DO",1)
        print_thing(action,depth+2)
        if alternative:
            p("ELSE",1)
            print_thing(alternative,depth+2)
    elif name=="While":
        test,action = value
        p(name)
        p("TEST",1)
        print_thing(test,depth+2)
        p("DO",1)
        print_thing(action,depth+2)
    elif name=="For":
        init,test,step,action = value
        p(name)
        p("INIT",1)
        print_thing(init,(depth+1)+1)
        p("TEST",1)
        print_thing(test,(depth+1)+1)
        p("STEP",1)
        print_thing(step,(depth+1)+1)
        p("DO",1)
        print_thing(action,depth+2)
    elif name=="Break":
        p(name)
    elif name=="Continue":
        p(name)
    elif name=="Return":
        p(name)
        print_thing(value,depth+1)
    elif name=="Case":
        p(name)
        print_thing(value,depth+1)
    elif name=="Label":
        p(name)
        p(value,1)
    elif name=="Goto":
        p(name)
        p(value,1)
    elif name=="default":
        p(name)
    elif name=="Function":
        # A Function with a block is a full definition; without, a header.
        returntype,name,arguments,block = value
        if block:
            p("Function Declaration")
        else:
            p("Function Header")
        print_thing(returntype,depth+1)
        p(name,1)
        if len(arguments):
            p("With %d Argument%s" %(len(arguments), "s" if len(arguments) > 1 else ""))
            for num,(argtype,argname) in enumerate(arguments):
                p("Argument %d:" %(num+1),1)
                print_thing(argtype,depth+2)
                p("Name",2)
                p(argname,3)
        else:
            p("With No Arguments")
        if block:
            p("{")
            print_thing(block,depth+1)
            p("}")
    elif name=="Call":
        func,arguments = value
        print_thing(func,depth+1)
        p("(")
        for num,arg in enumerate(arguments):
            print_thing(arg,depth+1)
            if num != len(arguments)-1:
                p(",")
        p(")")
    elif name=="Switch":
        test,block = value
        p(name)
        p("(")
        print_thing(test,depth+1)
        p(")")
        p("{")
        print_thing(block,depth+1)
        p("}")
    else:
        # Unknown tag: print a warning plus the raw node for debugging.
        p("Warning! Unknown type '"+ str(name) +"'")
        p(str(name))
        p(str(value))
def print_c( thing, depth, comments ):
    """Emit one AST node back out as (roughly formatted) C source.

    *thing* is a (tag, payload) 2-tuple, *depth* the indentation level.
    When *comments* is true, each node is also annotated with a '//'
    line describing it.  Output goes to stdout one token group per
    line.  (Python 2 print statements.)
    """
    def comment(str):
        # Emit an annotation line only when comment output is enabled.
        if comments:
            p("// "+ str)
    def p(str,d=0):
        print "\t"*(depth+d)+ str
    try:
        name,value = thing
    except ValueError:
        # Every AST node is expected to be a (tag, payload) 2-tuple.
        print "Can't Unpack this variable:"
        print thing
        assert(0)
    comment( "THING: %s %s" % (name,str(value)) )
    comment( name )
    if name == "Block":
        p("{")
        for num,statement in enumerate(value):
            comment( "Statement %d" %(num+1) )
            print_c(statement,depth+1,comments)
        p("}")
    elif name == "Cast":
        type,expression = value
        p("(")
        print_c(type,depth+1,comments)
        p(")")
        print_c(expression,depth+1,comments)
    elif name == "Prefix":
        symbol, expression = value
        p(symbol)
        print_c(expression,depth+1,comments)
    elif name == "Postfix":
        expression, symbol = value
        print_c(expression,depth+1,comments)
        p(symbol)
    elif name == "Binary":
        symbol,left,right = value
        p("(")
        print_c(left,depth+1,comments)
        p(symbol)
        print_c(right,depth+1,comments)
        p(")")
    elif name == "Ternary":
        test,yes,no = value
        print_c(test,depth+1,comments)
        p("?")
        print_c(yes,depth+1,comments)
        p(":")
        print_c(no,depth+1,comments)
    elif name == "String":
        p('"%s"'%value)
    elif name == "Value":
        p(value)
    elif name == "Index":
        var, expression = value
        print_c(var,depth+1,comments)
        p("[")
        print_c(expression,depth+1,comments)
        p("]")
    elif name == "Type":
        # mods = storage/qualifier keywords, isPointer = trailing '*'.
        mods, type, isPointer = value
        if len(mods):
            type = " ".join(mods) + " " + type
        if isPointer:
            type += "*"
        p(type)
    elif name == "Declaration":
        for declaration in value:
            # NOTE: 'name' is rebound here, shadowing the node tag.
            type,name,length,assignment = declaration
            print_c(type,depth+1,comments)
            p(name,1)
            if length:
                p("[",1)
                print_c(length,depth+2,comments)
                p("]",1)
            if assignment:
                p("=",1)
                print_c(assignment,depth+2,comments)
            p(";")
    elif name == "Expression":
        p("(")
        if value:
            print_c(value,depth+1,comments)
        p(")")
    elif name=="Struct" or name=="Union":
        if name == "Struct":
            p("struct")
        if name == "Union":
            p("union")
        p("{")
        for expression in value:
            print_c(expression,depth+1,comments)
        p("};")
    elif name=="If":
        test,action,alternative = value
        p("if(")
        comment( "TEST" )
        print_c(test,depth+2,comments)
        p(")")
        comment( "DO" )
        p("{")
        print_c(action,depth+2,comments)
        p("}")
        if alternative:
            comment( "ELSE" )
            p("else {",1)
            print_c(alternative,depth+2,comments)
            p("}",1)
    elif name=="While":
        test,action = value
        p("while(")
        comment( "TEST" )
        print_c(test,depth+2,comments)
        p(")")
        comment( "DO" )
        p("{",1)
        print_c(action,depth+2,comments)
        p("}",1)
    elif name=="For":
        init,test,step,action = value
        p("for(")
        comment( "INIT" )
        print_c(init,(depth+1)+1,comments)
        p(";")
        comment( "TEST" )
        print_c(test,(depth+1)+1,comments)
        p(";")
        comment( "STEP" )
        print_c(step,(depth+1)+1,comments)
        p(")")
        comment( "DO" )
        p("{")
        print_c(action,depth+2,comments)
        p("}")
    elif name=="Break":
        p("break;")
    elif name=="Continue":
        p("continue;")
    elif name=="Return":
        p("return")
        print_c(value,depth+1,comments)
        p(";")
    elif name=="Case":
        p("case")
        print_c(value,depth+1,comments)
        p(":")
    elif name=="Label":
        p(value,1)
        p(":")
    elif name=="Goto":
        # NOTE(review): no 'goto' keyword is emitted here, only the
        # target label and ';' -- looks like an omission; confirm.
        p(value,1)
        p(";")
    elif name=="default":
        p("default:")
    elif name=="Statement":
        print_c(value,depth,comments)
        p(";")
    elif name=="Function":
        # A Function with a block is a full definition; without, a header.
        returntype,name,arguments,block = value
        if block:
            comment( "Function Declaration" )
            pass
        else:
            comment( "Function Header" )
            pass
        print_c(returntype,depth+1,comments)
        p(name,1)
        p("(")
        if len(arguments):
            comment( "With %d Argument%s" %(len(arguments), "s" if len(arguments) > 1 else "") )
            for num,(argtype,argname) in enumerate(arguments):
                comment( "Argument %d:" %(num+1) )
                print_c(argtype,depth+2,comments)
                comment( "Name" )
                p(argname,3)
                if num != len(arguments)-1:
                    p(",",3)
        else:
            comment( "With No Arguments" )
            p("void")
        p(")")
        if block:
            print_c(block,depth,comments)
        else:
            p(";")
    elif name=="Call":
        func,arguments = value
        print_c(func,depth+1,comments)
        p("(")
        for num,arg in enumerate(arguments):
            print_c(arg,depth+1,comments)
            if num != len(arguments)-1:
                p(",")
        p(")")
    elif name=="Switch":
        test,block = value
        p("switch")
        p("(")
        print_c(test,depth+1,comments)
        p(")")
        p("{")
        print_c(block,depth+1,comments)
        p("}")
    else:
        # Unknown tag: print a warning plus the raw node for debugging.
        p("Warning! Unknown type '"+ str(name) +"'")
        p(str(name))
        p(str(value))
if __name__ == "__main__":
    import os
    import sys
    import glob
    from optparse import OptionParser
    # Option Parser
    parser = OptionParser()
    parser.add_option("-s", "--save", dest="save", default=False,
                      action="store_true",
                      help="Save" )
    parser.add_option("-d", "--dir", dest="directory",
                      action="store", type="string", default="results",
                      help="Output directory", metavar="DIR")
    parser.add_option("-c", "--code",
                      action="store_true", dest="code", default=False,
                      help="Output AST as C")
    parser.add_option("--comments",
                      action="store_true", dest="comments", default=False,
                      help="Output comments in the code")
    parser.add_option("-a", "--ast",
                      action="store_true", dest="ast", default=False,
                      help="Output AST as readable AST")
    parser.add_option("-t", "--tokens",
                      action="store_true", dest="tokens", default=False,
                      help="Output parsed tokens")
    (options, args) = parser.parse_args()
    # expand options
    # Glob each positional argument ourselves (shells that do not expand
    # wildcards pass them through verbatim).
    files = []
    for arg in args:
        for filenames in glob.glob( arg ):
            files.append( filenames )
    output_to_files = options.save
    dirname = options.directory
    if output_to_files:
        if not os.path.exists( dirname ):
            os.mkdir( dirname )
    # All output is produced by redirecting sys.stdout; remember the
    # real stdout so it can be restored after each file.
    real_out = astfile = cfile = sys.stdout
    # Do the stuff
    for filename in files:
        print filename
        dirpath,filebase = os.path.split(filename)
        base,ext = os.path.splitext(filebase)
        data = open(filename,"r").read()
        tokens = list(tokenize( data ))
        # Print out the Lexer
        if options.tokens:
            if output_to_files:
                sys.stdout = open(os.path.join(dirname, base+".tok"),"w")
            print "Lexical Analysis of " + filename
            for i,token in enumerate( tokens ):
                loc = ("%d (%d,%d): " %(i, token.line, token.pos)).ljust(16)
                print loc,token
        # Parse tokens
        if output_to_files:
            if options.ast:
                astfile = open(os.path.join(dirname, base+".ast"),"w")
                sys.stdout = astfile
                print "AST code of " + filename
            if options.code:
                cfile = open(os.path.join(dirname, base+".c"),"w")
                sys.stdout = cfile
                print "// C code of " + filename
        # parse_root consumes one top-level construct per call and
        # returns the remaining token stream.
        while len(tokens):
            block, tokens = parse_root( tokens )
            if options.ast:
                sys.stdout = astfile
                print_thing(block)
            if options.code:
                sys.stdout = cfile
                print_c(block, 0, options.comments)
                print
        if output_to_files:
            sys.stdout = real_out
|
UTF-8
|
Python
| false | false | 2,012 |
12,936,441,532,201 |
cd7ea6cb38a06d897b0f8a47be63eb60cbb0254e
|
0839aa1c9d2481d3c1d93e13b6d38432f5c33fe7
|
/2.py
|
fb9728d940ee0d7af957ca8c0eb3942b827958e9
|
[] |
no_license
|
lihuang/Chanllenge
|
https://github.com/lihuang/Chanllenge
|
c3efa1c58e9590ff833e2955839693c411b75a57
|
8baa3b2d1062173eb779369706cd18b6869ed6fd
|
refs/heads/master
| 2021-01-22T10:08:09.498932 | 2013-11-18T09:21:55 | 2013-11-18T09:21:55 | 12,132,292 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
__author__ = 'gongzhi'
'''
url = http://www.pythonchallenge.com/pc/def/map.html
'''
def translate(org):
    """Shift every lowercase letter in *org* two places forward in the
    alphabet, wrapping around ('y' -> 'a', 'z' -> 'b'); every other
    character (spaces, punctuation, digits) is passed through unchanged.

    This is the rot-2 decoder for Python Challenge #2.  The original
    elif chain shifted *every* character it had not special-cased, so
    punctuation outside its hard-coded list (space, '.', "'", '(', ')')
    was silently corrupted; restricting the shift to 'a'-'z' fixes that
    and produces identical output for all inputs the script feeds it.
    """
    shifted = []
    for ch in org:
        if 'a' <= ch <= 'z':
            # Wrap within the 26-letter alphabet.
            shifted.append(chr(ord('a') + (ord(ch) - ord('a') + 2) % 26))
        else:
            shifted.append(ch)
    return "".join(shifted)
if __name__ == "__main__":
    print 'Question 2'
    # Ciphertext from http://www.pythonchallenge.com/pc/def/map.html
    org = "g fmnc wms bgblr rpylqjyrc gr zw fylb. rfyrq ufyr amknsrcpq ypc dmp. bmgle gr gl zw fylb gq glcddgagclr ylb rfyr'q ufw rfgq rcvr gq qm jmle. sqgle qrpgle.kyicrpylq() gq pcamkkclbcb. lmu ynnjw ml rfc spj."
    print translate(org)
    # Applying the same shift to the page name yields the next URL part.
    url = "map"
    print translate(url)
|
UTF-8
|
Python
| false | false | 2,013 |
11,931,419,160,079 |
fec5e433710606f5bee1742db7206a474d1a454c
|
cc214ca44dd91e6c612b70b7216feb17afb54100
|
/cp003/course-python-003-04.py
|
1965f8fbdb735867d81fae4513f48db106b4ed2c
|
[] |
no_license
|
pgodlevska/python-course
|
https://github.com/pgodlevska/python-course
|
61a5105521b9d54e06e40d5c921fdf4030c45041
|
f6e8c5ac50bc4255ad6a7ea9ea7871468230f348
|
refs/heads/master
| 2015-08-09T09:46:03.598397 | 2013-11-25T12:22:19 | 2013-11-25T12:22:19 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Demonstrates escape handling in triple-quoted strings: in the plain
# literal the doubled backslash "\\" collapses to a single backslash,
# while in the raw (r'') literal every backslash is kept verbatim --
# so both constants print the same ASCII face.
UGLY_FACE = '''
\ | /
@ @
*
\\"""/
'''
UGLY_FACE_R = r'''
\ | /
@ @
*
\"""/
'''
print UGLY_FACE
print UGLY_FACE_R
|
UTF-8
|
Python
| false | false | 2,013 |
7,885,559,965,574 |
ffbd0d34dae84271f33f88bf4436728f921f1e20
|
9eddc7f2c28e116e59413198f3d368512269b39b
|
/strange.py
|
0882cdba89cf33d0e7fbe2176ae5333d2d060151
|
[] |
no_license
|
oschuett/numc
|
https://github.com/oschuett/numc
|
c23ca9606eb696cc197a7f8ea90e17f1122417ff
|
21ebdd21192f9edd79399991ab2c91130224d140
|
refs/heads/master
| 2020-04-24T01:30:51.141663 | 2011-03-27T11:17:38 | 2011-03-27T11:17:38 | 1,408,601 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import numc as np
def run(start_matrix):
    """Run five Jacobi relaxation sweeps of the 2-D Laplace equation
    in place on *start_matrix* (a 2-D float array on the unit square).

    The grid spacings are derived from the matrix's own shape.  The
    original read the module-level globals ``nx``/``ny`` instead, which
    silently produced wrong spacings (or crashed) for any matrix whose
    shape differed from the script's hard-coded 500x5000; at module
    level the globals equal the shape, so behaviour there is unchanged.
    """
    u = start_matrix
    rows, cols = u.shape
    dx = 1.0 / (rows - 1)
    dy = 1.0 / (cols - 1)
    dx2, dy2 = dx**2, dy**2
    dnr_inv = 0.5 / (dx2 + dy2)
    for _ in range(5):
        # Five-point Laplace stencil: each interior cell becomes the
        # weighted average of its four axis neighbours.
        u[1:-1, 1:-1] = ((u[0:-2, 1:-1] + u[2:, 1:-1])*dy2 +
                         (u[1:-1, 0:-2] + u[1:-1, 2:])*dx2)*dnr_inv
# Disable numc's debug output before timing the two runs.
np.set_debug(0)
# The same C code takes 10x longer merely because of different start
# values ???  (original comment, translated from German)
print("Fast")
nx = 500
ny = 5000
run(np.ones((nx, ny), "d"))
print("Slow")
run(np.load("start_matrix.npy"))
#EOF
|
UTF-8
|
Python
| false | false | 2,011 |
11,716,670,833,363 |
98e0ad52fa480a738b325c1c81d3f7309931bfc5
|
69c9d323d5aecb89baffc05f85078b80438fcafc
|
/temp.py
|
22cd0a3d7545f2f837a5af3374351339f58819e2
|
[] |
no_license
|
saiichi/gitlearn
|
https://github.com/saiichi/gitlearn
|
f470a2b9b88092b0cf37ebd367d4517f104d2472
|
f723f383de36edf7237cd80753063622d7bea33b
|
refs/heads/master
| 2021-01-01T18:48:09.018988 | 2014-11-04T12:28:08 | 2014-11-04T12:28:08 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 12 09:43:15 2014

@author: cuiyi
"""
import numpy as np
import japanTSP as jt

# Load a previously computed tour ("jaresult", one 1-based city index
# per line) and convert the indices to 0-based.
file = open("jaresult")
x = []
for t in file.readlines():
    x.append(int(t.strip())-1)
dataset = jt.datainput("ja9847.txt")
indicate = x
x = []
y =[]
indicate
# Gather the coordinates of the tour cities in visiting order.
for i in indicate:
    x.append(dataset[i][0])
    y.append(dataset[i][1])
x1 =x
y1 =y
indicate1 = indicate
# Perturb the tour: 300 random swaps of positions at most 4 apart.
# NOTE(review): x1/y1/indicate1 are *aliases* of x/y/indicate (no copy
# is made), so the originals are perturbed too; and p+o can reach index
# 9804 at most, leaving the tail of a 9847-city tour untouched --
# confirm both are intended.
for i in range(300):
    p = np.random.randint(9800)
    o = np.random.randint(5)
    t = x1[p]
    x1[p] = x1[p+o]
    x1[p+o] = t
    t = y1[p]
    y1[p] = y1[p+o]
    y1[p+o] = t
    t = indicate1[p]
    indicate1[p] = indicate1[p+o]
    indicate1[p+o] = t
|
UTF-8
|
Python
| false | false | 2,014 |
8,194,797,611,119 |
1effc14aab1ac5bc09a772e750f94517cc519269
|
153ecce57c94724d2fb16712c216fb15adef0bc4
|
/Products.Ape/trunk/lib/apelib/config/common.py
|
c89645d65a9d0f05a506539ffd04bfe2e13551e8
|
[
"ZPL-2.1"
] |
permissive
|
pombredanne/zope
|
https://github.com/pombredanne/zope
|
10572830ba01cbfbad08b4e31451acc9c0653b39
|
c53f5dc4321d5a392ede428ed8d4ecf090aab8d2
|
refs/heads/master
| 2018-03-12T10:53:50.618672 | 2012-11-20T21:47:22 | 2012-11-20T21:47:22 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
##############################################################################
#
# Copyright (c) 2003 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Bits useful for configuration. May move to its own package.
$Id$
"""
import xml.sax.handler
from xml.sax import parse
from apelib.zodb3 import zodbtables
class Directive:
    """Abstract base class for table-oriented directives.

    Subclasses set :attr:`schema` to a ``zodbtables`` schema.  Positional
    constructor arguments are mapped onto the schema's columns in
    declaration order; the primary-key columns (prefixed by the concrete
    class) form the directive's unique key.

    Change vs. original: ``dict.has_key`` (removed in Python 3) replaced
    by the ``in`` operator -- identical semantics on Python 2.
    """

    schema = None  # override in subclasses with a zodbtables schema

    def __init__(self, source, *args, **kw):
        # *source* records where the directive came from, e.g. a
        # (filename, line) pair from the XML reader.
        self.source = source
        if args:
            # Map positional args onto the schema's columns in order,
            # refusing a value supplied both positionally and by keyword.
            columns = self.schema.get_columns()
            for n in range(len(args)):
                key = columns[n].name
                if key in kw:
                    raise TypeError(
                        '%s supplied as both positional and keyword argument'
                        % repr(key))
                kw[key] = args[n]
        self.data = kw
        # Prefix the key with the class so directives of different
        # types never collide.
        unique_key = [self.__class__]
        for column in self.schema.columns:
            if column.primary:
                unique_key.append(kw[column.name])
        self.unique_key = tuple(unique_key)

    def get_unique_key(self):
        return self.unique_key

    def index(self, tables):
        """Insert this directive's data into the per-class table in
        *tables*, creating the table on first use."""
        t = tables.get(self.__class__)
        if t is None:
            t = zodbtables.Table(self.schema)
            tables[self.__class__] = t
        t.insert(self.data)

    def __eq__(self, other):
        # Equal iff same concrete class and same column data; the
        # source location is deliberately ignored.
        if other.__class__ is self.__class__:
            return other.data == self.data
        return 0

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return "<%s from %s with %s>" % (
            self.__class__.__name__, repr(self.source), repr(self.data))
class XMLConfigReader (xml.sax.handler.ContentHandler):
    """SAX content handler that routes each configuration element to a
    per-element handler callable.

    Handlers receive (source, frame, attrs): the document location, a
    mutable variable frame inherited from the enclosing element, and the
    element's attributes.  Directives accumulate per variation name in
    :attr:`variations` (the '' key is the default variation).
    """

    def __init__(self, handlers):
        self.handlers = handlers
        # Default variation: handlers append directives to this list
        # unless they switch to a named variation.
        directives = []
        self.variations = {'': directives}
        self.stack = [{'directives': directives,
                       'variations': self.variations}]
        self.locator = None

    def setDocumentLocator(self, locator):
        self.locator = locator

    def startElement(self, name, attrs):
        # Push a copy of the current frame so nested elements inherit,
        # but cannot clobber, their parent's state.
        frame = dict(self.stack[-1])
        self.stack.append(frame)
        handler = self.handlers[name]
        if self.locator is None:
            origin = ("unknown", 0)
        else:
            origin = (self.locator.getSystemId(),
                      self.locator.getLineNumber())
        handler(origin, frame, attrs)

    def endElement(self, name):
        self.stack.pop()
class DirectiveReader:
    """Accumulates directives from XML config files, indexed by unique
    key and variation name, and rejects conflicting redefinitions.

    Change vs. original: ``dict.has_key`` (removed in Python 3) replaced
    by the ``in`` operator -- identical semantics on Python 2.
    """

    def __init__(self, handlers):
        self.directives = {}  # { unique key -> variation -> directive }
        self.handlers = handlers

    def read(self, filename):
        """Parse *filename* with the configured handlers and merge every
        variation it defines."""
        reader = XMLConfigReader(self.handlers)
        parse(filename, reader)
        for vname, directives in reader.variations.items():
            self.add(directives, vname)

    def add(self, directives, vname):
        """Merge *directives* under variation *vname*.

        Re-adding an equal directive is tolerated; a different directive
        under the same (key, variation) raises KeyError.
        """
        for d in directives:
            key = d.get_unique_key()
            info = self.directives.setdefault(key, {})
            if vname in info:
                if d != info[vname]:
                    raise KeyError(
                        'Conflicting directives: %s != %s' % (
                            repr(d), repr(info[vname])))
            else:
                info[vname] = d

    def get_directives(self, vname=''):
        """Return the directives for *vname*, falling back to the
        default variation ('') per unique key."""
        res = []
        # Note that although there isn't a way to declare that a
        # variation extends another variation, all variations should
        # derive from the default anyway, so we don't need the
        # extension mechanism yet.
        if not vname:
            vnames = ('',)
        else:
            vnames = (vname, '')
        for key, info in self.directives.items():
            for vn in vnames:
                if vn in info:
                    res.append(info[vn])
                    break  # Go to next directive
        return res
class DirectiveTables:
    """Indexes directives into per-class tables and answers queries.

    Change vs. original: the Python-2-only ``raise LookupError, "..."``
    statement form is replaced by the call form, which behaves
    identically on Python 2 and also parses on Python 3.
    """

    def __init__(self, directives):
        self.tables = {}  # {table name -> table}
        for d in directives:
            d.index(self.tables)

    def query(self, table_name, **filter):
        """Returns the specified directive records.

        An unknown table yields an empty list rather than an error.
        """
        t = self.tables.get(table_name)
        if t is None:
            return []
        return t.select(filter)

    def query_field(self, table_name, field, **filter):
        """Return *field* of the single record matching *filter*, or
        None when nothing matches; more than one match is an error."""
        t = self.tables.get(table_name)
        if t is None:
            return None
        records = t.select(filter)
        if len(records) > 1:
            raise LookupError("More than one record returned from field query")
        if not records:
            return None
        return records[0][field]
class ComponentSystem:
    """Lazily assembles and caches components described by directives."""

    def __init__(self, directives):
        self.dtables = DirectiveTables(directives)
        self.factories = {}   # {comptype -> assembler factory}
        self.components = {}  # {(comptype, name) -> component}

    def add_component_type(self, comptype, assembler_factory):
        self.factories[comptype] = assembler_factory

    def get(self, comptype, name):
        """Return the component for (comptype, name), assembling and
        configuring it on first access and caching it thereafter."""
        key = (comptype, name)
        cached = self.components.get(key)
        if cached is not None:
            return cached
        factory = self.factories[comptype]
        assembler = factory(self, comptype, name)
        component = assembler.create()
        # The component is cached before configure() runs -- presumably
        # so configure-time lookups can already see it; preserve order.
        self.components[key] = component
        assembler.configure()
        return component
|
UTF-8
|
Python
| false | false | 2,012 |
3,238,405,354,709 |
de588faf2f4cc5e00e6b08dcca694d15c405dd3c
|
acdbcff0504329c4fde3ae8d6432686690491ecd
|
/ipysite/views.py
|
4e398a1c4f440fb1b6a64b24b142dedefabdbbfa
|
[
"MIT"
] |
permissive
|
writefaruq/django-ipython-nbserver
|
https://github.com/writefaruq/django-ipython-nbserver
|
c2169554bf6d0b9ebcaed52fc4385f40265ce351
|
97218d5fba8fd53fba2dffd79d4c88d5efd05b1f
|
refs/heads/master
| 2020-05-19T10:57:43.631915 | 2013-09-18T15:54:14 | 2013-09-18T15:54:14 | 12,926,948 | 3 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
import subprocess
import time
import string
import random
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.contrib.auth.models import User
from django.http import HttpResponseRedirect, HttpResponse
from IPython.lib import passwd
from ipysite.models import UserProfile
import ipysite.notebook_config as nc
from ipysite import user_login
def homepage(request):
    """ Shows homepage """
    context = {}
    return render_to_response('homepage.html', context,
                              context_instance=RequestContext(request))
def account_settings(request):
    """ Shows the Notebook server settings.

    Lazily creates the user's profile (port + random password) on first
    visit, starts their personal IPython notebook server if it is not
    already running, and renders the connection details.
    """
    username = request.user.username
    # first time actions
    user_login.initialize_user_path(username)
    u = User.objects.get(username=username)
    users = UserProfile.objects.filter(user=u)
    if users:
        user = users[0]
    else:
        # First visit: derive a unique port from the user id and
        # generate a random notebook password.
        user = UserProfile.objects.create(user=u)
        user.nbserver_port = nc.NB_SERVER_PORT_BASE + int(user.user.id)
        user.nbserver_password = _get_nbserver_password()
        user.save()
    # Run the server, if not running yet.  The /proc/<pid> liveness
    # check is Linux-only.
    if os.path.exists('/proc/{0}'.format(user.nbserver_pid)): # nb server already up
        time.sleep(1)
    else: # first time or server not running
        ip_dir = '{0}/{1}/.ipython'.format(nc.DATA_DIR, username)
        nbserver_password_sha1 = passwd(user.nbserver_password)
        user.nbserver_pid = _run_server(ip_dir, user.nbserver_port, nbserver_password_sha1)
        user.save()
        # sleep to let server start listening
        time.sleep(3)
    ctx = {'nbserver_password' : user.nbserver_password,
           'nbserver_url' : '{0}:{1}'.format(nc.BASE_URL, user.nbserver_port)}
    return render_to_response('account/settings.html', ctx, context_instance=RequestContext(request))
def run_notebook_server(request):
    """ Deprecated now -- Launches the notebook server.

    Same profile-creation and server-start logic as account_settings(),
    but redirects straight to the notebook server URL instead of
    rendering a settings page.
    """
    username = request.user.username
    # first time actions
    user_login.initialize_user_path(username)
    #
    u = User.objects.get(username=username)
    users = UserProfile.objects.filter(user=u)
    if users:
        user = users[0]
    else:
        # First visit: derive a unique port from the user id and
        # generate a random notebook password.
        user = UserProfile.objects.create(user=u)
        user.nbserver_port = nc.NB_SERVER_PORT_BASE + int(user.user.id)
        user.nbserver_password = _get_nbserver_password()
        user.save()
    # Linux-only liveness check on the recorded server PID.
    if os.path.exists('/proc/{0}'.format(user.nbserver_pid)): # nb server already up
        time.sleep(1)
        return HttpResponseRedirect('{0}:{1}'.format(nc.BASE_URL, user.nbserver_port))
    else: # first time or server not running
        ip_dir = '{0}/{1}/.ipython'.format(nc.DATA_DIR, username)
        nbserver_password_sha1 = passwd(user.nbserver_password)
        user.nbserver_pid = _run_server(ip_dir, user.nbserver_port, nbserver_password_sha1)
        user.save()
        # sleep to let server start listening
        time.sleep(3)
        return HttpResponseRedirect('{0}:{1}'.format(nc.BASE_URL, user.nbserver_port))
    # show a maint msg
    # NOTE(review): unreachable -- both branches above return.
    return HttpResponse("<html> Server is under maintenance! Please try later.</html>")
def _run_server(ip_dir, port, password):
    """ Run a notebook server with a given ipython directory and port.
    Returns a PID.

    *password* is the sha1-hashed notebook password (see
    IPython.lib.passwd).  The SSL certificate flag is appended only when
    nc.NB_SERVER_SSL_CERT is configured.

    Change vs. original: the two near-identical Popen invocations
    (with/without --certfile) are collapsed into one argv built once --
    the resulting command lines are byte-identical.
    """
    new_env = dict(os.environ)  # copy current environ
    new_env['IPYTHONDIR'] = ip_dir  # this fixes an issue
    cmd = ['{0}python'.format(str(nc.VIRTUALENV_BIN_PATH)),
           '{0}ipython'.format(str(nc.VIRTUALENV_BIN_PATH)),
           'notebook',
           '--NotebookApp.password={0}'.format(password),
           '--NotebookApp.port={0}'.format(port),
           '--NotebookApp.ipython_dir={0}'.format(ip_dir),
           '--profile=nbserver',  # not stable default works fine
           ]
    if nc.NB_SERVER_SSL_CERT:
        cmd.append('--certfile={0}'.format(nc.NB_SERVER_SSL_CERT))
    pid = subprocess.Popen(cmd, env=new_env).pid
    return pid
def _get_nbserver_password(size=16, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for x in range(size))
|
UTF-8
|
Python
| false | false | 2,013 |
5,205,500,382,435 |
05b33d45f628e839240e56fd17979b7a516aba02
|
9de1c06475e2c6823d66f79648969ae48a8b8194
|
/pycmake/__init__.py
|
f8ddb04689cc70bc4df476cbeaecc2859d3b37f1
|
[
"MIT",
"BSD-2-Clause"
] |
permissive
|
ricortiz/scikit-build
|
https://github.com/ricortiz/scikit-build
|
3c3a4677cb7daf987ee4fb8e44b5fbfefa42c302
|
1c047f420a222300b0d1820bea5e5e6e5127c759
|
refs/heads/master
| 2020-04-06T06:58:42.388869 | 2014-08-09T21:38:27 | 2014-08-09T21:38:27 | 51,521,235 | 0 | 0 | null | true | 2016-02-11T14:49:54 | 2016-02-11T14:49:54 | 2016-02-11T14:49:33 | 2014-08-14T23:46:12 | 466 | 0 | 0 | 0 | null | null | null |
# -*- coding: utf-8 -*-
"""Top-level pycmake package: metadata plus the public setup() entry point."""

__author__ = 'PyCMake team'
__email__ = '[email protected]'
__version__ = '0.1.0'

# Re-export the distutils wrapper as the package's public API.
from pycmake.distutils_wrap import setup
|
UTF-8
|
Python
| false | false | 2,014 |
4,724,464,074,931 |
8b0ded9186782082e905873f85042b2e29926868
|
25ee6412669c9badfc3d19667cc3fc02052eedf0
|
/lab2/mixgraph.py
|
e88abb9fce0a0a51759e2e5cb165eef368ae6864
|
[] |
no_license
|
davidvgalbraith/radioastrolab
|
https://github.com/davidvgalbraith/radioastrolab
|
5cf8130c8d0057db58f455820f1b4dce60506207
|
1622c0b6d65c91d36322b5c2c21c33ab2ae30d8e
|
refs/heads/master
| 2020-05-04T22:04:22.180112 | 2014-05-05T11:51:08 | 2014-05-05T11:51:08 | 17,325,505 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
import sys
import numpy as np
import matplotlib.pyplot as plt
def main(argv):
    """Plot one of four pre-recorded analog-mixing captures.

    argv[0] selects the capture ('0'..'3'); *d* maps that choice to the
    second mixing frequency, used only in the plot title.  Sample-rate
    assumptions are baked into the hard-coded axis scalings below --
    TODO confirm against the capture setup.
    """
    data = np.load('analogdsb/analog_mixing.npz')
    d = {}
    d["0"] = 1.05
    d["1"] = 0.95
    d["2"] = 105
    d["3"] = 95
    graphable = data["arr_" + argv[0]]
    ffted = np.fft.fft(graphable)
    x = np.arange(len(ffted))/200.0
    q = np.arange(len(ffted))/6.0
    # Crude filter: keep only FFT bin 5, zero bins 0-4 and 6-1023.
    ffted[0:5] = 0
    ffted[6:1024] = 0
    #print abs(ffted[0:12])
    inv = np.fft.ifft(ffted)
    xx = np.fft.fftfreq(len(graphable))
    #plt.plot(xx, abs(ffted))
    plt.plot(x, graphable)
    #plt.plot(q, inv)
    plt.tick_params(axis='both', which='major', labelsize=24)
    plt.title("Convoluted signal of 1MHz and " + d[argv[0]].__str__() + " MHz", fontsize=36)
    plt.xlabel("Time (microseconds)", fontsize=36)
    plt.ylabel("Signal", fontsize=36)
    # Alternative labels for the commented-out spectrum / IFFT plots:
    #plt.title("Convoluted Power Spectrum of 1 MHz and " + d[argv[0]].__str__() + " MHz", fontsize=36)
    #plt.xlabel("Frequency(kHz)", fontsize=36)
    #plt.ylabel("Fourier Power Coefficient", fontsize=36)
    #plt.title("IFFT of convolved, filtered signals at 0.95 MHz and 1 MHz", fontsize=36)
    #plt.xlabel("Time (microsec)", fontsize=36)
    #plt.ylabel("Signal", fontsize=36)
    plt.show()

if __name__ == "__main__":
    main(sys.argv[1:])
|
UTF-8
|
Python
| false | false | 2,014 |
2,156,073,622,527 |
a7166a794e7ca5ab7f5c29108f15da3dfc9a3f86
|
a2c4657c9e06e6f18a5494457b905cd877a8c6c3
|
/poster/real/plotres.py
|
26d4ab7b6d3cadc2745a50a39d2d755b6573cb2d
|
[] |
no_license
|
whigg/eage2011
|
https://github.com/whigg/eage2011
|
2820a09ff08c14b32a6dea0f5921d44a98e126cc
|
897e6cb1cc908073f8988bcd948734ff5c809e1e
|
refs/heads/master
| 2021-05-28T10:44:57.106727 | 2014-01-15T16:09:54 | 2014-01-15T16:09:54 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import pickle
import numpy

from enthought.mayavi import mlab
mlab.options.backend = 'envisage'

from fatiando import vis
import fatiando.mesh

# Load the inversion mesh and the seed cells saved by the inversion run.
f = open("mesh.pickle")
mesh = pickle.load(f)
f.close()
f = open("seeds.pickle")
seeds = pickle.load(f)
f.close()
seed_mesh = numpy.array([seed['cell'] for seed in seeds])
# Presumably keeps only cells with values in [1, 1000] (the recovered
# body) -- confirm against fatiando.mesh.vfilter's documentation.
corpo = fatiando.mesh.vfilter(mesh, 1, 1000)

# Plot the resulting model: seeds solid, recovered body solid, full
# mesh translucent, on a white background with black annotations.
fig = mlab.figure()
fig.scene.background = (1, 1, 1)
vis.plot_prism_mesh(seed_mesh, style='surface', xy2ne=True)
plot = vis.plot_prism_mesh(corpo, style='surface', xy2ne=True)
plot = vis.plot_prism_mesh(mesh, style='surface', opacity=0.2, xy2ne=True)
axes = mlab.axes(plot, nb_labels=5, color=(0,0,0))
axes.label_text_property.color = (0,0,0)
axes.title_text_property.color = (0,0,0)
axes.axes.label_format = "%-.2f"
mlab.outline(color=(0,0,0))
mlab.show()
|
UTF-8
|
Python
| false | false | 2,014 |
17,849,884,109,229 |
908abb08b7bd56350126b44bf5d66db8b4ed1b36
|
765b677ca7a85953efcaf1409ffafd2407c4fed8
|
/bgradar/api/data/google_api.py
|
005942d0eaf948b143cd8e748e002e19c2a6d1b0
|
[] |
no_license
|
CKPlus/BeautyGirlRadar
|
https://github.com/CKPlus/BeautyGirlRadar
|
0ec79af7235fe4025458b7ece03dc8fef1c39547
|
bc5e5c91668ca66b80d17653eaa8309320e2bbaf
|
refs/heads/master
| 2021-01-17T23:54:50.962545 | 2014-08-01T07:15:25 | 2014-08-01T07:15:25 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import json
import urllib3
http = urllib3.PoolManager(10)
GOOGLE_GEOCODE_API_URL = 'http://maps.googleapis.com/maps/api/geocode/json'
GOOGLE_API_KEY = ''
def get_address_by_lnglat(lng, lat):
    """Reverse-geocode a WGS-84 point via the Google Geocode API.

    Returns the formatted address of the best match (language=zh-TW as
    in the original request), or an empty string when the lookup yields
    no results or the key is absent.

    Change vs. original: the URL is built in one step and the
    'results'-present / non-empty checks are collapsed into a single
    .get() + truthiness test -- same return values in every case.
    """
    url = '{0}?sensor=true&latlng={1},{2}&language=zh-TW'.format(
        GOOGLE_GEOCODE_API_URL, lat, lng)
    resp = http.urlopen('GET', url)
    geocode = json.loads(resp.data)
    results = geocode.get('results')
    if not results:
        return ''
    return results[0].get('formatted_address', '')
if __name__ == '__main__':
    # Smoke test: reverse-geocode a point in Taipei (Python 2 print).
    print get_address_by_lnglat(121.508272, 25.0421569)
|
UTF-8
|
Python
| false | false | 2,014 |
15,255,723,874,930 |
3911801f94a23ecfdb4f9f7872f038de20f29653
|
0c252c38fc4a1cf6a182e1a242ab08140bb58c83
|
/test_project/test_app/views.py
|
3126290623f5368c60e4936ad6679c6ce44496bc
|
[] |
no_license
|
mdiep/django-contactable
|
https://github.com/mdiep/django-contactable
|
cdcbef85fcf13085e257b868213b6a07024b741f
|
77c77a5426155b7e3a77f83705c15deb18ded0c5
|
refs/heads/master
| 2021-01-23T05:30:27.815464 | 2010-10-10T20:11:14 | 2010-10-10T20:11:14 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.views.generic.list_detail import object_detail, object_list
from contactable.forms import ContactInfoForm
from test_app.models import *
def person_detail(request, id):
    """Generic detail view for a single Person, looked up by id."""
    people = Person.objects.all()
    return object_detail(request, queryset=people, object_id=id)
def person_edit(request, id):
    """Edit a Person's contact info via ContactInfoForm.

    GET renders the form bound to the instance; POST validates it and,
    on success, saves and redirects to the person's detail page.  An
    invalid POST falls through and re-renders the bound form with its
    errors.
    """
    person = get_object_or_404(Person, id=id)
    if request.method == 'POST':
        form = ContactInfoForm(request.POST, instance=person)
        if form.is_valid():
            person = form.save()
            return HttpResponseRedirect(person.get_absolute_url())
    else:
        form = ContactInfoForm(instance=person)
    # locals() hands the template both 'person' and 'form'.
    return render_to_response('test_app/person_edit.html', locals(),
                              context_instance=RequestContext(request))
def person_list(request):
    """Generic list view over every Person."""
    people = Person.objects.all()
    return object_list(request, queryset=people)
|
UTF-8
|
Python
| false | false | 2,010 |
13,202,729,505,075 |
f810b27c1478c8a9631212ad02ef29fda4cbfce6
|
b7d2c0767637fc6c049502c1891c9fa1a89a770c
|
/tests/test_peekable_iterator.py
|
4e99ae48e51f53207f797e481869acc32e2f7902
|
[
"BSD-3-Clause"
] |
permissive
|
roeiba/slash
|
https://github.com/roeiba/slash
|
160beed53f2f1c5175dd606ee0af9e2940377959
|
5885a8083b00a10caa77a9a477263805c07862a4
|
refs/heads/master
| 2021-01-17T22:21:39.434958 | 2013-08-19T19:46:49 | 2013-08-19T20:07:46 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from .utils import TestCase
from slash.utils.peekable_iterator import PeekableIterator
class PeekableIteratorTest(TestCase):
    """Exercises PeekableIterator across one full pass of 10 sentinel objects."""

    def test_full_iteration(self):
        objects = [object() for i in range(10)]
        it = PeekableIterator(objects)
        for i, x in enumerate(it):
            self.assertIs(x, objects[i])
            for peek_num in range(3): # no matter how many times we peek, we get the same result
                if i == len(objects) - 1:
                    # Exhausted: has_next() is False, peek() raises
                    # StopIteration, peek_or_none() returns None.
                    self.assertFalse(it.has_next())
                    with self.assertRaises(StopIteration):
                        it.peek()
                    self.assertIsNone(it.peek_or_none())
                else:
                    # Mid-iteration: peeking must show the next element
                    # without consuming it.
                    self.assertTrue(it.has_next())
                    self.assertIs(it.peek(), objects[i+1])
                    self.assertIs(it.peek_or_none(), objects[i+1])
|
UTF-8
|
Python
| false | false | 2,013 |
16,758,962,404,311 |
78399453a3a0ddd76eae46a5dad085826d87dd39
|
63b58a357e7bdae49e03e7ef79d5d1dbbd97b428
|
/calendar_year.py
|
869bdb457d317fabba626ce5901342e3a20874e6
|
[] |
no_license
|
happyqq/python_study
|
https://github.com/happyqq/python_study
|
be11507704499a7fac45c123ef07d42858ef6953
|
ef4ee29ac6f3629f7914ae60ced79f29fef182aa
|
refs/heads/master
| 2018-05-13T07:17:13.661002 | 2014-12-27T15:49:18 | 2014-12-27T15:49:18 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#encoding:utf-8
"""Print text calendars with weeks starting on Saturday.

TextCalendar.prmonth()/pryear() write directly to stdout and return
None, so the original ``print cal.prmonth(...)`` emitted the calendar
followed by a stray "None" line; calling them bare fixes that.
formatmonth() returns a string, so it is the one that gets printed
(parenthesized print works identically on Python 2 and 3 here).
"""
import calendar

cal = calendar.TextCalendar(calendar.SATURDAY)
cal.prmonth(2014, 12)            # December 2014
cal.pryear(2014)                 # the whole of 2014
print(cal.formatmonth(2013, 7))  # July 2013, via the string-returning API
|
UTF-8
|
Python
| false | false | 2,014 |
9,320,079,067,110 |
c64fd70e943b578e5098913f5cccb04c9beaf34e
|
6a607ff93d136bf9b0c6d7760394e50dd4b3294e
|
/CodePython/PreMidtermExam3_MaxNum.py
|
078fde9284bf5e8675e42b29beb5e01af785c019
|
[] |
no_license
|
n3n/CP2014
|
https://github.com/n3n/CP2014
|
d70fad1031b9f764f11ecb7da607099688e7b1de
|
96a7cdd76f9e5d4fd8fb3afa8cd8e416d50b918c
|
refs/heads/master
| 2020-05-15T07:08:17.510719 | 2014-02-14T03:21:05 | 2014-02-14T03:21:05 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Python 2 script: read a list literal (e.g. [3, 1, 2]) from stdin and
# print its maximum value.
alist = input()
maxValue = 0
gum = ''
# Rebuild the string without the surrounding brackets, keeping digits,
# commas and spaces (int() tolerates the leading spaces later).
for i in str(alist):
    if i == '[' or i == ']':
        gum += ''
    else:
        gum += i
gum = gum.split(',')
# Linear scan for the largest value.
# NOTE(review): maxValue starts at 0, so this assumes non-negative inputs.
for j in gum:
    if int(j) > maxValue:
        maxValue = int(j)
print maxValue
|
UTF-8
|
Python
| false | false | 2,014 |
19,318,762,903,901 |
822f54e708a071b729a11e4a0edeb651a0c63c70
|
07a5c1ebb8db2bd2d2a4102ce4ea61dc816f78cb
|
/stepbystep/urls.py
|
ef8d3d89ae832c57b80c1af2cdb3fa1ce37dfbb4
|
[] |
no_license
|
0x55aa/learn-c
|
https://github.com/0x55aa/learn-c
|
e494ffa2c487f191cede2f4d21d66621eb6a552a
|
8537c1909e4d02bf519604779a5626d9e561e2e1
|
refs/heads/master
| 2020-12-02T05:06:07.559426 | 2013-08-29T15:35:36 | 2013-08-29T15:35:36 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#coding: utf-8
# URL routes for the stepbystep app (Django class-based views).
from django.conf.urls import patterns, url
from stepbystep import views
urlpatterns = patterns('',
    # Course detail page, keyed by numeric primary key.
    url(r'course/(?P<pk>[0-9]+)/$', views.CourseView.as_view(), name='course'),
    # Listing of all courses.
    url(r'list/$', views.CourseListView.as_view(), name='course_list'),
    # Endpoint that judges submitted code.
    url(r'judge_code/$', views.JudgeCodeView.as_view(), name='judge_code'),
)
|
UTF-8
|
Python
| false | false | 2,013 |
3,169,685,909,049 |
94991a6df5d222704a6650bfda8428055a76b11f
|
64b67b22aad202bd89333df729ea326216eb1ff9
|
/skcv/video/optical_flow/io.py
|
1bcd9d832c0a382269d150ddbca7a77a2b927b2b
|
[
"BSD-3-Clause"
] |
permissive
|
guillempalou/scikit-cv
|
https://github.com/guillempalou/scikit-cv
|
25c8f2ceaf5c8343d0f865a3414d5bb3d6f313d8
|
66b5455f0097a158f0498b5cade4e8e8a0094c08
|
refs/heads/master
| 2021-01-10T18:46:04.099193 | 2014-03-26T23:08:08 | 2014-03-26T23:08:08 | 15,309,258 | 7 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy as np
def read_flow_file(path):
    """Read a Middlebury ``.flo`` optical-flow file.

    Parameters
    ----------
    path: string
        file path to read

    Returns
    -------
    numpy array of shape (height, width, 2) holding the (u, v) flow vector
    for each pixel position.

    Raises
    ------
    IOError if the file cannot be opened, TypeError if the magic tag is
    wrong, ValueError if the header or payload is inconsistent.
    """
    # 'with' guarantees the descriptor is closed even when a validation
    # check below raises (the original leaked it on every error path).
    with open(path, "rb") as f:
        # The format starts with the 4-byte magic tag "PIEH".
        tag = f.read(4)
        if tag != b"PIEH":  # pragma: no cover
            raise TypeError("File type does not correspond to a flow file")
        # Header: width then height, as little-endian uint32.
        width = np.fromfile(f, dtype=np.uint32, count=1)
        height = np.fromfile(f, dtype=np.uint32, count=1)
        if width < 1 or width > 99999 or height < 1 or height > 99999:  # pragma: no cover
            raise ValueError("Width and height file not correct")
        # Payload: two interleaved float32 channels (u, v) per pixel.
        flow = np.fromfile(f, dtype=np.float32, count=width[0] * height[0] * 2)
        if flow.size != width[0] * height[0] * 2:  # pragma: no cover
            raise ValueError("Data flow too small %d != %d" % (flow.size, width[0] * height[0] * 2))
    # Row-major reshape so the result indexes as (y, x, channel).
    return np.reshape(flow, (height[0], width[0], 2), order='C')
def write_flow_file(path, flow):
    """Write a Middlebury ``.flo`` optical-flow file.

    Parameters
    ----------
    path: string
        file path to write
    flow: numpy array
        flow values, expected shape (height, width, 2)

    Raises
    ------
    IOError if the file cannot be opened for writing.
    """
    # 'with' flushes and closes the handle; the original never closed it,
    # so the tail of the data could be lost on interpreter teardown.
    with open(path, "wb") as f:
        # 4-byte magic tag identifying the .flo format.
        f.write(b"PIEH")
        # Header stores width first, then height, as uint32.
        shape = np.array((2, 1), dtype=np.uint32)
        shape[0] = flow.shape[1]
        shape[1] = flow.shape[0]
        shape.tofile(f)
        # Payload: float32 (u, v) pairs in row-major order.
        flow.astype(np.float32).tofile(f)
|
UTF-8
|
Python
| false | false | 2,014 |
12,266,426,626,956 |
5603b72800fa259d16ea46f215329e47da40b7c5
|
6c1a21e98f1972268799028e70d089b3dc7df5e8
|
/dotkeeper.py
|
d2a702ccfcd39b3e2e75365c8905875730641184
|
[] |
no_license
|
eminence/dotkeeper
|
https://github.com/eminence/dotkeeper
|
1b6288ea9e46bff0fb8d5c78aad655db6c890408
|
4fe8255fb36539a1caac39bf92a2cd2c2aedb72d
|
refs/heads/master
| 2021-01-13T02:27:23.272711 | 2012-11-27T22:18:18 | 2012-11-27T22:18:18 | 6,220,621 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
from git_helper import *
import sys
import inspect
from argparse import ArgumentParser
from configparser import ConfigParser
from pprint import pprint
import subprocess
class Command(object):
    """Manages the glue that connects the command line parser to individual functions"""
    registered = []  # all Command instances created via @Command.register
    def __init__(self, func, name):
        # Capture the wrapped function's full signature; per-parameter help
        # text is read from its annotations (see setup()).
        (self.args, self.varargs, self.varkw, self.defaults, self.kwonlyargs, self.kwonlydefaults, self.annotations) = \
            inspect.getfullargspec(func)
        #print args, varargs, kwargs, defaults
        if self.defaults is None: self.defaults = []
        self.name = name
        self.func = func
    def print_usage():
        # NOTE: no 'self' -- intended to be called as Command.print_usage().
        for cmd in Command.registered:
            print("%-20s %s" % (cmd.name, cmd.func.__doc__))
    def get_command(name):
        # Look up a registered command by name; returns None if unknown.
        # NOTE: no 'self' -- intended to be called as Command.get_command(name).
        for cmd in Command.registered:
            if cmd.name == name: return cmd
        return None
    def call_with(self, from_cmd):
        # Translate an argparse Namespace back into positional arguments
        # (plus the vararg list, if any) and invoke the wrapped function.
        d = {}
        for thing in self.args:
            parsed = getattr(from_cmd, thing)
            d[thing] = parsed
        args = []
        for thing in self.args:
            args.append(getattr(from_cmd, thing))
        if self.varargs is not None:
            args += getattr(from_cmd, self.varargs)
        self.func(*args)
    def setup(self):
        # Build an ArgumentParser mirroring the wrapped function's signature:
        # defaulted parameters become --options (booleans become store_true
        # switches), and a *varargs parameter becomes a required list.
        parser = ArgumentParser(prog=sys.argv[0] + " " + self.name, description=self.func.__doc__)
        offset = len(self.args) - len(self.defaults)
        for x in range(len(self.args)):
            thing = self.args[x]
            d = {}
            d['help'] = self.annotations[thing]
            prefix=''
            if (x-offset) >= 0:
                # This parameter has a default -> expose it as an option.
                default = self.defaults[x-offset]
                d['default'] = default
                d['dest'] = thing
                prefix='--'
                if type(default) == bool:
                    d['action'] = 'store_true'
            #print(prefix+thing, d)
            parser.add_argument(prefix+thing,**d)
        if self.varargs:
            parser.add_argument(self.varargs,nargs='+', help=self.annotations[self.varargs])
        return parser
    def register(name):
        "A decorator maker"
        def deco(func):
            "A decorator"
            Command.registered.append(Command(func, name))
            return func
        return deco
@Command.register("init")
def cmd_init(base_dir:"Path to dotkeeper base directory"="~/.dotkeeper/"):
    """Initializes the dot keeper repo"""
    base_dir = os.path.expanduser(base_dir)
    repo_dir = os.path.join(base_dir, "repo")
    # NOTE(review): GIT_DIR here is a local variable, not the module-level
    # global used by the other commands -- TODO confirm intent.
    GIT_DIR = repo_dir
    if not os.path.exists(repo_dir):
        # First run: create the bare directory and initialise a git repo in it.
        os.makedirs(repo_dir)
        git_helper(["init"], git_dir=GIT_DIR)
    config_file = os.path.join(base_dir, "config")
    if os.path.exists(config_file):
        print("Config file already exists!")
    else:
        # create new, empty config file. this will be the first commit into the repo
        cp = ConfigParser()
        cp.add_section("dotkeeper")
        cp.set("dotkeeper", "base_dir", base_dir)
        with open(config_file, "w") as f:
            cp.write(f)
        # Stage the config, write it out as a tree, and commit it as HEAD.
        add_to_index(config_file, git_dir=GIT_DIR)
        root_tree = write_tree(git_dir=GIT_DIR)
        print("root_tree is %r" % root_tree)
        commit_hash = commit_tree(root_tree, "Initial commit", parent=None, git_dir=GIT_DIR)
        print("commit_hash is %r" % commit_hash)
        git_helper(["update-ref", "HEAD", commit_hash], git_dir=GIT_DIR)
    print("Done!")
@Command.register("log")
def cmd_log():
    "Outputs a git log"
    # GIT_DIR is assigned in the __main__ block before any command runs.
    global GIT_DIR
    git_helper(["log"], git_dir=GIT_DIR)
@Command.register("add")
def cmd_add(verbose:"Be verbose"=False, *file:"File to add"):
    "Adds a file to the index"
    # GIT_DIR is assigned in the __main__ block before any command runs.
    global GIT_DIR
    for f in file:
        # Skip paths that do not exist instead of handing them to git
        # anyway (the original printed a warning but still indexed them).
        if not os.path.exists(f):
            print("File does not exist")
            continue
        if verbose:
            print("Added", f)
        add_to_index(f, git_dir=GIT_DIR)
@Command.register("status")
def cmd_status(verbose:"Be verbose"=False):
    "Shows the status of the index and the file system"
    # GIT_DIR is assigned in the __main__ block before any command runs.
    global GIT_DIR
    # Entries whose tree-side hash is all zeros exist only in the index,
    # i.e. they are pending addition.
    r = diff_index(git_dir=GIT_DIR)
    for item in r.values():
        if item['treeHash'] == '0000000000000000000000000000000000000000':
            print("%s - add" % fix_git_to_path(item['name']))
    # Re-hash every indexed file on disk to detect local modifications.
    index_files = ls_files(git_dir=GIT_DIR)
    for file in index_files.values():
        #compare this hash to the work hash
        fspath = fix_git_to_path(file['name'])
        workhash = hash_object(filename=fspath, write=False, git_dir=GIT_DIR)
        if workhash != file['hash']:
            print("%s - modified" % fspath)
@Command.register("diff")
def cmd_diff(verbose:"Be verbose"=False, *file:"File to diff"):
    "Diffs the file system with the index"
    # For each requested file: unpack the indexed blob into a temp file,
    # open it against the working copy in vimdiff, then clean up.
    index_files = ls_files(git_dir=GIT_DIR)
    for item in file:
        gitpath = fix_path_to_git(item)
        blob = index_files[gitpath]['hash']
        index_file = unpack_file(blob, git_dir=GIT_DIR)
        # Blocks until the user exits vimdiff.
        p = subprocess.Popen(["vimdiff", index_file, item])
        p.wait()
        os.unlink(index_file)
@Command.register("commit")
def cmd_commit(msg:"Commit message"=None, verbose:"Be verbose"=False):
    "Commits the index"
    # Write the current index as a tree, commit it, then advance HEAD.
    tree = write_tree(git_dir=GIT_DIR)
    if verbose:
        print("New tree is", tree)
    commit = commit_tree(tree, msg, git_dir=GIT_DIR)
    if verbose:
        print("New commit is", commit)
    git_helper(["update-ref", "HEAD", commit], git_dir=GIT_DIR)
if __name__ == "__main__":
    # Usage: dotkeeper.py <command> [args...]; no command prints usage.
    if len(sys.argv) < 2:
        Command.print_usage()
        sys.exit(1)
    cmd_s = sys.argv[1]
    # Resolve the repo location from the user's config file, with a
    # fallback default location when the key is absent.
    cp = ConfigParser()
    cp.read(os.path.expanduser('~/.dotkeeper/config'))
    # 'global' at module level is a no-op; kept to document that GIT_DIR
    # is the module-wide state the commands read.
    global GIT_DIR
    GIT_DIR = os.path.expanduser(os.path.join(cp.get("dotkeeper", "base_dir", fallback="~/.dotkeeper/repo"), "repo"))
    cmd = Command.get_command(cmd_s)
    if cmd is None:
        Command.print_usage()
        sys.exit(0)
    # Build the per-command argument parser and dispatch.
    parser = cmd.setup()
    _args = parser.parse_args(sys.argv[2:])
    cmd.call_with(_args)
|
UTF-8
|
Python
| false | false | 2,012 |
2,250,562,911,172 |
3e85f109487940b8a44f4c4e3f8d84070a97f324
|
347523b5ea88c36f6a7d7916426f219aafc4bbf8
|
/doc/salome/examples/creating_meshes_ex02.py
|
fe9f72f082364f2b8b9f0170f9cf7d7883cd4e22
|
[
"LGPL-2.1-only"
] |
non_permissive
|
FedoraScientific/salome-smesh
|
https://github.com/FedoraScientific/salome-smesh
|
397d95dc565b50004190755b56333c1dab86e9e1
|
9933995f6cd20e2169cbcf751f8647f9598c58f4
|
refs/heads/master
| 2020-06-04T08:05:59.662739 | 2014-11-20T13:06:53 | 2014-11-20T13:06:53 | 26,962,696 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Construction of a Submesh
# SALOME example script: meshes a box with quadrangles globally, while one
# selected edge gets its own (local) 1D hypothesis via a submesh.
import salome
salome.salome_init()
import GEOM
from salome.geom import geomBuilder
geompy = geomBuilder.New(salome.myStudy)
import SMESH, SALOMEDS
from salome.smesh import smeshBuilder
smesh = smeshBuilder.New(salome.myStudy)
# create a box
box = geompy.MakeBoxDXDYDZ(10., 10., 10.)
geompy.addToStudy(box, "Box")
# select one edge of the box for definition of a local hypothesis
p5 = geompy.MakeVertex(5., 0., 0.)
EdgeX = geompy.GetEdgeNearPoint(box, p5)
geompy.addToStudyInFather(box, EdgeX, "Edge [0,0,0 - 10,0,0]")
# create a hexahedral mesh on the box
quadra = smesh.Mesh(box, "Box : quadrangle 2D mesh")
# create a regular 1D algorithm for the faces
algo1D = quadra.Segment()
# define "NumberOfSegments" hypothesis to cut
# all the edges in a fixed number of segments
algo1D.NumberOfSegments(4)
# create a quadrangle 2D algorithm for the faces
quadra.Quadrangle()
# construct a submesh on the edge with a local hypothesis
algo_local = quadra.Segment(EdgeX)
# define "Arithmetic1D" hypothesis to cut the edge in several segments with increasing arithmetic length
algo_local.Arithmetic1D(1, 4)
# define "Propagation" hypothesis that propagates all other hypotheses
# on all edges of the opposite side in case of quadrangular faces
algo_local.Propagation()
# compute the mesh
quadra.Compute()
|
UTF-8
|
Python
| false | false | 2,014 |
3,204,045,606,156 |
f945b0cabe58d3dd85eb46c9a31f57399dbce23c
|
991d522516641bff72acaf1692f313920650f4dc
|
/pyfond/database.py
|
6b385a51c03b293b4b921a91e0c5f06a31e56724
|
[
"AGPL-3.0-only"
] |
non_permissive
|
w23/sifon
|
https://github.com/w23/sifon
|
3516909e027aabf35eb8ca8b3c818174879666f4
|
fab31c223f40f82cf71e8ccb50e22e4dc27b3028
|
refs/heads/master
| 2016-09-11T13:13:30.976893 | 2014-05-19T18:32:31 | 2014-05-19T18:32:31 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import logging
import sys
import bisect
class DummyDatabase:
    """In-memory track store kept in sorted order.

    Supports insertion, listing, case-insensitive substring search over tag
    values, and lookup by track id.
    """
    def __init__(self):
        # Tracks, maintained in sorted order by bisect.insort on insert().
        self._tracks = []

    def insert(self, track):
        # Keep the backing list sorted as items arrive.
        bisect.insort(self._tracks, track)

    def list_all(self):
        # Shallow copy so callers cannot mutate internal state.
        return list(self._tracks)

    def find(self, tags):
        class AnyTagIterator:
            """Lazily yields tracks whose tag values contain the query
            substring (case-insensitive); each track is yielded at most once."""
            def __init__(self, tracks, value):
                self._tracks = list(tracks)
                self._value = value.casefold()
            def __iter__(self):
                needle = self._value
                for track in self._tracks:
                    if any(needle in v.casefold() for v in track.tags.values()):
                        yield track
        # Only the "any tag" query (empty key) is supported.
        return AnyTagIterator(self._tracks, tags[''])

    def get(self, track_id):
        matches = (t for t in self._tracks if t.id == track_id)
        for track in matches:
            return track
        raise Exception("Not found")
|
UTF-8
|
Python
| false | false | 2,014 |
197,568,529,766 |
4edb561af3e6be5b68f6c04c2befc1898254bf17
|
2968ecf35cbaa7d6c34061f36f9dda7a46a890d3
|
/HW/HW1/genVectorField.py
|
2d8aa978568a7b6ea729700e76efeea31595ba7c
|
[] |
no_license
|
flashbyte/ITPDG
|
https://github.com/flashbyte/ITPDG
|
06965a873414e772a27492cb31c2408fad716138
|
974b6fb7d3e11d76168f1feb100f396ebe55e41d
|
refs/heads/master
| 2020-05-17T02:56:29.543019 | 2012-10-25T17:05:54 | 2012-10-25T17:05:54 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
#Author: Nils Mull
#Mail: [email protected]
#Date: 23.10.2012
# Plots the vector field (1, 1/x^2) on a grid, highlights three arrows,
# overlays the curve y = -1/x, and saves the figure to vectorfield.png.
#http://stackoverflow.com/questions/1843194/plotting-vector-fields-in-python-matplotlib
#http://physics.nmt.edu/~raymond/software/python_notes/paper004.html
import pylab
import time
import numpy
# def norm(X,Y):
#     dimFileld=len(X[0])
#     for x in range(dimFileld):
#         for y in range(dimFileld):
#             avr=pylab.sqrt(pow((X[x][y]),2)+pow((Y[x][y]),2))
#             X[x][y]=X[x][y]/avr
#             Y[x][y]=Y[x][y]/avr
# Sample grid over [-3, 3) x [-3, 3) with 0.5 spacing.
X,Y = pylab.meshgrid(pylab.arange(-3,3,.5),pylab.arange(-3,3,.5))
# Per-arrow colour values; three specific arrows are highlighted with 5.
COLOR=numpy.ndarray(X.shape)
COLOR.fill(0)
COLOR[4,8]=5
COLOR[2,7]=5
COLOR[5,10]=5
C=1
# Field components: u = 1, v = 1/x^2.
U=X*0+1
V=1/(pow(X,2))
V= (1/(pow(X,2)))
fig=pylab.figure()
q=pylab.quiver(X,Y,U,V,COLOR,angles='xy',scale=30)
# Trace the solution curve y = -1/x for x in (0.35, 3].
inc=3
xx=[]
yy=[]
while inc>.35:
    xx.append(inc)
    yy.append((-1)/inc)
    inc = inc - 0.01
pl=pylab.plot(xx,yy,'g')
pylab.xlabel('X')
pylab.ylabel('Y')
pylab.grid(b=True)
# Proxy rectangles used only to build the legend entries.
l1 = pylab.Rectangle((0, 0), 1, 1, fc="b")
l2 = pylab.Rectangle((0, 0), 1, 1, fc="r")
l3 = pylab.Rectangle((0, 0), 1, 1, fc="g")
pylab.legend([l1,l2,l3],['Vektorfeld','Vektoren der Aufgabe','Graph'])
fig.savefig('vectorfield.png')
#fig.show()
#time.sleep(5)
|
UTF-8
|
Python
| false | false | 2,012 |
18,554,258,744,554 |
fc048863ac7bdd9f6fb6b08391e7e5d2836138ec
|
c0c065c309d7aa057a5c2b3d0d1fd63c835fc100
|
/test/TestReadConfig.py
|
ccf0339eed079a564c31532234d8d1614fe145ef
|
[
"BSD-3-Clause"
] |
permissive
|
cavedivr/redi
|
https://github.com/cavedivr/redi
|
85f5fc1afd45b6723e31d2953519dddd8f056856
|
cadcc6c8bf7767a88ad875724795ddcaf262005a
|
refs/heads/master
| 2021-01-23T14:29:59.066099 | 2014-05-05T19:46:07 | 2014-05-05T19:46:07 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import json
import unittest
import tempfile
import os
import sys
file_dir = os.path.dirname(os.path.realpath(__file__))
goal_dir = os.path.join(file_dir, "../")
proj_root = os.path.abspath(goal_dir)+'/'
sys.path.append(proj_root + 'bin/')
import redi
class TestReadConfig(unittest.TestCase):
    """Tests redi.read_config: writes a temporary JSON settings file plus the
    XML fixture files it references, then verifies every field round-trips."""

    def setUp(self):
        # Temporary settings file; mkstemp returns (fd, path).
        self.setupFile = tempfile.mkstemp()
        self.input = """{ "job_owner_name": "John Doe",
        "job_email": "[email protected]",
        "data_provider_name": "Jane Doe",
        "data_provider_email": "[email protected]",
        "smtp_host_for_outbound_mail": "smtp.example.org",
        "system_log_file": "log/redi",
        "translation_table_file": "TestTranslationTable.xml",
        "form_events_file": "TestFormEvents.xml",
        "raw_xml_file": "TestRaw.xml",
        "subject_map_header": "studyID, mrn, facilityCode, startDate, endDate\\n",
        "redcap_uri": "https://example.org/redcap/api/",
        "token": "ABCDEF878D219CFA5D3ADF7F9AB12345" }"""
        # 'with' guarantees the handle is closed (the original leaked the
        # file objects opened in the fixture loop below).
        with open(self.setupFile[1], 'r+') as f:
            f.write(self.input)
        self.files = ['TestTranslationTable.xml', 'TestFormEvents.xml',
                      'TestRaw.xml']
        for file in self.files:
            try:
                # Touch the referenced fixture files so read_config finds them.
                with open(proj_root + file, "w+"):
                    pass
            except (IOError, OSError):  # narrowed from a bare except
                print("setUp failed to create file '" + file + "'")

    def test_readConfig(self):
        self.setup = redi.read_config(self.setupFile[1])
        self.assertEqual(self.setup['system_log_file'], "log/redi")
        self.assertEqual(self.setup['translation_table_file'],
                         "TestTranslationTable.xml")
        self.assertEqual(self.setup['form_events_file'], "TestFormEvents.xml")
        self.assertEqual(self.setup['raw_xml_file'], "TestRaw.xml")
        self.assertEqual(self.setup['redcap_uri'],
                         "https://example.org/redcap/api/")
        self.assertEqual(self.setup['token'],
                         "ABCDEF878D219CFA5D3ADF7F9AB12345")
        self.assertEqual(self.setup['smtp_host_for_outbound_mail'],
                         "smtp.example.org")

    def tearDown(self):
        # Remove the settings file and the fixture files created in setUp.
        os.unlink(self.setupFile[1])
        for file in self.files:
            try:
                os.unlink(proj_root + file)
            except (IOError, OSError):  # narrowed from a bare except
                # Fixed: the original printed "setUp failed ..." here.
                print("tearDown failed to unlink file '" + file + "'")
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
UTF-8
|
Python
| false | false | 2,014 |
10,943,576,670,653 |
658731f848d0798aeb718503e8454aaee57e6137
|
53da9d4f14b067b70cf3b9e8a66a47870adbc4ea
|
/src/game_elements.py
|
8c7386471dfbdd64d80708ab6d675fb94e257d07
|
[] |
no_license
|
zixuanl/Battle_Maze
|
https://github.com/zixuanl/Battle_Maze
|
4b08bc9b7b0a91347c6e44d11d729f977089ce64
|
96e206222dbfa88907221aca5c6cd99b0db7d3d7
|
refs/heads/master
| 2021-01-02T09:21:13.431720 | 2014-05-01T20:07:02 | 2014-05-01T20:07:02 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
import sys
import threading
from Tkinter import *
from random import *
from framework import *
import pygame
import sys
import tmx
import random
from tmx import *
# Four-character wire tags for the peer-to-peer / bootstrap protocol.
PEERNAME = "NAME" # request a peer's canonical id
LISTPEERS = "LIST"
PLAYER_LIST="PLAY"
INSERTPEER = "JOIN"
MOVE = "MOVE"
FIRE = "FIRE"
FLAG = "FLAG"
OBSTACLE = "OBST"
PEERQUIT = "QUIT"
REPLY = "REPL"
ERROR = "ERRO"
GAME_START="GAME" # send to bootstrap to get game details
DETAILS="DETL" # bootstrap replies with this as first message followed by PLAYER_LIST messages
PEER_INFO_DETAILS="INFO" # Request for information from peer after getting the list from bootstrap
PLAY_START="PSTA"
I_WIN = "IWIN"
I_LOST="LOST"
INFORM_GAME_END_BOOTSTRAP="OVER"
class wall(pygame.sprite.Sprite):
    """Temporary brick obstacle; removes itself once its lifespan expires."""
    def __init__(self,location,*groups):
        super(wall,self).__init__(*groups)
        self.image=pygame.image.load("game_items/brick.png")
        self.rect=pygame.rect.Rect(location,self.image.get_size())
        self.lifespan=50  # game-time units remaining before despawn
    def update(self, dt, game):
        # Count down by the elapsed frame time; despawn when expired.
        self.lifespan -= dt
        if self.lifespan < 0:
            self.kill()
            return
class flags(pygame.sprite.Sprite):
    """Collectible flag sprite; announces local pickups to all peers."""
    def __init__(self,location,flag_num,*groups):
        super(flags,self).__init__(*groups)
        self.image = pygame.image.load("game_items/flag.png")
        self.rect = pygame.rect.Rect(location,self.image.get_size())
        self.start_rect = self.rect.copy()
        self.flag_num = flag_num  # index into game.message_queue
    def update(self,dt,game):
        # Drain pending network messages for this flag: any message means a
        # remote player already collected it, so remove the sprite locally.
        queue = game.message_queue[self.flag_num]['flag']
        while queue.empty() != True:
            data = queue.get()
            self.kill()
        # Local pickup: broadcast a FLAG message to peers, then despawn.
        if pygame.sprite.spritecollide(self, game.players_sp,False):
            #game.flags_collected = game.flags_collected+1
            print 'flags collected!'
            data = game.player_num + ' ' + str(self.flag_num)
            game.multicast_to_peers_data(FLAG, data)
            self.kill()
class fire(pygame.sprite.Sprite):
    """Projectile that turns into a wall when its lifespan expires.

    Direction encoding: 1 = +x, -1 = -x, 2 = up (-y), -2 = down (+y).
    """
    def __init__(self,location,direction,*groups):
        super(fire,self).__init__(*groups)
        self.image = pygame.image.load("game_items/bullet.png")
        self.rect=pygame.rect.Rect(location,self.image.get_size())
        self.direction=direction
        self.lifespan=1  # seconds of flight before becoming a wall
    def update(self, dt, game):
        self.lifespan -= dt
        if self.lifespan < 0:
            # Expired: drop a wall at the current position and despawn.
            wall((self.rect.x,self.rect.y),game.blockwall)
            self.kill()
            return
        else:
            # Advance 400 px/s along the encoded direction.
            if self.direction==2:
                self.rect.y +=-1*400 * dt
                print self.rect.y
            elif self.direction==-2:
                self.rect.y +=400 * dt
            elif self.direction==1:
                self.rect.x += self.direction * 400 * dt
            elif self.direction==-1:
                self.rect.x += self.direction * 400 * dt
class bullet(pygame.sprite.Sprite):
    """Projectile fired by a player; marks anything it hits as killed.

    Direction encoding: 1 = +x, -1 = -x, 2 = up (-y), -2 = down (+y).
    """
    def __init__(self,location,direction,player_num,*groups):
        super(bullet,self).__init__(*groups)
        self.image = pygame.image.load("game_items/bullet.png")
        self.rect=pygame.rect.Rect(location,self.image.get_size())
        self.direction=direction
        self.lifespan=1  # seconds of flight before silently despawning
        self.player_num = player_num  # shooter id, recorded on victims as 'killer'
    def update(self, dt, game):
        self.lifespan -= dt
        if self.lifespan < 0:
            self.kill()
            return
        # Advance 400 px/s along the encoded direction.
        if self.direction==2:
            self.rect.y +=-1*400 * dt
        elif self.direction==-2:
            self.rect.y +=400 * dt
        elif self.direction==1:
            self.rect.x += self.direction * 400 * dt
        elif self.direction==-1:
            self.rect.x += self.direction * 400 * dt
        new = self.rect
        # Despawn on map blockers and on player-built walls.
        if game.tilemap.layers['collision'].collide(new,'blocker'):
            self.kill()
        if pygame.sprite.spritecollide(self, game.blockwall,False):
            self.kill()
        # Mark any enemies hit as killed, recording who fired the shot.
        enemies = pygame.sprite.spritecollide(self, game.enemies, False)
        if enemies:
            self.kill()
            for enemy in enemies:
                #print "Enemy Dead", self.player_num
                enemy.killer = self.player_num
                enemy.killed = True
        # Same for players.
        players = pygame.sprite.spritecollide(self, game.players_sp, False)
        if players:
            self.kill()
            for player in players:
                #print "Player Dead", self.player_num
                player.killer = self.player_num
                player.killed = True
|
UTF-8
|
Python
| false | false | 2,014 |
4,818,953,356,626 |
d08af9a32db10324d95a29fed62491a9d701c578
|
947bda6bdded5bd3c66febc2a0d4067668102803
|
/veriflame/__init__.py
|
a83fd3bc77568a8b6f0f5ee7dd075a9d53503f4a
|
[] |
no_license
|
BrainsInJars/Queen-Bee
|
https://github.com/BrainsInJars/Queen-Bee
|
88eda876882c2588f3ed9a89cc0064b05e536015
|
f807c4c207b96f4e6bcbb7ea23c207f0efa74921
|
refs/heads/master
| 2021-01-19T13:30:15.258590 | 2014-11-04T19:56:43 | 2014-11-04T19:56:43 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import time
import logging
import threading
# BCM pin numbers for the VeriFlame inputs and outputs.
_PIN_AUTO = 17
_PIN_LOW = 18
_PIN_HIGH = 27
_PIN_RELIGHT = 22
_PIN_OUTPUT = 23
# Bitmask values reported by VeriFlame.state(): one bit per input pin
# (AUTO/LOW/HIGH), OFF when no input is asserted.
OFF = 0x0
AUTO = 0x1
LOW = 0x2
HIGH = 0x4
class VeriFlame(threading.Thread, object):
    """Background thread that monitors the VeriFlame GPIO input pins.

    The current input state is exposed as a bitmask (see AUTO/LOW/HIGH),
    and an optional ``callback(state)`` is invoked on every change.
    Degrades gracefully off-Pi: without RPi.GPIO all methods are no-ops.
    """
    def __init__(self, bouncetime=300):
        super(VeriFlame, self).__init__()
        self._state = 0  # latest debounced input bitmask
        self.bouncetime = bouncetime  # edge-detect debounce window, ms
        self.log = logging.getLogger('veriflame')
        self.daemon = True
        self.callback = None  # optional fn(state) called on state changes
        self.event_shutdown = threading.Event()
        self.event_pin_update = threading.Event()
        try:
            import RPi.GPIO as GPIO
            self.GPIO = GPIO
        except ImportError:
            # Not running on a Raspberry Pi; run()/output()/relight() no-op.
            self.GPIO = None
    # Read the current veriflame state
    def state(self):
        return self._state
    # Control miscellaneous output pin
    def output(self, state):
        if self.GPIO is None:
            return
        self.GPIO.output(_PIN_OUTPUT, state)
    # Attempt a relight of the furnace holding power off for holdtime seconds
    def relight(self, holdtime):
        if self.GPIO is None:
            return
        self.GPIO.output(_PIN_RELIGHT, True)
        time.sleep(holdtime)
        self.GPIO.output(_PIN_RELIGHT, False)
    def _read_state(self, pins):
        # Pack each HIGH input pin into its bit position of the state mask.
        state = 0
        for index, pin in enumerate(pins):
            if self.GPIO.input(pin) == self.GPIO.HIGH:
                state = state | (1 << index)
        return state
    def run(self):
        # Thread main loop: configure GPIO, then wait for pin edges and
        # report debounced state changes via self.callback.
        if self.GPIO is None:
            return
        def pin_callback(channel):
            # Runs on the GPIO library's thread; just wake the main loop.
            self.event_pin_update.set()
        self.GPIO.setmode(self.GPIO.BCM)
        input_pins = [_PIN_AUTO, _PIN_LOW, _PIN_HIGH]
        output_pins = [_PIN_RELIGHT, _PIN_OUTPUT]
        for index, pin in enumerate(input_pins):
            self.GPIO.setup(pin, self.GPIO.IN, pull_up_down=self.GPIO.PUD_DOWN)
            self.GPIO.add_event_detect(pin, self.GPIO.BOTH, callback=pin_callback, bouncetime=self.bouncetime)
        for index, pin in enumerate(output_pins):
            self.GPIO.setup(pin, self.GPIO.OUT)
            self.GPIO.output(pin, False)
        # Force a callback on the iteration through the loop
        self.event_pin_update.set()
        while not self.event_shutdown.is_set():
            # 1s timeout so shutdown is noticed even with no pin activity.
            self.event_pin_update.wait(1.0)
            self.event_pin_update.clear()
            current = self._read_state(input_pins)
            if self._state != current:
                self._state = current
                if self.callback:
                    try:
                        self.callback(self._state)
                    except Exception as ex:
                        # Never let a user callback kill the monitor thread.
                        self.log.exception(ex)
        self.GPIO.cleanup()
    def shutdown(self):
        # Signal run() to exit; honoured within its 1-second wait.
        self.event_shutdown.set()
|
UTF-8
|
Python
| false | false | 2,014 |
4,432,406,276,776 |
253f13f47653c335077f37596507edc3c3c80172
|
3a4f14d6638bc0c12c129ed73c6c3543437203df
|
/src/morphforge/traces/std_methods/trace_methods_std_fft.py
|
2c5461c9f23b7cb23b9ee24fff7b9d6a4fd28b1c
|
[
"BSD-2-Clause"
] |
permissive
|
unidesigner/morphforge
|
https://github.com/unidesigner/morphforge
|
ef04ccb3877f069a0feea72eb1b44c97930dac44
|
510cd86549b2c2fb19296da2d4408ed8091fb962
|
refs/heads/master
| 2021-01-15T22:34:28.795355 | 2012-04-05T08:55:12 | 2012-04-05T08:55:12 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#-------------------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#-------------------------------------------------------------------------------
from morphforge.traces.trace_methods_ctrl import TraceMethodCtrl
from morphforge.traces import Trace_FixedDT
import numpy as np
def fft(tr, normalise=True):
    """Return (frequencies, spectrum) for the trace's samples.

    Frequencies are in Hz (sample spacing taken from the trace's dt,
    rescaled to seconds); when *normalise* is set the spectrum is divided
    by its maximum value.
    """
    spectrum = np.fft.fft(tr._data)
    if normalise:
        spectrum = spectrum / spectrum.max()
    sample_spacing = tr.getDTNew().rescale("s").magnitude
    freqs = np.fft.fftfreq(tr._data.size, sample_spacing)
    return freqs, spectrum
def psd(tr, normalise=True):
    """Return (frequencies, power) for the trace's samples.

    Power is |FFT|^2 computed as real^2 + imag^2; when *normalise* is set
    the power is divided by its maximum value.

    Fixed: the original called ``ft.real()`` / ``ft.imag()`` -- numpy's
    ``real``/``imag`` are array attributes, not methods, so calling them
    raised TypeError on every invocation.
    """
    ft = np.fft.fft(tr._data)
    power = ft.real ** 2 + ft.imag ** 2
    if normalise:
        power = power / power.max()
    ftfreq = np.fft.fftfreq(tr._data.size, tr.getDTNew().rescale("s").magnitude)
    return ftfreq, power
# Register the frequency-domain helpers on fixed-dt traces.
# Fixed: the original registered the 'psd' name with the `fft` function
# (copy/paste bug), so trace.psd() silently computed a plain FFT.
TraceMethodCtrl.register(Trace_FixedDT, 'fft', fft, can_fallback_to_fixed_trace=True)
TraceMethodCtrl.register(Trace_FixedDT, 'psd', psd, can_fallback_to_fixed_trace=True)
|
UTF-8
|
Python
| false | false | 2,012 |
18,202,071,426,210 |
17fc4856a12ce3571ac8dc1cf5d66f78a651ccf6
|
45d291ee47ce36a49029087e3e025b782803e9af
|
/gl_render.py
|
7630dd374a08829bca79352aebd4430ce1a3d1cf
|
[] |
no_license
|
neil-b/graphics-gen
|
https://github.com/neil-b/graphics-gen
|
dda1bc1459233f9b1a7fd3a5ed8aca9bf56ca86d
|
eb047a13a2ee8285360af473a4af7c612ecb3d1c
|
refs/heads/master
| 2021-01-25T10:34:24.883563 | 2014-06-20T17:54:39 | 2014-06-20T17:54:39 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
Code to produce rasterized equivalents of povray scenes.
use init() first to setup the camera settings,
then call render() as many times as needed
Being able to get rasterized output is useful to us for a couple of reasons:
- wireframe output
- 3d volumes can easily be generated by using the near and far parameters
requires opengl
"""
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
import Image, ImageOps
SIZE = (500, 500)  # render target size in pixels (width, height)
initialized = False  # set True once init() has built a GL context
windowHandle = None  # GLUT id of the hidden render window, None before init()
"""
Initialize an OpenGL context. After the first call to init(), all following calls will
destroy the previous init call's OpenGL context.
Set ortho to true to enable orthographic projection
The near and far parameters will only be used if ortho is set to true.
"""
def init(camera, near=0., far=1., ortho=False):
    """Create a hidden GLUT window and configure projection + modelview
    from *camera*; destroys the context from any previous init() call."""
    global initialized, SIZE, windowHandle
    cameraPos = camera.getPosition()
    lookAt = camera.getLookAt()
    fov = camera.getFov()
    # destroy the previous window if it exists
    # letting the previous windows accumulate will result in glitchy output!
    if windowHandle:
        glutDestroyWindow(windowHandle)
    glutInit()
    glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA | GLUT_DEPTH)
    glutInitWindowSize(SIZE[0], SIZE[1])
    windowHandle = glutCreateWindow('')
    # Off-screen use: keep the window hidden while rendering into it.
    glutHideWindow()
    upVector = [0., 1., 0.]
    aspect = SIZE[0] / float(SIZE[1])
    glDisable(GL_LIGHTING)
    glDisable(GL_CULL_FACE)
    glEnable(GL_BLEND)
    glEnable(GL_DEPTH_TEST)
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
    if ortho:
        # orthographic setup
        # near/far are only used in this branch (see module docstring).
        glMatrixMode(GL_PROJECTION)
        glOrtho(1., 0.,
                0., 1.,
                -near, -far)
        glMatrixMode(GL_MODELVIEW)
    else:
        # perspective setup
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        gluPerspective(fov, aspect, 1.0, 10.0)
        glMatrixMode(GL_MODELVIEW)
        gluLookAt(cameraPos[0], cameraPos[1], cameraPos[2],
                  lookAt[0], lookAt[1], lookAt[2],
                  upVector[0], upVector[1], upVector[2])
    glClearColor(0.0, 0.0, 0.0, 1.0)
    initialized = True
"""
Render a list of shapes and write to disk as fileName
"""
def render(meshes, fileName, wireFrame=False):
    """Draw *meshes* into the hidden framebuffer and save it as *fileName*.

    Requires a prior successful init() call; set *wireFrame* for line
    rendering instead of filled triangles.
    """
    global initialized, SIZE
    assert initialized
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    for mesh in meshes:
        glPolygonMode(GL_FRONT_AND_BACK, (GL_LINE if wireFrame else GL_FILL))
        glPushMatrix()
        # apply transformations
        glColor3f(*mesh.getColor())
        glTranslatef(*mesh.getTranslation())
        glScalef(*mesh.getScale())
        # Rotation applied in z, y, x order.
        glRotatef(mesh.getRotation()[2], 0, 0, 1)
        glRotatef(mesh.getRotation()[1], 0, 1, 0)
        glRotatef(mesh.getRotation()[0], 1, 0, 0)
        # send triangles
        glBegin(GL_TRIANGLES)
        for triangle in mesh.getTriangles():
            for vertex in triangle:
                glVertex3f(vertex[0], vertex[1], vertex[2])
        glEnd()
        glPopMatrix()
    glFlush()
    glutSwapBuffers()
    # save the framebuffer to disk
    data = glReadPixels(0, 0, SIZE[0], SIZE[1], GL_RGB, GL_BYTE)
    img = Image.frombuffer('RGB', SIZE, data, 'raw', 'RGB')
    # GL's origin is bottom-left; flip and mirror to image convention.
    flipped = ImageOps.mirror(ImageOps.flip(img))
    flipped.save(fileName)
|
UTF-8
|
Python
| false | false | 2,014 |
11,158,325,035,838 |
78c7557e08e8986c344e479470366b5504bfa62a
|
7ffd440205f0826fedeb297f8d2d89d942bb3e86
|
/mysite/registration/tests/__init__.py
|
f21f8672cbb6db05b90413c9dec99feb9f33645a
|
[] |
no_license
|
mstone9/Cilantro
|
https://github.com/mstone9/Cilantro
|
161ec51756cf8e97e03457472355c2b52ef59ed1
|
c284331dba50b87cc0dbd92c7855d6a2c5ec4fc9
|
refs/heads/master
| 2016-09-11T13:16:59.561490 | 2012-05-17T15:05:22 | 2012-05-17T15:05:22 | 3,531,568 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from mysite.registration.tests.backends import *
from mysite.registration.tests.forms import *
from mysite.registration.tests.models import *
from mysite.registration.tests.views import *
|
UTF-8
|
Python
| false | false | 2,012 |
19,688,130,088,123 |
3bd298e6c4bddb8f4de5e4e2320b00b47c336178
|
07091f53e29efabba7e9a13b9b28651fe85c7912
|
/scripts/static_spawns/corellia/factional_recruiters.py
|
d8b0b77e5be58ee1e9a4beabc3ae877306f5341f
|
[
"LGPL-3.0-only",
"GPL-1.0-or-later"
] |
non_permissive
|
Undercova/NGECore2
|
https://github.com/Undercova/NGECore2
|
377d4c11efba071e313ec75b3c2d864089733dc4
|
16d52e678201cab7c6e94924050ae1fc4a40de95
|
refs/heads/master
| 2019-01-03T17:35:40.610143 | 2014-11-09T03:34:03 | 2014-11-09T03:34:03 | 26,386,905 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import sys
# Project SWG: Imperial Recruiters tatooine: Static Spawns
# (C)2014 ProjectSWG
from resources.datatables import Options
from resources.datatables import State
def addPlanetSpawns(core, planet):
    """Spawn the static factional recruiter NPCs on Corellia.

    Fixed: the original used ``if x not None:`` (twice), which is a
    SyntaxError; the correct form is ``is not None``.
    """
    stcSvc = core.staticService
    objSvc = core.objectService
    #recruiter Imp Outpost (world-space spawn)
    stcSvc.spawnObject('imperial_recruiter', 'corellia', long(0), float(6718), float(315), float(-5809), float(0), float(0), float(0), float(0))
    #rebrecruiter tyrena -- inside the cantina building, cell 11
    tyrenaCantina = core.objectService.getObject(long(2625352))
    if tyrenaCantina is not None:
        stcSvc.spawnObject('rebel_recruiter', 'corellia', tyrenaCantina.getCellByCellNumber(11), float(-25.8), float(-0.9), float(0.4), float(0), float(-0.707), float(0), float(0.707))
    #cnet cantina -- same placement, cell 11
    cnetCantina = core.objectService.getObject(long(8105493))
    if cnetCantina is not None:
        stcSvc.spawnObject('rebel_recruiter', 'corellia', cnetCantina.getCellByCellNumber(11), float(-25.8), float(-0.9), float(0.4), float(0), float(-0.707), float(0), float(0.707))
    return
|
UTF-8
|
Python
| false | false | 2,014 |
6,064,493,832,149 |
f307ac9600f561ae07620b05b79797283dcd1ebc
|
6d28d17c3d8cc98405df456206005ebd857658b5
|
/design/controller_plotter.py
|
45fe14695b6ea79446e2ee54d4c88397830dfc0d
|
[
"BSD-2-Clause"
] |
permissive
|
hazelnusse/robot.bicycle
|
https://github.com/hazelnusse/robot.bicycle
|
02a42772436a69e7886e0adf785fc54c165d0261
|
b8d7c67290497577c96167dac123765efc4e08f8
|
refs/heads/master
| 2020-05-17T04:19:40.698098 | 2013-08-31T08:10:15 | 2013-08-31T08:10:15 | 5,311,076 | 9 | 3 |
BSD-2-Clause
| false | 2023-08-24T09:53:29 | 2012-08-06T07:07:49 | 2022-10-04T19:05:00 | 2023-08-24T09:53:16 | 756 | 11 | 6 | 0 |
C++
| false | false |
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
import numpy as np
from scipy.signal import dstep, dlsim
from parameters import rear, w
import yaw_rate_controller as yrc
class CDataPlotter(object):
    """Plotting helpers for yaw-rate controller design data.

    Wraps the structured numpy record array produced by the controller
    design step (loaded from an .npz file or passed in directly) and
    provides plots of plant/closed-loop eigenvalues, gain schedules,
    Bode plots and step responses.
    """

    def __init__(self, datafile=None, c_data=None):
        # Load the design data either from disk or from memory.
        # NOTE(review): if both arguments are None, self.d is never set
        # and later method calls will raise AttributeError.
        if datafile is not None:
            self.d = np.load(datafile)['arr_0']
        elif c_data is not None:
            self.d = c_data
        self.cm = plt.get_cmap('gist_rainbow')

    def plant_evals_c(self):
        """Plot continuous-time plant eigenvalues vs. forward speed."""
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        ax.plot(-self.d['theta_R_dot'] * rear.R,
                self.d['plant_evals_c'].real, 'k.')
        ax.plot(-self.d['theta_R_dot'] * rear.R,
                self.d['plant_evals_c'].imag, 'b.')
        ax.axis('tight')
        ax.set_xlabel('v [m / s]')
        ax.set_ylabel('$\\lambda$')
        ax.set_title("Plant eigenvalues (continuous)")

    def plant_evals_d(self):
        """Plot discrete-time plant eigenvalues in the complex plane."""
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        ax.plot(self.d['plant_evals_d'].real,
                self.d['plant_evals_d'].imag, 'k.')
        ax.axis('equal')
        ax.set_xlim([-1, 1])
        ax.set_ylim([-1, 1])
        # Unit circle marks the discrete-time stability boundary.
        ax.add_patch(Circle((0, 0), radius=1, fill=False))
        ax.set_xlabel('real($\\lambda$)')
        # BUGFIX: original called set_xlabel twice, clobbering the x label
        # and leaving the y axis unlabeled.
        ax.set_ylabel('imag($\\lambda$)')
        ax.set_title("Plant eigenvalues (discrete)")

    def closed_loop_evals(self):
        """Plot estimator (red) and controller (green) eigenvalues."""
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        ax.plot(self.d['estimator_evals'].real,
                self.d['estimator_evals'].imag, 'r.')
        ax.plot(self.d['controller_evals'].real,
                self.d['controller_evals'].imag, 'g.')
        ax.axis('equal')
        ax.set_xlim([-1, 1])
        ax.set_ylim([-1, 1])
        ax.add_patch(Circle((0, 0), radius=1, fill=False))
        plt.title("Closed loop eigenvalues, red: estimator, green: controller")

    def controller_gains(self):
        """Plot the four LQR feedback gains against rear wheel rate."""
        plt.figure()
        N = len(self.d)
        ax = plt.plot(self.d['theta_R_dot'],
                      self.d['K_c'][:,:,:].reshape((N, 4)))
        ax[0].set_label(r"$k_\phi$")
        ax[1].set_label(r"$k_\delta$")
        ax[2].set_label(r"$k_\dot{\phi}$")
        ax[3].set_label(r"$k_\dot{\delta}$")
        plt.legend(loc=0)
        plt.title('Feedback gains vs. speed')
        plt.xlabel('$\\dot{\\theta}_R$')
        plt.ylabel('Gain')

    def estimator_gains(self):
        """Plot Kalman gains for both measurements (steer, roll rate)."""
        N = len(self.d)
        f, axarr = plt.subplots(2, 1, sharex=True)
        # Column 0: gains applied to the steer measurement.
        lines = axarr[0].plot(self.d['theta_R_dot'],
                              self.d['K_e'][:, :, 0].reshape((N, 4)))
        lines[0].set_label(r"$k_\phi$")
        lines[1].set_label(r"$k_\delta$")
        lines[2].set_label(r"$k_\dot{\phi}$")
        lines[3].set_label(r"$k_\dot{\delta}$")
        axarr[0].legend(loc=0)
        axarr[0].set_title('Estimator gains vs. speed')
        axarr[0].set_ylabel('Steer measurement gain')
        # Column 1: gains applied to the roll rate measurement.
        lines = axarr[1].plot(self.d['theta_R_dot'],
                              self.d['K_e'][:, :, 1].reshape((N, 4)))
        lines[0].set_label(r"$k_\phi$")
        lines[1].set_label(r"$k_\delta$")
        lines[2].set_label(r"$k_\dot{\phi}$")
        lines[3].set_label(r"$k_\dot{\delta}$")
        axarr[1].legend(loc=0)
        axarr[1].set_xlabel('$\\dot{\\theta}_R$')
        axarr[1].set_ylabel('Roll rate measurement gain')

    def pi_gains(self):
        """Plot the yaw-rate PI gains and their fitted approximations."""
        plt.figure()
        N = len(self.d)
        ax = plt.plot(self.d['theta_R_dot'],
                      np.hstack((self.d['Kp'].reshape((N, 1)),
                                 self.d['Kp_fit'].reshape((N, 1)),
                                 self.d['Ki'].reshape((N, 1)),
                                 self.d['Ki_fit'].reshape((N, 1)))))
        ax[0].set_label(r"$K_p$")
        ax[1].set_label(r"$K_{pf}$")
        ax[2].set_label(r"$K_i$")
        ax[3].set_label(r"$K_{if}$")
        plt.legend(loc=0)
        plt.title('Yaw rate PI gains vs. speed')
        plt.xlabel('$\\dot{\\theta}_R$')
        plt.ylabel('Gain')

    def plot_evals_vs_speed(self, data_label, plot_title):
        """Scatter the eigenvalue set `data_label`, color-coded by speed.

        Only every 10th speed is listed in the legend to keep it legible.
        """
        plt.figure()
        ax = plt.subplot(1, 1, 1)
        eigvals = self.d[data_label]
        speeds = np.shape(eigvals)[0]
        phs = []
        lbl = []
        for v in range(speeds):
            ph, = ax.plot(eigvals[v, :].real, eigvals[v, :].imag,
                          marker='.', linestyle='None',
                          color=self.cm(1.*v/speeds),
                          label='$v = {}$'.format(v))
            if v in range(0, speeds, 10):
                phs.append(ph)
                lbl.append(ph.get_label())
        # Shrink the axes so the legend fits outside the plot area.
        box = ax.get_position()
        ax.set_position([box.x0, box.y0, box.width * 0.9, box.height])
        l = ax.legend(phs, lbl, loc='center left', bbox_to_anchor=(1, 0.5),
                      numpoints=1)
        #ax.legend(phs, lbl, loc='upper left'
        plt.title(plot_title)

    def controller_evals(self):
        """Plot discrete controller eigenvalues, one color per speed."""
        self.plot_evals_vs_speed("controller_evals",
                                 "Controller eigenvalues (discrete)")

    def estimator_evals(self):
        """Plot discrete estimator eigenvalues, one color per speed."""
        self.plot_evals_vs_speed("estimator_evals",
                                 "Estimator eigenvalues (discrete)")

    def cl_eval_difference(self):
        """Sanity check: separation principle.

        The closed-loop eigenvalues should equal the union of controller
        and estimator eigenvalues; plot the (ideally zero) difference.
        """
        plt.figure()
        cl_evals_cat = np.hstack((self.d['controller_evals'],
                                  self.d['estimator_evals']))
        cl_evals_cat.sort(axis=1)
        cl_evals_orig = np.sort(self.d['closed_loop_evals'], axis=1)
        diff = cl_evals_cat - cl_evals_orig
        plt.plot(diff.real, diff.imag, 'k.')
        plt.title("Difference in computed closed loop eigenvalues.")

    def bode_r_to_psi_dot(self, speed, filename=None):
        """Bode plot, reference to yaw rate, at the nearest stored speed."""
        i = np.searchsorted(self.d['theta_R_dot'], speed)
        f, axarr = plt.subplots(2, 1, sharex=True, figsize=(8.5,11))
        axarr[0].semilogx(self.d['w_r_to_psi_dot'][i],
                          self.d['mag_r_to_psi_dot'][i])
        axarr[0].set_title('Closed loop tf, $(v, \\dot{\\theta}_R$)'
                           + ' = ({0}, {1})'.format(-self.d['theta_R_dot'][i] *
                               rear.R, self.d['theta_R_dot'][i]))
        axarr[0].set_ylabel("Magnitude [dB]")
        axarr[1].semilogx(self.d['w_r_to_psi_dot'][i],
                          self.d['phase_r_to_psi_dot'][i])
        axarr[1].set_xlabel("Frequency [Hz]")
        axarr[1].set_ylabel("Phase [deg]")
        if filename is not None:
            f.savefig(filename)

    def bode_open_loop_e_to_psi_dot(self, speed, filename=None):
        """Bode plot of the loop seen by the outer PI (inner loop closed)."""
        i = np.searchsorted(self.d['theta_R_dot'], speed)
        f, axarr = plt.subplots(2, 1, sharex=True, figsize=(8.5,11))
        axarr[0].semilogx(self.d['w_e_to_psi_dot'][i],
                          self.d['mag_e_to_psi_dot'][i])
        axarr[0].set_title('Inner LQR/LQG loop closed, PI outer loop open:\n'
                           + '$e_{\dot{\psi}}$ to $\\dot{\\psi}}$\n'
                           + '$(v, \\dot{\\theta}_R$)'
                           + ' = ({0}, {1})'.format(-self.d['theta_R_dot'][i] *
                               rear.R, self.d['theta_R_dot'][i]))
        axarr[0].set_ylabel("Magnitude [dB]")
        axarr[1].semilogx(self.d['w_e_to_psi_dot'][i],
                          self.d['phase_e_to_psi_dot'][i])
        axarr[1].set_xlabel("Frequency [Hz]")
        axarr[1].set_ylabel("Phase [deg]")
        if filename is not None:
            f.savefig(filename)

    def bode_psi_r_to_psi_dot(self, speed, filename=None):
        """Bode plot, yaw-rate reference to yaw rate, full closed loop."""
        i = np.searchsorted(self.d['theta_R_dot'], speed)
        f, axarr = plt.subplots(2, 1, sharex=True, figsize=(8.5,11))
        axarr[0].semilogx(self.d['w_psi_r_to_psi_dot'][i],
                          self.d['mag_psi_r_to_psi_dot'][i])
        axarr[0].set_title('Closed loop\n: $\\dot{\\psi}_r$ to $\\dot{\\psi}$, $(v, \\dot{\\theta}_R$)'
                           + ' = ({0}, {1})'.format(-self.d['theta_R_dot'][i] *
                               rear.R, self.d['theta_R_dot'][i]))
        axarr[0].set_ylabel("Magnitude [dB]")
        axarr[1].semilogx(self.d['w_psi_r_to_psi_dot'][i],
                          self.d['phase_psi_r_to_psi_dot'][i])
        axarr[1].set_xlabel("Frequency [Hz]")
        axarr[1].set_ylabel("Phase [deg]")
        if filename is not None:
            f.savefig(filename)

    def step_r_to_psi_dot(self, speed, x0):
        """Simulate and plot the inner closed loop's unit step response.

        x0 is the 8-state initial condition (4 plant + 4 estimator states).
        """
        i = np.searchsorted(self.d['theta_R_dot'], speed)
        # Stack output rows: yaw rate, control effort, true and estimated states.
        C_yr = self.d['C_cl'][i]
        C_u = np.zeros((1, 8))
        C_u[:, 4:] = self.d['K_c'][i]
        C_x = np.hstack((np.eye(4), np.zeros((4, 4))))
        C_xe = np.hstack((np.zeros((4, 4)), np.eye(4)))
        C = np.vstack((C_yr, C_u, C_x, C_xe))
        D = np.zeros((C.shape[0], 1))
        t, y, x = dlsim((self.d['A_cl'][i],
                         self.d['B_cl'][i],
                         C,
                         D,
                         self.d['dt'][i]),
                        u=np.ones(100),
                        t=np.linspace(0, 20, 100),
                        x0=x0)
        speed_string = '$\\dot{\\theta}_R$'
        speed_string += ' = {0}'.format(self.d['theta_R_dot'][i])
        f, axarr = plt.subplots(2, 1, sharex=True)
        axarr[0].plot(t, y[:, 0])
        axarr[0].set_title("Closed loop yaw rate step response, " + speed_string)
        axarr[0].set_ylabel("Yaw rate")
        axarr[1].plot(t, y[:, 1])
        axarr[1].set_ylabel("Steer torque [N * m]")
        axarr[1].set_xlabel("Time [s]")
        # Overlay each true state with its estimate.
        f, axarr = plt.subplots(2, 2)
        f.suptitle("Estimator performance, " + speed_string)
        axarr[0, 0].plot(t, y[:, [2, 6]])  # Roll
        axarr[0, 0].set_title("Roll angle")
        axarr[0, 1].plot(t, y[:, [3, 7]])  # Steer
        axarr[0, 1].set_title("Steer angle")
        axarr[1, 0].plot(t, y[:, [4, 8]])  # Roll rate
        axarr[1, 0].set_title("Roll rate")
        axarr[1, 1].plot(t, y[:, [5, 9]])  # Steer rate
        axarr[1, 1].set_title("Steer rate")

    def lqrlqg_zero_input(self, speed, x0):
        """Simulate and plot the inner loop's zero-input response from x0."""
        i = np.searchsorted(self.d['theta_R_dot'], speed)
        C_yr = self.d['C_cl'][i]
        C_u = np.zeros((1, 8))
        C_u[:, 4:] = self.d['K_c'][i]
        C_x = np.hstack((np.eye(4), np.zeros((4, 4))))
        C_xe = np.hstack((np.zeros((4, 4)), np.eye(4)))
        C = np.vstack((C_yr, C_u, C_x, C_xe))
        D = np.zeros((C.shape[0], 1))
        u = np.zeros((100,))
        t = np.linspace(0, 20, 100)
        t, y, x = dlsim((self.d['A_cl'][i],
                         self.d['B_cl'][i],
                         C,
                         D,
                         self.d['dt'][i]),
                        u=u,
                        t=t,
                        x0=x0)
        speed_string = '$\\dot{\\theta}_R$'
        speed_string += ' = {0}'.format(self.d['theta_R_dot'][i])
        f, axarr = plt.subplots(2, 1, sharex=True)
        axarr[0].plot(t, y[:, 0])
        axarr[0].set_title("Closed loop zero input response, " + speed_string)
        axarr[0].set_ylabel("Yaw rate")
        axarr[1].plot(t, y[:, 1])
        axarr[1].set_ylabel("Steer torque [N * m]")
        axarr[1].set_xlabel("Time [s]")
        f, axarr = plt.subplots(2, 2)
        f.suptitle("Estimator performance, " + speed_string)
        axarr[0, 0].plot(t, y[:, [2, 6]])  # Roll
        axarr[0, 0].set_title("Roll angle")
        axarr[0, 1].plot(t, y[:, [3, 7]])  # Steer
        axarr[0, 1].set_title("Steer angle")
        axarr[1, 0].plot(t, y[:, [4, 8]])  # Roll rate
        axarr[1, 0].set_title("Roll rate")
        axarr[1, 1].plot(t, y[:, [5, 9]])  # Steer rate
        axarr[1, 1].set_title("Steer rate")

    def step_yr_cl(self, speed, x0):
        """Simulate the full yaw-rate loop (PI + LQR/LQG) 45 deg/s step.

        x0 is the 9-state initial condition (integrator + 4 plant + 4
        estimator states).
        """
        i = np.searchsorted(self.d['theta_R_dot'], speed)
        print(i)
        C_yr = self.d['C_yr_cl'][i]
        C_u_lqr = np.zeros((1, 9))
        C_u_lqr[0, 5:] = self.d['K_c'][i]  # portion of control from LQR/LQG
        C_u_pi = np.zeros((1, 9))
        # NOTE(review): 0.005 looks like a hard-coded sample period used to
        # scale the integrator gain -- confirm against self.d['dt'].
        C_u_pi[0, 0] = self.d['Ki'][i] * 0.005
        C_u_pi[0, 1:5] = -self.d['Kp'][i] * self.d['C_z'][i]
        C_x = np.hstack((np.zeros((4, 1)), np.eye(4), np.zeros((4, 4))))
        C_xe = np.hstack((np.zeros((4, 5)), np.eye(4)))
        C = np.vstack((C_yr, C_u_lqr, C_u_pi, C_x, C_xe))
        D = np.zeros((C.shape[0], 1))
        D[2, 0] = self.d['Kp'][i]
        t, y, x = dlsim((self.d['A_yr_cl'][i],
                         self.d['B_yr_cl'][i],
                         C,
                         D,
                         self.d['dt'][i]),
                        u=45*np.pi/180.*np.ones(100),
                        t=np.linspace(0, 20, 100),
                        x0=x0)
        speed_string = '$\\dot{\\theta}_R$'
        speed_string += ' = {0}'.format(self.d['theta_R_dot'][i])
        f, ax = plt.subplots(2, 1, sharex=True)
        ax[0].plot(t, y[:, 0])
        ax[0].set_title("Closed loop yaw rate step response, " + speed_string)
        ax[0].set_ylabel("Yaw rate")
        # Total steer torque is PI contribution plus LQR contribution.
        ax[1].plot(t, y[:, 1] + y[:, 2])
        ax[1].set_ylabel("Steer torque [N * m]")
        ax[1].set_xlabel("Time [s]")
        f, ax = plt.subplots(2, 2)
        f.suptitle("Estimator performance, " + speed_string)
        ax[0, 0].plot(t, y[:, [3, 7]])  # Roll
        ax[0, 0].set_title("Roll angle")
        ax[0, 1].plot(t, y[:, [4, 8]])  # Steer
        ax[0, 1].set_title("Steer angle")
        ax[1, 0].plot(t, y[:, [5, 9]])  # Roll rate
        ax[1, 0].set_title("Roll rate")
        ax[1, 1].plot(t, y[:, [6, 10]])  # Steer rate
        ax[1, 1].set_title("Steer rate")
        #fig = plt.figure()
        #l = plt.plot(t, x[:, 0])
def main():
    """Load controller design data and show selected diagnostic plots.

    The per-speed plot calls inside the loop are all commented out (the
    bare ``continue`` makes the loop a no-op); uncomment the ones you
    want when exploring a particular speed.
    """
    d = CDataPlotter(datafile="controller_data.npz") #c_data=yrc.design_controller())
    speeds = [1.0, 3.0, 5.0, 7.0, 9.0]  # forward speeds [m/s] to inspect
    x0 = np.zeros((8,))
    # Start the bicycle with non-zero initial conditions to see how
    # quickly estimator converges given the initial condition mismatch
    x0[0] = 1e1 * np.pi/180.0 # Initial roll
    x0[1] = 1e1 * np.pi/180.0 # Initial steer
    x0[2] = 1e1 * np.pi/180.0 # Initial roll rate
    x0[3] = 1e1 * np.pi/180.0 # Initial steer rate
    for v in speeds:
        # Speed is converted to rear wheel rate via -v / rear.R.
        #d.bode_r_to_psi_dot(-v / rear.R)#, "cl_{0}.pdf".format(int(v)))
        #d.bode_open_loop_e_to_psi_dot(-v / rear.R, "e_to_psi_dot_{0}.pdf".format(int(v)))
        #d.bode_psi_r_to_psi_dot(-v / rear.R)
        #d.step_r_to_psi_dot(-v / rear.R, x0)
        #d.lqrlqg_zero_input(-v / rear.R, x0)
        #d.step_yr_cl(-v / rear.R, np.hstack((0, x0)))
        continue
    d.plant_evals_c()
    #d.plant_evals_d()
    # Speed parameterized plots
    #d.controller_gains()
    #d.estimator_gains()
    #d.pi_gains()
    #d.estimator_evals()
    #d.closed_loop_evals()
    #d.controller_evals()
    #d.cl_eval_difference()
    plt.show()


if __name__ == "__main__":
    main()
|
UTF-8
|
Python
| false | false | 2,013 |
2,680,059,637,157 |
bb5e03921fb59f0c5b1132c5264a24821ffd36ea
|
cf5db2a91932096796a59200643cb5c4bff2f6db
|
/Google+/Database.py
|
e4e85f9dc9eab7488c7b54c4c4c3462a22637a2e
|
[] |
no_license
|
mkfsn/DataMining
|
https://github.com/mkfsn/DataMining
|
a184cf9a9ecd367ea1a4b6e185d363a7c141e397
|
b1ef8905e1e0d9a1d4333df811136d225a50f63d
|
refs/heads/master
| 2020-04-06T04:25:22.236620 | 2014-06-16T07:02:56 | 2014-06-16T07:02:56 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__date__= ' 4月 30, 2014 '
__author__= 'mkfsn'
import json
import sqlite3
import MySQLdb
import MySQLdb.cursors
class SQLite:
    """Thin wrapper around an sqlite3 connection for the stocks table.

    Opens (or creates) `database`, ensures the `stocks` table exists,
    and exposes a simple `insert` helper. Usable as a context manager.
    """
    connection = None
    cursor = None

    def __init__(self, database):
        """Connect to `database` and create the stocks table if absent."""
        self.connection = sqlite3.connect(database)
        self.cursor = self.connection.cursor()
        self.cursor.execute('CREATE TABLE IF NOT EXISTS stocks (date text, trans text, symbol text, qty real, price real)')
        self.connection.commit()

    def insert(self, query):
        """Execute a raw INSERT statement and commit it.

        NOTE: `query` is executed verbatim -- never pass untrusted input;
        use parameterized queries for external data.
        """
        self.cursor.execute(query)
        self.connection.commit()

    def __enter__(self):
        # BUGFIX: the original defined __exit__ but no __enter__, so the
        # class could not actually be used in a `with` statement.
        return self

    def __exit__(self, exc_type=None, exc_value=None, traceback=None):
        # BUGFIX: the context-manager protocol passes three exception
        # arguments; the original signature `__exit__(self)` would raise
        # TypeError when used with `with`. Defaults keep old direct
        # `obj.__exit__()` calls working.
        self.connection.close()
class MySQL:
    """MySQL access layer for the Google+ crawler tables.

    Reads connection credentials from `mkfsn_secrets.json` (keys under
    "Database": Host/Name/Username/Password) and provides helpers for
    the Google_Friends, Google_Posts and Google_Comments tables.
    """
    DB_HOST = ""
    DB_NAME = ""
    DB_USER = ""
    DB_PASS = ""
    db = None

    def __init__(self):
        # NOTE(review): the secrets file handle is never closed.
        json_data = open('mkfsn_secrets.json')
        secrets = json.load(json_data)
        self.DB_HOST = secrets['Database']['Host']
        self.DB_NAME = secrets['Database']['Name']
        self.DB_USER = secrets['Database']['Username']
        self.DB_PASS = secrets['Database']['Password']
        # DictCursor makes fetch results dictionaries keyed by column name.
        self.db = MySQLdb.connect(host=self.DB_HOST,
                                  user=self.DB_USER,
                                  passwd=self.DB_PASS,
                                  db=self.DB_NAME,
                                  cursorclass=MySQLdb.cursors.DictCursor)

    def friend_list(self):
        """Return all rows of Google_Friends not yet processed (Done=0)."""
        cursor = self.db.cursor()
        cursor.execute("SELECT * FROM `Google_Friends` WHERE `Done`=0")
        self.db.commit()
        result = cursor.fetchall()
        return result

    def freiend_complete(self, userid):
        """Mark the friend row with id `userid` as processed (Done=1).

        NOTE(review): method name is misspelled ("freiend") -- kept for
        compatibility with existing callers.
        """
        cursor = self.db.cursor()
        cursor.execute("UPDATE `Google_Friends` SET `Done`=1 WHERE id = %s", (userid,))
        self.db.commit()

    def article_save(self, posturl, userid, urls, verb, date, kind, postid):
        """Insert one post row into Google_Posts (IGNORE on duplicates)."""
        cursor = self.db.cursor()
        where = (posturl, userid, urls, verb, date, kind, postid)
        cursor.execute("INSERT IGNORE INTO `Google_Posts` VALUES (%s,%s,%s,%s,%s,%s,%s)", where)
        self.db.commit()

    def article_list(self):
        """Return Post_IDs of posts whose comments were not crawled yet."""
        cursor = self.db.cursor()
        sql = """SELECT Post_ID FROM `Google_Posts` WHERE Post_ID NOT IN (SELECT Post_ID FROM `Google_Comments`)"""
        cursor.execute(sql)
        self.db.commit()
        result = cursor.fetchall()
        return result

    def comment_save(self, data):
        """Insert an iterable of 9-tuples into Google_Comments.

        Duplicates are skipped via INSERT IGNORE; one commit at the end.
        """
        sql = """INSERT IGNORE INTO `Google_Comments` VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s)"""
        cursor = self.db.cursor()
        for where in data:
            cursor.execute(sql, where)
        self.db.commit()
def test():
    """Smoke test: connect with mkfsn_secrets.json and dump pending friends."""
    # Earlier SQLite exercise kept for reference:
    # sqlite = SQLite('./test.sqlite3')
    # sqlite.insert("INSERT INTO stocks VALUES ('2006-01-05','BUY','RHAT',100,35.14)")
    db = MySQL()
    print db.friend_list()


if __name__ == '__main__':
    test()
|
UTF-8
|
Python
| false | false | 2,014 |
18,296,560,711,618 |
3d44cb24cf5af5f443d3231ee76718c1313abf23
|
4db415ed3c738362eb2df5867c0a4798ca8629b0
|
/filter-trends.py
|
0540327ba410f7d2577f0e19cff4cdd467de5d29
|
[] |
no_license
|
norbert/re-nnmcts
|
https://github.com/norbert/re-nnmcts
|
c0f32bd9ca4640d9e1df02984dc87bcc8659e9b1
|
c2a47de24ac16e4e35cbde61399bd062ac9b87d6
|
refs/heads/master
| 2021-01-20T05:31:18.186721 | 2014-03-27T16:18:35 | 2014-03-27T16:20:49 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
from __future__ import print_function
import sys
import re
import argparse
from collections import OrderedDict
import twokenize
# Command line: MIN/MAX bound the accepted first-seen time (unix seconds);
# --boundary shrinks that window symmetrically at both ends.
parser = argparse.ArgumentParser()
parser.add_argument('-b', '--boundary', metavar='BOUNDARY', type=int)
parser.add_argument('minimum', metavar='MIN', type=int)
parser.add_argument('maximum', metavar='MAX', type=int)
args = parser.parse_args()

MIN = args.minimum
MAX = args.maximum
BOUNDARY = args.boundary or 0

# Aggregate stdin rows (name \t collected_at \t rank) per trend name:
# value layout is [best rank, first seen, last seen, onset time].
# "Onset" is the first time the trend was seen at rank <= 3.
TRENDS = OrderedDict()
for line in sys.stdin:
    fields = unicode(line.strip(), 'utf-8').split('\t')
    # Normalize the trend name via the twokenize tweet tokenizer.
    name = ' '.join(twokenize.tokenize(fields[0].lower()))
    collected_at = int(fields[1])
    rank = int(fields[2])
    onset = collected_at if rank <= 3 else None
    if not name in TRENDS:
        TRENDS[name] = [rank, collected_at, collected_at, onset]
    else:
        t = TRENDS[name]
        if onset is not None:
            # Keep the earliest top-3 sighting.
            t[3] = min(onset, t[3]) if t[3] is not None else onset
        t[0] = min(rank, t[0])
        t[1] = min(collected_at, t[1])
        t[2] = max(collected_at, t[2])

# Emit trends that reached the top 3, lasted between 30 minutes and 24
# hours, and first appeared inside the (boundary-shrunk) time window.
for name, t in TRENDS.iteritems():
    if t[3] is None:
        continue
    duration = t[2] - t[1]
    if duration < (30 * 60):
        continue
    elif duration > (24 * 60 * 60):
        continue
    elif t[1] < MIN + BOUNDARY:
        continue
    elif t[1] >= MAX - BOUNDARY:
        continue
    print('\t'.join([name, str(t[1]), str(t[0]), str(duration)])
          .encode('utf-8'))
|
UTF-8
|
Python
| false | false | 2,014 |
15,307,263,475,165 |
b1ced03ce86e5f55a347a32f8dec4ae7524d86e4
|
4007e0fc71a3376024a4321ed31d7e304a011fa5
|
/strings/strings.py
|
e9d5eddb60ca6d61aefbd571192c8197cc173e08
|
[] |
no_license
|
jasonfigueroa/PythonSnippets
|
https://github.com/jasonfigueroa/PythonSnippets
|
f79edbac872955d5643ec48e3a191272dccd8b46
|
e9af7748a6a67ddcbd032b14b10f4155c17f988a
|
refs/heads/master
| 2020-12-29T01:54:22.521152 | 2014-11-22T01:16:21 | 2014-11-22T01:16:21 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# in this file I'll be practicing strings and a few basic ways to use or
# manipulate strings.

# string variable for our name; you can enclose the string with single quotes or
# double quotes. I'll use singles so I don't have to use the shift key
name = 'Jason'
# string variable for a number as a string, using double quotes here
stringNum = "1"
# printing a plain string literal, in this case the word Hello
print ("Hello")
# printing the string variable name; "\n" adds an extra blank line between
# this print statement and the next print statement
print (name + "\n")
#print ("\n")
# printing the same string and concatenating (fancy word for appending) the
# variable name to it
print ('Hello' + name)
# notice the previous line printed "Hello" and name with no space between them
# let's fix that by simply adding an extra space after "Hello" so "Hello "
print ("Hello" + " " + name)
# An alternate way of doing this and in my opinion a simpler way, less typing ;)
print ("Hello " + name)
# another way of printing an extra line
print()
# printing a number as a string
print ("num: " + stringNum)
# print (stringNum + 2) would produce an error; you would either have to convert
# the string variable stringNum to an int variable or the number 2 to a string.
# First we'll convert the number 2 to a string, using the str() function
print ("first num: " + stringNum + "; second num: " + str(2))
# about tired of retyping this, let's throw it in a variable
greet = 'Hello '
print()
# putting it all together
print (greet + name + ' your number is: ' + stringNum)
|
UTF-8
|
Python
| false | false | 2,014 |
18,683,107,743,762 |
16efae70d5cbf8ed36d64cd10a4df94079305d56
|
d767de7e981f7ede8b290857205fbc2576e4a663
|
/setup.py
|
c4bc44af8ec904b489880a9555f250486721627a
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
non_permissive
|
alanzoppa/pyfacebook
|
https://github.com/alanzoppa/pyfacebook
|
44a23a16d830bafacda3d3b32e38568bfa362d44
|
87a8fcc659a07fec10c16fa80fb460d722212f95
|
refs/heads/master
| 2021-01-16T20:43:55.981968 | 2010-09-08T17:00:51 | 2010-09-08T17:00:51 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
from distutils.core import setup
# Standard distutils metadata; installs the pyfacebook package and its
# Django integration sub-packages via `python setup.py install`.
setup(name='pyfacebook',
      version='0.1',
      description='Python Client Library for the Facebook API',
      author='Samuel Cormier-Iijima',
      author_email='[email protected]',
      url='http://github.com/alanzoppa/pyfacebook',
      packages=['pyfacebook', 'pyfacebook.djangofb',
                'pyfacebook.djangofb.default_app'])
|
UTF-8
|
Python
| false | false | 2,010 |
19,069,654,820,799 |
8d8709548b3b9427ee9f7bd13c83d66e20d889eb
|
f668c73303941f27f6e68a15b0e44f898f6ed897
|
/results/2012-12-23/runtrees.py
|
af12b658869b996e639a4eb28947f22a9c7d4612
|
[] |
no_license
|
FilipDom/AppBio-Noise-Reduce
|
https://github.com/FilipDom/AppBio-Noise-Reduce
|
dc4ad47ca03e9e8190fcf76a8e05ef450b7872bd
|
dff4141191b7c0bbd4946a175d9182e7bf66c7aa
|
refs/heads/master
| 2016-09-06T19:18:01.681023 | 2013-01-07T18:00:43 | 2013-01-07T18:00:43 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#! /usr/bin/env python
'''
Generates noisy and noisy reduced trees for all the alignments in all the data sets.
The results are saved in the trees subfolder within a subfolder with the same name
as the data set folder.
'''
import os
import subprocess
# Input alignments live under dataBaseDir/<variant>; tree outputs mirror
# that layout under outBaseDir/<variant>.
dataBaseDir = 'data/2012-12-19'
dataEndDirs = ['symmetric_0.5', 'symmetric_1.0', 'symmetric_2.0',
               'asymmetric_0.5', 'asymmetric_1.0', 'asymmetric_2.0']
outBaseDir = 'results/2012-12-23/trees'

for endDir in dataEndDirs:
    dataDir = os.path.join(dataBaseDir, endDir)
    outDir = os.path.join(outBaseDir, endDir)
    if not os.path.exists(outDir):
        # BUGFIX: os.mkdir fails when the parent directories are missing;
        # makedirs creates the whole nested output path as needed.
        os.makedirs(outDir)
    # Delegate the actual tree computation to the helper script; '300'
    # is passed through as its third argument.
    subprocess.call(['python', 'src/treeCalcFolder.py', dataDir, outDir, '300'])
|
UTF-8
|
Python
| false | false | 2,013 |
12,996,571,062,689 |
c6bb2d6f1f746cbf6f39d964c2024c82c0cc8dca
|
39dabb0e739d2d464906a1ed6dc089d10016c90e
|
/scripts/no_isbn_script.py
|
51d845f5fe8b55d3d85f3d5cbf2c0ff764fa2920
|
[
"GPL-3.0-or-later"
] |
non_permissive
|
merbroussard/sdp_curricula
|
https://github.com/merbroussard/sdp_curricula
|
2d773c7d452053c52a8fbb564a35c81e716bd6f7
|
16fe5b3d2dffe480e6ee4f46a5e71eb0c1afd54d
|
refs/heads/master
| 2021-01-16T19:52:02.988454 | 2014-03-14T19:24:33 | 2014-03-14T19:24:33 | 11,127,187 | 0 | 1 | null | true | 2014-01-14T18:10:06 | 2013-07-02T14:53:46 | 2014-01-14T18:10:06 | 2014-01-14T18:10:06 | 26,501 | 0 | 0 | 1 |
JavaScript
| null | null |
import os
from os.path import abspath, dirname
import sys
import csv
import re
# Set up django path so you can just run this in the VM
project_dir = abspath(dirname(dirname(__file__)))
sys.path.insert(0, project_dir)
os.environ["DJANGO_SETTINGS_MODULE"] = "sdp_curricula.settings"
from curricula.models import LearningMaterial, Publisher, PublisherGroup, Curriculum, GradeCurriculum
from vendors.models import Vendor, NegotiatedPrice
from schools.models import SchoolType
def define_curriculum(info, c_name, c_pub):
    """Return the Curriculum named `c_name` for publisher `c_pub`.

    Looks the curriculum up by name and publisher group; creates it with
    subject area `info[4]` if it does not exist yet.
    """
    try:
        c = Curriculum.objects.get(name=c_name, publisher=c_pub.group)
        print 'Found the curriculum ' + c.name
    except Curriculum.DoesNotExist:
        subject = info[4]  # subject area column of the header row
        c = Curriculum(name=c_name, publisher=c_pub.group, subject_area=subject)
        c.save()
        print 'Created the curriculum ' + c.name
    return c
def define_grade_curriculum(info, c):
    """Return the GradeCurriculum for curriculum `c` spanning the grade
    range `info[6]`..`info[7]`, creating it if it does not exist.
    """
    try:
        g = GradeCurriculum.objects.get(curriculum=c, grade_level_start=info[6], grade_level_end=info[7])
        print 'Found the curriculum set/grade curriculum for this file'
        print g
    except GradeCurriculum.DoesNotExist:
        g = GradeCurriculum(curriculum=c, grade_level_start=info[6], grade_level_end=info[7])
        g.save()
        print 'Created the curriculum set/grade curriculum'
    return g
def is_empowerment(info):
    """Return True if the header row flags the curriculum for
    empowerment schools (column 5 equals the string 'TRUE').

    Improvement: the original returned True or (implicitly) None; now a
    real boolean is always returned, which is truthiness-compatible for
    all existing callers.
    """
    return info[5] == 'TRUE'
def is_default(info):
    """Return True if the curriculum applies to default schools.

    Rows shorter than 9 columns default to True; otherwise column 8 must
    not be the string 'FALSE'. Improvement: the original returned True
    or (implicitly) None; now a real boolean is always returned, which
    is truthiness-compatible for all existing callers.
    """
    return len(info) <= 8 or info[8] != 'FALSE'
def add_school_types(info, g, default, empowerment):
    """Attach grade curriculum `g` to the school types the header row
    approves it for (`default` and/or `empowerment` SchoolType rows).
    """
    if is_default(info):
        default.approved_curricula.add(g)
        print 'Approved for default'
    if is_empowerment(info):
        empowerment.approved_curricula.add(g)
        print 'Approved for empowerment'
def iterate_through_data(data, publisher, grade_curriculum, vendor, default,
                         empowerment, is_empowerment, is_default):
    """Import material rows from the CSV reader `data`.

    Expected columns: 0 title, 1 ordering code/ISBN, 2 price, 3 material
    type, 4 teacher-edition flag ('TRUE'), 5 quantity. For each row with
    a price, the material is created/updated, linked to
    `grade_curriculum`, and given a NegotiatedPrice for `vendor`.
    `is_empowerment`/`is_default` are booleans (shadowing the module
    functions of the same name) selecting which school types the price
    applies to.
    """
    for row in data:
        ordering_code = row[1].strip()
        # Rows without a price (column 2) are skipped entirely.
        if len(row[2]) > 1:
            title = row[0].strip().replace('*', '')
            try:
                if ordering_code == '':
                    # No ISBN: fall back to lookup by title.
                    print title
                    material = LearningMaterial.objects.get(title=title)
                    print 'Empty ISBN'
                else:
                    material = LearningMaterial.objects.get(ordering_code=ordering_code)
                    material.title = title
                    material.save()
                    print 'Material title updated to ' + title
                print 'Found material ' + material.title
                if row[4] == 'TRUE':
                    material.isTeacherEdition = True
                    material.save()
                    print 'Updated as Teacher\'s edition'
                if row[5] != '':
                    material.quantity = int(row[5])
                    material.save()
                    print material.quantity
            except LearningMaterial.DoesNotExist:
                # Material not seen before: create it from the row.
                print 'Creating material'
                m_type = row[3]
                if row[4] == 'TRUE':
                    teachers = True
                    print 'This is a teacher\'s edition'
                else:
                    teachers = False
                if row[5] != '':
                    q = row[5]
                else:
                    q = 1
                material = LearningMaterial(ordering_code=ordering_code,
                                            title=title, publisher=publisher,
                                            material_type=m_type, isTeacherEdition=teachers,
                                            quantity=q)
                material.save()
                print 'Material created: ' + material.title
            # Link the material to the grade curriculum if not already linked.
            try:
                if ordering_code == '':
                    grade_curriculum.materials.get(title=material.title)
                else:
                    grade_curriculum.materials.get(ordering_code=material.ordering_code)
                print 'Looks like we already added this one to the curriculum!'
            except LearningMaterial.DoesNotExist:
                grade_curriculum.materials.add(material)
                grade_curriculum.save()
                print 'Material added to grade curriculum'
            # if '$' in row[2]:
            # Strip '$' and ',' before parsing the price.
            price = float(re.sub('[\$,]', '', row[2]))
            try:
                n = NegotiatedPrice.objects.get(value=price, material=material)
                print 'We have a price for this :)'
            except NegotiatedPrice.DoesNotExist:
                n = NegotiatedPrice(value=price, vendor=vendor, material=material)
                n.save()
                print 'Created the price'
            print material.title + ' has a price of ' + str(n.value)
            if is_default:
                n.negotiated_for_school_type.add(default)
                print 'Priced for default'
            if is_empowerment:
                n.negotiated_for_school_type.add(empowerment)
                print 'Priced for empowerment'
def check_digit_10(isbn):
assert len(isbn) == 9
sum = 0
for i in range(len(isbn)):
c = int(isbn[i])
w = i + 1
sum += w * c
r = sum % 11
if r == 10:
return 'X'
else:
return str(r)
if __name__ == "__main__":
    # Usage: python no_isbn_script.py <vendor name> <csv path>
    try:
        print 'Looking for vendor with name ' + sys.argv[1]
        try:
            v = Vendor.objects.get(name=sys.argv[1])
            print 'Found ' + v.name
        except Vendor.DoesNotExist:
            v = Vendor(name=sys.argv[1])
            v.save()
            print 'Created ' + v.name
        csv_filepathname = sys.argv[2]  # Change to the CSV
        print 'Using the csv ' + csv_filepathname
    except IndexError:
        # NOTE(review): execution continues after this message and will
        # fail on the undefined csv_filepathname -- consider exiting here.
        print 'Not enough arguments: vendor name and csv path expected'
    data = csv.reader(open(csv_filepathname, 'rU'), delimiter=',', quotechar='"')
    data.next()          # skip the column-header line
    info = data.next()   # file header row describing the curriculum
    print info
    c_name = info[0]
    # Find or create the publisher (and its group) named in column 3.
    try:
        c_pub = Publisher.objects.get(name=info[3])
    except Publisher.DoesNotExist:
        new_group = PublisherGroup(name=info[3])
        new_group.save()
        c_pub = Publisher(name=info[3], group=new_group)
        c_pub.save()
    print 'Found a publisher named ' + c_pub.name
    c = define_curriculum(info, c_name, c_pub)
    g = define_grade_curriculum(info, c)
    default = SchoolType.objects.get(name="Default")
    empowerment = SchoolType.objects.get(name="Empowerment")
    add_school_types(info, g, default, empowerment)
    data.next()          # skip the blank/separator row before the data rows
    print 'Starting data iteration ...'
    iterate_through_data(data, c_pub, g, v, default,
                         empowerment, is_empowerment(info), is_default(info))
|
UTF-8
|
Python
| false | false | 2,014 |
1,769,526,538,989 |
b9ef324892a71936b917daadbfaa031151aa06e5
|
da2fa913fd59cfebb03b1d795dcaa023cc5de469
|
/unittest2/unittest2/plugins/timed.py
|
797ad6150ce9c0f142cf14b8d60220dad432cbae
|
[] |
no_license
|
justinabrahms/test_generators
|
https://github.com/justinabrahms/test_generators
|
387a7f3341f7b5016492c3558fd8ca0c09431814
|
f8baf5741e20f0eadc4bc4491264d11bb2356a2e
|
refs/heads/master
| 2021-01-19T20:22:38.349367 | 2010-08-10T22:35:21 | 2010-08-10T22:35:21 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from unittest2.events import Plugin
help_text = 'Output time taken for each test (verbose only)'
class TimedTests(Plugin):
    """unittest2 plugin: report each test's wall time in verbose mode."""
    # Config file section and -T/--timed command line switch.
    configSection = 'timed'
    commandLineSwitch = ('T', 'timed', help_text)

    def __init__(self):
        # Only report tests slower than 'threshold' seconds (default 0 =
        # report everything). self.config presumably comes from the
        # Plugin base class -- TODO confirm.
        self.threshold = self.config.as_float('threshold', 0)

    def stopTest(self, event):
        """Emit the elapsed time for a finished test, if slow enough."""
        if event.timeTaken >= self.threshold:
            msg = ' %.2f seconds ' % event.timeTaken
            # only output in verbose (verbosity = 2)
            event.message(msg, 2)
|
UTF-8
|
Python
| false | false | 2,010 |
7,464,653,208,835 |
b1eb481b7c0b9d50aca09b6a934c2f3c758cefde
|
e1e8d557249eefbb0bd2fa46f21b9f012bc40f55
|
/example/cache.py
|
09691ca636c0fe1adbcd2a5b2e789479a6ff1230
|
[
"Apache-2.0"
] |
permissive
|
juanmaneo/rift
|
https://github.com/juanmaneo/rift
|
def6c9578dbcaa2a617fb9192ae0cc491fbb708a
|
1dc80321168bf9d2a34bb368f1b1caffcfd06793
|
refs/heads/master
| 2021-01-19T11:53:58.507081 | 2014-02-11T21:32:56 | 2014-02-11T21:32:56 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import elliptics
import msgpack
class Options(object):
    """Value object holding the parsed command-line configuration.

    All fields start out as None (flags unset) except `remove`, which
    defaults to False; parse_options() fills them in.
    """

    def __init__(self):
        # Elliptics group lists (destination, source, cache).
        self.groups = self.source_groups = self.cache_groups = None
        # Connection endpoints and client logging.
        self.remotes = None
        self.log_file = self.log_level = None
        # Keys to operate on, with their session namespaces.
        self.cache_list = self.cache_list_namespace = None
        self.file = self.file_namespace = None
        # When True, remove the file from cache groups instead of copying.
        self.remove = False
def parse_options():
    """Parse command line arguments into a validated Options object.

    Raises ValueError on missing/invalid groups, remotes, cache-list or
    file arguments.
    """
    from optparse import OptionParser
    options = Options()
    parser = OptionParser()
    parser.usage = "%prog type [options]"
    parser.description = __doc__
    parser.add_option("-g", "--groups", action="store", dest="groups", default=None,
                      help="Comma separated list of groups where to store cache files")
    parser.add_option("-s", "--source-groups", action="store", dest="source_groups", default=None,
                      help="Comma separated list of groups of original files")
    parser.add_option("-c", "--cache-groups", action="store", dest="cache_groups", default=None,
                      help="Comma separated list of groups of cache files")
    parser.add_option("-l", "--log", dest="log_file", default='/dev/stderr', metavar="FILE",
                      help="Output log messages from library to file [default: %default]")
    parser.add_option("-L", "--log-level", action="store", dest="log_level", default="1",
                      help="Elliptics client verbosity [default: %default]")
    parser.add_option("-r", "--remote", action="append", dest="remote",
                      help="Elliptics node address [default: %default]")
    parser.add_option("--cache-list", action="store", dest="cache_list", default=None,
                      help="Copy this key from source to cache groups")
    parser.add_option("--cache-list-namespace", action="store", dest="cache_list_namespace", default=None,
                      help="Elliptics session namespace for cache list key")
    parser.add_option("--file", action="store", dest="file", default=None,
                      help="Copy this key from source to cache groups")
    parser.add_option("--file-namespace", action="store", dest="file_namespace", default=None,
                      help="Elliptics session namespace for file key")
    parser.add_option("--remove", action="store_true", dest="remove", default=False,
                      help="Remove file from cache groups instead of copying")
    (parsed_options, args) = parser.parse_args()

    if not parsed_options.groups:
        raise ValueError("Please specify at least one group (-g option)")

    def parse_groups(string):
        # Parse a comma separated list of integer group ids.
        # NOTE(review): the error message formats parsed_options.groups
        # instead of the `string` actually being parsed, so failures on
        # source/cache group lists report the wrong value.
        try:
            return map(int, string.split(','))
        except Exception as e:
            raise ValueError("Can't parse groups list: '{0}': {1}".format(parsed_options.groups, repr(e)))

    options.groups = parse_groups(parsed_options.groups)
    print("Using groups list: {0}".format(options.groups))
    options.source_groups = parse_groups(parsed_options.source_groups)
    print("Using source groups list: {0}".format(options.source_groups))
    options.cache_groups = parse_groups(parsed_options.cache_groups)
    print("Using cache groups list: {0}".format(options.cache_groups))

    try:
        options.log_file = parsed_options.log_file
        options.log_level = int(parsed_options.log_level)
    except Exception as e:
        raise ValueError("Can't parse log_level: '{0}': {1}".format(parsed_options.log_level, repr(e)))
    print("Using elliptics client log level: {0}".format(options.log_level))

    if not parsed_options.remote:
        raise ValueError("Please specify at least one remote address (-r option)")
    try:
        # Each -r value is parsed as host:port:family by the elliptics binding.
        options.remotes = []
        for r in parsed_options.remote:
            options.remotes.append(elliptics.Address.from_host_port_family(r))
            print("Using remote host:port:family: {0}".format(options.remotes[-1]))
    except Exception as e:
        raise ValueError("Can't parse host:port:family: '{0}': {1}".format(parsed_options.remote, repr(e)))

    if not parsed_options.cache_list:
        raise ValueError("Please specify cache list key (--cache-list option)")
    if not parsed_options.file:
        raise ValueError("Please specify file key (--file option)")
    options.cache_list = parsed_options.cache_list
    options.cache_list_namespace = parsed_options.cache_list_namespace
    options.file = parsed_options.file
    options.file_namespace = parsed_options.file_namespace
    options.remove = parsed_options.remove
    return options
if __name__ == '__main__':
    options = parse_options()

    # Connect to the elliptics cluster; proceed if at least one remote works.
    logger = elliptics.Logger(options.log_file, options.log_level)
    node = elliptics.Node(logger)
    any_remote = False
    for remote in options.remotes:
        try:
            node.add_remote(remote)
            any_remote = True
        except Exception as e:
            print("Couldn't connect to remote: {0} got: {1}".format(remote, e))
    if not any_remote:
        raise ValueError("Couldn't connect to any remote")

    # cache_list = receive_list(node, options)

    # Session holding the cache-list key (the index of cached files).
    list_session = elliptics.Session(node)
    list_session.groups = options.groups
    if options.cache_list_namespace:
        list_session.set_namespace(options.cache_list_namespace)

    # Sessions for the file itself: where it originally lives and where
    # it is (to be) cached. Both share the file namespace.
    source_session = elliptics.Session(node)
    source_session.groups = options.source_groups
    source_session.namespace = options.file_namespace

    cache_session = elliptics.Session(node)
    cache_session.groups = options.cache_groups
    cache_session.namespace = options.file_namespace

    file_key = source_session.transform(options.file)
    # Raw key bytes, used as the dictionary key inside the cache list.
    file_id = ''.join(chr(x) for x in file_key.id)

    if not options.remove:
        print("Add {0} to groups {1}".format(options.file, options.cache_groups))
        # Read the record from the source groups...
        read_results = source_session.read_data(file_key)
        read_results.wait()
        read_result = read_results.get()[0]
        # ...and write an identical record (same id/timestamp/user_flags)
        # into the cache groups.
        io_attr = elliptics.IoAttr()
        io_attr.id = read_result.id
        io_attr.timestamp = read_result.timestamp
        io_attr.user_flags = read_result.user_flags
        write_result = cache_session.write_data(io_attr, read_result.data)
        write_result.wait()

        def add_file(data):
            # Compare-and-swap transform: register this file's cache groups
            # in the msgpack-encoded cache list.
            cache_list = msgpack.unpackb(data)
            cache_list[file_id] = options.cache_groups
            return msgpack.packb(cache_list)
        list_session.write_cas(options.cache_list, add_file, 0, 3)
    else:
        print("Remove {0} from groups {1}".format(options.file, options.cache_groups))
        remove_result = cache_session.remove(file_key)
        remove_result.wait()

        def remove_file(data):
            # Compare-and-swap transform: drop this file from the cache list.
            cache_list = msgpack.unpackb(data)
            del cache_list[file_id]
            return msgpack.packb(cache_list)
        list_session.write_cas(options.cache_list, remove_file, 0, 3)
|
UTF-8
|
Python
| false | false | 2,014 |
17,609,365,947,397 |
4773169682416deaec6688fb5118771821aafb9a
|
9b48138d91b6433041f4571e995849a50beae08e
|
/test/classification.py
|
e9960239b4dcee3abe309c6f400d67180a1da6ab
|
[] |
no_license
|
nicorotstein/juan_alberto
|
https://github.com/nicorotstein/juan_alberto
|
2ca0cef65769324a750cc1bc69337cb792b9cd80
|
9ae664890625d576b9ad2679c68cbd5793d5c151
|
refs/heads/master
| 2016-09-05T20:17:57.369366 | 2012-05-08T19:37:59 | 2012-05-08T19:37:59 | 3,538,435 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from __future__ import division
#tagger = NGramTagger('es')
from nltk import NaiveBayesClassifier
#from micorpus import get_review
from nltk.corpus.reader import TaggedCorpusReader
from nltk.corpus.util import LazyCorpusLoader
from nltk.corpus import stopwords
from nltk import trigrams
def get_features(trigr):
    """Return the classifier feature dict for one tagged trigram.

    trigr -- ((word, tag), (word, tag), (word, tag))

    Bug fix: the first feature key was 'first_word:' (stray trailing
    colon), inconsistent with 'second_word'/'third_word'. Training and
    classification both go through this function, so the rename stays
    internally consistent.
    """
    return {
        'first_word': (trigr[0][0]),
        'first_pos': (trigr[0][1]),
        'second_word': (trigr[1][0]),
        'second_pos': (trigr[1][1]),
        'third_word': (trigr[2][0]),
        'third_pos': (trigr[2][1]),
    }
def load_train_set():
    """Load the classifier training set from ../data/classitest.txt.

    Each line of the file holds a printed (trigram, label) tuple.
    Returns a list of (feature_dict, label) pairs.
    """
    train_set = []
    # with-statement guarantees the handle is closed (the original leaked it).
    with open('../data/classitest.txt', 'r') as archivo:
        for linea in archivo:
            # NOTE(review): eval() on file content — acceptable only because
            # the data file is generated locally; never use on untrusted input.
            tup = eval(linea)
            train_set.append((get_features(tup[0]), tup[1]))
    return train_set
def read_sentences_corpus(reader=None):
    """Read positive/negative tagged sentences from ../data/*.pos files.

    reader -- optional TaggedCorpusReader. The original accepted this
              argument but unconditionally overwrote it (see the old
              'its overriding reader' note); now a default reader over
              ../data/ is built only when none is supplied.

    Returns a (pos_sents, neg_sents) tuple of tagged-sentence lists,
    taken from fileids()[1] and fileids()[0] respectively.
    """
    if reader is None:
        reader = TaggedCorpusReader('../data/', r'.*\.pos')
    pos_fileids = reader.fileids()[1]
    neg_fileids = reader.fileids()[0]
    pos_sents = reader.tagged_sents(pos_fileids)
    neg_sents = reader.tagged_sents(neg_fileids)
    return (pos_sents, neg_sents)
def chunks(sentences, n=10):
    """Yield successive n-sized chunks from sentences.

    Fix: the original iterated with the Python 2-only xrange(); this
    index loop produces the identical slices and runs on both Python 2
    and Python 3.
    """
    i = 0
    total = len(sentences)
    while i < total:
        yield sentences[i:i + n]
        i += n
def get_trigrams(sentence):
    """Return the trigrams of one tagged sentence (delegates to nltk.trigrams)."""
    sentence_trigrams = trigrams(sentence)
    return sentence_trigrams
class Metrics():
    """Simple corpus statistics helpers."""

    @staticmethod
    def diversity(corpus):
        """Return the token/type ratio: len(corpus) / number of distinct items.

        Bug fix: this was declared as an instance method without `self`,
        so Metrics().diversity(corpus) raised TypeError; @staticmethod
        makes both Metrics.diversity(c) and instance calls work.
        (True division is in effect — the module imports __future__.division.)
        """
        return len(corpus) / len(set(corpus))
def test_metrics(reader):
#reader = LazyCorpusLoader()
pos = reader.words(reader.fileids()[1])
print 'tokens: {0}, vocabulary:{1}'.format(len(pos),len(set(pos)))
print len(pos) / len(set(pos))
# NOTE(review): '__main_' (single trailing underscore) never equals
# '__main__', so this branch is dead code — presumably disabled on
# purpose since the block below is the active entry point; confirm
# before "fixing" the name.
if __name__ == '__main_':
    reader = TaggedCorpusReader('../data/', r'.*\.pos')
    test_metrics(reader)
if __name__ == '__main__':
    # NOTE(review): reader is built here but never used in this branch.
    reader = TaggedCorpusReader('../data/', r'.*\.pos')
    from nltk import word_tokenize, pos_tag
    # Train a Naive Bayes classifier on trigram features loaded from disk.
    train_set = load_train_set()
    classifier = NaiveBayesClassifier.train(train_set)
    #to classify we have to pass a trigram.
    test = ['always helpfull staff','the room was small','big room and beds','the view is amazing','neighbourhood is a bit ugly','connected with metro']
    for t in test:
        # POS-tag each sample phrase, then classify every trigram in it.
        tagged_t = pos_tag(word_tokenize(t))
        print tagged_t
        for trig in trigrams(tagged_t):
            print trig
            print classifier.classify(get_features(trig))
        print '------------'
|
UTF-8
|
Python
| false | false | 2,012 |
6,717,328,872,528 |
c2143ae81644743bcd855865555b3988740aaa4b
|
eaa0aa13ea83be4d32dd08beac0497031861e562
|
/examples/block/client.py
|
3ca76b48840b1342611d8f3acad322cb30578dd7
|
[
"BSD-3-Clause"
] |
permissive
|
teepark/junction
|
https://github.com/teepark/junction
|
53cdb4e03f8f1de730ea17da66187079a68fbcdf
|
481d135d9e53acb55c72686e2eb4483432f35fa6
|
refs/heads/master
| 2021-01-01T20:00:23.442225 | 2014-03-07T22:16:14 | 2014-03-07T22:16:14 | 1,063,890 | 3 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# vim: fileencoding=utf8:et:sta:ai:sw=4:ts=4:sts=4
import random
import traceback
import greenhouse
import junction
greenhouse.global_exception_handler(traceback.print_exception)
SERVICE_PORT = 9870
WAIT_SERVICE = 1
def main():
    """Exercise the junction 'wait' service: one direct RPC, then five
    queued RPCs collected in whatever order they complete."""
    client = junction.Client(("localhost", SERVICE_PORT))
    client.connect()
    client.wait_connected()

    # One direct round-trip first.
    print "wait 2"
    client.rpc(WAIT_SERVICE, 0, "wait", (2,))

    # Queue five RPCs with random wait times.
    rpcs = []
    for i in xrange(5):
        wait = random.random() * 5
        rpc = client.send_rpc(WAIT_SERVICE, 0, "wait", (wait,))
        rpc.counter = i  # tag each in-flight RPC so we can report which returned
        rpcs.append(rpc)
        print "queued a wait %r: %r" % (rpcs[-1].counter, wait)

    # Drain results in completion order via wait_any.
    while rpcs:
        rpc = junction.wait_any(rpcs)
        print "got back %r" % rpc.counter
        rpcs.remove(rpc)
# Script entry point.
if __name__ == '__main__':
    main()
|
UTF-8
|
Python
| false | false | 2,014 |
14,731,737,866,646 |
6195a2d37e3628167f7db731b6ab8bf585b0e37e
|
f9a57730b14392c971df39dfc75105b255398dbb
|
/reg.py
|
6836a245399b2b144c9d61d2c5e43d4063600e09
|
[] |
no_license
|
deedy/Neural-Playlist-Prediction
|
https://github.com/deedy/Neural-Playlist-Prediction
|
ff79bba711f617044673652d37ae30b1cd7e3ef2
|
104acbf82bcee639c4db27c3bf915181caf93a7e
|
refs/heads/master
| 2021-05-28T21:58:11.013386 | 2014-12-07T05:42:24 | 2014-12-07T05:42:24 | 25,749,533 | 5 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
Module for logistic regression with Theano
@author Siddharth Reddy <[email protected]>
12/4/14
"""
from __future__ import division
from math import log, exp, tanh
import theano
import theano.tensor as T
# Bug fix: the backend must be selected *before* pyplot is imported —
# calling matplotlib.use('Agg') after `import matplotlib.pyplot` (as the
# original did) has no effect on an already-initialized backend.
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt

import numpy as np
class LogisticRegression(object):
    """Logistic-style regression trained with Theano batch gradient descent.

    Attributes (populated by fit()):
        w -- learned weight vector (numpy array)
        b -- learned bias (float)
    """

    def __init__(self):
        self.w = None
        self.b = None

    def fit(self, X, Y, lrate=0.01, training_steps=100, coeff_reg=0.1):#, batch_size=1):
        """
        Batch gradient descent for maximum-likelihood estimation

        NOTE(review): the cost below is squared error plus an L2 penalty,
        not a log-likelihood — the summary above looks inaccurate; confirm.
        X -- sequence of feature rows; Y -- sequence of targets.
        Side effects: saves the loss curve to 'bgd.png' and stores the
        learned parameters on self.w / self.b.
        """
        num_instances = len(X)
        num_features = len(X[0])
        # Symbolic inputs and shared (learned) parameters.
        x = T.matrix("x")
        y = T.vector("y")
        w = theano.shared(np.random.random((num_features,)), name="w")
        b = theano.shared(0., name="b")
        #p = 1 / (1 + T.exp(-(T.dot(x, w) + b)))
        # tanh-based squashing to (0, 1). NOTE(review): this is not
        # algebraically identical to the commented sigmoid above (the
        # argument's sign/scale differ) — confirm which was intended.
        p = (1 + T.tanh(-(T.dot(x, w) + b))) / 2
        # Squared error + L2 regularization on the weights.
        cost = ((p - y) ** 2).sum() + coeff_reg * (w ** 2).sum()
        gw, gb = T.grad(cost, [w, b])
        # Compiled update step: one full-batch gradient descent move.
        train = theano.function(
            inputs=[x,y],
            outputs=[cost],
            updates=((w, w - lrate * gw), (b, b - lrate * gb)),
            allow_input_downcast=True)
        cst = [0] * training_steps
        #num_batches = num_instances // batch_size
        for i in xrange(training_steps):
            # Disabled mini-batch variant kept for reference:
            """
            for j in xrange(num_batches):
                #lidx = j*batch_size
                #uidx = min(num_instances, lidx+batch_size)
                #err = train(X[lidx:uidx], Y[lidx:uidx])
                err = train(X[j:(j+1)], Y[j:(j+1)])
            """
            err = train(X, Y)
            cst[i] = sum(err)
            print "%d\t%f" % (i, cst[i])
        # Persist the loss curve for inspection.
        plt.plot(cst)
        #plt.show()
        plt.savefig('bgd.png')
        self.w = w.get_value()
        self.b = b.get_value()

    def predict_log_proba(self, x, eps=1e-6):
        """Return log of the (eps-clamped) predicted probability for row x."""
        #return -log(1 + exp(-(np.dot(x, self.w) + self.b)))
        p = (1 + tanh(-(np.dot(x, self.w) + self.b))) / 2
        # Clamp away from zero so log() is finite.
        p = max(p, eps)
        return log(p)
|
UTF-8
|
Python
| false | false | 2,014 |
12,876,311,968,454 |
e1749c81bfa84277ebc50f009d0aac3de292759f
|
5223465efb47e9f77d087881a23d0f4f229bc3d5
|
/home/views.py
|
30b6b12ecf42e972eac30aca2a60109be6287566
|
[
"BSD-3-Clause"
] |
permissive
|
wraithan/archcode
|
https://github.com/wraithan/archcode
|
53562e92e46eb3ed7f908884f29ae679c912f380
|
ee50b8d80830adb3620883a6072a1abb322a699f
|
refs/heads/master
| 2020-05-20T02:00:11.728648 | 2009-12-16T06:03:02 | 2009-12-16T06:03:02 | 603,910 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.contrib.auth.decorators import login_required
from django.shortcuts import render_to_response
from django.template import RequestContext
def index(request):
    """Render the public home page."""
    context = {
        'title': "Welcome",
    }
    return render_to_response('home/index.html', context,
                              context_instance=RequestContext(request))
@login_required
def profile(request):
    """Render the profile page for the logged-in user."""
    context = {
        'title': "Profile",
    }
    return render_to_response('home/profile.html', context,
                              context_instance=RequestContext(request))
|
UTF-8
|
Python
| false | false | 2,009 |
15,796,889,741,615 |
28f78c3cbdcbfcaf17d99ebe0d306e763fb8c28b
|
37137e34d29143393ec4ad750a9f55bf25188ede
|
/data/refine/3-geocoding_cleanup/stats.py
|
0a9ab1f3d8d39eb38808bb85109a6c2203d3d613
|
[] |
no_license
|
statm/filmspotting
|
https://github.com/statm/filmspotting
|
db51edf5b3c481f4e265f878cb079fa6bc21bb04
|
ebc3f6ab71bc3ed07c3086fbf4a6041da13b665a
|
refs/heads/master
| 2021-01-13T01:25:34.866597 | 2014-05-05T09:32:42 | 2014-05-05T09:39:27 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import json
if __name__ == '__main__':
    # Load the cleaned geocoding results; with-statement closes the handle
    # (the original opened/closed manually and would leak on error).
    with open("geocoding_cleaned.json") as f:
        data = json.load(f)

    # Collect the distinct address-component names across all successfully
    # geocoded locations. A set makes the uniqueness check O(1); the
    # original's `name not in list` scan was accidentally O(n^2).
    loc_parts = set()
    for entry in data:
        for location in entry["locations"]:
            gc = location["geocoding"]
            if gc["status"] == "OK":
                for addr_comp in gc["results"][0]["address_components"]:
                    loc_parts.add(addr_comp["long_name"])

    # print(x) with a single argument prints identically on Python 2 and 3.
    print(len(loc_parts))
|
UTF-8
|
Python
| false | false | 2,014 |
16,982,300,689,112 |
766d9c4fc5638fb3e95a585f7cdd485e0a743f4b
|
5279fbc7b877b106dcff302cc5219327cd116510
|
/extreme_learning_machines/elm_example.py
|
01ddb55bf746c923b4689a1144e6548ddf734442
|
[] |
no_license
|
volcrush/NeuralNetworks
|
https://github.com/volcrush/NeuralNetworks
|
715ae1eb737856b7933dc2fb27effe322af31ed4
|
f6ef1534320860f7571e80059dd1719225c44e33
|
refs/heads/master
| 2021-01-16T22:25:11.441058 | 2014-12-02T22:15:43 | 2014-12-02T22:15:43 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from sklearn.datasets import load_digits
from elm import ELMClassifier

# Demo: fit an Extreme Learning Machine classifier on sklearn's digits
# dataset and report its training-set accuracy.

# testing elm on the digits dataset
digits = load_digits()
X, y = digits.data, digits.target

# normalization is important
# NOTE(review): this divides by 255 even though sklearn digits pixels
# span 0..16, so features land in [0, ~0.063] rather than [0, 1] —
# confirm whether /16 was intended.
X/=255
clf = ELMClassifier(n_hidden=30)
clf.fit(X,y)
print 'score:', clf.score(X,y)
|
UTF-8
|
Python
| false | false | 2,014 |
2,911,987,857,100 |
f7295c368b5b47366727ac134fa047e39d70bc8d
|
c6da9c78775dcabd81fc73d1f100f3aceaf34cd0
|
/liblocator.py
|
6b8c2ae34eb121cb6b3cbcc292e472ac0c7a3935
|
[] |
no_license
|
bachp/wkhtmltopdf-py
|
https://github.com/bachp/wkhtmltopdf-py
|
df5dc62834a395909b343a78734700dd91b28b3b
|
e902677f120f67f3c8682f8b8bb1180fa904a062
|
refs/heads/master
| 2021-01-02T23:14:46.663150 | 2010-07-26T14:44:03 | 2010-07-26T14:44:03 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Helper Module to locate libraries in system
# Place this module in the path on the filesystem you want to find out
# and import it. Then call the path() methode to get the absolute path
# to the modules location.
def path():
    """ Returns the absolute path to the modules location """
    import os
    # Bug fix: os.path.dirname(__file__) alone can be relative (or even
    # '') when the module was loaded via a relative path; abspath makes
    # the documented "absolute path" contract actually hold.
    return os.path.dirname(os.path.abspath(__file__))
|
UTF-8
|
Python
| false | false | 2,010 |
13,640,816,151,479 |
857983aed4dc5d0678eced80a43610c3504e510c
|
21cbfc4c3a58e32f2888ded48bdacb2335e01159
|
/tests/testControllerDefault.py
|
d314589bd957b721db79625d23f677373099214b
|
[
"LicenseRef-scancode-public-domain"
] |
non_permissive
|
tazjel/SplunkeGSD
|
https://github.com/tazjel/SplunkeGSD
|
f447096ffcbb92198131e0d6576f6a9cbb507d59
|
441573a8d5ba5bbcc367ceed7df8cc01cb24c1d3
|
refs/heads/master
| 2020-12-06T20:37:11.885352 | 2014-04-10T15:57:53 | 2014-04-10T15:57:53 | 65,085,120 | 1 | 0 | null | true | 2016-08-06T14:03:21 | 2016-08-06T14:03:20 | 2016-08-06T14:03:19 | 2014-04-10T15:57:59 | 95,924 | 0 | 0 | 0 | null | null | null |
import unittest
import ConfigParser
import urllib2
import imp
import sys
import os
default = imp.load_source('default', 'controllers/default.py')
team = imp.load_source('team', 'controllers/classes/team.py')
module = imp.load_source('module', 'controllers/classes/module.py')
class TestControllerDefault(unittest.TestCase):
    """Unit tests for controllers/default.py (loaded via imp.load_source
    as `default`, with `team`/`module` helper classes).

    NOTE(review): several tests read/write real files under
    saved_game_reports/ and ../data — they assume the repository layout
    and are not isolated; confirm before running in CI.
    """

    def testsave_game_report_cal(self):
        # Saving a report must create exactly one non-empty file.
        report = [['', 'Actual', 'Estimated'], ['Dublin: Test Module', '234.7', '200'], ['Total Effort', '234.7', '200']]
        budget = [['Cost', '7175.0', '8968.8']]
        revenue = [['Revenue', '500000.0', '500000.0']]
        files = [name for name in os.listdir('saved_game_reports/.')]
        blah = default.save_game_report_cal(report, budget, revenue)
        new_files = [name for name in os.listdir('saved_game_reports/')]
        self.assertTrue(len(new_files)-len(files) ==1)#check only one file is created
        fil = list( set(new_files)- set(files))
        self.assertFalse(os.stat('saved_game_reports/'+str(fil[0])).st_size==0)
        # Clean up the file the test created.
        os.remove('saved_game_reports/'+str(fil[0]))

    def testcalculatepfail(self):
        # One (float, float, int) triple per location.
        lt = ['dublin', 'san francisco', 'bangalore']
        temp = default.calculatepfail(lt)
        self.assertTrue(len(temp) == len(lt))
        self.assertIsInstance(temp['dublin'][2], int)
        self.assertIsInstance(temp['dublin'][0], float)
        self.assertIsInstance(temp['dublin'][1], float)

    def testgenerateIntervention(self):
        # Every location gets a non-empty intervention entry.
        lt = ['dublin', 'san francisco', 'bangalore']
        temp = default.generateIntervention(lt)
        self.assertTrue(len(lt) == len(temp))
        self.assertTrue(len(temp['dublin']) != 0)

    def testload_game_cal(self):
        # Known save loads; unknown save raises IOError.
        blah = default.load_game_cal("game1")
        self.assertIsNotNone(blah)
        with self.assertRaises(IOError):
            blah = default.load_game_cal("EllenSmells")

    def testopen_conf(self):
        conf = default.open_conf()
        self.assertIsNotNone(conf)

    def testget_locations(self):
        locations = default.get_locations()
        self.assertIsInstance(locations, dict)
        self.assertTrue(len(locations)>1)

    def testshow_saved_reports(self):
        test = default.show_saved_reports()
        self.assertIsInstance(test["title"], str)
        self.assertIsInstance(test["result2"], dict)

    def testconfig_game(self):
        temp = default.config_game()
        self.assertIsInstance(temp["title"], str)
        self.assertIsInstance(temp["data"], dict)
        self.assertIsInstance(temp["result"], list)
        self.assertIsInstance(temp, dict)

    def testcalculateprob(self):
        # First element is recalculated; the other two pass through.
        val = {'dublin': [0,3,4]}
        temp = default.calculateprob(val)
        self.assertTrue(temp['dublin'][0] != 0)
        self.assertTrue(temp['dublin'][1] == val['dublin'][1])
        self.assertTrue(temp['dublin'][2] == val['dublin'][2])

    def testview_game_cal(self):
        bob = team.team(10, 'dublin', 10)
        mod1 = module.module('TestModule', 50)
        mod2 = module.module('TestModule2', 50)
        bob.addModule(mod1)
        bob.addModule(mod2)
        bob.applyEffort()
        blah = default.view_game_cal(0, [bob], 0, 0)
        self.assertIsInstance(blah[3], str)

    def testproblemSimulator(self):
        bob = team.team(10, 'dublin', 10)
        mod1 = module.module('TestModule', 50)
        mod2 = module.module('TestModule2', 50)
        bob.addModule(mod1)
        bob.addModule(mod2)
        bob.applyEffort()
        # Pre-computed pfail-style triples per location.
        jdska = {'san francisco': [0.17500000000000002, 0.17500000000000002, 0], 'bangalore': [0.0376470588235294, 0.18823529411764706, 4], 'dublin': [0.2, 0.2, 0]}
        result = default.problemSimulator([bob], jdska)
        self.assertIsInstance(result, bool)

    def testgetDailyDevPeriod(self):
        period = default.getDailyDevPeriod()
        self.assertIsNotNone(period)
        self.assertIsInstance(period, float)

    def testgetFinalRevenue(self):
        bob = team.team(10, 'dublin', 10)
        mod1 = module.module('TestModule', 50)
        mod2 = module.module('TestModule2', 50)
        bob.addModule(mod1)
        bob.addModule(mod2)
        bob.applyEffort()
        revenue = default.getFinalRevenue([bob], 1000000, 1, 2)
        self.assertIsInstance(revenue[0], str)
        self.assertIsInstance(revenue[1],float)
        self.assertNotEqual(revenue, '')

    def testgetExpectedBudget(self):
        bob = team.team(10, 'dublin', 10)
        mod1 = module.module('TestModule', 50)
        mod2 = module.module('TestModule2', 50)
        bob.addModule(mod1)
        bob.addModule(mod2)
        bob.applyEffort()
        bud = default.getExpectedBudget([bob]*2)
        self.assertIsNotNone(bud)
        self.assertIsInstance(bud, float)

    def testgetTotalCost(self):
        # Expected cost = developers * per-day rate * days (2 teams of 10).
        config = ConfigParser.ConfigParser()
        config.read("application.config")
        cost_of_dev = config.get('Developer', 'Cost_Per_Day')
        bob = team.team(10, 'dublin', 10)
        mod1 = module.module('TestModule', 50)
        mod2 = module.module('TestModule2', 50)
        bob.addModule(mod1)
        bob.addModule(mod2)
        lst = [bob]*2
        days = 5
        cost = default.getTotalCost(lst, days, 0)
        self.assertEqual(cost, (20*float(cost_of_dev)*days))

    def testqueryCost(self):
        cost = default.queryCost(1, 0)
        config=default.open_conf()
        cost_of_dev = config.get('Developer', 'Cost_Per_Day')
        self.assertEqual(cost, float(cost_of_dev))

    def testemailQuery(self):
        bob = team.team(10, 'dublin', 10)
        mod1 = module.module('TestModule', 50)
        mod2 = module.module('TestModule2', 50)
        bob.addModule(mod1)
        bob.addModule(mod2)
        self.assertEqual('Dublin: Yes, on schedule', default.emailQuery('dublin', [bob]))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
UTF-8
|
Python
| false | false | 2,014 |
19,396,072,312,969 |
51b7886affa0cbe459d8dae8b1210a7f5cd59412
|
63b58a357e7bdae49e03e7ef79d5d1dbbd97b428
|
/re_findall.py
|
c689af841b30ec2352b798ae83736b94f0ff0234
|
[] |
no_license
|
happyqq/python_study
|
https://github.com/happyqq/python_study
|
be11507704499a7fac45c123ef07d42858ef6953
|
ef4ee29ac6f3629f7914ae60ced79f29fef182aa
|
refs/heads/master
| 2018-05-13T07:17:13.661002 | 2014-12-27T15:49:18 | 2014-12-27T15:49:18 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#encoding:utf-8
# Demo: re.findall vs re.finditer on a fixed pattern.
import re

text='abcaaebescccaefweaabbbccdeabaaaaccababababaaa'
pattern='ab'

# findall() returns all non-overlapping substrings of the input that match the pattern
#for match in re.findall(pattern,text):
# finditer() returns an iterator yielding Match instances, which expose
# each match's position in the original string
for match in re.finditer(pattern,text):
    s=match.start()
    e=match.end()
    print 'Found "%s" at %d:%d' %(text[s:e],s,e)
|
UTF-8
|
Python
| false | false | 2,014 |
1,992,864,848,105 |
944e213bdb30a20080336d38ac72fe2ea4231108
|
979db45722c36212c408d06c99bf8e0cd8dac102
|
/propietapi/core/signals.py
|
ee0f89a72375a3761294f14e963e2ba47ce0216a
|
[] |
no_license
|
propiet/api.propiet.com
|
https://github.com/propiet/api.propiet.com
|
d8121d703a6096b05410b91b64907d912bae3fe8
|
8b125c60648eed9b8ee51915e31ca547157b0d8a
|
refs/heads/master
| 2021-01-21T19:35:06.024751 | 2014-10-03T22:58:38 | 2014-10-03T22:58:38 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.contrib.auth.models import User
from django.db import models

from tastypie.models import create_api_key

# Authentication API Key: have tastypie run create_api_key every time a
# User instance is saved (post_save fires on create and update alike).
models.signals.post_save.connect(create_api_key, sender=User)
|
UTF-8
|
Python
| false | false | 2,014 |
7,086,696,088,175 |
7414e80d78d273722f4ccacaf2eaf1b32016da98
|
02a7d02b7bff39b72e8549f49b2c0c339150a76f
|
/original/remove_structured_control.py
|
16dccfccd78ddc1ccf26b5fc4d2b6545fbd80db1
|
[] |
no_license
|
scottpledger/compyler-csci4555
|
https://github.com/scottpledger/compyler-csci4555
|
b5a99e17aa4f020aafcc222c55ac98dae387f6d4
|
18e947271d275939e7f255f4d8372b285a1694a9
|
refs/heads/master
| 2021-01-23T13:17:43.103062 | 2012-12-21T09:20:11 | 2012-12-21T09:20:11 | 5,603,160 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from compiler.ast import *
from vis import Visitor
from compiler_utilities import *
from ir_x86_2 import *
class RemoveStructuredControl(Visitor):
def visitModule(self, n):
return Module(n.doc, Stmt(self.dispatch(n.node)))
def visitStmt(self, n):
sss = [self.dispatch(s) for s in n.nodes]
return Stmt(reduce(lambda a,b: a + b, sss, []))
def visitIf(self, n):
test = n.tests[0][0]
then = self.dispatch(n.tests[0][1])
else_ = self.dispatch(n.else_)
else_label = generate_name('else')
end_label = generate_name('if_end')
return [CMPLInstr(None, [Const(0), test]),
JumpEqInstr(else_label)] + \
[then] + \
[Goto(end_label)] + \
[Label(else_label)] + \
[else_] + \
[Label(end_label)]
def visitWhile(self, n):
test = n.test
body = self.dispatch(n.body)
start_label = generate_name('while_start')
end_label = generate_name('while_end')
return [Label(start_label),
CMPLInstr(None, [Const(0), test]),
JumpEqInstr(end_label)] + \
[body] + \
[Goto(start_label)] + \
[Label(end_label)]
def default(self, n):
return [n]
|
UTF-8
|
Python
| false | false | 2,012 |
16,492,674,426,461 |
9e5738fc5751e05ddb87d58aa967a01c2c21f742
|
b31efa760c7896cee431fef3b0557e4dd80770c7
|
/igor-exec-windows.py
|
c68807abee1137ad9151bba77cb4ca38e0a8c3f6
|
[] |
no_license
|
avinashprabhu/igor-mode
|
https://github.com/avinashprabhu/igor-mode
|
eee2d0b43d8ae59f8c630013c98af196640f889e
|
e5acac949e658458bfb4aa7b45efc2b2f71b04af
|
refs/heads/master
| 2018-01-04T20:18:50.077377 | 2014-09-02T16:45:55 | 2014-09-02T16:45:55 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
""" Execute Igor commands externally from Windows
Call this script with one or more commands passed as individual
arguments. The passed commands will be run, in order, by Igor.
There is no simple scripting language in Windows that will get
information back from Igor. In VBScript, it is not possible to call
the Execute2 function from the Igor COM object because Execute2
returns pointers to type BSTR. VBScript can't handle this.
Requires: pywin32
"""
import contextlib
import sys
@contextlib.contextmanager
def nostderr():
    """Silence sys.stderr for the duration of the with-block.

    Bug fix: the original did not restore sys.stderr when the body
    raised, permanently swallowing stderr for the rest of the process;
    the try/finally guarantees restoration on every exit path.
    """
    savestderr = sys.stderr

    class Devnull(object):
        # Minimal file-like sink: discard everything written.
        def write(self, _):
            pass

    sys.stderr = Devnull()
    try:
        yield
    finally:
        sys.stderr = savestderr
class WindowsIgorCommunicator(object):
    """COM bridge to a running Igor Pro instance (requires pywin32)."""

    def __init__(self):
        # win32com is imported at construction time, not module import time.
        import win32com.client
        # EnsureDispatch generates (or reuses) the makepy wrapper for the
        # Igor COM object, giving early-bound access to Execute2.
        self.igorapp = win32com.client.gencache.EnsureDispatch("IgorPro.Application")
        self.constants = win32com.client.constants

    def execute(self, cmd):
        """Run one Igor command via Execute2 and return its results string."""
        flag_nolog = 1
        code_page = 0
        # Placeholders for Execute2's out-parameters; pywin32 hands the
        # final values back in the returned tuple.
        err_code = 0
        err_msg = ""
        history = ""
        results = ""
        result_tuple = self.igorapp.Execute2(flag_nolog, code_page, cmd,
                                             err_code, err_msg,
                                             history, results)
        err_code, err_msg, history, results = result_tuple
        return results
def main(argv):
    """Run each argument as an Igor command and join the outputs.

    Every element of argv is treated as one Igor command; commands are
    executed in order and their results joined with newlines. No other
    arguments are accepted.
    """
    commands = argv
    if len(commands) < 1:
        # Nothing to run.
        return ""
    igorapp = WindowsIgorCommunicator()
    results = [igorapp.execute(command) for command in commands]
    return '\n'.join(results)
if __name__ == '__main__':
    # Suppress stderr so stdout carries only the Igor command results.
    with nostderr():
        sys.stdout.write(main(sys.argv[1:]))
UTF-8
|
Python
| false | false | 2,014 |
16,509,854,286,666 |
ef3302ffe829c737a86e19bbc0cbd30f5040d337
|
de7a39129bf471d4d4be25c65174916a505146e6
|
/tools/workshop_checklist.py
|
797662c07c57789d2ac58a8ceb3ee7433ea70e35
|
[] |
no_license
|
jdh2358/py4science
|
https://github.com/jdh2358/py4science
|
a6da01de9cb16709828bfd801bf7faf847f346bb
|
a56c742ec2e0a31c2251468d9947ebaf707346d7
|
refs/heads/master
| 2016-09-05T22:18:38.520426 | 2009-12-05T17:47:26 | 2009-12-05T17:47:26 | 1,418,846 | 5 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
"""Minimal test script to check for modules needed in python workshop.
Execute this code at the command line by typing:
python workshop_checklist.py
If it does NOT say 'OK' at the end, copy the *entire* output of the run and
send it to the course instructor for help.
"""
# Standard library imports
import glob
import os
import platform
import sys
from StringIO import StringIO
# Third-party imports
import nose
import nose.tools as nt
##############################################################################
# Code begins
#-----------------------------------------------------------------------------
# Generic utility functions
def sys_info():
    """Summarize some info about the system"""
    # Python 2 print statements; each line surfaces one interpreter /
    # platform attribute useful in a bug report.
    print '=================='
    print 'System information'
    print '=================='
    print 'os.name :',os.name
    # os.uname() exists only on POSIX systems.
    print 'os.uname :',os.uname()
    print 'platform :',sys.platform
    print 'platform+ :',platform.platform()
    print 'prefix :',sys.prefix
    print 'exec_prefix :',sys.exec_prefix
    print 'executable :',sys.executable
    print 'version_info :',sys.version_info
    print 'version :',sys.version
    print '=================='
#-----------------------------------------------------------------------------
# Tests
def check_import(mname):
    "Check that the given name imports correctly"
    # Python 2 exec *statement*: binds the imported module to local name m.
    exec "import %s as m" % mname
    if mname == 'matplotlib':
        # Select the non-interactive Agg backend (no display required)
        # and adjust the default subplot top margin.
        m.use('Agg')
        m.rcParams['figure.subplot.top']= 0.85
    try:
        vinfo = m.__version__
    except AttributeError:
        # Module does not expose __version__.
        vinfo = '*no info*'
    print 'MOD: %s, version: %s' % (mname,vinfo)
# Test generators are best written without docstrings, because nose can then
# show the parameters being used.
# Nose test generator: yields one (check, module-name) pair per required
# package so each import shows up as its own test case.
def test_imports():
    required = [
        'setuptools',
        'IPython',
        'numpy', 'scipy', 'scipy.weave', 'scipy.io',
        'matplotlib', 'pylab',
        'nose',
        # 'Cython',  # disabled for now, not included in EPD Beta2
    ]
    for mname in required:
        yield check_import, mname
def test_weave():
    "Simple code compilation and execution via scipy's weave"
    from scipy import weave
    # Smoke-test: compile and run a trivial C snippet.
    weave.inline('int x=1;x++;')
    # n is passed in from Python scope (the ['n'] arg below);
    # m is baked into the C source via %s substitution.
    n,m=1,2
    code="""
int m=%s;
return_val=m+n;
""" % m
    val = weave.inline(code,['n'])
    nt.assert_equal(val,m+n)
def est_numpy_all():
    "Run the entire numpy test suite"
    # NOTE(review): named 'est_...' (not 'test_...') so nose skips it —
    # presumably disabled on purpose because the full numpy suite is slow.
    # Rename to test_numpy_all to re-enable.
    import numpy
    numpy.test()
# Test generator, don't put a docstring in it
# Test generator, don't put a docstring in it (nose then shows the
# yielded parameters instead).
def test_loadtxt():
    import numpy as np
    import numpy.testing as npt
    # Examples taken from the loadtxt docstring.
    # (Removed the unused local alias `array = np.array` the original kept.)

    # Plain whitespace-separated floats.
    c = StringIO("0 1\n2 3")
    a1 = np.loadtxt(c)
    a2 = np.array([[ 0., 1.],
                   [ 2., 3.]])
    yield npt.assert_array_equal,a1,a2

    # Structured dtype with named fields.
    d = StringIO("M 21 72\nF 35 58")
    a1 = np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
                              'formats': ('S1', 'i4', 'f4')})
    a2 = np.array([('M', 21, 72.0), ('F', 35, 58.0)],
                  dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
    yield npt.assert_array_equal,a1,a2

    # Column selection with unpack=True.
    c = StringIO("1,0,2\n3,0,4")
    x,y = np.loadtxt(c, delimiter=',', usecols=(0,2), unpack=True)
    yield npt.assert_array_equal,x,np.array([ 1., 3.])
    yield npt.assert_array_equal,y,np.array([ 2., 4.])
def test_plot():
    "Simple plot generation."
    from matplotlib import pyplot as plt
    # Render a trivial line plot and write a temporary png
    # (removed later by cleanup_pngs()).
    plt.figure()
    plt.plot([1,2,3])
    plt.xlabel('some numbers')
    plt.savefig('tmp_test_plot.png')
def test_plot_math():
    "Plots with math"
    from matplotlib import pyplot as plt
    plt.figure()
    plt.plot([1,2,3],label='data')
    # Adjacent string literals concatenate at compile time; raw strings
    # keep the backslashes intact for mathtext rendering.
    t=(r'And X is $\sum_{i=0}^\infty \gamma_i + \frac{\alpha^{i^2}}{\gamma}'
       r'+ \cos(2 \theta^2)$')
    plt.title(t)
    plt.legend()
    plt.grid()
    plt.savefig('tmp_test_plot_math.png')
def cleanup_pngs():
    """Remove temporary pngs made by our plotting tests"""
    for f in glob.glob('tmp_test_*.png'):
        try:
            os.remove(f)
        except OSError:
            # Best-effort cleanup: report the failure but don't raise.
            print '*** Error: could not remove file',f
#-----------------------------------------------------------------------------
# Main routine, executed when this file is run as a script
#
if __name__ == '__main__':
    print "Running tests:"
    # Run this module's tests via nose without exiting the interpreter.
    # This call form is ipython-friendly.
    nose.runmodule(argv=[__file__,'-vvs'],
                   exit=False)
    print """
***************************************************************************
TESTS FINISHED
***************************************************************************
If the printout above did not finish in 'OK' but instead says 'FAILED', copy
and send the *entire* output to the instructor for help.
"""
    # Append the platform summary so failure reports are self-contained.
    sys_info()
|
UTF-8
|
Python
| false | false | 2,009 |
18,047,452,579,555 |
a72c5e65e028f01f09f0769ce05c96b82aa88983
|
336f89c0d8386c5669a476578a8a636a690c2c14
|
/var/dyn.py
|
933db766b3e2e77a02708b94546eb3834b5631df
|
[] |
no_license
|
cadizm/csci570
|
https://github.com/cadizm/csci570
|
5d06a1f3001b117df1e75774fb8ac1890e0219e0
|
c1dcf93f72217c2d91d78ccd04380e15918cb48a
|
refs/heads/master
| 2020-04-05T23:19:43.165593 | 2012-09-16T20:02:18 | 2012-09-16T20:02:18 | 4,152,987 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python

# Demo of the dependency structure of a dynamic-programming computation:
# step i is solved "using" the optimal solutions of steps 0..i-1.
# (Python 2 print statements; the trailing commas keep each row on one line.)
for i in range(10):
    print "%d: Using optimal solutions for: " % (i),
    for j in range(i):
        print "%d" % (j),
    print
|
UTF-8
|
Python
| false | false | 2,012 |
2,800,318,701,806 |
c1117cb109246ae3f9ddd7692168a285d6615aaf
|
49f64c647639733c7404cd730c6509f700d29527
|
/regulations/sel_regulations.py
|
52f97e9399f0750b86128e69bd94608211985305
|
[] |
no_license
|
Muthuseenivelraja/orginfl
|
https://github.com/Muthuseenivelraja/orginfl
|
18a637b00eeb2461664864835ee197373b582a9e
|
ebd782f64bfbe40921b81690fffdcee4bf2f3cdb
|
refs/heads/master
| 2021-05-27T02:04:42.517116 | 2011-09-06T21:02:57 | 2011-09-06T21:02:57 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# import modules
from selenium import selenium
from BeautifulSoup import BeautifulSoup
from urllib2 import urlopen
import argparse
import time, re, csv
def getArgs():
    """Parse command-line arguments.

    Returns an argparse.Namespace with:
        args.quota -- results per page (int, default 50)

    Fixes: the original docstring documented fields (server/port/db/
    user/password/input) that this parser never defines; and --quota
    yielded a str from the CLI but an int by default — type=int makes
    args.quota consistently an int (later str(args.quota) usage is
    unaffected).
    """
    parser = argparse.ArgumentParser(description='Regulations Data Import')
    parser.add_argument('-q', '--quota', type=int, help='Quota', default=50)
    args = parser.parse_args()
    return args
def main():
    """Scrape regulations.gov search results and dump each regulation's
    metadata to regulations.csv plus its document body to a local file.

    Drives a Selenium RC server on localhost:4444 (so the JavaScript-rendered
    page source can be captured) and parses the rendered HTML with
    BeautifulSoup. Python 2 code (note the bare `print html` statement).
    """
    args = getArgs()
    filename = 'regulations.csv'
    baseUrl = 'http://www.regulations.gov/'
    # initialize selenium
    sel = selenium("localhost", 4444, "*firefox", "http://www.regulations.gov/")
    sel.start()
    # get actual web page using selenium; rpp = results per page (the quota)
    sel.open('http://www.regulations.gov/#!searchResults;rpp='+str(args.quota)+';po=0')
    # sel.wait_for_page_to_load('3000')
    html = sel.get_html_source() # get actual rendered web page
    print html
    # use BeautifulSoup to cycle through each regulation
    soup = BeautifulSoup(html)
    regs = soup.find('div',{'class':'x-grid3-body'}).findAll('a')
    # cycle through list and call up each page separately
    for reg in regs:
        link = baseUrl + reg['href']
        link = str(link)
        # use Selenium to load each regulation page
        sel.open(link)
        # sel.wait_for_page_to_load('3000')
        html = sel.get_html_source() # get actual rendered web page
        # use BeautifulSoup to assign each value to a variable
        soup = BeautifulSoup(html)
        docid = soup.find('span',id="rrspan1").findNext('span').contents
        docketid = soup.find('span',id='rrspan2').findNext('a').contents
        info = []
        info.append({'name':'DocumentID','value':docid})
        info.append({'name':'DocketID','value':docketid})
        # walk the metadata table rows: first <td> holds the field name,
        # second <td> holds the value
        s = soup.find('table',id='rrtable3').find('tr')
        while getattr(s,'name',None) != None:
            name = s.findAll('td')[0].span.contents
            name = str(name[0])
            # strip whitespace and ":\-/" so names match the `fields` list below
            name = re.sub(r'[\s+\:\\\-\/]','',name)
            value = s.findAll('td')[1].contents
            info.append({'name':name,'value':str(value[0])})
            s = s.nextSibling
        # grab actual text of document
        doclink = str(soup.findAll('iframe')[3]['src'])
        # NOTE(review): doclink is always a str here, so this comparison can
        # never be False -- the guard looks intended for a missing iframe src;
        # confirm what the page yields when no document iframe exists.
        if doclink != None:
            doc = urlopen(doclink).read()
            doclinkid = re.search('(?<=objectId\=)\w*(?=\&)',doclink)
            # NOTE(review): if the regex does not match, doclinkid is None and
            # .group(0) raises AttributeError -- verify the link format upstream.
            doclinkid = doclinkid.group(0)
        # write variables to row in csv file, in this fixed column order
        fields = ['DocumentID','DocketID','RIN','DocumenTitle','OtherIdentifier','CFRCitation','FRDocketNumber','Abstract','DocumentType','DocumentSubtype','Startendpage','PostDate','AuthorDate','AuthorDocumentDate','ReceivedFilingDate','ReceiptDate','FRPublishDate','CommentStartDate','CommentEndDate','CommentsDue','PostmarkDate','DocumentLegacyID','Media','PageCount']
        row = []
        for f in fields:
            for i in info:
                if i['name'] == f:
                    row.append(i['value'])
            else:
                # NOTE(review): this is a for-else and the inner loop has no
                # break, so '' is appended once after *every* field, matched or
                # not -- presumably a plain if/else was intended; verify.
                row.append('')
        if doclinkid != None:
            row.append(doclinkid)
        else:
            row.append('')
        # NOTE(review): opening with 'wb' inside the loop truncates the CSV on
        # every iteration, so only the last regulation's row survives -- open
        # the file once before the loop (or append) to keep all rows. Also
        # rebinding `f` here shadows the `fields` loop variable above.
        f = open(filename,'wb')
        c = csv.writer(f,delimiter=',')
        c.writerow(row)
        f.close()
        # save the raw document body under its objectId as filename
        if doc != None:
            f = open(doclinkid,'wb')
            f.write(doc)
            f.close()
    # close down selenium
    sel.stop()
# Run the scraper only when executed as a script, keeping import side-effect free.
if __name__ == '__main__':
    main()
|
UTF-8
|
Python
| false | false | 2,011 |
13,176,959,709,725 |
b62699a77f78453afc943cd518552282b6ff0169
|
57dc9106303ae7d4749626b8929e47ebff5fdfbb
|
/gamete.py
|
5a6f534e9ac088ecc958880b34d9bc22fa0fc9dc
|
[] |
no_license
|
storaged/simulation
|
https://github.com/storaged/simulation
|
effe97b9d0428b8c5402e9e7850c942b8ea10a4a
|
2228ad2e43a6dbc2d6bd60b0ba93e2148b3c0a4e
|
refs/heads/master
| 2016-09-05T10:32:46.405316 | 2014-07-22T22:45:09 | 2014-07-22T22:45:09 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
##KG
##Transposone class, representing a TE on the DNA-string
##has an id numer and link to it's parent
##there is one instance of TE with given id, plants only store the id
import distributions
from plant import Plant
from phenotype import Phenotype
from saveableparams import parameters as param
def merge(t1, t2):
    """Merge two lists of transposons, each sorted by ascending ``id``,
    into one sorted list.

    On equal ids the element from ``t2`` is placed first (mirrors the
    original comparison, which only preferred ``t1`` on a strict ``<``).
    """
    merged = []
    i = j = 0
    n1, n2 = len(t1), len(t2)
    while i < n1 and j < n2:
        if t2[j].id <= t1[i].id:
            merged.append(t2[j])
            j += 1
        else:
            merged.append(t1[i])
            i += 1
    # At most one of these tails is non-empty.
    merged.extend(t1[i:])
    merged.extend(t2[j:])
    return merged
class Gamete:
    """A haploid gamete carrying the transposon content of one parent plant.

    Holds sorted id-lists of autonomous and non-autonomous transposable
    elements, a reference to the parent (``origin``), and an optional sex tag.
    """

    def __init__(self, aut_TE_list, nonaut_TE_list, origin, sex=None):
        self.aut_TE_list = aut_TE_list        # autonomous TEs, sorted by id
        self.nonaut_TE_list = nonaut_TE_list  # non-autonomous TEs, sorted by id
        self.origin = origin                  # parent Plant this gamete came from
        self.sex = sex

    def crossbreed(self, gamete):
        """Fuse this gamete with ``gamete`` and return the offspring Plant.

        The offspring inherits the merged TE lists of both gametes, the mean
        transposase activity of the two parents, and a phenotype generated
        from both parental phenotypes.
        """
        mother = self.origin
        father = gamete.origin

        aut = merge(self.aut_TE_list, gamete.aut_TE_list)
        nonaut = merge(self.nonaut_TE_list, gamete.nonaut_TE_list)
        activity = 0.5 * (mother.transposase_activity + father.transposase_activity)

        # Offspring location matters only when spatial simulation is enabled.
        location = mother.location if param.location_mode else (0, 0)
        offspring = Plant.new(location, len(aut), activity)

        offspring.inactive_transposons = 0
        offspring.phenotype = mother.phenotype.generate_phenotype(father.phenotype)
        # TODO: unclear what value belongs here (carried over from original note).
        offspring.ord_counter = mother.ord_counter
        offspring.aut_transposons_list = aut
        offspring.aut_transposons = len(aut)
        offspring.nonaut_transposons_list = nonaut
        offspring.nonaut_transposons = len(nonaut)
        return offspring
|
UTF-8
|
Python
| false | false | 2,014 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.