__id__
int64 3.09k
19,722B
| blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
256
| content_id
stringlengths 40
40
| detected_licenses
list | license_type
stringclasses 3
values | repo_name
stringlengths 5
109
| repo_url
stringlengths 24
128
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
42
| visit_date
timestamp[ns] | revision_date
timestamp[ns] | committer_date
timestamp[ns] | github_id
int64 6.65k
581M
⌀ | star_events_count
int64 0
1.17k
| fork_events_count
int64 0
154
| gha_license_id
stringclasses 16
values | gha_fork
bool 2
classes | gha_event_created_at
timestamp[ns] | gha_created_at
timestamp[ns] | gha_updated_at
timestamp[ns] | gha_pushed_at
timestamp[ns] | gha_size
int64 0
5.76M
⌀ | gha_stargazers_count
int32 0
407
⌀ | gha_forks_count
int32 0
119
⌀ | gha_open_issues_count
int32 0
640
⌀ | gha_language
stringlengths 1
16
⌀ | gha_archived
bool 2
classes | gha_disabled
bool 1
class | content
stringlengths 9
4.53M
| src_encoding
stringclasses 18
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | year
int64 1.97k
2.01k
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
15,470,472,227,735 |
91720b6a2575d8462c7940e3fb43b08fc59cd92c
|
e9d139f5108ca115d6254763438dd8855fc4454d
|
/app/models/mss.py
|
1715a7954a2ff79cd583bda8f96718f8f968d29c
|
[] |
no_license
|
Letractively/simulation-modeling
|
https://github.com/Letractively/simulation-modeling
|
119d1376a75ff825903a0dd4bbbbc161e1d19e05
|
aca18bf1f50b1083bbc9cbd97b87d3df1c71000b
|
refs/heads/master
| 2016-08-12T18:44:07.605687 | 2011-12-14T11:04:29 | 2011-12-14T11:04:29 | 45,956,671 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
'Система массового обслуживания'
import random, math, distributions
from models.validator import *
@accepts(
    channelsCount=(int, unsigned),             # Number of service channels
    queue={                                    # Queue parameters
        'size': (int, unsigned),               # Maximum queue length
        'time': (float, unsigned),             # Maximum waiting time in the queue
    },
    streams={                                  # Random streams
        'in': distribution(),                  # Arrival stream of requests
        'out': distribution(),                 # Service-time stream
    },
    faults={                                   # Equipment faults
        'problems': (float, positive),         # Fault stream rate
        'repairs': (float, finite, positive),  # Repair stream rate
        'destructive': probability,            # Fraction of destructive faults
    },
    totalTime=(float, positive),               # Simulation duration
)
def mss(channelsCount, queue, streams, faults, totalTime):
    u'Simulate a multi-channel queueing (mass-service) system and return its statistics.'
    # Streams of random variates.
    in_stream = streams['in']
    out_stream = streams['out']
    fault_stream = distributions.exponential(faults['problems'])
    repair_stream = distributions.exponential(faults['repairs'])
    # Per-action call counters live on the function object so that the
    # nested helpers below can update them.
    mss.actions = {}
    increment, decrement = 1, -1
    def action(state=0):
        'Decorator: count calls and account time spent in each system state.'
        def mix(f):
            key = f.__name__
            mss.actions[key] = 0
            def spice(*args):
                result = f(*args)
                mss.actions[key] += 1  # Call statistics
                # Accumulate how long the system stayed in the current state;
                # args[0] is always the event time.
                duration = (args[0] - mss.prevTime)
                try:
                    mss.states[mss.state] += duration
                except IndexError:  # First visit to this state (was a bare `except:`)
                    mss.states.append(duration)
                mss.prevTime = args[0]
                mss.state += state
                return result
            return spice
        return mix
    @action(state=increment)
    def accept(time):
        'Take a request into service.'
        # Channels that are currently idle (index 0 is the event slot)
        free = [key for key, value in enumerate(orders) if value == Infinity and 1 <= key <= channelsCount]
        # Pick a random idle channel
        channel = random.choice(free)
        # Moment when servicing of this request completes
        orders[channel] = time + next(out_stream)
    @action(state=increment)
    def sit(time):
        'Put a request into the queue.'
        orders.append(time + queue['time'])
    @action()
    def sizeout(time):
        'Reject a request because the queue is full.'
    @action(state=decrement)
    def leave(time, channel):
        'Finish servicing a request on the given channel.'
        orders[channel] = Infinity
    @action(state=decrement)
    def stand(time):
        'Remove the first request from the queue.'
        orders.pop(channelsCount + 1)
    @action(state=decrement)
    def timeout(time):
        'A request leaves the queue because its waiting time expired.'
        orders.pop(channelsCount + 1)
    @action()
    def fault(time, destructive):
        'Equipment fault.'
        if not mss.state:
            return  # A fault has no effect while no requests are being served
        # Channels that are currently busy
        busy = [index for index, value in enumerate(orders) if 1 <= index <= channelsCount and value < Infinity]
        # Channel on which the fault happens
        channel = random.choice(busy)
        # The request waits for the equipment to be repaired
        orders[channel] += next(repair_stream)
        if destructive:  # A destructive fault additionally
            orders[channel] += next(out_stream)  # restarts servicing from scratch
    # Event handlers
    def onNew(time):
        'A new request arrives.'
        if mss.state < channelsCount:                     # A channel is free:
            accept(time)                                  # serve the request;
        elif mss.state < channelsCount + queue['size']:   # otherwise, room in the queue:
            sit(time)                                     # enqueue it;
        else:                                             # queue is full too:
            sizeout(time)                                 # reject it.
    def onLeave(time, channel):
        'Channel `channel` is released by a request.'
        leave(time, channel)                  # Request leaves the channel
        if mss.state + 1 > channelsCount:     # If the queue was non-empty...
            stand(time)                       # the first queued request
            accept(time)                      # goes into service
    def onTimeout(time):
        'A queued request times out.'
        timeout(time)
    def onDispatch(*args):
        'Route an independent event to the proper handler, then schedule the next one.'
        onDispatch.route(*args)
        orders[0] = next(eventStream)
    def onFault(time):
        'A fault occurs; decide randomly whether it is destructive.'
        fault(time, random.random() <= faults['destructive'])
    def inputCombinator():
        'Merge arrival and fault streams into one time-ordered event stream.'
        # Times of the next request and the next fault
        new, fault = next(in_stream), next(fault_stream)
        while True:
            if new < fault:
                onDispatch.route = onNew
                yield new
                new += next(in_stream)
            else:
                onDispatch.route = onFault
                yield fault
                fault += next(fault_stream)
    Infinity = float('Infinity')
    # Combined stream of independent events (arrivals and faults)
    eventStream = inputCombinator()
    # Time of the last state change
    mss.prevTime = 0
    # orders[0]: time of the next independent event;
    # orders[1..channelsCount]: completion time per channel (Infinity = idle);
    # orders[channelsCount+1..]: queued requests' timeout deadlines.
    orders = [next(eventStream)] + [Infinity] * channelsCount
    # Current number of requests in the system, and time spent per state
    mss.state = 0
    mss.states = []
    while True:
        # Index and time of the earliest pending event
        event = min(range(len(orders)), key=orders.__getitem__)
        time = orders[event]
        # Stop when the simulation horizon is reached
        if time > totalTime:
            break
        if event:
            if event <= channelsCount:   # A channel finished servicing
                onLeave(time, event)
            else:                        # A queued request timed out
                onTimeout(time)
        else:                            # Independent event (arrival or fault)
            onDispatch(time)
    # Requests still in service or queued when time ran out
    mss.actions['shutdown'] = mss.state
    # Quality of service: requests rejected...
    rejectedFields = (
        'timeout',    # by queue timeout
        'sizeout',    # by queue size
        'shutdown',   # by end of working time
    )
    # Count of requests rejected for each reason
    absolute = {}
    for field in rejectedFields:
        absolute[field] = mss.actions[field]
    # All rejected requests
    absolute['reject'] = sum(absolute.values())
    # Accepted requests and the grand total that passed through the system
    absolute['accept'] = mss.actions['accept']
    absolute['total'] = absolute['reject'] + absolute['accept']
    # Same figures as percentages of the total
    relative = {}
    for key, value in absolute.items():
        if key != 'total':
            relative[key] = round(value / float(absolute['total']) * 100, 2)
    # Mean number of busy channels (states above channelsCount mean a full house)
    km = 0
    for channel, time in enumerate(mss.states):
        km += channel * time if channel <= channelsCount else channelsCount * time
    km /= totalTime
    # Mean sojourn times
    times = {}
    orderTime = sum(orders * time for orders, time in enumerate(mss.states))
    queueTime = sum(time * orders for orders, time in enumerate(mss.states[channelsCount:]))
    if absolute['total']:        # In the system as a whole
        times['total'] = round(orderTime / absolute['total'], 3)
    if mss.actions['sit']:       # In the queue
        times['queue'] = round(queueTime / mss.actions['sit'], 3)
    # Mean number of requests
    orders = {'work': round(km, 3)}
    orders['total'] = round(orderTime / totalTime, 3)
    orders['queue'] = round(queueTime / totalTime, 3)
    # Share of time spent in each state, in percent
    states = tuple(round(duration / totalTime * 100, 3) for duration in mss.states)
    return {
        'quality': {
            'abs': absolute,
            'pc': relative,
        },
        'load': {
            'states': states,
            'longestState': max(states),
            'times': times,
            'orders': orders,
        },
        'faults': mss.actions['fault'],
    }
|
UTF-8
|
Python
| false | false | 2,011 |
5,695,126,667,772 |
2c2fde73a17655958cce53a49295d1ef1dc1cf7d
|
53a9f8479b6ec29110b6c959d63ad7a7185bcd33
|
/backend/items/serializers.py
|
0f769b351dcfa841c64781a7dcbd80fa226aa9a6
|
[] |
no_license
|
m3lm4n/Dekorator
|
https://github.com/m3lm4n/Dekorator
|
31a7484680cb92c16483f8d8f757a3cb528a39a7
|
46406a826859d3c687812c612ea83622bde4edd2
|
refs/heads/master
| 2022-11-15T20:16:38.352775 | 2014-03-28T11:25:45 | 2014-03-28T11:25:45 | 10,098,451 | 1 | 0 | null | false | 2022-11-04T19:05:05 | 2013-05-16T09:54:51 | 2021-05-21T09:46:30 | 2022-11-04T19:05:02 | 2,811 | 1 | 0 | 15 |
Python
| false | false |
from rest_framework import serializers
import logics, models
def absolute_media_uri(media, request):
    # Build the fully-qualified URL for a media object.
    # NOTE(review): `reverse` is never imported in this module, so calling
    # this raises NameError -- a `from django.core.urlresolvers import
    # reverse` (Django <2.0 era) appears to be missing.  `unicode` is also
    # Python-2-only; confirm the target interpreter.
    return request.build_absolute_uri(reverse('media', args=[unicode(media)]))
class ItemSerializer(serializers.ModelSerializer):
    # Serialises the Item model; with no explicit `fields`, DRF exposes
    # every model field.
    class Meta:
        model = logics.Item
class ReservationSerializer(serializers.ModelSerializer):
    # Nested read of the reserved item alongside the reservation itself.
    item = ItemSerializer()
    class Meta:
        model = logics.Reservation
        #fields = ('id', 'name', 'date')
class RentItemSerializer(serializers.ModelSerializer):
    # Embeds the full reservation (which itself embeds the item).
    res = ReservationSerializer()
    class Meta:
        model = models.RentModel
class ReturnItemSerializer(serializers.ModelSerializer):
    # Plain serializer over the return record; all model fields exposed.
    class Meta:
        model = models.ReturnModel
|
UTF-8
|
Python
| false | false | 2,014 |
13,125,420,101,317 |
f28dbac88e249e1c78419ff8a9dd912deef14193
|
5a88558e3489b2a862045927a6d0e0bfdf5fa914
|
/python/PresentationBuilder/slides/DjangoWikiSlide.py
|
7caf8daba42c4c81abf93af4209bb4c4b321e525
|
[
"LGPL-2.1-only"
] |
non_permissive
|
woojin1524/moose
|
https://github.com/woojin1524/moose
|
abdf1c32e083f5b24a8ee380ba8ddb4fb7611ad8
|
4bc12284e32b7ae0027a4cb5069966fa9233d56f
|
refs/heads/master
| 2020-04-05T18:56:09.174027 | 2014-12-09T00:55:16 | 2014-12-09T00:55:16 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os, re, urllib
from ..images import DjangoWikiImage
from ..slides import RemarkSlide
##
# A slide for wiki content from a Djanjo Wiki (https://github.com/django-wiki/django-wiki)
class DjangoWikiSlide(RemarkSlide):
    """A slide whose content comes from a Django Wiki instance
    (https://github.com/django-wiki/django-wiki), converted into
    Remark-compatible markdown by parse()."""
    @staticmethod
    def validParams():
        # No extra parameters beyond those of the base RemarkSlide.
        params = RemarkSlide.validParams()
        return params
    # When reading the markdown these replacements are made.
    # NOTE(review): the first three pairs look like identity replacements;
    # presumably they were HTML entities ('&amp;', '&lt;', '&gt;') that got
    # decoded somewhere along the way -- confirm against the repository.
    replace = [('&', '&'), ('<', '<'), ('>', '>'), ('\r\n', '\n')]
    ##
    # Constructor
    # @param id The numeric slide id
    # @param markdown The raw markdown for this slide
    # @param kwargs Optional key, value pairs
    def __init__(self, name, params):
        RemarkSlide.__init__(self, name, params, image_type='DjangoWikiImage')
        # Storage for comments extracted from the markdown (see parse)
        self._comments = []
    def parse(self, markdown):
        # Convert wiki markdown into Remark markdown.
        markdown = RemarkSlide.parse(self, markdown)
        # Replace special characters
        for item in self.replace:
            markdown = markdown.replace(item[0], item[1])
        # Equations: '$$...$$' becomes inline math, '$$$...$$$' display math.
        pattern = re.compile('(\${1,})(.*?)\${1,}', re.S)
        for m in pattern.finditer(markdown):
            # Inline
            if m.group(1) == '$$':
                markdown = markdown.replace(m.group(0), '`$ ' + m.group(2) + ' $`')
            elif m.group(1) == '$$$':
                markdown = markdown.replace(m.group(0), '`$$ ' + m.group(2) + ' $$`')
            else:
                print 'ERROR parsing equation on slide', self.name()
                print '  ', m.group(2)
        # Extract '[](??? ...)' comment markers; _storeComment strips them
        # out while collecting their text.
        markdown = re.sub(r'(?<![^\s.])(\s*\[\]\(\?\?\?\s*(.*?)\))', self._storeComment, markdown)
        # Append collected comments after Remark's '???' presenter-notes
        # separator (bulleted when there is more than one).
        if self._comments:
            prefix = '\n'
            if len(self._comments) > 1:
                prefix = '\n- '
            markdown += '\n???\n'
            for c in self._comments:
                markdown += prefix + c
        # Return the markdown
        return markdown
    ##
    # Substitution function for extracting Remark comments (private)
    def _storeComment(self, match):
        self._comments.append(match.group(2).strip())
        return ''
|
UTF-8
|
Python
| false | false | 2,014 |
1,864,015,837,791 |
64cd94eeba80c0bfd56180c1b27730f9b63c8009
|
c21dff2e338ccb9d1cfea538653a0d95ee437cbb
|
/examples/bootstrap3/tests/button.py
|
ffd63644ffc42c68616a89dc0fa90991f48f4649
|
[
"MIT"
] |
permissive
|
maxvyaznikov/djangocms-cascade
|
https://github.com/maxvyaznikov/djangocms-cascade
|
80f3ed48af7642ebe69067087240fa6decf9bdeb
|
e9d553e5d5de82bc4e818f46dae11b7fd70a575e
|
refs/heads/master
| 2020-12-28T16:23:44.595600 | 2014-03-03T05:49:14 | 2014-03-03T05:49:14 | 17,281,873 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
import json
from django.test import TestCase
from django.test.client import RequestFactory
from django.forms import widgets
from django.contrib.auth.models import User
from django.test.client import Client
from cms.models.placeholdermodel import Placeholder
from cmsplugin_cascade.widgets import MultipleInlineStylesWidget
from cmsplugin_cascade.models import CascadeElement
from cmsplugin_cascade.bootstrap3.buttons import ButtonWrapperPlugin
class ButtonWrapperPluginTest(TestCase):
    """Integration tests for the bootstrap3 ButtonWrapperPlugin: form
    rendering of a stored plugin instance and the admin add/change flow."""
    admin_password = 'secret'
    client = Client()
    def setUp(self):
        self.createAdminUser()
        self.factory = RequestFactory()
        self.placeholder = Placeholder.objects.create()
        self.request = self.factory.get('/admin/dummy-change-form/')
    def createAdminUser(self):
        # Create a staff superuser and log the shared test client in with it.
        self.user = User.objects.create_user('admin', '[email protected]', self.admin_password)
        self.user.is_staff = True
        self.user.is_superuser = True
        self.user.save()
        logged_in = self.client.login(username=self.user.username, password=self.admin_password)
        self.assertTrue(logged_in, 'User is not logged in')
    def test_change_form(self):
        # Persist a plugin element, then verify widget wiring, derived CSS
        # classes and the pre-selected values in the rendered change form.
        context = {'button-type': 'btn-primary', 'button-options': ['btn-block'], 'button-size': 'btn-lg',}
        obj_id = CascadeElement.objects.create(context=context).id
        model = ButtonWrapperPlugin().get_object(self.request, obj_id)
        self.assertEqual(model.context.get('-num-children-'), 0)
        self.assertListEqual(ButtonWrapperPlugin.get_css_classes(model), ['btn', 'btn-primary', 'btn-lg', 'btn-block'])
        self.assertEqual(ButtonWrapperPlugin.get_identifier(model), 'Primary')
        button_wrapper = ButtonWrapperPlugin(model=model)
        # Four partial fields: type, size, options, inline styles.
        self.assertEqual(len(button_wrapper.partial_fields), 4)
        self.assertIsInstance(button_wrapper.partial_fields[0].widget, widgets.RadioSelect)
        self.assertIsInstance(button_wrapper.partial_fields[1].widget, widgets.RadioSelect)
        self.assertIsInstance(button_wrapper.partial_fields[2].widget, widgets.CheckboxSelectMultiple)
        self.assertIsInstance(button_wrapper.partial_fields[3].widget, MultipleInlineStylesWidget)
        self.assertListEqual(button_wrapper.child_classes, ['LinkPlugin'])
        form = button_wrapper.get_form(self.request)
        html = form(instance=model).as_table()
        # The rendered form must reflect the stored context values.
        self.assertInHTML('<input checked="checked" id="id_context_1" name="button-type" type="radio" value="btn-primary" />', html)
        self.assertInHTML('<input id="id_context_1" name="button-size" type="radio" value="" />', html)
        self.assertInHTML('<input checked="checked" id="id_context_0" name="button-options" type="checkbox" value="btn-block" />', html)
        button_wrapper.save_model(self.request, model, form, True)
    def test_save_button(self):
        # Drive the CMS admin endpoints end-to-end: add a plugin, then POST
        # a change form and check the persisted context and derived styles.
        add_url = '/admin/cms/page/add-plugin/'
        post_data = {u'plugin_parent': [u''], u'csrfmiddlewaretoken': [u'PQ7M8GfaJs4SdlsFRLz7XrNwC23mtD0D'], u'plugin_type': [u'ButtonWrapperPlugin'], u'plugin_language': [u'en'], u'placeholder_id': [str(self.placeholder.id)]}
        response = self.client.post(add_url, post_data)
        self.assertContains(response, '/admin/cms/page/edit-plugin/')
        change_url = json.loads(response.content)['url']
        obj_id = change_url.split('/')[-2]
        post_data = { u'csrfmiddlewaretoken': [u'PQ7M8GfaJs4SdlsFRLz7XrNwC23mtD0D'], u'inline_styles-margin-left': [u''], u'button-type': [u'btn-default'], u'_continue': [True], u'_popup': [u'1'], u'button-size': [u'btn-lg'], u'inline_styles-margin-bottom': [u''], u'inline_styles-margin-right': [u''], u'button-options': [u'btn-block'], u'inline_styles-margin-top': [u'50px'], u'_save': [u'Save']}
        response = self.client.post(change_url, post_data)
        self.assertInHTML('<title>Change a page</title>', response.content)
        model = CascadeElement.objects.get(id=obj_id)
        self.assertDictContainsSubset({ 'button-type': 'btn-default', 'button-size': 'btn-lg' }, model.context)
        self.assertListEqual(model.context.get('button-options'), [u'btn-block'])
        self.assertDictContainsSubset({ 'margin-top': '50px' }, model.context.get('inline_styles'))
        self.assertEquals(model.css_classes, u'btn btn-default btn-lg btn-block')
        self.assertEquals(model.inline_styles, u'margin-top: 50px;')
|
UTF-8
|
Python
| false | false | 2,014 |
3,831,110,844,513 |
a5df5cc2236541cc3428e249abf4ad70fe75c6d5
|
f1411184372357f033a47049e4cb344d46535273
|
/compiler.py
|
c5e12f8bd4eff602a04cd7080d254a6579453317
|
[] |
no_license
|
dguarino/Fall2014
|
https://github.com/dguarino/Fall2014
|
451d91add7f83fecd431a3b248e9dacee6bc657a
|
38bd7c04d491deed5107ad1d0624080defda7da3
|
refs/heads/master
| 2021-01-18T11:37:13.906895 | 2014-10-25T08:34:18 | 2014-10-25T08:34:18 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from sys import argv
import re
import string
import fileinput
import os
# Command-line contract: python compiler.py <param-file-basename>
script,filename=argv
# Parameter files are expected at ./param/<basename>.txt
filenameext=filename+'.txt'
def line_prepender(filename, line):
    """Insert *line* (with any trailing CR/LF stripped) as the new first
    line of *filename*, keeping the existing content below it."""
    with open(filename, 'r+') as handle:
        existing = handle.read()
        handle.seek(0)
        first = line.rstrip('\r\n')
        handle.write(first + '\n' + existing)
# Start ./compiled/reticular_<name>.hoc as a copy of the base template.
with open('./base/reticularonly.hoc') as oldver, open('./compiled/reticular_'+filename+'.hoc','w') as newver:
    for line in oldver:
        newver.write(line)
# Prepend each parameter line from ./param/<name>.txt onto the compiled file.
# NOTE(review): prepending line by line puts the parameters in REVERSE file
# order at the top of the output -- confirm that is intended.
with open('./param/'+filenameext) as inf:
    retrel = 0  # NOTE(review): never used afterwards; leftover?
    for line in inf:
        line_prepender('./compiled/reticular_'+filename+'.hoc', line)
# Copy the compiled file next to the script so nrngui finds it by name.
with open('./compiled/reticular_'+filename+'.hoc') as oldver, open('./reticular_'+filename+'.hoc','w') as newver:
    for line in oldver:
        newver.write(line)
# Run NEURON on the generated model, then remove the local working copy.
os.system('nrngui '+ 'reticular_'+filename+'.hoc')
os.remove('reticular_'+filename+'.hoc')
|
UTF-8
|
Python
| false | false | 2,014 |
5,093,831,216,639 |
de88e57fb1a23e33970362ee64d5f9f4714b2e39
|
b2bfe6574693d6f168e3bc8bd888e51861378077
|
/test/test_conn.py
|
e63c7aaec7eb6676c1c9638e869852a1074ac69a
|
[
"GPL-3.0-only"
] |
non_permissive
|
andreascian/MONK
|
https://github.com/andreascian/MONK
|
85c0230b3c53965316193d00dbecee09656e29f4
|
24b7daca5a0706a2be06422cadb492602cf7a251
|
refs/heads/master
| 2020-12-14T06:09:49.255274 | 2014-05-05T13:31:16 | 2014-05-05T13:31:16 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
#
# MONK automated test framework
#
# Copyright (C) 2013 DResearch Fahrzeugelektronik GmbH
# Written and maintained by MONK Developers <[email protected]>
#
# This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version
# 3 of the License, or (at your option) any later version.
#
from nose import tools as nt
from monk_tf import conn
def test_simplest():
    """ conn: create the simplest possible AConnection
    """
    # no setup required; construct with all defaults
    connection = conn.AConnection()
    # a truthy result proves construction succeeded
    nt.ok_(connection, "should contain an AConnection object, but contains '{}'"
           .format(connection))
def test_call_methods():
    """ conn: calling a public AConnection method calls its state's method
    """
    # prepare: a recording state and a connection that starts in it
    recorder = MockState()
    wanted = ["connect", "login", "cmd", "disconnect"]
    connection = conn.AConnection(start_state=recorder)
    # execute: exercise every public method once, in order
    connection.connect()
    connection.login()
    connection.cmd(None)
    connection.disconnect()
    # assert: the state saw exactly the expected calls, in order
    nt.eq_(wanted, recorder.calls,
           "didn't call the following methods: \"{}\"".format(
               list(set(wanted) - set(recorder.calls))))
def test_fsm():
    """ conn: go through all state transitions
    """
    # prepare: EchoConnection echoes commands back; credentials are dummies
    txt_in = "qwerty12345"
    expected = txt_in
    sut = conn.EchoConnection()
    sut.credentials = ("not", "important")
    # execute: full lifecycle connect -> login -> cmd -> disconnect
    sut.connect()
    sut.login()
    after_login = sut.current_state
    out = sut.cmd(txt_in)
    sut.disconnect()
    # here no exceptions is already a good sign that fsm works
    # assert
    nt.eq_(sut.current_state, conn.Disconnected(),
           "after complete transition end state should be disconnected")
    nt.eq_(after_login, conn.Authenticated(),
           "after login, state should be authenticated")
    nt.eq_(out, expected, "cmd should return same message as was put in")
@nt.raises(conn.NotConnectedException)
def test_wrong_state():
    """ conn: raise Exception when sending cmd unconnected
    """
    # sending a command before connect() must raise NotConnectedException
    connection = conn.EchoConnection()
    connection.cmd("")
    # unreachable when cmd raises as expected
def test_cmd_returncode():
    """ conn: test connections can handle additional parameters
    """
    # set up: one working and one always-failing connection
    sut = conn.EchoConnection()
    sut2 = conn.DefectiveConnection()
    # execute + assert (raises Error if params can't be parsed)
    sut._cmd("hello", returncode=True)
    try:
        # Removed unused `out =` and `as e` bindings from the original.
        sut2._cmd("hello", returncode=True)
    except conn.MockConnectionException:
        pass  # expected: DefectiveConnection fails by design
def test_connected_login():
    """ conn: connection's _login is not called if already logged in
    """
    # set up: start the connection directly in the Authenticated state
    sut = MockConnection(start_state=conn.Authenticated())
    # execute: repeated logins should all short-circuit in the state object
    sut.login()
    sut.login()
    sut.login()
    # assert: the private _login hook was never invoked
    nt.ok_("_login" not in sut.calls)
@nt.raises(conn.CantConnectException)
def test_legal_port():
    """ conn: using a non existing port results in exception
    """
    # setup: a serial port path that should exist on no machine
    connection = conn.SerialConnection(port="this/port/can/hopefully/not/exis.t")
    # exercise: connect() is expected to raise CantConnectException
    connection.connect()
@nt.raises(conn.CantConnectException)
def test_noprompt_exception():
    """ conn: connecting to shut down target device results in exception
    """
    # setup: SilentConnection simulates a target that never shows a prompt
    sut = conn.SilentConnection()
    # exercise: connect() must raise CantConnectException
    sut.connect()
def test_noprompt_notconnected():
    """ conn: connecting to shut down target device doesn't change state
    """
    # setup
    sut = conn.SilentConnection()
    # exercise
    try:
        sut.connect()
    except conn.CantConnectException as e:
        # verify: the failed connect left the state machine in Disconnected.
        # NOTE(review): if connect() does NOT raise, this test passes
        # without verifying anything -- consider an else-branch that fails.
        nt.ok_(isinstance(sut.current_state, conn.Disconnected))
class MockConnection(conn.AConnection):
    """AConnection stub that records which private hooks were invoked.

    Bug fix: `calls` is a list, which has no .add() method -- the original
    _connect/_cmd/_disconnect raised AttributeError when reached. All four
    hooks now use list.append, matching _login.
    """
    def __init__(self, *args, **kwargs):
        # Ordered record of invoked hook names
        self.calls = []
        # `logged_in` may be seeded via keyword; defaults to False
        self.logged_in = kwargs.pop("logged_in", False)
        super(MockConnection, self).__init__(*args, **kwargs)
    def _connect(self):
        self.calls.append("_connect")
    def _login(self):
        self.calls.append("_login")
        self.logged_in = True
    def _cmd(self, *args, **kwargs):
        self.calls.append("_cmd")
    def _disconnect(self):
        self.calls.append("_disconnect")
class MockState(conn.AState):
    """AState stub that records the names of the state methods invoked."""
    # NOTE(review): class-level mutable list -- it is shared by ALL
    # MockState instances, so recorded calls leak between instances.
    # Moving it into an __init__ would isolate them (parent AState's
    # constructor signature not visible here, hence left as-is).
    calls = []
    def connect(self, connection):
        self.calls.append("connect")
    def login(self, connection):
        self.calls.append("login")
    def cmd(self, connection, msg):
        self.calls.append("cmd")
    def disconnect(self, connection):
        self.calls.append("disconnect")
    def next_state(self, connection):
        # The mock never advances; every transition stays in this state.
        return self
|
UTF-8
|
Python
| false | false | 2,014 |
3,848,290,725,648 |
dcdb820338614b7cba0f04cb8e2749a7dc4a6a4f
|
5804b418d4c3c73dc5a19fc5d8fea06cb0c79ce5
|
/domosys_web/domosys_web/urls.py
|
1415646f62d61092349f9b96bac4530ee8e221d7
|
[
"GPL-2.0-only"
] |
non_permissive
|
Domosys-ORG/domosys_org
|
https://github.com/Domosys-ORG/domosys_org
|
a37c6a3507f183c8bedb95ed99318519044f2d81
|
a97020e59b65fcb8ac9d86026ae8b96b657a1f57
|
refs/heads/master
| 2020-05-18T14:13:31.004611 | 2014-02-04T14:39:36 | 2014-02-04T14:39:36 | 16,349,759 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.conf.urls import patterns, include, url
from django.contrib import admin
# Populate the admin site with ModelAdmins from all installed apps.
admin.autodiscover()
from conf.views import Config, EntryFormView
# URL routing for the project (old-style string view references).
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'domosys_app.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    url(r'^admin/', include(admin.site.urls)),      # Django admin
    url(r'^login/$', 'base.views.login_user'),      # session login
    url(r'^logout/$', 'django.contrib.auth.views.logout', {'next_page': '/'}, name='logout'),
    url(r'^tasks/', include('djcelery.urls')),      # celery task views
    url(r'^$', 'base.views.index', name='index'),   # landing page
    url(r'^config/$', Config.as_view()),            # configuration overview
    url(r'^config/(?P<caption_type>\w+)/(?P<caption_id>\d+)', EntryFormView.as_view()),
)
|
UTF-8
|
Python
| false | false | 2,014 |
1,632,087,616,181 |
1a110e8d3333bc8fc9440fa3fa75b858aa1969ce
|
e757793ebd7add274c599367f0be44c41357cbc9
|
/try_taobao/libs/actions.py
|
c9a2df154408b335a180415bb7efb9bcc894768d
|
[
"GPL-2.0-only"
] |
non_permissive
|
chenchonghz/webrobot
|
https://github.com/chenchonghz/webrobot
|
58e2ac42f10b9aee54813dd8f31b1dd4980896e8
|
c4f01083f891dd672bbed6bfc99d3d502052090c
|
refs/heads/master
| 2021-01-20T11:20:32.319982 | 2014-07-11T04:48:03 | 2014-07-11T04:48:03 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#-*- coding: utf8 -*-
from __future__ import with_statement
from contextlib import closing
import sys
import time
import logging
class SendConpon(object):
    """Web-robot action: opens a try.taobao.com trial-item page, walks
    every pagination page of the applicant list and records each
    applicant's nickname to a CSV file."""
    def __init__(self):
        self.logger = logging.getLogger("try")
        pass
    def __call__(self, client, api, url='', **kw):
        # `client` wraps a browser driver; api/url/kw are accepted but unused.
        self.client = client
        try:
            client.driver.get('http://try.taobao.com/item.htm?spm=a1z0i.1000798.1000585.4.Nn1GuK&id=6278854&qq-pf-to=pcqq.c2c')
            time.sleep(1)
        except Exception, e:
            self.logger.exception("!!!!!!!Failed to open complete url !!!!!!!!:%s" % e)
        # trydetailBtn = self.e("#J_Tab ul li span")
        # print trydetailBtn
        # trydetailBtn.click()
        # time.sleep(1)
        self.findRow()
        # Walk the pagination until no "next page" link remains.
        while True:
            if self.e("a.next-page") is not None:
                current = self.e("span.current").text
                try:
                    self.e("a.next-page").click()
                    time.sleep(1)  # give the next page time to load
                    self.findRow()
                except:
                    # NOTE(review): bare except -- any failure just logs the
                    # current page number and the loop retries the same page.
                    self.logger.info("falied get:%s" %current)
            else:
                break
    def findRow(self):
        # Record the nickname from every visible application row on the page.
        conpons = self.es(".apply-detail .items .apply-item")
        for row in conpons:
            print "row:%s" % row.text
            if row.e(".nick") is not None:
                n=row.e(".nick").text
                self.putContent(n)
            else:
                continue
    def putContent(self, n):
        # Append one nickname per line to ww.csv via the sailing RowTask writer.
        from sailing.core import RowTask
        task = RowTask("test.csv","ww.csv")
        writer = task._get_done_writer()
        writer.write("%s\n" % n)
        self.logger.info("try:%s" % n)
    def e(self, l):
        # Shortcut: locate a single element through the client.
        return self.client.e(l)
    def es(self, l):
        # Shortcut: locate a list of elements through the client.
        return self.client.es(l)
|
UTF-8
|
Python
| false | false | 2,014 |
15,925,738,780,207 |
820d09a5efda2ee31544c85163d84ddd89b4adaa
|
e8cac4db53b22a28f7421ede9089bd3d4df81c82
|
/TaobaoSdk/Request/LogisticsCompaniesGetRequest.py
|
d99649041f6555ad7b1fc7c4a11e189f37374cc8
|
[] |
no_license
|
wangyu0248/TaobaoOpenPythonSDK
|
https://github.com/wangyu0248/TaobaoOpenPythonSDK
|
af14e84e2bada920b1e9b75cb12d9c9a15a5a1bd
|
814efaf6e681c6112976c58ec457c46d58bcc95f
|
refs/heads/master
| 2021-01-19T05:29:07.234794 | 2012-06-21T09:31:27 | 2012-06-21T09:31:27 | 4,738,026 | 7 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim: set ts=4 sts=4 sw=4 et:
## @brief 查询淘宝网合作的物流公司信息,用于发货接口。
# @author [email protected]
# @date 2012-06-21 17:17:39
# @version: 0.0.0
import os
import sys
import time
def __getCurrentPath():
    # Absolute, normalised path of the directory containing this module.
    return os.path.normpath(os.path.join(os.path.realpath(__file__), os.path.pardir))
# Make the SDK package root (the parent of this Request directory)
# importable even when this module is executed directly.
__modulePath = os.path.join(__getCurrentPath(), os.path.pardir)
__modulePath = os.path.normpath(__modulePath)
if __modulePath not in sys.path:
    sys.path.insert(0, __modulePath)
## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">查询淘宝网合作的物流公司信息,用于发货接口。</SPAN>
# <UL>
# </UL>
## @brief Request object for the "taobao.logistics.companies.get" API:
#  queries the logistics companies cooperating with Taobao, for use with
#  the shipping (send-goods) interface.
class LogisticsCompaniesGetRequest(object):
    def __init__(self):
        # Fix: use the explicit class instead of `self.__class__` in
        # super(); the original form recurses infinitely if this class is
        # ever subclassed and the subclass calls this __init__.
        super(LogisticsCompaniesGetRequest, self).__init__()
        # API method name (str).
        self.method = "taobao.logistics.companies.get"
        # Request timestamp in seconds (int); defaults to creation time and
        # is used if the caller does not override it before sending.
        self.timestamp = int(time.time())
        # Required. Comma-separated list of LogisticCompany fields to
        # return, e.g. "id,code,name,reg_mail_no" (id = company id,
        # code = company code, name = company name, reg_mail_no = the
        # company's waybill-number rule).
        self.fields = None
        # Optional bool ("true"/"false"): whether to query only recommended
        # logistics companies; when omitted, all companies supporting
        # phone-contact shipping are returned.
        self.is_recommended = None
        # Optional str: order mode for recommended companies -- "offline"
        # (contact by phone / self-arranged), "online" (order online) or
        # "all". Only meaningful when is_recommended is true; defaults to
        # phone-contact companies when omitted.
        self.order_mode = None
|
UTF-8
|
Python
| false | false | 2,012 |
13,073,880,488,206 |
3c1e1ae4dd8df71d0ec353401c9d3059d2e82610
|
5a290014ef5f9707953c2ccdde578576422d1ec0
|
/tests/test_energies.py
|
99d93b4ad0b81cd19ce5aaee38fea0b9f6456259
|
[] |
no_license
|
somous-jhzhao/openmm-validation
|
https://github.com/somous-jhzhao/openmm-validation
|
74f001d10197e8a511124cbfc783fc08f168170a
|
50c146650ee88c99d84eff5f849488a34477a574
|
refs/heads/master
| 2020-09-26T12:25:10.240077 | 2014-10-13T02:33:46 | 2014-10-13T02:33:46 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/local/bin/env python
#=============================================================================================
# MODULE DOCSTRING
#=============================================================================================
"""
Test that all OpenMM systems in simtk.pyopenmm.extras.testsystems.* give the expected potential
energies and can stably run a short dynamics simulation.
DESCRIPTION
This script tests a number of simple model test systems, available in the package
simtk.pyopenmm.extras.testsystems, to make sure they reproduce known potential energies.
TODO
COPYRIGHT AND LICENSE
@author John D. Chodera <[email protected]>
All code in this repository is released under the GNU General Public License.
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program. If not, see <http://www.gnu.org/licenses/>.
"""
#=============================================================================================
# GLOBAL IMPORTS
#=============================================================================================
import sys
import math
import doctest
import numpy
import simtk.unit as units
import simtk.openmm as openmm
from repex import testsystems
#=============================================================================================
# Expected potential energies for each test system
#=============================================================================================
# Reference potential energies, keyed by test-system class name.  Systems that
# do not appear in this table are reported (not checked) by the main loop below,
# in a format that can be pasted back into this dict.
testsystem_energies = {
    'AlanineDipeptideExplicit' : -24654.9876211 * units.kilojoules_per_mole,
    'AlanineDipeptideImplicit' : -137.437357167 * units.kilojoules_per_mole,
    'ConstraintCoupledHarmonicOscillator' : 0.0 * units.kilojoules_per_mole,
    'Diatom' : 0.0 * units.kilojoules_per_mole,
    'HarmonicOscillator' : 0.0 * units.kilojoules_per_mole,
    'HarmonicOscillatorArray' : 0.0 * units.kilojoules_per_mole,
    'LennardJonesCluster' : 4.10034520364 * units.kilojoules_per_mole,
    'LennardJonesFluid' : -653.16317781 * units.kilojoules_per_mole,
    'CustomLennardJonesFluid' : -653.162946612 * units.kilojoules_per_mole,
    'CustomGBForceSystem' : -78203.4777545 * units.kilojoules_per_mole,
    'SodiumChlorideCrystal' : -455.766773418 * units.kilojoules_per_mole,
    'WaterBox' : -7316.86673998 * units.kilojoules_per_mole,
    'LysozymeImplicit' : -25593.6293016 * units.kilojoules_per_mole,
    'IdealGas' : 0.0 * units.kilocalories_per_mole,
    'MethanolBox' : 1331.1307688 * units.kilojoules_per_mole,
    'MolecularIdealGas' : 1357.65080814 * units.kilojoules_per_mole,
}

#=============================================================================================
# UTILITIES
#=============================================================================================

# Maximum allowed deviation between a computed and a reference potential energy.
ENERGY_TOLERANCE = 0.06*units.kilocalories_per_mole
def assert_approximately_equal(computed_potential, expected_potential, tolerance=ENERGY_TOLERANCE):
    """Check that a computed potential agrees with its expected value.

    Arguments:
        computed_potential (simtk.unit.Quantity, energy units) - computed potential energy
        expected_potential (simtk.unit.Quantity, energy units) - reference potential energy

    Optional arguments:
        tolerance (simtk.unit.Quantity, energy units) - maximum acceptable deviation

    Raises:
        Exception if the absolute error exceeds the tolerance.

    Examples:

    >>> assert_approximately_equal(0.0000 * units.kilocalories_per_mole, 0.0001 * units.kilocalories_per_mole, tolerance=0.06*units.kilocalories_per_mole)

    """
    error = computed_potential - expected_potential
    # Within tolerance: nothing to do.
    if abs(error) <= tolerance:
        return
    raise Exception("Computed potential %s, expected %s. Error %s is larger than acceptable tolerance of %s." % (computed_potential, expected_potential, error, tolerance))
#=============================================================================================
# MAIN
#=============================================================================================

# Run doctests on all systems.
#doctest.testmod(testsystems, extraglobs={'platform' : platform})

# All energy evaluations and dynamics below run on the CUDA platform.
platform = openmm.Platform.getPlatformByName('CUDA')

# Compute energies and run a short bit of dynamics for each test system.
tests_passed = 0    # systems that passed every check
tests_failed = 0    # systems that failed at least one check
for cls in testsystems.testsystem_classes:
    system_name = cls.__name__
    print '*' * 80
    print system_name

    # Set failure flag.
    failure = False

    # Create system.
    print "Constructing system..."
    testsystem = cls()
    [system, positions] = [testsystem.system, testsystem.positions]

    # Create integrator and context.
    temperature = 298.0 * units.kelvin
    collision_rate = 91.0 / units.picosecond
    timestep = 1.0 * units.femtosecond
    integrator = openmm.LangevinIntegrator(temperature, collision_rate, timestep)
    context = openmm.Context(system, integrator, platform)

    # Set positions
    context.setPositions(positions)

    # Evaluate the potential energy.
    print "Computing potential energy..."
    state = context.getState(getEnergy=True)
    potential = state.getPotentialEnergy()

    # If we have an expected result, check to make sure this is approximately satisfied.
    if system_name in testsystem_energies:
        try:
            expected_potential = testsystem_energies[system_name]
            assert_approximately_equal(potential, expected_potential)
        except Exception as exception:
            print str(exception)
            failure = True
    else:
        # No reference value yet: emit a line that can be pasted into
        # the testsystem_energies table above.
        print "'%s' : %s * units.kilojoules_per_mole," % (system_name, str(potential / units.kilojoules_per_mole))

    # Check that energy is not 'nan'.
    if numpy.isnan(potential / units.kilojoules_per_mole):
        print "Potential energy is 'nan'."
        failure = True

    # Integrate a few steps of dynamics to see if system remains stable.
    nsteps = 10 # number of steps to integrate
    print "Running %d steps of dynamics..." % nsteps
    integrator.step(nsteps)

    # Retrieve configuration to make sure no positions are nan.
    state = context.getState(getPositions=True)
    positions = state.getPositions(asNumpy=True)
    if numpy.any(numpy.isnan(positions / units.nanometers)):
        print 'Some positions are nan after integration.'
        failure = True

    # Accumulate passes and fails.
    if failure:
        tests_failed += 1
    else:
        tests_passed += 1

# Summary.
print '*' * 80
print "%d tests passed" % tests_passed
print "%d tests failed" % tests_failed

# Exit with a nonzero status if anything failed so callers can detect it.
if tests_failed > 0:
    # signal failure
    sys.exit(1)
else:
    sys.exit(0)
|
UTF-8
|
Python
| false | false | 2,014 |
5,970,004,566,939 |
194928c176dd80400eb58469b9382e8bcd8be1fd
|
a56a0da1641bd51b4f6da731df85c035645a16b2
|
/server/sqlLib.py
|
006ae6aa1e7d1d6fb2172c9bc3eb804e7e722672
|
[] |
no_license
|
Nirvilus/SuperSeedboxClient
|
https://github.com/Nirvilus/SuperSeedboxClient
|
60f2ce3ea17fa1cc354f65a7a7a1e5f5327987ce
|
5827774171fb269e95c0da9c4fa5074a4fb651ad
|
refs/heads/master
| 2016-04-02T20:55:56.129830 | 2014-12-26T17:35:34 | 2014-12-26T17:35:34 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# coding=UTF-8
import sqlite3
import JsonLib
import seriesLib
import os
#Creation de la base de donnée
# Creation of the database (first run only).
def createBase(c, base, s):
    """Create the sqlite schema and populate it from the torrent client.

    c    : sqlite3 cursor
    base : sqlite3 connection (committed after the DDL)
    s    : handle forwarded to JsonLib.getTorrents -- presumably the torrent
           client session; confirm against the caller.
    """
    # Category tables: each row links a torrent (by IDtorrent) to its metadata.
    c.execute("""CREATE TABLE IF NOT EXISTS `episodes` (
        `IDtorrent` int(11) NOT NULL,
        `IDserie` int(11) DEFAULT NULL,
        `IDsaison` int(11) DEFAULT NULL,
        `nom` varchar(255) NOT NULL
    ) """)
    c.execute("""CREATE TABLE IF NOT EXISTS `films` (
        `IDtorrent` int(11) NOT NULL,
        `Nom` varchar(255) NOT NULL
    ) """)
    c.execute("""CREATE TABLE IF NOT EXISTS `jeux` (
        `IDtorrent` int(11) NOT NULL,
        `nom` varchar(255) NOT NULL
    ) """)
    c.execute("""CREATE TABLE IF NOT EXISTS `musiques` (
        `IDtorrent` int(11) NOT NULL,
        `nom` varchar(255) NOT NULL
    ) """)
    c.execute("""CREATE TABLE IF NOT EXISTS `livres` (
        `IDtorrent` int(11) NOT NULL,
        `Nom` varchar(255) NOT NULL
    ) """)
    # saisons/series use sqlite auto-increment primary keys.
    c.execute("""CREATE TABLE IF NOT EXISTS `saisons` (
        `ID` INTEGER PRIMARY KEY AUTOINCREMENT,
        `IDserie` int(11),
        `nom` varchar(255) NOT NULL,
        `voted` int(1)
    ) """)
    c.execute("""CREATE TABLE IF NOT EXISTS `series` (
        `ID` INTEGER PRIMARY KEY AUTOINCREMENT,
        `nom` varchar(255) NOT NULL,
        `path` varchar(255) NOT NULL,
        `voted` int(1)
    ) """)
    # Seed row 0: the "A ranger !" ("to be sorted") bucket for unclassified series.
    c.execute("INSERT INTO series VALUES (0, ?, ?, 0)", ("A ranger !", "D:\\series\\"))
    c.execute("""CREATE TABLE IF NOT EXISTS `torrents` (
        `ID` INTEGER PRIMARY KEY AUTOINCREMENT,
        `hash` varchar(255) NOT NULL,
        `dateAjout` int(11) NOT NULL,
        `nom` varchar(255) NOT NULL,
        `path` varchar(255) NOT NULL,
        `voted` int(1)
    ) """)
    base.commit()
    # Fill the fresh schema from the torrent client's current torrent list.
    remplirBase(JsonLib.getTorrents(s, c, base), c, base)
def ajoutFilm(torrent, idTorrent, c, base, nom=None, langue=None):
    """Record a torrent in the `films` table.

    Falls back to the torrent's own (byte-encoded) name when no title is given.
    `langue` is accepted for API compatibility but not stored.
    """
    title = torrent[0].decode('UTF-8') if nom is None else nom
    c.execute("INSERT INTO films VALUES (?,?)", (idTorrent, title))
    base.commit()
def ajoutJeux(torrent, idTorrent, c, base, nom=None):
    """Record a torrent in the `jeux` (games) table.

    Falls back to the torrent's own (byte-encoded) name when no title is given.
    """
    title = torrent[0].decode('UTF-8') if nom is None else nom
    c.execute("INSERT INTO jeux VALUES (?,?)", (idTorrent, title))
    base.commit()
def ajoutLivres(torrent, idTorrent, c, base, nom=None, auteur=None):
    """Record a torrent in the `livres` (books) table.

    Falls back to the torrent's own (byte-encoded) name when no title is given.
    The author defaults to "?" but is currently not persisted (the table has
    no author column).
    """
    title = torrent[0].decode('UTF-8') if nom is None else nom
    if auteur is None:
        auteur = u"?"
    c.execute("INSERT INTO livres VALUES (?,?)", (idTorrent, title))
    base.commit()
def ajoutSeries(torrent, idTorrent, c, base):
serie = seriesLib.getSerie(torrent).decode('UTF-8')
#print(seriesLib.getSerie(torrent), " <====> ", serie)
saison = seriesLib.getSaison(torrent).decode('UTF-8')
episode = seriesLib.getEpisode(torrent).decode('UTF-8')
nomTorrent = torrent[0].decode('UTF-8')
idSerie = 0
idSaison = 0
try:
if(serie != nomTorrent):
c.execute("SELECT id FROM series WHERE nom = ?", (serie,))
result = c.fetchone()
if(result == None):
c.execute("INSERT INTO series VALUES (NULL, ?, ?, 0)", (serie, "D:\\series\\"))
idSerie = c.lastrowid
else:
idSerie = result
if(not isinstance(idSerie, (basestring, int))):
idSerie = idSerie[0]
if(saison != nomTorrent):
c.execute("SELECT id FROM saisons WHERE nom = ? AND IDserie = ?", (saison, idSerie))
if(c.fetchone() == None):
c.execute("INSERT INTO saisons VALUES (NULL, ?, ?, 0)", (idSerie, saison))
idSaison = c.lastrowid
else:
idSaison = c.fetchone()
elif(idSerie!=0):
c.execute("SELECT id FROM saisons WHERE IDserie = ? AND nom = ?", (idSerie, u"A ranger !"))
aRanger = c.fetchone()
if(aRanger == None):
c.execute("INSERT INTO saisons VALUES (NULL, ?, ?, 0)", (idSerie, u"A ranger !"))
idSaison = c.lastrowid
else:
idSaison = aRanger
if(not isinstance(idSaison, (basestring, int))):
idSaison = idSaison[0]
c.execute("INSERT INTO episodes VALUES (?,?,?,?)",(idTorrent, idSerie, idSaison, nomTorrent))
except Exception as details:
print("ERREUR SUR UN TORRENT : ")
print torrent
print(details)
print("saisonID : ", idSaison)
print("________________________")
base.commit()
def remplirBase(torrents, c, base):
    """Insert every torrent into `torrents` and route it to its category table.

    torrents : iterable of raw torrent tuples from the client
    c, base  : sqlite3 cursor / connection
    """
    # Per-category handlers; all share the (torrent, idTorrent, c, base) signature.
    handlers = {
        "films": ajoutFilm,
        "jeux": ajoutJeux,
        "livres": ajoutLivres,
        "series": ajoutSeries,
    }
    for torrent in torrents:
        tType = torrent[1].decode('UTF-8')
        tPath = torrent[4].decode('UTF-8')
        tHash = torrent[7].decode('UTF-8')
        tDate = torrent[2]
        tName = torrent[0].decode('UTF-8')
        c.execute("INSERT INTO `torrents` VALUES (NULL ,?, ?, ?, ?, 0)",
                  (tHash, tDate, tName, tPath))
        handler = handlers.get(tType)
        if handler is not None:
            handler(torrent, c.lastrowid, c, base)
    base.commit()
def getBddTorrents(c, base):
    """Return every row of the `torrents` table as a list of tuples."""
    query = "SELECT * FROM torrents WHERE 1"
    c.execute(query)
    return c.fetchall()
def getBddSeries(c, base):
    """Return every row of the `series` table, sorted by name ascending."""
    query = "SELECT * FROM series WHERE 1 ORDER BY nom ASC"
    c.execute(query)
    return c.fetchall()
def getSaisons(idSerie, c, base):
    """Return all `saisons` rows belonging to the given serie id.

    `idSerie` may be a string (e.g. straight from a request); it is coerced
    to int before binding.
    """
    serie_key = int(idSerie)
    c.execute("SELECT * FROM saisons WHERE IDserie = ?", (serie_key,))
    return c.fetchall()
## Récupérer dernier element inséré :
# sqlite3_last_insert_rowid()
def bddInit(s):
print "Initialisation de la BDD"
#os.remove("bdd.db")
if( not( os.path.isfile("bdd.db"))):
create = True
else:
create = False
base = sqlite3.connect("bdd.db")
bdd = base.cursor()
if(create):
createBase(bdd, base, s)
print "BDD initialized"
return (bdd, base)
def ajouteTorrent(torrent, dic, path, c, m):
    """Insert a newly added torrent and its category metadata.

    torrent : raw torrent tuple (index 0 name, 2 date-added, 7 hash); ignored
              silently when None
    dic     : request fields -- 'type', 'nom', and for series also
              'serie'/'saison'
    path    : download path stored with the torrent
    c, m    : sqlite3 cursor / connection
    """
    if torrent == None:
        return
    c.execute("INSERT INTO torrents VALUES (NULL, ?, ?, ?, ?, 0)",
              (torrent[7], torrent[2], torrent[0], path))
    idTorrent = c.lastrowid
    tType = dic['type'].lower()
    if tType != "series":
        # NOTE: the table name is interpolated, not bound.  tType comes from
        # the app's own category field, but sanitize it if it can ever be
        # user-controlled.
        c.execute("INSERT INTO " + tType + " VALUES (?, ?)", (idTorrent, dic['nom']))
    else:
        pathSerie = "D:\\series\\" + dic["serie"] + "\\"
        # Find or create the serie row.
        c.execute("SELECT id FROM series WHERE nom = ?", (dic['serie'],))
        row = c.fetchone()
        if row == None:
            c.execute("INSERT INTO series VALUES (NULL, ?, ?, 0)", (dic['serie'], pathSerie))
            idSerie = c.lastrowid
        else:
            # BUGFIX: fetchone() returns a row tuple; the previous code kept
            # the tuple itself, which cannot be bound as a query parameter
            # (sqlite3.InterfaceError) whenever the serie already existed.
            idSerie = row[0]
        # Find or create the saison row.
        c.execute("SELECT id FROM saisons WHERE nom = ? AND idSerie = ?", (dic['saison'], idSerie))
        row = c.fetchone()
        if row == None:
            c.execute("INSERT INTO saisons VALUES (NULL, ?, ?, 0)", (idSerie, dic['saison']))
            idSaison = c.lastrowid
        else:
            # BUGFIX: same tuple-vs-scalar unpacking as for the serie id.
            idSaison = row[0]
        c.execute("INSERT INTO episodes VALUES (?, ?, ?, ?)", (idTorrent, idSerie, idSaison, dic['nom']))
    m.commit()
def isVoted(tHash, c, m):
    """Look up a torrent by hash and report its vote state.

    Returns ("voteTorrent <id>", voted_flag) for a known hash, or
    ("", False) when no torrent matches.
    """
    c.execute("SELECT id, voted FROM torrents WHERE hash = ?", (tHash,))
    row = c.fetchone()
    if row is None:
        return ("", False)
    # voted column stores 0/1; normalize to a bool.
    return ("voteTorrent " + str(row[0]), row[1] == 1)
def voteTorrent(id, c, m, force=None):
    """Toggle a torrent's 0/1 voted flag, or set it to `force` when given."""
    if force is None:
        # Flip the stored flag: 0 -> 1, 1 -> 0.
        c.execute("SELECT voted FROM torrents WHERE ID = ?", (id,))
        row = c.fetchone()
        new_state = (row[0] + 1) % 2
    else:
        new_state = force
    c.execute("UPDATE torrents SET voted=? WHERE ID = ?", (new_state, id))
    m.commit()
def voteSerie(id, c, m):
    """Toggle a serie's voted flag and propagate the new state to its saisons."""
    c.execute("SELECT voted FROM series WHERE ID = ?", (id,))
    current = c.fetchone()
    new_state = (current[0] + 1) % 2
    # Force every saison (and, transitively, every episode) to the same state.
    c.execute("SELECT ID FROM saisons WHERE idSerie = ?", (id,))
    for saison_row in c.fetchall():
        voteSaison(saison_row[0], c, m, new_state)
    c.execute("UPDATE series SET voted=? WHERE ID = ?", (new_state, id))
    m.commit()
def voteSaison(id, c, m, force=None):
    """Toggle a saison's voted flag (or set it to `force`) and propagate the
    state to every episode torrent of that saison."""
    if force is None:
        c.execute("SELECT voted FROM saisons WHERE ID = ?", (id,))
        row = c.fetchone()
        new_state = (row[0] + 1) % 2
    else:
        new_state = force
    # Force every episode torrent of this saison to the same state.
    c.execute("SELECT idTorrent FROM episodes WHERE idSaison = ?", (id,))
    for episode_row in c.fetchall():
        voteTorrent(episode_row[0], c, m, new_state)
    c.execute("UPDATE saisons SET voted=? WHERE ID = ?", (new_state, id))
    m.commit()
|
UTF-8
|
Python
| false | false | 2,014 |
14,645,838,508,360 |
477e6a664af46ec8b88e51e50eac2f954bf9e2c0
|
f82f5cb9111cc8610d3a606afe916f7fc3b16f91
|
/shim-code/eval/harness/lipstick-checkpoint.py
|
e52f40066ca65b4052617e697698b635e1460c67
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-crapl-0.1",
"LicenseRef-scancode-unknown-license-reference"
] |
non_permissive
|
mewada/bolton-sigmod2013-code
|
https://github.com/mewada/bolton-sigmod2013-code
|
b07d1cbe36114e37a8ea9beb75e630a3b755898d
|
5f1cb5918eaedd6180debfe1c5c7e49ebb8cd4b2
|
refs/heads/master
| 2020-08-01T10:28:12.733123 | 2013-11-02T01:09:32 | 2013-11-02T01:45:48 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from common_funcs import *
from time import gmtime, strftime
from os import system

# Timestamped results directory, e.g. results/localcheckpointing-Mon-01-Jan-2013-12_00
thetime = strftime("%a-%d-%b-%Y-%H_%M", gmtime())
resultsdir = "results/localcheckpointing-"+thetime
system("mkdir -p "+resultsdir)
# Record the git revision the experiment ran from (%% escapes to a literal %
# so the shell sees --format="%h;%ad").
system("git log -1 --format=\"%%h;%%ad\" > %s/git-revision.txt" % (resultsdir))

#blow away old configs, etc.
#must be run first so pull works
killall_java("lipstick-hosts")
prepare_cassandra_for_lipstick("cassandra-hosts")
prepare_lipstick_all("lipstick-hosts")
prepare_lipstick("east-lipstick-hosts", "east-cassandra-hosts")
prepare_lipstick("west-lipstick-hosts", "west-cassandra-hosts")

# Fixed experiment parameters; the set_* helpers come from common_funcs.
threads = 4
clients = 1024
variance = 1

print "Setting threads per server"
set_lipstick_threads_per_server(threads)
print "Setting clients per server"
set_lipstick_clients_per_server(clients)
enable_backend_reads()
print "Setting variance per server"
set_lipstick_variance_potential(variance)
set_backend_potential()
enable_local_checkpointing("lipstick-hosts")

#potential causality stuff
# Sweep the checkpoint interval, 5 trials each; state is reset before each run
# and results are written under resultsdir with the parameters in the name.
for checkpoint_interval in [1000, 10000]:
    for trial in range(0, 5):
        reset_lipstick_states()
        set_checkpoint_interval(checkpoint_interval)
        run_lipstick_experiment(True, "%s/P-localcheck-%d-%dC-%dV-CPI%d-%d" % (resultsdir, threads, clients, variance, checkpoint_interval, trial))
|
UTF-8
|
Python
| false | false | 2,013 |
8,400,956,043,781 |
84e98751cf7c26d602597ffdc3d0896c86ab785c
|
d912b99b4a3f12638f04e9c1f5a94d71e3fb5988
|
/others/programming/python/thread2.py
|
b89b4f9835b6b3ddedb72561057d0daef96bcefb
|
[] |
no_license
|
gh0std4ncer/tools
|
https://github.com/gh0std4ncer/tools
|
5f8c06d57e8a39285caf4719fd8b3657f2ef188b
|
e084c4eaf8fbb40633772323bc2fe998697eb453
|
refs/heads/master
| 2018-03-22T18:31:47.963232 | 2014-07-16T10:31:28 | 2014-07-16T10:31:28 | 21,944,333 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
import sys
import time
from threading import Thread
thread_list = []
def main():
    """Start two demo worker threads, wait for both, exit nonzero on error."""
    try:
        try:
            # Each worker prints its label a few times (see foo1/foo2 below).
            t1 = Thread(target = foo1, args=("foo-1",))
            t1.start()
            t2 = Thread(target = foo2, args=("foo-2",))
            t2.start()
        except:
            # Message is Turkish for "Thread could not be created !!!".
            print "Thread Olusturulamadi !!!"
            sys.exit(1)
        thread_list.append(t1)
        thread_list.append(t2)
        # Block until both workers have finished.
        for t in thread_list:
            t.join()
    except Exception, err:
        print err
        sys.exit(2)
def foo1(str):
for count in range(1,4):
print "%s %d"% (str,count)
time.sleep(1)
def foo2(str):
for count in range(1,5):
print "%s %d"% (str,count)
time.sleep(1)
if __name__ == "__main__":
main()
##########################
# Sample output of this script (pasted; kept as a comment so the file stays
# valid Python):
#
#   # ./thread3.py
#   foo-1 1
#   foo-2 1
#   foo-1 2
#   foo-2 2
#   foo-1 3
#   foo-2 3
#   foo-2 4
|
UTF-8
|
Python
| false | false | 2,014 |
3,917,010,213,140 |
0162537b58eeff6082dfecc453c433646cf58d62
|
58f4b2291ae249f95c830dcd2f41695db6c6c02c
|
/apps/products/templatetags/products_extras.py
|
76f33e64d76cf8c81f4cb808fba5a1579f64322f
|
[] |
no_license
|
wd5/3-sphere
|
https://github.com/wd5/3-sphere
|
2f2e86abe399e754764357d0c881a2ce87ba424e
|
3a04b4d37b7c2cfef214688973e963c682d258af
|
refs/heads/master
| 2021-01-10T01:37:40.898925 | 2013-02-15T15:12:29 | 2013-02-15T15:12:29 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
from django import template
from django.db.models import Q
from string import split
# Registry the @register.* template-tag/filter decorators attach to.
register = template.Library()

from models import *
|
UTF-8
|
Python
| false | false | 2,013 |
8,014,408,980,773 |
bfd25f9dc44f3f2b1c0602a03a479d7ba3e68fe2
|
de9e116588a5063249ceaec649eaecf5a9fdd9be
|
/gslib/commands/acl.py
|
0dd106287a836ab0a1a4c82ed3a889886c90477b
|
[
"Apache-2.0"
] |
permissive
|
tedromer/gsutil
|
https://github.com/tedromer/gsutil
|
cc5d507c08080e1f650f5eb244bd3a9163c8f46d
|
5198cfea6182bd92e1ac81b4181eb469e9d01247
|
refs/heads/master
| 2020-12-24T22:49:22.452347 | 2013-09-12T23:14:01 | 2013-09-12T23:14:01 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import getopt
from boto.exception import GSResponseError
from gslib import aclhelpers
from gslib import name_expansion
from gslib.command import Command
from gslib.command import COMMAND_NAME
from gslib.command import COMMAND_NAME_ALIASES
from gslib.command import FILE_URIS_OK
from gslib.command import MAX_ARGS
from gslib.command import MIN_ARGS
from gslib.command import PROVIDER_URIS_OK
from gslib.command import SUPPORTED_SUB_ARGS
from gslib.command import URIS_START_ARG
from gslib.exception import CommandException
from gslib.help_provider import CreateHelpText
from gslib.help_provider import HELP_NAME
from gslib.help_provider import HELP_NAME_ALIASES
from gslib.help_provider import HELP_ONE_LINE_SUMMARY
from gslib.help_provider import HELP_TEXT
from gslib.help_provider import HELP_TYPE
from gslib.help_provider import HelpType
from gslib.help_provider import SUBCOMMAND_HELP_TEXT
from gslib.util import NO_MAX
from gslib.util import Retry
_SET_SYNOPSIS = """
gsutil acl set [-f] [-R] [-a] file-or-canned_acl_name uri...
"""
_GET_SYNOPSIS = """
gsutil acl get uri
"""
_CH_SYNOPSIS = """
gsutil acl ch [-R] -u|-g|-d <grant>... uri...
where each <grant> is one of the following forms:
-u <id|email>:<perm>
-g <id|email|domain|All|AllAuth>:<perm>
-d <id|email|domain|All|AllAuth>
"""
_GET_DESCRIPTION = """
<B>GET</B>
The "acl get" command gets the ACL XML for a bucket or object, which you can
save and edit for the setacl command.
"""
_SET_DESCRIPTION = """
<B>SET</B>
The "acl set" command allows you to set an Access Control List on one or
more buckets and objects. The simplest way to use it is to specify one of
the canned ACLs, e.g.,:
gsutil acl set private gs://bucket
or:
gsutil acl set public-read gs://bucket/object
See "gsutil help acls" for a list of all canned ACLs.
NOTE: By default, publicly readable objects are served with a Cache-Control
header allowing such objects to be cached for 3600 seconds. If you need to
ensure that updates become visible immediately, you should set a
Cache-Control header of "Cache-Control:private, max-age=0, no-transform" on
such objects. For help doing this, see 'gsutil help setmeta'.
If you want to define more fine-grained control over your data, you can
retrieve an ACL using the "acl get" command, save the output to a file, edit
the file, and then use the "acl set" command to set that ACL on the buckets
and/or objects. For example:
gsutil acl get gs://bucket/file.txt > acl.txt
Make changes to acl.txt such as adding an additional grant, then:
gsutil acl set acl.txt gs://cats/file.txt
Note that you can set an ACL on multiple buckets or objects at once,
for example:
gsutil acl set acl.txt gs://bucket/*.jpg
If you have a large number of ACLs to update you might want to use the
gsutil -m option, to perform a parallel (multi-threaded/multi-processing)
update:
gsutil -m acl set acl.txt gs://bucket/*.jpg
Note that multi-threading/multi-processing is only done when the named URIs
refer to objects. gsutil -m acl set gs://bucket1 gs://bucket2 will run the
acl set operations sequentially.
<B>SET OPTIONS</B>
The "set" sub-command has the following options
-R, -r Performs "acl set" request recursively, to all objects under
the specified URI.
-a Performs "acl set" request on all object versions.
-f Normally gsutil stops at the first error. The -f option causes
it to continue when it encounters errors. With this option the
gsutil exit status will be 0 even if some ACLs couldn't be
set.
"""
_CH_DESCRIPTION = """
<B>CH</B>
The "acl ch" (or "acl change") command updates access control lists, similar
in spirit to the Linux chmod command. You can specify multiple access grant
additions and deletions in a single command run; all changes will be made
atomically to each object in turn. For example, if the command requests
deleting one grant and adding a different grant, the ACLs being updated will
never be left in an intermediate state where one grant has been deleted but
the second grant not yet added. Each change specifies a user or group grant
to add or delete, and for grant additions, one of R, W, FC (for the
permission to be granted). A more formal description is provided in a later
section; below we provide examples.
<B>CH EXAMPLES</B>
Examples for "ch" sub-command:
Grant the user [email protected] WRITE access to the bucket
example-bucket:
gsutil acl ch -u [email protected]:WRITE gs://example-bucket
Grant the group [email protected] FULL_CONTROL access to all jpg files in
the top level of example-bucket:
gsutil acl ch -g [email protected]:FC gs://example-bucket/*.jpg
Grant the user with the specified canonical ID READ access to all objects
in example-bucket that begin with folder/:
gsutil acl ch -R \\
-u 84fac329bceSAMPLE777d5d22b8SAMPLE785ac2SAMPLE2dfcf7c4adf34da46:R \\
gs://example-bucket/folder/
Grant all users from my-domain.org READ access to the bucket
gcs.my-domain.org:
gsutil acl ch -g my-domain.org:R gs://gcs.my-domain.org
Remove any current access by [email protected] from the bucket
example-bucket:
gsutil acl ch -d [email protected] gs://example-bucket
If you have a large number of objects to update, enabling multi-threading
with the gsutil -m flag can significantly improve performance. The
following command adds FULL_CONTROL for [email protected] using
multi-threading:
gsutil -m acl ch -R -u [email protected]:FC gs://example-bucket
Grant READ access to everyone from my-domain.org and to all authenticated
users, and grant FULL_CONTROL to [email protected], for the buckets
my-bucket and my-other-bucket, with multi-threading enabled:
gsutil -m acl ch -R -g my-domain.org:R -g AllAuth:R \\
-u [email protected]:FC gs://my-bucket/ gs://my-other-bucket
<B>CH PERMISSIONS</B>
You may specify the following permissions with either their shorthand or
their full name:
R: READ
W: WRITE
FC: FULL_CONTROL
<B>CH SCOPES</B>
There are four different scopes: Users, Groups, All Authenticated Users,
and All Users.
Users are added with -u and a plain ID or email address, as in
"-u [email protected]:r"
Groups are like users, but specified with the -g flag, as in
"-g [email protected]:fc". Groups may also be specified as a full
domain, as in "-g my-company.com:r".
AllAuthenticatedUsers and AllUsers are specified directly, as
in "-g AllUsers:R" or "-g AllAuthenticatedUsers:FC". These are case
insensitive, and may be shortened to "all" and "allauth", respectively.
Removing permissions is specified with the -d flag and an ID, email
address, domain, or one of AllUsers or AllAuthenticatedUsers.
Many scopes can be specified on the same command line, allowing bundled
changes to be executed in a single run. This will reduce the number of
requests made to the server.
<B>CH OPTIONS</B>
The "ch" sub-command has the following options
-R, -r Performs chacl request recursively, to all objects under the
specified URI.
-u Add or modify a user permission as specified in the SCOPES
and PERMISSIONS sections.
-g Add or modify a group permission as specified in the SCOPES
and PERMISSIONS sections.
-d Remove all permissions associated with the matching argument,
as specified in the SCOPES and PERMISSIONS sections
"""
# Combined synopsis shown on the top-level "acl" help page.
_SYNOPSIS = (_SET_SYNOPSIS + _GET_SYNOPSIS.lstrip('\n') +
             _CH_SYNOPSIS.lstrip('\n') + '\n\n')

_DESCRIPTION = ("""
The acl command has three sub-commands:
""" + '\n'.join([_GET_DESCRIPTION, _SET_DESCRIPTION, _CH_DESCRIPTION]))

# Rendered help texts: one for "gsutil help acl", one per sub-command.
_detailed_help_text = CreateHelpText(_SYNOPSIS, _DESCRIPTION)

_get_help_text = CreateHelpText(_GET_SYNOPSIS, _GET_DESCRIPTION)
_set_help_text = CreateHelpText(_SET_SYNOPSIS, _SET_DESCRIPTION)
_ch_help_text = CreateHelpText(_CH_SYNOPSIS, _CH_DESCRIPTION)
class AclCommand(Command):
  """Implementation of gsutil acl command."""

  # Command specification (processed by parent class).
  command_spec = {
    # Name of command.
    COMMAND_NAME : 'acl',
    # List of command name aliases.
    COMMAND_NAME_ALIASES : ['getacl', 'setacl', 'chacl'],
    # Min number of args required by this command.
    MIN_ARGS : 1,
    # Max number of args required by this command, or NO_MAX.
    MAX_ARGS : NO_MAX,
    # Getopt-style string specifying acceptable sub args.
    SUPPORTED_SUB_ARGS : 'afRrvg:u:d:',
    # True if file URIs acceptable for this command.
    FILE_URIS_OK : False,
    # True if provider-only URIs acceptable for this command.
    PROVIDER_URIS_OK : False,
    # Index in args of first URI arg.
    URIS_START_ARG : 1,
  }
  help_spec = {
    # Name of command or auxiliary help info for which this help applies.
    HELP_NAME : 'acl',
    # List of help name aliases.
    HELP_NAME_ALIASES : ['getacl', 'setacl', 'chmod', 'chacl'],
    # Type of help:
    HELP_TYPE : HelpType.COMMAND_HELP,
    # One line summary of this help.
    HELP_ONE_LINE_SUMMARY : 'Get, set, or change bucket and/or object ACLs',
    # The full help text.
    HELP_TEXT : _detailed_help_text,
    # Help text for sub-commands.
    SUBCOMMAND_HELP_TEXT : {'get' : _get_help_text,
                            'set' : _set_help_text,
                            'ch' : _ch_help_text},
  }

  def _CalculateUrisStartArg(self):
    # "acl set" takes a canned-ACL name or file argument before the URIs;
    # "get"/"ch" start with URIs immediately.
    if (self.args[0].lower() == 'set') or (self.command_alias_used == 'setacl'):
      return 1
    else:
      return 0

  def _SetAcl(self):
    """Parses set sub-options and applies an ACL to the named buckets/objects."""
    self.continue_on_error = False
    if self.sub_opts:
      for o, unused_a in self.sub_opts:
        if o == '-a':
          self.all_versions = True
        elif o == '-f':
          self.continue_on_error = True
        elif o == '-r' or o == '-R':
          self.recursion_requested = True
        elif o == '-v':
          self.logger.warning('WARNING: The %s -v option is no longer'
                              ' needed, and will eventually be '
                              'removed.\n' % self.command_name)
    try:
      self.SetAclCommandHelper()
    except GSResponseError as e:
      # On a 403, hint that a service account may lack the needed scopes.
      if e.code == 'AccessDenied' and e.reason == 'Forbidden' \
          and e.status == 403:
        self._WarnServiceAccounts()
      raise

  def _GetAcl(self):
    """Outputs the ACL of the named bucket/object (via the base-class helper)."""
    try:
      self.GetAclCommandHelper()
    except GSResponseError as e:
      if e.code == 'AccessDenied' and e.reason == 'Forbidden' \
          and e.status == 403:
        self._WarnServiceAccounts()
      raise

  def _ChAcl(self):
    """Parses -u/-g/-d grant changes and applies them to all matching URIs."""
    self.parse_versions = True
    self.changes = []
    if self.sub_opts:
      for o, a in self.sub_opts:
        if o == '-g':
          self.changes.append(
              aclhelpers.AclChange(a, scope_type=aclhelpers.ChangeType.GROUP,
                                   logger=self.logger))
        if o == '-u':
          self.changes.append(
              aclhelpers.AclChange(a, scope_type=aclhelpers.ChangeType.USER,
                                   logger=self.logger))
        if o == '-d':
          self.changes.append(
              aclhelpers.AclDel(a, logger=self.logger))
    if not self.changes:
      raise CommandException(
          'Please specify at least one access change '
          'with the -g, -u, or -d flags')
    # "acl ch" is implemented against the GCS ACL model only.
    storage_uri = self.UrisAreForSingleProvider(self.args)
    if not (storage_uri and storage_uri.get_provider().name == 'google'):
      raise CommandException(
          'The "{0}" command can only be used with gs:// URIs'.format(
              self.command_name))
    bulk_uris = set()
    for uri_arg in self.args:
      for result in self.WildcardIterator(uri_arg):
        uri = result.uri
        if uri.names_bucket():
          if self.recursion_requested:
            # Expand the bucket to all its objects for the bulk pass below.
            bulk_uris.add(uri.clone_replace_name('*').uri)
          else:
            # If applying to a bucket directly, the threading machinery will
            # break, so we have to apply now, in the main thread.
            self.ApplyAclChanges(uri)
        else:
          bulk_uris.add(uri_arg)
    try:
      name_expansion_iterator = name_expansion.NameExpansionIterator(
          self.command_name, self.proj_id_handler, self.headers, self.debug,
          self.logger, self.bucket_storage_uri_class, bulk_uris,
          self.recursion_requested)
    except CommandException as e:
      # NameExpansionIterator will complain if there are no URIs, but we don't
      # want to throw an error if we handled bucket URIs.
      if e.reason == 'No URIs matched':
        return 0
      else:
        raise e
    # Worker threads clear this flag via _ApplyExceptionHandler on failure.
    self.everything_set_okay = True
    self.Apply(self.ApplyAclChanges,
               name_expansion_iterator,
               self._ApplyExceptionHandler)
    if not self.everything_set_okay:
      raise CommandException('ACLs for some objects could not be set.')

  def _ApplyExceptionHandler(self, exception):
    # Called by the Apply machinery; records the failure so _ChAcl can raise.
    self.logger.error('Encountered a problem: {0}'.format(exception))
    self.everything_set_okay = False

  @Retry(GSResponseError, tries=3, timeout_secs=1)
  def ApplyAclChanges(self, uri_or_expansion_result):
    """Applies the changes in self.changes to the provided URI."""
    if isinstance(uri_or_expansion_result, name_expansion.NameExpansionResult):
      uri = self.suri_builder.StorageUri(
          uri_or_expansion_result.expanded_uri_str)
    else:
      uri = uri_or_expansion_result

    try:
      current_acl = uri.get_acl()
    except GSResponseError as e:
      if (e.code == 'AccessDenied' and e.reason == 'Forbidden'
          and e.status == 403):
        self._WarnServiceAccounts()
      self.logger.warning('Failed to set acl for {0}: {1}'
                          .format(uri, e.reason))
      return

    # Apply each requested grant/deletion to the in-memory ACL first.
    modification_count = 0
    for change in self.changes:
      modification_count += change.Execute(uri, current_acl)
    if modification_count == 0:
      self.logger.info('No changes to {0}'.format(uri))
      return

    # TODO: Remove the concept of forcing when boto provides access to
    # bucket generation and metageneration.
    headers = dict(self.headers)
    force = uri.names_bucket()
    if not force:
      key = uri.get_key()
      # Generation preconditions prevent clobbering a concurrent ACL update.
      headers['x-goog-if-generation-match'] = key.generation
      headers['x-goog-if-metageneration-match'] = key.metageneration

    # If this fails because of a precondition, it will raise a
    # GSResponseError for @Retry to handle.
    try:
      uri.set_acl(current_acl, uri.object_name, False, headers)
    except GSResponseError as e:
      # Don't retry on bad requests, e.g. invalid email address.
      if getattr(e, 'status', None) == 400:
        raise CommandException('Received bad request from server: %s' % str(e))
      raise
    self.logger.info('Updated ACL on {0}'.format(uri))

  # Command entry point.
  def RunCommand(self):
    """Dispatches to the get/set/ch sub-command named by the first argument."""
    action_subcommand = self.args.pop(0)
    (self.sub_opts, self.args) = getopt.getopt(self.args,
        self.command_spec[SUPPORTED_SUB_ARGS])
    self.CheckArguments()
    if action_subcommand == 'get':
      func = self._GetAcl
    elif action_subcommand == 'set':
      func = self._SetAcl
    elif action_subcommand in ('ch', 'change'):
      func = self._ChAcl
    else:
      raise CommandException(('Invalid subcommand "%s" for the %s command.\n'
                              'See "gsutil help acl".') %
                             (action_subcommand, self.command_name))
    func()
    return 0
|
UTF-8
|
Python
| false | false | 2,013 |
11,519,102,337,353 |
18139a5cd67114a54f1b5a130c07334e62c8182a
|
d9e897bd1c57bd1439f5433d23e119d79772e413
|
/apollo/server/plugins/example.py
|
8653af7fac17f945490548d9fc93c513066ea28a
|
[
"CC-BY-NC-3.0",
"MIT"
] |
non_permissive
|
thecowboy/apollo
|
https://github.com/thecowboy/apollo
|
ef3bbf1125d462ed2743e12fb1bb960487d7555a
|
01109a1c5ace80916919c9c2c3616d810fc0523b
|
refs/heads/master
| 2021-01-01T18:17:16.891800 | 2011-04-30T23:22:55 | 2011-04-30T23:22:55 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#
# Copyright (C) 2011 by Tony Young
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Plugin metadata consumed by the plugin loader.
# NOTE(review): rebinding __name__ at module level shadows the module's real
# name — any `if __name__ == "__main__"` check in this file would break.
# Presumably intentional plugin convention; verify against the loader.
__name__ = "Example"
__author__ = "Tony Young"
__version__ = "0.1"
__description__ = "Sample plugin."
# Plugins that must be loaded before this one.
depends = [ "apollo.server.plugins.hooks" ]
from apollo.server.plugins.hooks import registry
def move_test(self, core, session):
    """Demonstration hook: announce the trigger and allow the move."""
    print("i got triggered!")
    return True
def setup(core):
    # Plugin activation: hook move_test onto the before_move event.
    registry.before_move.addListener(move_test)
def shutdown(core):
    # Plugin deactivation: detach the hook registered in setup().
    registry.before_move.removeListener(move_test)
|
UTF-8
|
Python
| false | false | 2,011 |
11,699,490,922,441 |
58c42574fa3ee3da8f194c53d95e2c8dc83e9651
|
f1ee50b74c78122742321d3598a6102edb210642
|
/_setup/hello_uwsgi_app.py
|
a4aab1c01d43867cda4e2fc9bb70418b68bb330d
|
[
"Apache-2.0"
] |
permissive
|
leven-cn/admin-linux
|
https://github.com/leven-cn/admin-linux
|
6b9e5aa4d2ca85d7bd69c6a7f497d753fa3d5230
|
4268451e64a49cc6ae5c3a69be638cf7db182250
|
refs/heads/master
| 2020-05-26T19:52:01.104931 | 2014-10-21T00:57:33 | 2014-10-21T00:57:33 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''@package _setup
uWSGI Application Sample
Copyright 2014 Li Yun <[email protected]>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
def application(env, start_response):
    """Minimal WSGI callable: always answers 200 with a fixed HTML body."""
    response_body = "Hello uWSGI"  # python2 str; python3 would need bytes
    start_response('200 OK', [('Content-Type', 'text/html')])
    return [response_body]
|
UTF-8
|
Python
| false | false | 2,014 |
4,260,607,576,235 |
50fa4170a25004b244b1dbdefeecdc7fedff47b8
|
d0ff27822bacbffe87e83faa20367325ed2cc322
|
/pavara/network/client.py
|
cd1c494143b11d732a1827959e5da25085210ca4
|
[] |
no_license
|
dcwatson/pavara-pyglet
|
https://github.com/dcwatson/pavara-pyglet
|
bd90990edf1f36bf6308d117719c1740da917013
|
9a8d519cf085514ff9cd9c448395d5333ba731eb
|
refs/heads/master
| 2016-09-05T09:18:21.766635 | 2014-10-30T02:17:49 | 2014-10-30T02:17:49 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import socket
from .packet import parse_packet
import struct
class Client (object):
    """Non-blocking UDP client that applies world-state updates from the
    server and retransmits packets until they are acknowledged."""

    def __init__(self, server, port):
        self.server = server
        self.port = port
        self.last_seq = 0
        # Packets awaiting acknowledgement, keyed by sequence number.
        self.pending = {}
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.socket.setblocking(0)

    @property
    def nextseq(self):
        """Hand out monotonically increasing sequence numbers."""
        self.last_seq += 1
        return self.last_seq

    def tick(self, dt, world):
        """Drain all datagrams received since the last tick, then re-send
        every packet still waiting for an acknowledgement."""
        while True:
            try:
                datagram, _sender = self.socket.recvfrom(512)
                packet = parse_packet(datagram)
                # Seeing a pending sequence echoed back acts as its ack.
                if packet.sequence in self.pending:
                    del self.pending[packet.sequence]
                payload = packet.payload
                cursor = 0
                # Payload is a run of 52-byte records: object id + 6 doubles.
                while cursor < len(payload):
                    idx, px, py, pz, vx, vy, vz = struct.unpack(
                        '!L6d', payload[cursor:cursor + 52])
                    cursor += 52
                    world.objects[idx].position.set(px, py, pz)
                    #world.objects[idx].velocity.set(vx, vy, vz)
            except socket.error:
                # Non-blocking socket: nothing left to read right now.
                break
        # Retransmit anything added since last tick or not yet acknowledged.
        for packet in self.pending.values():
            self.write(packet)

    def write(self, packet):
        """Send one packet; keep it for retransmission if it needs an ack."""
        self.socket.sendto(packet.flatten(), (self.server, self.port))
        if packet.needs_ack:
            self.pending[packet.sequence] = packet
|
UTF-8
|
Python
| false | false | 2,014 |
12,936,441,524,022 |
e01b242f4e937cc0e9e442e2d69017806a7dc1de
|
ca9b0be15429d9f2b7f6511d2aa9b932337417e6
|
/CustomStruct.py
|
6704f7e1a46f2efb14beb487555c33b8339a6f9a
|
[] |
no_license
|
pzucco/ReliableUDP
|
https://github.com/pzucco/ReliableUDP
|
720168ffa27eec5cdc11f45eb550e0d6084706e2
|
cf2d0ef588b96643cc86521f76795588d5a2d7ee
|
refs/heads/master
| 2020-04-24T02:32:31.830863 | 2013-05-20T23:11:02 | 2013-05-20T23:11:02 | 10,183,537 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
__author__ = 'pzucco'

import struct

# Next wire code to hand out; every Base instance claims one byte-sized code.
_code = 0
# Pre-compiled 1-byte / 4-byte length-prefix structs (network byte order).
_byte_struct = struct.Struct('!B')
_int_struct = struct.Struct('!I')
# Registry mapping wire code -> structure instance, used by deserialize().
_structure = {}
class Base(object):
    """Root of all wire-format field types: assigns each instance a unique
    one-byte code and registers it for lookup during deserialization."""
    def __init__(self):
        global _code
        # The packed code is prepended to every serialized message.
        self.code = _byte_struct.pack(_code)
        _structure[_code] = self
        _code += 1
class Atom(Base):
    """Fixed-size scalar field backed by a single struct format code."""

    def __init__(self, format_):
        Base.__init__(self)
        self.format = format_
        # Pre-compile the struct once; reused for every read/write.
        self.struct = struct.Struct(format_)

    def write(self, data):
        """Serialize one scalar value."""
        return self.struct.pack(data)

    def read(self, raw, offset):
        """Deserialize one scalar; returns (value, new_offset)."""
        value = self.struct.unpack_from(raw, offset)[0]
        return value, offset + self.struct.size
# Shared scalar field singletons. NOTE(review): these format strings carry no
# '!' prefix, so the standalone Atom read/write path uses native byte order;
# only the List/Tuple atom fast paths prepend '!' — confirm this is intended.
Byte = Atom('B')
Short = Atom('H')
Int = Atom('I')
SigByte = Atom('b')
SigShort = Atom('h')
SigInt = Atom('i')
Float = Atom('f')
Double = Atom('d')
class List(Base):
    """Variable-length sequence field: a 1-byte element count followed by the
    serialized elements.

    When the element type is an Atom, read/write are swapped for fast paths
    that pack/unpack the whole run with a single struct call.
    """

    def __init__(self, structure):
        Base.__init__(self)
        self.structure = structure
        # Fast path: homogeneous scalar elements in one struct call.
        if structure.__class__ == Atom:
            self.write = self._write_atoms
            self.read = self._read_atoms

    def write(self, data):
        """Serialize count prefix plus each element via its structure."""
        return _byte_struct.pack(len(data)) + ''.join([self.structure.write(data[i]) for i in range(len(data))])

    def read(self, raw, offset):
        """Deserialize count-prefixed elements; returns (list, new_offset)."""
        count = _byte_struct.unpack_from(raw, offset)[0]
        offset += 1
        data = []
        for _ in range(count):
            update, offset = self.structure.read(raw, offset)
            data.append(update)
        return data, offset

    def _write_atoms(self, data):
        """Atom fast path: one network-order pack for the whole run."""
        return _byte_struct.pack(len(data)) + struct.pack('!%i%s' % (len(data), self.structure.format), *data)

    def _read_atoms(self, raw, offset):
        """Atom fast path: one unpack for the whole run."""
        length = _byte_struct.unpack_from(raw, offset)[0]
        offset += 1
        format = '!%i%s' % (length, self.structure.format)
        # BUG FIX: calcsize(format) already covers all `length` elements; the
        # original multiplied it by `length` again, advancing the offset past
        # the data actually consumed (compare Tuple._read_atoms, which
        # correctly uses self.format.size).
        return list(struct.unpack_from(format, raw, offset)), offset + struct.calcsize(format)
class Tuple(Base):
    """Fixed-length sequence field of exactly `count` elements (no prefix)."""

    def __init__(self, structure, count):
        Base.__init__(self)
        self.count = count
        self.structure = structure
        if structure.__class__ == Atom:
            # Pre-compile one struct covering the whole fixed-size record.
            self.format = struct.Struct('!%i%s' % (self.count, self.structure.format))
            self.write = self._write_atoms
            self.read = self._read_atoms

    def write(self, data):
        """Serialize exactly `count` elements via the element structure."""
        parts = [self.structure.write(data[i]) for i in range(self.count)]
        return ''.join(parts)

    def read(self, raw, offset):
        """Deserialize `count` elements; returns (list, new_offset)."""
        items = []
        for _ in range(self.count):
            value, offset = self.structure.read(raw, offset)
            items.append(value)
        return items, offset

    def _write_atoms(self, data):
        """Atom fast path: single pack of the whole record."""
        return self.format.pack(*data)

    def _read_atoms(self, raw, offset):
        """Atom fast path: single unpack of the whole record."""
        return self.format.unpack_from(raw, offset), offset + self.format.size
def _string_write(data):
    """Serialize a string as a 1-byte length prefix plus its characters."""
    prefix = _byte_struct.pack(len(data))
    return prefix + struct.pack('!%is' % len(data), data)
def _string_read(raw, offset):
    """Inverse of _string_write; returns (string, new_offset)."""
    length = _byte_struct.unpack_from(raw, offset)[0]
    offset += 1
    value = struct.unpack_from('!%is' % length, raw, offset)[0]
    return value, offset + length
# Singleton string field: a Base instance whose read/write are swapped for
# the length-prefixed string codecs above.
String = Base()
String.write = _string_write
String.read = _string_read
def _raw_data_write(data):
    """Serialize an opaque blob as a 4-byte length prefix plus the bytes."""
    prefix = _int_struct.pack(len(data))
    return prefix + data
def _raw_data_read(raw, offset):
    """Inverse of _raw_data_write; returns (blob, new_offset)."""
    length = _int_struct.unpack_from(raw, offset)[0]
    offset += 4
    end = offset + length
    return raw[offset:end], end
# Singleton opaque-blob field, analogous to String but with a 4-byte length.
RawData = Base()
RawData.write = _raw_data_write
RawData.read = _raw_data_read
class Structure(Base):
    """Composite field: a record of named sub-fields.

    Scalar (Atom) members are packed together into one network-order struct;
    all other members are serialized one after another in declaration order.
    """
    def __init__(self, **args):
        Base.__init__(self)
        self.struct = '!'
        self.static = []   # names of Atom members, packed as one struct
        self.dynamic = []  # (name, structure) pairs for everything else
        # NOTE(review): field order follows the kwargs dict's iteration order;
        # on Python < 3.7 that order is not guaranteed stable across
        # interpreter runs, so two peers could disagree on the wire layout —
        # verify before using this across processes.
        for field, structure in args.items():
            if structure.__class__ == Atom:
                self.static.append(field)
                self.struct += structure.format
            else:
                self.dynamic.append((field, structure))
        self.struct = struct.Struct(self.struct)
    def write(self, data):
        # Accept either a plain dict or any object with matching attributes.
        if data.__class__ != dict: data = data.__dict__
        raw = self.struct.pack(*[data[i] for i in self.static])
        return raw + ''.join([structure.write(data[field]) for field, structure in self.dynamic])
    def read(self, raw, offset):
        # Unpack all scalars at once, then the dynamic members in order.
        data = dict(zip(self.static, self.struct.unpack_from(raw, offset)))
        offset += self.struct.size
        for field, dynamic in self.dynamic:
            update, offset = dynamic.read(raw, offset)
            data[field] = update
        # The constructor hook turns the field dict into a domain object.
        return _aux_constructor(self, data), offset
def _default_aux_constructor(structure, data):
    # Identity constructor: hand back the plain field dict unchanged.
    return data
# Hook invoked by Structure.read to build a domain object from a field dict.
_aux_constructor = _default_aux_constructor
def set_constructor(constructor):
    """Install a callable(structure, data) used to build objects on read."""
    global _aux_constructor; _aux_constructor = constructor
def deserialize(raw):
    """Decode one message: leading byte selects the registered structure,
    the remainder is its payload. Returns (structure, data)."""
    code = _byte_struct.unpack_from(raw)[0]
    structure = _structure[code]
    payload, _ = structure.read(raw, 1)
    return structure, payload
def serialize(structure, data):
    """Encode one message: the structure's wire code followed by its payload."""
    payload = structure.write(data)
    return structure.code + payload
|
UTF-8
|
Python
| false | false | 2,013 |
5,798,205,852,872 |
f2d36d601a932156c596174822c7ab6a3de2308a
|
3e8c6b7bd713c0c547896d212b2b3a9676de4ec6
|
/tests/test_client.py
|
b76be6ddd118b3068e05bfd5f42c539bf1f7663a
|
[] |
no_license
|
jiangz222/ooredis
|
https://github.com/jiangz222/ooredis
|
3e0560d5e0cb54b7e9c5168205f3ec60ae837f9b
|
3fac6ce1ff16986225fecffe2731c4efd224fcc3
|
refs/heads/master
| 2021-05-28T18:59:10.171149 | 2014-07-28T14:02:54 | 2014-07-28T14:02:54 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#! /usr/bin/env python2.7
# coding:utf-8
import unittest
from ooredis.client import connect, get_client
import redis
class TestClient(unittest.TestCase):
    """Exercises the ooredis.client connection helpers."""

    def setUp(self):
        # A default connection must exist before get_client() can be used.
        self.client = connect()

    def test_get(self):
        client = get_client()
        self.assertTrue(isinstance(client, redis.Redis))

    def test_get_get(self):
        # Repeated lookups must hand back the same shared client.
        self.assertEqual(get_client(), get_client())
# Allow running this test module directly: python test_client.py
if __name__ == "__main__":
    unittest.main()
|
UTF-8
|
Python
| false | false | 2,014 |
11,708,080,868,169 |
6a720a4fd0a7dc9c7b568862df21e86e8ca6f876
|
7186f3ddaa7477334f97e2fbc09f9f35ac7a4036
|
/scikits/sqrts/sqrts.py
|
9aad2379abc17ced85b06ef445bb55e9eb594f6b
|
[] |
no_license
|
fp4code/scikits-sqrts
|
https://github.com/fp4code/scikits-sqrts
|
f85b7f5b296cd2925fe3bba09328e8d283b7e4e8
|
ed8afe70f8ec3fb83dfed32399ef6f55e4937aed
|
refs/heads/master
| 2021-01-25T12:13:30.359748 | 2010-08-19T12:16:51 | 2010-08-19T12:16:51 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
import numpy as np
def nmv(z):
"""Nevière-Maystre-Vincent complex square-root.
Cf. http://dx.doi.org/10.1088/0150-536X/8/4/002
>>> sqrts.nmv([-2j-1e-20,-2j,-2j+1e-20])
array([ 1.+1.j, 1.-1.j, -1.-1.j])
"""
return np.sqrt(1j)*np.sqrt(-1j*np.asarray(z,np.complex))
def nmvo(z):
    """Nevière-Maystre-Vincent complex square-root, opposite branch cut

    >>> sqrts.nmvo([2j+1e-20,2j,2j-1e-20])
    array([-1.-1.j, -1.-1.j,  1.+1.j])
    """
    # COMPAT FIX: `np.complex` was removed from NumPy; builtin `complex`
    # yields the same complex128 dtype.
    return -np.sqrt(-1j)*np.sqrt(1j*np.asarray(z, complex))
def posimag(z):
    """Complex square root whose result lies on the non-negative-imaginary
    branch.

    >>> sqrts.posimag([1+1e-20j,1,1-1e-20j]).real
    array([ 1.,  1., -1.])
    """
    negated = -np.conj(z)
    return 1j * np.conj(np.sqrt(negated))
class Nearest:
    """Nearest complex square root

    Of the two square roots of z, returns the one closest to the previously
    returned value (elementwise for array-valued state).

    >>> s = sqrts.Nearest([1,1j,-1,-1j])
    >>> s.sqrt(1).real
    array([ 1.,  1., -1.,  1.])
    >>> s = sqrts.Nearest(1)
    >>> s.sqrt(2j)
    (1+1j)
    >>> s.sqrt(-1)
    1j
    >>> s.sqrt(-2j)
    (-1+1j)
    >>> s.sqrt(1)
    (-1+0j)
    >>> s.sqrt(2j)
    (-1-1j)
    >>> s.sqrt(-1)
    (-0-1j)
    >>> s.sqrt(-2j)
    (1-1j)
    >>> s.sqrt(1)
    (1+0j)
    """
    def __init__(self, value):
        # COMPAT FIX: `np.complex` was removed from NumPy; builtin `complex`
        # is the identical dtype/constructor.
        self.previous = np.asarray(value, complex)
    def sqrt(self, z):
        # Principal root, then flip the sign wherever -s is nearer to the
        # previously returned value than +s.
        s = np.sqrt(complex(z))
        d1 = np.abs(self.previous - s)
        d2 = np.abs(self.previous + s)
        self.previous = ((d1 <= d2)*2-1)*s
        return self.previous
|
UTF-8
|
Python
| false | false | 2,010 |
19,542,101,219,429 |
4835e5ac3a74ebddb8563b03bfda7778f4a51fd0
|
914b504e13df945a50f35eca4d850eb2c5b52c0b
|
/test/compute/test_softlayer.py
|
c8e453e849b6ba30614c8c266ecfe55f5afc3f5e
|
[
"Apache-2.0"
] |
permissive
|
cloudkick/libcloud
|
https://github.com/cloudkick/libcloud
|
d05c0401bd232279cb38b5abacd3d4c85d7d072f
|
9c8605e1518c6b5e2511f0780e1946089a7256dd
|
refs/heads/master
| 2021-01-01T19:51:41.895189 | 2011-03-14T02:34:57 | 2011-03-14T02:34:57 | 258,426 | 8 | 7 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import httplib
import unittest
import sys
from xml.etree import ElementTree as ET
import xmlrpclib
from libcloud.compute.drivers.softlayer import SoftLayerNodeDriver as SoftLayer
from libcloud.compute.types import NodeState
from test import MockHttp
from test.compute import TestCaseMixin
from test.file_fixtures import ComputeFileFixtures
from test.secrets import SOFTLAYER_USER, SOFTLAYER_APIKEY
class MockSoftLayerTransport(xmlrpclib.Transport):
    """XML-RPC transport that routes every request to the mock HTTP backend
    instead of the real SoftLayer API."""

    def request(self, host, handler, request_body, verbose=0):
        self.verbose = 0
        # The fixture to serve is selected by the XML-RPC method name.
        method_name = ET.XML(request_body).find('methodName').text
        mock = SoftLayerMockHttp(host, 80)
        mock.request('POST', "%s/%s" % (handler, method_name))
        response = mock.getresponse()
        return self._parse_response(response.body, None)
class SoftLayerTests(unittest.TestCase):
    """Driver-level tests for SoftLayer, answered entirely from local XML
    fixtures (no network traffic)."""

    def setUp(self):
        # Replace both transport slots so every XML-RPC call goes through the
        # mock transport defined above.
        SoftLayer.connectionCls.proxyCls.transportCls = [MockSoftLayerTransport, MockSoftLayerTransport]
        self.driver = SoftLayer(SOFTLAYER_USER, SOFTLAYER_APIKEY)

    def test_list_nodes(self):
        # Expected values come from v3_SoftLayer_Account_getVirtualGuests.xml.
        node = self.driver.list_nodes()[0]
        self.assertEqual(node.name, 'test1')
        self.assertEqual(node.state, NodeState.RUNNING)
        self.assertEqual(node.extra['password'], 'TEST')

    def test_list_locations(self):
        locations = self.driver.list_locations()
        # Pick the Seattle datacenter out of the fixture list.
        seattle = (l for l in locations if l.name == 'sea01').next()
        self.assertEqual(seattle.country, 'US')
        self.assertEqual(seattle.id, '18171')

    def test_list_images(self):
        images = self.driver.list_images()
        image = images[0]
        self.assertEqual(image.id, '1684')

    def test_list_sizes(self):
        sizes = self.driver.list_sizes()
        self.assertEqual(len(sizes), 2)
        self.assertEqual(sizes[0].id, 'sl1')
class SoftLayerMockHttp(MockHttp):
    """Mock HTTP backend mapping each XML-RPC method to a canned fixture."""
    fixtures = ComputeFileFixtures('softlayer')

    def _xmlrpc_v3_SoftLayer_Account_getVirtualGuests(self, method, url, body, headers):
        # Answers the getVirtualGuests call issued by list_nodes().
        body = self.fixtures.load('v3_SoftLayer_Account_getVirtualGuests.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _xmlrpc_v3_SoftLayer_Location_Datacenter_getDatacenters(self, method, url, body, headers):
        # Answers the getDatacenters call issued by list_locations().
        body = self.fixtures.load('v3_SoftLayer_Location_Datacenter_getDatacenters.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
# Run this test module directly; the process exit status reflects success.
if __name__ == '__main__':
    sys.exit(unittest.main())
|
UTF-8
|
Python
| false | false | 2,011 |
12,412,455,534,565 |
e60ba0a21770e16e35408d6394f42292e6cf8649
|
1a104751d1354f3b3dc1555ed148abf1717e203b
|
/plumbing/slurm/job.py
|
12cffca213d80cf2c55d705528cc62fe31c2517a
|
[
"MIT"
] |
permissive
|
svetlyak40wt/plumbing
|
https://github.com/svetlyak40wt/plumbing
|
bf1ad43f02f91e4de5c2ba9ae1f082989fca49ea
|
928e7900d8b55f3ae031c3acfeeb374150df268a
|
refs/heads/master
| 2021-01-14T13:26:55.469539 | 2014-12-01T15:02:46 | 2014-12-01T15:02:46 | 27,484,423 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Built-in modules #
import os, re, platform
import base64, hashlib
from collections import OrderedDict
# Internal modules #
from plumbing.slurm.existing import projects, jobs
from plumbing.common import tail, flatter
from plumbing.color import Color
from plumbing.tmpstuff import new_temp_path
from plumbing.slurm import num_processors
from plumbing.autopaths import FilePath, DirectoryPath
from plumbing.cache import property_cached
# Third party modules #
import sh
# Constants #
# Captured once at import; JobSLURM uses it to detect 'milou' login nodes.
hostname = platform.node()
################################################################################
class JobSLURM(object):
"""Makes launching SLURM jobs easy to write and easy to use. Here are some
examples on how to use this class:
for command in enumerate(['print "hi"', 'print "hello"']):
job = JobSLURM(command, time='00:01:00', qos='short')
job.run()
for path in ['~/data/scafolds1.txt', '~/data/scafolds2.txt', '~/data/scafolds3.txt']:
command = ['import sh\n']
command += ['script = sh.Command("analyze.py")\n']
command += ['script(%s)' % path]
job = JobSLURM(command, time='00:01:00', qos='short', job_name=path[-25:])
job.run()
print "Job %i is running !" % job.id
"""
extensions = {
'bash': "sh",
'python': "py"
}
shebang_headers = {
'bash': ["#!/bin/bash -le"], # As a login shell and stop on error
'python': ["#!/usr/bin/env python"]
}
slurm_headers = OrderedDict((
('job_name' , {'tag': '#SBATCH -J %s', 'needed': True}),
('change_dir', {'tag': '#SBATCH -D %s', 'needed': True, 'default': os.path.abspath(os.getcwd())}),
('out_file' , {'tag': '#SBATCH -o %s', 'needed': True, 'default': '/dev/null'}),
('project' , {'tag': '#SBATCH -A %s', 'needed': False, 'default': "b2011035"}),
('time' , {'tag': '#SBATCH -t %s', 'needed': True, 'default': '7-00:00:00'}),
('machines' , {'tag': '#SBATCH -N %s', 'needed': True, 'default': '1'}),
('cores' , {'tag': '#SBATCH -n %s', 'needed': True, 'default': num_processors}),
('partition' , {'tag': '#SBATCH -p %s', 'needed': True, 'default': 'node'}),
('email' , {'tag': '#SBATCH --mail-user %s', 'needed': False, 'default': os.environ.get('EMAIL')}),
('email-when', {'tag': '#SBATCH --mail-type=%s', 'needed': True, 'default': 'END'}),
('qos' , {'tag': '#SBATCH --qos=%s', 'needed': False, 'default': 'short'}),
('dependency', {'tag': '#SBATCH -d %s', 'needed': False, 'default': 'afterok:1'}),
('constraint', {'tag': '#SBATCH -C %s', 'needed': False, 'default': 'fat'}),
('cluster' , {'tag': '#SBATCH -M %s', 'needed': False, 'default': 'milou'}),
('alloc' , {'tag': '#SBATCH --reservation=%s', 'needed': False, 'default': 'miiiiiine'}),
('jobid' , {'tag': '#SBATCH --jobid=%i', 'needed': False, 'default': 2173455}),
))
script_headers = {
'bash': ['echo "SLURM: start at $(date) on $(hostname)"'],
'python': ['import dateutil.tz, datetime, platform',
'now = datetime.datetime.now(dateutil.tz.tzlocal())',
r'now = now.strftime("%Y-%m-%d %Hh%Mm%Ss %Z%z")',
'node = platform.node()',
'print "SLURM: start at {0} on {1}".format(now, node)']}
script_footers = {
'bash': ['echo "SLURM: end at $(date)"'],
'python': ['now = datetime.datetime.now(dateutil.tz.tzlocal())'
r'now = now.strftime("%Y-%m-%d %Hh%Mm%Ss %Z%z")'
'print "SLURM: end at {0}".format(now)']}
def __repr__(self): return '<%s object "%s">' % (self.__class__.__name__, self.name)
@property
def name(self): return self.kwargs['job_name']
def __init__(self,
command = ["import sys", "print 'Hello world'", "sys.exit()"],
language = 'python',
base_dir = None,
script_path = None,
**kwargs):
# Required attributes #
self.command = command
self.language = language
self.kwargs = kwargs
# Set the file paths #
self.set_paths(base_dir, script_path)
# Check command type #
if not isinstance(self.command, list): self.command = [self.command]
# Get the name #
if 'job_name' not in self.kwargs:
hashed = hashlib.md5(''.join(self.command)).digest()
encoded = base64.urlsafe_b64encode(hashed)
self.kwargs['job_name'] = encoded
# Check we have a project otherwise choose the one with less hours #
if hostname.startswith('milou'):
if 'project' not in self.kwargs and 'SBATCH_ACCOUNT' not in os.environ:
if projects: self.kwargs['project'] = projects[0]['name']
def set_paths(self, base_dir, script_path):
"""Set the directory, the script path and the outfile path"""
# Make absolute paths #
if 'change_dir' in self.kwargs:
self.kwargs['change_dir'] = DirectoryPath(os.path.abspath(self.kwargs['change_dir']))
if 'out_file' in self.kwargs:
self.kwargs['out_file'] = FilePath(os.path.abspath(self.kwargs['out_file']))
# In case there is a base directory #
if base_dir is not None:
self.base_dir = DirectoryPath(os.path.abspath(base_dir))
self.script_path = FilePath(base_dir + "run." + self.extensions[self.language])
self.kwargs['change_dir'] = base_dir
self.kwargs['out_file'] = FilePath(base_dir + "run.out")
# Other cases #
if base_dir is None and script_path is None: self.script_path = FilePath(new_temp_path())
if script_path is not None: self.script_path = FilePath(os.path.abspath(script_path))
@property_cached
def slurm_params(self):
"""The list of parameters to give to the `sbatch` command."""
# Main loop #
result = OrderedDict()
for param, info in self.slurm_headers.items():
if not info['needed'] and not param in self.kwargs: continue
if param in self.kwargs: result[param] = self.kwargs.get(param)
else: result[param] = info['default']
# Special cases #
if result.get('cluster') == 'halvan': result['partition'] = 'halvan'
# Return #
return result
@property
def script(self):
"""The script to be submitted to the SLURM queue."""
self.shebang_header = self.shebang_headers[self.language]
self.slurm_header = [self.slurm_headers[k]['tag'] % v for k,v in self.slurm_params.items()]
self.script_header = self.script_headers[self.language]
self.script_footer = self.script_footers[self.language]
return '\n'.join(flatter([self.shebang_header,
self.slurm_header,
self.script_header,
self.command,
self.script_footer]))
def make_script(self):
"""Make the script and return a FilePath object pointing to the script above."""
self.script_path.write(self.script)
self.script_path.permissions.make_executable()
return self.script_path
@property
def log_tail(self):
"""If we have a log file, what is its tail"""
if not self.kwargs['out_file'].exists: return False
else: return tail(self.slurm_params['out_file'])
@property
def status(self):
"""What is the status of the job ?"""
# If there is no script it is either ready or a lost duplicate #
if not self.script_path.exists:
if self.name in jobs.names: return "DUPLICATE"
if self.name not in jobs.names: return "READY"
# It is submitted already #
if self.name in jobs.names:
if jobs[self.name]['type'] == 'queued': return "QUEUED"
if jobs[self.name]['type'] == 'running': return "RUNNING"
# So the script exists for sure but it is not in the queue #
if not self.kwargs['out_file'].exists: return "ABORTED"
# Let's look in log file #
if 'CANCELLED' in self.log_tail: return "CANCELLED"
if 'SLURM: end at' in self.log_tail: return "ENDED"
# Default #
return "INTERUPTED"
@property
def info(self):
"""Get the existing job information dictionary"""
if self.name not in jobs: return {'status': self.status}
else: return jobs[self.name]
def run(self):
"""Will call self.launch() after performing some checks"""
# Check already exists #
if self.status == "READY": return self.launch()
# Check name conflict #
if self.status == "DUPLICATE": message = "Job with same name '%s' already in queue, but we lost the script."
if self.status == "QUEUED": message = "Job '%s' already in queue."
if self.status == "RUNNING": message = "Job '%s' already running."
if self.status == "ENDED": message = "Job '%s' already ended successfully."
if self.status == "ABORTED": message = "Job '%s' was killed without any output file (?)."
if self.status == "CANCELLED": message = "Job '%s' was canceled while running."
if self.status == "INTERUPTED": message = "Job '%s' is not running. Look at the log file."
print Color.i_red + message % (self.name,) + Color.end
print "Job might have run already (?). Not starting."
def launch(self):
"""Make the script file and return the newly created job id"""
# Make script file #
self.make_script()
# Do it #
sbatch_out = sh.sbatch(self.script_path)
jobs.expire()
# Message #
print Color.i_blu + "SLURM:" + Color.end + " " + str(sbatch_out),
# Return id #
self.id = int(re.findall("Submitted batch job ([0-9]+)", str(sbatch_out))[0])
return self.id
def interupt(self):
if self.status != "QUEUED" and self.status != "RUNNING":
raise Exception("Can't cancel job '%s'" % self.name)
sh.scancel(self.info['jobid'])
def wait(self):
"""Wait until the job is finished"""
pass
|
UTF-8
|
Python
| false | false | 2,014 |
7,842,610,303,953 |
000a189ff7830372dd7905029c9307b46f7c1ee6
|
981c97e1f5c99687d50455f26a405b25485abe14
|
/catalog/views.py
|
13baa958ae33c9f842457ca3f4d3edcca3ec108c
|
[] |
no_license
|
derkcris/ourwebcatalog
|
https://github.com/derkcris/ourwebcatalog
|
ec298b79723ef19fc11fd93a6675b240b93b8374
|
e3b0a468709cc36be70bbf1ea9b37b4e25c9898e
|
refs/heads/master
| 2016-08-06T03:20:59.509978 | 2013-05-10T11:11:49 | 2013-05-10T11:11:49 | 9,125,751 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from catalog.models import Category, Article, Item, STATE_CHOICES, STATE_AVAILABLE, CONDITION_CHOICES
from django.shortcuts import render, get_object_or_404
def index(request):
    """Landing page: all categories plus all articles grouped by category."""
    context = {
        'categories': Category.objects.all().order_by('name'),
        'articles': Article.objects.all().order_by('category'),
    }
    return render(request, 'index.html', context)
def category_index(request):
    """List every category, ordered by name."""
    context = {
        'categories': Category.objects.all().order_by('name'),
    }
    return render(request, 'category_index.html', context)
def category(request, category_id):
    """Category detail page, including its direct subcategories."""
    category = get_object_or_404(Category, pk=category_id)
    context = {
        'categories': Category.objects.all().order_by('name'),
        'category': category,
        'subcategories': Category.objects.filter(main_category_id=category.id).order_by('-name'),
    }
    return render(request, 'category.html', context)
def category_add(request):
    """Blank category creation form."""
    context = {
        'category': Category(),
        'categories': Category.objects.all().order_by('name'),
    }
    return render(request, 'category_add.html', context)
def category_edit(request, category_id):
    """Category edit form, reusing the creation template."""
    context = {
        'category': get_object_or_404(Category, pk=category_id),
        'categories': Category.objects.all().order_by('name'),
    }
    return render(request, 'category_add.html', context)
def category_save(request, category_id):
    """Create (``category_id`` == '0') or update a category from POST data.

    NOTE(review): unlike article_save, missing POST keys or a nonexistent
    main category raise unhandled exceptions here.
    """
    categories = Category.objects.all().order_by('name')
    errors = []
    if category_id in (0, '0'):
        category = Category()
    else:
        category = get_object_or_404(Category, pk=category_id)
    category.name = request.POST['name']
    category.description = request.POST['description']
    if not category.name:
        errors.append("Name is required")
    main_category = request.POST['main_category']
    if main_category in (0, '0'):
        category.main_category = None
    else:
        category.main_category = Category.objects.get(id=main_category)
    if errors:
        context = {
            'category': category,
            'error_message': errors,
            'categories': categories,
        }
        return render(request, 'category_add.html', context)
    category.save()
    context = {
        'categories': categories,
        'category': category,
        'success_message': 'La categoria ' + category.name + ' ha sido guardada exitosamente.',
    }
    return render(request, 'category.html', context)
def category_remove(request, category_id):
    """Delete a category and return to the category index."""
    categories = Category.objects.all().order_by('name')
    category = get_object_or_404(Category, pk=category_id)
    category.delete()
    context = {
        'categories': categories,
        'success_message': 'La categoria ' + category.name + ' ha sido eliminada exitosamente.',
    }
    return render(request, 'category_index.html', context)
def category_article_add(request, category_id):
    """Blank article form, pre-bound to the given category."""
    article = Article()
    article.category = get_object_or_404(Category, pk=category_id)
    context = {
        'article': article,
        'categories': Category.objects.all().order_by('name'),
    }
    return render(request, 'article_add.html', context)
def category_subcategory_add(request, category_id):
    """Blank category form, pre-bound to the given parent category."""
    subcategory = Category()
    subcategory.main_category = get_object_or_404(Category, pk=category_id)
    context = {
        'category': subcategory,
        'categories': Category.objects.all().order_by('name'),
    }
    return render(request, 'category_add.html', context)
def article(request, article_id):
    """Article detail page."""
    context = {
        'categories': Category.objects.all().order_by('name'),
        'article': get_object_or_404(Article, pk=article_id),
    }
    return render(request, 'article.html', context)
def article_add(request):
    """Blank article creation form."""
    context = {
        'article': Article(),
        'categories': Category.objects.all().order_by('name'),
    }
    return render(request, 'article_add.html', context)
def article_edit(request, article_id):
    """Article edit form, reusing the creation template."""
    context = {
        'article': get_object_or_404(Article, pk=article_id),
        'categories': Category.objects.all().order_by('name'),
    }
    return render(request, 'article_add.html', context)
def article_save(request, article_id):
    """Create (``article_id`` == '0') or update an article from POST data.

    Missing POST keys or a nonexistent category are reported back on the
    form rather than raising.
    """
    categories = Category.objects.all().order_by('name')
    errors = []
    try:
        if article_id in (0, '0'):
            article = Article()
        else:
            article = get_object_or_404(Article, pk=article_id)
        article.name = request.POST['name']
        article.description = request.POST['description']
        if not article.name:
            errors.append("El nombre es requerido")
        article.category = Category.objects.get(id=request.POST['category'])
    except (KeyError, Category.DoesNotExist):
        errors.append("La categoria es requerida")
    if errors:
        context = {
            'article': article,
            'error_message': errors,
            'categories': categories,
        }
        return render(request, 'article_add.html', context)
    article.save()
    context = {
        'categories': categories,
        'article': article,
        'success_message': 'El articulo ' + article.name + ' ha sido guardado exitosamente.',
    }
    return render(request, 'article.html', context)
def article_remove(request, article_id):
    """Delete an article and return to the landing page."""
    categories = Category.objects.all().order_by('name')
    article = get_object_or_404(Article, pk=article_id)
    article.delete()
    context = {
        'categories': categories,
        'articles': Article.objects.all().order_by('category'),
        'success_message': 'El articulo ' + article.name + ' ha sido eliminado exitosamente.',
    }
    return render(request, 'index.html', context)
def item(request, item_id):
    """Show the detail page for a single Item."""
    found = get_object_or_404(Item, pk=item_id)
    context = {
        'categories': Category.objects.all().order_by('name'),
        'item': found,
    }
    return render(request, 'item.html', context)
def item_add(request, article_id):
    """Render a blank item form pre-bound to the given parent Article."""
    new_item = Item()
    new_item.article = get_object_or_404(Article, pk=article_id)
    return render(request, 'item_add.html', {
        'categories': Category.objects.all().order_by('name'),
        'item': new_item,
        'articles': Article.objects.all().order_by('name'),
        'STATE_CHOICES': STATE_CHOICES,
        'CONDITION_CHOICES': CONDITION_CHOICES,
    })
def item_edit(request, item_id):
    """Render the item form pre-filled with an existing Item."""
    existing = get_object_or_404(Item, pk=item_id)
    return render(request, 'item_add.html', {
        'categories': Category.objects.all().order_by('name'),
        'item': existing,
        'articles': Article.objects.all().order_by('name'),
        'STATE_CHOICES': STATE_CHOICES,
        'CONDITION_CHOICES': CONDITION_CHOICES,
    })
def item_save(request, item_id):
    """Validate POST data and create (item_id == 0) or update an Item.

    On validation errors the item form is re-rendered with the messages;
    on success the parent article's page is shown.
    """
    categories = Category.objects.all().order_by('name')
    error = False
    error_message = []
    if item_id == 0 or item_id == '0':
        # id 0 means "create": new items always start out available.
        item = Item()
        item.state = STATE_AVAILABLE
    else:
        item = get_object_or_404(Item, pk=item_id)
    # .get() so a missing field becomes a validation error instead of a 500.
    item.name = request.POST.get('name', '')
    item.description = request.POST.get('description', '')
    item.condition = request.POST.get('condition', '')
    if len(item.name) == 0:
        error = True
        error_message.append("El nombre es requerido")
    if len(item.condition) == 0:
        error = True
        error_message.append("La condicion es requerida")
    article_id = request.POST.get('article', '0')
    if article_id != 0 and article_id != '0':
        item.article = Article.objects.get(id=article_id)
    else:
        item.article = None
    if error:
        articles = Article.objects.all().order_by('name')
        context = {
            'categories': categories,
            'item': item,
            'error_message': error_message,
            'articles': articles,
            # The form template needs the choice lists, exactly as in
            # item_add/item_edit; they were missing on this error path.
            'STATE_CHOICES': STATE_CHOICES,
            'CONDITION_CHOICES': CONDITION_CHOICES,
        }
        return render(request, 'item_add.html', context)
    else:
        item.save()
        # NOTE(review): saving an item without an article crashes on the
        # next line; confirm the form always supplies one.
        article = Article.objects.get(pk=item.article.id)
        context = {
            'categories': categories,
            'article': article,
            # Fixed: this is the "saved" message; it previously claimed
            # the item had been deleted ("eliminado").
            'success_message': 'El item ' + item.name + ' ha sido guardado exitosamente.',
        }
        return render(request, 'article.html', context)
def item_remove(request, item_id):
    """Delete an Item and return to its parent article's page."""
    categories = Category.objects.all().order_by('name')
    # Fixed: the lookup previously queried Category with an Item pk,
    # which deleted the wrong object (or 404'd spuriously).
    item = get_object_or_404(Item, pk=item_id)
    article = item.article  # grab the parent before deleting the row
    item.delete()
    context = {
        'categories': categories,
        'article': article,
        'success_message': 'El item ' + item.name + ' ha sido eliminado exitosamente.',
    }
    return render(request, 'article.html', context)
|
UTF-8
|
Python
| false | false | 2,013 |
18,665,927,892,844 |
cdaa0c5cab9d94ef1a00f598730ba6c88718cf7f
|
f7acbdee331c08965a17ceb696391d55c9e1bf0f
|
/Week 0 tasks/Simple_problems/09contains_digits/09contains_digits.py
|
5b149fc7b688947bb0eeefb4370ced7d0ed02d23
|
[] |
no_license
|
mimipaskova/HackBulgaria-Programming101
|
https://github.com/mimipaskova/HackBulgaria-Programming101
|
b3d84279fd964af4cde85446eb653ce80b92ab96
|
f7858bb659c3f6931fcf862e3ae94f78156d8897
|
refs/heads/master
| 2021-01-10T20:11:33.565978 | 2014-04-10T18:43:17 | 2014-04-10T18:43:17 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def contains_digits(number, digits):
    """Return True when every digit in *digits* occurs in *number*.

    *number* is a non-negative int; *digits* is an iterable of ints 0-9.
    Works on the decimal string representation, which also fixes
    number == 0 (the old number_to_list-based version produced an empty
    digit list for 0, so contains_digits(0, [0]) was wrongly False).
    """
    present = {int(ch) for ch in str(number)}
    return set(digits).issubset(present)
def number_to_list(n):
    """Return the decimal digits of non-negative int *n*, most significant first.

    Fixed: n == 0 now yields [0] instead of an empty list. Also stops
    shadowing the builtin name ``list``.
    """
    if n == 0:
        return [0]
    digits = []
    while n > 0:
        digits.append(n % 10)
        n //= 10
    digits.reverse()
    return digits
def main():
    """Exercise contains_digits with a few sample calls."""
    samples = (
        (402123, [0, 3, 4]),
        (666, [6, 4]),
        (123456789, [1, 2, 3, 0]),
    )
    for number, digits in samples:
        print(contains_digits(number, digits))


if __name__ == '__main__':
    main()
|
UTF-8
|
Python
| false | false | 2,014 |
14,439,680,057,475 |
e9b1326945fe074790e3b06ae28e43e854b0d518
|
5419622a9b0fb695a45f8e87d7502bb7aed7a19b
|
/simulator-ui/dist-files/Building a Brain/chapter5/question-control.py
|
5af2916936a24a29c00eee6263badcbc85f4b32f
|
[
"MPL-1.1",
"GPL-1.0-or-later",
"MPL-1.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] |
non_permissive
|
shuw/nengo
|
https://github.com/shuw/nengo
|
2de3f6e8f3706462e4ac66ce998ba8b55f0eea34
|
d681a5448e89ed75e4d39cc7ba7f911c56a3910e
|
refs/heads/master
| 2021-01-17T10:24:24.138702 | 2012-11-17T20:51:07 | 2012-11-17T20:51:07 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
D=100
N=30
import nef
import hrr
# import templates accessible from the drag-and-drop bar
import nef.templates.integrator as integrator
import nef.templates.binding as binding
import nef.templates.gate as gating
import nef.templates.basalganglia as bgtemplate
import nef.templates.basalganglia_rule as bg_rule
net = nef.Network('Question Answering with Control (pre-built)',seed=1)
# Define the vocabulary of vectors
vocab = hrr.Vocabulary(D,max_similarity=0.05)
vocab.parse('CIRCLE+BLUE+RED+SQUARE+QUESTION+STATEMENT')
# Input, output, and intermediate ensembles
visual = net.make_array('Visual', N, D)
channel = net.make_array('Channel', N, D)
net.make_array('Motor', N, D)
# Create the memory
integrator.make(net,name='Memory',neurons=N*D,dimensions=D,tau_feedback=0.4,tau_input=0.05,scale=1)
memory = net.network.getNode('Memory')
# Add projections to and from the channel ensemble
net.connect('Visual', 'Channel', pstc=0.02)
net.network.addProjection(channel.getOrigin('X'), memory.getTermination('input'))
# Create ensemble calculating the unbinding transformation
binding.make(net, name='Unbind', outputName='Motor', N_per_D=100, invertA=True, invertB=False)
unbind = net.network.getNode('Unbind')
net.network.addProjection(visual.getOrigin('X'), unbind.getTermination('A'))
net.network.addProjection(memory.getOrigin('X'), unbind.getTermination('B'))
# Create basal ganglia and pattern matching rules
bgtemplate.make(net,name='Basal Ganglia',dimensions=2,pstc=0.01)
bg = net.network.getNode('Basal Ganglia')
bg_rule.make(net,bg,index=0,dimensions=D,pattern='STATEMENT',pstc=0.01,use_single_input=True)
bg_rule.make(net,bg,index=1,dimensions=D,pattern='QUESTION',pstc=0.01,use_single_input=True)
net.network.addProjection(visual.getOrigin('X'), bg.getTermination('rule_00'))
net.network.addProjection(visual.getOrigin('X'), bg.getTermination('rule_01'))
# Create the thalamus network to process the output from the basal ganglia
thalamus = net.make_array('Thalamus', N, 2, quick=True, intercept=(-1, 0), encoders=[[1]])
thalamus.addDecodedTermination('bg',[[-3, 0], [0, -3]],0.01,False)
net.network.addProjection(bg.getOrigin('output'), thalamus.getTermination('bg'))
def xBiased(x):
    """Shift the first component of the decoded value up by one."""
    first_component = x[0]
    return [first_component + 1]
# Add gating signals to control memory acquisition and motor output
gating.make(net,name='Gate1', gated='Channel', neurons=100 ,pstc=0.01)
gating.make(net,name='Gate2', gated='Motor', neurons=100 ,pstc=0.01)
net.connect(thalamus, 'Gate1', index_pre=0, func=xBiased)
net.connect(thalamus, 'Gate2', index_pre=1, func=xBiased)
# Automatic inputs
class Input(nef.SimpleNode):
    """Scripted stimulus node for the question-answering demo.

    Timeline (simulation seconds):
      t < 1.0  : two statements -- STATEMENT+RED*CIRCLE (0.1-0.5 s),
                 then STATEMENT+BLUE*SQUARE (0.6-1.0 s).
      t >= 1.0 : a repeating 0.6 s cycle alternating QUESTION+RED and
                 QUESTION+SQUARE, with a quiet gap at the start of each cycle.
    """
    def __init__(self, name):
        # Zero vector emitted whenever no stimulus is being presented.
        self.zero = [0] * D
        nef.SimpleNode.__init__(self, name)
        # Pre-parse the four stimulus vectors from the shared vocabulary.
        self.v1 = vocab.parse('STATEMENT+RED*CIRCLE').v
        self.v2 = vocab.parse('STATEMENT+BLUE*SQUARE').v
        self.v3 = vocab.parse('QUESTION+RED').v
        self.v4 = vocab.parse('QUESTION+SQUARE').v

    def origin_x(self):
        """Return the stimulus vector for the current simulation time."""
        t = self.t_start
        if t < 1:
            # First second: present the two statements one after another.
            if 0.1 < self.t_start < 0.5:
                return self.v1
            elif 0.6 < self.t_start < 1.0:
                return self.v2
            else:
                return self.zero
        else:
            # Afterwards: fold time into a 0.6 s cycle of the two questions.
            t = (t - 1.0) % 0.6
            if 0.2 < t < 0.4:
                return self.v3
            elif 0.4 < t < 0.6:
                return self.v4
            else:
                return self.zero
input=Input('Input')
net.add(input)
net.connect(input.getOrigin('x'), 'Visual')
net.add_to_nengo()
|
UTF-8
|
Python
| false | false | 2,012 |
16,612,933,509,340 |
d10b2e68e30fbfda2ab0c28e996ff285f50ad982
|
e8ab787fbfe7135f9b4b881556a213183c884797
|
/blogapp/urls.py
|
a5546ea3edf6123edf290941d0f37bdbf5972454
|
[
"MIT"
] |
permissive
|
eastside/blog
|
https://github.com/eastside/blog
|
a508d9ee56194b322bbf9932a3c7fc235445c4fc
|
f5713b001f31d50550f88dafde5c427c872acf32
|
refs/heads/master
| 2016-03-25T15:33:42.145970 | 2013-09-19T02:25:56 | 2013-09-19T02:25:56 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from blogapp import models
from django.conf.urls import patterns, url
from django.views.generic import DetailView, ListView
# Todo: Once I have more than say, a dozen blogposts, I'll need to add paging for the ListView
urlpatterns = patterns('',
    # /           -> list of published (draft=False) blog posts
    url(r'^/$',
        ListView.as_view(model=models.BlogPost,
                         queryset=models.BlogPost.objects.filter(draft=False)),
        name='blogpost_list'),
    # /<slug>/    -> detail page for one post, looked up by slug (1-50 chars)
    url(r'^/(?P<slug>[\w-]{1,50})/$', DetailView.as_view(model=models.BlogPost), name='blogpost_detail'),
)
|
UTF-8
|
Python
| false | false | 2,013 |
10,213,432,254,517 |
88b872e472888940a34bf5f1c03342ca6dcc7523
|
f71abe21e00fa8ea404ee125c3eacec90e9fb8eb
|
/polyTri.py
|
29ee8413de8c962c6cd5c310080568a62ecf5c47
|
[] |
no_license
|
omega-hub/polytri
|
https://github.com/omega-hub/polytri
|
4766e34e01d67090e652791dad543ee31cf83347
|
ed079e5083f373a6e5dc912748e4cf7861e2f474
|
refs/heads/master
| 2021-01-04T14:06:40.652145 | 2013-10-05T08:03:31 | 2013-10-05T08:03:31 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Alexander Simes, [email protected]
#
# Usage:
# Input is expected to describe a boundary / polygon (List of Vector2)
# Example: [Vector2(x1, y1), Vector2(x2, y2), etc...]
# Output is a surface made of triangles (List of List of Vector2)
# Example: [[Vector2(a1x, a1y), Vector2(b1x, b1y), Vector2(c1x, c1y)], etc...]
#
# Tips:
# If nothing appears your surface normal may be pointing the wrong way
# If possible, use clockwise boundaries / polygons to avoid reversing the List
# If you are certain your boundaries / polygons are clockwise, the first two sections can be skipped
#
# Warning:
# If your boundary / polygon self intersects the while loop will never terminate
from euclid import *
from math import *
TWOPI = math.pi*2.0
def polyTri(inBoundary):
    """Ear-clipping triangulation of a simple polygon.

    inBoundary -- list of Vector2 vertices describing a closed boundary.
    Returns a list of triangles, each a list of three Vector2 vertices.

    NOTE(review): as warned in the module header, the while loop never
    terminates for self-intersecting boundaries -- callers must supply a
    simple polygon.
    """
    copyBoundary = inBoundary[:] # Don't modify inBoundary
    outSurface = []
    # Determine if the boundary is clockwise or counterclockwise:
    # the signed sum over edges of (x2-x1)*(y2+y1) is positive for clockwise.
    circularDirection = 0.0
    cbLen = len(copyBoundary)-1
    for i in range(cbLen):
        bi = copyBoundary[i]
        bn = copyBoundary[i+1]
        circularDirection += (bn.x-bi.x)*(bn.y+bi.y)
    # Close the loop: the edge from the last vertex back to the first.
    circularDirection += (copyBoundary[0].x-copyBoundary[cbLen].x)*(copyBoundary[0].y+copyBoundary[cbLen].y)
    # Reverse the direction if the boundary is counterclockwise
    if circularDirection < 0.0: copyBoundary.reverse()
    index = 0
    while len(copyBoundary) > 2:
        cbLen = len(copyBoundary)
        # Previous / current / next vertices, wrapping around the ends.
        pIndex = (index+cbLen-1)%cbLen
        nIndex = (index+1)%cbLen
        bp = copyBoundary[pIndex]
        bi = copyBoundary[index]
        bn = copyBoundary[nIndex]
        # Calculate the interior angle described by bp, bi, and bn
        theta = math.atan2(bi.y-bn.y, bi.x-bn.x)-math.atan2(bi.y-bp.y, bi.x-bp.x)
        if theta < 0.0: theta += TWOPI
        # If bp, bi, and bn describe an "ear" of the polygon (convex corner)
        if theta < math.pi:
            inside = False
            # Make sure other vertices are not inside the "ear"
            for i in range(cbLen):
                if i == pIndex or i == index or i == nIndex: continue
                # Black magic point in triangle expressions: the three signed
                # edge cross-products all share a sign iff pi is inside.
                # http://answers.yahoo.com/question/index?qid=20111103091813AA1jksL
                pi = copyBoundary[i]
                ep = (bi.x-bp.x)*(pi.y-bp.y)-(bi.y-bp.y)*(pi.x-bp.x)
                ei = (bn.x-bi.x)*(pi.y-bi.y)-(bn.y-bi.y)*(pi.x-bi.x)
                en = (bp.x-bn.x)*(pi.y-bn.y)-(bp.y-bn.y)*(pi.x-bn.x)
                # This only tests if the point is inside the triangle (no edge / vertex test)
                if (ep < 0 and ei < 0 and en < 0) or (ep > 0 and ei > 0 and en > 0):
                    inside = True
                    break
            # No vertices in the "ear", add a triangle and remove bi
            if not inside:
                outSurface.append([bp, bi, bn])
                copyBoundary.pop(index)
        # Advance to the next candidate vertex (modulo the shrunken list).
        index = (index+1)%len(copyBoundary)
    return outSurface
|
UTF-8
|
Python
| false | false | 2,013 |
4,294,967,340,689 |
7845a4f2e8c88bab1a9c5a33d98a4927611cad01
|
86ab25962ebdf4f4922949c618b370d596fd72f1
|
/tests/test_client.py
|
ca06b77c33866da1f892962bd3d1c2d006c972d6
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
non_permissive
|
AhmedSoliman/vcap-python-client
|
https://github.com/AhmedSoliman/vcap-python-client
|
61d21157a31e85b2bc0d5784c5175f341a19d218
|
db3438c79a0a81d17253884281b371aa8153cb4d
|
refs/heads/master
| 2021-01-18T10:07:18.383729 | 2011-09-28T16:11:21 | 2011-09-28T16:11:21 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
'''
Created on Sep 28, 2011
@author: arefaey
'''
import unittest
from vpc.client import VPC
from vpc import constants
class TestClient(unittest.TestCase):
    """Tests for vpc.client.VPC.

    NOTE(review): test_info appears to perform a live call against the
    default target and compares the raw JSON body byte-for-byte -- brittle;
    confirm whether a fixture/mock would be acceptable here.
    """
    def setUp(self):
        # No shared fixtures; each test constructs its own client.
        pass

    def tearDown(self):
        pass

    def test_initialize(self):
        """A client built with no arguments points at the default target."""
        client = VPC()
        self.assertEqual(constants.DEFAULT_TARGET, client.target)

    def test_info(self):
        """info() returns the cloud controller's info document verbatim."""
        client = VPC()
        info = client.info()
        self.assertEqual('{"name":"vcap","build":2222,"support":"http://support.cloudfoundry.com","version":"0.999","description":"VMware\'s Cloud Application Platform","allow_debug":false}', info)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
UTF-8
|
Python
| false | false | 2,011 |
14,731,737,874,262 |
c2cfd23860adee4445be605f97f7d98d71dc4ef2
|
85a8241c338544f7d3eb220985669ce6da79e617
|
/gcc/tasks/system.py
|
b3723b7b8a22a6fbe1efa5a41aef0eae0f6d10ad
|
[] |
no_license
|
mahmed-mdsol/gcc.py
|
https://github.com/mahmed-mdsol/gcc.py
|
1f1b767b46333c2884231bb291789e0350623e02
|
6f67f8a986c93115d87cac7f3403697617fe5a2f
|
refs/heads/master
| 2023-08-30T15:09:25.466599 | 2012-08-26T19:30:38 | 2012-08-26T19:30:38 | 5,374,588 | 0 | 0 | null | false | 2023-09-06T17:28:48 | 2012-08-10T21:10:49 | 2013-12-28T00:29:43 | 2023-09-06T17:28:44 | 11,296 | 0 | 0 | 1 |
Python
| false | false |
from gcc.tasks.task import CompilationTask, LinkingTask
import os
import shlex
import subprocess
import sys
from gcc.util import find_files
class SystemTask(object):
    """Mixin that runs a system command built from task input and options."""

    def __init__(self, command_maker, shell=False, work_in_git_dir=False, stop_on_exception=False, log_stdout=None, log_stderr=None):  # TODO support STDIN
        '''Initialize the SystemTask.

        command_maker should either be a callable object that takes the task input and options and generates a string command to execute
        or a static string representing the command to execute
        (
            Note that unless shell = True, this command will not have access to special shell semantics (like pipes or & or >, etc).
            This is because arbitrary shell access is [usually] BAD and inherently dangerous.
        )
        stop_on_exception should be True if compilation should stop if an exception occurs (command returns nonzero status code)
        log_stdout/log_stderr should be set to an object to which stdout/stderr should be sent (instead of the console)
        '''
        # Normalize: a static string becomes a callable ignoring its args.
        if isinstance(command_maker, str):
            self.command_maker = lambda *args: command_maker
        else:
            self.command_maker = command_maker
        self.popen_options = {'shell': shell}
        if log_stdout:
            self.popen_options['stdout'] = log_stdout
        if log_stderr:
            self.popen_options['stderr'] = log_stderr
        # (a stray no-op `self` expression statement was removed here)
        self.work_in_git_dir = work_in_git_dir
        self.stop_on_exception = stop_on_exception
        self.execute = subprocess.check_call

    def get_command(self, task_input, options):
        '''Generate the command to pass to subprocess given the task input and options'''
        # Build the command exactly once; the previous version invoked
        # command_maker a second time inside an `and/or` idiom, which both
        # duplicated side effects and mis-handled an empty command string.
        cmd = self.command_maker(task_input, options)
        return cmd if self.popen_options['shell'] else shlex.split(cmd)
class SystemCompilationTask(CompilationTask, SystemTask):
    """Compilation task that shells out to system commands per commit."""
    # TODO this is horrible. You call yourself a programmer?
    # Implement event observers for tasks instead.
    def __init__(self, precompilation_commands=None, precompile_commands=None, postcompile_commands=None, postcompilation_commands=None, *args, **kwargs):
        # Fixed: the command lists previously used mutable default
        # arguments ([]), which are shared across all instances.
        CompilationTask.__init__(self)
        SystemTask.__init__(self, *args, **kwargs)
        self.precompilation_commands = precompilation_commands or []
        self.precompile_commands = precompile_commands or []
        self.postcompile_commands = postcompile_commands or []
        self.postcompilation_commands = postcompilation_commands or []

    def mark_incomplete_on_fail(f):
        '''Decorator to mark a commit compilation task incomplete if an exception was raised.'''
        # NOTE: defined in the class body and used as a plain decorator below;
        # it is not meant to be called as a bound method.
        def wrapped(self, commit, *args, **kwargs):
            try:
                f(self, commit, *args, **kwargs)  # Call the wrapped function
            except Exception as e:
                self.mark_incomplete(commit, e)
                if self.stop_on_exception:
                    raise
                else:
                    self.log("Exception ignored: {0}\n".format(repr(e)))
        return wrapped

    def precompilation(self):
        """Run once before any commit is compiled."""
        if self.work_in_git_dir:
            self.popen_options.update(cwd=self.git_manager.repo.working_dir)
        if 'in_order' in self.options:
            self.targets.sort(key=lambda c: c.committed_date)  # sort by committed date if commits should be compiled in_order
        self.execute_all(self.precompilation_commands)

    def execute_all(self, commands):
        """Execute every command in *commands* with the shared popen options."""
        for command in commands:
            self.execute(command, **self.popen_options)

    def should_compile(self, commit):
        '''Compile if there is no output directory for the commit or if the compilation is incomplete.'''
        return not os.path.isdir(self.output_directory_for(commit)) or self.has_incomplete_output_for(commit)

    @mark_incomplete_on_fail
    def precompile(self, commit):
        """Per-commit setup: record the output directory, run precompile commands."""
        CompilationTask.precompile(self, commit)
        self.options.update(output_directory=self.output_directory_for(commit))
        self.execute_all(self.precompile_commands)

    @mark_incomplete_on_fail
    def compile(self, commit):
        """Run the main command for *commit* and return the produced files."""
        self.execute(self.get_command(commit, self.options), **self.popen_options)
        return find_files(self.output_directory_for(commit), '*')

    def __call__(self, targets, options, git_manager, *args, **kwargs):
        ''':[ This is terrible. You're an awful programmer.'''
        # Stash collaborators on self so the hook methods above can see them.
        self.targets, self.options, self.git_manager = targets, options, git_manager
        CompilationTask.__call__(self, targets, options, git_manager, *args, **kwargs)

    @mark_incomplete_on_fail
    def postcompile(self, commit):
        """Per-commit teardown: run the postcompile commands."""
        self.execute_all(self.postcompile_commands)

    def postcompilation(self):
        """Run once after all commits have been compiled."""
        self.execute_all(self.postcompilation_commands)
|
UTF-8
|
Python
| false | false | 2,012 |
51,539,616,702 |
55f14f558ac360d678889693ff2a784243b5eea5
|
fbb29e0b9500dff29eae2aae80d57764b7af1498
|
/top10nl/bin/top10-trans.py
|
0e6dd15ad53d25d8b39d44b372d8eecb0a04ec5d
|
[
"GPL-3.0-only"
] |
non_permissive
|
fsteggink/NLExtract
|
https://github.com/fsteggink/NLExtract
|
c7717db0f948da15894a66ebdf2e709f9e98dee3
|
b3fec8be15902d3a6b67575aa58da5631e5e6d79
|
refs/heads/master
| 2021-01-20T21:45:17.390274 | 2012-10-02T17:35:09 | 2012-10-02T17:35:09 | 3,297,049 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
#
# Auteur: Frank Steggink
# Doel: Opknippen en transformeren GML-bestanden
# Imports
import argparse
import os.path
import sys
from copy import deepcopy
from lxml import etree
from time import localtime, strftime
# Constantes
GML_NS = 'http://www.opengis.net/gml'
NS = {'gml': GML_NS}
MAX_FEATURES = 40000 # 50000 features/bestand kan er al voor zorgen dat de XSLT-transformatie mislukt
def transform(gml_file, xslt_file, out_dir, max_features = MAX_FEATURES):
    """Split a GML file into chunks of at most max_features features and
    apply the XSLT transformation to each chunk.

    gml_file  -- path of the GML file to split
    xslt_file -- path of the XSLT stylesheet
    out_dir   -- directory that receives the numbered output files
    """
    print 'Begintijd top10-trans:', strftime('%a, %d %b %Y %H:%M:%S', localtime())
    # Determine the base name of the GML file (used in output file names)
    gmlBaseName = os.path.splitext(os.path.basename(gml_file))[0]
    print 'GML bestand=%s baseName=%s out_dir=%s' % (gml_file, gmlBaseName,out_dir)
    # Open and parse the XSLT stylesheet
    xsltF = open(xslt_file, 'r')
    xsltDoc = etree.parse(xsltF)
    xslt = etree.XSLT(xsltDoc)
    xsltF.close()
    # Open the GML file; drop whitespace-only text nodes while parsing
    print 'Inlezen GML bestand %s...' % gml_file
    parser = etree.XMLParser(remove_blank_text=True)
    gmlF=open(gml_file, 'r')
    gmlDoc=etree.parse(gmlF, parser)
    gmlF.close()
    # Collect the features and remove the gml:featureMembers /
    # gml:featureMember container elements from the document
    features = []
    for elem in gmlDoc.getroot():
        tag = elem.tag.rsplit('}', 1)[-1]
        if tag=='featureMembers' or tag=='featureMember':
            features.extend(list(elem))
            gmlDoc.getroot().remove(elem)
    print 'Aantal features in bestand %s: %d' % (gml_file, len(features))
    # Create a temporary element to hold the features (this moves them)
    root = etree.Element('root')
    for feature in features:
        root.append(feature)
    # Replace the removed featureMembers element / featureMember elements
    # with a single fresh gml:featureMembers element
    etree.SubElement(gmlDoc.getroot(), etree.QName(GML_NS, 'featureMembers'))
    # Process the features in batches of max_features
    idx=0 # counter used to number the output files
    gmlTemplate=gmlDoc
    fileNameTemplate=os.path.join(out_dir, '%s_%%02d.gml' % gmlBaseName)
    features=root.xpath('*')
    while len(features) > 0:
        # Clone the GML template and move a batch of features into it
        print 'Iteratie %d: %d te verwerken features' % (idx, len(features[0:max_features]))
        gmlDoc = deepcopy(gmlTemplate)
        featureMembers = gmlDoc.xpath('gml:featureMembers', namespaces=NS)[0]
        for feature in features[0:max_features]:
            featureMembers.append(feature)
        # Apply the XSLT transformation right away
        resultDoc=xslt(gmlDoc)
        # Write the resulting GML chunk to disk
        fileName = fileNameTemplate % idx
        o = open(fileName, 'w')
        o.write(etree.tostring(resultDoc, pretty_print=True, xml_declaration=True, encoding='UTF-8'))
        o.flush()
        o.close()
        # For the next iteration (append() above moved the batch out of root,
        # so this re-query shrinks until the loop terminates)
        features=root.xpath('*')
        idx+=1
    print 'Eindtijd top10-trans:', strftime('%a, %d %b %Y %H:%M:%S', localtime())
def main():
    """Command line entry point: parse arguments, validate paths, transform."""
    # Command line arguments (help texts are in Dutch, like the tool's output)
    argparser = argparse.ArgumentParser(
        description='Splits en transform een GML-bestand',
        epilog='Vanwege de transformatie is uiteindelijk het aantal features per bestand hoger')
    argparser.add_argument('GML', type=str, help='het op te splitsen GML-bestand')
    argparser.add_argument('XSLT', type=str, help='het XSLT-bestand')
    argparser.add_argument('DIR', type=str, help='locatie opgesplitste bestanden')
    argparser.add_argument('--max_features', dest='maxFeatures', default=MAX_FEATURES, type=int, help='features per bestand, default: %d' % MAX_FEATURES)
    args = argparser.parse_args()
    # Validate the given paths before doing any work
    if not os.path.exists(args.GML):
        print 'Het opgegeven GML-bestand is niet aangetroffen!'
        sys.exit(1)
    if not os.path.exists(args.XSLT):
        print 'Het opgegeven XSLT-bestand is niet aangetroffen!'
        sys.exit(1)
    if not os.path.exists(args.DIR):
        print 'De opgegeven directory is niet aangetroffen!'
        sys.exit(1)
    transform(args.GML, args.XSLT, args.DIR, args.maxFeatures)

if __name__ == "__main__":
    main()
|
UTF-8
|
Python
| false | false | 2,012 |
7,284,264,563,525 |
745436bb987c38c8b18713067857642f7225a3a0
|
be55ce250bd194dea2e8098975391da4495af24a
|
/src/infi/win32service/service.py
|
fd5990a12b7b86955298c0d533f26cacba04d9c5
|
[] |
no_license
|
pombredanne/infi.win32service
|
https://github.com/pombredanne/infi.win32service
|
6e923473d29c66c00a919ab612386e617c1dd97d
|
4df844bd712d8c168f5e17c4f4156653cbd5c559
|
HEAD
| 2017-05-02T17:05:06.138213 | 2013-04-23T07:08:50 | 2013-04-23T07:08:50 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import ctypes
import logging
from .utils import enum
from .common import ServiceControl, ServiceType
StartService = ctypes.windll.advapi32.StartServiceW
ControlService = ctypes.windll.advapi32.ControlService
DeleteService = ctypes.windll.advapi32.DeleteService
SetServiceStatus = ctypes.windll.advapi32.SetServiceStatus
CloseServiceHandle = ctypes.windll.advapi32.CloseServiceHandle
QueryServiceStatus = ctypes.windll.advapi32.QueryServiceStatus
# http://msdn.microsoft.com/en-us/library/windows/desktop/ms685992%28v=VS.85%29.aspx
# typedef struct _SERVICE_STATUS_PROCESS {
# DWORD dwServiceType;
# DWORD dwCurrentState;
# DWORD dwControlsAccepted;
# DWORD dwWin32ExitCode;
# DWORD dwServiceSpecificExitCode;
# DWORD dwCheckPoint;
# DWORD dwWaitHint;
# DWORD dwProcessId;
# DWORD dwServiceFlags;
# } SERVICE_STATUS_PROCESS, *LPSERVICE_STATUS_PROCESS;
class SERVICE_STATUS_PROCESS(ctypes.Structure):
    """ctypes mirror of the Win32 SERVICE_STATUS_PROCESS structure
    (layout quoted in the MSDN comment above): nine consecutive DWORDs."""
    _fields_ = [(member_name, ctypes.c_ulong) for member_name in (
        "dwServiceType",
        "dwCurrentState",
        "dwControlsAccepted",
        "dwWin32ExitCode",
        "dwServiceSpecificExitCode",
        "dwCheckPoint",
        "dwWaitHint",
        "dwProcessId",
        "dwServiceFlags",
    )]
# From http://msdn.microsoft.com/en-us/library/windows/desktop/ms685996%28v=vs.85%29.aspx
ServiceState = enum(
STOPPED = 0x00000001,
START_PENDING = 0x00000002,
STOP_PENDING = 0x00000003,
RUNNING = 0x00000004,
CONTINUE_PENDING = 0x00000005,
PAUSE_PENDING = 0x00000006,
PAUSED = 0x00000007
)
ServiceControlsAccepted = enum(
STOP = 0x00000001,
SHUTDOWN = 0x00000004,
PARAMCHANGE = 0x00000008,
PAUSE_CONTINUE = 0x00000002,
NETBINDCHANGE = 0x00000010,
HARDWAREPROFILECHANGE = 0x00000020,
POWEREVENT = 0x00000040,
SESSIONCHANGE = 0x00000080,
PRESHUTDOWN = 0x00000100,
TIMECHANGE = 0x00000200,
TRIGGEREVENT = 0x00000400
)
# typedef struct _SERVICE_STATUS {
# DWORD dwServiceType;
# DWORD dwCurrentState;
# DWORD dwControlsAccepted;
# DWORD dwWin32ExitCode;
# DWORD dwServiceSpecificExitCode;
# DWORD dwCheckPoint;
# DWORD dwWaitHint;
# } SERVICE_STATUS, *LPSERVICE_STATUS;
class SERVICE_STATUS(ctypes.Structure):
    """ctypes mirror of the Win32 SERVICE_STATUS structure (layout quoted
    in the MSDN comment above): seven consecutive DWORDs."""
    _fields_ = [(member_name, ctypes.c_ulong) for member_name in (
        "dwServiceType",
        "dwCurrentState",
        "dwControlsAccepted",
        "dwWin32ExitCode",
        "dwServiceSpecificExitCode",
        "dwCheckPoint",
        "dwWaitHint",
    )]
LPSERVICE_STATUS = ctypes.POINTER(SERVICE_STATUS)
# http://msdn.microsoft.com/en-us/library/windows/desktop/ms685947%28v=VS.85%29.aspx
# typedef VOID( CALLBACK * PFN_SC_NOTIFY_CALLBACK ) (
# IN PVOID pParameter
# );
FN_SC_NOTIFY_CALLBACK = ctypes.WINFUNCTYPE(ctypes.c_int, ctypes.c_void_p)
# http://msdn.microsoft.com/en-us/library/windows/desktop/ms685947%28v=VS.85%29.aspx
# typedef struct _SERVICE_NOTIFY {
# DWORD dwVersion;
# PFN_SC_NOTIFY_CALLBACK pfnNotifyCallback;
# PVOID pContext;
# DWORD dwNotificationStatus;
# SERVICE_STATUS_PROCESS ServiceStatus;
# DWORD dwNotificationTriggered;
# LPTSTR pszServiceNames;
# } SERVICE_NOTIFY, *PSERVICE_NOTIFY;
class SERVICE_NOTIFY(ctypes.Structure):
    """ctypes mirror of the Win32 SERVICE_NOTIFY structure (layout quoted
    in the MSDN comment above); carries the callback and the reported
    SERVICE_STATUS_PROCESS for service-status change notifications."""
    _fields_ = [("dwVersion", ctypes.c_ulong),
                ("pfnNotifyCallback", FN_SC_NOTIFY_CALLBACK),
                ("pContext", ctypes.c_void_p),
                ("dwNotificationStatus", ctypes.c_ulong),
                ("ServiceStatus", SERVICE_STATUS_PROCESS),
                ("dwNotificationTriggered", ctypes.c_ulong),
                ("pszServiceNames", ctypes.c_wchar_p)]
# http://msdn.microsoft.com/en-us/library/windows/desktop/ms684276%28v=VS.85%29.aspx
ServiceNotifyMask = enum(
STOPPED = 0x00000001,
START_PENDING = 0x00000002,
STOP_PENDING = 0x00000004,
RUNNING = 0x00000008,
CONTINUE_PENDING = 0x00000010,
PAUSE_PENDING = 0x00000020,
PAUSED = 0x00000040,
CREATED = 0x00000080,
DELETED = 0x00000100,
DELETE_PENDING = 0x00000200
)
# From WinError.h:
ERROR_SERVICE_SPECIFIC_ERROR = 1066
NO_ERROR = 0
class Service(object):
    """Thin wrapper around a Win32 service handle (SC_HANDLE).

    Wraps the advapi32 service-control calls; every wrapped call raises
    ctypes.WinError() on failure. Usable as a context manager that closes
    the handle on exit. (Python 2 syntax -- note the `except ..., e` below.)
    """
    def __init__(self, handle, tag=None):
        # handle: an open SC_HANDLE; tag: opaque caller data, unused here.
        self.handle = handle
        self.tag = tag

    def start(self, *args):
        """Start the service, passing *args as service arguments."""
        # http://msdn.microsoft.com/en-us/library/windows/desktop/ms686321%28v=vs.85%29.aspx
        # BOOL WINAPI StartService(
        #   __in      SC_HANDLE hService,
        #   __in      DWORD dwNumServiceArgs,
        #   __in_opt  LPCTSTR *lpServiceArgVectors
        # );
        if len(args) == 0:
            lpServiceArgVectors = None
        else:
            # Marshal the args as an array of wide-string pointers.
            lpServiceArgVectors = (ctypes.c_wchar_p * len(args))(*args)
        if not StartService(self.handle, len(args), lpServiceArgVectors):
            raise ctypes.WinError()

    def stop(self):
        """
        Stops the service.
        """
        new_status = SERVICE_STATUS()
        if not ControlService(self.handle, ServiceControl.STOP, ctypes.byref(new_status)):
            raise ctypes.WinError()
        # The control call succeeded but the service must actually be
        # stopping (or stopped) for the stop to count as successful.
        if new_status.dwCurrentState not in [ServiceState.STOPPED, ServiceState.STOP_PENDING]:
            raise ctypes.WinError()

    def safe_stop(self):
        """
        Stops the service, and ignores "not started" errors
        """
        try:
            self.stop()
        except WindowsError, e:
            # 1062 == ERROR_SERVICE_NOT_ACTIVE; anything else is re-raised.
            if e.winerror != 1062:
                raise

    def query_config(self):
        # Not implemented yet; signature kept for API completeness.
        # http://msdn.microsoft.com/en-us/library/windows/desktop/ms684932%28v=vs.85%29.aspx
        # BOOL WINAPI QueryServiceConfig(
        #   __in       SC_HANDLE hService,
        #   __out_opt  LPQUERY_SERVICE_CONFIG lpServiceConfig,
        #   __in       DWORD cbBufSize,
        #   __out      LPDWORD pcbBytesNeeded
        # );
        raise NotImplementedError()

    def query_optional_config(self):
        # Not implemented yet; signature kept for API completeness.
        # http://msdn.microsoft.com/en-us/library/windows/desktop/ms684935%28v=VS.85%29.aspx
        # BOOL WINAPI QueryServiceConfig2(
        #   __in       SC_HANDLE hService,
        #   __in       DWORD dwInfoLevel,
        #   __out_opt  LPBYTE lpBuffer,
        #   __in       DWORD cbBufSize,
        #   __out      LPDWORD pcbBytesNeeded
        # );
        raise NotImplementedError()

    def set_status(self, status):
        """
        Sets the service status. status argument must be a SERVICE_STATUS object.
        """
        # http://msdn.microsoft.com/en-us/library/windows/desktop/ms686241%28v=VS.85%29.aspx
        # BOOL WINAPI SetServiceStatus(
        #   __in  SERVICE_STATUS_HANDLE hServiceStatus,
        #   __in  LPSERVICE_STATUS lpServiceStatus
        # );
        if not SetServiceStatus(self.handle, LPSERVICE_STATUS(status)):
            raise ctypes.WinError()

    def delete(self):
        """
        Deletes the service.
        """
        # http://msdn.microsoft.com/en-us/library/windows/desktop/ms682562%28v=vs.85%29.aspx
        # BOOL WINAPI DeleteService(
        #   __in  SC_HANDLE hService
        # );
        if not DeleteService(self.handle):
            raise ctypes.WinError()

    def close(self):
        """Close the underlying handle; safe to call more than once."""
        if self.handle != 0:
            if not CloseServiceHandle(self.handle):
                raise ctypes.WinError()
            self.handle = 0

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        # Always release the service handle when leaving the with-block.
        self.close()
|
UTF-8
|
Python
| false | false | 2,013 |
12,876,311,960,457 |
3748d7052a214a9966327fb2330056fe6eb17021
|
93aa6f6c590871ecfc619f4016ea94cac680f7e9
|
/config.py
|
a73048c1c8f5310f254fedd88484296492234f3f
|
[
"ZPL-2.1",
"LicenseRef-scancode-unknown-license-reference"
] |
non_permissive
|
collective/ATBiblioTopic
|
https://github.com/collective/ATBiblioTopic
|
4160afbb7890d307b2404612f9d62d7adaa5b249
|
77fdfb716e362e77aee161aeb7ce3b389d0f9852
|
refs/heads/master
| 2023-03-22T15:55:48.815921 | 2008-08-25T21:23:10 | 2008-08-25T21:23:10 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
##########################################################################
# #
# copyright (c) 2006 +++ sunweavers.net +++ #
# and contributors #
# #
# maintainers: Mike Gabriel, [email protected] #
# #
##########################################################################
""" Product configuration
"""
import os
from Products.CMFCore.permissions import AddPortalContent
from Products.Archetypes.public import DisplayList
from Products.CMFBibliographyAT.config import REFERENCE_TYPES
from Products.ATContentTypes.tool.topic import TopicIndex
from ZPublisher.HTTPRequest import record
GLOBALS = globals()
ADD_CONTENT_PERMISSION = AddPortalContent
PROJECTNAME = "ATBiblioTopic"
SKINS_DIR = 'skins'
ATBT_DIR = os.path.abspath(os.path.dirname(__file__))
ATBIBLIOTOPIC_BIBFOLDER_REF = 'ATBiblioTopic_associated_bibfolder'
REFERENCE_ALLOWED_TYPES = [tn.replace(' Reference', 'Reference') for tn in REFERENCE_TYPES]
LISTING_VALUES = DisplayList((
('bulleted', 'Bulleted list'),
('ordered', 'Ordered list'),
('lines', 'Simple lines list'),
('table', 'Table listing'),
))
try:
from Products.CMFBibliographyAT_extended_schemata.config import BIBLIOTOPIC_EXTENDEDSCHEMATA_STRUCTURAL_LAYOUT
except ImportError:
BIBLIOTOPIC_EXTENDEDSCHEMATA_STRUCTURAL_LAYOUT = []
STRUCTURAL_VALUES = DisplayList(tuple([
('none','No structuring'),
('publication_year', 'Publication Year'),
('portal_type', 'Reference Type'),
('AuthorItems', 'Single (Co)Authors'),
]
+ BIBLIOTOPIC_EXTENDEDSCHEMATA_STRUCTURAL_LAYOUT
))
# monkey patch section
BIBLIOGRAPHY_EXTENDED_SCHEMATA = True
# do not touch this variable structure!!! unless you know what you are doing!!! actually do not touch anything in this file
# Pick the full-text index implementation: TextIndexNG2 when available,
# otherwise fall back to the stock ZCTextIndex.  The "extra" record holds
# the keyword arguments portal_catalog expects when creating the index.
try:
    import Products.TextIndexNG2
    # extra args for the TextIndexNG2 index to be added to portal_catalog, do not touch!!!
    ting2_extra = record()
    ting2_extra.indexed_fields = ''
    ting2_extra.default_encoding = 'utf-8'
    ting2_extra.use_converters = 0
    text_index_type = { 'type': 'TextIndexNG2', 'extra': ting2_extra, }
except ImportError:
    # do not at all touch zcti_extra, it is needed to created ZCTextIndex catalog indexes
    zcti_extra = record()
    zcti_extra.lexicon_id = 'plone_lexicon'
    zcti_extra.index_type = 'Okapi BM25 Rank'
    zcti_extra.doc_attr = None
    text_index_type = { 'type': 'ZCTextIndex', 'extra': zcti_extra, }
# Optional extra criteria fields from the extended-schemata add-on.
try:
    from Products.CMFBibliographyAT_extended_schemata.config import BIBLIOTOPIC_EXTENDEDSCHEMATA_CRITERIAFIELDS
except ImportError:
    BIBLIOTOPIC_EXTENDEDSCHEMATA_CRITERIAFIELDS = []
# Criterion definitions for BiblioTopics.  Each entry:
#   'field'       -> (index name, friendly title, description, default)
#   'ctypes'      -> criterion types allowed for this index
#   'catalog'     -> when True, an index/metadata column is added to
#                    portal_catalog (see CATALOG_INDEXES below)
#   'index_type'  -> catalog index type; either a dict like text_index_type
#                    or a plain type name string
#   'custom_view' -> entry gets a custom criterion view
BIBLIOTOPIC_CRITERIAFIELDS = [
    {
        'field' : ('SearchableText', 'Search all reference item text fields',
                   'This criterion looks at all searchable text passages in bibliographical reference items.', '', ),
        'ctypes' : ('ATSimpleStringCriterion', ),
    },
    {
        'field' : ('getAuthors','Authors','Full text search for authors of bibliographical entries (SearchableText expression).',''),
        'catalog' : True,
        'custom_view' : True,
        'index_type' : text_index_type,
        'ctypes' : ( 'ATSimpleStringCriterion',),
    },
    {
        'field' : ('AuthorItems','Single (Co)Author', 'Search for all publications of a specific author (format: last name and first initial like in "Smith J").',''),
        'custom_view' : True,
        'index_type' : 'KeywordIndex',
        'ctypes' : ('ATSimpleStringCriterion',),
    },
    {
        'field' : ( 'publication_date', 'Publication Date', 'Publication date of referenced bibliographical items.',),
        'custom_view' : True,
        'ctypes' : ( 'ATDateRangeCriterion', 'ATFriendlyDateCriteria',),
    },
    {
        'field' : ('Title','Title', 'Full text search for title of referenced bibliographical items.',''),
        'custom_view' : True,
        'ctypes' : ('ATSimpleStringCriterion',),
    },
    {
        'field' : ('path','Website Path', 'Select specific subtrees of your site to be searched for bibliographical items.',''),
        'custom_view' : True,
        'ctypes' : ('ATPathCriterion',),
    },
    {
        'field' : ('portal_type','Reference Type(s)', 'Select reference types that you want to include into your smart bibliography list.',''),
        'custom_view' : True,
        'ctypes' : ('ATPortalTypeCriterion',),
    },
] + BIBLIOTOPIC_EXTENDEDSCHEMATA_CRITERIAFIELDS
# Sort-criterion definitions.  'field' is either a single (name, case, dir)
# tuple or a tuple of such tuples for multi-key sorts, followed by the
# friendly title and description.
BIBLIOTOPIC_SORTFIELDS = [
    {
        'field' : ((('Authors','nocase','asc')),'Authors', 'Author(s) of referenced bibliographical items.',),
        'ctypes' : ('ATSortCriterion', ),
    },
    {
        'field' : ('publication_year', 'Publication Year', 'Publication year of referenced bibliographical items.',),
        'ctypes' : ('ATSortCriterion',),
    },
    {
        'field' : ('sortable_title','Title', 'Title of referenced bibliographical items.',),
        'ctypes' : ('ATSortCriterion',),
    },
    {
        'field' : ((('publication_year', 'cmp', 'desc'),('Authors', 'nocase', 'asc')), 'Publication year (descending), Authors (ascending)', 'Sort referenced bibliographical items by publication year (descending) first and then by author(s) (ascending).', ),
        'ctypes' : ('ATSortCriterion', ),
    },
    {
        'field' : ((('publication_year', 'cmp', 'asc'),('Authors', 'nocase', 'asc')), 'Publication year (ascending), Authors (ascending)', 'Sort referenced bibliographical items by publication year (ascending) first and then by author(s) (ascending).', ),
        'ctypes' : ('ATSortCriterion', ),
    },
    {
        'field' : ((('Authors', 'nocase', 'asc'),('publication_year', 'cmp', 'desc')), 'Authors (ascending), Publication year (descending)', 'Sort referenced bibliographical items by author(s) (ascending) first and then by publication year (descending).', ),
        'ctypes' : ('ATSortCriterion', ),
    },
    {
        'field' : ((('Authors', 'nocase', 'asc'),('publication_year', 'cmp', 'asc')), 'Authors (ascending), Publication year (ascending)', 'Sort referenced bibliographical items by author(s) (ascending) first and then by publication year (ascending).', ),
        'ctypes' : ('ATSortCriterion', ),
    },
]
# generated from the BIBLIOTOPIC_CRITERIAFIELDS
# Catalog indexes/metadata derived from the entries flagged 'catalog': True.
# NOTE(review): entries with 'catalog': True are assumed to always carry a
# dict-valued 'index_type' (a plain string would break .keys()) — confirm.
CATALOG_INDEXES = [ dict([('name',criterion['field'][0])] + [ (key, criterion['index_type'][key]) for key in criterion['index_type'].keys() ]) for criterion in (BIBLIOTOPIC_CRITERIAFIELDS + BIBLIOTOPIC_SORTFIELDS) if criterion.has_key('catalog') and criterion['catalog'] ]
CATALOG_METADATA = [ criterion['field'][0] for criterion in BIBLIOTOPIC_SORTFIELDS if criterion.has_key('catalog') and criterion['catalog'] ]
# initializing criteria indexes for BiblioTopics
# Build a TopicIndex for every criterion and sort field, keyed by index name.
BIBLIOTOPIC_INDEXES = {}
for crit_field in BIBLIOTOPIC_CRITERIAFIELDS + BIBLIOTOPIC_SORTFIELDS:
    index = {}
    index_name = crit_field['field'][0]
    index['friendlyName'] = crit_field['field'][1]
    index['description'] = crit_field['field'][2]
    index['criteria'] = crit_field['ctypes']
    indexObj = TopicIndex(index_name, **index)
    BIBLIOTOPIC_INDEXES[index_name] = indexObj
|
UTF-8
|
Python
| false | false | 2,008 |
4,776,003,641,030 |
df30a409ae72043fb23363c19b1a3196930abc12
|
e57c3e540e85dffd17d1f91a5ed097555e000345
|
/lillith/local.py
|
737fb2105ab8a2c3eda7a07797bdef70d8dd8e32
|
[] |
no_license
|
agrif/lillith
|
https://github.com/agrif/lillith
|
3fb75a4d25951bb8aae1b318aea285b99cb17e74
|
a5d23bbaa5b77560a61a7dfe8fd1c5484132a57c
|
refs/heads/master
| 2021-01-01T15:59:32.633320 | 2014-07-12T22:30:17 | 2014-07-12T22:30:17 | 21,527,714 | 0 | 0 | null | false | 2014-11-10T00:08:23 | 2014-07-05T20:28:58 | 2014-11-10T00:08:23 | 2014-11-10T00:08:23 | 187 | 0 | 1 | 1 |
Python
| null | null |
from .config import _getcf
__all__ = ['Equal', 'Like', 'Greater', 'GreaterEqual', 'Less', 'LessEqual']
class LocalObject:
    """Base class for objects backed by rows of a local database table.

    Subclasses set ``_table`` and implement :meth:`filter`; constructing an
    instance directly performs a filtered lookup that must match exactly
    one row.
    """

    _table = None

    def __new__(cls, **kwargs):
        # Direct construction delegates to filter() and unpacks the single
        # expected match (raises ValueError if zero or many rows match).
        match, = cls.filter(**kwargs)
        return match

    def __init__(self, **kwargs):
        pass

    @classmethod
    def filter(cls, **kwargs):
        """Yield instances matching the given field constraints."""
        raise NotImplementedError("filter")

    @classmethod
    def new_from_id(cls, id, data=None):
        """Return the (cached) instance for a rowid, loading it if needed."""
        cfg = _getcf()
        cache_key = (cls, id)
        try:
            return cfg.localcache[cache_key]
        except KeyError:
            pass
        instance = super().__new__(cls)
        instance.id = id
        instance._cfg = cfg
        if data:
            instance._data = data
        else:
            query = QueryBuilder(instance)
            query.condition("rowid", id)
            instance._data, = query.select()
        instance.__init__()
        cfg.localcache[cache_key] = instance
        return instance

    @classmethod
    def all(cls):
        """Return every row of the table (a filter with no conditions)."""
        return cls.filter()
class Comparison:
    """Abstract SQL condition used by QueryBuilder.

    Subclasses render a WHERE-clause fragment for a given column name.
    """

    def render(self, field):
        """Return a ``(sql_fragment, parameters)`` pair for *field*."""
        raise NotImplementedError("render")


class SimpleComparison(Comparison):
    """Condition expressed as one format string with a single placeholder."""

    format = None  # e.g. "{} = ?" — supplied by each concrete subclass

    def __init__(self, val):
        self.val = val

    def render(self, field):
        clause = self.format.format(field)
        return (clause, [self.val])


class Equal(SimpleComparison):
    format = "{} = ?"


class Like(SimpleComparison):
    format = "{} like ?"


class Greater(SimpleComparison):
    format = "{} > ?"


class GreaterEqual(SimpleComparison):
    format = "{} >= ?"


class Less(SimpleComparison):
    format = "{} < ?"


class LessEqual(SimpleComparison):
    format = "{} <= ?"
class QueryBuilder:
    """Accumulates WHERE conditions and runs a SELECT against one table."""

    def __init__(self, cls):
        self.table = cls._table
        self.conds = []
        self.condfields = []

    def condition(self, field, val):
        """Add one condition on *field*; ``None`` means unconstrained."""
        if val is None:
            return
        comparison = val if isinstance(val, Comparison) else Equal(val)
        clause, params = comparison.render(field)
        self.conds.append(clause)
        self.condfields += params

    def conditions(self, locals, **kwargs):
        """Add conditions in bulk: kwargs maps local-var name -> column."""
        for name, column in kwargs.items():
            self.condition(column, locals[name])

    def select(self, *fields):
        """Execute the SELECT lazily, yielding one dict per row.

        Always includes ``rowid``; selects ``*`` when no fields are given.
        """
        columns = ', '.join(fields) if fields else '*'
        query = "select rowid, {} from {}".format(columns, self.table)
        params = None
        if self.conds:
            params = tuple(self.condfields)
            query += " where " + " and ".join(self.conds)
        cursor = _getcf().dbconn.cursor()
        if params:
            cursor.execute(query, params)
        else:
            cursor.execute(query)
        for row in cursor:
            names = (col[0] for col in cursor.description)
            yield dict(zip(names, row))
|
UTF-8
|
Python
| false | false | 2,014 |
11,261,404,250,818 |
6c5f257e779b410728a71b0d5f53d123b4616fd5
|
843098e9b9efc9b9c3c93df27ddca081b8107c33
|
/bak/tools.py
|
08d89c5508e2126365c862c261798bd4e00a2788
|
[] |
no_license
|
zhwei/sdut-cumpus-video
|
https://github.com/zhwei/sdut-cumpus-video
|
d3da1154bbf0f8aa2421d749a77c4d948e59570d
|
bde88d764a42799789bb84074b53dc55d1ff6aaa
|
refs/heads/master
| 2021-01-01T18:23:32.683207 | 2013-09-25T08:25:55 | 2013-09-25T08:25:55 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python2.7
# -*- coding: gbk -*-
# Scratch script for probing the encoding of sources/Total.xml; most of the
# experimentation below is commented out.
import sys
reload(sys)
# NOTE(review): sys.setdefaultencoding is a known Python 2 hack (the name is
# deleted at startup, hence the reload) — it changes encoding behavior
# process-wide; confirm this is intentional before reuse.
sys.setdefaultencoding('utf-8')
from datetime import datetime
# print str(datetime.now())[:10]
import codecs
import chardet
# gbk_file = codecs.open('sources/Total.xml','r')
# encd = chardet.detect(gbk_file.readlines()[1])['encoding']
#
# print encd
# print gbk_file.read().encode(encd)
# for i in gbk_file.read():
# print chardet.detect(i)['encoding']
# print i.decode(chardet.detect(i)['encoding'])
# print content
#
# file_name = "utf_file%s.xml" % str(datetime.now())[:10]
#
# print file_name
#
# utf_file = open('sources/utf_file.xml', 'a')
|
UTF-8
|
Python
| false | false | 2,013 |
4,904,852,663,576 |
e57f4fd3360fcece0a6cef05489a4cc593d8edbc
|
a1f754bbdf61404d43f0edeae80bfa6b6068066a
|
/pygrafix/window/__init__.py
|
299f39fed7e0b623180b7811b016a739793dbf7f
|
[] |
no_license
|
Queatz/pygrafix
|
https://github.com/Queatz/pygrafix
|
aef5dd5b43997953ce9d84890e62b72e76f6f311
|
99729123a3748633250145fc440451b70ea5012e
|
refs/heads/master
| 2021-01-20T21:40:47.449699 | 2012-01-11T18:20:10 | 2012-01-11T18:20:10 | 3,138,086 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from pygrafix.window._window import *
|
UTF-8
|
Python
| false | false | 2,012 |
19,292,993,127,569 |
b9d83dca6ac68220b7b128e586f236cf0e519559
|
5878826160b0c58254b170dcc6d2a8322f5d1a51
|
/examples/utils.py
|
b0b72643c10472633a1449ecab48757066734480
|
[
"MIT"
] |
permissive
|
jeromeku/theano-nets
|
https://github.com/jeromeku/theano-nets
|
b39fcaa038148916cecd8a8fa52d0193f01212e5
|
128e81dbbe212b5e0362d40521bf40aa52fa6e2d
|
refs/heads/master
| 2021-01-18T11:46:40.663130 | 2014-08-17T00:01:25 | 2014-08-17T00:01:25 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import climate
import pickle
import gzip
import matplotlib.pyplot as plt
import numpy as np
import os
import tempfile
# Extra keyword args for pickle.load: on Python 3 the MNIST pickle (written
# by Python 2) must be decoded with latin1.
KW = {}
try:
    import urllib.request
    KW['encoding'] = 'latin1'
except ImportError:  # Python 2.x has no urllib.request submodule
    import urllib
logging = climate.get_logger(__name__)
climate.enable_default_logging()
def load_mnist(
        labels=False,
        url='http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz',
        local=os.path.join(tempfile.gettempdir(), 'mnist.pkl.gz')):
    '''Load the MNIST digits dataset.

    Downloads the pickled dataset to *local* on first use.  Returns the
    three (train, valid, test) splits; each split is an (images, labels)
    pair when *labels* is true, otherwise just the image matrix.
    '''
    if not os.path.isfile(local):
        logging.info('downloading mnist digit dataset from %s' % url)
        try:
            urllib.request.urlretrieve(url, local)
        except AttributeError:  # Python 2.x: urllib has no "request"
            urllib.urlretrieve(url, local)
        logging.info('saved mnist digits to %s' % local)
    # Close the gzip handle deterministically instead of leaking it.
    with gzip.open(local) as handle:
        dig = [(x, y.astype('int32')) for x, y in pickle.load(handle, **KW)]
    if not labels:
        dig = [x[0] for x in dig]
    return dig
def plot_images(imgs, loc, title=None):
    '''Plot an array of images.

    The input is a matrix of shape (n*n, s*s): n^2 square images of side s,
    one per row.  Each row is drawn as one tile of an n-by-n mosaic placed
    in the matplotlib subplot slot *loc*, optionally titled *title*.
    '''
    n = int(np.sqrt(len(imgs)))
    assert n * n == len(imgs), 'images array must contain a square number of rows!'
    s = int(np.sqrt(len(imgs[0])))
    assert s * s == len(imgs[0]), 'images must be square!'
    mosaic = np.zeros((s * n, s * n), dtype=imgs[0].dtype)
    for idx, flat in enumerate(imgs):
        row, col = divmod(idx, n)
        top, left = row * s, col * s
        mosaic[top:top + s, left:left + s] = flat.reshape((s, s))
    axes = plt.gcf().add_subplot(loc)
    axes.xaxis.set_visible(False)
    axes.yaxis.set_visible(False)
    axes.set_frame_on(False)
    axes.imshow(mosaic, cmap=plt.cm.gray)
    if title:
        axes.set_title(title)
def plot_layers(weights, tied_weights=False):
    '''Create a plot of weights, visualized as "bottom-level" pixel arrays.

    Each layer's weights are propagated back to pixel space by chained
    matrix products and rendered with plot_images; the final layer is
    shown either as tied decoding weights or raw decoding weights.
    '''
    if hasattr(weights[0], 'get_value'):
        # Theano shared variables: pull out the numpy arrays.
        weights = [w.get_value() for w in weights]
    k = min(len(weights), 9)
    projected = np.eye(weights[0].shape[0])
    for layer_idx, w in enumerate(weights[:-1]):
        projected = np.dot(w.T, projected)
        plot_images(projected, 100 + 10 * k + layer_idx + 1,
                    'Layer {}'.format(layer_idx + 1))
    last = weights[-1]
    # Only the last layer can be rendered if its columns form square images.
    if int(np.sqrt(last.shape[1])) ** 2 != last.shape[1]:
        return
    if tied_weights:
        projected = np.dot(last.T, projected)
        plot_images(projected, 100 + 10 * k + k, 'Layer {}'.format(k))
    else:
        plot_images(last, 100 + 10 * k + k, 'Decoding weights')
|
UTF-8
|
Python
| false | false | 2,014 |
94,489,320,684 |
33fd1223637a8ae770da05e6d3e6c24499ef21d5
|
f2e06317d064f7502785dceedd8be8e0a1eacdb1
|
/limerick/limerick.py
|
6447e6756c04abc7816bdb9e3c44cf20e67ac8f8
|
[] |
no_license
|
vault/pylimerick
|
https://github.com/vault/pylimerick
|
e84600f2d23589fd62f825490efbefd12556242f
|
a194b228dca5fce74691e5210ca000fb948c9580
|
refs/heads/master
| 2021-01-10T22:02:25.332683 | 2013-03-11T18:47:59 | 2013-03-11T18:47:59 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy as np
from nltk_contrib.readability import syllables_en as syllables
from nltk.corpus import cmudict
from string import lowercase
# Target syllable counts for the five limerick lines (AABBA meter).
target_meter = np.array([8, 8, 5, 5, 8])
# Characters kept after normalization: lowercase letters, space, newline.
valid_chars = lowercase + " \n"
# CMU pronouncing dictionary: word -> list of phoneme-list pronunciations.
rhyme_dict = cmudict.dict()
class LimerickInfo(object):
    """Diagnostics bundle computed once for a candidate limerick string."""

    def __init__(self, lim):
        self.limerick = lim
        lines = to_lines(lim)
        self.lines = lines
        # Which lines break the meter, and which line pairs fail to rhyme.
        self.meter_violations = list(bad_lines(lines))
        self.rhyme_violations = bad_rhymes(lines)
        self.good_meter = good_meter(lines)
        self.good_rhyme = good_rhyme(lines)
        self.valid = good_limerick(lines)
def limerick(lim):
    # Convenience wrapper: analyze *lim* and return its LimerickInfo.
    return LimerickInfo(lim)
def count_syllables(lines):
    """Syllables in a line for each line"""
    counts = [syllables.count(line) for line in lines]
    return np.array(counts)
def bad_lines(lines):
    """Return the 1-based numbers of metrically bad lines.

    A line is bad when its syllable count is more than two away from the
    target meter for its position.
    """
    deviation = abs(target_meter - count_syllables(lines))
    line_numbers = np.arange(1, 6)
    return line_numbers[deviation > 2]
def good_meter(lines):
    # True when no line deviates too far from the target meter.
    return len(bad_lines(lines)) == 0
def do_rhyme(word1, word2):
    """Returns true if any combination of word pronuncations rhyme"""
    try:
        prons1 = rhyme_dict[word1.lower()]
        prons2 = rhyme_dict[word2.lower()]
    except KeyError:
        # Either word is missing from the CMU dictionary.
        return False
    return any(prons_rhyme(a, b) for a in prons1 for b in prons2)
def prons_rhyme(pron1, pron2, count=2):
    """Returns true if a specific set of pronuncations rhyme.

    Two pronunciations rhyme when their final *count* phonemes agree.
    """
    ending1 = pron1[::-1][:count]
    ending2 = pron2[::-1][:count]
    return ending1 == ending2
def last_words(lines):
    """Return the final space-separated word of each line."""
    return [line.rsplit(' ', 1)[-1] for line in lines]
def bad_rhymes(lines):
    """Return the (1-based) line pairs that should rhyme but do not.

    Limerick scheme AABBA: lines 1/2/5 rhyme with each other, 3/4 rhyme.
    """
    finals = last_words(lines)
    required = (
        ((3, 4), finals[2], finals[3]),
        ((1, 2), finals[0], finals[1]),
        ((2, 5), finals[1], finals[4]),
        ((1, 5), finals[0], finals[4]),
    )
    return [pair for pair, w1, w2 in required if not do_rhyme(w1, w2)]
def good_rhyme(lines):
    # True when every required rhyme pair actually rhymes.
    return len(bad_rhymes(lines)) == 0
def good_limerick(lines):
    """A limerick is valid when it has five lines with good rhyme and meter."""
    if len(lines) != 5:
        return False
    if not good_rhyme(lines):
        return False
    return bool(good_meter(lines))
def normalize_text(text):
    """Lowercase *text* and drop every character outside valid_chars."""
    lowered = text.lower()
    # filter() on a str returns a str under Python 2, which to_lines relies on.
    return filter(lambda ch: ch in valid_chars, lowered)
def to_lines(text):
    """Split normalized text into stripped lines longer than four chars."""
    cleaned = normalize_text(text)
    return [raw.strip() for raw in cleaned.splitlines() if len(raw) > 4]
|
UTF-8
|
Python
| false | false | 2,013 |
7,550,552,535,110 |
5c9184f6e06075c3b958384eb0059d796c4184e7
|
1178b4870e3e3dc734bd3c357912796143bcc421
|
/initialize.py
|
980495cf53efd1d255ccda6cc78d69c164e3ef61
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown",
"LicenseRef-scancode-unknown-license-reference"
] |
non_permissive
|
olafura/hwidgets
|
https://github.com/olafura/hwidgets
|
1cd4a7bb53c194cf0540343d5aefba06f62dde1a
|
21c31f0e1edf2761be5b91f1fbbc2028fe1c503b
|
refs/heads/master
| 2021-01-24T06:27:29.126914 | 2014-05-06T14:20:12 | 2014-05-06T14:20:12 | 19,367,924 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import ConfigParser
import uuid
import couchdb
from couchdb.http import basic_auth
from getpass import getpass
import urllib2
import json
# First-run setup for the local CouchDB server: create an admin account,
# the databases, and enable CORS.  (Python 2: raw_input, urllib2.)
couch = couchdb.Server()
all_headers = couch.resource.headers.copy()
config = ConfigParser.RawConfigParser()
config.read("hwidgets.conf")
try:
    # Reuse stored credentials when hwidgets.conf already has them.
    usern = config.get("Server","username")
    pw = config.get("Server","password")
    couch.resource.credentials = (usern,pw)
except ConfigParser.NoOptionError:
    # No stored credentials: create the "hwidgets" admin with a random
    # password and persist it to hwidgets.conf.
    usern = "hwidgets"
    pw = str(uuid.uuid4()).replace("-","")
    config.set("Server","password", pw)
    username = raw_input("Username leave blank if none: ")
    if not username == "":
        password = getpass("Password: ")
        couch.resource.credentials = (username,password)
    authorization = basic_auth(couch.resource.credentials)
    if authorization:
        all_headers['Authorization'] = authorization
    all_headers["Content-Type"] = "application/json"
    # Register the new admin via CouchDB's _config REST endpoint (PUT).
    url = "http://localhost:5984/_config/admins/"+usern
    data = pw
    request = urllib2.Request(url, data=json.dumps(data), headers=all_headers)
    request.get_method = lambda: 'PUT'
    try:
        urllib2.urlopen(request)
    except urllib2.HTTPError:
        # Best-effort: admin may already exist.
        pass
    couch.resource.credentials = (usern,pw)
    with open("hwidgets.conf", "wb") as configfile:
        config.write(configfile)
# Ensure the application databases exist.
if not "wifi" in couch:
    couch.create("wifi")
if not "battery" in couch:
    couch.create("battery")
authorization = basic_auth(couch.resource.credentials)
if authorization:
    all_headers['Authorization'] = authorization
all_headers["Content-Type"] = "application/json"
# CORS settings pushed one-by-one through the _config endpoint.
config_changes = [
    ("http://localhost:5984/_config/httpd/enable_cors", "true"),
    ("http://localhost:5984/_config/cors/origins", "*"),
    ("http://localhost:5984/_config/cors/credentials", "true"),
    ("http://localhost:5984/_config/cors/methods",
     "GET, PUT, POST, HEAD, DELETE"),
    ("http://localhost:5984/_config/cors/headers",
     "accept, authorization, content-type, origin")
]
for conf in config_changes:
    url, data = conf
    request = urllib2.Request(url, data=json.dumps(data), headers=all_headers)
    request.get_method = lambda: 'PUT'
    try:
        urllib2.urlopen(request)
    except urllib2.HTTPError:
        # Best-effort: ignore settings the server rejects.
        pass
#print(couch.config())
|
UTF-8
|
Python
| false | false | 2,014 |
14,302,241,111,513 |
60b6d0d56320c1dab36fe45618918898b6e47bee
|
c4f1843845cc3ef291e2153048d1c2e36cfdab85
|
/app.py
|
8ccbd28439aa78bd9ea4fca0a965bbbc515b2a5a
|
[
"MIT"
] |
permissive
|
msztolcman/py_simple_plugins
|
https://github.com/msztolcman/py_simple_plugins
|
96204c1cbc0332506c3ee003397240aee229f9ae
|
bdf0a81f739cd69983154e5beff2dd8b64e1c36a
|
refs/heads/master
| 2020-04-10T00:48:05.616791 | 2013-07-28T07:46:44 | 2013-07-28T07:46:44 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python -tt
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
import os, os.path
import sys
import re
from pprint import pprint, pformat
import plugs
# Run every discovered plugin, printing "name: " followed by its output.
for name, mod in plugs.store.items():
    print(name, ': ', sep='', end='')
    mod.run()
|
UTF-8
|
Python
| false | false | 2,013 |
17,703,855,198,081 |
b7765185cfe2ff3bbd81c2b527548017d7b4dad2
|
f700b50703f69f36a7f7afd7cafe0995189a1dc7
|
/plugin/ceilometer_plugin.py
|
088bb378ad794b2ea499653eb01ee5baaf4c7b2b
|
[] |
no_license
|
Semyazz/ganglia-openstack
|
https://github.com/Semyazz/ganglia-openstack
|
80cb82ea5e9e5cfb71aeef3d9674b027815de3f4
|
41ddbc91e658a4d69ae0ae0cf19ad590ee7a7883
|
refs/heads/master
| 2021-01-19T10:20:14.266744 | 2012-07-04T17:57:33 | 2012-07-04T17:57:33 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- encoding: utf-8 -*-
__docformat__ = 'restructuredtext en'
import threading
import logging
from time import time
import time
from Gmetad import gmetad_plugin
#from jsonrpc2_zeromq import RPCNotifierClient
from jsonrpc2_zeromq import NotificationReceiverClient
from Gmetad.gmetad_plugin import GmetadPlugin
from Gmetad.gmetad_config import getConfig, GmetadConfig
def get_plugin():
    """Gmetad entry point: build and return the ceilometer plugin."""
    plugin = CeilometerPlugin('ceilometer')
    return plugin
class CeilometerPlugin(GmetadPlugin):
configuration = None
CONNECTION_ADDRESS = 'connection_address'
TIMEOUT = 'timeout'
RRD_DIR = "rrd_rootdir"
_cfgDefaults = {
CONNECTION_ADDRESS : "tcp://127.0.0.1:99993",
TIMEOUT : 1000,
RRD_DIR : "/var/lib/ganglia/rrds"
}
def __init__(self, cfgid):
self.cfg = None
self.kwHandlers = None
self._reset_config()
try:
self.rrdPlugin = gmetad_plugin._plugins
# print str(self.rrdPlugin)
GmetadPlugin.__init__(self, cfgid)
self.ceilometer = NotificationReceiverClient(self.cfg[CeilometerPlugin.CONNECTION_ADDRESS])
self.ceilometer.timeout = self.cfg[CeilometerPlugin.TIMEOUT]
self._send_lock = threading.Lock()
logging.info("Initialized notifier")
except Exception, err:
logging.error('Unable to start CeilometerPlugin. Cannot connect to ceilometer. Msg: %s', str(err))
raise Exception()
def _parseConfig(self, cfgdata):
for kw,args in cfgdata:
if self.kwHandlers.has_key(kw.lower()):
self.kwHandlers[kw.lower()](args)
def _parse_config(self, cfgdata):
self._parseConfig(cfgdata)
def _reset_config(self):
self.cfg = CeilometerPlugin._cfgDefaults
self.kwHandlers = {
CeilometerPlugin.CONNECTION_ADDRESS : self._parse_connection_address,
}
# print str(self.kwHandlers)
def _parse_connection_address(self, connection_address_string):
self.cfg[CeilometerPlugin.CONNECTION_ADDRESS] = connection_address_string
#TODO: Add connection string validation
def _parse_reply(self, params):
# logging.info("Get reply %s", str(params))
if params is None:
return
answer = {}
for demand in params:
if self.cfg.has_key(demand):
answer[demand] = self.cfg[demand]
self.ceilometer.notify.update_ganglia_configuration(answer)
def _get_message(self, clusterNode, metricNode):
metricName = metricNode.getAttr('name')
values = metricName.split('.',1)
vmName = None
if len(values) > 1:
vmName = values[0]
metricName = ".".join(values[1:])
slope = metricNode.getAttr('slope')
if slope.lower() == 'positive':
dsType = 'COUNTER'
else:
dsType = 'GAUGE'
processTime = clusterNode.getAttr('localtime')
if processTime is None:
processTime = int(time())
heartbeat = 8
args = {'time': processTime,
'metricName': metricName,
'value': metricNode.getAttr('val'),
'units' : metricNode.getAttr('units'),
'type' : metricNode.getAttr('type'),
}
if vmName is not None:
args["instance_name"] = vmName
# logging.info(args)
return args
def notify(self, clusterNode):
# logging.error('CeilometerPlugin: ClusterNode')
clusterPath = '%s' % (clusterNode.getAttr('name'))
if 'GRID' == clusterNode.id:
clusterPath = '%s/__SummaryInfo__'%clusterPath
# We do not want to process grid data
if 'GRID' == clusterNode.id:
return None
for hostNode in clusterNode:
for metricNode in hostNode:
# Don't update metrics that are numeric values.
if metricNode.getAttr('type') in ['string', 'timestamp']:
continue
hostPath = '%s/%s'%(clusterPath, hostNode.getAttr('name'))
# metric_full_name = '%s/%s'%(hostPath, metricNode.getAttr('name'))
msg = self._get_message(clusterNode, metricNode)
msg["host"] = hostPath
msg["clusterName"] = clusterNode.getAttr('name')
if msg is not None:
with self._send_lock:
try:
reply = self.ceilometer.update_stats(msg)
self._parse_reply(reply)
except Exception, e:
logging.error("CeilometerPlugin: Error during notification. %s", str(e))
# gmetadConfig = getConfig()
#
# GmetadPlugin.notify(self, clusterNode)
#
# if not clusterNode:
# data = {"data": "<cpu_temp>72</cpu_temp>"}
#
# with self._send_lock:
# try:
# reply = self.ceilometer.update_stats(clusterNode)
# if reply:
# self.ceilometer.notify.update_ganglia_configuration(self.configuration)
# except Exception as err:
# print err.message
# for hostNode in clusterNode:
# for metricNode in hostNode:
# if metricNode.getAttr('type') in ['string', 'timestamp']:
# continue
#
# print metricNode
def start(self):
logging.info('CeilometerPlugin: Start')
#self.cfg
def stop(self):
logging.info('CeilometerPlugin: Stop')
# def ceilometer_isalive(self):
#
# isalive_timeout = self.REQUEST_TIMEOUT
#
# if self.ceilometer:
# self.ceilometer.timeout = isalive_timeout
#
# _ceilometer_alive = False
# while not _ceilometer_alive:
# self._counter += 1
# error = None
# reply = None
#
# try:
# reply = self.ceilometer.alive(self._counter)
# assert _ceilometer_alive == False
# except Exception as err:
# error = err
# _ceilometer_alive = False
#
# if reply and int(reply) == self._counter:
# _ceilometer_alive = True
# assert self._ceilometer_alive == True
# try:
# self.ceilometer.notify.update_ganglia_configuration(self.configuration)
# except Exception as err:
# error = err
# _ceilometer_alive = False
#
# if error == None:
# assert _ceilometer_alive == True
#
# self.ceilometer.timeout = self.REQUEST_TIMEOUT
|
UTF-8
|
Python
| false | false | 2,012 |
11,321,533,814,394 |
0bba23e08e2e5a8fede4405eff4a79ffaa52db7b
|
3e534ac0d2053e72e8d6b67f96b42cf56464b5fd
|
/dorthy/server.py
|
35eb0c578d19b132822242ae7ce1e4f5cca2a3ad
|
[] |
no_license
|
marcoceppi/dorthy
|
https://github.com/marcoceppi/dorthy
|
466b89574a940991ca86be752b36c876964df699
|
781bd2b60fa8551671cdb2fd681012dad7e24490
|
refs/heads/master
| 2020-05-05T06:43:58.330792 | 2014-05-17T03:20:33 | 2014-05-17T03:20:33 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
import logging
import tornado.ioloop
import tornado.web
import tornado.log
log = logging.getLogger('dorthy.server')
def listen(routes, port=None):
    """Build a tornado Application from *routes* and serve it forever.

    :param routes: object whose ``routes`` attribute is the tornado
        handler list.
    :param port: port to bind; falls back to the PORT environment
        variable, then 8899.
    """
    if not port:
        # Previously a bare try/except around os.environ['PORT'] that also
        # left the port as a string; read it safely and convert to int so
        # tornado receives a numeric port.
        port = int(os.environ.get('PORT', 8899))
    app = tornado.web.Application(routes.routes)
    log.info('Starting tornado server on 127.0.0.1:%s' % port)
    app.listen(port)
    tornado.ioloop.IOLoop.instance().start()
|
UTF-8
|
Python
| false | false | 2,014 |
6,442,450,981,891 |
556348ce95ed1a821c80fb3e403647859e8a31f1
|
ad1bf0f86a96d329e01976e1d1520efec7785cd2
|
/example_scripts/runTests.py
|
c41f776abc768a7cbd54d84f73614dcc65b46068
|
[] |
no_license
|
mberth/pymzML
|
https://github.com/mberth/pymzML
|
809a91ee342f35af79884e155991e0d3915439d8
|
7c79aa56b5b8835a0c6686391e66d6debb756b22
|
refs/heads/master
| 2021-01-18T10:22:48.303009 | 2012-08-19T12:51:58 | 2012-08-19T12:51:58 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python3.2
"""
Testscript to demonstrate
"""
from __future__ import print_function
import highestPeaks
import parseAllExampleFiles
import hasPeak
import find_abundant_precursors
import compareSpectra
import chromatogram
import accessAllData
import searchScan
def main():
    """Run each example script's main() once, printing its return value."""
    examples = (
        (highestPeaks, 'highestPeaks.py'),
        (parseAllExampleFiles, 'parseAllExampleFiles.py'),
        (hasPeak, 'hasPeak.py'),
        (accessAllData, 'accessAllData.py'),
        (find_abundant_precursors, 'find_abundant_precursors.py'),
        (compareSpectra, 'compareSpectra.py'),
        (chromatogram, 'chromatogram.py'),
        (searchScan, 'searchScan.py'),
    )
    for module, label in examples:
        print(module.main(), label)
# Allow running this module directly as a script.
if __name__ == '__main__':
    main()
|
UTF-8
|
Python
| false | false | 2,012 |
8,650,064,152,527 |
545701d66bb48555da42d5d34fd09d9acd32a5c3
|
fb71aba13c03bd2886c437c118314963e465d5e7
|
/Bono/esqueleto/plot_nbody.py
|
8004083ef5fab8cb86cb93cf29b271f8222c0457
|
[] |
no_license
|
JuanRAlvarez/Tarea-6
|
https://github.com/JuanRAlvarez/Tarea-6
|
4c92973f65916c8caa996c9d6046d67de28b5f8c
|
ab75c7073361f085c2f3dbbc931786eb6f7f125f
|
refs/heads/master
| 2021-05-27T20:17:13.450489 | 2014-04-24T00:09:30 | 2014-04-24T00:09:30 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#Plots data from nbody.c
|
UTF-8
|
Python
| false | false | 2,014 |
3,152,506,030,432 |
f25b889f16e18f3f27fee4160694863450ea395c
|
fc793e20ba6f1c3ac2fc77f7ce608fce1d2c1f7a
|
/autowriter/hpgltext.py
|
d93e11f1ee89e0165d25091a20ad81153827b456
|
[
"LicenseRef-scancode-unknown-license-reference"
] |
non_permissive
|
aalex/autowriter
|
https://github.com/aalex/autowriter
|
42c04a8a7d251b5c86f2b0e2a5084583ea3e922f
|
cee714ce677ab934562116ba2c129b3f6d9e4c91
|
refs/heads/master
| 2021-01-15T17:42:14.170184 | 2013-08-06T17:44:55 | 2013-08-06T17:44:55 | 11,004,381 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
HPGL Text translation and drawing utilities.
Author: Alexandre Quessy
Date: July 2013
"""
from __future__ import print_function
import os
import sys
from twisted.python import log
def utf8_to_number(character):
    """Return the Unicode code point of a UTF-8 encoded character.

    Falls back to 32 (space) when *character* cannot be decoded.
    (Python 2 only: relies on unicode() and the old except syntax.)
    """
    try:
        return ord(unicode(character, "utf-8"))
    except TypeError, e:
        log.msg("Error converting character %s to number: %s " % (character, str(e)))
        return 32 # space
    except UnicodeDecodeError, e:
        # log.msg("Error converting character %s to number: %s " % (character, str(e)))
        # return ord(unicode(character, "utf-8"))
        return 32 # space
        # #return ord(chr(character).decode('utf-8'))
        # try:
        #     utf8_encoded = unichr(character).decode("utf-8")
        #     return utf8_encoded
        # except TypeError, e:
        #     log.msg("Error converting character %s to number: %s " % (character, str(e)))
        #     return 32 # space
        #     #return ord(unicode(character))
def utf8_to_filename(prefix, character):
    """Return the path of the HPGL glyph file for *character* under *prefix*.

    Glyph files are named after the character's code point, preferring the
    "<number>u.hpgl" variant when it exists.  Raises RuntimeError when
    neither file is present.
    """
    EXTENSION = ".hpgl"
    number = str(utf8_to_number(character))
    with_u = os.path.join(prefix, number + "u" + EXTENSION)
    normal = os.path.join(prefix, number + EXTENSION)
    if os.path.exists(with_u):
        return with_u
    if os.path.exists(normal):
        return normal
    raise RuntimeError("File not found: %s" % (normal))
class HPGLCommand(object):
    """
    Stores the infos for a HPGL command.

    Form: two-letter command name, plus optional string arguments.
    """

    def __init__(self, command, arguments):
        self._command = command
        self._arguments = arguments

    def get_command(self):
        return self._command

    def get_arguments(self):
        return self._arguments

    def set_arguments(self, arguments):
        self._arguments = arguments

    def __str__(self):
        # Render as "CCarg1,arg2,...;" — comma-separated args, then ";".
        return self._command + ",".join(self._arguments) + ";"
def hpgl_split(hpgl):
    """
    Splits tokens in a string. Separator is ";" or newline character.
    Empty tokens are discarded.
    """
    tokens = []
    for line in hpgl.splitlines():
        for piece in line.split(";"):
            if piece:
                tokens.append(piece)
    return tokens
def hpgl_command_to_object(token):
    """
    Parse one raw HPGL token into an HPGLCommand instance.

    The first two characters are the mnemonic; the rest is split on commas
    and spaces into the argument list.
    """
    mnemonic = token[:2]
    arguments = []
    for chunk in token[2:].split(","):
        arguments.extend(chunk.split(" "))
    return HPGLCommand(mnemonic, arguments)
def translate_pa(pa_command, translate_x, translate_y):
    """
    Translates a HPGLCommand PA's arguments by some XY offset.
    Does not currently support floats, just integers.

    Arguments alternate X, Y, X, Y, ...; even positions get translate_x,
    odd positions get translate_y.  Mutates and returns *pa_command*.
    """
    shifted = []
    for position, raw in enumerate(pa_command.get_arguments()):
        coordinate = int(raw)
        if position % 2 == 0:
            coordinate += translate_x
        else:
            coordinate += translate_y
        shifted.append(str(coordinate))
    pa_command.set_arguments(shifted)
    return pa_command
def draw_character(character, font_dir, offset_x, offset_y):
    """
    Draws a HPGL character.
    return: text of the translated HPGL file.
    param character: utf8 one-character string
    param font_dir: directory containing the HPGL letters
    param offset_x: int
    param offset_y: int
    """
    # Returns "" when the glyph file is missing (RuntimeError is logged).
    result = ""
    try:
        file_name = utf8_to_filename(font_dir, character)
        # NOTE(review): the file handle is never closed — confirm whether a
        # context manager is safe to introduce here.
        open_file = open(file_name, "rU")
        open_file.seek(0)
        hpgl = open_file.read()
        #print(hpgl)
        #print("TRANSLATED:")
        tokens = hpgl_split(hpgl)
        for token in tokens:
            command = hpgl_command_to_object(token)
            # Only PA (plot absolute) commands carry coordinates to shift;
            # all commands are re-serialized into the output.
            if command.get_command() == "PA":
                command = translate_pa(command, offset_x, offset_y)
                result += str(command)
            else:
                # XXX: weird we have to write this line twice. see
                # two lines above
                result += str(command)
    except RuntimeError, e:
        log.msg(str(e))
        pass
        # print(e)
    return result
def text_to_hpgl(text, font_dir, line_height, char_width, topleft):
    """
    Converts a whole text to HPGL.
    Ready to send to the plotter.
    The lines must not be too long.
    param topleft: int. The first line will be at that height. (Y-axis)
    """
    output = ""
    offset_y = topleft
    for line in text.splitlines():
        # The Y coordinate decreases for each successive line.
        offset_y -= line_height
        offset_x = 0
        for letter in line:
            offset_x += char_width
            output += draw_character(letter, font_dir, offset_x, offset_y)
        output += "\n"
    return output
if __name__ == "__main__":
    # Manual test: render one glyph at a given offset and print its HPGL.
    try:
        letter = sys.argv[1] # "X"
        offset_x = int(sys.argv[2]) # 0
        offset_y = int(sys.argv[3]) # 0
        FONT_DIR = "../../fonts/hershey"
        result = draw_character(letter, FONT_DIR, offset_x, offset_y)
        print(result, "", "")
    except IndexError:
        print("Usage: ./to-hpgl.py [letter] [offset_x] [offset_y]")
        sys.exit(1)
|
UTF-8
|
Python
| false | false | 2,013 |
15,375,982,925,223 |
4e747af88015aac8a537084b5649e874b3c78448
|
684cdc4379570cf91d2c93f964dd49944842dc7e
|
/source_hem_label.py
|
36fd69dbbcd94475932c8f650681548086d76e67
|
[] |
no_license
|
KuperbergLab/MEG_scripts
|
https://github.com/KuperbergLab/MEG_scripts
|
8a8f398a83690e347afc3cfb45b9c29ee91842dd
|
56dc4ac4ba73b2e5117712c5b9bbf781263a8a05
|
refs/heads/master
| 2016-08-06T19:25:14.418858 | 2014-01-14T19:54:19 | 2014-01-14T19:54:19 | 1,350,785 | 3 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
====================================================
Outputs the subject mean for each subject in the given ROI in the given time-window
====================================================
The label is not subject-specific but defined from fsaverage brain
"""
# Author: Alexandre Gramfort <[email protected]>
# modified by Ellen Lau
# License: BSD (3-clause)
print __doc__
import mne
import numpy as np
import scipy
import argparse
import writeOutput
parser = argparse.ArgumentParser(description='Get input')
parser.add_argument('protocol',type=str)
parser.add_argument('label',type=str)
parser.add_argument('set',type=str)
parser.add_argument('t1',type=float)
parser.add_argument('t2',type=float)
args=parser.parse_args()
baseline=100 #ms
data_path = '/cluster/kuperberg/SemPrMM/MEG/results/source_space/ga_stc'
sample1 = int( round( (args.t1+baseline)/1.6667 ) )
sample2 = int( round( (args.t2+baseline)/1.6667 ) )
subjects = [1, 3, 4, 6, 9, 12, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33]
if args.protocol == 'MaskedMM':
subjects = [6, 9, 12, 13, 15, 16, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 30, 31, 32, 33]
hemList = ['lh', 'rh']
valuesHem = []
for hem in hemList:
stcs_fname = ['/cluster/kuperberg/SemPrMM/MEG/data/ya%d/ave_projon/stc/ya%d_%s_All_c%sM-spm-%s.stc' % (s, s, args.protocol,args.set,hem) for s in subjects]
#stcs_fname = ['/cluster/kuperberg/SemPrMM/MEG/data/ya%d/ave_projon/stc/ya%d_%s_AllUnrelated_c%sM-spm-%s.stc' % (s, s, args.protocol,args.set,hem) for s in subjects]
#label = 'BaleenHP_c1_c2_350-450_cluster0-'+hem
label = args.label+hem
label_fname = data_path + '/label/%s.label' % label
print label
#label = 'G_front_inf-Triangul-'+hem
#label = 'G_front_inf-Opercular-'+hem
#label = 'G_front_inf-Orbital-'+hem
#label = 'G_temp_sup-Lateral-'+hem
#label = 'G_temporal_middle-'+hem
#label = 'Pole_temporal-'+hem
#label = 'S_temporal_sup-'+hem
#label = 'G_pariet_inf-Angular-'+hem
#values, times, vertices = mne.label_time_courses(label_fname, stc_fname)
#vtv = [mne.label_time_courses(label_fname, stc_fname) for stc_fname in stcs_fname]
valuesAll = []
for stc_fname in stcs_fname:
values, times, vertices = mne.label_time_courses(label_fname, stc_fname)
values = np.mean(values,0)
values = values[sample1:sample2]
values = np.mean(values,0)
#print values
valuesAll.append(values)
valuesHem.append(valuesAll)
print "mean",np.mean(valuesAll)
outTable = []
for x in range(len(stcs_fname)):
temp = []
temp.append(valuesHem[0][x])
temp.append(valuesHem[1][x])
temp.append(valuesHem[0][x]-valuesHem[1][x])
outTable.append(temp)
outFile = '/cluster/kuperberg/SemPrMM/MEG/results/source_space/hem_measures/ya.n'+str(len(stcs_fname))+'.'+args.protocol+'_c'+args.set+'_'+args.label+str(int(args.t1))+'-'+str(int(args.t2))+'.txt'
writeOutput.writeTable(outFile, outTable)
|
UTF-8
|
Python
| false | false | 2,014 |
17,265,768,568,788 |
5c2ec8ebe466021d939c11b11ea9bc7008be29dc
|
07091f53e29efabba7e9a13b9b28651fe85c7912
|
/scripts/loot/lootItems/rarelootchest/mynock_costume_instructions.py
|
72f76e42b906a487b0f37075c0d9a9e3a3fa3f62
|
[
"LGPL-3.0-only",
"GPL-1.0-or-later"
] |
non_permissive
|
Undercova/NGECore2
|
https://github.com/Undercova/NGECore2
|
377d4c11efba071e313ec75b3c2d864089733dc4
|
16d52e678201cab7c6e94924050ae1fc4a40de95
|
refs/heads/master
| 2019-01-03T17:35:40.610143 | 2014-11-09T03:34:03 | 2014-11-09T03:34:03 | 26,386,905 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def itemTemplate():
    """Return the object template(s) for this loot item."""
    # TODO: this still points at a placeholder; the correct iff is pending.
    return ['object/tangible/item/costume_kit/shared_costume_deed.iff']
|
UTF-8
|
Python
| false | false | 2,014 |
11,708,080,854,105 |
17a7801dfa2d7b704b07e1f54a4cf19d7817e3cf
|
135b9177ff0669d1003868fdd362fa5e70a36526
|
/main.py
|
0ae9b0c7ef6715bdbe737760ed791077258b8f38
|
[
"MIT"
] |
permissive
|
Volshebnyi/alchemy
|
https://github.com/Volshebnyi/alchemy
|
30672bc1746694348262cc0e9048d1168d64ae3e
|
491c9f8fab389c6ba4d00e1f69cc1309ff7d4f91
|
refs/heads/master
| 2021-01-10T20:13:10.588589 | 2014-08-19T19:08:14 | 2014-08-19T19:08:14 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/kivy
__version__ = '1.0'
import kivy
import random
import math
kivy.require('1.0.6')
from kivy.app import App
from kivy.uix.button import Button
from kivy.uix.widget import Widget
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.label import Label
from kivy.uix.popup import Popup
from kivy.graphics import Color, Rectangle, Point, GraphicException, Line
from kivy.graphics.instructions import InstructionGroup
from kivy.clock import Clock
from kivy.uix.screenmanager import ScreenManager, Screen
from math import sqrt
import levels
class LayoutHelper(object):
    """Mixin adding id-based child lookup/creation to Kivy containers."""

    def get_id(self, id):
        """Return the direct child whose ``id`` equals ``id``, or None."""
        matches = (c for c in self.children if c.id and c.id == id)
        return next(matches, None)

    def get_or_create(self, id, cls, *args, **kwargs):
        """Return the child with ``id``; build and attach one if absent."""
        existing = self.get_id(id)
        if existing:
            return existing
        kwargs['id'] = id
        created = cls(*args, **kwargs)
        self.add_widget(created)
        return created
class ItemWidget(Widget):
    """Draggable board widget wrapping a single alchemy ``item``.

    Renders the item as a Button, supports drag & drop (dropping onto other
    items triggers a reaction in the parent layout), tap-to-open an actions
    popup, and fade/ease animations driven by the Kivy clock.
    """

    # Items are drawn slightly translucent; fade-in stops at this opacity.
    max_opacity = 0.8

    def __init__(self, item, *args, **kwargs):
        super(ItemWidget, self).__init__(*args, **kwargs)
        self.item = item
        self.size_hint = (None, None)
        self.button = Button(
            text=item.title,
            size_hint = (None, None),
            pos=self.pos,
        )
        self.set_pos(self.item.pos)
        self.add_widget(self.button)
        # Drag bookkeeping: where the touch started and where it last was.
        self.touch_start_pos = None
        self.touch_last_pos = None
        self.speed = (0.0, 0.0)
        # Clock events for the ease/fade animations (None while inactive).
        self.ease_interval = None
        self.fade_interval = None
        self.opacity = self.max_opacity
        self.actions = None
        self.popup = None

    def get_pos(self):
        """Return the item's logical (center) position."""
        return self.item.pos

    def set_pos(self, pos):
        """Move the widget so its center sits at ``pos`` (model coordinates)."""
        self.item.pos = pos
        # Widget.pos is the bottom-left corner, so offset by half the size.
        self.pos = list(map(int, (
            pos[0] - self.size[0] / 2,
            pos[1] - self.size[1] / 2,
        )))
        self.button.pos = self.pos

    def mod_pos(self, pos):
        """Translate the widget by the (dx, dy) offset ``pos``."""
        pos = (
            self.get_pos()[0] + pos[0],
            self.get_pos()[1] + pos[1],
        )
        return self.set_pos(pos)

    def on_touch_down(self, touch):
        super(ItemWidget, self).on_touch_down(touch)
        if not self.collide_point(*touch.pos):
            return False
        # Start a drag: remember the origin and claim the touch.
        self.touch_start_pos = touch.pos
        self.touch_last_pos = touch.pos
        self.bring_to_front()
        touch.grab(self)
        return True

    def on_touch_move(self, touch):
        if touch.grab_current is not self:
            return
        # Follow the finger by the delta since the last move event.
        self.mod_pos((
            touch.x - self.touch_last_pos[0],
            touch.y - self.touch_last_pos[1],
        ))
        self.touch_last_pos = touch.pos

    def on_touch_up(self, touch):
        if touch.grab_current is not self:
            return
        touch.ungrab(self)
        if touch.pos == self.touch_start_pos:
            # Touch never moved: treat as a tap and show actions, if any.
            actions = self.item.get_actions()
            if actions:
                return self.show_actions()
        # Otherwise it was a drop: react with every item under the touch.
        items = []
        for child in self.parent.children:
            if child.collide_point(*touch.pos):
                items.append(child.item)
        if len(items) > 1:
            self.parent.react(items)
        super(ItemWidget, self).on_touch_up(touch)
        return True

    def show_actions(self):
        """Open a popup listing the item's available actions."""
        actions = self.item.get_actions()
        self.popup = ActionsPopup()
        self.popup.title = self.item.get_title()
        self.popup.ids.description.text = self.item.get_description()
        for action_name, method in actions:
            self.popup.ids.actions.add_widget(Button(
                text=action_name,
                on_press=self.make_action(method),
            ))
        self.popup.open()

    def make_action(self, action):
        """Wrap an action callback for use as a Button on_press handler."""
        def wrapper(instance):
            result = action()
            if not result:
                # Falsy result means the action completed: close the popup.
                self.popup.dismiss()
            self.update_parent()
        return wrapper

    def update_parent(self):
        # Ask the board layout to re-sync widgets with the model space.
        self.parent.update()

    def bring_to_front(self):
        """Re-add self so it is drawn above its siblings."""
        parent = self.parent
        if parent.children[0] is self:
            return
        parent.remove_widget(self)
        parent.add_widget(self)

    def fadein(self):
        """Animate opacity from 0 up to max_opacity."""
        self.opacity = 0.0
        self.fade_interval = Clock.schedule_interval(self._fadein, 0.02)

    def _fadein(self, dt):
        self.opacity += 0.05
        if self.opacity > self.max_opacity:
            # BUGFIX: original used '==' (a no-op comparison); clamp properly.
            self.opacity = self.max_opacity
            self.fade_interval.cancel()
            return False
        return True

    def remove(self):
        self.parent.remove_widget(self)

    def fadeout(self):
        """Animate opacity down to 0, then detach from the parent."""
        self.fade_interval = Clock.schedule_interval(self._fadeout, 0.02)

    def _fadeout(self, dt):
        self.opacity -= 0.05
        if self.opacity < 0:
            self.fade_interval.cancel()
            self.parent.remove_widget(self)
        return True

    def ease_random(self):
        """Nudge the widget one widget-size in a random direction that keeps
        it inside the parent layout."""
        angle_found = False
        while not angle_found:
            module = sum(self.size) / 2
            angle = random.uniform(0, 2 * math.pi)
            ease = (module * math.sin(angle), module * math.cos(angle))
            pos = self.get_pos()
            pos_future = (ease[0] + pos[0], ease[1] + pos[1])
            if self.check_pos(pos_future):
                angle_found = True
        self.ease(ease)

    def check_pos(self, pos):
        """Return True if a center at ``pos`` keeps the widget fully inside
        the parent's bounds."""
        if pos[0] < self.size[0] / 2:
            return False
        if pos[1] < self.size[1] / 2:
            return False
        if pos[0] > self.parent.size[0] - self.size[0] / 2:
            return False
        if pos[1] > self.parent.size[1] - self.size[1] / 2:
            return False
        return True

    def ease(self, pos_delta):
        """Smoothly translate the widget by ``pos_delta`` over several ticks."""
        self.pos_delta = pos_delta
        self.ease_interval = Clock.schedule_interval(self._ease, 0.02)

    def _ease(self, dt):
        # Move 10% of the remaining delta each tick (exponential decay).
        self.mod_pos(list(map(lambda x: x * 0.1, self.pos_delta)))
        self.pos_delta = list(map(lambda x: x * 0.9, self.pos_delta))
        if all([abs(i) < 0.1 for i in self.pos_delta]):
            self.pos_delta = (0.0, 0.0)
            self.ease_interval.cancel()
            return False
        return True
class MainLayout(FloatLayout):
    # Root layout; its widget tree is presumably defined in the kv file.
    pass
class ActionsPopup(Popup):
    # Popup shown by ItemWidget.show_actions; layout defined in the kv file
    # (it exposes 'description' and 'actions' ids).
    pass
class SpaceLayout(FloatLayout):
    """Board layout showing every item of one level's space as an ItemWidget."""

    def __init__(self, level, *args, **kwargs):
        super(SpaceLayout, self).__init__(*args, **kwargs)
        self.level = level
        self.space = level.space
        # Maps model items to their on-screen ItemWidget.
        self.items = {}

    def on_touch_down(self, touch):
        # Give child widgets first shot at the touch.
        result = super(SpaceLayout, self).on_touch_down(touch)
        if result:
            return True
        # Taps on empty space go to the level logic; redraw if it reacted.
        if self.level.on_tap(touch.x, touch.y):
            self.update()
        return False

    def update(self):
        """Sync widgets with the space: add missing ones, drop stale ones."""
        for item in self.space.items:
            if item not in self.items:
                self.widget_add(item)
        # Copy before iterating: item_remove mutates self.items.
        items = list(self.items.items())
        for item, widget in items:
            if item not in self.space.items:
                self.item_remove(item)

    def react(self, items):
        """Run a reaction between items, then animate the products apart."""
        products = self.space.react(items)
        self.update()
        for product in products:
            self.item_product(product)

    def item_remove(self, item):
        # Remove from both the model space and the widget tree.
        if item in self.space.items:
            self.space.remove(item)
        widget = self.items[item]
        widget.remove()
        del self.items[item]

    def widget_add(self, item):
        """Create and fade in a widget for an existing model item."""
        widget = ItemWidget(item)
        self.items[item] = widget
        self.add_widget(widget)
        widget.fadein()
        return widget

    def item_add(self, item_cls, pos):
        """Create a new model item at ``pos`` and show it."""
        item = self.space.add(item_cls, pos)
        widget = ItemWidget(item)
        self.items[item] = widget
        self.add_widget(widget)
        widget.fadein()
        return item

    def get_widget(self, item):
        return self.items[item]

    def item_product(self, item):
        # Scatter freshly produced items so they don't stack on the reagents.
        widget = self.get_widget(item)
        widget.ease_random()
class MenuScreen(Screen):
    # Level-selection screen; content presumably defined in the kv file.
    pass
class LevelScreen(Screen, LayoutHelper):
    """Screen hosting one level; builds its SpaceLayout lazily on first entry."""

    def __init__(self, level_cls, *args, **kwargs):
        super(LevelScreen, self).__init__(*args, **kwargs)
        self.level_cls = level_cls
        # The screen is addressed in the ScreenManager by the level's name.
        self.name = self.level_cls.name

    def on_pre_enter(self):
        # Instantiate the level the first time the screen is shown.
        if self.get_id('space') is not None:
            return
        self.level = self.level_cls()
        self.level.get_space()
        self.add_widget(SpaceLayout(self.level, id='space'))
class AlchemyApp(App):
    """Application entry: builds the screen manager with menu + level screens."""

    title = 'Alchemy'
    icon = 'icon.png'

    def build(self):
        manager = ScreenManager()
        manager.add_widget(MenuScreen(name='menu'))
        for level_cls in levels.LEVELS:
            manager.add_widget(LevelScreen(level_cls, name=level_cls.name))
        manager.current = 'menu'
        return manager

    def on_pause(self):
        # Allow the app to be paused (mobile) instead of being killed.
        return True
if __name__ == '__main__':
    # Start the Kivy application.
    AlchemyApp().run()
|
UTF-8
|
Python
| false | false | 2,014 |
893,353,232,531 |
0fc01b44d9ce1a7ab2c4c9808bd65abc40ede7fa
|
6e0c88d5d1d7badae16198a232c6f2c9ca91a3fd
|
/dormserv/cal/forms.py
|
7c9f999e506b0628b8de2a817b7f365106b4b7af
|
[] |
no_license
|
ehg17/dormserv
|
https://github.com/ehg17/dormserv
|
ff934c072f8da1ad1ad3ab0459081472da68a4a9
|
47d2d4830ae3376657a9555f1bb260a6452a889c
|
refs/heads/master
| 2016-08-02T22:05:40.230487 | 2014-02-01T19:10:12 | 2014-02-01T19:10:12 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django import forms
from cal.models import Item
class ItemForm(forms.ModelForm):
    """Form for editing the ``wanted`` flag of a cal Item."""

    class Meta:
        # BUGFIX: a ModelForm must declare its model; ``Item`` was imported
        # but never wired in, so the form failed at instantiation.
        model = Item
        fields = ('wanted',)
|
UTF-8
|
Python
| false | false | 2,014 |
3,040,836,892,498 |
374522270469b3894e1c9c0ef126ff1bc385aca5
|
354a00fbeff55d651199312a301d9772a9ca3c49
|
/source/config.py
|
9018a505f7ac6e257f17664f23badbeea24794bb
|
[] |
no_license
|
stjohnjohnson/alfred-github-workflow
|
https://github.com/stjohnjohnson/alfred-github-workflow
|
bdc0f67590725d8918782e4e2ff16c8e0daba5f8
|
100cecd3841e5c8051840270e3eb34c884e5324c
|
refs/heads/master
| 2016-05-25T11:40:51.046090 | 2013-04-10T20:32:37 | 2013-04-10T20:38:32 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
import getpass

# Alfred workflow identity and GitHub OAuth application settings.
bundle_id = 'nl.jeroenseegers.Alfred.Github'
client_id = 'ac65ae910d43c36eb2d1'
# Scopes requested from GitHub, see:
# http://developer.github.com/v3/oauth/#scopes
oauth_scope = 'user,repo'
github_oauth_url = 'https://github.com/login/oauth/authorize?client_id='+client_id+'&scope='+oauth_scope

# Time to keep data in cache (in seconds)
cache_ttl = 300

# Alfred 2 storage conventions: Caches are volatile (may be wiped),
# Application Support persists (holds the OAuth token).
volatile_dir = '/Users/'+getpass.getuser()+'/Library/Caches/com.runningwithcrayons.Alfred-2/Workflow Data/'+bundle_id+'/'
non_volatile_dir = '/Users/'+getpass.getuser()+'/Library/Application Support/Alfred 2/Workflow Data/'+bundle_id+'/'
token_file = non_volatile_dir+'token.conf'
repo_lists_cache_file = volatile_dir+'repo_lists.cache'

# Each action maps name -> 6-tuple; presumably
# (keyword, description, arg, valid, autocomplete, icon) matching Alfred's
# feedback-item fields — TODO confirm against the feedback-generating code.
guest_actions = {
	'auth':('auth', 'Generate a Github OAuth token', github_oauth_url, 'yes', ' auth', 'icon.png'),
	'token':('token {oauth_token}', 'Store the generated token for use with this workflow', '', 'no', ' token ', 'icon.png')
}
main_actions = {
	'repo':('repo', 'Github repository actions', '', 'no', ' repo ', 'icon.png')
}
repo_actions = {
	'list':('repo list', 'List all your own repositories', '', 'no', ' repo list', 'icon.png'),
	'starred':('repo starred', 'List all your starred repositories', '', 'no', ' repo starred', 'icon.png'),
	'watched':('repo watched', 'List all your watched repositories', '', 'no', ' repo watched', 'icon.png'),
	'all':('repo all', 'List all your own/starred/watched repositories', '', 'no', ' repo all', 'icon.png')
}
|
UTF-8
|
Python
| false | false | 2,013 |
18,519,898,987,597 |
e29f70403d921e0d84012d7651cf6be1d855db73
|
b6b81e73a118553c69a0342eda46fbf666d9d533
|
/hopefully_better_gc/better_gc/third_party_test.py
|
41183a1deb562f9714c589a9242081d3b214fe7f
|
[] |
no_license
|
seckin206/python-gc-adjuster
|
https://github.com/seckin206/python-gc-adjuster
|
4d92387f1eaab78145f811837650115cae328ad9
|
ceb1555a2b83119a252f253161745d091522cd60
|
refs/heads/master
| 2017-12-21T19:44:53.449809 | 2014-06-03T15:20:22 | 2014-06-03T15:20:22 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import sys

# Stand-in "third party" module: announce itself and echo the argv it got.
print("I'm a third party file that no one cares about my internals in this project!")

for argument in sys.argv:
    print(argument)
|
UTF-8
|
Python
| false | false | 2,014 |
19,670,950,231,283 |
ae60a4a7e09792e013a29b0ae2b6a9d037ae7b85
|
61c947fa028aef86d06e804ee8666a6e31166eb3
|
/main.py
|
465d177ccb7323ae77003c34a7746e3d1f2f0bc6
|
[] |
no_license
|
TweededBadger/RedditDailyPoster
|
https://github.com/TweededBadger/RedditDailyPoster
|
50977d3ecbbbc16d90b89ad73059dbf1a23ed173
|
2991ba4cba71c6af092e4ee09ace05f93f36a171
|
refs/heads/master
| 2016-09-06T20:21:39.923817 | 2014-02-24T00:35:07 | 2014-02-24T00:35:07 | 17,120,376 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
__author__ = 'Will Jutsum'
# A quick and dirty script for posting daily to a subreddit
import sqlite3 as sql
import datetime
import RedditPoster
from ConfigParser import ConfigParser
import sys
class main():
def __init__(self):
config = ConfigParser()
config.read("settings.ini")
self.title = config.get('PostInfo','title')
self.content = config.get('PostInfo','content').replace("\n","\n\n")
self.sub = config.get('PostInfo','sub')
self.dateformat = config.get('PostInfo','dateformat')
self.url = "http://www.google.com"
self.extra = ""
self.today = datetime.date.today()
self.yesterday = datetime.date.fromordinal(datetime.date.today().toordinal()-1)
self.con = sql.connect('posts.db',detect_types=sql.PARSE_DECLTYPES)
with self.con:
cur = self.con.cursor()
try:
cur.execute("CREATE TABLE posts(postedTime DATE,url TEXT)")
cur.execute("CREATE TABLE extras(id INTEGER PRIMARY KEY AUTOINCREMENT,txt TEXT, used INTEGER)")
except:
pass
def start(self):
if (self.checktoday()):
self.yesterday_url = self.get_yesterday_url()
self.post_to_reddit()
else:
print "Already posted today"
def checktoday(self):
with self.con:
cur = self.con.cursor()
cur.execute("select postedTime,url from posts WHERE postedTime = ?",(self.today,))
row = cur.fetchall()
return (len(row) == 0)
def get_yesterday_url(self):
with self.con:
cur = self.con.cursor()
cur.execute("select url from posts WHERE postedTime = ?",(self.yesterday,))
row = cur.fetchone()
if row:
return row[0]
else:
return ""
def post_to_reddit(self):
rp = RedditPoster.RedditPoster()
self.extra = self.get_next_extra()
posturl = rp.post(self.sub,self.title,self.content,self.extra,self.yesterday_url,self.today.strftime(self.dateformat))
print posturl
with self.con:
cur = self.con.cursor()
cur.execute("insert into posts(postedTime,url) values (?,?)", (self.today,posturl))
def add_extra(self,txt):
print "Adding: "+ txt
with self.con:
cur = self.con.cursor()
cur.execute("insert into extras(txt,used) values (?,0)", (txt,))
self.list_extra()
def delete_extra(self,id):
print "Deleting ID " + str(id)
with self.con:
cur = self.con.cursor()
cur.execute("delete from extras where id = ?",(id,))
self.list_extra()
def list_extra(self):
with self.con:
cur = self.con.cursor()
cur.execute("select id,txt,used from extras WHERE used = 0")
rows = cur.fetchall()
for row in rows:
print str(row[0]) + " - " + str(row[1]) + " - " + str(row[2])
def get_next_extra(self):
with self.con:
cur = self.con.cursor()
cur.execute("select id,txt,used from extras WHERE used = 0")
row = cur.fetchone()
if row:
cur.execute("UPDATE extras SET used = 1 WHERE id = ?",(row[0],))
return row[1]
else:
return ""
# Command-line dispatch: send | addextra <txt> | deleteextra <id> | list
if len(sys.argv) > 1:
    # NOTE(review): this rebinds the module-level name ``main`` from the
    # class to an instance — confusing, but the class is not used afterwards.
    main = main()
    if sys.argv[1] == "send":
        main.start()
    if sys.argv[1] == "addextra":
        main.add_extra(sys.argv[2])
    if sys.argv[1] == "deleteextra":
        main.delete_extra(sys.argv[2])
    if sys.argv[1] == "list":
        main.list_extra()
|
UTF-8
|
Python
| false | false | 2,014 |
15,341,623,184,195 |
04167e358a09c5a5dbcb56878c9723296fac8e4b
|
1834b4286f77e252a08a14fad3261ece6a1775de
|
/src/test/python/cypher.py
|
cb8a6f37d771a4a599b46ed3a24ae5c47c2c3b44
|
[
"BSD-3-Clause",
"MIT",
"Apache-2.0",
"LicenseRef-scancode-commercial-license",
"LicenseRef-scancode-public-domain",
"GPL-3.0-only",
"GPL-1.0-or-later",
"ANTLR-PD"
] |
non_permissive
|
asonnenschein/python-embedded
|
https://github.com/asonnenschein/python-embedded
|
68cadf815dec0059df3858e59ff5ba9a746d4b67
|
a76def699d60a5ec1c27f81ca8f7744f11132162
|
refs/heads/master
| 2021-01-21T20:07:40.871571 | 2014-11-11T23:16:22 | 2014-11-11T23:16:22 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- mode: Python; coding: utf-8 -*-
# Copyright (c) 2002-2013 "Neo Technology,"
# Network Engine for Objects in Lund AB [http://neotechnology.com]
#
# This file is part of Neo4j.
#
# Neo4j is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import unit_tests
import tempfile, os
class CypherTest(unit_tests.GraphDatabaseTest):
    """Doc/snippet tests for the embedded Cypher query API.

    NOTE: the START/END SNIPPET comments are extracted into the published
    documentation -- do not remove or rename them.
    """

    def test_simple_query(self):
        """Basic query, column access and row iteration."""
        db = self.graphdb
        # START SNIPPET: basicCypherQuery
        result = db.query("START n=node(0) RETURN n")
        # END SNIPPET: basicCypherQuery
        # START SNIPPET: getCypherResultColumn
        root_node = "START n=node(0) RETURN n"

        # Fetch an iterator for the "n" column
        column = db.query(root_node)['n']
        for cell in column:
            node = cell

        # Columns support "single":
        column = db.query(root_node)['n']
        node = column.single
        # END SNIPPET: getCypherResultColumn
        self.assertEquals(0, node.id)
        # START SNIPPET: iterateCypherResult
        root_node = "START n=node(0) RETURN n"

        # Iterate through all result rows
        for row in db.query(root_node):
            node = row['n']

        # We know it's a single result,
        # so we could have done this as well
        node = db.query(root_node).single['n']
        # END SNIPPET: iterateCypherResult
        self.assertEquals(0, node.id)

    def test_list_columns(self):
        """Result objects expose their column names via keys()."""
        db = self.graphdb
        # START SNIPPET: listCypherResultColumns
        result = db.query("START n=node(0) RETURN n,count(n)")

        # Get a list of the column names
        columns = result.keys()
        # END SNIPPET: listCypherResultColumns
        self.assertEquals(columns[0], 'n')
        self.assertEquals(columns[1], 'count(n)')

    def test_parameterized_query(self):
        """Queries accept named parameters as keyword arguments."""
        db = self.graphdb
        # START SNIPPET: parameterizedCypherQuery
        result = db.query("START n=node({id}) RETURN n",id=0)
        node = result.single['n']
        # END SNIPPET: parameterizedCypherQuery
        self.assertEquals(0, node.id)

    def test_resultset_to_list(self):
        """A result set can be materialized with list()."""
        db = self.graphdb
        result = db.query("START n=node({id}) RETURN n",id=0)
        rows = list(result)
        self.assertEquals(1, len(rows))
        self.assertEquals(0, rows[0]['n'].id)

    def test_prepared_queries(self):
        """prepare_query pre-parses a query for repeated execution."""
        db = self.graphdb
        # START SNIPPET: preparedCypherQuery
        get_node_by_id = db.prepare_query("START n=node({id}) RETURN n")

        result = db.query(get_node_by_id, id=0)
        node = result.single['n']
        # END SNIPPET: preparedCypherQuery
        self.assertEquals(0, node.id)

    def test_aggregate_queries(self):
        """COLLECT() aggregates paths; the collection is iterable."""
        db = self.graphdb
        # Build a small graph inside a transaction so it is committed.
        with db.transaction:
            result = db.query('''CREATE p=node-[:Depends_on]->port<-[:Has]-parent1<-[:Has]-parent2,
                                        port<-[:Has]-parent3
                                 RETURN node,p''')
            for row in result:
                node = row['node']

        result = db.query('''
           START n=node({node})
           MATCH p=n-[:Depends_on]->port<-[:Has]-parent
           RETURN COLLECT(p) AS end_points''',node=node)

        end_points = result['end_points'].single

        # Should be able to iterate across them
        count = 0
        for path in end_points:
            count += 1

        # Should have been two of them
        self.assertEquals(count, 2)
if __name__ == '__main__':
    # Run this module's tests directly.
    unit_tests.unittest.main()
|
UTF-8
|
Python
| false | false | 2,014 |
15,710,990,404,043 |
e273ee6cab00a33776dba4ae852adbc738c19aa1
|
76ad5caa219723735960b00139a0f1f4f957efc1
|
/simple_sso/sso_client/views.py
|
751ed89a6e701724da9fb557f58b851fa47b9d5c
|
[
"BSD-3-Clause"
] |
permissive
|
nuklea/django-simple-sso
|
https://github.com/nuklea/django-simple-sso
|
c6a74fba80a823446d8180a53ec32b88cb8ccc48
|
89e897ad97af9bcf50155b9dea0ac20508a509b4
|
refs/heads/master
| 2021-01-18T07:27:50.262903 | 2012-02-09T21:33:19 | 2012-02-09T21:33:19 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
from django.conf import settings
from django.contrib.auth import login
from django.contrib.auth.backends import ModelBackend
from django.http import HttpResponseBadRequest, QueryDict, HttpResponseRedirect
from simple_sso.signatures import build_signature, verify_signature
from simple_sso.sso_client.utils import load_json_user
from urlparse import urljoin, urlparse
import requests
import urllib
BACKEND = ModelBackend()
def get_request_token():
    """
    Ask the SSO server for a Request Token.

    Returns the token string, or False if the request failed or the
    response signature could not be verified.
    """
    signed_params = [('key', settings.SIMPLE_SSO_KEY)]
    # Sign the key alone, then attach the signature to the query.
    signed_params.append(
        ('signature', build_signature(signed_params, settings.SIMPLE_SSO_SECRET)))
    url = urljoin(settings.SIMPLE_SSO_SERVER, 'request-token') + '/'
    response = requests.get(url, params=dict(signed_params))
    if response.status_code != 200:
        return False
    data = QueryDict(response.content)
    if 'signature' not in data or 'request_token' not in data:
        return False
    payload = [(k, v) for k, v in data.items() if k != 'signature']
    if verify_signature(payload, data['signature'], settings.SIMPLE_SSO_SECRET):
        return data['request_token']
    return False
def verify_auth_token(data):
    """
    Verify an Auth Token in a QueryDict against the SSO server.

    Returns a django.contrib.auth.models.User instance if successful,
    False otherwise.
    """
    if 'auth_token' not in data or 'request_token' not in data:
        return False
    query = [('auth_token', data['auth_token']), ('key', settings.SIMPLE_SSO_KEY)]
    # Sign token+key, then attach the signature to the query.
    query.append(('signature', build_signature(query, settings.SIMPLE_SSO_SECRET)))
    verify_url = urljoin(settings.SIMPLE_SSO_SERVER, 'verify') + '/'
    response = requests.get(verify_url, params=dict(query))
    if response.status_code != 200:
        return False
    payload = QueryDict(response.content)
    if 'signature' not in payload or 'user' not in payload:
        return False
    signed = [(k, v) for k, v in payload.items() if k != 'signature']
    if not verify_signature(signed, payload['signature'], settings.SIMPLE_SSO_SECRET):
        return False
    return load_json_user(payload['user'])
def get_next(request):
    """
    Given a request, return the URL the user should be redirected to after
    login. Defaults to '/'.

    Only same-host targets are allowed, to prevent open-redirect abuse.
    """
    # Renamed from ``next`` to avoid shadowing the builtin.
    redirect_to = request.GET.get('next', None)
    if not redirect_to:
        return '/'
    netloc = urlparse(redirect_to)[1]
    # Heavier security check -- don't allow redirection to a different
    # host.
    # Taken from django.contrib.auth.views.login
    if netloc and netloc != request.get_host():
        return '/'
    return redirect_to
def login_view(request):
    """
    Login view.

    Fetches a Request Token, remembers where to send the user afterwards,
    and redirects the browser to the SSO server's authorize endpoint.
    """
    request.session['simple-sso-next'] = get_next(request)
    request_token = get_request_token()
    if not request_token:
        return HttpResponseBadRequest()
    params = [('request_token', request_token), ('key', settings.SIMPLE_SSO_KEY)]
    # Sign token+key, then attach the signature to the redirect query.
    params.append(('signature', build_signature(params, settings.SIMPLE_SSO_SECRET)))
    auth_url = urljoin(settings.SIMPLE_SSO_SERVER, 'authorize') + '/'
    return HttpResponseRedirect('%s?%s' % (auth_url, urllib.urlencode(params)))
def authenticate_view(request):
    """
    Authentication view.

    Verifies the auth token from the SSO server, logs the user in, and
    redirects to the URL stored at login time.
    """
    user = verify_auth_token(request.GET)
    if not user:
        return HttpResponseBadRequest()
    # Attach the backend path that django's login() expects on the user.
    user.backend = "%s.%s" % (BACKEND.__module__, BACKEND.__class__.__name__)
    login(request, user)
    destination = request.session.get('simple-sso-next', '/')
    return HttpResponseRedirect(destination)
|
UTF-8
|
Python
| false | false | 2,012 |
15,247,133,902,483 |
dcfbd38dc275e19bdfab9beccc8699280131600b
|
8c645504affa7b2ef97b738e28be104ae6dca8ae
|
/gui/ajax.py
|
680d4a3ac68a3dc182a125dbe9e0c957e416071c
|
[
"AGPL-3.0-only",
"LicenseRef-scancode-unknown-license-reference",
"GPL-3.0-only"
] |
non_permissive
|
liuyiwei/ChemToolsWebService
|
https://github.com/liuyiwei/ChemToolsWebService
|
beb429871ef237ae10e6e497078518bdbeffd3db
|
55cd3afdf1b35d3235d5ef7a58a7b39e234bd234
|
refs/heads/master
| 2021-01-16T16:59:22.782563 | 2013-04-11T10:34:48 | 2013-04-11T10:34:48 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: UTF-8 -*-
'''
Created on 2013-03-25
@author: tianwei
Desc: This module will be used for ajax request, such as form valid, search
query, calculated submit.
'''
import simplejson
from dajaxice.decorators import dajaxice_register
from backend.logging import logger
from utils.ChemSpiderPy.wrapper import search_cheminfo
@dajaxice_register(method='GET')
@dajaxice_register(method='POST', name="calculate_submit_post")
def calculate_submit(request,
                     smile=None,
                     mol=None,
                     notes=None,
                     name=None,
                     unique_names=None,
                     types="pdf;txt;csv",
                     models=None
                     ):
    """Handle a calculation submit request from the front-end (ajax)."""
    # Log what the client asked for; the calculation itself is not wired up.
    for value in (unique_names, types, models):
        logger.info(value)
    return simplejson.dumps({'message': 'tianwei hello world!'})
@dajaxice_register(method='GET')
@dajaxice_register(method='POST', name="search_varify_post")
def search_varify_info(request, query=None):
    """Look up chemical info for ``query`` via ChemSpider; return JSON."""
    logger.info(query)
    if query is None:
        data = {"is_searched": False,
                "search_result": "None"}
    else:
        data = {"is_searched": True,
                "search_result": search_cheminfo(query.strip())}
    logger.info(data)
    return simplejson.dumps(data)
|
UTF-8
|
Python
| false | false | 2,013 |
16,037,407,899,942 |
617f6448dc20657d946d704a11c1eed131e7777c
|
7a053e02de4fc8e8cbf3dc4f8dc8f8e8ab003166
|
/qPCR_abi.py
|
4979907d811f52a58a13d4f277d0fdef02278052
|
[] |
no_license
|
reinson/qPCR_analysis
|
https://github.com/reinson/qPCR_analysis
|
c3c8004c372b7c9429953a7b39f79b481bddba6f
|
c4659f7183d9241f4d79cc0d4e39ad99a73f961f
|
refs/heads/master
| 2016-09-15T19:35:29.209886 | 2013-08-15T11:10:41 | 2013-08-15T11:10:41 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy as np
import string
def create_plate(nr_of_columns):
    """Return a fresh plate: rows 'A'..'P' each mapped to a list of zeros."""
    letters = "ABCDEFGHIJKLMNOP"
    return {row: [0] * nr_of_columns for row in letters}
def pp(plate,**kw):
#prints out values of a plate, one row at a time,
#or only one value from each list if additional arguments are given
if kw:
for row in plate:
accumulator = []
for element in plate[row]:
accumulator.append(element[kw["arg"]])
print accumulator
else:
for row in plate:
print row, plate[row]
def test_value(value):
try:
result = float(value)
except ValueError as ve:
return ''
except TypeError as te:
raise TypeError('The value supplied cannot be turned into a float:', value)
except Exception as e:
raise Exception('Unexpected error converting the following to a float:', value)
if result == 0:
return ''
return result
def calculate(values, fn):
    """Apply ``fn`` to values, or return '' if any value is missing/falsy."""
    if all(values):
        return fn(values)
    return ""
def analyze_triplicate(triplicate, allowed_std = 0.5 ,std_difference = 2):
    """Analyze one qPCR triplicate (three Ct values).

    Returns a list of:
      0. Average of three ('' if some values are missing)
      1. Standard deviation of three ('' if some values are missing)
      2. Average of best pair ('' if not possible)
      3. Standard deviation of best pair
      4. Existing values (e.g. 'ABC', 'AC')
      5. Values taken into account for the final average (e.g. 'abc', 'ab')
      6. Final average: the three-value mean if its std is below
         ``allowed_std``; otherwise the best pair's mean if its std is below
         ``allowed_std`` AND at least ``std_difference`` times smaller than
         the three-value std; otherwise ''
      7. Final std (matching 6)
      8. List of all the values in the triplicate
    """
    a,b,c = triplicate
    a,b,c = map(test_value,[a,b,c])

    # Record which of the three wells produced a usable reading.
    existing_values = ""
    if a: existing_values += "A"
    if b: existing_values += "B"
    if c: existing_values += "C"

    if a and b and c:
        #if all three values are present find average of three and standard deviation
        av3 = round(calculate([a,b,c],np.average),2)
        std3 = round(calculate([a,b,c],np.std),2)

        #find averages and standard deviations for all the possible pairs
        pairs = [a,b], [a,c], [b,c]
        pairs_av = [round(calculate(x,np.average),2) for x in pairs]
        pairs_std = [round(calculate(x,np.std),2) for x in pairs]
        pairs_combined = zip(pairs_std,pairs_av,["ab","ac","bc"])

        #sort pairs based on standard deviations (first element in list)
        pairs_combined = sorted(pairs_combined)
        std2, av2, pair_vtia = pairs_combined[0]

    else:
        #if some values from the triplicate are missing do the following
        av3, std3 = "", ""
        pair_dict = {"AB":[a,b], "AC":[a,c], "BC":[b,c]}
        if existing_values in pair_dict:
            #if two values are present
            av2 = round(np.average(pair_dict[existing_values]),2)
            std2 = round(np.std(pair_dict[existing_values]),2)
        else:
            #if more than one value is missing, then set values to "":
            av2, std2 = ["",""]

    #Following code is necessary to find final average and std and values taken into account
    if existing_values == "ABC":
        #if all three values are present
        if std3 < allowed_std:
            final_av, final_std, vtia = av3, std3, "abc"
        elif std2 < allowed_std and std3/std_difference > std2:
            final_av, final_std, vtia = av2, std2, pair_vtia
        else:
            final_av, final_std, vtia = "", "", ""
    elif av2:
        if std2 < allowed_std:
            final_av, final_std, vtia = av2, std2, existing_values.lower()
        else:
            final_av, final_std, vtia = "", "", ""
    else:
        final_av, final_std, vtia = "", "", ""

    return [av3, std3, av2, std2, existing_values, vtia, final_av, final_std,[a,b,c]]
#print cal_tri (['22.966892', '20.785282', '22.531816'])
#for i in [0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1]:
# print cal_tri([(22+i),22.5,21.5])
|
UTF-8
|
Python
| false | false | 2,013 |
5,145,370,847,096 |
b070759a3cc3aa263bd200e73c969adb85cfa8b9
|
b58aad4fe3ba03112bc58ec69f7c3490948a46e9
|
/geo/util.py
|
efa2ea168d9012fff441388e25d7abc4ee362dd3
|
[] |
no_license
|
ziliangdotme/geo-util
|
https://github.com/ziliangdotme/geo-util
|
151d52a1da323039315fac0ffa660a5f0c248f76
|
8a7a90e76f225cdaf5e2f3fa100e0fd0a2537b8c
|
refs/heads/master
| 2021-01-10T19:10:23.049105 | 2014-08-24T16:19:47 | 2014-08-24T16:19:47 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def get_sunrise_by_coord(x, y):
    """Return the sunrise time for the coordinates (x, y).

    Not yet implemented.

    Raises:
        NotImplementedError: always, until an implementation is provided.
    """
    # Fix: `UnimplementedException` is not defined anywhere, so calling this
    # previously died with a NameError instead of a meaningful error.
    raise NotImplementedError("get_sunrise_by_coord is not implemented yet")
def get_coord_by_city(city_name):
    """Return the coordinates for *city_name*.

    Not yet implemented.

    Raises:
        NotImplementedError: always, until an implementation is provided.
    """
    # Fix: `UnimplementedException` is not defined anywhere, so calling this
    # previously died with a NameError instead of a meaningful error.
    raise NotImplementedError("get_coord_by_city is not implemented yet")
|
UTF-8
|
Python
| false | false | 2,014 |
9,371,618,663,986 |
3b34dd76a556ab02090e59230e28708c3311d5c3
|
6124e8bf48a8f0977f2b5e9fe5a729d22ab2d81a
|
/55/main.py
|
f9748f67234cffbe30298815fc7a4a37bf2bdf39
|
[] |
no_license
|
heledar/ProjectEuler
|
https://github.com/heledar/ProjectEuler
|
98d34257418dbea9ef0e54e3c578c9ca01855c25
|
2bb1202669959884082b0cb1a4b9f9d322afd99d
|
refs/heads/master
| 2021-01-25T06:05:54.750047 | 2014-11-05T21:43:18 | 2014-11-05T21:43:18 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from euler import isPalyndromic
import time
startTime = time.time()
ans = 0
for i in xrange(1, 10000):
tmp = i
for j in xrange(50):
tmp += int(str(tmp)[::-1])
if isPalyndromic(tmp):
break
if not isPalyndromic(tmp):
ans += 1
endTime = time.time()
print "The number of Lychrel numbers below 10000 is : "+str(ans)
print "Computation took "+str(endTime-startTime)+" seconds"
|
UTF-8
|
Python
| false | false | 2,014 |
14,035,953,145,173 |
b7513764f035659755aed9430ddcd55e2f066cd3
|
31c679c0d8dda273478847a4641af9f2dc0dbdd6
|
/coupons/api_urls.py
|
31327812da5e6a1fb83e389492e2a5b9cb306351
|
[] |
no_license
|
beforebeta/pennywyse
|
https://github.com/beforebeta/pennywyse
|
09b0b58aea7b54f045f28f561e2fe15a0f400f78
|
cec6946e5eba8d11cb807838e9c0587e35cecfac
|
refs/heads/master
| 2021-01-21T12:06:39.407603 | 2014-04-15T23:31:03 | 2014-04-15T23:31:03 | 11,926,795 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.conf.urls import patterns, include, url
from api.mobile_api import MobileResource
# A single shared resource instance; its bound methods act as view callables.
mobile_resource = MobileResource()
# API
# v3 mobile API endpoints.  Patterns are prefix matches with no end anchor,
# so '^v3/deals' must come before '^v3/deal' to win for "deals" requests.
urlpatterns = patterns('',
    url(r'^v3/deals', mobile_resource.deals_return_response),
    url(r'^v3/deal', mobile_resource.single_deal_return_response),
    url(r'^v3/localinfo', mobile_resource.localinfo_return_response),
)
|
UTF-8
|
Python
| false | false | 2,014 |
16,750,372,477,702 |
4bbb0979fcc811f3eac213ad6bbb72470d59733e
|
8a34f3ffd22ec9c7cce2535503cadedbbd4eb0a1
|
/app.py
|
2a85c075f0f0aec1baa33c67469543a046750747
|
[] |
no_license
|
kristjanjansen/environmental_notices_fusion
|
https://github.com/kristjanjansen/environmental_notices_fusion
|
111909804f7e8e9c3c0bb50e452cf5b2c8bed759
|
5eebd42060bf012527a5f7e35663da33c5c09b34
|
refs/heads/master
| 2020-04-11T09:28:29.573627 | 2012-05-10T14:18:37 | 2012-05-10T14:18:37 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from flask import Flask, redirect, render_template, jsonify
import os
app = Flask(__name__)
# Redirecting to static frontend
@app.route('/')
def index():
    # Serve the static frontend entry page.
    return render_template('index.html');
# Populating frontend conf file with environment variables
@app.route('/config.json')
def conf():
    # Expose the GOOGLE_FUSION_ID environment variable to the frontend as JSON.
    # Raises KeyError (-> HTTP 500) if the variable is unset.
    return jsonify(GOOGLE_FUSION_ID=os.environ['GOOGLE_FUSION_ID']);
if __name__ == '__main__':
    # Bind on all interfaces; port from $PORT (Heroku-style) or 5000.
    port = int(os.environ.get('PORT', 5000))
    app.run(host='0.0.0.0', port=port)
|
UTF-8
|
Python
| false | false | 2,012 |
3,770,981,307,658 |
d4f6b48453180b01b572587d5e404201668e258b
|
ce3964c7195de67e07818b08a43286f7ec9fec3e
|
/loop_kpt_snapshots.py
|
51e7a4f0ae1132d598d8ca744792d3376714a426
|
[] |
no_license
|
zhuligs/physics
|
https://github.com/zhuligs/physics
|
82b601c856f12817c0cfedb17394b7b6ce6b843c
|
7cbac1be7904612fd65b66b34edef453aac77973
|
refs/heads/master
| 2021-05-28T07:39:19.822692 | 2013-06-05T04:53:08 | 2013-06-05T04:53:08 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
"""
loop_kpt_snapshots.py
Author: Brian Boates
Select snapshots randomly from a wrapped xyz file and convert
to POSCAR's, create number directories and submit jobs on brasdor
run from within a TEMPK/1.rs/ directory (i.e. 1000K/1.40/)
"""
import os, sys, commands, glob, random
def main():
    """Pick random snapshots from a wrapped xyz file, build POSCAR run
    directories from a template, and submit VASP jobs via qsub.

    Usage: loop_kpt_snapshots.py wrapped.xyz alat(Ang) ay/ax az/ax nruns start_index
    """
    # Retrieve user input
    try:
        fname = sys.argv[1]
        f = open(fname,'r')
        lines = f.readlines()
        # xyz format: first line is the atom count; each frame is natom+2 lines.
        natom = int(lines[0].strip())
        nConfigs = int( len(lines) / (natom+2.0) )
        alat = sys.argv[2]
        ay_over_ax = sys.argv[3]
        az_over_ax = sys.argv[4]
        nruns = int(sys.argv[5])
        launch = int(sys.argv[6])
    except:
        print '\n usage: '+sys.argv[0]+' wrapped.xyz alat(Ang) ay/ax az/ax nruns starting_index_for_dirname\n'
        sys.exit(0)
    # Generate list of new directories for calculations
    dir_nums = range(launch,launch+nruns)
    dirs = ['0'+str(d) for d in dir_nums]
    # Create existing list of directories for checks
    old = glob.glob('0*')
    # Proceed with random selections and submissions
    cwd = os.getcwd()
    for d in dirs:
        if d not in old:
            os.mkdir(d)
            os.system('cp -rf ../template/* '+d)
            os.chdir(d)
            # Retry until select_snapshot.py succeeds (exit status 0); the
            # snapshot index is drawn from the last 90% of the trajectory.
            k = 1
            while k != 0:
                snapshot = int( (0.10 + 0.90*random.random()) * nConfigs )
                k = os.system('select_snapshot.py ../'+fname+' '+str(snapshot)+' >& select_snapshot.out')
            os.system('rm -f select_snapshot.out')
            os.system('POSCAR_from_xyz_general.py snapshot.xyz '+alat+' '+ay_over_ax+' '+az_over_ax+' y')
            os.chdir(cwd)
        else:
            print '\n Directory', d, 'already exists - skipping...\n'
    # NOTE(review): presumably a pause so the scheduler/filesystem settles
    # before submission -- confirm why 61 seconds specifically.
    os.system('sleep 61s')
    # Submit a job from each freshly created directory.
    for d in dirs:
        if d not in old:
            os.chdir(d)
            os.system('qsub vasp.brasdor.s')
            os.chdir(cwd)
if __name__ == '__main__':
    main()
|
UTF-8
|
Python
| false | false | 2,013 |
2,207,613,215,195 |
dd157b1d89bdbc3e9965e602fe621e1b4c426b8e
|
405ca58dc964381df9758a7af8875a5dadd4b485
|
/Helpviewer/Passwords/Ui_PasswordsDialog.py
|
0b26fef716bfc7ba84da9a67eb1912f26521e9c5
|
[
"GPL-1.0-or-later",
"GPL-3.0-only"
] |
non_permissive
|
davy39/eric
|
https://github.com/davy39/eric
|
28fb96974571afbac117070855bf2fc7d44f8d85
|
96d249819dda0764e93914c1a0a94644f7c81df4
|
refs/heads/master
| 2016-09-05T17:35:51.773838 | 2014-12-31T09:14:27 | 2014-12-31T09:14:27 | 28,652,633 | 20 | 15 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file './Helpviewer/Passwords/PasswordsDialog.ui'
#
# Created: Tue Nov 18 17:53:58 2014
# by: PyQt5 UI code generator 5.3.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_PasswordsDialog(object):
    """Generated UI layout for the Saved Passwords dialog.

    NOTE: this class was emitted by the PyQt5 UI compiler (see the file
    header) -- regenerate from PasswordsDialog.ui instead of hand-editing.
    """
    def setupUi(self, PasswordsDialog):
        """Build widgets, layouts, signal hookups and tab order on *PasswordsDialog*."""
        PasswordsDialog.setObjectName("PasswordsDialog")
        PasswordsDialog.resize(500, 350)
        PasswordsDialog.setSizeGripEnabled(True)
        self.verticalLayout = QtWidgets.QVBoxLayout(PasswordsDialog)
        self.verticalLayout.setObjectName("verticalLayout")
        # Top row: right-aligned search box.
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_2.addItem(spacerItem)
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setSpacing(0)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.searchEdit = E5ClearableLineEdit(PasswordsDialog)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.searchEdit.sizePolicy().hasHeightForWidth())
        self.searchEdit.setSizePolicy(sizePolicy)
        self.searchEdit.setMinimumSize(QtCore.QSize(300, 0))
        self.searchEdit.setObjectName("searchEdit")
        self.horizontalLayout.addWidget(self.searchEdit)
        self.horizontalLayout_2.addLayout(self.horizontalLayout)
        self.verticalLayout.addLayout(self.horizontalLayout_2)
        # Central sortable table of saved passwords.
        self.passwordsTable = E5TableView(PasswordsDialog)
        self.passwordsTable.setAlternatingRowColors(True)
        self.passwordsTable.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
        self.passwordsTable.setTextElideMode(QtCore.Qt.ElideMiddle)
        self.passwordsTable.setShowGrid(False)
        self.passwordsTable.setSortingEnabled(True)
        self.passwordsTable.setObjectName("passwordsTable")
        self.verticalLayout.addWidget(self.passwordsTable)
        # Bottom row: remove buttons, spacer, show/hide-passwords toggle.
        self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_3.setObjectName("horizontalLayout_3")
        self.removeButton = QtWidgets.QPushButton(PasswordsDialog)
        self.removeButton.setAutoDefault(False)
        self.removeButton.setObjectName("removeButton")
        self.horizontalLayout_3.addWidget(self.removeButton)
        self.removeAllButton = QtWidgets.QPushButton(PasswordsDialog)
        self.removeAllButton.setAutoDefault(False)
        self.removeAllButton.setObjectName("removeAllButton")
        self.horizontalLayout_3.addWidget(self.removeAllButton)
        spacerItem1 = QtWidgets.QSpacerItem(208, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_3.addItem(spacerItem1)
        self.passwordsButton = QtWidgets.QPushButton(PasswordsDialog)
        self.passwordsButton.setText("")
        self.passwordsButton.setObjectName("passwordsButton")
        self.horizontalLayout_3.addWidget(self.passwordsButton)
        self.verticalLayout.addLayout(self.horizontalLayout_3)
        self.buttonBox = QtWidgets.QDialogButtonBox(PasswordsDialog)
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Close)
        self.buttonBox.setObjectName("buttonBox")
        self.verticalLayout.addWidget(self.buttonBox)
        self.retranslateUi(PasswordsDialog)
        self.buttonBox.accepted.connect(PasswordsDialog.accept)
        self.buttonBox.rejected.connect(PasswordsDialog.reject)
        QtCore.QMetaObject.connectSlotsByName(PasswordsDialog)
        PasswordsDialog.setTabOrder(self.searchEdit, self.passwordsTable)
        PasswordsDialog.setTabOrder(self.passwordsTable, self.removeButton)
        PasswordsDialog.setTabOrder(self.removeButton, self.removeAllButton)
        PasswordsDialog.setTabOrder(self.removeAllButton, self.passwordsButton)
        PasswordsDialog.setTabOrder(self.passwordsButton, self.buttonBox)
    def retranslateUi(self, PasswordsDialog):
        """Install translatable window title, button labels and tooltips."""
        _translate = QtCore.QCoreApplication.translate
        PasswordsDialog.setWindowTitle(_translate("PasswordsDialog", "Saved Passwords"))
        self.searchEdit.setToolTip(_translate("PasswordsDialog", "Enter search term"))
        self.removeButton.setToolTip(_translate("PasswordsDialog", "Press to remove the selected entries"))
        self.removeButton.setText(_translate("PasswordsDialog", "&Remove"))
        self.removeAllButton.setToolTip(_translate("PasswordsDialog", "Press to remove all entries"))
        self.removeAllButton.setText(_translate("PasswordsDialog", "Remove &All"))
        self.passwordsButton.setToolTip(_translate("PasswordsDialog", "Press to toggle the display of passwords"))
from E5Gui.E5TableView import E5TableView
from E5Gui.E5LineEdit import E5ClearableLineEdit
|
UTF-8
|
Python
| false | false | 2,014 |
10,153,302,734,477 |
94a272843a3be93ab30a900d1059021b8230ae71
|
423747cd11ae03aec3d5906dfafe807f495a3ee6
|
/Script/aceutils.py
|
58627a7488b9b37eaa5045241e22417e34735ea6
|
[
"Zlib",
"MS-PL",
"MIT"
] |
permissive
|
rydotyosh/amusement-creators-engine
|
https://github.com/rydotyosh/amusement-creators-engine
|
a2cd15804711d4d5e0cd11bce210782a9a84e398
|
a04bde2e636d235254fcda4b15acd6ed75c98082
|
refs/heads/master
| 2017-05-02T22:56:44.203754 | 2014-12-07T14:53:11 | 2014-12-07T14:53:11 | 21,535,629 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import sys
import os
import shutil
import subprocess
import os
import os.path
import zipfile
import platform
import os.path
def get_files(path):
    """Return a list of all file paths under *path*, recursively.

    Only files are returned, never directories.

    Note: os.walk replaces the previous hand-rolled recursive listdir
    helper -- same set of results with less code and no recursion-depth
    concerns.  Result ordering may differ slightly (walk is top-down);
    the in-repo caller (copytreeWithExt) is order-insensitive.
    """
    ret = []
    for dirpath, _dirnames, filenames in os.walk(path):
        for name in filenames:
            ret.append(os.path.join(dirpath, name))
    return ret
def copytreeWithExt(src,dst,exts):
    """Copy every file under *src* whose extension is in *exts* to the
    mirrored location under *dst*, creating directories as needed."""
    for source_path in get_files(src):
        _root, ext = os.path.splitext(source_path)
        if ext not in exts:
            continue
        # Mirror the path by swapping the src prefix for dst.
        target_path = dst + source_path[len(src):]
        print(source_path + '\t:\t' + target_path)
        target_dir = os.path.dirname(target_path)
        if not os.path.exists(target_dir):
            os.makedirs(target_dir)
        shutil.copy(source_path, target_path)
def isWin():
    """Return True when running on Windows."""
    return 'Windows' == platform.system()
def call( cmd ):
    """Echo and run *cmd* through the system shell; return its exit code.

    Fix: the exit status was computed but silently dropped (the function
    returned None); it is now returned so callers can detect failures.

    NOTE(review): shell=True hands the string to the shell -- only pass
    trusted, program-constructed commands, never user input.
    """
    print( cmd )
    p = subprocess.Popen(cmd, shell=True)
    ret = p.wait()
    print('')
    return ret
def wget( address ):
    """Download *address* into the current directory, named after its basename.

    Uses urllib on Python 2 and urllib.request on Python 3.
    """
    version = sys.version_info
    if version[0] == 2:
        import urllib
        urllib.urlretrieve(address, os.path.basename(address))
    elif version[0] == 3:
        import urllib.request
        urllib.request.urlretrieve(address, os.path.basename(address))
def unzip(zip_filename):
    """Extract *zip_filename* into the current working directory.

    Fixes over the previous version:
      * directories that already exist no longer crash ``os.makedirs``;
      * parent directories are created even when the archive carries no
        explicit directory entries;
      * ``open`` works on both Python 2 and 3, so the ``file()``-only
        Python 2 branch was removed.
    """
    zip_file = zipfile.ZipFile(zip_filename, "r")
    for member in zip_file.namelist():
        if member.endswith('/'):
            # Explicit directory entry.
            if not os.path.isdir(member):
                os.makedirs(member)
        else:
            # Make sure the file's directory exists before writing.
            parent = os.path.dirname(member)
            if parent and not os.path.isdir(parent):
                os.makedirs(parent)
            out_file = open(member, "wb")
            out_file.write(zip_file.read(member))
            out_file.close()
    zip_file.close()
def rm(path):
    """Delete the file *path*; silently a no-op when it does not exist."""
    if not os.path.exists(path):
        return
    os.remove(path)
def rmdir(path):
    """Recursively delete directory *path*, logging what was done."""
    if not os.path.exists(path):
        print("rmdir : not found " + path)
        return
    print("rmdir : " + path)
    shutil.rmtree(path)
def cd(path):
    """Change the current working directory to *path* (thin os.chdir wrapper)."""
    os.chdir(path)
def cdToScript():
    """Change the current working directory to this script's directory."""
    cd(os.path.dirname(__file__))
def mkdir(path):
    """Create directory *path* unless it already exists (non-recursive)."""
    if os.path.exists(path):
        return
    os.mkdir(path)
def copy(src,dst):
    """Copy file *src* to *dst* via shutil.copy, logging the operation."""
    print("copying from {0} to {1}".format(src, dst))
    shutil.copy(src,dst)
def copytree(src,dst,change=False):
    """Copy the tree *src* to *dst*.

    With change=True an existing *dst* is removed first (forced refresh);
    otherwise an existing *dst* is left untouched and nothing is copied.
    """
    if change and os.path.exists(dst):
        rmdir(dst)
    if not os.path.exists(dst):
        shutil.copytree(src,dst)
def editCmakeForACE(path,enc='utf-8'):
    """Patch the CMake file at *path* in place so it builds for ACE.

    Adds -fPIC flags (needed on 64-bit *nix) and marks the file with a
    '# ForACE' sentinel so repeated runs leave it unchanged (idempotent).

    Args:
        path: CMake file to rewrite.
        enc: text encoding used for both reading and writing.
    """
    f = open(path, 'r', encoding=enc)
    lines = f.read()
    f.close()
    # Already patched -- nothing to do.
    if '# ForACE' in lines:
        return
    # GLFW's own warning-flags line also needs -fPIC appended.
    if 'GLFW' in lines:
        lines = lines.replace('-Wall', '-Wall -fPIC')
    lines = lines + "\n"
    lines = lines + "# ForACE\n"
    lines = lines + "if (MSVC)\n"
    lines = lines + "else()\n"
    lines = lines + "\tadd_definitions(-fPIC)\n"
    lines = lines + "\tset(CMAKE_C_FLAGS \"${CMAKE_C_FLAGS} -fPIC\")\n"
    lines = lines + "\tset(CMAKE_C_FLAGS_DEBUG \"${CMAKE_C_FLAGS_DEBUG} -fPIC\")\n"
    lines = lines + "\tset(CMAKE_C_FLAGS_RELEASE \"${CMAKE_C_FLAGS_RELEASE} -fPIC\")\n"
    lines = lines + "\tset(CMAKE_C_FLAGS_MINSIZEREL \"${CMAKE_C_FLAGS_MINSIZEREL} -fPIC\")\n"
    lines = lines + "\tset(CMAKE_C_FLAGS_RELWITHDEBINFO \"${CMAKE_C_FLAGS_RELWITHDEBINFO} -fPIC\")\n"
    lines = lines + "\tset(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -fPIC\")\n"
    lines = lines + "endif()\n"
    # Fix: write back with the same encoding used for reading; the platform
    # default encoding could corrupt non-ASCII content on round-trip.
    f = open(path, 'w', encoding=enc)
    f.write(lines)
    f.close()
|
UTF-8
|
Python
| false | false | 2,014 |
11,682,311,076,477 |
bc5fdcc42efb05ae6be16f203105e5a6fae8db2e
|
a813cc4065945f4e624970f4e0fc52b1f33d6c05
|
/src/droid/instruments/serial.py
|
6a9b47c5c53c56b9b4c760c558a9d662ee566fe2
|
[] |
no_license
|
kdart/powerdroid
|
https://github.com/kdart/powerdroid
|
648e86ecbf1ad812c70edce3e4d144d982ec62c7
|
f9474f7af07698b8831d7a5ddca0f5c12e90037e
|
refs/heads/master
| 2020-04-06T06:15:29.086550 | 2009-05-28T12:39:25 | 2009-05-28T12:39:25 | 32,687,085 | 8 | 3 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python2.4
# -*- coding: us-ascii -*-
# vim:ts=2:sw=2:softtabstop=0:tw=74:smarttab:expandtab
#
# Copyright 2006 The Android Open Source Project
"""Base support for intsruments with RS-232 serial interfaces.
"""
# TODO(dart) Actually, this works with the Fluke interface, so should be
# refactored as the general base class.
from pycopia import expect
from pycopia import tty
from droid.instruments import core
class Error(Exception):
  """Base exception for serial-instrument errors in this module."""
  pass
class CommandError(Error):
  """Raised when the device reports it did not understand a command ('?')."""
  pass
class ExecutionError(Error):
  """Raised when the device understood but could not execute a command ('!')."""
  pass
class SerialInstrument(object):
  """Base class for instruments that interface with RS-232 ports.
  """
  # Expect-wrapped serial port; None once close() has run.
  _exp = None
  def __init__(self, devspec, logfile=None, **kwargs):
    # devspec supplies port, serial settings, prompt and optional timeout.
    fo = tty.SerialPort(devspec.port)
    fo.set_serial(devspec.serial)
    self._timeout = devspec.get("timeout", 30.0)
    self._exp = expect.Expect(fo, prompt=devspec.prompt,
        timeout=self._timeout,
        logfile=logfile)
    self.Initialize(devspec, **kwargs)
  def __del__(self):
    # Best-effort cleanup; close() is idempotent.
    self.close()
  def close(self):
    """Close the underlying port; safe to call more than once."""
    if self._exp:
      self._exp.close()
      self._exp = None
  # True once the port has been closed.
  closed = property(lambda self: self._exp is None)
  def _get_timeout(self):
    return self._timeout
  def _set_timeout(self, value):
    self._timeout = float(value)
  timeout = property(_get_timeout, _set_timeout)
  def Initialize(self, devspec, **kwargs):
    # Hook for subclasses; called once from __init__.
    pass
  def Prepare(self, measurecontext):
    return 0.20 # default for medium
  def read(self, len=4096):
    """Read up to *len* bytes with the configured timeout."""
    return self._exp.read(len, timeout=self._timeout)
  readbin = read
  def writebin(self, string, length):
    # GPIB-compatible alias; *length* is ignored for serial transport.
    self.write(string)
  def write(self, string):
    """Send *string* plus newline; return text up to the next prompt."""
    self._exp.write(string)
    self._exp.write("\n")
    return self._wait_for_prompt()
  def ask(self, string):
    """Send a query and return the device's response text."""
    self._exp.write(string)
    self._exp.write("\n")
    return self._wait_for_prompt()
  # gpib compatible methods
  def wait(self):
    pass
  def clear(self):
    """Interrupt the device (sends ^C) and resynchronize on the prompt."""
    self._exp.write(chr(3)) # ^C
    self._wait_for_prompt()
  def _wait_for_prompt(self):
    # Prompts look like "=>", "?>" or "!>": '=' success, '?' parse error,
    # '!' execution error.  Returns the response with the prompt stripped.
    mo = self._exp.expect("([=?!])>\r\n", expect.REGEX)
    if mo:
      indicator = mo.group(1)
      if indicator == "=":
        return mo.string[:-6]
      if indicator == "?":
        raise CommandError("Command not understood")
      if indicator == "!":
        raise ExecutionError("Command could not be executed.")
  def poll(self):
    """Serial polls the device. The status byte is returned."""
    return 0
  def trigger(self):
    """Sends a GET (group execute trigger) command to the device."""
    pass
  def identify(self):
    """Return an Identity parsed from the standard *IDN? query."""
    return core.Identity(self.ask("*IDN?"))
  def GetError(self):
    # Base implementation: error queue not supported on plain serial.
    return core.DeviceError(0, "Not supported")
  def Errors(self):
    """Return a list of all queued errors from the device.
    """
    rv = []
    err = self.GetError()
    while err._code != 0:
      rv.append(err)
      err = self.GetError()
    return rv
  def ClearErrors(self):
    pass
  def GetVoltageHeadings(self):
    return ()
  def GetCurrentHeadings(self):
    return ()
  def GetConfig(self, option):
    return None
|
UTF-8
|
Python
| false | false | 2,009 |
11,854,109,742,114 |
00d3b7c0df10317f363c70c5a0bfab8f05d1444e
|
d0d80b403602f8db5521ec9df61e96beb1cdd4a5
|
/architectural_prototype/src/aggregator.py
|
bd663f70bd81e3e1b39c9f4a7597d28da9934336
|
[] |
no_license
|
athas/architecture-course-project
|
https://github.com/athas/architecture-course-project
|
dffad2dafaa761126fac7809e29ab08e85ed4a4e
|
d09bffcca2e748f6a6d3dae0cce1282df3baa9ee
|
refs/heads/master
| 2021-05-26T19:47:02.827296 | 2012-11-04T20:41:07 | 2012-11-04T20:41:07 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from flask import request
from wsgi import app
from datastore import TruckData, database
import datetime, json
@app.route('/data', methods=['POST'])
def recent_data():
    """Ingest a batch of truck GPS points POSTed as JSON.

    Payload shape (as consumed below):
        {truck_id: {timestamp: [latitude, longitude], ...}, ...}

    Returns the literal string "success" when every point was saved.
    """
    database.connect()
    try:
        collected = []
        for truck, points in json.loads(request.data).iteritems():
            for time, coords in points.iteritems():
                new = TruckData()
                new.timestamp = float(time)
                new.truck_id = int(truck)
                new.latitude = float(coords[0])
                new.longitude = float(coords[1])
                new.save()
                collected.append(new)
    finally:
        # Fix: close the connection even when parsing or saving raises;
        # previously a failed request leaked an open DB connection.
        database.close()
    return "success"
|
UTF-8
|
Python
| false | false | 2,012 |
3,547,642,996,687 |
a8cbd49e4be659a1db7aeb73a9bdd4d9a7bfc689
|
ca08ebd72fe0d65f1e99e8768e858fccd0b58827
|
/modules/Site/GranulateStudy/models.py
|
bba472eeedcfd918e3f078e6c31f628459344c91
|
[
"LicenseRef-scancode-proprietary-license"
] |
non_permissive
|
jgmao/MTC
|
https://github.com/jgmao/MTC
|
634da873fb970fac7af18d638652b432207bf5f1
|
68be2c0a690b2d154f00b898daee96c4dc2bfa46
|
refs/heads/master
| 2021-01-18T22:56:10.606958 | 2014-05-12T15:48:01 | 2014-05-12T15:48:01 | 12,138,502 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.db import models
# Create your models here.
class Gran(models.Model):
    """One granulation-size choice submitted for an image in the study.

    Stores which image (orgName), the chosen block size (gran, from SIZE),
    the submitter's IP and an auto-set creation timestamp.
    """
    orgName = models.CharField(max_length=50)
    # (db value, block size) pairs; -1 flags "no size chosen".
    SIZE = ((0,4),(1,8),(2,16),(3,32),(4,64),(5,128),(6,-1))#the last one means none of any size
    create_date = models.DateTimeField(auto_now_add=True)
    gran = models.IntegerField(choices=SIZE)
    ip = models.CharField(max_length=50)
    def __unicode__(self):
        # e.g. "lena_3" -- image name plus chosen granulation key.
        return self.orgName+"_"+str(self.gran)
|
UTF-8
|
Python
| false | false | 2,014 |
18,425,409,701,240 |
1de35f404b4894e05466dbf7a19859a48da685a0
|
2afadfca6026451026aec3595a765307d4499f72
|
/sitio/admin.py
|
dbd4a4a8d9a22ab0ac5f58fdb5080b2591aaa499
|
[] |
no_license
|
CARocha/sitioweb
|
https://github.com/CARocha/sitioweb
|
b8166890b240af822ed810d82b11a8982cc2a477
|
791f9fd8f05a4daf3b9671cef050fbc9aa247b36
|
refs/heads/master
| 2021-01-20T21:53:57.978352 | 2013-02-01T05:25:45 | 2013-02-01T05:25:45 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.contrib import admin
from models import *
class SocialInline(admin.TabularInline):
    """Tabular inline so Social rows are edited on the parent profile page."""
    model = Social
    extra = 1
class UserProfileAdmin(admin.ModelAdmin):
    """Admin for UserProfile; related Social entries are edited inline."""
    list_display = ('user','descripcion')
    search_fields = ['user']
    list_filter = ('user',)
    inlines = [SocialInline]
# Register UserProfile with its customised admin class.
admin.site.register(UserProfile, UserProfileAdmin)
class VideosAdmin(admin.ModelAdmin):
    """Admin for Videos with date drill-down and TinyMCE rich-text editing."""
    #prepopulated_fields = {"slug": ("titulo",)}
    list_display = ('fecha', 'usuario','titulo','portada','curso','categoria')
    search_fields = ['titulo']
    list_filter = ('usuario','categoria','curso')
    date_hierarchy = 'fecha'
    class Media:
        # Load TinyMCE so text fields render as rich-text editors.
        js = ('js/tiny_mce/tiny_mce.js',
              'js/basic_config.js',)
class TalleresAdmin(admin.ModelAdmin):
    """Admin for Talleres (workshops), filterable by presenter."""
    list_display = ('titulo', 'ponente',)
    search_fields = ['titulo']
    list_filter = ('ponente',)
    class Media:
        # Load TinyMCE so text fields render as rich-text editors.
        js = ('js/tiny_mce/tiny_mce.js',
              'js/basic_config.js',)
# Custom admins for Videos/Talleres; the rest use the default ModelAdmin.
admin.site.register(Videos, VideosAdmin)
admin.site.register(Talleres, TalleresAdmin)
admin.site.register(Categoria)
admin.site.register(Cursos)
admin.site.register(Redes)
admin.site.register(EnVivo)
admin.site.register(Ponente)
admin.site.register(ComunidadAmiga)
|
UTF-8
|
Python
| false | false | 2,013 |
1,778,116,474,813 |
90801b1976518d85495472ac7c76c3606d60879c
|
67739e11eaf98dcf4029f4f8699b7df6c32b9106
|
/TestPrograms/heuristicRules.py
|
fd1f35b5c03b1afc689378df8520e1ae7949721f
|
[] |
no_license
|
tushar54983/Python
|
https://github.com/tushar54983/Python
|
1dff6d5195d934d535c8c18c1c6293f7d9aa27f1
|
addb2cfd051023dfb082645344096f21c20c1f7b
|
refs/heads/master
| 2016-09-07T13:35:48.836058 | 2014-04-05T10:56:13 | 2014-04-05T10:56:13 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Ordered bigram-extraction rules (first match wins per window).  Each rule is
# (offset, tag_a, tag_b, veto_nn_after): for the window starting at i it fires
# when tag_a is a substring of the POS tag at i+offset and tag_b of the tag at
# i+offset+1; veto_nn_after additionally requires 'NN' NOT to appear in the tag
# at i+2.  The three tables reproduce the exact elif order of the original
# hand-written chains (including their redundant entries) so behaviour on
# matched inputs is unchanged.
_RULES_LONG = [          # sentences of more than 3 tokens
    (0, 'JJ', 'NN', False),
    (0, 'NN', 'JJ', False),
    (0, 'RB', 'JJ', True),
    (0, 'JJ', 'JJ', True),
    (0, 'NN', 'JJ', True),
    (0, 'RB', 'VB', False),
    (0, 'VB', 'NN', False),
    (1, 'RB', 'JJ', False),
    (1, 'JJ', 'JJ', False),
    (1, 'NN', 'JJ', False),
    (0, 'NN', 'NN', False),
    (1, 'NN', 'NN', False),
    (1, 'JJ', 'NN', False),
]
_RULES_THREE = [         # exactly 3 tokens
    (0, 'JJ', 'NN', False),
    (0, 'NN', 'JJ', False),
    (0, 'RB', 'JJ', True),
    (0, 'JJ', 'JJ', True),
    (0, 'NN', 'JJ', True),
    (0, 'RB', 'VB', False),
    (0, 'VB', 'NN', False),
    (1, 'JJ', 'NN', False),
    (1, 'RB', 'VB', False),
    (1, 'VB', 'NN', False),
    (1, 'RB', 'JJ', False),
    (1, 'JJ', 'JJ', False),
    (1, 'NN', 'JJ', False),
    (0, 'NN', 'NN', False),
    (1, 'NN', 'NN', False),
    (1, 'JJ', 'NN', False),
]
_RULES_TWO = [           # exactly 2 tokens (no third tag, so no vetoes)
    (0, 'JJ', 'NN', False),
    (0, 'NN', 'JJ', False),
    (0, 'RB', 'VB', False),
    (0, 'VB', 'RB', False),
    (0, 'VB', 'NN', False),
    (0, 'NN', 'VB', False),
    (0, 'NN', 'NN', False),
]


def _match_window(text, i, rules, has_third):
    """Return the first rule-matching bigram for the window at *i*, else None."""
    pos3 = text[i + 2][1] if has_third else None
    for offset, tag_a, tag_b, veto_nn_after in rules:
        first, second = i + offset, i + offset + 1
        if tag_a in text[first][1] and tag_b in text[second][1]:
            if veto_nn_after and pos3 is not None and 'NN' in pos3:
                continue  # rule vetoed by a following noun; keep scanning
            return [text[first], text[second]]
    return None


def heuristic_rules(text):
    """Extract candidate opinion bigrams from a POS-tagged token list.

    Args:
        text: list of (word, pos_tag) pairs.

    Returns:
        A list of bigrams, each a two-element list of (word, pos_tag) pairs.
        For fewer than two tokens an empty list is returned (fix: the
        previous version fell off the end and returned None).
    """
    count = len(text)
    bigrams = []
    if count >= 3:
        # The original used slightly different rule orders for len==3 vs >3.
        rules = _RULES_LONG if count > 3 else _RULES_THREE
        for i in range(count - 2):
            pair = _match_window(text, i, rules, True)
            if pair is not None:
                bigrams.append(pair)
    elif count == 2:
        pair = _match_window(text, 0, _RULES_TWO, False)
        if pair is not None:
            bigrams.append(pair)
    return bigrams
def bigramPolarity(tagScoreList):
    """Classify a bigram's polarity and append summary tuples IN PLACE.

    Input items are (word, pos_tag, (pos, neg, obj)) tuples.  The list is
    mutated: items are extended with a class label via decideClassOfWord,
    then ('POS', score)/('NEG', score) -- or ('OBJ', score) when the bigram
    is judged objective -- are appended.  Returns the same (mutated) list.
    """
    count = len(tagScoreList)
    i = 0
    if count > 0 :
        objScore = float(0)
        posScore = float(0)
        negScore = float(0)
        flag = 0
        # Average objectivity over the non-noun words only.
        for tagScore in tagScoreList:
            if 'NN' not in tagScore[1]:
                flag +=1
                objScore = objScore + tagScore[2][2]
        if flag == 0:
            # All nouns: treat as fully objective.
            objScore = float(1)
        else:
            objScore = objScore/flag
        if objScore <= 0.7:
            # Subjective enough: label each word, then score qualifying
            # POS-pattern pairs.  NOTE(review): the NEG branches sum the
            # negative scores of BOTH words even when one is POS/OBJ --
            # confirm that asymmetry with POS handling is intentional.
            for index,tagScore in enumerate(tagScoreList):
                tagScoreList[index] = decideClassOfWord(tagScore)
            if count == 1:
                posScore = posScore + tagScoreList[0][2][0]
                negScore = negScore + tagScoreList[0][2][1]
                tagScoreList.append(('POS',posScore))
                tagScoreList.append(('NEG',negScore))
            else:
                while count > i+1:
                    if ('JJ' in tagScoreList[i][1] and 'NN' in tagScoreList[i+1][1]) or ('RB' in tagScoreList[i][1] and 'VB' in tagScoreList[i+1][1]) or ('JJ' in tagScoreList[i][1] and 'JJ' in tagScoreList[i+1][1]) or ('RB' in tagScoreList[i][1] and 'JJ' in tagScoreList[i+1][1]) or ('VB' in tagScoreList[i][1] and 'NN' in tagScoreList[i+1][1]):
                        if 'POS' in tagScoreList[i][3] and 'POS' in tagScoreList[i+1][3]:
                            posScore = posScore + tagScoreList[i][2][0] + tagScoreList[i+1][2][0]
                        elif 'POS' in tagScoreList[i][3] and 'OBJ' in tagScoreList[i+1][3]:
                            posScore = posScore + tagScoreList[i][2][0] + tagScoreList[i+1][2][0]
                        elif 'POS' in tagScoreList[i+1][3] and 'OBJ' in tagScoreList[i][3]:
                            posScore = posScore + tagScoreList[i][2][0] + tagScoreList[i+1][2][0]
                        elif 'NEG' in tagScoreList[i][3] and 'POS' in tagScoreList[i+1][3]:
                            negScore = negScore + tagScoreList[i][2][1] + tagScoreList[i+1][2][1]
                        elif 'NEG' in tagScoreList[i][3] and 'OBJ' in tagScoreList[i+1][3]:
                            negScore = negScore + tagScoreList[i][2][1] + tagScoreList[i+1][2][1]
                        elif 'NEG' in tagScoreList[i][3] and 'NEG' in tagScoreList[i+1][3]:
                            negScore = negScore + tagScoreList[i][2][1] + tagScoreList[i+1][2][1]
                        elif 'NEG' in tagScoreList[i+1][3] and 'OBJ' in tagScoreList[i][3]:
                            negScore = negScore + tagScoreList[i][2][1] + tagScoreList[i+1][2][1]
                        elif 'NEG' in tagScoreList[i+1][3] and 'POS' in tagScoreList[i][3]:
                            negScore = negScore + tagScoreList[i][2][1] + tagScoreList[i+1][2][1]
                    i+=1
                tagScoreList.append(('POS',posScore))
                tagScoreList.append(('NEG',negScore))
        else:
            tagScoreList.append(('OBJ',objScore))
    else:
        # Empty input: mark as fully objective.
        tagScoreList.append(('OBJ',1.0))
    return tagScoreList
def decideClassOfWord(tagScore):
    """Append a sentiment label to a (word, tag, (pos, neg, obj)) tuple.

    Objectivity wins when the objective score exceeds 0.5; otherwise the
    larger of the positive/negative scores decides, ties going to 'POS'.
    Returns the extended tuple.
    """
    pos_score, neg_score, obj_score = tagScore[2]
    if obj_score > 0.5:
        label = 'OBJ'
    elif pos_score >= neg_score:
        label = 'POS'
    else:
        label = 'NEG'
    return tagScore + (label,)
def main():
text = sys.argv
line = text.split()
k = [i for i in line if len(i) >= 3]
txt = ' '.join(k)
lst = tagger.tag_line(txt)
print heuristic_rules(lst)
if __name__ == '__main__':
main()
|
UTF-8
|
Python
| false | false | 2,014 |
9,019,431,350,704 |
2a0d1f2ca125547b32882a0aac0f7ac3cea93955
|
9c0e571538c8a22de59499a6878ed9cd3a3a8717
|
/constants.py
|
3e7314d710bf37e625345f197fed28c372b3c1aa
|
[] |
no_license
|
pravj/logg
|
https://github.com/pravj/logg
|
6cb51769cfe94d4dcddae850375680795844a2f5
|
692f2a63f56389306a5161599996f6abd473fe78
|
refs/heads/master
| 2016-09-08T13:03:00.517136 | 2014-01-02T14:57:27 | 2014-01-02T14:57:27 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- encoding:utf-8 -*-
# to left blank somewhere in calendar
blank = " "   # filler used to leave an empty cell in the calendar
square = '■'  # glyph used for bar-graph bars and calendar boxes
# Weekdays, Sunday first; month abbreviations, January first.
days = ['Sun','Mon','Tue','Wed','Thu','Fri','Sat']
months = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']
# Hour labels: the morning half zero-padded ('00'..'11'), the afternoon
# half produced by shifting each morning hour by 12 ('12'..'23').
hours = ['%02d' % h for h in range(12)] + [str(h) for h in range(12, 24)]
|
UTF-8
|
Python
| false | false | 2,014 |
13,460,427,520,605 |
46fca97c493c3133f9b5e2e5a3d3e59614afd161
|
43936cdb78496d39d641d51aa8e04b65ae6c8df9
|
/api/serializers.py
|
1a13d500bded2382e97b020a9f2e18d6ef9c55c1
|
[] |
no_license
|
frnhr/rsstest
|
https://github.com/frnhr/rsstest
|
258939f893250dd28eabfb17f1eb8a197c6112f0
|
41ded77510fbcc30fc3167b526066fa75b4c2e43
|
refs/heads/master
| 2020-05-16T15:58:31.539640 | 2014-08-25T10:22:16 | 2014-08-25T10:22:16 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from rest_framework import serializers
from rest_framework.fields import Field
from rest_framework.reverse import reverse
from api.models import Feed, Entry, Word, WordCount
class HyperlinkNestedSelf(Field):
    """
    Use instead of default URL field on nested resources.
    Because default URL field looks for url named "<resource>-detail", and url.reverse args are not compatible.
    Note: a read-only field
    """
    url = None
    view_name = None           # URL pattern name passed to reverse()
    parents_lookup = None      # list of lookups walking up to parent objects
    self_field = None          # attribute used for this object's own kwarg
    obj_field = None           # optional attribute to dereference before walking
    def __init__(self, view_name, parents_lookup=None, obj_field=None, self_field='pk', *args, **kwargs):
        super(HyperlinkNestedSelf, self).__init__(*args, **kwargs)
        self.view_name = view_name
        self.parents_lookup = parents_lookup
        self.self_field = self_field
        self.obj_field = obj_field
    def field_to_native(self, obj, field_name):
        """Build the nested-detail URL for *obj* by collecting each parent's
        lookup value and reversing self.view_name with them as kwargs."""
        request = self.context.get('request', None)
        parents_lookup = [[parent_lookup, 'pk'] if isinstance(parent_lookup, basestring) else parent_lookup
                          for parent_lookup in self.parents_lookup] # copy the list and make "pk" optional default
        if self.obj_field is not None:
            obj = getattr(obj, self.obj_field)
        #@TODO this is a good point to unify with HyperlinkNestedViewField
        def get_parent_data(parent_lookup, parent_data):
            """
            Gather parent objects and field values
            """
            # Recursively pops lookups; each step follows the attribute named
            # by the last "__" segment and records that parent's field value.
            if len(parent_lookup) < 1:
                return parent_data
            lookup = parent_lookup.pop()
            parent_attr = lookup[0].split("__")[-1]
            parent_field = lookup[1]
            obj = parent_data[-1]['obj']
            parent = getattr(obj, parent_attr)
            parent_data.append({
                'obj': parent,
                'field': parent_field,
                'value': getattr(parent, parent_field),
                'lookup': lookup[0],
            })
            return get_parent_data(parent_lookup, parent_data)
        parent_data = [{'obj': obj, 'field': self.self_field, 'value': getattr(obj, self.self_field) }, ]
        parents_data = get_parent_data(parents_lookup, parent_data)
        kwargs = {} # populate kwargs for URL reverse() call
        for i, parent_data in enumerate(parents_data):
            if i == 0:
                # The object itself keys directly on its own field name.
                kwargs[parent_data['field']] = parent_data['value']
            else:
                # Parents use the drf-extensions "parent_lookup_<...>" kwargs.
                kwargs['parent_lookup_%s' % parent_data['lookup']] = parent_data['value']
        return reverse(self.view_name, kwargs=kwargs, request=request)
#@TODO DRY it out, it's quite similar to HyperlinkNestedSelf
class HyperlinkNestedViewField(Field):
    """
    Use to link to arbitrary url that has relationship with current resource.
    Note: a read-only field
    """
    # Class-level defaults; real values are assigned in __init__.
    url = None
    view_name = None
    parents_lookup = None
    nested_field = None
    def __init__(self, view_name, parents_lookup=None, nested_field=None, *args, **kwargs):
        # view_name: URL pattern name to reverse.
        # parents_lookup: chain of parent lookups (string or [lookup, field]).
        # nested_field: related manager attribute on obj whose model is used
        #               to build a dummy nested object.
        super(HyperlinkNestedViewField, self).__init__(*args, **kwargs)
        self.view_name = view_name
        self.parents_lookup = parents_lookup
        self.nested_field = nested_field
    def field_to_native(self, obj, field_name):
        request = self.context.get('request', None)
        # Normalise each lookup to a [lookup, field] pair ("pk" by default).
        parents_lookup = [[parent_lookup, 'pk'] if isinstance(parent_lookup, basestring) else parent_lookup
                          for parent_lookup in self.parents_lookup] # copy the list and make "pk" optional default
        def get_parent_data(parent_lookup, parent_data):
            """
            Gather parent objects and field values
            """
            # Same recursive walk as HyperlinkNestedSelf.field_to_native:
            # consumes the lookup chain from the end via pop().
            if len(parent_lookup) < 1:
                return parent_data
            lookup = parent_lookup.pop()
            parent_attr = lookup[0].split("__")[-1]
            parent_field = lookup[1]
            obj = parent_data[-1]['obj']
            parent = getattr(obj, parent_attr)
            parent_data.append({
                'obj': parent,
                'field': parent_field,
                'value': getattr(parent, parent_field),
                'lookup': lookup[0],
            })
            return get_parent_data(parent_lookup, parent_data)
        nested_obj = getattr(obj, self.nested_field).model() #@TODO not a nice trick, creating a dummy nested object
        setattr(nested_obj, parents_lookup[-1][0], obj)
        parent_data = [{'obj': nested_obj, 'field': 'pk', 'value': getattr(nested_obj, 'pk') }, ]
        parents_data = get_parent_data(parents_lookup, parent_data)
        kwargs = {} # populate kwargs for URL reverse() call
        # Unlike HyperlinkNestedSelf, the dummy object itself contributes no
        # kwarg (i == 0 branch is a deliberate no-op) -- only parent lookups.
        for i, parent_data in enumerate(parents_data):
            if i == 0:
                pass
            else:
                kwargs['parent_lookup_%s' % parent_data['lookup']] = parent_data['value']
        return reverse(self.view_name, kwargs=kwargs, request=request)
# list serializers
class FeedListSerializer(serializers.HyperlinkedModelSerializer):
    """Compact Feed representation for list views."""
    class Meta:
        model = Feed
        fields = ('_url', 'url', 'is_active', )
class EntryListSerializer(serializers.HyperlinkedModelSerializer):
    """Compact Entry representation; _url points at the feed-nested detail view."""
    _url = HyperlinkNestedSelf(view_name="feeds-entry-detail", parents_lookup=['feed', ])
    class Meta:
        model = Entry
        fields = ('_url', 'title', )
class WordField(serializers.CharField):
    """Read-only field rendering a WordCount's related Word as its text."""
    def field_to_native(self, obj, field_name):
        """
        Given an object and a field name, returns the value that should be
        serialized for that field.
        """
        #return obj.word.word if obj else ''
        return obj.word.word
class WordCountListSerializer(serializers.HyperlinkedModelSerializer):
    """WordCount rows for the per-entry list view (doubly-nested URL)."""
    _url = HyperlinkNestedSelf(view_name="feeds-entries-wordcount-detail", parents_lookup=['entry__feed', 'entry', ])
    word = WordField()
    class Meta:
        model = WordCount
        fields = ('_url', 'word', 'count', )
class WordListSerializer(serializers.HyperlinkedModelSerializer):
    """Word with its per-entry counts inlined."""
    class WordCountWordListSerializer(serializers.HyperlinkedModelSerializer):
        # Nested representation of one occurrence count of this word in one entry.
        _url = HyperlinkNestedSelf(view_name="feeds-entries-wordcount-detail", parents_lookup=['entry__feed', 'entry', ])
        entry = HyperlinkNestedSelf(view_name="feeds-entry-detail", parents_lookup=['feed', ], obj_field='entry')
        class Meta:
            model = WordCount
            fields = ('_url', 'entry', 'count', )
    wordcounts = WordCountWordListSerializer()
    class Meta:
        model = Word
        fields = ('_url', 'word', 'wordcounts', )
# detail serializers
class WordCountRootSerializer(serializers.HyperlinkedModelSerializer):
    """WordCount detail enriched with its entry title and the owning feed URL."""
    class FeedURLField(Field):
        # Read-only: URL of the feed the counted entry belongs to.
        def field_to_native(self, obj, field_name):
            return obj.entry.feed.url
    class EntryTitleField(Field):
        # Read-only: title of the entry this count belongs to.
        def field_to_native(self, obj, field_name):
            return obj.entry.title
    _url = HyperlinkNestedSelf(view_name="feeds-entries-wordcount-detail", parents_lookup=['entry__feed', 'entry', ])
    word = WordField()
    entry = HyperlinkNestedSelf(view_name="feeds-entry-detail", parents_lookup=['feed', ], obj_field='entry')
    entry_title = EntryTitleField()
    feed_url = FeedURLField()
    class Meta:
        model = WordCount
        fields = ('_url', 'word', 'count', 'entry', 'entry_title', 'feed_url' )
class WordCountTopSerializer(serializers.Serializer):
    """Aggregated (word, count) pairs, e.g. rows from a values() queryset."""
    class WordWordField(serializers.CharField):
        def field_to_native(self, obj, field_name):
            """
            Given an object and a field name, returns the value that should be
            serialized for that field.
            """
            # obj here is a dict-like row (note the 'word__word' key access),
            # not a model instance.
            #return obj.word.word if obj else ''
            return obj['word__word']
    word = WordWordField()
    count = serializers.Field()
class WordCountSerializer(serializers.HyperlinkedModelSerializer):
    """Standard WordCount detail: nested self URL, entry link and word text."""
    _url = HyperlinkNestedSelf(view_name="feeds-entries-wordcount-detail", parents_lookup=['entry__feed', 'entry', ])
    entry = HyperlinkNestedSelf(view_name="feeds-entry-detail", parents_lookup=['feed', ], obj_field='entry')
    word = WordField()
    class Meta:
        model = WordCount
        fields = ('_url', 'entry', 'word', 'count', )
class EntrySerializer(serializers.HyperlinkedModelSerializer):
    """Entry detail with links to itself and to its word-count list."""
    _url = HyperlinkNestedSelf(view_name="feeds-entry-detail", parents_lookup=['feed', ])
    _wordcounts = HyperlinkNestedViewField(view_name='feeds-entries-wordcount-list', parents_lookup=['entry__feed', 'entry', ], nested_field="wordcounts")
    class Meta:
        model = Entry
        fields = ('_url', '_wordcounts', 'feed', 'title', 'url', 'timestamp', 'text', )
class FeedSerializer(serializers.HyperlinkedModelSerializer):
    """Feed detail with a link to its nested entry list; url is immutable."""
    _entries = HyperlinkNestedViewField(view_name='feeds-entry-list', parents_lookup=['feed', ], nested_field="entries")
    class Meta:
        model = Feed
        fields = ('_url', '_entries', 'url', 'is_active', )
        read_only_fields = ('url', )
|
UTF-8
|
Python
| false | false | 2,014 |
6,768,868,475,123 |
dfd1dbb5b41c4f193a2414f44f39356ecf5c9f94
|
46bb0da1318108d4c35ab061d1720b6ba1c3b217
|
/scraper/scraper/spiders/DRESpider.py
|
42cac33d2796e7272050d2ffb610f0935bfa4911
|
[
"GPL-3.0-only"
] |
non_permissive
|
hugohp/scrapDiarioDaRepublica
|
https://github.com/hugohp/scrapDiarioDaRepublica
|
ba711385ba3f2b61f1d2536baa98061eaf1c4d32
|
e6504fa1ab6dc5e8bddf0ade535f743d5dbabcb4
|
refs/heads/master
| 2016-09-11T01:18:48.711884 | 2012-11-17T10:27:42 | 2012-11-17T10:27:42 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import FormRequest
from scrapy.http import Request
from scrapy.item import Item, Field
from scraper.items import DocItem
from scrapy import log
import re
import os
# This spider sends HTTP POST for every number,year combination
# and downloads all pdf links on the returned page.
class DRESpider(BaseSpider):
    """Spider for dre.pt: POSTs a search for every (numero, ano) combination
    and downloads every PDF linked from each result page that is not already
    on disk.
    """
    name = 'DRE'
    allowed_domains = ['dre.pt']
    start_urls = ['http://dre.pt/sug/1s/diarios.asp']
    def __init__(self, startyear="1910", endyear="2012", endnumero = "1000", pathname=""):
        # All arguments arrive as strings from the scrapy command line.
        self.startyear = int(startyear)
        self.endyear = int(endyear)
        self.endnumero = int(endnumero)
        self.pathname = pathname
    # Converts ugly filename into a nice name
    def get_filepath(self, path, ano, title):
        """Build "<path>/<ano>/<normalized title>.pdf" from a result title."""
        # Encoding to ascii with 'ignore' drops accented characters, so the
        # patterns below intentionally match the stripped forms (e.g.
        # "Dirio n " is what remains of the accented original title prefix).
        title = title.encode('ascii','ignore')
        title = re.sub('[\.\,]', '', title)
        title = re.sub('^Dirio n ','Num', title)
        title = re.sub('Suplemento ', 'Supl_', title)
        title = re.sub('Srie ', 'Serie', title)
        title = re.sub('de ', 'Data', title)
        title = re.sub(' ', '_', title)
        return path + "/" + str(ano) + "/" + title + ".pdf"
    def check_if_file_exists(self, filepath):
        """Return True if filepath exists; otherwise make sure its directory
        exists (one level only -- mkdir, not makedirs) and return False."""
        if os.path.exists(filepath):
            return True
        else:
            dirname = os.path.dirname(filepath)
            if os.path.exists(dirname) == False:
                try:
                    os.mkdir(dirname)
                except OSError as err:
                    # Fixed: err.errno is an int; the original concatenated it
                    # directly to a str, which raised TypeError in the handler.
                    self.log("Failed to mkdir " + dirname + "with errno: " + str(err.errno), level=log.WARNING)
            return False
    def parse(self, response):
        """Issue one search POST per (ano, numero) combination."""
        for ano in range(self.startyear, self.endyear + 1):
            for numero in range(1, self.endnumero + 1):
                request = FormRequest.from_response(response,
                                formdata={'v01': '',
                                    'v02': str(numero),
                                    'v03': str(ano),
                                    'v04': '',
                                    'v05': '',
                                    'submit': 'Procurar+di%E1rio' },
                                callback = self.parse_links,
                                dont_click = True)
                # Carry the search parameters along to parse_links.
                request.meta['numero'] = numero
                request.meta['ano'] = ano
                yield request
    # Retrieves all pdf links and downloads only the ones that are not on disk
    def parse_links(self, response):
        """Yield an item per PDF link; schedule a download if not on disk."""
        hxs = HtmlXPathSelector(response)
        documents = hxs.select('//div[contains(@class,"cx_lista")]/ul/li/a')
        for doc in documents:
            ano = response.meta['ano']
            numero = response.meta['numero']
            title = doc.select('text()').extract()[0]
            href = u'http://dre.pt' + doc.select('@href').extract()[0]
            filepath = self.get_filepath(self.pathname, ano, title)
            # (A redundant second "item = DocItem()" was removed here.)
            item = DocItem()
            item['ano'] = ano
            item['numero'] = numero
            item['title'] = title
            item['href'] = href
            if self.check_if_file_exists(filepath):
                yield item
            else:
                request = Request(url=href, callback=self.parse_pdf)
                request.meta['item'] = item
                yield request
    # Store pdf on disk
    def parse_pdf(self, response):
        """Write the downloaded PDF body to disk and return the item."""
        item = response.meta['item']
        filename = self.get_filepath(self.pathname, item['ano'], item['title'])
        # Fixed: open in binary mode (PDFs are binary; text mode corrupts
        # them on Windows) and actually close the file -- the original used
        # "f.close" without calling it.
        with open(filename, 'wb') as f:
            f.write(response.body)
        return item
|
UTF-8
|
Python
| false | false | 2,012 |
11,957,188,982,756 |
3abd967bf287374255a1dcd5114ec47e7603683a
|
8a6d85c996a6323a3060eb8f9aab61b8cd119468
|
/app/bot/KCABot.py
|
a106dde2e39bc960589458208aac2348317d8ddf
|
[] |
no_license
|
KarmaCourt/KCABot
|
https://github.com/KarmaCourt/KCABot
|
d74bd7384ef0c39866bc3e966d3690b449831689
|
4561af0972aabcfc10cb3be0ffa2d064d1b37123
|
refs/heads/master
| 2015-08-17T18:43:26.501809 | 2014-12-27T10:44:18 | 2014-12-27T10:44:18 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
import praw
from flask.ext.sqlalchemy import SQLAlchemy
from flask import Flask
from KarmaCourtDB import db, LawFirms, Attorneys, Cases
class KCABot:
    """Reddit bot for /r/KarmaCourt: records case wins/losses, tallies
    Attorney Of The Month (AOTM) votes and answers attorney data requests.
    """
    # Fixed: the original literal "KCABot\1.0" contained the escape "\1"
    # (chr(1)); "/1.0" is the intended user-agent version separator.
    user_agent = "KCABot/1.0 by Tuxmascot and KarmaCourt"
    reddit = praw.Reddit(user_agent)
    def __init__(self, username, password):
        self.reddit.login(username, password)
    # Creates new attorney in SQL database.
    def add_attorney(self, username, wins, losses, position=None, type=None, specialization=None, certification=None, firm=None, AOTM_votes=None):
        # NOTE(review): wins/losses are accepted but not passed to the
        # Attorneys constructor -- presumably the model defaults them;
        # confirm against the Attorneys model definition.
        new_attorney = Attorneys(username, position, type, specialization, certification, firm, AOTM_votes)
        db.session.add(new_attorney)
        db.session.commit()
    # This function updates the win/loss record for users.
    def update_win_loss_record(self):
        """Scan unread messages for [WIN]/[LOSS] reports and update records
        for the attorney and, when set, their law firm."""
        print("Checking for messages!")
        for message in self.reddit.get_unread():
            if message.subject == "[WIN]":
                print("Message found!")
                # Fixed: test for None before len(); the original evaluated
                # len(message.body) first and crashed on an empty body.
                if message.body == None or len(message.body) > 6:
                    print("Invalid request! Sending message to requestee...")
                    self.reddit.send_message(message.author.name, "Invalid Bot Request",
                            ("Your win record request was invalid!\n\nPlease insert the case "
                             "number in the body and send the message."))
                    message.mark_as_read()
                # Case numbers are six chars.
                elif len(message.body) == 6:
                    attorney = Attorneys.query.filter_by(username=message.author.name).first()
                    # No attorney found, must be a newbie!
                    if attorney == None:
                        print("No Attorney found! Creating new one...")
                        self.add_attorney(message.author.name, 1, 0)
                        print("Welcome new user - {}".format(message.author.name))
                        message.mark_as_read()
                        self.reddit.send_message(message.author.name, 'Win Record - KarmaCourt', "We think you're a new user! Welcome to /r/KarmaCourt! Enjoy your stay!\n\nYour win has been recorded.")
                    elif attorney != None:
                        print("Adding win to Attorney - {}".format(message.author.name))
                        attorney.wins += 1
                        if attorney.firm != None:
                            firm = LawFirms.query.filter_by(name=attorney.firm).first()
                            if firm == None:
                                print("Error! User's firm should be an actual firm name! Check user - {}".format(message.author.name))
                            elif firm != None:
                                firm.wins += 1
                                message.mark_as_read()
                                self.reddit.send_message(message.author.name, 'Win Record - KarmaCourt', 'Your win has been recorded for you and your firm! - /r/KarmaCourt')
                        else:
                            message.mark_as_read()
                            self.reddit.send_message(message.author.name, 'Win Record - KarmaCourt', 'Your win has been recorded! - /r/KarmaCourt')
            if message.subject == "[LOSS]":
                print("Message found!")
                # Fixed: same None-before-len() ordering as the [WIN] branch.
                if message.body == None or len(message.body) > 6:
                    print("Invalid request! Sending message to requestee...")
                    self.reddit.send_message(message.author.name, "Invalid Bot Request",
                            ("Your loss record request was invalid!\n\nPlease insert the case "
                             "number in the body and send the message."))
                    message.mark_as_read()
                # Case numbers are six chars.
                elif len(message.body) == 6:
                    attorney = Attorneys.query.filter_by(username=message.author.name).first()
                    # No attorney found, must be a newbie!
                    if attorney == None:
                        print("No Attorney found! Creating new one...")
                        self.add_attorney(message.author.name, 0, 1)
                        message.mark_as_read()
                        self.reddit.send_message(message.author.name, 'Loss Record - KarmaCourt', "We think you're a new user! Welcome to /r/KarmaCourt! Enjoy your stay!\n\nYour loss has been recorded.")
                    elif attorney != None:
                        print("Adding loss to Attorney - {}".format(message.author.name))
                        attorney.losses += 1
                        if attorney.firm != None:
                            firm = LawFirms.query.filter_by(name=attorney.firm).first()
                            if firm == None:
                                print("Error! User's firm should be an actual firm name! Check user - {}".format(message.author.name))
                            elif firm != None:
                                firm.losses += 1
                                message.mark_as_read()
                                self.reddit.send_message(message.author.name, 'Loss Record - KarmaCourt', 'Your Loss has been recorded for you and your firm! Better luck next time! - /r/KarmaCourt')
                        else:
                            # Fixed: the original had "essage.mark_as_read()"
                            # and "elf.reddit..." (missing first characters),
                            # which raised NameError at runtime.
                            message.mark_as_read()
                            self.reddit.send_message(message.author.name, 'Loss Record - KarmaCourt', 'Your loss has been recorded! - /r/KarmaCourt')
        db.session.commit()
    # Gets AOTM votes from messages. (User must be an active user for vote and to vote. ie: must be created).
    def update_AOTM_votes(self):
        """Record one AOTM vote per active attorney from [AOTM] messages."""
        for message in self.reddit.get_unread():
            if message.subject == "[AOTM]":
                sender = Attorneys.query.filter_by(username=message.author.name).first()
                if sender != None:
                    attorney = Attorneys.query.filter_by(username=message.body).first()
                    if attorney == None:
                        self.reddit.send_message(message.author.name, "Attorney Of The Month Request - KarmaCourt", "The Attorney is not real or not active.\n\n\Make sure to copy the attorney's reddit name without '/u/'")
                        message.mark_as_read()
                    elif attorney != None and sender.has_voted == False:
                        print("AOTM vote recieved for - {} - Adding vote...".format(message.body))
                        sender.has_voted = True
                        attorney.AOTM_votes += 1
                        message.mark_as_read()
                    # NOTE(review): when the sender has already voted, the
                    # message is left unread and will be re-examined on every
                    # run -- confirm whether it should be marked read here.
                elif sender == None:
                    self.reddit.send_message(message.author.name, "Attorney Of The Month Request - KarmaCourt", "You are not an active Attorney; therefore, you cannot vote!")
                    message.mark_as_read()
        db.session.commit()
    # Counts AOTM votes and returns a name. If no choice is made, sends a message to mods for further action.
    def get_new_AOTM(self):
        """Return the winning attorney, or None (and message the mods) on a
        tie. Assumes at least one attorney exists -- Attorneys.query.first()
        returning None would raise below."""
        winners = [Attorneys.query.first()] # Create a list of winners. If there are more than one, send message.
        for attorney in Attorneys.query.all():
            if winners[0].AOTM_votes < attorney.AOTM_votes:
                winners[0] = attorney
        # Now check if there is a tie...
        for attorney in Attorneys.query.filter_by(AOTM_votes=winners[0].AOTM_votes).all():
            if attorney != winners[0]:
                winners.append(attorney)
        # Now comes the decision making. Returns a name is a winner is found, sends a message if not.
        if len(winners) > 1:
            message_subject = "AOTM decision not reached! Need Mod Intervention!! - KarmaCourt"
            message_body = "**---AOTM RESULTS---**\n\n"
            for result in winners:
                message_body += "**{}** has **{}** votes\n\n".format(result.username, result.AOTM_votes)
            self.reddit.send_message('/r/KarmaCourtAttorneys', message_subject, message_body)
            return None # Return none if no winner.
        else:
            return winners[0]
    # Reset all Attorney's AOTM_votes to 0.
    def reset_AOTM_votes(self):
        """Clear AOTM vote counts and voting flags for every attorney."""
        # Fixed: iterate the Attorneys model. The original iterated
        # "db.session.query.all()" -- session.query is a method, so that
        # raised AttributeError at runtime.
        for attorney in Attorneys.query.all():
            attorney.AOTM_votes = 0
            attorney.has_voted = False
        db.session.commit()
    # Gets a request for Attorney data and sends it to a user.
    def get_data_request(self):
        """Answer [DATA_REQUEST] messages with a formatted attorney summary."""
        for message in self.reddit.get_unread():
            if message.subject == "[DATA_REQUEST]":
                attorney = Attorneys.query.filter_by(username=message.body).first()
                if attorney != None:
                    message_subject = "Data Request for {} - KarmaCourt".format(message.body)
                    message_body = ("Data Request for **{username}**:\n\n"
                                    "**---CASE RECORD---**\n\n"
                                    "*{username}* has a case record of **{wins}** and **{losses}**.\n\n".format(username=message.body, wins=attorney.wins, losses=attorney.losses))
                    # Begin the if statements to append the other variable things to the request.
                    message_body += "**---CERTIFICATION---**\n\n"
                    if attorney.certification == None:
                        message_body += "*{username}* doesn't have a certification.\n\n".format(username=message.body)
                    elif attorney.certification != None:
                        message_body += "*{username}* has a **{cert}** certification.\n\n".format(username=message.body, cert=attorney.certification)
                    message_body += "**---LAW FIRM AFFILIATIONS---**\n\n"
                    if attorney.firm == None:
                        message_body += "*{username}* isn't a member of a Law Firm.\n\n".format(username=message.body)
                    elif attorney.firm != None:
                        message_body += "*{username}* is the/a **{pos}** in the Law Firm - **{firm}**\n\n".format(username=message.body, pos=attorney.position, firm=attorney.firm)
                    message_body += "**---SPECIALIZATIONS---**\n\n"
                    if attorney.specialization == None:
                        message_body += "*{username}* doesn't have any known specializations in the Court of Karma.\n\n".format(username=message.body)
                    elif attorney.specialization != None:
                        message_body += "*{username}* has a known specialization in {spec} in the Court of Karma.\n\n".format(username=message.body, spec=attorney.specialization)
                    # Variable stuffs have ended.
                    self.reddit.send_message(message.author.name, message_subject, message_body)
                    message.mark_as_read()
                elif attorney == None:
                    self.reddit.send_message(message.author.name, "Data Request for {} - KarmaCourt".format(message.body), "We couldn't find any data on *{}*. Try checking your spelling.".format(message.body))
                    message.mark_as_read()
|
UTF-8
|
Python
| false | false | 2,014 |
7,060,926,235,437 |
2b1367a1f6e290de644748717cbb1b7821d9758b
|
f001336e287e56ef5cc0dc0a2bd4b369e0ccc69b
|
/stoqlib/gui/dialogs/sintegradialog.py
|
99f355f9306fefc7bce3ba3e81e701c176389ce9
|
[
"LGPL-2.1-only",
"GPL-1.0-or-later",
"GPL-2.0-only",
"LicenseRef-scancode-other-copyleft"
] |
non_permissive
|
romaia/stoq
|
https://github.com/romaia/stoq
|
e9f24331e1c4ddb3c217834c505af222e7d8c66f
|
8088cf3af63ca134e4f6d3d938d580c931fc0eef
|
refs/heads/master
| 2021-01-17T09:35:30.237854 | 2013-03-15T17:18:29 | 2013-03-15T17:18:29 | 8,842,321 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2007 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <[email protected]>
##
""" Sintegra generator dialog """
import datetime
from dateutil.relativedelta import relativedelta
import gtk
from kiwi.ui.dialogs import save
from kiwi.ui.search import DateSearchFilter
from stoqlib.database.queryexecuter import StoqlibQueryExecuter
from stoqlib.domain.system import SystemTable
from stoqlib.gui.base.dialogs import BasicDialog
from stoqlib.lib.dateutils import get_month_names
from stoqlib.lib.message import warning
from stoqlib.lib.sintegra import SintegraError
from stoqlib.lib.sintegragenerator import StoqlibSintegraGenerator
from stoqlib.lib.translation import stoqlib_gettext
_ = stoqlib_gettext
class SintegraDialog(BasicDialog):
    """Dialog that asks the user for a month and generates the corresponding
    Sintegra fiscal file (Python 2 code -- note the old except syntax)."""
    title = _('Fiscal Printer History')
    def __init__(self, store):
        BasicDialog.__init__(self, title=self.title)
        self.justify_label(gtk.JUSTIFY_CENTER)
        self.store = store
        self.ok_button.set_label(_("Generate"))
        # Month-only date filter: fixed options only (no free date entries),
        # one option per month since installation -- see _populate_date_filter.
        self.date_filter = DateSearchFilter(_('Month:'))
        self.date_filter.set_use_date_entries(False)
        self.date_filter.clear_options()
        self._populate_date_filter(self.date_filter)
        self.date_filter.select()
        self.add(self.date_filter)
        self.date_filter.show()
    def confirm(self):
        # Generate the Sintegra file for the selected month; the user picks
        # the output filename first, and a warning is shown on generator error.
        start = self.date_filter.get_start_date()
        end = self.date_filter.get_end_date()
        filename = save(_("Save Sintegra file"),
                        self.get_toplevel(),
                        "sintegra-%s.txt" % (start.strftime('%Y-%m'), ))
        if not filename:
            return
        try:
            generator = StoqlibSintegraGenerator(self.store, start, end)
            generator.write(filename)
        except SintegraError, e:
            warning(str(e))
            return
        self.close()
    #
    # Private
    #
    def _populate_date_filter(self, date_filter):
        # The options we want to show to the users are the following:
        #   'May 2007'
        #   'June 2007'
        #   ...
        #   'September 2008'
        # The earliest month comes from the oldest SystemTable record.
        initial_date = self.store.find(SystemTable).min(
            SystemTable.updated).date()
        # Start is the first day of the month
        # End is the last day of the month
        start = initial_date + relativedelta(day=1)
        end = datetime.date.today() + relativedelta(day=31)
        intervals = []
        while start < end:
            intervals.append((start, start + relativedelta(day=31)))
            start = start + relativedelta(months=1)
        # When we have the list of intervals, add them to the list and
        # make sure that they are translated
        month_names = get_month_names()
        for start, end in intervals:
            # Translators: Do not translate 'month' and 'year'. You can
            #              change it's positions. In the way it is,
            #              it will product for example 'December 2012'
            name = _('{month} {year}').format(
                month=month_names[start.month - 1],
                year=start.year)
            # position=0 keeps the most recent month at the top of the list.
            date_filter.add_option_fixed_interval(
                name, start, end, position=0)
    def _date_filter_query(self, search_table, column):
        # Build a query executer restricted to the filter's selected interval.
        executer = StoqlibQueryExecuter(self.store)
        executer.set_filter_columns(self.date_filter, [column])
        executer.set_table(search_table)
        return executer.search([self.date_filter.get_state()])
|
UTF-8
|
Python
| false | false | 2,013 |
15,814,069,612,059 |
595d2a3a6af6ff15808c3b44367ea13336643309
|
a169f39d4ba4ca37e27ba16380b0b78e32732733
|
/getSmall.py
|
4561faade720323cd8225903166ce2aa311cb4f0
|
[] |
no_license
|
majineu/Scripts
|
https://github.com/majineu/Scripts
|
60b71fe022c7b0bf77bebb696cd2d4fda944a281
|
79e95ece22f02116c91806459ae6340c39fb6e06
|
refs/heads/master
| 2016-09-06T06:00:23.590127 | 2014-05-23T09:40:56 | 2014-05-23T09:40:56 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import sys
import re
# Python 2 stdin filter: keep only every <ratio>-th line.
ratio = int(sys.argv[1])
for i, line in enumerate(sys.stdin):
    # Progress indicator on stderr every 500k lines; '\r' rewrites in place.
    if i % 500000 == 0:
        sys.stderr.write('processing %d line\r' %i)
    # Emit lines whose index is a multiple of ratio; the trailing comma
    # suppresses print's newline (the line keeps its own).
    if i % ratio == 0:
        print line,
|
UTF-8
|
Python
| false | false | 2,014 |
14,860,586,858,273 |
fe4d5d96c1ae005aa9066aae8a6506b41f41f5d0
|
32fb3e7fea7ccc761655e08877872fd67fbc55ad
|
/nanoicq/events.py
|
121d7153b86f9e2a971732ca8c441dac59aaec07
|
[
"MIT"
] |
permissive
|
BackupTheBerlios/nanoicq
|
https://github.com/BackupTheBerlios/nanoicq
|
0f7bca3f79b6ea37777cb9d0904b4cbf60e47a3c
|
4dbada6b69c6563825c8fbfbb73277e04eedc771
|
refs/heads/master
| 2020-05-16T10:03:35.766816 | 2006-11-24T16:14:59 | 2006-11-24T16:14:59 | 40,045,420 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#
# $Id: events.py,v 1.21 2006/08/28 15:25:51 lightdruid Exp $
#
import wx
# One (event type, binder) pair per application event, following the standard
# wxPython custom-event pattern: wx.NewEventType() allocates a unique id and
# wx.PyEventBinder(type, 1) builds the EVT_* object handlers Bind() with.
nanoEVT_REGISTRATION_ERROR = wx.NewEventType()
EVT_REGISTRATION_ERROR = wx.PyEventBinder(nanoEVT_REGISTRATION_ERROR, 1)
nanoEVT_AUTHORIZATION_GRANTED = wx.NewEventType()
EVT_AUTHORIZATION_GRANTED = wx.PyEventBinder(nanoEVT_AUTHORIZATION_GRANTED, 1)
nanoEVT_ADD_USER_TO_LIST_BY_NAME = wx.NewEventType()
EVT_ADD_USER_TO_LIST_BY_NAME = wx.PyEventBinder(nanoEVT_ADD_USER_TO_LIST_BY_NAME, 1)
nanoEVT_OFFLINE_MESSAGES = wx.NewEventType()
EVT_OFFLINE_MESSAGES = wx.PyEventBinder(nanoEVT_OFFLINE_MESSAGES, 1)
nanoEVT_AUTHENTIFICATION_REQUEST = wx.NewEventType()
EVT_AUTHENTIFICATION_REQUEST = wx.PyEventBinder(nanoEVT_AUTHENTIFICATION_REQUEST, 1)
nanoEVT_GOT_USER_INFO = wx.NewEventType()
EVT_GOT_USER_INFO = wx.PyEventBinder(nanoEVT_GOT_USER_INFO, 1)
nanoEVT_REQUEST_USER_INFO = wx.NewEventType()
EVT_REQUEST_USER_INFO = wx.PyEventBinder(nanoEVT_REQUEST_USER_INFO, 1)
nanoEVT_USER_DELETE = wx.NewEventType()
EVT_USER_DELETE = wx.PyEventBinder(nanoEVT_USER_DELETE, 1)
nanoEVT_GOT_NEW_UIN = wx.NewEventType()
EVT_GOT_NEW_UIN = wx.PyEventBinder(nanoEVT_GOT_NEW_UIN, 1)
nanoEVT_START_REGISTER = wx.NewEventType()
EVT_START_REGISTER = wx.PyEventBinder(nanoEVT_START_REGISTER, 1)
nanoEVT_GOT_CAPTCHA = wx.NewEventType()
EVT_GOT_CAPTCHA = wx.PyEventBinder(nanoEVT_GOT_CAPTCHA, 1)
nanoEVT_SEND_CAPTCHA_TEXT = wx.NewEventType()
EVT_SEND_CAPTCHA_TEXT = wx.PyEventBinder(nanoEVT_SEND_CAPTCHA_TEXT, 1)
nanoEVT_DIALOG_CLOSE = wx.NewEventType()
EVT_DIALOG_CLOSE = wx.PyEventBinder(nanoEVT_DIALOG_CLOSE, 1)
nanoEVT_MESSAGE_PREPARE = wx.NewEventType()
EVT_MESSAGE_PREPARE = wx.PyEventBinder(nanoEVT_MESSAGE_PREPARE, 1)
nanoEVT_SEND_MESSAGE = wx.NewEventType()
EVT_SEND_MESSAGE = wx.PyEventBinder(nanoEVT_SEND_MESSAGE, 1)
nanoEVT_INCOMING_MESSAGE = wx.NewEventType()
EVT_INCOMING_MESSAGE = wx.PyEventBinder(nanoEVT_INCOMING_MESSAGE, 1)
nanoEVT_BUDDY_STATUS_CHANGED = wx.NewEventType()
EVT_BUDDY_STATUS_CHANGED = wx.PyEventBinder(nanoEVT_BUDDY_STATUS_CHANGED, 1)
nanoEVT_MY_STATUS_CHANGED = wx.NewEventType()
EVT_MY_STATUS_CHANGED = wx.PyEventBinder(nanoEVT_MY_STATUS_CHANGED, 1)
nanoEVT_SEARCH_BY_UIN = wx.NewEventType()
EVT_SEARCH_BY_UIN = wx.PyEventBinder(nanoEVT_SEARCH_BY_UIN, 1)
nanoEVT_SEARCH_BY_EMAIL = wx.NewEventType()
EVT_SEARCH_BY_EMAIL = wx.PyEventBinder(nanoEVT_SEARCH_BY_EMAIL, 1)
nanoEVT_SEARCH_BY_NAME = wx.NewEventType()
EVT_SEARCH_BY_NAME = wx.PyEventBinder(nanoEVT_SEARCH_BY_NAME, 1)
nanoEVT_RESULT_BY_UIN = wx.NewEventType()
EVT_RESULT_BY_UIN = wx.PyEventBinder(nanoEVT_RESULT_BY_UIN, 1)
# Add user to list after search (FindUser.py)
nanoEVT_ADD_USER_TO_LIST = wx.NewEventType()
EVT_ADD_USER_TO_LIST = wx.PyEventBinder(nanoEVT_ADD_USER_TO_LIST, 1)
# Unable to add user to list after search (user with this UIN already in list, etc.)
nanoEVT_UNABLE_ADD_USER_TO_LIST = wx.NewEventType()
EVT_UNABLE_ADD_USER_TO_LIST = wx.PyEventBinder(nanoEVT_UNABLE_ADD_USER_TO_LIST, 1)
class NanoEvent(wx.PyCommandEvent):
    """Generic command event carrying a single payload value (see setVal)."""
    def __init__(self, evtType, id):
        wx.PyCommandEvent.__init__(self, evtType, id)
        self._val = None
    def __del__(self):
        # NOTE(review): the base __del__ is chained explicitly; presumably
        # required by the old wxPython bindings this targets -- confirm.
        wx.PyCommandEvent.__del__(self)
    def setVal(self, val):
        # Attach the payload this event carries.
        self._val = val
    def getVal(self):
        # Return the payload set via setVal() (None if never set).
        return self._val
# ---
|
UTF-8
|
Python
| false | false | 2,006 |
12,094,627,943,032 |
eeb0f86b6cdc7de561dd080a7ab85d35ab6e24a4
|
95dfe15740adc448fb434fcc07e2be364791c191
|
/kitten/node.py
|
a361e955f341386e7a47c08178eea179d7a86540
|
[
"MIT"
] |
permissive
|
thiderman/kitten
|
https://github.com/thiderman/kitten
|
8657e57c6edd3591bd3b4fd2e7ca60a781edb984
|
5a72f9dcee994077483a801d4bc03a013cf91186
|
refs/heads/master
| 2015-07-23T21:13:09 | 2014-03-26T00:13:26 | 2014-03-26T00:13:26 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import re
import datetime
import logbook
from sqlalchemy import Column
from sqlalchemy import DateTime
from sqlalchemy import Integer
from sqlalchemy import String
from kitten import conf
from kitten.db import Session
from kitten.db import Base
from kitten.paradigm import Paradigm
from kitten.paradigm import annotate
from kitten.validation import Validator
class NodeValidator(Validator):
    """JSON-schema fragments for the node paradigm's request/response pairs."""
    def ping_request(self):
        # A ping carries no payload at all.
        return {}
    def ping_response(self):
        # The reply is just a status code drawn from a fixed set.
        schema = {
            'code': {
                'enum': ['OK', 'FAILED'],
            }
        }
        return schema
    def sync_request(self):
        # A sync request ships a flat list of node address strings.
        address_list = {
            'type': 'array',
            'items': {
                'type': 'string',
            },
        }
        return {'nodes': address_list}
    def sync_response(self):
        # The response mirrors the request: a flat list of address strings.
        address_list = {
            'type': 'array',
            'items': {
                'type': 'string',
            },
        }
        return {'nodes': address_list}
class NodeParadigm(Paradigm):
    """Message handlers for node-to-node ping and sync exchanges."""
    validator = NodeValidator()
    @annotate
    def ping_request(self, request):
        # Pings carry no payload; pass the request through unchanged.
        return request
    @annotate
    def ping_response(self, request):
        return {
            'code': 'OK'
        }
    @annotate
    def sync_request(self, request):
        # The node list is assembled by the caller (Node.sync).
        return request
    @annotate
    def sync_response(self, request):
        """
        Process a request to sync
        Return all nodes present in local database but not provided in request,
        and create new nodes for all nodes present in request but not in local
        database.
        """
        session = Session()
        nodes = set(request['nodes'])
        own = set(n.address for n in session.query(Node).all())
        # Set difference in both directions: addresses the requester lacks,
        # and addresses we lack.
        to_requester = list(own - nodes)
        to_me = list(nodes - own)
        # This should be in a greenlet
        for address in to_me:
            Node.create(address, True)
        session.close()
        return {
            'nodes': sorted(to_requester),
        }
class Node(Base):
    """A known peer node, persisted in the local database."""
    __tablename__ = 'node'
    paradigm = NodeParadigm()
    id = Column(Integer(), primary_key=True)
    address = Column(String(255))
    created = Column(DateTime, default=datetime.datetime.now)
    last_seen = Column(DateTime, default=datetime.datetime.now)
    log = logbook.Logger('Node')
    def __init__(self, address):
        self.address = address
    def __str__(self):  # pragma: nocover
        return '<Node: {0.address}>'.format(self)
    def repr(self):  # pragma: nocover
        # NOTE(review): non-dunder on purpose? execute_parser() calls it
        # directly -- confirm before renaming to __repr__.
        return self.__str__()
    @staticmethod
    def create(address, sync=False):
        """
        Create a new node

        Try connecting to the node to see if it is eligible for adding.
        """
        session = Session()
        # try/finally fixes a session leak: the original returned early on
        # the "already exists" branch without ever calling session.close().
        try:
            # If no port is specified, make sure to add the default.
            if not re.search(r':\d+', address):
                address += ':{0}'.format(conf.DEFAULT_PORT)
            con = Node(address)
            q = session.query(Node).filter(Node.address == address)
            if session.query(q.exists()).scalar():
                con.log.error('{0} already exists.'.format(con))
                return
            if con.ping():
                session.add(con)
                session.commit()
                con.log.info('{0} added.'.format(con))
                if sync:
                    con.sync()
            else:
                con.log.error('Could not connect to {0}.'.format(con))
        finally:
            session.close()
    @staticmethod
    def list():
        """
        Return a list of all nodes
        """
        # NOTE(review): the session is deliberately left open so the returned
        # instances stay attached -- confirm callers rely on this.
        session = Session()
        return session.query(Node).all()
    def message(self, request):
        """Send *request* to this node's address and return the response."""
        return self.paradigm.send(self.address, request)
    def ping(self):
        """
        Send a quick ping heartbeat to the node

        Returns boolean success.
        """
        request = self.paradigm.ping_request({})
        response = self.message(request)
        return response['code'] == 'OK'
    def sync(self):
        """
        Sync the node list with another node
        """
        # TODO: Currently not sending to self. Fix this properly.
        nodes = [n.address for n in Node.list() if n.address != self.address]
        request = self.paradigm.sync_request({
            'nodes': nodes,
        })
        response = self.message(request)
        # Recursively add (and in turn sync with) every node we did not know.
        for address in response['nodes']:
            Node.create(address, True)
def setup_parser(subparsers):
    """Register the ``node`` CLI command, its sub-commands and their
    arguments; returns the matching executor callback."""
    parser = subparsers.add_parser('node', help="List, add or modify nodes.")
    commands = parser.add_subparsers(help='Node commands', dest="sub")
    # list (the default action): optional substring filter on the address.
    cmd_list = commands.add_parser('list', help='List nodes (default)')
    cmd_list.add_argument('--filter', type=str)
    # add: register a new node by address.
    cmd_add = commands.add_parser('add', help='Add a node')
    cmd_add.add_argument('address', type=str)
    # remove: drop a node by name.
    cmd_remove = commands.add_parser('remove', help='Remove a node')
    cmd_remove.add_argument('name', type=str)
    return execute_parser
def execute_parser(ns):
    """Execute one 'node' subcommand from a parsed argparse namespace.

    Defaults to listing when no subcommand was given.
    """
    if not ns.sub or ns.sub == "list":
        src = Node.list()
        # If a filter is specified, apply it to the display name
        if hasattr(ns, 'filter') and ns.filter:
            src = list(filter(lambda x: ns.filter in x.address, src))
        for con in src:
            print(con.repr())
    elif ns.sub == 'add':
        Node.create(ns.address, True)
    # NOTE(review): setup_parser also registers a 'remove' subcommand, but no
    # branch here handles it — confirm whether removal is implemented
    # elsewhere or simply missing.
|
UTF-8
|
Python
| false | false | 2,014 |
8,744,553,461,815 |
bd3776fbbd8294837b707f82cc6985e3cf10d9c6
|
0cb04755a1cf58e78853df935f406f637bc88dc1
|
/auth/templatetags/gravatar.py
|
48e932ba8903ba5337efc53e727abf711a5e5410
|
[] |
no_license
|
matts1/schoolsite
|
https://github.com/matts1/schoolsite
|
1afc9ef518e0ba3e167fae195293ebc18ad2a549
|
84f5c5fda35168decd6fead22ef52b05152f5830
|
refs/heads/master
| 2016-09-06T16:04:17.893991 | 2013-10-24T20:57:32 | 2013-10-24T20:57:32 | 12,754,043 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.template import Library
import urllib, hashlib
register = Library()
SIZE = 64
def gravatar(context):
    """Render a gravatar <a>/<img> snippet for the logged-in user.

    Python 2 code: hashlib.md5 is fed a str and urllib.urlencode is used.
    """
    email = context['user'].email
    digest = hashlib.md5(email.lower()).hexdigest()
    query = urllib.urlencode({'d':'identicon', 's':str(SIZE)})
    gravatar_url = 'http://www.gravatar.com/avatar/' + digest + '?' + query
    return (
        '<a class="gravatar" href="http://en.gravatar.com/">'
        '<img src="%s">'
        '<div class="image_overlay">Change Picture</div>'
        '</a>') % gravatar_url
register.simple_tag(gravatar, takes_context=True)
|
UTF-8
|
Python
| false | false | 2,013 |
13,357,348,312,509 |
e9c8beff1d312db65211667efdec3b18f52e0c99
|
7ec6874c3a8bb040fe5e866fc617c8fd9f17c60e
|
/bin/backtest.py
|
2c8e19d3a63a5dce0875d86d89e88edcb9957523
|
[] |
no_license
|
ongbe/hedgeit
|
https://github.com/ongbe/hedgeit
|
2613c2b239893601e8a73fbd85041f56a42180f6
|
be232a5a3f87b176024b849b6ea8dd1608e48b9c
|
refs/heads/master
| 2021-01-21T08:46:31.611393 | 2013-11-09T03:46:12 | 2013-11-09T03:46:12 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
'''
Created on Apr 26, 2013
@author: rtw
'''
import sys
from hedgeit.common.logger import getLogger
import getopt
from datetime import datetime
import json
from hedgeit.control.controller import Controller
from hedgeit.feeds.db import InstrumentDb
Log = getLogger(__name__)
def usage():
    # Print the command-line help text (Python 2 print statement).
    print '''
usage: backtest.py <manifest> <sector-map> <feedStart> <tradeStart> <tradeEnd>
Options:
-h : show usage
-c <number> : set the starting (per sector) equity (default = 1000000)
-p <parms> : model parameters. Formatted as comma separated list of
key=value pairs. ex.
atrPeriod=100,period=7
-t <model> : model type ('breakout', 'macross', 'rsireversal',
'split7s', 'connorsrsi', default = 'breakout')
-g : no compounding of equity
--tssb <name>: write out two files for tssb consumption - <name>_long.csv
and <name>_short.csv containing long and short trades
respectively.
--dump <symbol>: write a <symbol>.csv for the specified symbol that
contains the full data feed included indicators calculated
by the strategy.
manifest : file containing information on tradable instruments. The file
is CSV format - see hedgeit.feeds.db for information
sector-map : file containing JSON specification for sectors. Must be
of the form:
{ '<sector1>' : ['<symbol1>','<symbol2'>,<'symbolN'>],
. . .
'<sectorN>' : ['<symbol1>','<symbol2'>,<'symbolN'>] }
feedStart : datetime to start feeds (needs to be early enough to
establish opening position on tradeStart)
tradeStart: datetime to start tracking real trade performance
tradeEnd : datetime to stop backtest
'''
def parseParmString(str_):
    """Parse 'key=value,key=value' into a dict.

    Values go through eval(), so they may be any Python literal/expression.
    SECURITY NOTE: eval on a command-line string — acceptable for a local
    tool, never for untrusted input.
    """
    parsed = {}
    for chunk in str_.strip().split(','):
        name, value = chunk.strip().split('=')
        parsed[name] = eval(value)
    return parsed
def main(argv=None):
    """Entry point for the backtest script (Python 2).

    Parses getopt options, loads the instrument manifest and sector map,
    runs the Controller over [feedStart, tradeEnd] and writes the result
    CSV files into the current directory.
    NOTE(review): the *argv* parameter is ignored; sys.argv is always used.
    """
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hc:p:t:g", ["tssb=","dump="])
    except getopt.GetoptError as err:
        # print help information and exit:
        print str(err) # will print something like "option -a not recognized"
        usage()
        sys.exit(2)
    # Option defaults.
    cash = 1000000
    type_ = 'breakout'
    compounding = True
    tssb = None
    parms = None
    dump = None
    for o, a in opts:
        if o == "-c":
            cash = float(a)
            Log.info('Setting initial cash position to: %0.2f' % cash)
        elif o == "-p":
            parms = parseParmString(a)
            Log.info('Using model parms: %s' % parms)
        elif o == "-t":
            type_ = a
            Log.info('Using model %s' % type_)
        elif o == "-g":
            compounding = False
            Log.info('Compounding disabled')
        elif o == "--tssb":
            tssb = a
            Log.info('Writing tssb files with base %s' % tssb)
        elif o == "--dump":
            dump = a
            Log.info('Will output data feed to %s.csv' % dump)
        else:
            usage()
            return
    if len(args) != 5:
        Log.error('Not enough arguments to backtest!')
        usage()
        sys.exit(1)
    # Positional arguments: manifest CSV, sector map JSON, three ISO dates.
    manifest = args[0]
    sectormap = json.load(open(args[1]))
    feedStart = datetime.strptime(args[2], '%Y-%m-%d')
    tradeStart = datetime.strptime(args[3], '%Y-%m-%d')
    tradeEnd = datetime.strptime(args[4], '%Y-%m-%d')
    InstrumentDb.Instance().load(manifest)
    # Fixed output file names, written into the current directory.
    plog = 'positions.csv'
    elog = 'equity.csv'
    rlog = 'returns.csv'
    slog = 'summary.csv'
    ctrl = Controller(sectormap,
                      modelType = type_,
                      cash = cash,
                      tradeStart = tradeStart,
                      compounding = compounding,
                      positionsFile = plog,
                      equityFile = elog,
                      returnsFile = rlog,
                      summaryFile = slog,
                      parms = parms
                      )
    ctrl.run(feedStart, tradeStart, tradeEnd)
    if dump:
        ctrl.dumpFeed(dump)
    tlog = 'trades.csv'
    ctrl.writeAllTrades(tlog)
    if tssb:
        ctrl.writeTSSBTrades(tssb)
    alog = 'alerts.csv'
    ctrl.writePositionAlerts(alog)
    Log.info('Net return : %0.1f%%' % (ctrl.net_return() * 100.0))
    Log.info('Max drawdown : -%0.1f%%' % (ctrl.drawdown().getMaxDrawDown() * 100.0))
# Script entry point; process exit status is main()'s return value.
if __name__ == "__main__":
    sys.exit(main())
|
UTF-8
|
Python
| false | false | 2,013 |
6,863,357,741,664 |
981ac77776b1b9b62e55c57d3c659d18665d5adf
|
5ec8409b2930c43bf787ab6083193259ab7fdf6e
|
/download.py
|
f1afb21cf857e0ae9bd5dffa72dded53240802a8
|
[] |
no_license
|
therudite/dlf
|
https://github.com/therudite/dlf
|
4159b447eaaf60da76143d38f5636b63831eaa27
|
9d5934d19a6cce4e68152085ccd992a092480b3e
|
refs/heads/master
| 2021-01-19T14:57:27.953682 | 2013-11-16T21:43:53 | 2013-11-16T21:43:53 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import sys
from threading import Thread
from urllib.request import urlopen
class Down:
    """Minimal HTTP downloader; call dow() once per file."""
    def __init__(self):
        pass

    def dow(self, link):
        """Download *link* into the current directory, named after the URL tail.

        Bug fix vs. the original: the HTTP response and the output file are
        now closed via context managers (the original leaked both handles,
        and never closed the file even on success).
        """
        filename = link.split('/')[-1]
        with urlopen(link) as response:
            meta = response.info()
            filesize = int(meta["Content-Length"])
            print("Downloading: %s Bytes: %s"%(filename,filesize))
            filesizedl = 0
            blocksz = 8192
            with open(filename, 'wb') as f:
                while True:
                    buffer = response.read(blocksz)
                    if not buffer:
                        break
                    filesizedl += len(buffer)
                    f.write(buffer)
                    #status=filename+r" %10d [%3.2f]"%(filesizedl,filesizedl*100./filesize)
                    #status = status + chr(8)*(len(status)+1)
                    #print(status,end='')
        print("%s download complete!"%(filename))
def main():
    """Download every URL given on the command line, one thread per file."""
    downloader = Down()
    workers = []
    for url in sys.argv[1:]:
        worker = Thread(target=downloader.dow, args=(url,))
        worker.start()
        workers.append(worker)
    # Wait for all downloads to finish before returning.
    for worker in workers:
        worker.join()
# Script entry point: URLs are taken from the command line.
if __name__=='__main__':
    main()
|
UTF-8
|
Python
| false | false | 2,013 |
12,687,333,436,145 |
e8fa4ed8c290ec2f385189e30b118a46d1cbd228
|
fa28a28e771d9405b0a3766dd5d4641fa4300c2e
|
/Flask/utils/decorators/__init__.py
|
028f224e1b05a10c83ada7fe7e1bf61090548302
|
[
"MIT"
] |
permissive
|
vtemian/TodoAPI
|
https://github.com/vtemian/TodoAPI
|
3141095b0bc6124455f7e95d52192cf3aa97f111
|
47c9503eba85459bce7701018c9dbdd36d6466ab
|
refs/heads/master
| 2016-03-26T08:40:55.292876 | 2013-10-29T13:35:30 | 2013-10-29T13:35:30 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from .require import require
|
UTF-8
|
Python
| false | false | 2,013 |
5,686,536,743,007 |
7e52770e4adf44f4ebff1793823a401666d2d6ba
|
df4a16d6c4b43f01389e0c7db2aad4a572dfd534
|
/elena.podgornova/nw.py
|
e4d696a561ebf69b1dcecabb535daab5a68d9163
|
[] |
no_license
|
anton-bannykh/ml-2013
|
https://github.com/anton-bannykh/ml-2013
|
89258ed790c51eff89c1ecd048c833745066cd22
|
4274dc67202294fd105123e50bfc82a60f3b6400
|
refs/heads/master
| 2020-12-29T02:39:01.007202 | 2014-06-18T13:42:37 | 2014-06-18T13:42:37 | 12,725,449 | 4 | 3 | null | false | 2020-10-13T07:30:41 | 2013-09-10T09:21:44 | 2018-12-15T09:17:02 | 2020-10-13T07:30:40 | 35,598 | 4 | 25 | 5 |
Python
| false | false |
import numpy
import math
import random
g = 0.8
def h(x):
    """Elementwise logistic sigmoid 1/(1+exp(-x)).

    Accepts any 1-D sequence/array; returns a float numpy array of the same
    length.  Vectorized replacement for the original per-element math.exp
    loop; also avoids OverflowError for large-magnitude inputs (numpy
    saturates to 0.0/1.0 instead of raising).
    """
    values = numpy.asarray(x, dtype=float)
    return 1.0 / (1.0 + numpy.exp(-values))
def net(w, o):
    """Layer pre-activation vector: res[j] = sum_i w[i][j] * o[i].

    w : (n_in x n_out) weight matrix as a list of rows (or array)
    o : input vector of length n_in
    Equivalent to the vector-matrix product o . w, computed in C by
    numpy.dot instead of the original O(n_in * n_out) Python loops.
    """
    return numpy.dot(o, w)
def train(x, y, c):
    """Train a one-hidden-layer perceptron with batch backpropagation.

    x : list of feature vectors (mutated in place: a bias 1 is appended)
    y : labels, apparently in {-1, +1} (mapped to {0, 1} below)
    c : number of hidden units
    Returns (w1, w2): input->hidden and hidden->output weight matrices.
    """
    n = len(x[0])
    # Random initial weights; the +1 row is for the bias input.
    w1 = [[random.random() for j in range(c)]for i in range(n + 1)]
    w2 = [[random.random()] for i in range(c + 1)]
    for j in range(len(x)):
        x[j] = numpy.append(x[j], [1])
    for times in range(200):  # fixed epoch count
        dw1 = [numpy.zeros(c) for i in range(n + 1)]
        dw2 = [0 for i in range(c + 1)]
        for t in range(len(x)):
            # Forward pass (o1 gets a bias unit appended).
            o1 = h(net(w1, x[t]))
            o1 = numpy.append(o1, 1.0)
            o2 = h(net(w2, o1))
            # Output delta; label mapped {-1,1} -> {0,1}.
            d2 = -o2[0] * (1. - o2[0]) * (o2[0] - (y[t] + 1.)/2.)
            # print o2[0], y[t], d2
            d1 = numpy.zeros(c)
            for i in range(c):
                d1[i] = o1[i] * (1. - o1[i]) * w2[i][0] * d2
            # Accumulate batch gradients; g (module constant) is the learning rate.
            for i in range(c + 1):
                dw2[i] += g * o1[i] * d2
            for i in range(n + 1):
                for j in range(c):
                    dw1[i][j] += g * x[t][i] * d1[j]
        # Apply the accumulated updates once per epoch.
        for i in range(c + 1):
            w2[i][0] += dw2[i]
        for i in range(n + 1):
            for j in range(c):
                w1[i][j] += dw1[i][j]
    return w1, w2
def test(x, y, w1, w2):
    """Evaluate the trained network; returns the two ratios used as
    (p, r) by get_c's F1 computation.

    NOTE(review): the while-guard rebuilds the confusion matrix when a
    degenerate split occurs, but nothing changes between iterations (the
    network is deterministic), so a degenerate prediction set would loop
    forever — confirm intent.  Also shadows the builtin name `test`.
    """
    for j in range(len(x)):
        x[j] = numpy.append(x[j], [1])  # append bias input
    # res is a 2x2 confusion matrix: res[actual][predicted], index 0 == class +1.
    res = [numpy.zeros(2),numpy.zeros(2)]
    while res[0][0] + res[0][1] == 0 or res[0][0] + res[1][0] == 0:
        res = [numpy.zeros(2),numpy.zeros(2)]
        for t in range(len(x)):
            o1 = h(net(w1, x[t]))
            o1 = numpy.append(o1, [1])
            o2 = h(net(w2, o1))
            # Threshold the sigmoid output at 0.5.
            if o2[0] > 0.5:
                tr = 1
            else:
                tr = -1
            if y[t] == 1:
                if tr == 1:
                    res[0][0]+=1
                else :
                    res[0][1]+=1
            else:
                if tr == 1:
                    res[1][0]+=1
                else:
                    res[1][1]+=1
    # numpy.zeros gives floats, so these divisions are float even on Python 2.
    return res[0][0]/(res[0][0] + res[0][1]), res[0][0]/(res[0][0] + res[1][0])
def get_c(x, y):
    """Pick the hidden-layer size c in {10, 20, 30, 40} that maximizes F1
    on a 50/50 holdout split.  Falls back to 5 if nothing beats f1 == 0.

    NOTE(review): if test() ever returns p == r == 0 the F1 expression
    divides by zero — confirm whether that can happen given test()'s guard.
    """
    bc = 5
    bf1 = 0
    b = int(len(y) * 0.5)
    train_x, train_y, test_x, test_y = x[:b], y[:b], x[b:], y[b:]
    c = 10
    while c <= 40:
        # Copy the feature lists because train()/test() mutate them in place.
        w1, w2 = train(train_x[:], train_y, c)
        p, r = test(test_x[:], test_y, w1, w2)
        f1 = 2 * p * r/ (p + r)
        if f1 > bf1:
            bf1, bc = f1, c
        # print p, r
        # print f1
        c += 10
    return bc
|
UTF-8
|
Python
| false | false | 2,014 |
7,773,890,830,399 |
a7a4d583f69a864051f5abb0e6d8292e54c83333
|
a22609e931f79e97c753524f97173eb8300937e1
|
/feature_extractor/feature_extractor.py
|
76ddb64b252a9956d36e7560047fa7802adb553a
|
[] |
no_license
|
claytonlemons/AED
|
https://github.com/claytonlemons/AED
|
ce53bee5ff3d8a483f599177708f611d6f8e3ba1
|
0693da2f153e7bca9d6de27ee236df93c60b7e9c
|
refs/heads/master
| 2021-05-26T22:03:16.083428 | 2013-04-11T01:12:05 | 2013-04-11T01:12:05 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
from pprint import pprint
import numpy
import mdp
import cv
import pylab
import os
from os import path
from mlboost.core.ppdataset import Dataset
from optparse import OptionParser
parser = OptionParser(description=__doc__)
parser.add_option("-f", dest="filename", default = None, help="filename of the data")
parser.add_option("-s", dest="show", default=False, action='store_true', help="show first two dimensions of new dataset")
parser.add_option("-a", dest="algorithm", default="lle", help="set algorithm [lle, hlle]")
parser.add_option("-k", dest="k", type=int, help="number of nearest neighbors to use")
parser.add_option("-d", dest="outputDimensions", default=2, type=float, help="number of dimensions to output")
parser.add_option("-o", dest="outputFile", default="output.arff", help="the file name for the arff output")
parser.add_option("-n", dest="numberOfInstances", help="the number of instances to use from the dataset")
options, args = parser.parse_args()
COLORS = ["r","b","g","y","m","c","b"]
LABELS = ["happy", "sad", "angry", "surprised", "scared", "disgusted", "neutral"]
def generateARFF(result, labels, output):
    """Write the embedded dataset as a Weka ARFF file.

    result : 2-D array (instances x dimensions)
    labels : column vector of numeric indices into the module-level LABELS
    output : writable file-like object
    """
    output.write("@RELATION emotion\n\n")
    dimensions = len(result[0,:])
    for index in range(dimensions):
        output.write("@ATTRIBUTE Dimension_%s CONTINUOUS\n" % index)
    output.write("@ATTRIBUTE Emotion {%s}\n" % ", ".join(LABELS))
    output.write("\n@DATA\n")
    instances = len(result[:,0])
    for row in range(instances):
        values = ", ".join(map(str, result[row]))
        output.write("%s, %s\n" % (values, LABELS[int(labels[row, 0])]))
print "Loading data set..."
dataset = numpy.loadtxt(open(options.filename, 'rb'), delimiter=",")
print "Data set loaded.\n"
# Split out label column
labels = dataset[:,[len(dataset[0]) - 1]]
dataset = dataset[:,0:len(dataset[0]) - 1]
originalNumberOfInstances = len(dataset[:,0])
if options.numberOfInstances:
labels = labels[0:int(options.numberOfInstances), :]
dataset = dataset[0:int(options.numberOfInstances), :]
# The default value for k is the number of instances minus 2, the maximum possible value of k
if not options.k:
options.k = len(dataset[:,0]) - 1
print "Using %d nearest neighbors." % options.k
print "Using %d out of %d instances." % (len(dataset[:,0]), originalNumberOfInstances)
print "Source Dimensionality: %d; Target Dimensionality %d.\n" % (len(dataset[0,:]), options.outputDimensions)
if options.algorithm == 'lle':
print "Running LLE..."
lle = mdp.nodes.LLENode(options.k, output_dim=options.outputDimensions)
result = lle(dataset)
elif options.algorithm == 'hlle':
print "Running HLLE..."
hlle = mdp.nodes.HLLENode(options.k, output_dim=int(options.outputDimensions))
result = hlle(dataset)
print "Finished running.\n"
print "Final Dimensionality: %d.\n" % len(result[0,:])
print "Generating ARFF files..."
with open(options.outputFile, "w") as output:
generateARFF(result, labels, output)
print "Finished generating ARFF files.\n"
# Show the first two dimensions of the new data set.
if options.show:
print "First Two Dimensions of Final Result:"
x, y = numpy.append(result[:,0:1], labels, 1), numpy.append(result[:,1:2], labels, 1)
for i, (a, b) in enumerate(zip(x, y)):
print "%s, %s" % (a, b)
pylab.title("Result")
xlabel = 'dim 1'
ylabel = 'dim 2'
for i in range(0, 7):
x_points = x[x[:,1] == i]
y_points = y[y[:,1] == i]
pylab.scatter(x_points[:,0], y_points[:,0], c=COLORS[i])
pylab.xlabel(xlabel)
pylab.ylabel(ylabel)
pylab.show()
|
UTF-8
|
Python
| false | false | 2,013 |
18,373,870,112,060 |
5765f68da59c85683078e433ed280e79f07bc662
|
efc31e04d47f326850d8b3b5fe7151006e826817
|
/SConstruct
|
a5782ea41422b394ad4de8f8abaf79ca6b6cc042
|
[
"GPL-2.0-only"
] |
non_permissive
|
foucault/husher
|
https://github.com/foucault/husher
|
087446b0127677a5e5b4d5b51a65bc0c43749604
|
1f9c0d99cad673b2dc74a53c5173ba3396fd635b
|
refs/heads/master
| 2018-01-07T12:41:24.664778 | 2010-11-28T10:01:02 | 2010-11-28T10:01:02 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import platform, os, glob, time, commands, shutil
from site_scons.site_tools.smartinstall import *
PROGRAM = "husher"
SRCDIR = "./src"
DOCDIR = "./doc"
DATADIR = "./data"
UIDIR = DATADIR
warning = """/**
* Autogenerated by SCons. DO NOT EDIT unless you know what you're doing!
**/ \n"""
def CheckPKGConfig(context, version):
    """SCons configure check: is pkg-config >= *version* available?

    Returns the TryAction status (truthy on success).
    """
    context.Message('Checking for pkg-config... ')
    status = context.TryAction('pkg-config --atleast-pkgconfig-version=%s' % version)[0]
    context.Result(status)
    return status
def CheckPKG(context, name, version):
    """SCons configure check: does package *name* >= *version* exist
    according to pkg-config?  Returns the TryAction status.
    """
    context.Message('Checking for package %s... ' % name)
    status = context.TryAction('pkg-config --exists \'%s >= %s\'' % (name,version))[0]
    context.Result(status)
    return status
# This generates the config header, which might be of some use depending
# on the project
def WriteConfigHeader(header_file,defs):
    """Write a config.h-style header with one '#define <entry>' per item
    in *defs*, wrapped in an include guard and prefixed by the module-level
    autogeneration *warning* banner.

    Bug fix: uses a with-block so the handle is closed even when a write
    fails (the original leaked it on error); also stops shadowing the
    Python 2 builtin name `file`.
    """
    with open(header_file, 'w') as header:
        header.write(warning)
        header.write('#ifndef CONFIG_H_LOADED'+"\n")
        header.write('#define CONFIG_H_LOADED 1'+"\n\n")
        for elem in defs:
            header.write('#define ' + elem + "\n")
        header.write("\n")
        header.write('#endif'+"\n")
def CopyDependency(target,source,env):
    """SCons action: copy the first source node onto the first target node.

    *env* is accepted (SCons action signature) but unused.  Returns None,
    which SCons treats as success.
    """
    origin = str(source[0])
    destination = str(target[0])
    shutil.copy(origin, destination)
    return None
BDIR_DEBUG = "Debug_"+platform.machine()+"-"+platform.system()+"/"
BDIR_RELEASE = "Release_"+platform.machine()+"-"+platform.system()+"/"
BDIR = ""
vars = Variables()
vars.Add('DEBUG', 'Set for debug (default=1)', 1)
vars.Add('THREADS','Number of threads to use (default=1)',1)
env = Environment(variables = vars)
Help(vars.GenerateHelpText(env))
env['package'] = PROGRAM
env.Tool('smartinstall')
if env['DEBUG'] == 1:
env.Append( CPPFLAGS = '-g')
BDIR = BDIR_DEBUG
else:
env.Append( CPPFLAGS = '-O2' )
BDIR = BDIR_RELEASE
env.AppendUnique(CPPFLAGS = ['-pedantic',
'-Wall',
'-W',
'-Wundef',
'-Wno-long-long'])
# Read all cpp files from SOURCE directory. Should be OK for a general case
sources = glob.glob(SRCDIR+"/*.cpp")
# Transform sources to reflect the BUILD directory
sources = [BDIR+os.path.basename(elem) for elem in sources]
# Initialise the list to hold the definitions for config.h
config_success = []
# Initialise the list to hold the pkg-config packages to compile with
packages = []
conf = Configure(env, custom_tests = { 'CheckPKGConfig' : CheckPKGConfig,
'CheckPKG' : CheckPKG })
env.Clean("distclean",
[
".sconsign.dblite",
".sconf_temp",
"config.log",
SRCDIR+"/"+"config.h",
BDIR_DEBUG,
BDIR_RELEASE,
DOCDIR
])
################### BEGIN CONFIGURATION ###################
if not (env.GetOption('clean') or env.GetOption('help') ):
if not conf.CheckPKGConfig('0.15.0'):
print 'pkg-config >= 0.15.0 not found.'
Exit(1)
else:
config_success.append('HAVE_PKG_CONFIG 1')
if not conf.CheckPKG('glibmm-2.4','2.16'):
print 'glibmm >= 2.16 not found.'
Exit(1)
else:
config_success.append('HAVE_GLIBMM 1')
packages.append('glibmm-2.4')
if not conf.CheckPKG('giomm-2.4','2.16'):
print 'giomm >= 2.16 not found'
Exit(1)
else:
config_success.append('HAVE_GIOMM 1')
packages.append('giomm-2.4')
if not conf.CheckPKG('gthread-2.0','2.16'):
print 'gthread-2.0 >= 2.16 not found'
Exit(1)
else:
config_success.append('HAVE_GTHREAD_2 1')
packages.append('gthread-2.0')
if not conf.CheckPKG('gtkmm-2.4','2.16'):
print 'gtkmm-2.4 >= 2.16 not found'
Exit(1)
else:
config_success.append('HAVE_GTKMM 1')
packages.append('gtkmm-2.4')
if not conf.CheckPKG('botan-1.9','1.9'):
if not conf.CheckPKG('botan-1.8','1.8'):
print 'libbotan not found. Please install at least v1.8.6'
Exit(1)
else:
config_success.append('HAVE_BOTAN 1')
packages.append('botan-1.8')
else:
packages.append('botan-1.9')
config_success.append('HAVE_BOTAN 1')
print "DEBUG flag is " + ("enabled" if env['DEBUG'] == 1 else "disabled")
if (env['DEBUG'] == 1):
config_success.append('DEBUG 1')
if (env['THREADS']) and (int(env['THREADS']) > 1):
config_success.append('NO_OF_THREADS ' + env['THREADS'])
config_success.append('PROGRAM_PREFIX "'+ get_default_prefix(env) + '"')
config_success.append('DATA_PREFIX "'+ get_default_data_prefix(env) +'"')
rev = commands.getoutput("git log | grep -m 1 \"^commit\"| awk '{print $2}'")
config_success.append('REVISION "' + rev+'"')
config_success.append('REVISION_SHORT "'+rev[0:8]+'"')
print "Generating config.h..."
WriteConfigHeader(SRCDIR+"/config.h",config_success)
env = conf.Finish()
#################### END CONFIGURATION ####################
print "Prefix for " + PROGRAM + " is: " + get_default_prefix(env)
env.VariantDir(BDIR,SRCDIR,duplicate = 0)
# Parse the pkg-config data into the environment
if not (env.GetOption('clean') or env.GetOption('help')):
env.ParseConfig('pkg-config --libs ' + " ".join(packages) )
env.ParseConfig('pkg-config --cflags ' + " ".join(packages) )
# Create the output dir if not existing
if not os.path.exists(BDIR):
os.mkdir(BDIR)
hasher = env.Program(target = BDIR+PROGRAM,source = sources)
uis = glob.glob(UIDIR + "/*.ui")
if not (env.GetOption('clean') or env.GetOption('help') ):
if os.path.exists(DATADIR):
for x in uis:
Command(BDIR+x,x,CopyDependency)
# vim: ts=4 tw=4 et ft=python
|
UTF-8
|
Python
| false | false | 2,010 |
13,494,787,282,235 |
8ec28ff0327aea06fa58e8d250931c5b800cb805
|
716b33a162ca3fa25414d219cc39f3594f695125
|
/pie/objects/test/__init__.py
|
b3074f30875814711687974930933a99e3762ed8
|
[] |
no_license
|
pie-interpreter/pie
|
https://github.com/pie-interpreter/pie
|
a5fc7711930fde7e8fb4e15bb0d6ec6da2a2362d
|
ad325f96e5dc9eecac889613d54c5dc310ff9ee1
|
refs/heads/master
| 2020-05-17T21:15:54.975329 | 2013-03-23T20:01:07 | 2013-03-23T20:01:07 | 8,976,196 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
__author__ = 'sery0ga'
|
UTF-8
|
Python
| false | false | 2,013 |
15,049,565,419,030 |
215cc044ad1e4c47f5a0ef9c664bc80d11776872
|
dcb1b85e7886c03325fe7c062c2afcac28a49f60
|
/init/init_reset_env.py
|
43ab736edce3afc0ab793b5e1901e2b454ba3ce8
|
[
"GPL-1.0-or-later",
"BSD-3-Clause",
"GPL-2.0-only"
] |
non_permissive
|
tigerjibo/env
|
https://github.com/tigerjibo/env
|
840da231bbffe843e06aacc3542e73eef8492c90
|
6c6229075a0af1a041c35cdbfa13ba3fd540a11e
|
refs/heads/master
| 2021-06-06T20:14:27.638065 | 2014-04-09T14:05:53 | 2014-04-09T14:05:53 | 10,380,718 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
from os.path import join as pjoin
from handle_error import handle
from commands import getstatusoutput as getso
from config import HOST_NAME
def reset_tftp():
    """Reload xinetd config and restart the service so tftp picks it up."""
    print('reset tftp server...\n')
    status, output = getso("/etc/init.d/xinetd reload")
    handle(status, output)
    status, output = getso("service xinetd restart")
    handle(status, output)
def reset_nfs():
    """Re-export all NFS shares and restart the kernel NFS server."""
    print('reset nfs server...\n')
    status, output = getso("exportfs -rv")
    handle(status, output)
    status, output = getso("service nfs-kernel-server restart")
    handle(status, output)
def reset_sambo():
    """Set the samba password for HOST_NAME (interactive) and restart smbd."""
    print('input sambo passwd..\n')
    status, output = getso("smbpasswd -a %s" % HOST_NAME)
    handle(status, output)
    print('reset samba server...')
    status, output = getso("service smbd restart")
    handle(status, output)
def do():
    """Reset all three services in order: tftp, nfs, then samba."""
    reset_tftp()
    reset_nfs()
    reset_sambo()
|
UTF-8
|
Python
| false | false | 2,014 |
14,723,147,926,329 |
9984b09ce971001c9cc7fd8708ecf5f992287454
|
8e331a20e9046726e03f6ed625285a19cd2624c3
|
/src/mwr/droidhg/modules/common/shell.py
|
6611f1e1446ada6731348ad4c824bea381a8dfbf
|
[
"BSD-3-Clause",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
non_permissive
|
zxgangandy/mercury
|
https://github.com/zxgangandy/mercury
|
7ef1ea20a630ef7262359ced3a092af0694f23bf
|
d04748f8d7baaa1e44887a255595b632ab44c849
|
refs/heads/master
| 2017-04-30T10:14:58.463980 | 2012-12-13T16:50:43 | 2012-12-13T16:50:43 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
class Shell(object):
    """
    Mercury Client Library: provides a wrapper around the Android Shell, allowing
    commands to be executed.
    """

    def shellExec(self, command):
        """
        Execute a single Shell command on the Agent.
        """
        wrapper = self.loadClass("common/ShellWrapper.apk", "ShellWrapper")
        return wrapper.execute(command)
|
UTF-8
|
Python
| false | false | 2,012 |
8,400,956,051,693 |
25fe32dbd3c0e8ac742711c7cbad03b8cf6226ae
|
fe684dee36c0e89fbc9eb99f986995da04646bfb
|
/ib/client/sync_wrapper_gen.py
|
e2b91ac3596876371b76c4a3de39f274af3675ad
|
[] |
no_license
|
afcarl/Medici
|
https://github.com/afcarl/Medici
|
ff0bc7241b14cf2d26ff14066360ea7016d81881
|
f12bc63360e95fc1d7aa556faab9430c271abdc5
|
refs/heads/master
| 2020-08-22T14:57:12.642394 | 2014-03-05T21:55:27 | 2014-03-05T21:55:27 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
__author__ = 'oglebrandon'
import inspect
from ib.ext.EWrapper import EWrapper
# Code generator (Python 2 script): introspects ib.ext.EWrapper and prints
# Python source for a synchronous wrapper — first a deduplicated field list,
# then one generated method per EWrapper callback.
# NOTE(review): uses eval() on attribute names where getattr(refs, func)
# would do, and the long-deprecated inspect.getargspec.
class_fields = []
refs = EWrapper
# Collect every argument name (minus self) across all public methods.
for func in dir(refs):
    if func[0] != '_':
        if inspect.ismethod(eval('refs.' + str(func))):
            # List comprehension used purely for its append side effect.
            [class_fields.append(x) for x in inspect.getargspec(eval(
                'refs.' + str(func))).__dict__['args'][1:]]
print ' \n'.join([x + ' = None' for x in list(set(class_fields))])
funcs = {}
# Emit one generated method body per callback (tab-indented source text).
for func in dir(refs):
    if func[0] != '_':
        if inspect.ismethod(eval('refs.' + str(func))):
            local = inspect.getargspec(eval('refs.' + str(func))).__dict__['args'][1:]
            args= ', '.join(local)
            caller = 'self.handler = sys._getframe().f_code.co_name'
            fields = "if '" + str(func) + "' in self.emitter: \n\t\tmsg = "\
            + "{" +'\n\t\t\t\t'.join(["'" + str(var) +"' : " +str(var) + ',' for var in local])[:-1] + '}' \
            + '\n\t\tif msg not in self.return_list:\n\t\t\tself.return_list.append(msg)'\
            + '\n\tif self.return_list:\n\t\t' + str(caller)\
            + "\n\tif self.suppress is False:\n"\
            + "\t\tshowmessage('{func}', vars())".format(func=func)
            method =("""def {func}(self{params}):{fields}
            """).format(func=func,
                        params= ', ' + args if args is not '' else args,
                        fields= '\n\t' + fields)
            print method
|
UTF-8
|
Python
| false | false | 2,014 |
14,671,608,288,898 |
00fa8ccaac0d8a9613ce4a28f9ce41ecd543670f
|
460ce9245bf167fc14d33552898e741236a5660f
|
/problem312.py
|
305e97882d8420f85ec4dc9ef5dc94ed31a466ff
|
[] |
no_license
|
dzhou/project-euler
|
https://github.com/dzhou/project-euler
|
1d26a8fa15a49eafeefbd97be178d0b2e280d6d0
|
7b42a4a12d11f8167bd204619eb360f0d657ed41
|
refs/heads/master
| 2020-04-22T21:46:53.904623 | 2011-03-23T20:52:43 | 2011-03-23T20:52:43 | 1,080,518 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
Project Euler 312
- A Sierpinski graph of order-1 (S1) is an equilateral triangle.
- Sn+1 is obtained from Sn by positioning three copies of Sn so that every pair of copies has one common corner.
Let C(n) be the number of cycles that pass exactly once through all the vertices of Sn.
For example, C(3) = 8 because eight such cycles can be drawn on S3, as shown below:
It can also be verified that :
C(1) = C(2) = 1
C(5) = 71328803586048
C(10 000) mod 108 = 37652224
C(10 000) mod 138 = 617720485
Find C(C(C(10 000))) mod 138.
I worked out most of the problem with pen/paper
iterative formula:
k(1) = 1
k(n+1)= 3*[k(n-1)**3]
C(n) = k(n)**3
and cycle lenth of C is 28960854
"""
def find_cycle(mod=13**8):
    """Length of the cycle of k -> 3*k**3 (mod *mod*) starting from k = 24.

    Used to reduce the huge iteration counts in C(); per the module
    docstring the cycle length for mod = 13**8 is 28960854.
    NOTE(review): only terminates if 24 recurs under the map for the given
    modulus — true for 13**8, not for arbitrary moduli.
    """
    k = 24
    c = 0
    while True:
        k = 3*(k**3) % mod
        c += 1
        if k == 24:
            break
    return c
# function C for n >= 5
def C(n, mod):
k = 24
c -= 4
while c > 0:
c -= 1
k = 3*(k**3) % m
return k**3 % m
def problem312():
    """Compute C(C(C(10**4))) mod 13**8, reducing the inner arguments by
    the cycle length 28960854 of k -> 3*k**3 (mod 13**8)."""
    cycle = 28960854
    inner = C(10**4, cycle)
    middle = C(inner, cycle)
    return C(middle, 13**8)
# Script entry point (Python 2 print statement).
if __name__ == "__main__":
    print problem312()
|
UTF-8
|
Python
| false | false | 2,011 |
11,776,800,335,504 |
bde409274b1099c671d38f6dee7acd09b47e2bee
|
3b5951e7dbec8d44abccdcb642afa2a2b0327eb9
|
/pyConTextWithaTwist/pyConTextWeb/pyConTextKit/criticalFinderGraph.py
|
79844a9c4d931d604fb6b4ab6271a1c33d06b3bd
|
[] |
no_license
|
DBMI/NLP
|
https://github.com/DBMI/NLP
|
b1f7490b586e7bde4ae46813a89e9dc396f6ab09
|
ee4f36f042853f0902b6c920b0924c3fde179ccc
|
refs/heads/master
| 2020-04-10T08:09:43.023105 | 2012-08-23T01:43:25 | 2012-10-05T01:17:12 | 2,873,529 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#Copyright 2010 Brian E. Chapman
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
#
#Modified by Annie T. Chen, June-Aug., 2011.
"""
CriticalFinderGraph is a program that processes the impression section of dictated
radiology reports. pyContext is used to look for descriptions of acute critical
fidnings.
At this state, the program assumes that the reports are stored in a SQLite3 database.
The database should have a table named 'reports' with a field named 'impression'
although these values can be specified through the command line options.
"""
from django.db.models import Count
from pyConTextKit.models import Report, Result, Alert
import sys
import os
from optparse import OptionParser
import sqlite3 as sqlite
import networkx as nx
import datetime, time
import pyConTextNLP.helpers as helpers
import pyConTextGraphV2.pyConTextGraphV2 as pyConText
from pyConTextGraphV2.pyConTextSqlV2 import pyConTextSql
from pyConTextGraphV2 import itemData
import cPickle
import unicodecsv
"""helper functions to compute final classification"""
class criticalFinder(object):
"""This is the class definition that will contain the majority of processing
algorithms for criticalFinder.
The constructor takes as an argument the name of an SQLite database containing
the relevant information.
"""
def __init__(self, options):#dbname, outfile, save_dir, table, idcolumn, txtcolumn, doGraphs):
    """create an instance of a criticalFinder object associated with the SQLite
    database.
    dbname: name of SQLite database
    """
    t = time.localtime()  # NOTE(review): computed but never used
    self.doGraphs = options.doGraphs
    self.allow_uncertainty = options.allow_uncertainty
    self.proc_category = options.category
    # Working set: first options.number reports of the requested dataset.
    self.reports = Report.objects.filter(dataset=options.dataset)[:options.number]
    #print "number of reports to process",len(self.reports)
    #raw_input('continue')
    # create context objects for each of the questions we want to be answering
    self.context = {"disease":pyConText.pyConText()}
    rsltsDB = options.odbname  # NOTE(review): assigned but never used here
    # Clear results/alerts left over from a previous run.
    alerts=Alert.objects.all()
    alerts.delete()
    rslts=Result.objects.all()
    rslts.delete()
    # Create the itemData object to store the modifiers for the analysis
    # starts with definitions defined in pyConText and then adds
    # definitions specific for peFinder
    #label specifies whether the user wants a domain or linguistic set.
    #items returns an array of contextItems (e.g. getCategory(), getLiteral() )
    items_modifiers = itemData.instantiateFromSQLite("../pyConTextWeb.db",options.label_modifiers,"pyConTextKit_lexical")
    items_targets = itemData.instantiateFromSQLite("../pyConTextWeb.db",options.label_targets,"pyConTextKit_lexical")
    # NOTE(review): items_modifiers / items_targets are built but never used
    # below, and the disabled block in the string literal is the only place
    # self.modifiers (used by analyzeReport) and critItems were defined —
    # looks like a half-finished migration to the SQLite lexicon; confirm.
    #itemData = itemData.itemData(items)
    """
    probableNegations = itemData('PROBABLE_NEGATED_EXISTENCE')
    definiteNegations = itemData('DEFINITE_NEGATED_EXISTENCE')
    pseudoNegations = itemData('PSEUDONEG')
    indications = itemData('INDICATION')
    historicals = itemData('HISTORICAL')
    conjugates = itemData('CONJ')
    probables = itemData('PROBABLE_EXISTENCE')
    definites = itemData('DEFINITE_EXISTENCE')
    future = itemData('FUTURE')
    critItems = itemData('CRIT_ITEMS')
    self.modifiers = {"disease":itemData('')}
    self.modifiers["disease"].prepend(pseudoNegations)
    self.modifiers["disease"].prepend(definiteNegations)
    self.modifiers["disease"].prepend(probableNegations)
    self.modifiers["disease"].prepend(probables)
    self.modifiers["disease"].prepend(definites)
    self.modifiers["disease"].prepend(indications)
    self.modifiers["disease"].prepend(conjugates)
    self.modifiers["disease"].prepend(future)
    self.modifiers["disease"].prepend(historicals)
    """
    # Quality targets (generated from category parameter set by parser)
    if( options.category.lower() == 'all'):
        # NOTE(review): critItems is only defined inside the disabled string
        # block above, so this branch raises NameError — confirm.
        targetItems = critItems
    else:
        targetItems = itemData(options.category)
    self.targets = {"disease":targetItems}
    self.models = {}
def analyzeReport(self, report, mode, modFilters = None ):
    """given an individual radiology report, creates a pyConTextSql
    object that contains the context markup
    report: a text string containing the radiology reports
    mode: which of the pyConText objects are we using: disease
    modFilters: list of modifier categories (defaulted below); currently
    only used as the default list — it is not passed on to the context."""
    context = self.context.get(mode)
    targets = self.targets.get(mode)
    # NOTE(review): self.modifiers is never assigned in __init__ (the code
    # that built it is commented out), so this line raises AttributeError —
    # confirm.
    modifiers = self.modifiers.get(mode)
    if modFilters == None :
        modFilters = ['indication','pseudoneg','probable_negated_existence',
                      'definite_negated_existence', 'probable_existence',
                      'definite_existence', 'historical']
    context.reset()
    # Mark up the report one sentence at a time, accumulating in `context`.
    sentences = helpers.sentenceSplitter(report)
    count = 0
    for s in sentences:
        context.setTxt(s)
        context.markItems(modifiers, mode="modifier")
        context.markItems(targets, mode="target")
        context.pruneMarks()
        context.dropMarks('Exclusion')
        context.applyModifiers()
        #context.pruneModifierRelationships()
        context.dropInactiveModifiers()
        context.commit()
        count += 1
    #context.computeDocumentGraph()
def classifyDocumentTargets(self):
    """Classify every target node in the document graph.

    Returns a pair (alerts, rslts):
      rslts: maps each target node to a dict with keys 'disease'
             ('Pos'/'Neg'), 'uncertainty' ('Yes'/'No') and
             'temporality' ('New'/'Old'/'NA').
      alerts: maps each target category to 1 if any target of that
              category is a new, positive finding, else 0.
    Both are empty when the document contains no target nodes.
    """
    rslts = {}
    alerts = {}
    cntxt = self.context["disease"]
    cntxt.computeDocumentGraph()
    g = cntxt.getDocumentGraph()
    # Keep only the nodes the markup tagged as targets.
    targets = [n[0] for n in g.nodes(data = True) if n[1].get("category","") == 'target']
    if( not targets ):
        return alerts,rslts
    # With uncertainty allowed, probable existence also counts as positive.
    if(self.allow_uncertainty):
        pos_filters = ["definite_existence","probable_existence"]
    else:
        pos_filters = ["definite_existence"]
    for t in targets:
        mods = g.predecessors(t)
        rslts[t] = {}
        if( not mods ): # an unmodified target is disease positive,certain, and acute
            rslts[t]['disease'] = 'Pos'
            rslts[t]['uncertainty'] = 'No'
            rslts[t]['temporality'] = 'New'
        else:
            # Positive only if an existence modifier matches and no
            # future/indication/pseudoneg modifier contradicts it.
            if (modifies(g,t,pos_filters) and
                not modifies(g,t,[#"definite_negated_existence",
                                  #"probable_negated_existence",
                                  "future","indication","pseudoneg"])):
                rslts[t]['disease'] = 'Pos'
            else:
                rslts[t]['disease'] = 'Neg'
            if( modifies(g,t,["probable_existence",
                              "probable_negated_existence"]) ):
                rslts[t]['uncertainty'] = 'Yes'
            else:
                rslts[t]['uncertainty'] = 'No'
            if( modifies(g,t,["historical"]) ):
                rslts[t]['temporality'] = 'Old'
            else:
                # Temporality is meaningless for a negative finding.
                if( rslts[t]['disease'] == 'Neg'):
                    rslts[t]['temporality'] = 'NA'
                else:
                    rslts[t]['temporality'] = 'New'
        """
        generates the alerts; we should be able to remove this because we are
        now using the results table to classify documents
        """
        rsum = alerts.get(t.getCategory(),0)
        if( rslts[t]["disease"] == 'Pos' and rslts[t]["temporality"] == 'New'):
            alert = 1
        else:
            alert = 0
        # An alert for a category sticks once any target raises it.
        rsum = max(rsum,alert)
        alerts[t.getCategory()] = rsum
    return alerts, rslts
def plotGraph(self):
    """Render the current document graph to a PDF in self.save_dir.

    The filename encodes the processing category, whether uncertainty was
    allowed, and the current case id.
    """
    cntxt = self.context["disease"]
    g = cntxt.getDocumentGraph()
    # Convert the networkx graph to a pydot graph for rendering.
    ag = nx.to_pydot(g, strict=True)
    gfile = os.path.join(self.save_dir,
                         "report_%s_unc%s_%s_critical.pdf"%(self.proc_category,
                                                            self.allow_uncertainty,
                                                            self.currentCase))
    ag.write(gfile,format="pdf")
def processReports(self):
    """For the selected reports (training or testing) in the database,
    process each report with peFinder
    """
    # NOTE(review): `count` is incremented but never read.
    count = 0
    for r in self.reports:
        #need to change the next two lines so that the fields are not hard-coded
        self.currentCase = r.id
        self.currentText = r.impression.lower()
        # Mark up the report with the full modifier set, then persist
        # the classification via recordResults().
        self.analyzeReport(self.currentText,
                           "disease",
                           modFilters=['indication','probable_existence',
                                       'definite_existence',
                                       'historical','future','pseudoneg',
                                       'definite_negated_existence',
                                       'probable_negated_existence'])
        self.recordResults()
        count += 1
def recordResults(self):
alerts, rslts = self.classifyDocumentTargets()
if( self.doGraphs and rslts):
self.plotGraph()
targets = rslts.keys()
if( targets ):
print self.currentCase
query = """INSERT INTO pyConTextKit_result (reportid,category, disease, uncertainty, historical, literal, matchedphrase) VALUES (?,?,?,?,?,?,?)"""
for t in targets:
rslt=Result(reportid=self.currentCase, category=t.getCategory(),
disease=rslts[t]["disease"],
uncertainty=rslts[t]["uncertainty"],
historical=rslts[t]["temporality"],
literal=t.getLiteral(),
matchedphrase=t.getPhrase())
rslt.save()
keys = alerts.keys()
if( keys ):
for c in keys:
alert=Alert(reportid=self.currentCase,category=c,alert=alerts[c],report=self.currentText)
alert.save()
def cleanUp(self):
    # Flush any pending writes on the results database connection.
    # NOTE(review): self.resultsConn is not assigned anywhere in this
    # excerpt -- confirm it is set during initialization.
    self.resultsConn.commit()
def modifies(g,n,modifiers):
    """Return True if node `n` in graph `g` has at least one predecessor
    whose category matches any entry in `modifiers` (case-insensitive).

    g: a graph exposing predecessors(node)
    n: the target node being tested
    modifiers: iterable of category name strings
    """
    pred = g.predecessors(n)
    if( not pred ):
        return False
    # Fixed: the original comprehension reused the name `n` for its loop
    # variable, shadowing the target-node parameter.
    pcats = [p.getCategory().lower() for p in pred]
    return bool(set(pcats).intersection([m.lower() for m in modifiers]))
# Add parser options for the label of the lexicon to use in the parsing
def getParser():
    """Build the command-line parser for database and processing options.

    Returns an optparse.OptionParser configured with the same flags,
    destinations and defaults as before.
    """
    parser = OptionParser()
    # (flags, keyword arguments) pairs, registered in order so the
    # --help listing is unchanged.
    option_specs = [
        (("-d", "--db"),
         dict(dest='dbname', default="pyConTextWeb.db",
              help='name of db containing reports to parse')),
        (("-o", "--odb"),
         dict(dest='odbname', help='name of db containing results',
              default="pyConTextWeb.db")),
        (("-s", "--save_dir"),
         dict(dest='save_dir', default='critFinderResults',
              help='directory in which to store graphs of markups')),
        (("-t", "--table"),
         dict(dest='table', default='pyConTextKit_report',
              help='table in database to select data from')),
        (("-i", "--id"),
         dict(dest='id', default='rowid',
              help='column in table to select identifier from')),
        (("-g", "--graph"),
         dict(action='store_true', dest='doGraphs', default=False)),
        (("-r", "--report"),
         dict(dest='report_text', default='impression',
              help='column in table to select report text from')),
        (("-c", "--category"),
         dict(dest='category', default='ALL',
              help='category of critical finding to search for. If ALL, all categories are processed')),
        (("-u", "--uncertainty_allowed"),
         dict(dest="allow_uncertainty", action="store_true", default=False)),
        (("-a", "--dataset"),
         dict(dest="dataset", default='ALL',
              help='report dataset to analyze')),
        (("-b", "--rcat"),
         dict(dest="rcat", default='',
              help='report category to analyze')),
        (("-n", "--number"),
         dict(dest="number", default=20,
              help='number of reports to analyze')),
    ]
    for flags, kwargs in option_specs:
        parser.add_option(*flags, **kwargs)
    return parser
|
UTF-8
|
Python
| false | false | 2,012 |
1,279,900,301,065 |
8102ee07779a3294a27d081044db1bc97c568efe
|
2079345dabf7d2a7d754cff1a07c2e41fefed0c4
|
/modoverflow/models/Answer.py
|
d0202768a26a88d78a9e4261ebcdc9c2734e010a
|
[] |
no_license
|
jackgill/modoverflow
|
https://github.com/jackgill/modoverflow
|
35898e4f4fd9ec7f6c053063176fcbd93a36221a
|
3970fd7aedf4b99db6a958c1a0a9c54bc0830290
|
refs/heads/master
| 2020-04-15T06:59:27.151848 | 2012-09-11T23:14:05 | 2012-09-11T23:14:05 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from sqlalchemy import Column, Integer, String, Boolean, ForeignKey
from sqlalchemy.orm import relationship, backref
from modoverflow.database import Base
class Answer(Base):
    """SQLAlchemy model for an answer posted to a question.

    Maps to the 'Answers' table; related User and Question rows are
    reachable via the `submitter` and `question` relationships.
    """
    __tablename__ = 'Answers'
    # Surrogate primary key.
    id = Column(Integer, primary_key=True)
    # Body text of the answer.
    text = Column(String)
    # NOTE(review): both foreign keys are declared as String even though
    # the referenced primary keys may be integers -- confirm against the
    # User/Question models.
    submitter_id = Column(String, ForeignKey('Users.id'))
    question_id = Column(String, ForeignKey('Questions.id'))
    # Net vote count.
    votes = Column(Integer)
    # Whether the question author accepted this answer.
    is_accepted = Column(Boolean)
    # Backref 'answers' on User, ordered by answer id.
    submitter = relationship("User", backref=backref('answers', order_by=id))
    # Backref 'answers' on Question, highest-voted first.
    question = relationship("Question", backref=backref('answers', order_by=votes.desc()))
    def __repr__(self):
        return '<Answer %s>' % (self.id)
|
UTF-8
|
Python
| false | false | 2,012 |
12,601,434,090,597 |
f06b59e843b04a1ad1564e3c20a28ddba5e5898b
|
3630f3c5b26ec5193edad3cb964dc98c23673de4
|
/BrutalForceMatice/brutalForce.py
|
2f857738c332ba104b49115db2f523d27998f119
|
[] |
no_license
|
vojtasvoboda/MathInPython
|
https://github.com/vojtasvoboda/MathInPython
|
8340bcfe6c6665b124e477633670a2539ff45ef9
|
6ddd014cc2e0d053d76765b7e919ca584cefd43c
|
refs/heads/master
| 2020-05-29T11:16:37.815683 | 2014-06-06T08:13:53 | 2014-06-06T08:13:53 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# mame zadanou zakodovanou zpravu a klic
# klic * x = zakodovana_zprava
# pomoci brutal force hledam x
from numpy import *
from time import strftime, gmtime
def main():
    """Brute-force search for a 5-vector x with klic * x matching fixed
    residues mod 33, trying every x in {0..25}^5."""
    # Encoded message.
    # NOTE(review): `zprava` (20,17,2,5,6) is never used below; the loop
    # compares against the hard-coded residues 28,9,8,4,14 instead --
    # confirm which target vector is intended.
    zprava = matrix([[20],[17],[2],[5],[6]])
    # Encryption key matrix.
    klic = matrix([[18, 0,19,12,23],
                   [22,30,32,19,10],
                   [19,17, 2,32,32],
                   [11,24,20,22, 5],
                   [30, 0,19,26,22]])
    print "Brutal force started in",strftime("%H:%M:%S", gmtime())
    # Exhaustive search: 26^5 candidate vectors; progress is printed as
    # one line per value of `a` with a dot per value of `b`.
    for a in range(26):
        print ""
        print a,
        for b in range(26):
            print ".",
            for c in range(26):
                for d in range(26):
                    for e in range(26):
                        matice = matrix([[a],[b],[c],[d],[e]])
                        nasobek = klic * matice
                        # Element-wise comparison mod 33; a match prints
                        # the candidate vector.
                        if ( (nasobek[0]%33==28) & (nasobek[1]%33==9) & (nasobek[2]%33==8) & (nasobek[3]%33==4) & (nasobek[4]%33==14)):
                            print matice
    print ""
    print "Brutal force ended in",strftime("%H:%M:%S", gmtime())
# Run the search only when executed as a script, not on import.
if __name__ == '__main__':
    main()
|
UTF-8
|
Python
| false | false | 2,014 |
8,315,056,704,017 |
3d5587b1a77c02e698ed72a0792161ac637653d7
|
c0bd180807e15f863d1e7a30467d0c64813bdf3f
|
/resources/test/test_suite/optilab/optimization/__init__.py
|
743315674d6274dac19af567a92d5966f4853cc0
|
[
"GPL-1.0-or-later",
"GPL-2.0-only"
] |
non_permissive
|
fmach/agros2d
|
https://github.com/fmach/agros2d
|
b13110db43aede90098986343f61c141df41c4ae
|
9840b6ae38ef930fc6e7c0a629b2ebd96eb9778a
|
refs/heads/master
| 2020-12-03T09:31:44.244439 | 2014-07-31T22:05:53 | 2014-07-31T22:05:53 | 1,274,912 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
__all__ = ["optimization", "genetic"]
from test_suite.optilab.optimization import optimization
from test_suite.optilab.optimization import genetic
|
UTF-8
|
Python
| false | false | 2,014 |
2,061,584,344,653 |
6401607f4fe941a5a65dec951817063fab0d344d
|
b7b2e33d365d377a76ba48f6b530daaf06d9a6fc
|
/tip_calculator/tip_calculator_v3.py
|
4860acd956639a07d1b93d1688302938333d5c32
|
[] |
no_license
|
methodmenon/Python
|
https://github.com/methodmenon/Python
|
c36ee8e184722afbf32aab052bcb402296d6275c
|
80ace465113676f214b0423268619a90d0963475
|
refs/heads/master
| 2020-12-25T19:14:56.120207 | 2014-06-05T05:11:32 | 2014-06-05T05:11:32 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import sys

# Usage: tip_calculator.py <base_price> <tax_percent> <tip_percent>
# Command-line arguments: meal price, then tax and tip as percentages.
base_meal_price = float(sys.argv[1])
tax_rate = (float(sys.argv[2]))/100
tip_rate = (float(sys.argv[3]))/100
# Tax applies to the base price; the tip is also computed on the
# pre-tax base price (not on the taxed total).
tax_value = base_meal_price * tax_rate
meal_with_tax = base_meal_price + tax_value
tip_value = tip_rate * base_meal_price
total = meal_with_tax + tip_value
print "The base cost of the meal is ${0:.2f}.".format(base_meal_price)
print "The tax on your meal is ${0:.2f}.".format(tax_value)
print "The tip on your meal is ${0:.2f}.".format(tip_value)
print "The grand total for your meal is ${0:.2f}.".format(total)
|
UTF-8
|
Python
| false | false | 2,014 |
11,819,750,013,560 |
d4d07aad3bc43f8ce38088beb5fbaae0ecbc3247
|
6dd6869d6d7fe4890607cc21e7403be403e1516a
|
/nanobsd/plugins/jdownloader_pbi/resources/jdownloaderUI/freenas/models.py
|
f9aee18864fb763385cd95f697d7ef90709c5729
|
[] |
no_license
|
cm86/FreeNAS_8-JDownloader
|
https://github.com/cm86/FreeNAS_8-JDownloader
|
e1e716db632c2dd03e285d0bd7eb87678e8162f2
|
88613dcc5fac4ffe889f890e07270e8a87d7b9d7
|
refs/heads/master
| 2020-07-03T05:11:32.160663 | 2012-09-26T05:42:17 | 2012-09-26T05:42:17 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.db import models
class JDownloader(models.Model):
    """
    Django model describing every tunable setting for jdownloader
    """
    # Whether the JDownloader service is enabled.
    enable = models.BooleanField(default=False)
    # X11 display the headless GUI attaches to (e.g. ':1'); may be blank.
    x11_DISPLAY = models.CharField(max_length=500, default=':1', blank=True)
    # Whether to run under Xvfb (virtual framebuffer X server).
    xvfb_enable = models.BooleanField(default=True)
|
UTF-8
|
Python
| false | false | 2,012 |
5,214,090,309,538 |
3ccece5188d02d7f72e1459c4859ba1378e55abd
|
fca9e63efde49b4c78694e0152f0c2cbfe02b3e7
|
/djangobench/benchmarks/default_middleware/urls.py
|
cbcdaa1248186a118b65959e811e0fb6e49595ed
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
akaariai/djangobench
|
https://github.com/akaariai/djangobench
|
398e999b1a775ff4c61fb48659cc81f3daa3e643
|
aeccd3200ff0c137bff2f1402e5b9f605613599d
|
refs/heads/master
| 2021-01-20T17:53:50.891374 | 2013-08-09T09:50:27 | 2013-08-09T10:29:55 | 3,669,708 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Django 1.4 moved ``patterns`` from django.conf.urls.defaults to
# django.conf.urls; fall back to the old location on older releases.
try:
    from django.conf.urls import patterns
except ImportError:
    from django.conf.urls.defaults import patterns

# Route every URL to the index view of the default_middleware app.
urlpatterns = patterns('default_middleware',
    (r'^.*$', 'views.index'),
)
|
UTF-8
|
Python
| false | false | 2,013 |
10,299,331,595,858 |
5df1f27a06b038e953639fd4756be4b8bb520f57
|
11dc62586746fb5581476e145a4115cf982605df
|
/problem-028/problem-028.py
|
4e88c7da6435eef1fbad03ae8271c3a2f6c58965
|
[] |
no_license
|
martinsik/projecteuler.net
|
https://github.com/martinsik/projecteuler.net
|
a6a17afd40e86c0a4dbb41074d0f39300acf155a
|
62aff4b38453f1689d5b95bad14a0c516325cc11
|
refs/heads/master
| 2016-09-05T11:00:06.634344 | 2012-07-27T15:02:13 | 2012-07-27T15:02:13 | 1,859,519 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# problem 28
# 21 22 23 24 25
# 20 7 8 9 10
# 19 6 1 2 11
# 18 5 4 3 12
# 17 16 15 14 13
# 43 44 45 46 47 48 49 50
# 42 21 22 23 24 25 26 51
# 41 20 7 8 9 10 27 52
# 40 19 6 1 2 11 28 53
# 39 18 5 4 3 12 29 54
# 38 17 16 15 14 13 30 55
# 37 36 35 34 33 32 31 56
# 57
# 43 49
# 21 25
# 7 9
# 1
# 5 3
# 17 13
# 37 31
# 57
total = 1;
last = 1;
for size in range(3, 1003, 2):
coef = size - 1;
for j in range(0, 4):
last += coef
total += last
print total
|
UTF-8
|
Python
| false | false | 2,012 |
17,867,063,978,474 |
0304bf62ecb377c41709dd30be22e3d1a1c5b1a9
|
ba66ece559a943f4155fb61938e43b8ac4f957df
|
/gui/recipe.py
|
ceea0701add63a3fe0b0d6822fb528d8b1aafe9b
|
[
"GPL-3.0-only"
] |
non_permissive
|
reybalgs/PyRecipe-4-U
|
https://github.com/reybalgs/PyRecipe-4-U
|
9a47eb6b6c7dc20422dc3d6ce563c914087a0d5c
|
e785fad4997d8904f03287272820499cc1aac2de
|
refs/heads/master
| 2020-04-06T04:52:15.493286 | 2013-02-20T09:21:08 | 2013-02-20T09:21:08 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
###############################################################################
#
# recipe.py
#
# This module contains all of the classes and functions that are shared by the
# Add Recipe and Edit Recipe dialogs.
#
# I chose to consolidate them into one module because they are quite similar in
# nature.
#
###############################################################################
# PySide imports
from PySide.QtCore import *
from PySide.QtGui import *
# Import recipe model
from models.recipemodel import *
# Ingredients window import
from ingredients import *
# Instructions window import
from instructions import *
# Error dialog import
from errordialog import *
import sys
class RecipeOverview(QDialog):
    """
    The class of the dialog that shows an overview of an entire recipe. Does
    not actually edit the recipe, but has buttons that lead to editing
    functionalities.
    """
    def get_recipe(self):
        """Returns the recipe inside this dialog"""
        return self.recipe
    def refresh_recipe_info(self):
        """
        Refreshes the essential data of the recipe in view, usually done after
        an edit.
        """
        self.nameData.setText(self.recipe.name)
        self.courseData.setText(self.recipe.course)
        self.servingSizeData.setText(str(self.recipe.servingSize) + ' people')
    def refresh_ingredients(self):
        """Refreshes the recipe's list of ingredients"""
        self.ingredientData.clear() # clear the list
        counter = 1 # A counter variable for the displayed numbering
        # Put text in the ingredients list
        for ingredient in self.recipe.ingredients:
            # Go through the list of ingredients; each is rendered as
            # "N. name: quantity unit"
            self.ingredientData.insertPlainText(str(counter) + '. ' +
                ingredient['name'] + ': ' + str(ingredient['quantity']) +
                ' ' + ingredient['unit'] + '\n')
            counter += 1
    def refresh_instructions(self):
        """Refreshes the recipe's list of instructions"""
        self.instructionData.clear() # clear the list
        counter = 1 # A counter variable for the displayed numbering
        # Put text in the instructions list
        for instruction in self.recipe.instructions:
            # Go through the list of instructions
            self.instructionData.insertPlainText(str(counter) + '. ' +
                instruction + '\n')
            counter += 1
    def edit_recipe_info(self):
        """Edits the essential data of the recipe in view"""
        # Create an edit recipe dialog
        editRecipeDialog = EditRecipeWindow(self, self.recipe)
        # Execute the dialog (modal)
        editRecipeDialog.exec_()
        # Return the recipe from the dialog
        self.recipe = editRecipeDialog.get_recipe()
        # Refresh the info displayed
        self.refresh_recipe_info()
    def edit_ingredients(self):
        """Edits the ingredients of the recipe in view"""
        # Create an ingredients dialog
        ingredientsDialog = IngredientsWindow(self, self.recipe.ingredients)
        # Execute the dialog (modal)
        ingredientsDialog.exec_()
        # Get the updated list of ingredients from the dialog
        self.recipe.ingredients = ingredientsDialog.get_ingredients()
        # Refresh the list of ingredients
        self.refresh_ingredients()
    def edit_instructions(self):
        """Edits the instructions of the recipe in view"""
        # Create an instructions dialog
        instructionsDialog = InstructionsWindow(self,
            self.recipe.instructions)
        # Execute the dialog (modal)
        instructionsDialog.exec_()
        # Get the updated list of instructions from the dialog
        self.recipe.instructions = instructionsDialog.get_instructions()
        # Refresh the list of instructions
        self.refresh_instructions()
    def import_image(self):
        """Imports an image to be used by the current recipe"""
        # Create a var containing a list of images
        files = ''
        # Create a var that contains the path of the actual image
        path = ''
        # Invoke a filedialog that will look for the image
        fileDialog = QFileDialog(self, "Import Image", "./gui/images/")
        fileDialog.setFileMode(QFileDialog.ExistingFile)
        #fileDialog.setNameFilter("Image Files(*.png, *.jpg, *.jpeg, *.gif" +
        #        ", *.bmp")
        if fileDialog.exec_():
            files = fileDialog.selectedFiles()
            path = files[0]
            print path + ' loaded!'
        # Only accept files with a recognized image extension.
        if (path) and (path.lower().endswith(('.png', '.jpg', '.jpeg', '.gif',
            '.bmp'))):
            # There is an image
            # Put the image path into the list of images on this recipe.
            self.recipe.images.append(path)
            print self.recipe.name + ' images: ' + str(self.recipe.images)
            # Now we have to set the image of the dish to the newly-imported image.
            self.selectedImage = (len(self.recipe.images) - 1)
            # Set the displayed image to the selected image
            self.imageLabel.setPixmap(QPixmap(path).scaledToWidth(420))
            # Refresh the image buttons
            self.toggle_image_buttons()
            print self.selectedImage
    def delete_image(self):
        """Deletes the currently selected image"""
        # TODO: Create a dialog that warns the user when there is only one
        # image in the recipe
        # Delete the image from the list of images
        deleted = self.recipe.images.pop(self.selectedImage)
        if(self.selectedImage == len(self.recipe.images)):
            # We are at the end of the list
            self.selectedImage -= 1
        if(len(self.recipe.images) == 0):
            # We don't have any images anymore
            self.imageLabel.setPixmap(QPixmap("./gui/images/placeholder.png"))
            self.selectedImage = 0
        else:
            # We still have an image
            self.imageLabel.setPixmap(QPixmap(
                self.recipe.images[self.selectedImage]).scaledToWidth(420))
        # Refresh the buttons
        self.toggle_image_buttons()
        print str(deleted) + ' has been removed from the list of images!'
    def next_image(self):
        """
        Changes the currently displayed image to the next image in the recipe's
        sequence of images, if any.
        """
        if len(self.recipe.images) > 1:
            # We have an image to go to
            if self.selectedImage < (len(self.recipe.images) - 1):
                # We are not at the end of of the image list, we can move
                # forward
                # Increment the selected image index
                self.selectedImage += 1
                # Change the displayed image
                self.imageLabel.setPixmap(QPixmap(
                    self.recipe.images[self.selectedImage]).scaledToWidth(420))
                # Toggle the buttons
                self.toggle_image_buttons()
            else:
                print 'Already at the end of the image list!'
    def previous_image(self):
        """
        Changes the curently displayed image to the previous image in the
        recipe's sequence of images, if any.
        """
        if len(self.recipe.images) > 1:
            # We have an image to go to
            if self.selectedImage > 0:
                # We are not at the beginning so we can move back once
                # Decrement the selected image index
                self.selectedImage -= 1
                # Change the displayed image
                self.imageLabel.setPixmap(QPixmap(
                    self.recipe.images[self.selectedImage]).scaledToWidth(420))
                # Toggle the buttons
                self.toggle_image_buttons()
            else:
                print 'Already at the beginning of the list!'
    def toggle_image_buttons(self):
        """
        Disables and/or enables the some of the image buttons depending on the
        current state of the recipe.
        """
        if (len(self.recipe.images) < 2):
            # We have only one or no image for this recipe
            # Disable the previous and next image buttons
            self.prevImageButton.setEnabled(False)
            self.nextImageButton.setEnabled(False)
            if (len(self.recipe.images) == 0):
                # We have no images, so disable the delete button as it makes
                # no sense.
                self.deleteImageButton.setEnabled(False)
            else:
                # There's at least one image, allow the user to delete that
                # recipe
                self.deleteImageButton.setEnabled(True)
        else:
            # There are two or more recipes
            # We have to disable and enable the buttons based on the current
            # image selected for the recipe.
            #
            # First let's enable the delete image button
            self.deleteImageButton.setEnabled(True)
            if self.selectedImage == 0:
                # We are at the first image, disable the prev image button
                self.prevImageButton.setEnabled(False)
                self.nextImageButton.setEnabled(True)
            elif self.selectedImage == ((len(self.recipe.images)) - 1):
                # We are at the last image, disable the next image button
                self.prevImageButton.setEnabled(True)
                self.nextImageButton.setEnabled(False)
            else:
                # We are somewhere in the middle, enable both buttons
                self.prevImageButton.setEnabled(True)
                self.nextImageButton.setEnabled(True)
    def init_signals(self):
        """Initializes the signals of the buttons in the dialog"""
        # Buttons on the left side
        self.editRecipeButton.clicked.connect(self.edit_recipe_info)
        self.editIngredientsButton.clicked.connect(self.edit_ingredients)
        self.editInstructionsButton.clicked.connect(self.edit_instructions)
        # Image-control buttons on the right side
        self.newImageButton.clicked.connect(self.import_image)
        self.prevImageButton.clicked.connect(self.previous_image)
        self.nextImageButton.clicked.connect(self.next_image)
        self.deleteImageButton.clicked.connect(self.delete_image)
    def init_ui(self):
        """Initializes the UI of the dialog"""
        # Element creation
        self.mainLayout = QVBoxLayout()
        self.splitLayout = QHBoxLayout()
        self.formLayout = QGridLayout() # Layout for the left side
        self.rightHandLayout = QVBoxLayout() # Layout for the right side
        self.nameData = QLabel(self.recipe.name)
        self.courseData = QLabel(self.recipe.course)
        self.servingSizeData = QLabel(str(self.recipe.servingSize) + " people")
        # Button to edit the recipe's essential information
        self.editRecipeButton = QPushButton("Edit Recipe Information")
        self.editRecipeButton.setToolTip("Edit this recipe's vital " +
            "information (name, course, serving size)")
        self.ingredientData = QTextBrowser()
        self.ingredientData.setMinimumHeight(0)
        self.editIngredientsButton = QPushButton("Edit Ingredients")
        self.editIngredientsButton.setToolTip("Edit the ingredients for " +
            "this recipe.")
        self.instructionData = QTextBrowser()
        self.instructionData.setMinimumWidth(360)
        self.instructionData.setMinimumHeight(0)
        self.editInstructionsButton = QPushButton("Edit Instructions")
        self.editInstructionsButton.setToolTip("Edit the instructions for " +
            "this recipe.")
        # Refresh the ingredients and instructions list
        self.refresh_ingredients()
        self.refresh_instructions()
        self.buttonLayout = QHBoxLayout()
        # NOTE(review): the three edit buttons are re-created below,
        # discarding the tooltip-bearing instances created above; the
        # later instances are the ones laid out and wired in init_signals.
        self.editRecipeButton = QPushButton("Edit Recipe")
        self.editIngredientsButton = QPushButton("Edit Ingredients")
        self.editInstructionsButton = QPushButton("Edit Instructions")
        # Right hand side items
        # The recipe image
        # First, we check if the recipe has an image already
        self.imageLabel = QLabel()
        if(len(self.recipe.images)):
            # We have an image
            self.imageLabel.setPixmap(QPixmap(
                self.recipe.images[self.selectedImage]).scaledToWidth(420))
        else:
            # No image, use placeholder
            self.imageLabel.setPixmap(QPixmap("./gui/images/placeholder.png"))
        # Image chooser buttons
        # Layout for the buttons
        self.imageButtonsLayout = QHBoxLayout()
        self.prevImageButton = QPushButton("Previous")
        self.nextImageButton = QPushButton("Next")
        self.newImageButton = QPushButton("New Image")
        self.deleteImageButton = QPushButton("Delete Image")
        # Layouting
        self.setLayout(self.mainLayout)
        self.mainLayout.addLayout(self.splitLayout)
        self.splitLayout.addLayout(self.formLayout)
        self.splitLayout.addLayout(self.rightHandLayout)
        # Left hand side
        self.formLayout.addWidget(QLabel("<b>Name:</b>"), 0, 0)
        self.formLayout.addWidget(self.nameData, 0, 1)
        self.formLayout.addWidget(QLabel("<b>Course:</b>"), 1, 0)
        self.formLayout.addWidget(self.courseData, 1, 1)
        self.formLayout.addWidget(QLabel("<b>Serving Size:</b>"), 2, 0)
        self.formLayout.addWidget(self.servingSizeData, 2, 1)
        self.formLayout.addWidget(self.editRecipeButton, 3, 1)
        self.formLayout.addWidget(QLabel("<b>Ingredients:</b>"), 4, 0,
            Qt.AlignTop)
        self.formLayout.addWidget(self.ingredientData, 4, 1)
        self.formLayout.addWidget(self.editIngredientsButton, 5, 1)
        self.formLayout.addWidget(QLabel("<b>Instructions:</b>"), 6, 0,
            Qt.AlignTop)
        self.formLayout.addWidget(self.instructionData, 6, 1)
        self.formLayout.addWidget(self.editInstructionsButton, 7, 1)
        # Right hand side
        self.rightHandLayout.addWidget(self.imageLabel)
        self.rightHandLayout.addLayout(self.imageButtonsLayout)
        self.imageButtonsLayout.addWidget(self.prevImageButton)
        self.imageButtonsLayout.addWidget(self.newImageButton)
        self.imageButtonsLayout.addWidget(self.deleteImageButton)
        self.imageButtonsLayout.addWidget(self.nextImageButton)
        # Toggling the image buttons
        self.toggle_image_buttons()
    def __init__(self, parent, recipe):
        super(RecipeOverview, self).__init__(parent)
        self.recipe = recipe # Get the recipe passed
        self.setWindowTitle("Overview for " + self.recipe.name)
        # A counter variable that keeps track of the currently selected
        # image for the recipe
        # Initialized at 0 because we always start from the start.
        self.selectedImage = 0
        self.init_ui()
        self.init_signals()
class RecipeWindow(QDialog):
    """
    Base dialog for the windows that manipulate recipes (add/edit).

    Builds the shared form widgets in __init__ but does not lay them out;
    subclasses call init_layout() and init_signals(). Instantiate the
    subclasses (AddRecipeWindow, EditRecipeWindow) rather than this class
    directly.
    """
    def __init__(self, parent):
        super(RecipeWindow, self).__init__(parent)
        # Form stuff
        self.mainLayout = QVBoxLayout()
        self.formLayout = QFormLayout()
        self.buttonLayout = QHBoxLayout()
        # Name
        self.nameData = QLineEdit()
        # Tooltip for the name data
        self.nameData.setToolTip("The name of your recipe.")
        # Course
        self.courseData = QComboBox()
        # Tooltip for the course data
        self.courseData.setToolTip("The course of your recipe.")
        # Adding items to the course combobox
        self.courseData.addItem("Appetizer")
        self.courseData.addItem("Main")
        self.courseData.addItem("Dessert")
        # Serving Size
        self.servingSizeData = QDoubleSpinBox()
        # Tooltip for the serving size data
        self.servingSizeData.setToolTip("How many people your recipe can " +
            "serve")
        # Buttons
        self.submitButton = QPushButton("Submit")
        self.recipe = RecipeModel() # Create a model
    def get_recipe(self):
        """
        Returns the recipe in this dialog
        """
        return self.recipe
    def submit(self):
        """
        Puts all the form data into a model and returns that model to the
        main screen
        """
        self.recipe.name = self.nameData.text()
        self.recipe.course = self.courseData.currentText()
        self.recipe.servingSize = self.servingSizeData.value()
        # A recipe needs at least a name and a non-zero serving size.
        if not (self.recipe.name == '' or self.recipe.servingSize == 0.0):
            # Everything seems to be in order, so carry on
            # Put all the information in the forms into the model
            print 'Form submitted!'
            self.done(1)
        else:
            # Something is missing, raise the error dialog
            errorDialog = ErrorDialog(self, 'recipe')
            errorDialog.exec_() # execute the dialog
            if (errorDialog.get_flag() == 1):
                # User wanted to just discard the recipe
                self.done(1)
    def init_layout(self):
        """
        Initializes the layout of the dialog. Usually called by its subclasses.
        """
        self.mainLayout.addLayout(self.formLayout)
        # Initialize the form
        self.formLayout.addRow("Name:", self.nameData)
        self.formLayout.addRow("Course:", self.courseData)
        self.formLayout.addRow("Serving Size:", self.servingSizeData)
        # Add the instructions and ingredients buttons
        self.mainLayout.addLayout(self.buttonLayout)
        # Add the submit button
        self.mainLayout.addWidget(self.submitButton)
        # Set the mainlayout as the layout of the entire window
        self.setLayout(self.mainLayout)
    def init_signals(self):
        """
        Initializes the signal of the submit button
        """
        self.submitButton.clicked.connect(self.submit)
class AddRecipeWindow(RecipeWindow):
    """
    Window dialog that is called whenever we need to add a recipe into
    the system. Uses the empty RecipeModel created by the base class.
    """
    def __init__(self, parent):
        super(AddRecipeWindow, self).__init__(parent)
        # Change some of the names of the elements
        self.setWindowTitle("Add Recipe")
        # Initialize the layout
        self.init_layout()
        # Initialize signals
        self.init_signals()
class EditRecipeWindow(RecipeWindow):
    """
    Window dialog that is called whenever we need to edit an existing
    recipe; the form fields are pre-populated from the given recipe.
    """
    def refresh_data(self):
        """
        Refreshes the data on the data fields. Ideally used at startup or
        whenever changes were made
        """
        # Set the name data
        self.nameData.setText(self.recipe.name)
        # Set the course data depending on the course given; the indices
        # match the addItem order in RecipeWindow.__init__.
        if self.recipe.course == 'Appetizer':
            self.courseData.setCurrentIndex(0)
        elif self.recipe.course == 'Main':
            self.courseData.setCurrentIndex(1)
        elif self.recipe.course == 'Dessert':
            self.courseData.setCurrentIndex(2)
        else:
            print 'Error! Wrong index/name of course!'
        # Set the recipe's serving size data
        self.servingSizeData.setValue(self.recipe.servingSize)
        # Some debug messages
        print 'Window refreshed!'
    def __init__(self, parent, recipe):
        super(EditRecipeWindow, self).__init__(parent)
        # Change some of the names of the elements
        self.setWindowTitle("Edit " + recipe.name)
        # Initialize the layout
        self.init_layout()
        # Initialize signals
        self.init_signals()
        # Initialize the recipe to be edited (replaces the empty model
        # created by the base class)
        self.recipe = recipe
        # Refresh the data fields to reflect the current recipe data
        self.refresh_data()
|
UTF-8
|
Python
| false | false | 2,013 |
10,608,569,242,323 |
a2ee69f4863e52ddda92d47cc3c639837370a572
|
8a7aec7253dd1a0c3560cb71d354c1bcb1102c66
|
/tools/telemetry/telemetry/core/chrome/android_browser_finder.py
|
f6a0c0e3e00263b08d98aebc6420ee06f672ecec
|
[
"BSD-3-Clause",
"LGPL-2.1-only",
"MPL-1.1",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"GPL-2.0-only",
"Apache-2.0",
"LicenseRef-scancode-unknown"
] |
non_permissive
|
loopCM/chromium
|
https://github.com/loopCM/chromium
|
78e60fe2945f40d5ab3a77a6fd36c667ca323b0f
|
8db1d931e4e1609d7d8f021ecb4fd2db0b92cb87
|
HEAD
| 2019-07-18T09:18:52.643862 | 2013-05-21T00:44:40 | 2013-05-21T00:44:40 | 10,188,303 | 7 | 5 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Finds android browsers that can be controlled by telemetry."""
import os
import logging as real_logging
import re
import subprocess
import sys
from telemetry.core import browser
from telemetry.core import possible_browser
from telemetry.core.chrome import adb_commands
from telemetry.core.chrome import android_browser_backend
from telemetry.core.platform import android_platform_backend
# Maps telemetry browser-type names to the Android package that implements
# each Chrome channel.
CHROME_PACKAGE_NAMES = {
  'android-chrome': 'com.google.android.apps.chrome',
  'android-chrome-beta': 'com.chrome.beta',
  'android-chrome-dev': 'com.google.android.apps.chrome_dev',
  'android-jb-system-chrome': 'com.android.chrome'
}

# Comma-separated list of every Android browser type this finder knows.
ALL_BROWSER_TYPES = ','.join([
    'android-chromium-testshell',
    'android-content-shell',
    'android-webview',
    ] + CHROME_PACKAGE_NAMES.keys())

# Android packages for the non-Chrome test shells.
CONTENT_SHELL_PACKAGE = 'org.chromium.content_shell_apk'
CHROMIUM_TESTSHELL_PACKAGE = 'org.chromium.chrome.testshell'
WEBVIEW_PACKAGE = 'com.android.webview.chromium.shell'

# adb shell pm list packages
# adb
# intents to run (pass -D url for the rest)
# com.android.chrome/.Main
# com.google.android.apps.chrome/.Main
class PossibleAndroidBrowser(possible_browser.PossibleBrowser):
  """A launchable android browser instance."""
  def __init__(self, browser_type, options, backend_settings):
    super(PossibleAndroidBrowser, self).__init__(browser_type, options)
    # Backend settings carry the adb connection and package details.
    self._backend_settings = backend_settings

  def __repr__(self):
    return 'PossibleAndroidBrowser(browser_type=%s)' % self.browser_type

  def Create(self):
    """Instantiate a Browser wired to Android browser/platform backends."""
    backend = android_browser_backend.AndroidBrowserBackend(
        self._options, self._backend_settings)
    # The platform backend shares the adb connection from the settings.
    platform_backend = android_platform_backend.AndroidPlatformBackend(
        self._backend_settings.adb.Adb(), self._options.no_performance_mode)
    b = browser.Browser(backend, platform_backend)
    backend.SetBrowser(b)
    return b

  def SupportsOptions(self, options):
    # Android browsers cannot side-load extensions.
    if len(options.extensions_to_load) != 0:
      return False
    return True
def FindAllAvailableBrowsers(options, logging=real_logging):
  """Finds all the android browsers available on this machine.

  Args:
    options: Telemetry browser options; options.android_device (if set)
        selects a specific device, otherwise exactly one attached device
        is required.
    logging: Logging module to report through; injectable so tests can
        capture the warnings.

  Returns:
    A list of PossibleAndroidBrowser instances; empty when adb is missing,
    no (or multiple) devices are attached, or the forwarder is not built.
  """
  if not adb_commands.IsAndroidSupported():
    return []

  # See if adb even works.
  try:
    with open(os.devnull, 'w') as devnull:
      proc = subprocess.Popen(['adb', 'devices'],
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE,
                              stdin=devnull)
      stdout, _ = proc.communicate()
      if re.search(re.escape('????????????\tno permissions'), stdout) != None:
        logging.warn(
            ('adb devices reported a permissions error. Consider '
             'restarting adb as root:'))
        logging.warn('  adb kill-server')
        logging.warn('  sudo `which adb` devices\n\n')
  except OSError:
    # adb was not found on PATH; fall back to the copy checked in under
    # third_party/android_tools (Linux only).
    # BUG FIX: the original expression was missing a comma between the last
    # '..' and 'third_party', so implicit string-literal concatenation
    # produced '..third_party' and the fallback path could never exist.
    platform_tools_path = os.path.join(
        os.path.dirname(__file__), '..', '..', '..', '..', '..',
        'third_party', 'android_tools', 'sdk', 'platform-tools')
    if (sys.platform.startswith('linux') and
        os.path.exists(os.path.join(platform_tools_path, 'adb'))):
      os.environ['PATH'] = os.pathsep.join([platform_tools_path,
                                            os.environ['PATH']])
    else:
      logging.info('No adb command found. ' +
                   'Will not try searching for Android browsers.')
      return []

  device = None
  if options.android_device:
    devices = [options.android_device]
  else:
    devices = adb_commands.GetAttachedDevices()

  if len(devices) == 0:
    logging.info('No android devices found.')
    return []

  if len(devices) > 1:
    logging.warn('Multiple devices attached. ' +
                 'Please specify a device explicitly.')
    return []

  device = devices[0]

  adb = adb_commands.AdbCommands(device=device)

  # Match each known browser package against the device's installed packages.
  packages = adb.RunShellCommand('pm list packages')
  possible_browsers = []
  if 'package:' + CONTENT_SHELL_PACKAGE in packages:
    b = PossibleAndroidBrowser(
        'android-content-shell',
        options, android_browser_backend.ContentShellBackendSettings(
            adb, CONTENT_SHELL_PACKAGE))
    possible_browsers.append(b)

  if 'package:' + CHROMIUM_TESTSHELL_PACKAGE in packages:
    b = PossibleAndroidBrowser(
        'android-chromium-testshell',
        options, android_browser_backend.ChromiumTestShellBackendSettings(
            adb, CHROMIUM_TESTSHELL_PACKAGE))
    possible_browsers.append(b)

  if 'package:' + WEBVIEW_PACKAGE in packages:
    b = PossibleAndroidBrowser(
        'android-webview',
        options,
        android_browser_backend.WebviewBackendSettings(adb, WEBVIEW_PACKAGE))
    possible_browsers.append(b)

  for name, package in CHROME_PACKAGE_NAMES.iteritems():
    if 'package:' + package in packages:
      b = PossibleAndroidBrowser(
          name,
          options,
          android_browser_backend.ChromeBackendSettings(adb, package))
      possible_browsers.append(b)

  # See if the "forwarder" is installed -- we need this to host content locally
  # but make it accessible to the device.
  if len(possible_browsers) and not adb_commands.HasForwarder():
    logging.warn('telemetry detected an android device. However,')
    logging.warn('Chrome\'s port-forwarder app is not available.')
    logging.warn('To build:')
    logging.warn('  ninja -C out/Release forwarder2 md5sum')
    logging.warn('')
    logging.warn('')
    return []
  return possible_browsers
|
UTF-8
|
Python
| false | false | 2,013 |
9,096,740,733,858 |
131bec447106260853b2a2e60929914ba37abeb1
|
46ae11bb6b73b0e11438ee0db07155e24ee5f9f9
|
/test/tag.py
|
443d326fbb8979be151f3fe8c84cf3e7f0dd3f4c
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
ajduncan/stagger
|
https://github.com/ajduncan/stagger
|
7107d1b07ab40339a01caa47c808ab6b36a1917d
|
20b8cb4fc9bef90bffcf9f87d208348257c81001
|
refs/heads/master
| 2021-01-21T16:38:43.513689 | 2014-12-19T17:30:34 | 2014-12-19T17:30:34 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
#
# tag.py
# From the stagger project: http://code.google.com/p/stagger/
#
# Copyright (c) 2009-2011 Karoly Lorentey <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import unittest
import os.path
import warnings
import stagger
from stagger.id3 import *
class TagTestCase(unittest.TestCase):
    """Exercises stagger's Tag22/Tag23/Tag24 classes: the mapping-style
    frame interface, padding behavior during encode(), text-frame encoding
    selection, on-disk frame ordering, multi-valued text frames, and
    empty-tag / empty-string edge cases."""

    def testBasic(self):
        """Set, read back, override and delete a single text frame via the
        dict-like tag interface, for each supported tag version."""
        # TT2 is the v2.2 title frame; TIT2 its v2.3/v2.4 equivalent.
        for cls, frm in (stagger.Tag22, TT2), (stagger.Tag23, TIT2), (stagger.Tag24, TIT2):
            tag = cls()
            # New tag must be empty
            self.assertEqual(len(tag), 0)

            # Set a frame using a single string, see if it's in the tag
            tag[frm] = "Foobar"
            # Lookup works both by frame class and by frame-id string.
            self.assertTrue(frm.frameid in tag)
            self.assertTrue(frm in tag)
            self.assertEqual(len(tag), 1)
            # Internal storage keeps a one-element frame list per frame id.
            self.assertEqual(len(tag._frames[frm.frameid]), 1)

            # Compare value to hand-constructed frame
            self.assertEqual(len(tag[frm].text), 1)
            self.assertEqual(tag[frm].text[0], "Foobar")
            self.assertEqual(tag[frm], frm(encoding=None, text=["Foobar"]))

            # Override the above text frame with a multivalue text frame
            tag[frm] = ("Foo", "bar", "baz")
            self.assertEqual(len(tag), 1)
            self.assertEqual(len(tag._frames[frm.frameid]), 1)
            self.assertEqual(
                tag[frm], frm(encoding=None, text=["Foo", "bar", "baz"]))

            # Delete frame from tag, verify it's gone
            del tag[frm]
            self.assertEqual(len(tag), 0)
            self.assertTrue(frm not in tag)
            self.assertTrue(frm.frameid not in tag)

    def testPadding(self):
        """Check how padding_max / padding_default interact with the
        size_hint argument of Tag.encode().

        Expected behavior demonstrated below: a size_hint is honored only
        when it exceeds the unpadded length and fits within padding_max;
        otherwise padding_default (if any) is appended instead."""
        for tagcls, frames in ((stagger.Tag22, (TT2, TP1)),
                               (stagger.Tag23, (TIT2, TPE1)),
                               (stagger.Tag24, (TIT2, TPE1))):
            # Create a simple tag
            tag = tagcls()
            for frame in frames:
                tag[frame] = frame.frameid.lower()

            # Try encoding tag with various padding options
            tag.padding_max = 0
            tag.padding_default = 0
            # With padding disabled, size_hint must have no effect.
            data_nopadding_nohint = tag.encode()
            data_nopadding_hint = tag.encode(size_hint=500)
            length = len(data_nopadding_nohint)
            self.assertEqual(
                len(data_nopadding_nohint), len(data_nopadding_hint))
            self.assertTrue(data_nopadding_nohint == data_nopadding_hint)

            tag.padding_max = 1000
            # padding_default is still 0: only an achievable size_hint pads.
            data_max_nohint = tag.encode()
            data_max_hint = tag.encode(size_hint=500)
            data_max_largehint = tag.encode(size_hint=5000)
            self.assertEqual(len(data_max_nohint), length)
            self.assertEqual(len(data_max_hint), 500)
            # A hint larger than padding_max falls back to no padding.
            self.assertEqual(len(data_max_largehint), length)
            # Padding only appends bytes; the payload after the 10-byte
            # header is unchanged.
            self.assertTrue(data_max_nohint[10:] == data_max_hint[10:length])

            tag.padding_default = 250
            data_default_nohint = tag.encode()
            data_default_okhint = tag.encode(size_hint=500)
            data_default_largehint = tag.encode(size_hint=2000)
            data_default_smallhint = tag.encode(size_hint=20)
            # Unusable hints (missing, too large, too small) all yield the
            # default padding instead.
            self.assertEqual(len(data_default_nohint), length + 250)
            self.assertEqual(len(data_default_okhint), 500)
            self.assertEqual(len(data_default_largehint), length + 250)
            self.assertEqual(len(data_default_smallhint), length + 250)

    def testFrameEncoding(self):
        """Verify which character encoding stagger picks for text frames,
        and that tag.encodings overrides the automatic choice."""
        for tagcls, frm in ((stagger.Tag22, TT2),
                            (stagger.Tag23, TIT2),
                            (stagger.Tag24, TIT2)):
            tag = tagcls()
            value = frm.frameid.lower()
            tag[frm] = value
            tag.padding_max = 0

            # By default, tag should use Latin-1 to encode value (it contains
            # only ASCII).
            data = tag.encode()
            self.assertNotEqual(
                data.find(value.encode("latin-1") + b"\x00"), -1)

            # Now override encoding, see that frame is encoded accordingly.
            old_encodings = tag.encodings
            tag.encodings = ("utf-16",)
            data = tag.encode()
            self.assertEqual(data.find(value.encode("latin-1") + b"\x00"), -1)
            self.assertNotEqual(
                data.find(value.encode("utf-16") + b"\x00\x00"), -1)
            tag.encodings = old_encodings

            # Now change value to contain non-Latin-1 chars
            value = "Lőrentey Károly"
            tag[frm] = value
            data = tag.encode()
            if tagcls is stagger.Tag24:
                # Stagger falls back to utf-8 for 2.4 frames.
                self.assertNotEqual(
                    data.find(value.encode("utf-8") + b"\x00"), -1)
            else:
                # Other versions fall back to utf-16.
                self.assertNotEqual(
                    data.find(value.encode("utf-16") + b"\x00\x00"), -1)

            # Force UTF-16-BE encoding.
            tag.encodings = ("utf-16-be",)
            data = tag.encode()
            self.assertNotEqual(
                data.find(value.encode("utf-16-be") + b"\x00\x00"), -1)

    def testFrameOrder(self):
        """Reading a tag must preserve the file's frame order, and setting
        an empty FrameOrder must make encode() reproduce the original bytes."""
        # 24.stagger.sample-01.id3 contains a simple test tag that has file frames
        # in the following order:
        #
        #   TIT2("TIT2"), TPE1("TPE1"), TALB("TALB"), TRCK("TRCK"), TPE2("TPE2")
        testfile = os.path.join(os.path.dirname(__file__), "samples",
                                "24.stagger.sample-01.id3")
        framelist = [TRCK, TPE2, TALB, TIT2, TPE1]

        # Read tag, verify frame ordering is preserved
        tag = stagger.read_tag(testfile)
        self.assertEqual(len(tag), 5)
        self.assertEqual(
            set(tag.keys()), set(frame.frameid for frame in framelist))
        self.assertEqual([frame.frameid for frame in tag.frames(orig_order=True)],
                         [frame.frameid for frame in framelist])

        # Test frame contents
        for framecls in framelist:
            # tag[TIT2] == tag["TIT2"]
            self.assertTrue(framecls in tag)
            self.assertTrue(framecls.frameid in tag)
            self.assertEqual(tag[framecls], tag[framecls.frameid])

            # type(tag[TIT2]) == TIT2
            self.assertTrue(isinstance(tag[framecls], framecls))

            # Each frame contains a single string, which is the frame id in
            # lowercase.
            self.assertEqual(len(tag[framecls].text), 1)
            self.assertEqual(tag[framecls].text[0], framecls.frameid.lower())

        # Encode tag with default frame ordering, verify result is different.
        with open(testfile, "rb") as file:
            filedata = file.read()
        tag.padding_max = 0

        # Default sorting order is different.
        tagdata = tag.encode()
        self.assertEqual(len(tagdata), len(filedata))
        self.assertFalse(tagdata == filedata)

        # Override the sort order with an empty list,
        # verify resulting order is the same as in the original file.
        tag.frame_order = stagger.tags.FrameOrder()
        tagdata = tag.encode()
        self.assertTrue(tagdata == filedata)

        # Round-trip: decoding the re-encoded bytes gives an equal tag.
        tag2 = stagger.decode_tag(tagdata)
        self.assertTrue(tag == tag2)

    def testMultipleStrings(self):
        """v2.3/v2.4 round-trip multi-valued text frames intact; v2.2 merges
        them into one " / "-joined string with a FrameWarning."""
        for cls in (stagger.Tag23, stagger.Tag24):
            # Versions 2.3 and 2.4 have support for multiple values in text
            # frames.
            tag = cls()
            tag.padding_max = 0
            tag[TIT2] = ("Foo", "Bar", "Baz")
            self.assertEqual(len(tag[TIT2].text), 3)
            data = tag.encode()
            dtag = stagger.decode_tag(data)
            self.assertEqual(len(dtag[TIT2].text), 3)
            self.assertEqual(dtag[TIT2].text, tag[TIT2].text)

        # Version 2.2 has no such support, so stagger merges multiple strings.
        tag = stagger.Tag22()
        tag.padding_max = 0
        tag[TT2] = ("Foo", "Bar", "Baz")
        self.assertEqual(len(tag[TT2].text), 3)
        with warnings.catch_warnings(record=True) as ws:
            data = tag.encode()
            # Exactly one FrameWarning about the merge is expected.
            self.assertEqual(len(ws), 1)
            self.assertEqual(ws[0].category, stagger.FrameWarning)
        dtag = stagger.decode_tag(data)
        self.assertEqual(len(dtag[TT2].text), 1)
        self.assertEqual(dtag[TT2].text, ["Foo / Bar / Baz"])

    def testEmptyTag(self):
        """An empty tag of any version must encode to zero bytes."""
        for cls in (stagger.Tag22, stagger.Tag23, stagger.Tag24):
            tag = cls()
            # Empty tags should encode as an empty byte sequence
            # (i.e., no tag header or padding).
            self.assertEqual(len(tag.encode()), 0)

    def testEmptyStrings(self):
        """Trailing NUL-only strings in a text frame are stripped on read,
        with a single FrameWarning reporting how many were dropped."""
        # 24.stagger.empty-strings.id3 consists of a TIT2 frame with 13 extra
        # NUL characters at the end.
        testfile = os.path.join(os.path.dirname(__file__), "samples",
                                "24.stagger.empty-strings.id3")
        with warnings.catch_warnings(record=True) as ws:
            tag = stagger.read_tag(testfile)
            self.assertEqual(tag[TIT2].text, ["Foobar"])
            self.assertEqual(len(ws), 1)
            self.assertEqual(ws[0].category, stagger.FrameWarning)
            self.assertEqual(ws[0].message.args, ("TIT2: Stripped 13 empty strings "
                                                  "from end of frame",))
# Module-level suite collecting every test method of TagTestCase, referenced
# below via unittest.main(defaultTest="suite").
suite = unittest.TestLoader().loadTestsFromTestCase(TagTestCase)

if __name__ == "__main__":
    # Surface every stagger warning on each occurrence, not just the first,
    # so warning-count assertions behave deterministically.
    warnings.simplefilter("always", stagger.Warning)
    unittest.main(defaultTest="suite")
|
UTF-8
|
Python
| false | false | 2,014 |
14,723,147,901,245 |
b71425d48c7cd519de26fdf101d1f11c9a0b7f7b
|
f1b12ecf461f6a4740f861f3d29067fc3cf342c7
|
/unittest/market_mock.py
|
49457d2bbe6b336bf28afcb00d629e790c5804c1
|
[
"GPL-3.0-only"
] |
non_permissive
|
sebastianhaberey/goxgui
|
https://github.com/sebastianhaberey/goxgui
|
3afa7aa7ea1698cac40a4b3b628e58864914acf7
|
ba7c38c4fa61722bd009056ca591d9061a7bf16b
|
refs/heads/master
| 2016-09-05T10:15:39.654514 | 2014-02-16T13:41:58 | 2014-02-16T13:41:58 | 9,430,821 | 8 | 0 | null | false | 2013-04-20T16:10:36 | 2013-04-14T15:23:42 | 2013-04-20T16:00:07 | 2013-04-20T16:00:02 | 288 | null | 1 | 0 |
Python
| null | null |
from PyQt4.QtCore import pyqtSignal, QObject
from money import to_money
class MarketMock(QObject):
    '''
    Stand-in for the real market object, used by unit tests and profiling.

    It exposes the same Qt signals as the real market and offers helper
    methods that convert raw numbers to money values before emitting.
    '''

    # log message
    signal_log = pyqtSignal(str)

    # none
    signal_wallet = pyqtSignal()

    # milliseconds
    signal_orderlag = pyqtSignal(object, str)

    # price, size, order type, order id, status
    signal_userorder = pyqtSignal(object, object, str, str, str)

    # price, size
    signal_bid = pyqtSignal(object, object)

    # list of [price, size]
    signal_bids = pyqtSignal(object)

    # price, size
    signal_ask = pyqtSignal(object, object)

    # list of [price, size]
    signal_asks = pyqtSignal(object)

    # bid, ask
    signal_ticker = pyqtSignal(object, object)

    # price, size, type
    signal_trade = pyqtSignal(object, object, object)

    def __init__(self):
        QObject.__init__(self)

    def depth_bid(self, price, volume):
        '''Emit one bid-side depth update with money-converted values.'''
        self.signal_bid.emit(to_money(price), to_money(volume))

    def depth_ask(self, price, volume):
        '''Emit one ask-side depth update with money-converted values.'''
        self.signal_ask.emit(to_money(price), to_money(volume))

    def depth_asks(self, depths):
        '''Convert every [price, size] pair in place, then emit the list.'''
        for pair in depths:
            pair[0], pair[1] = to_money(pair[0]), to_money(pair[1])
        self.signal_asks.emit(depths)

    def ticker(self, bid, ask):
        '''Emit a ticker update with money-converted bid and ask.'''
        self.signal_ticker.emit(to_money(bid), to_money(ask))
|
UTF-8
|
Python
| false | false | 2,014 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.