__id__
int64 3.09k
19,722B
| blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
256
| content_id
stringlengths 40
40
| detected_licenses
list | license_type
stringclasses 3
values | repo_name
stringlengths 5
109
| repo_url
stringlengths 24
128
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
42
| visit_date
timestamp[ns] | revision_date
timestamp[ns] | committer_date
timestamp[ns] | github_id
int64 6.65k
581M
⌀ | star_events_count
int64 0
1.17k
| fork_events_count
int64 0
154
| gha_license_id
stringclasses 16
values | gha_fork
bool 2
classes | gha_event_created_at
timestamp[ns] | gha_created_at
timestamp[ns] | gha_updated_at
timestamp[ns] | gha_pushed_at
timestamp[ns] | gha_size
int64 0
5.76M
⌀ | gha_stargazers_count
int32 0
407
⌀ | gha_forks_count
int32 0
119
⌀ | gha_open_issues_count
int32 0
640
⌀ | gha_language
stringlengths 1
16
⌀ | gha_archived
bool 2
classes | gha_disabled
bool 1
class | content
stringlengths 9
4.53M
| src_encoding
stringclasses 18
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | year
int64 1.97k
2.01k
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4,913,442,607,485 |
56ca2b3a324d9fce720ac6861a55202cdae55a9e
|
83a0f21a6a199d0af997ba28c6d106f7cbce670c
|
/src/sync/proctl.py
|
f007d5f3cca84ea7c08e4e2eb3b144b58d31c60a
|
[
"GPL-2.0-only"
] |
non_permissive
|
gfxmonk/google-reader-iphone-sync
|
https://github.com/gfxmonk/google-reader-iphone-sync
|
5fb2e9f9c736cffd9efe70fb2f57e0e08de650b8
|
b67054dc404932dd74905a03d30625690424585f
|
refs/heads/master
| 2016-03-31T02:47:25.582333 | 2010-07-25T06:27:41 | 2010-07-25T06:27:41 | 26,334 | 8 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# process control
import commands
import os
import signal
import sys

import app_globals
from misc import *
def get_pid_filename():
	"""Path of the pid-file inside the configured output directory."""
	output_dir = app_globals.OPTIONS['output_path']
	return "%s/sync.pid" % (output_dir,)
def write_pid_file(filename):
	"""Record this process's own PID in `filename` (via misc.write_file)."""
	own_pid = os.getpid()
	write_file(filename, str(own_pid))
def report_pid():
none = 'None'
try:
pid = get_running_pid()
if pid is None:
print none
else:
print pid
except StandardError, e:
exception("Error getting running pid")
print none
def get_pids_matching(pattern):
	"""Return the PIDs (as ints) of all processes whose `ps ux` line matches
	`pattern` (a grep pattern, not a Python regex).

	Raises RuntimeError when the pipeline fails or emits non-numeric output.
	"""
	# Shell pipeline: list processes, drop the grep itself, keep matching
	# lines, print column 2 (the PID). NOTE(review): `pattern` is interpolated
	# unescaped into a shell command — callers must pass trusted patterns only.
	status, output = commands.getstatusoutput("ps ux | grep -v grep | grep '%s' | awk '{print $2}'" % pattern) # classy!
	running_pids = []
	if output.endswith("Operation not permitted"):
		# `ps` fails with this message on the iPhone simulator (i386 uname);
		# pretend it succeeded with no matches there, and only warn elsewhere.
		if(os.uname()[-1] == 'i386'):
			status, output = (0, '') # lets just pretend it worked, and everything is fine
		else:
			warning("Error fetching running pids: %s" % (output,))
			warning(" - This is known to happen on the iphone simulator.")
			warning(" - if you see it on a real device, please file a bug report")
	if status != 0:
		raise RuntimeError("could not execute pid-checking command. got status of %s, output:\n%s" % (status, output))
	running_pids = output.split()
	try:
		# Every whitespace-separated token from awk should be a PID.
		running_pids = [int(x) for x in running_pids if len(x) > 0]
	except ValueError, e:
		raise RuntimeError("one or more pids could not be converted to an integer: %r" % (running_pids,))
	return running_pids
def get_running_pid():
	"""
	Return the PID recorded in the pid-file if that process is still a
	running GRiS python process; otherwise return None.

	@throws: IOError, ValueError, RuntimeError
	"""
	filename = get_pid_filename()
	if not os.path.isfile(filename): return None
	try:
		pid = int(read_file(filename).strip())
	except (IOError, ValueError), e:
		exception("Couldn't load PID file at %s: %s" % (filename,e))
		raise
	if pid == os.getpid():
		# it's me! it must have been stale, and happened to be reused. we don't want to kill it
		return None
	# Only trust the pid-file when a matching python process actually exists.
	running_pids = get_pids_matching('python.*GRiS')
	if pid in running_pids:
		return pid
	return None
def ensure_singleton_process():
"""
ensure only one sync process is ever running.
if --aggressive is given as a flag, this process will kill the existing one
otherwise, it will exit when there is already a process running
"""
aggressive = app_globals.OPTIONS['aggressive']
pid = None
try:
pid = get_running_pid()
debug("no pid file found at %s" % (filename,))
except StandardError, e:
pass
if not aggressive:
# check for gris.app as well
native_pids = get_pids_matching('Applications/GRiS\.app/GRiS')
if len(native_pids) > 0:
pid = native_pids[0]
if pid is not None:
if not aggressive:
error("There is already a sync process running, pid=%s" % (pid,))
sys.exit(2)
else:
try:
debug("killing PID %s " %(pid,))
os.kill(pid, signal.SIGKILL)
except OSError, e:
msg = "couldn't kill pid %s - %s" % (pid,e)
error(msg)
sys.exit(2)
# if we haven't exited by now, we're the new running pid!
filename = get_pid_filename()
write_pid_file(filename)
|
UTF-8
|
Python
| false | false | 2,010 |
9,749,575,770,413 |
089d1fe74cda1c3661cb0ec335621556b66de41a
|
1885ae0584d636bd6947a03b2395483645479361
|
/src/fuss.py
|
e501f783fb082a3cec5ad3ebcb2501d782d1a7b1
|
[] |
no_license
|
sbillaudelle/fuss
|
https://github.com/sbillaudelle/fuss
|
b93c86f5309172373b98c8709830afd115664e63
|
201ed98b42f088f5482c899e6669f6bb70f92852
|
refs/heads/master
| 2016-09-07T23:57:08.198114 | 2011-02-24T16:24:19 | 2011-02-24T16:24:19 | 1,150,103 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
import os
import time
import gobject
import gtk
import clutter
import cluttergtk
import cairo
import pango
import pangocairo
import ooxcb
from ooxcb.protocol import xproto, screensaver, composite
import cream
import cream.gui
import fuss.helper
class XScreenSaverSession(object):
	""" Wrapper for the XScreenSaverSession. """

	def __init__(self):
		# Connect to the X server and keep the root window of the preferred
		# screen; screensaver queries below are made against this window.
		self.connection = ooxcb.connect()
		self.root = self.connection.setup.roots[self.connection.pref_screen].root

	def query(self):
		"""
		Query the X screensaver extension.

		:returns: a 3-tuple ``(state, seconds_until_server, seconds_since_user_input)``
		    — the screensaver state plus the two millisecond counters from the
		    reply converted to whole seconds. (Note: the ``round(..., 1)`` is
		    immediately truncated by ``int()``, so the fractional digit is lost.)
		:rtype: `tuple`
		"""
		reply = screensaver.DrawableMixin.query_info(self.root).reply()
		return reply.state, int(round(float(reply.ms_until_server) / 1000, 1)), int(round(float(reply.ms_since_user_input) / 1000, 1))
class Text(clutter.CairoTexture):
	"""Clutter texture rendering pango markup as white text over a black
	drop shadow; the shadow is optionally gaussian-blurred.

	The original render() contained the entire pango drawing pass twice
	(once black, once white); it is factored into ``_draw_markup`` so both
	passes share one implementation.
	"""

	def __init__(self, text, blur=False, font=None):
		self.text = text
		self.blur = blur
		self.font = font or pango.FontDescription('Sans 12')
		# 6px of padding overall (3px offset on each side of the text).
		self.width, self.height = [i + 6 for i in fuss.helper.get_text_preferred_size(self.text, font=self.font)]
		clutter.CairoTexture.__init__(self, int(self.width), int(self.height))
		self.render()

	def set_text(self, text):
		"""Replace the displayed markup, resize the texture, and re-render."""
		self.text = text
		self.width, self.height = [i + 6 for i in fuss.helper.get_text_preferred_size(self.text, font=self.font)]
		self.set_surface_size(self.width, self.height)
		self.set_size(self.width, self.height)
		self.render()

	def get_text(self):
		"""Return the current markup string."""
		return self.text

	def _draw_markup(self, surface, rgb):
		# One pango pass: draw self.text onto `surface` in the given colour,
		# centered, offset by the 3px padding.
		ctx = cairo.Context(surface)
		ctx.move_to(3, 3)
		ctx.set_source_rgb(*rgb)
		pango_ctx = pangocairo.CairoContext(ctx)
		layout = pango_ctx.create_layout()
		layout.set_width(int((self.width - 6) * pango.SCALE))
		layout.set_alignment(pango.ALIGN_CENTER)
		layout.set_font_description(self.font)
		layout.set_markup(self.text)
		pango_ctx.show_layout(layout)

	def render(self):
		"""Paint black shadow (optionally blurred), then white foreground."""
		self.clear()
		surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, int(self.width), int(self.height))
		self._draw_markup(surface, (0, 0, 0))
		if self.blur:
			surface = fuss.helper.blur(surface, 5)
		self._draw_markup(surface, (1, 1, 1))
		ctx = self.cairo_create()
		ctx.set_source_surface(surface)
		ctx.paint()
class Fuss(cream.Module):
	"""Fullscreen clock/date overlay that fades in when the X screensaver
	activates and fades out again on user activity."""

	# Whether the overlay is currently faded in.
	visible = False

	def __init__(self):
		cream.Module.__init__(self, 'org.sbillaudelle.Fuss')
		self.screensaver = XScreenSaverSession()
		# A fullscreen, initially fully transparent window that never takes
		# focus and stays above everything on all desktops.
		self.window = gtk.Window()
		self.window.fullscreen()
		self.window.set_opacity(0)
		self.display = self.window.get_display()
		self.screen = self.display.get_default_screen()
		self.width, self.height = self.screen.get_width(), self.screen.get_height()
		self.window.resize(self.width, self.height)
		self.window.set_property('skip-pager-hint', True)
		self.window.set_property('skip-taskbar-hint', True)
		self.window.set_property('accept-focus', False)
		self.window.stick()
		self.window.set_keep_above(True)
		# Clutter stage embedded in the GTK window, with a background image
		# from the module configuration.
		self.embed = cluttergtk.Embed()
		self.window.add(self.embed)
		self.embed.realize()
		self.stage = self.embed.get_stage()
		self.stage.set_color(clutter.Color(50, 50, 50))
		self.background = clutter.texture_new_from_file(os.path.expanduser(self.config.background_image))
		self.stage.add(self.background)
		# Display the time... (placeholder text; update() replaces it)
		self.time = Text('10:15', blur=True, font=pango.FontDescription('Droid Sans 220'))
		self.time.set_position((self.width - self.time.get_width()) / 2, 400)
		self.time.connect('allocation-changed', self.time_allocation_changed_cb)
		self.stage.add(self.time)
		# Display the date... (placeholder text; update() replaces it)
		self.date = Text('Montag, 6. Dezember 2010', blur=True, font=pango.FontDescription('Droid Sans 36'))
		self.date.set_position((self.width - self.date.get_width()) / 2, 700)
		self.date.connect('allocation-changed', self.date_allocation_changed_cb)
		self.stage.add(self.date)
		self.window.show_all()
		# Empty input shape: pointer events pass through the overlay.
		self.window.window.input_shape_combine_region(gtk.gdk.Region(), 0, 0)
		# A 1x1 blank pixmap cursor hides the mouse pointer over the overlay.
		pixmap = gtk.gdk.Pixmap(None, 1, 1, 1)
		color = gtk.gdk.Color()
		cursor = gtk.gdk.Cursor(pixmap, pixmap, color, color, 0, 0)
		self.window.window.set_cursor(cursor)
		# Poll clock + screensaver state every 333 ms.
		self.update()
		gobject.timeout_add(333, self.update)
		# Redirect the window onto the composite overlay window so it is
		# drawn above all other windows, including fullscreen ones.
		self.connection = ooxcb.connect()
		self.root = self.connection.setup.roots[self.connection.pref_screen].root
		self.cow = gtk.gdk.window_foreign_new(composite.WindowMixin.get_overlay_window(self.root).reply().overlay_win.xid)
		self.window.window.redirect_to_drawable(self.cow, 0, 0, 0, 0, self.window.get_allocation().width, self.window.get_allocation().height)

	def quit(self):
		# Release the composite overlay before shutting the module down.
		composite.WindowMixin.release_overlay_window(self.root)
		cream.Module.quit(self)

	def fade_in(self):
		"""Animate opacity 0 -> 1 over one second and start grabbing input."""
		def fade(timeline, status):
			self.window.set_opacity(status)
		self.visible = True
		self.messages.debug("Fading in...")
		# NOTE(review): 1440x900 is hard-coded — presumably the author's
		# screen size; (self.width, self.height) would generalize. TODO confirm.
		self.window.window.input_shape_combine_region(gtk.gdk.region_rectangle((0, 0, 1440, 900)), 0, 0)
		t = cream.gui.Timeline(1000, cream.gui.CURVE_SINE)
		t.connect('update', fade)
		t.run()

	def fade_out(self):
		"""Animate opacity 1 -> 0 over one second and release input again."""
		def fade(timeline, status):
			self.window.set_opacity(1 - status)
		self.visible = False
		self.window.window.input_shape_combine_region(gtk.gdk.Region(), 0, 0)
		self.messages.debug("Fading out...")
		t = cream.gui.Timeline(1000, cream.gui.CURVE_SINE)
		t.connect('update', fade)
		t.run()

	def update(self):
		"""Periodic tick: refresh clock text; fade with screensaver state.

		Returns True so gobject.timeout_add keeps the timer alive.
		"""
		t = time.strftime('%H:%M')
		if self.time.get_text() != t:
			self.time.set_text(t)
		d = time.strftime('%A, %d. %B %Y')
		if self.date.get_text() != d:
			self.date.set_text(d)
		screensaver_info = self.screensaver.query()
		self.messages.debug("'{0}' seconds left until fading in...".format(screensaver_info[1]))
		# state == 1: screensaver active -> show overlay; 0: inactive -> hide.
		if screensaver_info[0] == 1 and not self.visible:
			self.fade_in()
		elif screensaver_info[0] == 0 and self.visible:
			self.fade_out()
		return True

	def time_allocation_changed_cb(self, *args):
		# Re-center the time actor whenever its size changes.
		self.time.set_position((self.width - self.time.get_width()) / 2, 400)

	def date_allocation_changed_cb(self, *args):
		# Re-center the date actor whenever its size changes.
		self.date.set_position((self.width - self.date.get_width()) / 2, 700)
# Entry point: construct the module and hand control to the cream main loop.
if __name__ == '__main__':
    Fuss().main()
|
UTF-8
|
Python
| false | false | 2,011 |
1,030,792,183,545 |
630839f105547bec2abebe2528117ab17c8bc636
|
1a1ce287815c49da4b85d847b233f34b9a35d52c
|
/data.py
|
8e5eda56a766f723a5fd2e6ce57abe3de3ede705
|
[] |
no_license
|
ResByte/EMG-classifier
|
https://github.com/ResByte/EMG-classifier
|
1b43e2773f5db7850bdd8d8c68d311c112bfa874
|
2770a76681fe8f64cb86914d58b9427317c30a9f
|
refs/heads/master
| 2021-01-21T22:29:18.792104 | 2014-07-24T07:39:54 | 2014-07-24T07:39:54 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
import math
import random
import sys
import scipy
from sklearn.neighbors.nearest_centroid import NearestCentroid
np.set_printoptions(threshold='nan')
def trainSet(arr):
    """Cluster the feature vectors in `arr` into 3 groups with k-means.

    Bug fix: ``kmeans2`` lives in ``scipy.cluster.vq``, not ``sklearn``;
    the original referenced ``sklearn.cluster.vq.kmeans2`` although
    ``sklearn`` is never imported, so every call raised NameError.

    Returns (centroids, labels) exactly as kmeans2 does.
    """
    # Local import: 'import scipy' alone does not load the subpackage.
    from scipy.cluster.vq import kmeans2
    centr, lb = kmeans2(arr, 3, iter=10, thresh=1e-05, minit='random', missing='warn')
    return centr, lb
def mean(signal):
    """Arithmetic mean of `signal`.

    Bug fix: the original always divided by 10.0 regardless of the input
    length (its only caller, MAF, happens to pass 10-sample windows, so the
    result is unchanged there). Dividing by len(signal) generalizes the
    helper correctly; an empty signal still yields 0.0 as before.
    """
    total = 0.0
    for value in signal:
        total += value
    if not signal:
        return 0.0
    return total / len(signal)
def MAF(arr):
    """In-place moving-average filter with a 10-sample trailing window.

    Bug fix: the original appended the *indices* ``i-j`` to the window
    instead of the sample values ``arr[i-j]`` (compare rmsFilter, which
    does this correctly), so the "filtered" output was purely a function
    of sample position, not of the signal.

    Samples before index 10 are left untouched, matching the original.
    Returns `arr` (mutated in place; later windows see already-filtered
    values, as in the original).
    """
    for i in range(10, len(arr)):
        window = [arr[i - j] for j in range(10)]
        # Same semantics as the original mean() helper (sum / 10.0),
        # inlined so this fix does not depend on that helper.
        arr[i] = sum(window) / 10.0
    return arr
def rms(signal):
    """Root-mean-square of the values in `signal`."""
    squared_total = sum(value * value for value in signal)
    return np.sqrt(squared_total / len(signal))
def rmsFilter(arr):
    """In-place sliding RMS filter over a 15-sample trailing window.

    Each sample from index 15 onward is replaced by the RMS of itself and
    the 14 preceding (already-filtered) samples; the first 15 samples are
    left untouched. Returns `arr`.
    """
    for idx in range(15, len(arr)):
        window = [arr[idx - offset] for offset in range(15)]
        # RMS of the window, inlined (identical to the rms() helper).
        arr[idx] = np.sqrt(sum(v * v for v in window) / len(window))
    return arr
def butterworth(arr):
    """Placeholder for a Butterworth filter; not implemented (no-op)."""
    pass
def analyze_data(arr):
    """Summarise one filtered EMG segment as a feature vector.

    Returns ``[peak, mean, area, mean/peak]`` where peak is the maximum
    sample, area the trapezoidal integral, and mean/peak their ratio.
    """
    peak = np.amax(arr)
    mean_value = np.mean(arr)
    area = np.trapz(arr)
    return [peak, mean_value, area, mean_value / peak]
def analyze_x(arr):
    """Feature-extract the 15 gesture windows of channel x.

    The original unrolled 15 copy-pasted extract/filter/analyze passes;
    this version drives the identical computation from a segment table.
    Each (start, stop) pair marks one gesture repetition (sample indices)
    in the raw recording; the window is RMS-filtered and summarised by
    analyze_data(). Returns the 15 feature vectors in order.
    """
    # Window boundaries copied verbatim from the original unrolled code.
    segments = [
        (3500, 6500), (7500, 10500), (11500, 14500), (15500, 18500),
        (19500, 22500), (27000, 30000), (31000, 34000), (35000, 38000),
        (39000, 42000), (43000, 46000), (50000, 53000), (55500, 58500),
        (59500, 62500), (63000, 66000), (67000, 70000),
    ]
    results = []
    for start, stop in segments:
        window = [arr[i] for i in range(start, stop)]
        results.append(analyze_data(rmsFilter(window)))
    return results
def analyze_y(signal):
    """Feature-extract the 15 gesture windows of channel y.

    Same structure as analyze_x but with this channel's (narrower, 2500
    sample) window boundaries. Returns the 15 feature vectors in order.
    """
    # Window boundaries copied verbatim from the original unrolled code.
    segments = [
        (3500, 6000), (7500, 10000), (11500, 14000), (15500, 18000),
        (19500, 22000), (27500, 30000), (31500, 34000), (35500, 38000),
        (39500, 42000), (43500, 46000), (51500, 54000), (55500, 58000),
        (59500, 62000), (63500, 66000), (67500, 70000),
    ]
    results = []
    for start, stop in segments:
        window = [signal[i] for i in range(start, stop)]
        results.append(analyze_data(rmsFilter(window)))
    return results
def analyze_z(signal):
    """Feature-extract the 15 gesture windows of channel z.

    Same structure as analyze_x but with this channel's window boundaries.
    Returns the 15 feature vectors in order.
    """
    # Window boundaries copied verbatim from the original unrolled code.
    segments = [
        (3500, 6500), (7500, 10500), (11500, 14500), (15500, 18500),
        (19500, 22500), (27500, 30500), (31500, 34500), (35500, 38500),
        (39500, 42500), (43500, 46500), (51000, 54000), (55500, 58500),
        (59500, 62500), (63500, 66500), (67500, 70500),
    ]
    results = []
    for start, stop in segments:
        window = [signal[i] for i in range(start, stop)]
        results.append(analyze_data(rmsFilter(window)))
    return results
def analyze_k(signal):
    """Feature-extract the 15 gesture windows of channel k.

    Same structure as analyze_x but with this channel's window boundaries.
    Returns the 15 feature vectors in order.
    """
    # Window boundaries copied verbatim from the original unrolled code.
    segments = [
        (3500, 6500), (7500, 10500), (11500, 14500), (15500, 18500),
        (19500, 22500), (27500, 30500), (31500, 34500), (35000, 38000),
        (39000, 42000), (43000, 46000), (51000, 54000), (55500, 58500),
        (59500, 62500), (63500, 66500), (67500, 70500),
    ]
    results = []
    for start, stop in segments:
        window = [signal[i] for i in range(start, stop)]
        results.append(analyze_data(rmsFilter(window)))
    return results
if __name__=='__main__':
    # Load a 4-channel EMG recording (whitespace-separated columns; column 0
    # is skipped — presumably a timestamp, TODO confirm) and rectify each
    # channel with abs(). Then extract per-gesture features, train a
    # nearest-centroid classifier on the first 4 repetitions of each of the
    # 3 gestures, and classify the 15th repetition.
    with open(sys.argv[1],'r') as data_file:
        data=[]
        x=[]
        y=[]
        z=[]
        k=[]
        for line in data_file:
            tmp = line.strip().split()
            data.append(tmp)
            x.append(abs(float(tmp[1])))
            y.append(abs(float(tmp[2])))
            z.append(abs(float(tmp[3])))
            k.append(abs(float(tmp[4])))
        #print len(data),len(x),len(y),len(z),len(k)
        #filtered_x = rmsFilter(x) #MAF(x)
        #print "Analyzing x"
        res_x = analyze_x(x)
        #plt.plot(z)
        #print "Analyzing y"
        res_y =analyze_y(y)
        #print "Analyzing z"
        res_z = analyze_z(z)
        #print res_z
        #print "Analyzing k"
        res_k = analyze_k(k)
        #print res_k
        # Collect the mean (index 1) and mean/peak ratio (index 3) features
        # from each channel's 15 feature vectors.
        means_data_1 =[]
        means_data_2 = []
        means_data_3=[]
        means_data_4=[]
        ratio_data_1 = []
        ratio_data_2=[]
        ratio_data_3=[]
        ratio_data_4=[]
        for i in res_x:
            means_data_1.append(i[1])
            ratio_data_1.append(i[3])
        for i in res_y:
            means_data_2.append(i[1])
            ratio_data_2.append(i[3])
        for i in res_z:
            means_data_3.append(i[1])
            ratio_data_3.append(i[3])
        for i in res_k:
            means_data_4.append(i[1])
            ratio_data_4.append(i[3])
        # averaging out the means for all channels
        mean_avg = []
        for i in range(0,15):
            mean_avg.append((means_data_1[i] + means_data_2[i] + means_data_3[i] + means_data_4[i])/4)
        #print len(mean_avg)
        ratio_avg = []
        for i in range(0,15):
            ratio_avg.append((ratio_data_1[i]+ratio_data_2[i]+ratio_data_3[i]+ratio_data_4[i])/4)
        #print (ratio_avg)
        #mean_center,mean_lab = trainSet(mean_avg)
        #ratio_center, ratio_lab = trainSet(ratio_avg)
        # Train on repetitions 0-3, 5-8 and 10-13 of each gesture class
        # (labels 0/1/2); repetition 14 is held out as the test sample.
        # NOTE(review): range(0,4) etc. also skip indices 4 and 9 — looks
        # intentional (last repetition of each block unused), TODO confirm.
        clf = NearestCentroid()
        X = []
        Y = []
        for i in range(0,4):
            X.append([int(mean_avg[i]),ratio_avg[i]] )
            Y.append(0)
        for i in range(5,9):
            X.append([int(mean_avg[i]),ratio_avg[i]])
            Y.append(1)
        for i in range(10,14):
            X.append([int(mean_avg[i]),ratio_avg[i]])
            Y.append(2)
        #print X
        #print Y
        clf.fit(X,Y)
        res = clf.predict([[mean_avg[14],ratio_avg[14]]])
        if res == 0:
            print "rock"
        if res == 1:
            print "scissor"
        if res == 2:
            print "paper"
        """
        # Plot individual channels data
        plt.figure(1)
        plt.subplot(431)
        plt.plot(x)
        plt.ylabel('x')
        #plt.figure(2)
        plt.subplot(412)
        plt.plot(y)
        plt.ylabel('y')
        #plt.figure(3)
        plt.subplot(421)
        plt.plot(z)
        plt.ylabel('z')
        #plt.figure(4)
        plt.subplot(422)
        plt.plot(k)
        plt.ylabel('k')
        # Plot the individual means of the data
        plt.figure(5)
        plt.plot(means_data_1,'.')
        plt.plot(means_data_2,'.')
        plt.plot(means_data_3,'.')
        plt.plot(means_data_4,'.')
        plt.ylabel('mean points')
        #plot the ratios of mean and peak for different channels
        plt.figure(6)
        plt.plot(ratio_data_1,'.')
        plt.plot(ratio_data_2,'.')
        plt.plot(ratio_data_3,'.')
        plt.plot(ratio_data_4,'.')
        plt.ylabel('ratio points')
        """
        #plt.plot(mean_avg,'.')
        #plt.plot(mean_lab,'.')
        #plt.plot(ratio_avg,'.')
        #plt.plot(X)
        #plt.plot(Y)
        plt.show()
|
UTF-8
|
Python
| false | false | 2,014 |
14,010,183,344,888 |
e56d758181c0900695d3d1a85688b8530aa01dbb
|
fa0eb5e9e8281d864bade211440575aede9bcff6
|
/gnu_radio_dab_player.py
|
3f5ed71be43c6c2e255ef2708ca73b176449e991
|
[
"GPL-2.0-only"
] |
non_permissive
|
steveyoon77/gnu_radio_dmb_player
|
https://github.com/steveyoon77/gnu_radio_dmb_player
|
cefbf1323e5e0b1d7d5e71faf272a22bd145466d
|
162e7891b91d81ca4338be6c61475859d20ac286
|
refs/heads/master
| 2021-01-16T19:00:10.362856 | 2014-09-15T04:40:44 | 2014-09-15T04:40:44 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import wx
import wx.grid
import wx.aui
import images
import settings_pannel
import cStringIO
import wx.lib.agw.ribbon as Ribbon
import resources
# --------------------------------------------------- #
# Some constants for ribbon buttons
# wx event/widget IDs for the ribbon demo buttons (shapes, selection tools,
# colours, art providers, toolbar positioning). Allocated sequentially above
# wx.ID_HIGHEST to avoid colliding with stock wx IDs.
ID_CIRCLE = wx.ID_HIGHEST + 1
ID_CROSS = ID_CIRCLE + 1
ID_TRIANGLE = ID_CIRCLE + 2
ID_SQUARE = ID_CIRCLE + 3
ID_POLYGON = ID_CIRCLE + 4
ID_SELECTION_EXPAND_H = ID_CIRCLE + 5
ID_SELECTION_EXPAND_V = ID_CIRCLE + 6
ID_SELECTION_CONTRACT = ID_CIRCLE + 7
ID_PRIMARY_COLOUR = ID_CIRCLE + 8
ID_SECONDARY_COLOUR = ID_CIRCLE + 9
ID_DEFAULT_PROVIDER = ID_CIRCLE + 10
ID_AUI_PROVIDER = ID_CIRCLE + 11
ID_MSW_PROVIDER = ID_CIRCLE + 12
ID_MAIN_TOOLBAR = ID_CIRCLE + 13
ID_POSITION_TOP = ID_CIRCLE + 14
ID_POSITION_TOP_ICONS = ID_CIRCLE + 15
ID_POSITION_TOP_BOTH = ID_CIRCLE + 16
ID_POSITION_LEFT = ID_CIRCLE + 17
ID_POSITION_LEFT_LABELS = ID_CIRCLE + 18
ID_POSITION_LEFT_BOTH = ID_CIRCLE + 19
ID_TOGGLE_PANELS = ID_CIRCLE + 20
# Module-level aliases for embedded image resources; CreateBitmap() looks
# these up by name.
align_center = resources.align_center
align_left = resources.align_left
align_right = resources.align_right
aui_style = resources.aui_style
auto_crop_selection = resources.auto_crop_selection
auto_crop_selection_small = resources.auto_crop_selection_small
circle = resources.circle
circle_small = resources.circle_small
colours = resources.colours
cross = resources.cross
empty = resources.empty
expand_selection_h = resources.expand_selection_h
expand_selection_v = resources.expand_selection_v
eye = resources.eye
hexagon = resources.hexagon
msw_style = resources.msw_style
position_left = resources.position_left
position_top = resources.position_top
ribbon = resources.ribbon
powerPanel = resources.powerPanel
square = resources.square
triangle = resources.triangle
# Icons for the DAB player controls.
icon_power = resources.power
icon_tune = resources.tune
icon_seek_prev = resources.seek_prev
icon_seek_next = resources.seek_next
icon_scan = resources.scan
icon_intro_scan = resources.intro_scan
icon_cancel = resources.cancel
icon_reload_ensemble = resources.reload_ensemble
# IDs for the DAB player control buttons.
ID_BT_POW = wx.ID_HIGHEST+100
ID_BT_TUNE = wx.ID_HIGHEST+101
ID_BT_SEEK_PREV = wx.ID_HIGHEST+102
ID_BT_SEEK_NEXT = wx.ID_HIGHEST+103
ID_BT_SCAN = wx.ID_HIGHEST+104
ID_BT_INTRO_SCAN = wx.ID_HIGHEST+105
ID_BT_CANCEL = wx.ID_HIGHEST+106
ID_BT_RELOAD_ENS = wx.ID_HIGHEST+107
# --------------------------------------------------- #
def CreateBitmap(xpm):
    """Return the wx.Bitmap of the named module-level image resource.

    `xpm` is the name of one of the resource aliases defined at module
    level (e.g. "icon_power"). A globals() lookup replaces the original
    eval(): equivalent for these simple identifiers, but it cannot execute
    arbitrary expressions should the name ever come from untrusted input,
    and it raises a clear KeyError for unknown resource names.
    """
    bmp = globals()[xpm].Bitmap
    return bmp
# --------------------------------------------------- #
class ColourClientData(object):
    """Per-item payload for colour gallery entries: a display name plus
    the associated colour value, exposed through wx-style accessors."""

    def __init__(self, name, colour):
        # Stored as one private tuple; read back via the getters below.
        self._data = (name, colour)

    def GetName(self):
        return self._data[0]

    def GetColour(self):
        return self._data[1]
# --------------------------------------------------- #
def GetMondrianData():
    """Return the raw bytes of a 32x32 embedded PNG (used as frame icon)."""
    return \
'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00 \x00\x00\x00 \x08\x06\x00\
\x00\x00szz\xf4\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\x08d\x88\x00\x00\x00qID\
ATX\x85\xed\xd6;\n\x800\x10E\xd1{\xc5\x8d\xb9r\x97\x16\x0b\xad$\x8a\x82:\x16\
o\xda\x84pB2\x1f\x81Fa\x8c\x9c\x08\x04Z{\xcf\xa72\xbcv\xfa\xc5\x08 \x80r\x80\
\xfc\xa2\x0e\x1c\xe4\xba\xfaX\x1d\xd0\xde]S\x07\x02\xd8>\xe1wa-`\x9fQ\xe9\
\x86\x01\x04\x10\x00\\(Dk\x1b-\x04\xdc\x1d\x07\x14\x98;\x0bS\x7f\x7f\xf9\x13\
\x04\x10@\xf9X\xbe\x00\xc9 \x14K\xc1<={\x00\x00\x00\x00IEND\xaeB`\x82'
def GetMondrianBitmap():
    """Return the embedded Mondrian PNG as a wx.Bitmap."""
    image = GetMondrianImage()
    return wx.BitmapFromImage(image)
def GetMondrianImage():
    """Decode the embedded PNG bytes into a wx.Image."""
    png_stream = cStringIO.StringIO(GetMondrianData())
    return wx.ImageFromStream(png_stream)
def GetMondrianIcon():
    """Return the embedded Mondrian PNG as a wx.Icon (frame icon)."""
    bitmap = GetMondrianBitmap()
    icon = wx.EmptyIcon()
    icon.CopyFromBitmap(bitmap)
    return icon
class TcFrame(wx.Frame):
    """Main window of the Telechips DAB GUI.

    Hosts a RibbonBar with a "DAB" page (power / tune / seek buttons) and a
    "UI" page (art-provider switches plus primary/secondary colour
    galleries), above a read-only log text control.

    NOTE(review): several handlers are bound to IDs (ID_SELECTION_EXPAND_H,
    ID_CIRCLE, ID_PRIMARY_COLOUR, ...) that are defined elsewhere in this
    file, outside this class -- confirm they exist before pruning bindings.
    """

    def __init__(self, parent, id=-1, title="", pos=wx.DefaultPosition,
                 size=wx.DefaultSize, style=wx.DEFAULT_FRAME_STYLE|wx.SUNKEN_BORDER|wx.CLIP_CHILDREN
                 ):
        """Build the ribbon UI, the log window and wire up all events."""
        super(TcFrame, self).__init__(parent, id, title, pos, size, style)
        panel = wx.Panel(self)
        # tell FrameManager to manage this frame
        # NOTE(review): self._mgr, self._perspectives, self.n and self.x are
        # never used again in this class -- possibly leftovers from a demo.
        self._mgr = wx.aui.AuiManager()
        self._mgr.SetManagedWindow(self)
        self._perspectives = []
        self.n = 0
        self.x = 0
        # NOTE(review): SetIcon is called again near the end of __init__;
        # one of the two calls is redundant.
        self.SetIcon(GetMondrianIcon())
        # create ribbon bar
        self._ribbon = Ribbon.RibbonBar(panel, wx.ID_ANY,
            agwStyle=Ribbon.RIBBON_BAR_DEFAULT_STYLE|Ribbon.RIBBON_BAR_SHOW_PANEL_EXT_BUTTONS)
        # DC reused by AddColourToGallery to render colour swatch bitmaps.
        self._bitmap_creation_dc = wx.MemoryDC()
        self._colour_data = wx.ColourData()
        # --- "DAB" ribbon page: power + tuner controls -------------------
        home = Ribbon.RibbonPage(self._ribbon, wx.ID_ANY, "DAB")
        powerPanel = Ribbon.RibbonPanel(home, wx.ID_ANY, "Power")
        powerBar = Ribbon.RibbonButtonBar(powerPanel)
        powerBar.AddSimpleButton(ID_BT_POW, "On/Off", CreateBitmap("icon_power"),
            "Reset application setting")
        dabPanel = Ribbon.RibbonPanel(home, wx.ID_ANY, "DAB")
        dabBar = Ribbon.RibbonButtonBar(dabPanel)
        dabBar.AddSimpleButton(ID_BT_TUNE, "Tune", CreateBitmap("icon_tune"),
            "Tune the frequency")
        dabBar.AddSimpleButton(ID_BT_SEEK_PREV, "Seek Prev", CreateBitmap("icon_seek_prev"),
            "Seek previous service component")
        dabBar.AddSimpleButton(ID_BT_SEEK_NEXT, "Seek Next", CreateBitmap("icon_seek_next"),
            "Seek next service component")
        label_font = wx.Font(8, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_LIGHT)
        self._bitmap_creation_dc.SetFont(label_font)
        # --- "UI" ribbon page: art providers and colour galleries --------
        scheme = Ribbon.RibbonPage(self._ribbon, wx.ID_ANY, "UI")
        self._default_primary, self._default_secondary, self._default_tertiary = self._ribbon.GetArtProvider().GetColourScheme(1, 1, 1)
        provider_panel = Ribbon.RibbonPanel(scheme, wx.ID_ANY, "Art", wx.NullBitmap, wx.DefaultPosition, wx.DefaultSize,
            agwStyle=Ribbon.RIBBON_PANEL_NO_AUTO_MINIMISE)
        provider_bar = Ribbon.RibbonButtonBar(provider_panel, wx.ID_ANY)
        provider_bar.AddSimpleButton(ID_DEFAULT_PROVIDER, "Default Provider",
            wx.ArtProvider.GetBitmap(wx.ART_QUESTION, wx.ART_OTHER, wx.Size(32, 32)), "")
        provider_bar.AddSimpleButton(ID_AUI_PROVIDER, "AUI Provider", CreateBitmap("aui_style"), "")
        provider_bar.AddSimpleButton(ID_MSW_PROVIDER, "MSW Provider", CreateBitmap("msw_style"), "")
        primary_panel = Ribbon.RibbonPanel(scheme, wx.ID_ANY, "Primary Colour", CreateBitmap("colours"))
        self._primary_gallery = self.PopulateColoursPanel(primary_panel, self._default_primary, ID_PRIMARY_COLOUR)
        secondary_panel = Ribbon.RibbonPanel(scheme, wx.ID_ANY, "Secondary Colour", CreateBitmap("colours"))
        self._secondary_gallery = self.PopulateColoursPanel(secondary_panel, self._default_secondary, ID_SECONDARY_COLOUR)
        self._ribbon.Realize()
        # Read-only log below the ribbon; AddText appends to it.
        self._logwindow = wx.TextCtrl(panel, wx.ID_ANY, "", wx.DefaultPosition, wx.DefaultSize,
            wx.TE_MULTILINE | wx.TE_READONLY | wx.TE_LEFT | wx.TE_BESTWRAP | wx.BORDER_NONE)
        s = wx.BoxSizer(wx.VERTICAL)
        s.Add(self._ribbon, 0, wx.EXPAND)
        s.Add(self._logwindow, 1, wx.EXPAND)
        panel.SetSizer(s)
        self.panel = panel
        self.BindEvents([powerBar, dabBar, provider_bar])
        self.SetIcon(GetMondrianIcon())
        self.CenterOnScreen()
        self.Show()

    def BindEvents(self, bars):
        """Wire ribbon button/tool/gallery/menu events to their handlers.

        bars -- the [powerBar, dabBar, provider_bar] button bars created in
        __init__.  Frame-level bindings (galleries, toolbar, menus) are bound
        on self.
        """
        powerBar, dabBar, provider_bar = bars
        provider_bar.Bind(Ribbon.EVT_RIBBONBUTTONBAR_CLICKED, self.OnDefaultProvider, id=ID_DEFAULT_PROVIDER)
        provider_bar.Bind(Ribbon.EVT_RIBBONBUTTONBAR_CLICKED, self.OnAUIProvider, id=ID_AUI_PROVIDER)
        provider_bar.Bind(Ribbon.EVT_RIBBONBUTTONBAR_CLICKED, self.OnMSWProvider, id=ID_MSW_PROVIDER)
        powerBar.Bind(Ribbon.EVT_RIBBONBUTTONBAR_CLICKED, self.OnSelectionExpandHButton, id=ID_SELECTION_EXPAND_H)
        powerBar.Bind(Ribbon.EVT_RIBBONBUTTONBAR_CLICKED, self.OnSelectionExpandVButton, id=ID_SELECTION_EXPAND_V)
        powerBar.Bind(Ribbon.EVT_RIBBONBUTTONBAR_CLICKED, self.OnSelectionContractButton, id=ID_SELECTION_CONTRACT)
        dabBar.Bind(Ribbon.EVT_RIBBONBUTTONBAR_CLICKED, self.OnCircleButton, id=ID_CIRCLE)
        dabBar.Bind(Ribbon.EVT_RIBBONBUTTONBAR_CLICKED, self.OnCrossButton, id=ID_CROSS)
        dabBar.Bind(Ribbon.EVT_RIBBONBUTTONBAR_CLICKED, self.OnTriangleButton, id=ID_TRIANGLE)
        dabBar.Bind(Ribbon.EVT_RIBBONBUTTONBAR_CLICKED, self.OnSquareButton, id=ID_SQUARE)
        dabBar.Bind(Ribbon.EVT_RIBBONBUTTONBAR_DROPDOWN_CLICKED, self.OnTriangleDropdown, id=ID_TRIANGLE)
        dabBar.Bind(Ribbon.EVT_RIBBONBUTTONBAR_DROPDOWN_CLICKED, self.OnPolygonDropdown, id=ID_POLYGON)
        self.Bind(Ribbon.EVT_RIBBONGALLERY_HOVER_CHANGED, self.OnHoveredColourChange, id=ID_PRIMARY_COLOUR)
        self.Bind(Ribbon.EVT_RIBBONGALLERY_HOVER_CHANGED, self.OnHoveredColourChange, id=ID_SECONDARY_COLOUR)
        self.Bind(Ribbon.EVT_RIBBONGALLERY_SELECTED, self.OnPrimaryColourSelect, id=ID_PRIMARY_COLOUR)
        self.Bind(Ribbon.EVT_RIBBONGALLERY_SELECTED, self.OnSecondaryColourSelect, id=ID_SECONDARY_COLOUR)
        self.Bind(Ribbon.EVT_RIBBONTOOLBAR_CLICKED, self.OnNew, id=wx.ID_NEW)
        self.Bind(Ribbon.EVT_RIBBONTOOLBAR_DROPDOWN_CLICKED, self.OnNewDropdown, id=wx.ID_NEW)
        self.Bind(Ribbon.EVT_RIBBONTOOLBAR_CLICKED, self.OnPrint, id=wx.ID_PRINT)
        self.Bind(Ribbon.EVT_RIBBONTOOLBAR_DROPDOWN_CLICKED, self.OnPrintDropdown, id=wx.ID_PRINT)
        self.Bind(Ribbon.EVT_RIBBONTOOLBAR_DROPDOWN_CLICKED, self.OnRedoDropdown, id=wx.ID_REDO)
        self.Bind(Ribbon.EVT_RIBBONTOOLBAR_DROPDOWN_CLICKED, self.OnUndoDropdown, id=wx.ID_UNDO)
        self.Bind(Ribbon.EVT_RIBBONTOOLBAR_CLICKED, self.OnPositionLeft, id=ID_POSITION_LEFT)
        self.Bind(Ribbon.EVT_RIBBONTOOLBAR_DROPDOWN_CLICKED, self.OnPositionLeftDropdown, id=ID_POSITION_LEFT)
        self.Bind(Ribbon.EVT_RIBBONTOOLBAR_CLICKED, self.OnPositionTop, id=ID_POSITION_TOP)
        self.Bind(Ribbon.EVT_RIBBONTOOLBAR_DROPDOWN_CLICKED, self.OnPositionTopDropdown, id=ID_POSITION_TOP)
        self.Bind(wx.EVT_BUTTON, self.OnColourGalleryButton, id=ID_PRIMARY_COLOUR)
        self.Bind(wx.EVT_BUTTON, self.OnColourGalleryButton, id=ID_SECONDARY_COLOUR)
        self.Bind(wx.EVT_MENU, self.OnPositionLeftIcons, id=ID_POSITION_LEFT)
        self.Bind(wx.EVT_MENU, self.OnPositionLeftLabels, id=ID_POSITION_LEFT_LABELS)
        self.Bind(wx.EVT_MENU, self.OnPositionLeftBoth, id=ID_POSITION_LEFT_BOTH)
        self.Bind(wx.EVT_MENU, self.OnPositionTopLabels, id=ID_POSITION_TOP)
        self.Bind(wx.EVT_MENU, self.OnPositionTopIcons, id=ID_POSITION_TOP_ICONS)
        self.Bind(wx.EVT_MENU, self.OnPositionTopBoth, id=ID_POSITION_TOP_BOTH)

    def SetBarStyle(self, agwStyle):
        """Apply a new ribbon AGW style and re-layout for the new flow.

        Switches the top-level sizer between horizontal (vertical ribbon
        flow) and vertical (normal top ribbon) orientation.
        """
        self._ribbon.Freeze()
        self._ribbon.SetAGWWindowStyleFlag(agwStyle)
        pTopSize = self.panel.GetSizer()
        # ID_MAIN_TOOLBAR is created elsewhere in the file; may be absent.
        pToolbar = wx.FindWindowById(ID_MAIN_TOOLBAR)
        if agwStyle & Ribbon.RIBBON_BAR_FLOW_VERTICAL:
            self._ribbon.SetTabCtrlMargins(10, 10)
            pTopSize.SetOrientation(wx.HORIZONTAL)
            if pToolbar:
                pToolbar.SetRows(3, 5)
        else:
            self._ribbon.SetTabCtrlMargins(50, 20)
            pTopSize.SetOrientation(wx.VERTICAL)
            if pToolbar:
                pToolbar.SetRows(2, 3)
        self._ribbon.Realize()
        self._ribbon.Thaw()
        self.panel.Layout()

    def PopulateColoursPanel(self, panel, defc, gallery_id):
        """Fill (or refill) a colour gallery with named swatches.

        panel      -- the RibbonPanel hosting the gallery.
        defc       -- colour used for the pre-selected "Default" entry.
        gallery_id -- window id; an existing gallery with this id is cleared
                      and reused, otherwise a new one is created.
        Returns the gallery.
        """
        gallery = wx.FindWindowById(gallery_id, panel)
        if gallery:
            gallery.Clear()
        else:
            gallery = Ribbon.RibbonGallery(panel, gallery_id)
        dc = self._bitmap_creation_dc
        def_item = self.AddColourToGallery(gallery, "Default", dc, defc)
        gallery.SetSelection(def_item)
        self.AddColourToGallery(gallery, "BLUE", dc)
        self.AddColourToGallery(gallery, "BLUE VIOLET", dc)
        self.AddColourToGallery(gallery, "BROWN", dc)
        self.AddColourToGallery(gallery, "CADET BLUE", dc)
        self.AddColourToGallery(gallery, "CORAL", dc)
        self.AddColourToGallery(gallery, "CYAN", dc)
        self.AddColourToGallery(gallery, "DARK GREEN", dc)
        self.AddColourToGallery(gallery, "DARK ORCHID", dc)
        self.AddColourToGallery(gallery, "FIREBRICK", dc)
        self.AddColourToGallery(gallery, "GOLD", dc)
        self.AddColourToGallery(gallery, "GOLDENROD", dc)
        self.AddColourToGallery(gallery, "GREEN", dc)
        self.AddColourToGallery(gallery, "INDIAN RED", dc)
        self.AddColourToGallery(gallery, "KHAKI", dc)
        self.AddColourToGallery(gallery, "LIGHT BLUE", dc)
        self.AddColourToGallery(gallery, "LIME GREEN", dc)
        self.AddColourToGallery(gallery, "MAGENTA", dc)
        self.AddColourToGallery(gallery, "MAROON", dc)
        self.AddColourToGallery(gallery, "NAVY", dc)
        self.AddColourToGallery(gallery, "ORANGE", dc)
        self.AddColourToGallery(gallery, "ORCHID", dc)
        self.AddColourToGallery(gallery, "PINK", dc)
        self.AddColourToGallery(gallery, "PLUM", dc)
        self.AddColourToGallery(gallery, "PURPLE", dc)
        self.AddColourToGallery(gallery, "RED", dc)
        self.AddColourToGallery(gallery, "SALMON", dc)
        self.AddColourToGallery(gallery, "SEA GREEN", dc)
        self.AddColourToGallery(gallery, "SIENNA", dc)
        self.AddColourToGallery(gallery, "SKY BLUE", dc)
        self.AddColourToGallery(gallery, "TAN", dc)
        self.AddColourToGallery(gallery, "THISTLE", dc)
        self.AddColourToGallery(gallery, "TURQUOISE", dc)
        self.AddColourToGallery(gallery, "VIOLET", dc)
        self.AddColourToGallery(gallery, "VIOLET RED", dc)
        self.AddColourToGallery(gallery, "WHEAT", dc)
        self.AddColourToGallery(gallery, "WHITE", dc)
        self.AddColourToGallery(gallery, "YELLOW", dc)
        return gallery

    def GetGalleryColour(self, gallery, item, name=None):
        """Return (colour, name) for a gallery item.

        Quirk preserved from the original API: the item's name is only
        looked up when the caller passes a non-None placeholder for *name*;
        with name=None the second tuple element is always None.
        """
        data = gallery.GetItemClientData(item)
        if name != None:
            name = data.GetName()
        return data.GetColour(), name

    def OnHoveredColourChange(self, event):
        # Set the background of the gallery to the hovered colour, or back to the
        # default if there is no longer a hovered item.
        gallery = event.GetGallery()
        provider = gallery.GetArtProvider()
        if event.GetGalleryItem() != None:
            # Clone the shared art provider the first time so only this
            # gallery's hover background is tinted.
            if provider == self._ribbon.GetArtProvider():
                provider = provider.Clone()
                gallery.SetArtProvider(provider)
            provider.SetColour(Ribbon.RIBBON_ART_GALLERY_HOVER_BACKGROUND_COLOUR,
                self.GetGalleryColour(event.GetGallery(), event.GetGalleryItem(), None)[0])
        else:
            if provider != self._ribbon.GetArtProvider():
                gallery.SetArtProvider(self._ribbon.GetArtProvider())
                # NOTE(review): `del provider` only unbinds the local name;
                # it does not destroy the provider object.
                del provider

    def OnPrimaryColourSelect(self, event):
        """Apply the selected gallery colour as the scheme's primary colour."""
        colour, name = self.GetGalleryColour(event.GetGallery(), event.GetGalleryItem(), "")
        self.AddText("Colour %s selected as primary."%name)
        dummy, secondary, tertiary = self._ribbon.GetArtProvider().GetColourScheme(None, 1, 1)
        self._ribbon.GetArtProvider().SetColourScheme(colour, secondary, tertiary)
        self.ResetGalleryArtProviders()
        self._ribbon.Refresh()

    def OnSecondaryColourSelect(self, event):
        """Apply the selected gallery colour as the scheme's secondary colour."""
        colour, name = self.GetGalleryColour(event.GetGallery(), event.GetGalleryItem(), "")
        self.AddText("Colour %s selected as secondary."%name)
        primary, dummy, tertiary = self._ribbon.GetArtProvider().GetColourScheme(1, None, 1)
        self._ribbon.GetArtProvider().SetColourScheme(primary, colour, tertiary)
        self.ResetGalleryArtProviders()
        self._ribbon.Refresh()

    def ResetGalleryArtProviders(self):
        """Point both colour galleries back at the ribbon's shared art provider."""
        if self._primary_gallery.GetArtProvider() != self._ribbon.GetArtProvider():
            self._primary_gallery.SetArtProvider(self._ribbon.GetArtProvider())
        if self._secondary_gallery.GetArtProvider() != self._ribbon.GetArtProvider():
            self._secondary_gallery.SetArtProvider(self._ribbon.GetArtProvider())

    # --- simple button handlers: log the click to the text window --------

    def OnSelectionExpandHButton(self, event):
        self.AddText("Expand powerBar horizontally button clicked.")

    def OnSelectionExpandVButton(self, event):
        self.AddText("Expand powerBar vertically button clicked.")

    def OnSelectionContractButton(self, event):
        self.AddText("Contract powerBar button clicked.")

    def OnCircleButton(self, event):
        self.AddText("Circle button clicked.")

    def OnCrossButton(self, event):
        self.AddText("Cross button clicked.")

    def OnTriangleButton(self, event):
        self.AddText("Triangle button clicked.")

    def OnTriangleDropdown(self, event):
        """Show the triangle-variant popup menu."""
        menu = wx.Menu()
        menu.Append(wx.ID_ANY, "Equilateral")
        menu.Append(wx.ID_ANY, "Isosceles")
        menu.Append(wx.ID_ANY, "Scalene")
        event.PopupMenu(menu)

    def OnSquareButton(self, event):
        self.AddText("Square button clicked.")

    def OnPolygonDropdown(self, event):
        """Show the polygon-variant popup menu."""
        menu = wx.Menu()
        menu.Append(wx.ID_ANY, "Pentagon (5 sided)")
        menu.Append(wx.ID_ANY, "Hexagon (6 sided)")
        menu.Append(wx.ID_ANY, "Heptagon (7 sided)")
        menu.Append(wx.ID_ANY, "Octogon (8 sided)")
        menu.Append(wx.ID_ANY, "Nonagon (9 sided)")
        menu.Append(wx.ID_ANY, "Decagon (10 sided)")
        event.PopupMenu(menu)

    def OnNew(self, event):
        self.AddText("New button clicked.")

    def OnNewDropdown(self, event):
        menu = wx.Menu()
        menu.Append(wx.ID_ANY, "New Document")
        menu.Append(wx.ID_ANY, "New Template")
        menu.Append(wx.ID_ANY, "New Mail")
        event.PopupMenu(menu)

    def OnPrint(self, event):
        self.AddText("Print button clicked.")

    def OnPrintDropdown(self, event):
        menu = wx.Menu()
        menu.Append(wx.ID_ANY, "Print")
        menu.Append(wx.ID_ANY, "Preview")
        menu.Append(wx.ID_ANY, "Options")
        event.PopupMenu(menu)

    def OnRedoDropdown(self, event):
        menu = wx.Menu()
        menu.Append(wx.ID_ANY, "Redo E")
        menu.Append(wx.ID_ANY, "Redo F")
        menu.Append(wx.ID_ANY, "Redo G")
        event.PopupMenu(menu)

    def OnUndoDropdown(self, event):
        menu = wx.Menu()
        menu.Append(wx.ID_ANY, "Undo C")
        menu.Append(wx.ID_ANY, "Undo B")
        menu.Append(wx.ID_ANY, "Undo A")
        event.PopupMenu(menu)

    # --- ribbon position/label style switching ---------------------------

    def OnPositionTopLabels(self, event):
        self.SetBarStyle(Ribbon.RIBBON_BAR_DEFAULT_STYLE)

    def OnPositionTopIcons(self, event):
        self.SetBarStyle((Ribbon.RIBBON_BAR_DEFAULT_STYLE &~Ribbon.RIBBON_BAR_SHOW_PAGE_LABELS)
            | Ribbon.RIBBON_BAR_SHOW_PAGE_ICONS)

    def OnPositionTopBoth(self, event):
        self.SetBarStyle(Ribbon.RIBBON_BAR_DEFAULT_STYLE | Ribbon.RIBBON_BAR_SHOW_PAGE_ICONS)

    def OnPositionLeftLabels(self, event):
        self.SetBarStyle(Ribbon.RIBBON_BAR_DEFAULT_STYLE | Ribbon.RIBBON_BAR_FLOW_VERTICAL)

    def OnPositionLeftIcons(self, event):
        self.SetBarStyle((Ribbon.RIBBON_BAR_DEFAULT_STYLE &~Ribbon.RIBBON_BAR_SHOW_PAGE_LABELS) |
            Ribbon.RIBBON_BAR_SHOW_PAGE_ICONS | Ribbon.RIBBON_BAR_FLOW_VERTICAL)

    def OnPositionLeftBoth(self, event):
        self.SetBarStyle(Ribbon.RIBBON_BAR_DEFAULT_STYLE | Ribbon.RIBBON_BAR_SHOW_PAGE_ICONS |
            Ribbon.RIBBON_BAR_FLOW_VERTICAL)

    def OnPositionTop(self, event):
        # Toolbar click (no dropdown) defaults to "top with labels".
        self.OnPositionTopLabels(event)

    def OnPositionTopDropdown(self, event):
        menu = wx.Menu()
        menu.Append(ID_POSITION_TOP, "Top with Labels")
        menu.Append(ID_POSITION_TOP_ICONS, "Top with Icons")
        menu.Append(ID_POSITION_TOP_BOTH, "Top with Both")
        event.PopupMenu(menu)

    def OnPositionLeft(self, event):
        # Toolbar click (no dropdown) defaults to "left with icons".
        self.OnPositionLeftIcons(event)

    def OnPositionLeftDropdown(self, event):
        menu = wx.Menu()
        menu.Append(ID_POSITION_LEFT, "Left with Icons")
        menu.Append(ID_POSITION_LEFT_LABELS, "Left with Labels")
        menu.Append(ID_POSITION_LEFT_BOTH, "Left with Both")
        event.PopupMenu(menu)

    def OnTogglePanels(self, event):
        # Intentionally a no-op placeholder.
        pass

    def OnExtButton(self, event):
        self.AddText("Extended button activated")

    def AddText(self, msg):
        """Append *msg* (plus newline) to the log window and close any
        expanded ribbon panel."""
        self._logwindow.AppendText(msg)
        self._logwindow.AppendText("\n")
        self._ribbon.DismissExpandedPanel()

    def AddColourToGallery(self, gallery, colour, dc, value=None):
        """Render a labelled colour swatch and append it to *gallery*.

        colour -- a wx colour-database name, or "Default" (then *value* must
                  supply the colour).
        value  -- explicit colour overriding the name lookup, if given.
        Returns the new gallery item, or None when the colour is invalid.
        """
        item = None
        if colour != "Default":
            c = wx.NamedColour(colour)
        if value is not None:
            c = value
        if c.IsOk():
            iWidth = 64
            iHeight = 40
            bitmap = wx.EmptyBitmap(iWidth, iHeight)
            dc.SelectObject(bitmap)
            b = wx.Brush(c)
            dc.SetPen(wx.BLACK_PEN)
            dc.SetBrush(b)
            dc.DrawRectangle(0, 0, iWidth, iHeight)
            # Title-case the label ("SEA GREEN" -> "Sea green").
            colour = colour[0] + colour[1:].lower()
            size = wx.Size(*dc.GetTextExtent(colour))
            # Approximate inverse colour for a readable label.
            notcred = min(abs(~c.Red()), 255)
            notcgreen = min(abs(~c.Green()), 255)
            notcblue = min(abs(~c.Blue()), 255)
            foreground = wx.Colour(notcred, notcgreen, notcblue)
            if abs(foreground.Red() - c.Red()) + abs(foreground.Blue() - c.Blue()) + abs(foreground.Green() - c.Green()) < 64:
                # Foreground too similar to background - use a different
                # strategy to find a contrasting colour
                foreground = wx.Colour((c.Red() + 64) % 256, 255 - c.Green(),
                    (c.Blue() + 192) % 256)
            dc.SetTextForeground(foreground)
            dc.DrawText(colour, (iWidth - size.GetWidth() + 1) / 2, (iHeight - size.GetHeight()) / 2)
            dc.SelectObjectAsSource(wx.NullBitmap)
            item = gallery.Append(bitmap, wx.ID_ANY)
            gallery.SetItemClientData(item, ColourClientData(colour, c))
        return item

    def OnColourGalleryButton(self, event):
        """Open a colour dialog for a gallery's extension button and select
        (adding if necessary) the chosen colour in that gallery."""
        gallery = event.GetEventObject()
        if gallery is None:
            return
        self._ribbon.DismissExpandedPanel()
        if gallery.GetSelection():
            self._colour_data.SetColour(self.GetGalleryColour(gallery, gallery.GetSelection(), None)[0])
        dlg = wx.ColourDialog(self, self._colour_data)
        if dlg.ShowModal() == wx.ID_OK:
            self._colour_data = dlg.GetColourData()
            clr = self._colour_data.GetColour()
            # Try to find colour in gallery
            item = None
            for i in xrange(gallery.GetCount()):
                item = gallery.GetItem(i)
                if self.GetGalleryColour(gallery, item, None)[0] == clr:
                    break
            else:
                item = None
            # Colour not in gallery - add it
            if item == None:
                item = self.AddColourToGallery(gallery, clr.GetAsString(wx.C2S_HTML_SYNTAX),
                    self._bitmap_creation_dc, clr)
                gallery.Realize()
            # Set powerBar
            gallery.EnsureVisible(item)
            gallery.SetSelection(item)
            # Send an event to respond to the powerBar change
            dummy = Ribbon.RibbonGalleryEvent(Ribbon.wxEVT_COMMAND_RIBBONGALLERY_SELECTED, gallery.GetId())
            dummy.SetEventObject(gallery)
            dummy.SetGallery(gallery)
            dummy.SetGalleryItem(item)
            self.GetEventHandler().ProcessEvent(dummy)

    def OnDefaultProvider(self, event):
        self._ribbon.DismissExpandedPanel()
        self.SetArtProvider(Ribbon.RibbonDefaultArtProvider())

    def OnAUIProvider(self, event):
        self._ribbon.DismissExpandedPanel()
        self.SetArtProvider(Ribbon.RibbonAUIArtProvider())

    def OnMSWProvider(self, event):
        self._ribbon.DismissExpandedPanel()
        self.SetArtProvider(Ribbon.RibbonMSWArtProvider())

    def SetArtProvider(self, prov):
        """Install a new ribbon art provider and rebuild both colour
        galleries with the provider's default scheme."""
        self._ribbon.Freeze()
        self._ribbon.SetArtProvider(prov)
        self._default_primary, self._default_secondary, self._default_tertiary = \
            prov.GetColourScheme(self._default_primary, self._default_secondary, self._default_tertiary)
        self.PopulateColoursPanel(self._primary_gallery.GetParent(), self._default_primary,
            ID_PRIMARY_COLOUR)
        self.PopulateColoursPanel(self._secondary_gallery.GetParent(), self._default_secondary,
            ID_SECONDARY_COLOUR)
        self._ribbon.Thaw()
        self.panel.GetSizer().Layout()
        self._ribbon.Realize()
class TcApp(wx.App):
    """Application bootstrap: creates and shows the main TcFrame."""

    def OnInit(self):
        """Called by wxWidgets at startup; return True to keep running."""
        frame = TcFrame(None, title="Telechips DAB GUI")
        self.frame = frame
        self.SetTopWindow(frame)
        frame.Show()
        return True
if __name__ == "__main__":
    # False: do not redirect stdout/stderr into a wx output window.
    app = TcApp(False)
    app.MainLoop()
|
UTF-8
|
Python
| false | false | 2,014 |
816,043,794,285 |
1885236d7d1e54860583a00976f2d7d38bb9057a
|
f3f15ca5ab6e29354087d043977cf43500f327c6
|
/test/suites/agent/server_tests.py
|
d3dacf66360acf49b3d9a75f351ec7f5601125a0
|
[] |
no_license
|
autopilot-paas/autopilot
|
https://github.com/autopilot-paas/autopilot
|
3625670898485de3e067c86d13735f74f0660489
|
108c08b5bd4cc3f4e955b149f3658cfa987f4de1
|
refs/heads/master
| 2016-09-05T21:54:09.189647 | 2014-12-31T05:29:33 | 2014-12-31T05:29:33 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#! /usr/bin/python
import os
import sys
sys.path.append(os.environ['AUTOPILOT_HOME'] + '/../')
import requests
import StringIO
from autopilot.common import utils
from autopilot.test.common.aptest import APtest
from autopilot.common.apenv import ApEnv
from autopilot.specifications.apspec import Apspec
from autopilot.protocol.message import Message
from autopilot.protocol.serializer import JsonPickleSerializer
from autopilot.common.server import Server
from autopilot.common.asyncpool import taskpool
from autopilot.agent.handlers.stackdeployhandler import StackDeployHandler
class ServerTest(APtest):
    """
    Server tests: exercise the gevent-based message Server end to end by
    POSTing serialized Message objects from a concurrently spawned client.
    """

    def test_gevent_server_async(self):
        """A dict-payload message round-trips with HTTP 200."""
        m = Message(type="stack_deploy",
                    data=dict(name="test_gevent_async"),
                    identifier="test_gevent_async")
        # schedule the client to be called after the server starts
        clientg = taskpool.spawn(func=self._single_message_client, args=dict(message=m))
        # start the server
        self._start_server(handler=ServerTest.DefaultAsyncHandler())
        (status_code, response) = clientg.get()
        self.ae(200, status_code)

    def test_gevent_server_data_object(self):
        """A custom-object payload serializes and round-trips with HTTP 200."""
        class SomeObject(object):
            def __init__(self, value=2):
                self.value = value
        m = Message(type="stack_deploy",
                    data=SomeObject(),
                    identifier="test_gevent_async")
        # schedule the client to be called after the server starts
        clientg = taskpool.spawn(func=self._single_message_client, args=dict(message=m))
        # start the server
        self._start_server(handler=ServerTest.DefaultAsyncHandler())
        (status_code, response) = clientg.get()
        self.ae(200, status_code)

    def test_e2e_stack_deploy(self):
        """
        Start a gevent server
        Create a stack deploy message and send it to the server
        Server invokes stack handler and deploys the role
        Verify the role is deployed
        """
        # setup info for StackHandler
        test_dir = '/tmp/test_install_role_task/'
        apenv = self.get_default_apenv(wf_id="test_e2e_stack_deploy",
                                       properties= {
                                           "root_dir": test_dir
                                       })
        stack = Apspec.load(ApEnv(), "contoso.org", "dev.marketing.contoso.org",
                            self.openf('stack_test_python.yml'))
        m = Message(type="stack-deploy",
                    headers={"domain": "dev.contoso.org"},
                    data={"target_role_group": "hdfs", "stack": stack})
        handler = StackDeployHandler(apenv=apenv, message_type=m.type)
        # schedule the client to be called after the server starts
        clientg = taskpool.spawn(func=self._single_message_client, args=dict(message=m))
        # start the server
        self._start_server(handler=handler)
        (status_code, response_text) = clientg.get()
        self.ae(200, status_code)
        # deserialize the response and verify the deployed role on disk
        stream = StringIO.StringIO()
        stream.write(response_text)
        stream.seek(0)
        rm = JsonPickleSerializer().load(stream)
        current_file_path = os.path.join(utils.path_join(test_dir, rm.data.get("wf_id"), "hdfs"),
                                         "autopilot/hadoop-base/current")
        with open(current_file_path) as f:
            stack_name = f.readline().strip()
        self.ae("hadoop-base", stack_name)

    def test_gevent_server_empty_data(self):
        """A message with data=None is still accepted (HTTP 200)."""
        m = Message(type="stack_deploy",
                    data=None,
                    identifier="test_gevent_async")
        # schedule the client to be called after the server starts
        clientg = taskpool.spawn(func=self._single_message_client, args=dict(message=m))
        # start the server
        self._start_server(handler=ServerTest.DefaultAsyncHandler())
        (status_code, response) = clientg.get()
        self.ae(200, status_code)

    def test_gevent_handled_error(self):
        """An exception reported through the handler's future yields HTTP 500."""
        m = Message(type="stack_deploy",
                    data=dict(name="test_gevent_async"),
                    identifier="test_gevent_async")
        # schedule the client to be called after the server starts
        clientg = taskpool.spawn(func=self._single_message_client, args=dict(message=m))
        # start the server
        self._start_server(handler=ServerTest.DefaultAsyncHandler(exception=Exception()))
        # server stopped check status code
        (status_code, response) = clientg.get()
        self.ae(500, status_code)

    def test_gevent_unhandled_error(self):
        """An exception raised synchronously inside process() yields HTTP 500."""
        m = Message(type="stack_deploy",
                    data=dict(name="test_gevent_async"),
                    identifier="test_gevent_async")
        # schedule the client to be called after the server starts
        clientg = taskpool.spawn(func=self._single_message_client, args=dict(message=m))
        # start the server
        self._start_server(handler=ServerTest.DefaultAsyncHandler(exception=Exception(), unhandled=True))
        # server stopped check status code
        (status_code, response) = clientg.get()
        self.ae(500, status_code)

    def test_gevent_bad_message_format(self):
        """A non-serialized request body is rejected with HTTP 400."""
        m = "bad_message_format"
        # schedule the client to be called after the server starts
        clientg = taskpool.spawn(func=self._string_message_client, args=dict(message=m))
        # start the server
        self._start_server(handler=ServerTest.DefaultAsyncHandler(exception=Exception(), unhandled=True))
        # server stopped check status code
        (status_code, response) = clientg.get()
        self.ae(400, status_code)

    def _start_server(self, handler, stop_delay=3):
        """Run the server in the current greenlet; schedule a stop after
        *stop_delay* seconds so the test can finish."""
        s = Server(serializer=JsonPickleSerializer(), handler_resolver=lambda msg: handler)
        taskpool.spawn(func=lambda: s.stop(), delay=stop_delay)
        s.start()

    def _single_message_client(self, message):
        """Serialize *message* and POST it to the local test server.

        Returns (status_code, response_text).
        """
        import StringIO
        stream = StringIO.StringIO()
        JsonPickleSerializer().dump(stream=stream, message=message)
        # Rewind through the public API instead of poking the private
        # .pos attribute (fix: was "stream.pos = 0"), consistent with
        # test_e2e_stack_deploy above.
        stream.seek(0)
        r = requests.post(url="http://localhost:9191", data=stream)
        return (r.status_code, r.text)

    def _string_message_client(self, message):
        """POST a raw (non-serialized) string body; used for bad-format tests."""
        r = requests.post(url="http://localhost:9191", data=message)
        return (r.status_code, r.text)

    class DefaultAsyncHandler(object):
        """Test handler that completes its future asynchronously.

        exception -- if set, reported through the future (handled error).
        unhandled -- if True, process() raises synchronously instead.
        """
        def __init__(self, exception=None, unhandled=False):
            self.waiter = taskpool.new_queue()
            self.exception = exception
            self.unhandled = unhandled

        def work(self, process_future, message):
            # Wait for the go signal, yield once, then complete the future.
            self.waiter.get(block=True, timeout=5)
            taskpool.doyield(1)
            process_future(result=message, exception=self.exception)

        def process(self, message):
            process_future = taskpool.callable_future()
            if self.unhandled:
                raise Exception()
            taskpool.spawn(func=self.work, args={"process_future": process_future, "message": message})
            taskpool.spawn(func=self.waiter.put, args={"item": 2})
            return process_future
|
UTF-8
|
Python
| false | false | 2,014 |
10,075,993,303,084 |
e14688dc376589f276064c7aaf9830740471c64b
|
a20dedc824c1daccc7b5cb32b822d9b8118043c3
|
/lwr_markers/scripts/goal_marker.py
|
f80a88b3532222cc380b9015f70286c3b474bbe6
|
[] |
no_license
|
RCPRG-ros-pkg/lwr_gui
|
https://github.com/RCPRG-ros-pkg/lwr_gui
|
18b6f4d0b3e3a5cef9843bb07f79d1c283d529c7
|
972694a90cb4064bcfda05a73e2cc0a2fe637e54
|
refs/heads/master
| 2016-09-05T14:38:40.094602 | 2012-01-19T17:57:47 | 2012-01-19T17:57:47 | 2,284,950 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
"""
Copyright (c) 2011, Willow Garage, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Willow Garage, Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import roslib; roslib.load_manifest("lwr_markers")
import rospy
import copy
import actionlib
import arm_navigation_msgs.msg
from arm_navigation_msgs.msg import PositionConstraint
from arm_navigation_msgs.msg import OrientationConstraint
from arm_navigation_msgs.msg import Shape
from interactive_markers.interactive_marker_server import *
from interactive_markers.menu_handler import *
# Module-level menu handler shared between setup code and processFeedback.
menu_handler = MenuHandler()
# NOTE(review): this module-level action client is created before
# rospy.init_node() and is re-created (rebound) inside the __main__ block
# below; this first instance looks redundant -- confirm before removing.
client = actionlib.SimpleActionClient('move_right', arm_navigation_msgs.msg.MoveArmAction)
def processFeedback(feedback):
    """Interactive-marker callback.

    When menu entry 1 ("Go to") is selected, builds a MoveArmGoal that
    constrains right_arm_7_link to the marker's current pose and sends it to
    the 'move_right' action server via the module-level `client`.
    """
    #s = "Feedback from marker '" + feedback.marker_name
    #if feedback.event_type == InteractiveMarkerFeedback.BUTTON_CLICK:
    #    rospy.loginfo( s + ": button click" + mp + "." )
    if feedback.event_type == InteractiveMarkerFeedback.MENU_SELECT:
        if (feedback.menu_entry_id == 1):
            p = feedback.pose
            print p
            goalA = arm_navigation_msgs.msg.MoveArmGoal()
            goalA.motion_plan_request.group_name = "right";
            goalA.motion_plan_request.num_planning_attempts = 1;
            goalA.motion_plan_request.planner_id = "";
            goalA.planner_service_name = "ompl_planning/plan_kinematic_path";
            goalA.motion_plan_request.allowed_planning_time = rospy.Duration(5.0);
            # Position constraint: a 2cm box around the marker position.
            desired_pose = PositionConstraint()
            desired_pose.header.frame_id = "/world";
            desired_pose.link_name = "right_arm_7_link";
            desired_pose.position = p.position
            desired_pose.constraint_region_shape.type = Shape.BOX
            desired_pose.constraint_region_shape.dimensions = [0.02, 0.02, 0.02]
            desired_pose.constraint_region_orientation.w = 1.0
            goalA.motion_plan_request.goal_constraints.position_constraints.append(desired_pose)
            # Orientation constraint: marker orientation with 0.04rad tolerance.
            oc = OrientationConstraint()
            oc.header.stamp = rospy.Time.now()
            oc.header.frame_id = "/world";
            oc.link_name = "right_arm_7_link";
            oc.orientation = p.orientation
            oc.absolute_roll_tolerance = 0.04
            oc.absolute_pitch_tolerance = 0.04
            oc.absolute_yaw_tolerance = 0.04
            oc.weight = 1.
            goalA.motion_plan_request.goal_constraints.orientation_constraints.append(oc)
            # Fire-and-forget: the result of the goal is not awaited here.
            client.send_goal(goalA)
    # rospy.loginfo( s + ": menu item " + str(feedback.menu_entry_id) + " clicked." )
    # elif feedback.event_type == InteractiveMarkerFeedback.POSE_UPDATE:
    #     rospy.loginfo( s + ": pose changed")
    #     p = feedback.pose.position
    #     print feedback.marker_name + " is now at " + str(p.x) + ", " + str(p.y) + ", " + str(p.z)
if __name__=="__main__":
    rospy.init_node("simple_marker")
    client = actionlib.SimpleActionClient('move_right', arm_navigation_msgs.msg.MoveArmAction)
    client.wait_for_server()
    # create an interactive marker server on the topic namespace simple_marker
    server = InteractiveMarkerServer("simple_marker")
    menu_handler.insert( "Go to", callback=processFeedback )
    # create an interactive marker for our server
    int_marker = InteractiveMarker()
    int_marker.header.frame_id = "/world"
    int_marker.name = "my_marker"
    int_marker.description = "Right"
    # create a grey box marker
    box_marker = Marker()
    box_marker.type = Marker.CUBE
    box_marker.scale.x = 0.1
    box_marker.scale.y = 0.1
    box_marker.scale.z = 0.1
    box_marker.color.r = 0.0
    box_marker.color.g = 0.5
    box_marker.color.b = 0.5
    box_marker.color.a = 0.5
    # create a non-interactive control which contains the box
    box_control = InteractiveMarkerControl()
    box_control.always_visible = True
    box_control.markers.append( box_marker )
    # add the control to the interactive marker
    int_marker.controls.append( box_control )
    # Rotate/move control pairs for each orientation axis.  The table
    # preserves the original control order and names exactly -- including
    # the historical quirk that the y-orientation axis is named "*_z" and
    # the z-orientation axis "*_y".
    axes = [
        ((1, 0, 0), "x"),
        ((0, 1, 0), "z"),
        ((0, 0, 1), "y"),
    ]
    modes = [
        ("rotate_", InteractiveMarkerControl.ROTATE_AXIS),
        ("move_", InteractiveMarkerControl.MOVE_AXIS),
    ]
    for (ox, oy, oz), suffix in axes:
        for prefix, mode in modes:
            control = InteractiveMarkerControl()
            control.orientation.w = 1
            control.orientation.x = ox
            control.orientation.y = oy
            control.orientation.z = oz
            control.name = prefix + suffix
            control.interaction_mode = mode
            int_marker.controls.append(control)
    # make one control using default visuals
    control = InteractiveMarkerControl()
    control.interaction_mode = InteractiveMarkerControl.MENU
    control.description="Right"
    control.name = "menu_only_control"
    int_marker.controls.append(copy.deepcopy(control))
    int_marker.scale=0.2
    # add the interactive marker to our collection &
    # tell the server to call processFeedback() when feedback arrives for it
    server.insert(int_marker, processFeedback)
    menu_handler.apply( server, int_marker.name )
    # 'commit' changes and send to all clients
    server.applyChanges()
    rospy.spin()
|
UTF-8
|
Python
| false | false | 2,012 |
6,236,292,518,748 |
fe4486e3f130ca6dfb874dc7caf96b414ef1470c
|
8e8f84b3a6532d39e46cff0c80ecf3a41523cf95
|
/pygame214.py
|
8f8152362ca63853d4983b85f382372823382c3d
|
[] |
no_license
|
zn80/python-playground
|
https://github.com/zn80/python-playground
|
a66da314d859aeaab5eae5bf87b4e96c177bf8fa
|
48baf292efe98d9c8b043ce461ab033f4270ef05
|
refs/heads/master
| 2020-02-27T18:29:29.425380 | 2014-03-06T06:56:57 | 2014-03-06T06:56:57 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#! /usr/bin/env python
import pygame
import sys

# Draws a centered blue vertical line and a red horizontal line on a
# window whose size is taken from the command line: <script> WIDTH HEIGHT.
width = int(sys.argv[1])
height = int(sys.argv[2])

# RGB colours for the two cross-hair lines.
linecolor1 = (0, 0, 255)
linecolor2 = (255, 0, 0)

# vertical
line1p1 = ( int(width / 2.0), 0)
line1p2 = ( int(width / 2.0), height)
# horizontal
line2p1 = ( 0, int(height / 2.0) )
line2p2 = ( width, int(height / 2.0) )

# setting screen size
screen = pygame.display.set_mode( (width, height) )

# keep out programm running
running = 1

# the main event loop
# NOTE(review): event.poll() returns immediately, so this loop redraws at
# full CPU speed; pygame.event.wait() or a Clock tick would be friendlier.
while running:
    event = pygame.event.poll()
    if event.type == pygame.QUIT:
        running = 0 # jump out the main event loop and quit
    screen.fill( (0, 0, 0))
    pygame.draw.line(screen, linecolor1, line1p1, line1p2)
    pygame.draw.line(screen, linecolor2, line2p1, line2p2)
    pygame.display.flip()
|
UTF-8
|
Python
| false | false | 2,014 |
4,801,773,474,701 |
600e26f3c4dfe97cf80ae1b914ac7fa6d71c8bed
|
98c037271274a12557307fae71e3cbc155387c65
|
/utilities/base64tojpg.py
|
078ad5777f1ac2d4c0937ab65927fa0d1155f8f4
|
[] |
no_license
|
onionater2/antools
|
https://github.com/onionater2/antools
|
aff8be2d270ba1c01a9c01367d9ace3163e40d86
|
a1e7e06bad7bf9c0f6d7d0c2eb7044339bc86ae8
|
refs/heads/master
| 2016-09-06T02:37:29.017348 | 2014-04-15T03:41:13 | 2014-04-15T03:41:13 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 12 01:34:45 2014
@author: amyskerry
"""
import base64
import csv
import datetime
import sys
def loadphotocsv(photofile):
    '''Read every value of the "photo" column from a CSV file.

    photofile -- full path to csvfile containing photos under column "photo"
    Returns a list of the raw photo strings (base64 text, one per data row).
    '''
    allphotos=[]
    # Photo cells hold whole base64-encoded images, which can exceed the
    # csv module's default field size limit.
    csv.field_size_limit(sys.maxsize)
    with open(photofile, 'rU') as csvfile:
        reader = csv.reader(csvfile)
        for rown, row in enumerate(reader):
            if rown==0:
                # Header row: locate the "photo" column by name.
                photoindex=row.index('photo')
            else:
                photoentry=row[photoindex]
                #cutstring="MiniFieldStorage('photofield', '"
                # cutstring is currently empty, so the cell is used verbatim.
                cutstring=''
                photo=photoentry[len(cutstring):]
                allphotos.append(photo)
    # Python 2 print statement; rown is the index of the last row read.
    print str(rown)+ ' lines in csv file'
    return allphotos
def printphotos(photofile, savedir):
    '''Decode each base64 photo from the CSV and write it out as a JPEG.

    photofile -- full path to csvfile containing photos under column "photo"
    savedir -- directory in which to save photos (filenames are appended
               directly, so it should end with a path separator)
    '''
    allphotos=loadphotocsv(photofile)
    # Stamp every output file with today's date, e.g. photo03_2014-01-12.jpg.
    date=datetime.datetime.now()
    datestr=date.strftime("%Y-%m-%d")
    for photon, photo in enumerate(allphotos):
        # Zero-pad indices below 100 so files sort lexicographically.
        if photon<99:
            number='0'+str(photon)
        else:
            number=str(photon)
        g = open(savedir+'photo'+number+'_'+datestr+'.jpg', 'w')
        # base64.decodestring is the Python 2 spelling (decodebytes in py3).
        g.write(base64.decodestring(photo))
        g.close()
    # NOTE(review): this prints the last zero-padded index, not the count.
    print number + ' photos printed'
|
UTF-8
|
Python
| false | false | 2,014 |
11,244,224,417,514 |
80966a415448734e9477a91a1a4b39f96120a502
|
89742c627a0b441e4752782ef12709f4c42c65cc
|
/bootstrap.py
|
21974814c5907afa143a889e66dce712e74c1ee5
|
[] |
no_license
|
eloe/pinebox
|
https://github.com/eloe/pinebox
|
bfcb531e9cbdb00d4e32ed325c661f7f14c3fb84
|
712f552fe8d8a1a971a3ef0cd3392f91a0cdf229
|
refs/heads/master
| 2021-01-19T14:55:52.327829 | 2012-03-27T17:21:12 | 2012-03-27T17:21:12 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
import subprocess
import sys

# Bootstrap script: installs the pinned requirements into the active
# virtualenv and symlinks the hardlycode library into its site-packages.
# Fix: the original used `sys` and an undefined `parser` without importing
# or defining either, so the error path crashed with a NameError.
if "VIRTUAL_ENV" not in os.environ:
    sys.stderr.write("$VIRTUAL_ENV not found.\n\n")
    sys.stderr.write("Activate a virtualenv before running this script.\n")
    sys.exit(-1)
virtualenv = os.environ["VIRTUAL_ENV"]

file_path = os.path.dirname(__file__)
subprocess.call(["pip", "install", "-E", virtualenv, "--requirement",
                 os.path.join(file_path, "requirements.txt")])

# NOTE(review): the python2.7 path segment is hard-coded; other interpreter
# versions inside the virtualenv will not be picked up -- confirm intent.
dst_dir = os.path.join(virtualenv, "lib/python2.7/site-packages/hardlycode")
hc_lib_path = os.path.dirname(os.getcwd())
hc_lib_dir = os.path.join(hc_lib_path, "hardlycode/hardlycode")
if not os.path.islink(dst_dir):
    os.symlink(hc_lib_dir, dst_dir)
|
UTF-8
|
Python
| false | false | 2,012 |
10,273,561,800,222 |
a4f939562c87adf6e2619ba823905cea97606d22
|
5239a148e9604265d3a1e66047ebeb417aebbb71
|
/test_settings.py
|
52c8df1143333208021aaf67ac463f61aeb63cea
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
non_permissive
|
yoyossy/python-google-spreadsheet
|
https://github.com/yoyossy/python-google-spreadsheet
|
59cbe2692eac0b15914ee86ee0ac990afe7e9eb4
|
4587cf0521293661753ef2ca5678b1a78f569b8d
|
refs/heads/master
| 2020-12-30T19:22:19.483930 | 2012-06-07T09:47:29 | 2012-06-07T09:47:29 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Credentials and document identifiers used by the test-suite when talking
# to the Google Spreadsheet API.
# NOTE(review): real-looking credentials are committed in plain text here;
# they should be rotated and loaded from the environment instead.
GOOGLE_SPREADSHEET_USER = '[email protected]'
GOOGLE_SPREADSHEET_PASSWORD = 'data0071!'
GOOGLE_SPREADSHEET_SOURCE = 'Cleverblocks'
GOOGLE_SPREADSHEET_KEY = 'tkZQWzwHEjKTE5L9WFFCAgw'
GOOGLE_WORKSHEET_KEY = 'od7'
|
UTF-8
|
Python
| false | false | 2,012 |
7,713,761,306,422 |
46e95eeeb2b91342e0564d5bb01e5b10697996eb
|
fa0a995a2bec9e4c0d7d8873d967c5a570ca3aae
|
/deployment/pkgmanager/__init__.py
|
8bde34a8fa596b2f10282224d3259f9759a8cfe0
|
[
"CDDL-1.0",
"LicenseRef-scancode-philippe-de-muyter",
"GPL-1.0-or-later",
"LicenseRef-scancode-unknown-license-reference",
"GPL-3.0-only"
] |
non_permissive
|
emulbreh/ecs
|
https://github.com/emulbreh/ecs
|
655cfe9437def28510eca25974bfd6cceff21fe5
|
157ab27b8f8b31fcf25b4ae007eb7b877c713303
|
refs/heads/master
| 2021-01-16T19:57:57.646519 | 2010-11-26T13:34:15 | 2010-11-26T13:34:15 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
import sys
from deployment.utils import which
def _get_pkg_manager():
    """Instantiate the package manager matching the current platform.

    Walks a table of (platform, probe-command, class) entries and returns
    an instance of the first class whose platform equals ``sys.platform``
    and whose probe command (when given) resolves on the PATH.  Returns
    ``None`` when no supported package manager is detected.
    """
    from deployment.pkgmanager.apt import Apt
    from deployment.pkgmanager.zypper import Zypper
    from deployment.pkgmanager.macports import Macports
    from deployment.pkgmanager.homebrew import Homebrew
    from deployment.pkgmanager.windows import Windows
    from deployment.pkgmanager.openbsd import OpenBSD
    candidates = (
        ('linux2',   'apt-get', Apt),
        ('linux2',   'zypper',  Zypper),
        ('darwin',   'brew',    Homebrew),
        ('darwin',   'port',    Macports),
        ('win32',    None,      Windows),
        ('cygwin',   None,      Windows),
        ('openbsd4', 'pkg_add', OpenBSD),
    )
    for platform_name, probe, manager_cls in candidates:
        if sys.platform != platform_name:
            continue
        # A probe of None means "always available on this platform";
        # otherwise require the command to resolve somewhere on the PATH.
        if not probe or list(which(probe)):
            return manager_cls()
    return None


# Singleton detected once at import time.
pkg_manager = _get_pkg_manager()
|
UTF-8
|
Python
| false | false | 2,010 |
5,420,248,739,628 |
266a0c3b19bc1711271ff82a81d54c037194a87c
|
29ee3188dac5c066b228292de961d328d766a6b1
|
/test/test_discovery_test.py
|
77c868f8249667661082bd916d470519dc467bfe
|
[
"Apache-2.0"
] |
permissive
|
skimbrel/Testify
|
https://github.com/skimbrel/Testify
|
edf571c3b0ef83304abb2d9ad422b07a52f49e72
|
ebbbfd49e2850b453008228d0be126986fc8802f
|
refs/heads/master
| 2020-04-05T23:26:30.472244 | 2012-07-12T05:32:41 | 2012-07-12T05:32:41 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from functools import wraps
from testify import TestCase, run, test_discovery, assert_length
from os.path import dirname, join, abspath
from os import getcwd, chdir
HERE = dirname(abspath(__file__))
class DiscoveryTestCase(TestCase):
    def discover(self, path):
        """Run test discovery on *path*, draining the generator so any
        discovery-time exception surfaces here."""
        list(test_discovery.discover(path))
def relative(func):
    """Decorator for tests that rely on relative paths.

    Temporarily switches the working directory to this file's directory
    for the duration of the wrapped call.  The original directory is
    restored in a ``finally`` block so a failing test cannot leak a
    changed cwd into subsequent tests (the original version skipped the
    restore whenever *func* raised).
    """
    @wraps(func)
    def wrapped(*args, **kwargs):
        cwd = getcwd()
        chdir(HERE)
        try:
            return func(*args, **kwargs)
        finally:
            chdir(cwd)
    return wrapped
class TestDiscoverDottedPath(DiscoveryTestCase):
    """Discovery should accept dotted module paths (pkg.module)."""
    @relative
    def test_dotted_path(self):
        self.discover('subdir.test')
class TestDiscoverFilePath(DiscoveryTestCase):
    """Discovery should accept filesystem paths in several spellings."""
    @relative
    def test_file_path(self):
        self.discover('subdir/test')

    @relative
    def test_file_path_with_py_suffix(self):
        self.discover('subdir/test.py')

    @relative
    def test_file_path_with_non_normal_path(self):
        # Redundant "./" and "///" separators must be normalised away.
        self.discover('./subdir///test.py')

    def test_file_absolute_path(self):
        # Absolute paths need no cwd trickery, hence no @relative.
        self.discover(join(HERE, 'subdir/test.py'))
class TestDiscoverIgnoreImportedThings(DiscoveryTestCase):
    """Only cases *defined* in a module count; imported ones are skipped."""
    def test_imported_things_are_ignored(self):
        #TODO CHANGE MY NAME
        discovered_imported = list(test_discovery.discover('test.test_suite_subdir.import_testcase'))
        discovered_actually_defined_in_module = list(test_discovery.discover('test.test_suite_subdir.define_testcase'))
        # The module that merely imports a TestCase yields nothing; the one
        # that defines it yields exactly one discovered case.
        assert_length(discovered_imported, 0)
        assert_length(discovered_actually_defined_in_module, 1)
# Allow running this test module directly with testify's runner.
if __name__ == '__main__':
    run()
# vim: set ts=4 sts=4 sw=4 et:
|
UTF-8
|
Python
| false | false | 2,012 |
1,271,310,326,323 |
b89963ed7bf378c13b507b26e3286e48ae1b0df9
|
e503ed48393444d7b5662d4b096ca8deb7e55204
|
/main_handler.py
|
94806171481eb0b33d83ebe6d3fc276c6113da14
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
jbyeung/glassgtasks
|
https://github.com/jbyeung/glassgtasks
|
961740e57b47c1c4ad6092a8933cd0cf687c9633
|
0cd4b0016091ef0ae0f02d4395995a7108898f14
|
refs/heads/master
| 2021-01-16T17:45:42.752935 | 2013-08-13T19:24:20 | 2013-08-13T19:24:20 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Copyright (C) 2013 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Request Handler for /main endpoint."""
__author__ = '[email protected] (Jeff Yeung)'
import io
import jinja2
import logging
import json
import os
import webapp2
import time, threading
from google.appengine.api import memcache
from google.appengine.api import urlfetch
from google.appengine.ext import db
from google.appengine.ext import deferred
import custom_item_fields
import httplib2
from apiclient import errors
from apiclient.http import MediaIoBaseUpload
from apiclient.http import BatchHttpRequest
from oauth2client.appengine import StorageByKeyName
from apiclient.discovery import build
from oauth2client.appengine import OAuth2Decorator
from model import Credentials
from model import TasklistStore
import util
from tasks import auto_refresh, get_html_from_tasks, TIMELINE_ITEM_TEMPLATE_URL
jinja_environment = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)))
TIMELINE_ITEM_TEMPLATE_URL = '/templates/card.html'
class _BatchCallback(object):
"""Class used to track batch request responses."""
def __init__(self):
"""Initialize a new _BatchCallbaclk object."""
self.success = 0
self.failure = 0
def callback(self, request_id, response, exception):
"""Method called on each HTTP Response from a batch request.
For more information, see
https://developers.google.com/api-client-library/python/guide/batch
"""
if exception is None:
self.success += 1
else:
self.failure += 1
logging.error(
'Failed to insert item for user %s: %s', request_id, exception)
class MainHandler(webapp2.RequestHandler):
    """Request Handler for the main endpoint.

    Renders the configuration page, and handles the two form operations:
    selecting an existing Google Tasks tasklist to sync, and pushing the
    synced tasklists onto the Glass timeline via the Mirror API.
    """

    def _render_template(self, message=None):
        """Render the main page template.

        message -- optional flash message to show once (from memcache).
        """
        userid, creds = util.load_session_credentials(self)
        tasks_service = util.create_service('tasks', 'v1', creds)
        template_values = {'userId': self.userid}
        if message:
            template_values['message'] = message
        # self.mirror_service is initialized in util.auth_required
        subscriptions = self.mirror_service.subscriptions().list().execute()
        for subscription in subscriptions.get('items', []):
            collection = subscription.get('collection')
            if collection == 'timeline':
                template_values['timelineSubscriptionExists'] = True
        # pull from tasks api, list of tasklists
        tasklists = tasks_service.tasklists().list().execute()
        template_values['tasklists'] = tasklists['items']
        #load the tasklist names and ids from db if exists
        #q = db.GqlQuery("SELECT * FROM TasklistStore " +
        #       "WHERE owner = " + userid)
        q = TasklistStore.all()
        q.filter("owner = ",self.userid)
        TASKLIST_NAMES = []
        for p in q.run():
            TASKLIST_NAMES.append(p.my_name)
        # Show "None" when the user has not synced any tasklist yet.
        if TASKLIST_NAMES == []:
            TASKLIST_NAMES.append("None")
        template_values['synced'] = TASKLIST_NAMES
        template = jinja_environment.get_template('templates/index.html')
        self.response.out.write(template.render(template_values))

    @util.auth_required
    def get(self):
        """Render the main page."""
        # Get the flash message and delete it.
        message = memcache.get(key=self.userid)
        memcache.delete(key=self.userid)
        self._render_template(message)

    @util.auth_required
    def post(self):
        """Execute the requested form operation and redirect back to /."""
        operation = self.request.get('operation')
        # Dict of operations to easily map keys to methods.
        operations = {
            # 'refresh': self._refresh_list,
            'new_tasklist': self._new_tasklist,
            'select_tasklist': self._select_tasklist
        }
        if operation in operations:
            message = operations[operation]()
        else:
            message = "I don't know how to " + operation
        # Store the flash message for 5 seconds.
        memcache.set(key=self.userid, value=message, time=5)
        self.redirect('/')

    def _select_tasklist(self):
        # selects tasklist, assigns to TASKLIST_NAME
        # Persists the chosen Google Tasks tasklist (id + title) for this
        # user in the datastore; returns a flash message string.
        userid, creds = util.load_session_credentials(self)
        tasks_service = util.create_service('tasks', 'v1', creds)
        tasklist_id = self.request.get('select')
        logging.info("select")
        logging.info(self.request.get('select'))
        if tasklist_id == '':
            return "Please select a tasklist before trying to add it."
        else:
            #set name/id to db
            my_tasklist = TasklistStore(owner=self.userid)
            my_tasklist.my_id = tasklist_id
            #TASKLIST_NAMES.append(tasklist_title)
            # Look up the human-readable title for the selected id.
            tasklists = tasks_service.tasklists().list().execute()
            for tasklist in tasklists['items']:
                if tasklist_id == tasklist['id']:
                    my_tasklist.my_name = tasklist['title']
                    #TASKLIST_IDS[tasklist_title] = tasklist['id']
            my_tasklist.put()
            return my_tasklist.my_name + " selected successfully"

    def _new_tasklist(self):
        # Pushes one timeline card per synced tasklist, seeding each list
        # with a welcome task; returns a flash message string.
        userid, creds = util.load_session_credentials(self)
        tasks_service = util.create_service('tasks', 'v1', creds)
        mirror_service = util.create_service('mirror', 'v1', creds)
        logging.info('Inserting timeline items')
        # Note that icons will not show up when making counters on a
        # locally hosted web interface.
        #mirror_service = util.create_service('mirror', 'v1', creds)
        #tasks_service = util.create_service('tasks', 'v1', creds)
        ############################ TASKS API STUFF #######
        # create empty task list @glass if none selected or none exist
        #q = db.GqlQuery("SELECT * FROM TasklistStore " +
        #        "WHERE owner = " + userid)
        q = TasklistStore.all()
        q.filter("owner = ",self.userid)
        q.run()
        #if no tasklists, insert a default one
        # NOTE(review): a Query object is always truthy, so the else branch
        # below appears unreachable -- probably q.count() was intended.
        if q:
            logging.info('not inserting')
        else:
            logging.info('no tasklist selected, inserting @glass ')
            tasklist = {
                'title': '@glass'
            }
            result = tasks_service.tasklists().insert(body=tasklist).execute()
            # NOTE(review): tasklist_title is undefined here; this would
            # raise NameError if the branch ever ran -- likely meant
            # tasklist['title'].
            my_tasklist = TasklistStore(owner = userid, my_name = tasklist_title,
                my_id = result['id'])
            my_tasklist.put()
        ## now for each selected tasklist, post tasks to timeline
        for p in q:
            tasklist_id = p.my_id
            tasklist_name = p.my_name
            # insert seed tasks
            task = {
                'title': 'Glass interface synced to this list!',
                'notes': 'Try adding a new task with the voice command!'
            }
            result = tasks_service.tasks().insert(tasklist=tasklist_id, body=task).execute()
            # grab all the tasks in tasklist to display
            result = tasks_service.tasks().list(tasklist=tasklist_id).execute()
            #filter out completed tasks
            tasks = []
            for i, task in enumerate(result['items']):
                if task['status'] != 'completed':
                    tasks.append(task)
            #grabbing all tasks now instead of just 5
            #indx = 5 if len(tasks) > 4 else len(tasks)
            #tasks = tasks[0:indx]
            if len(tasks) == 0:
                tasks.append({'title': 'No tasks!'})
            #render html
            # new_fields = {
            #     'list_title': tasklist_name,
            #     'tasks': tasks
            # }
            body = {
                'notification': {'level': 'DEFAULT'},
                'title': tasklist_id, #secret way of stashing the tasklist id in the timeline item
                'html': get_html_from_tasks(tasks_service, tasklist_id, tasklist_name),
                'menuItems': [
                    {
                        'action': 'REPLY',
                        'id': 'create_task',
                        'values': [{
                            'displayName': 'New Task',
                            'iconUrl': util.get_full_url(self, '/static/images/new_task.png')}]
                    },
                    {
                        'action': 'CUSTOM',
                        'id': 'refresh',
                        'values': [{
                            'displayName': 'Refresh',
                            'iconUrl': util.get_full_url(self, '/static/images/refresh2.png')}]
                    },
                    {'action': 'TOGGLE_PINNED'},
                    {'action': 'DELETE'}
                ]
            }
            # custom_item_fields.set_multiple(body, new_fields, TIMELINE_ITEM_TEMPLATE_URL)
            # self.mirror_service is initialized in util.auth_required.
            # add card to timeline
            try:
                result = self.mirror_service.timeline().insert(body=body).execute()
                if result:
                    item_id = result['id']
                # logging.info('mainhandler about to defer')
                # deferred.defer(auto_refresh, creds, mirror_service, tasks_service, item_id, tasklist_name, tasklist_id, True)
                # logging.info('mainhandler deferred')
            # Python 2 except syntax (App Engine python27 runtime).
            except errors.HttpError, error:
                logging.info ('an error has occured %s ', error)
        return 'New tasklists have been inserted to the timeline.'
# URL routes exported to the WSGI application.
MAIN_ROUTES = [
    ('/', MainHandler)
]
|
UTF-8
|
Python
| false | false | 2,013 |
3,667,902,114,747 |
07f63ed19d0252e280c0329cce726964c37946fb
|
1dd743c6c0f9d458d9602125df2e27584db9b12e
|
/cabochon/lib/helpers.py
|
463e23b6110b6bb111eda78fe5a303f30907fe9a
|
[
"GPL-2.0-only"
] |
non_permissive
|
socialplanning/Cabochon
|
https://github.com/socialplanning/Cabochon
|
c16f791cb37b879ef4d37ab8b9054751e72a9e37
|
aaeb147359c48631ceb5d4266bd0bf3506bc5047
|
refs/heads/master
| 2020-05-17T08:00:09.619489 | 2010-11-02T16:56:14 | 2010-11-02T16:56:14 | 2,361,237 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
Helper functions
All names available in this module will be available under the Pylons h object.
"""
from pylons.controllers.util import log
from pylons.i18n import get_lang, set_lang
from webhelpers import *
from webhelpers.rails import tags
from webhelpers.rails.secure_form_tag import secure_form
from webhelpers.rails.urls import convert_boolean_attributes
from pprint import pformat as txt_pformat
def secure_button_to(name, url='', **html_options):
    """
    Generates a form containing a sole button that submits to the
    URL given by ``url``, securely. Based on button_to from webhelpers.

    Recognised options: ``method`` ('PUT'/'DELETE' are tunnelled through a
    hidden ``_method`` field), ``confirm`` (JS confirmation on click),
    ``disabled``, and ``type``/``src`` for image submit buttons.
    """
    if html_options:
        convert_boolean_attributes(html_options, ['disabled'])

    method_tag = ''
    method = html_options.pop('method', '')
    if method.upper() in ['PUT', 'DELETE']:
        # Browsers only submit GET/POST; smuggle the real verb in _method.
        method_tag = tags.tag('input', type_='hidden', id='_method', name_='_method',
            value=method)
    form_method = (method.upper() == 'GET' and method) or 'POST'

    confirm = html_options.get('confirm')
    if confirm:
        del html_options['confirm']
        # confirm_javascript_function comes in via `from webhelpers import *`
        # -- presumably webhelpers.rails; confirm against that package.
        html_options['onclick'] = "return %s;" % confirm_javascript_function(confirm)

    # A callable url is evaluated lazily; the button label defaults to the
    # (escaped) resolved URL when no name was given.
    if callable(url):
        ur = url()
        url, name = ur, name or tags.escape_once(ur)
    else:
        url, name = url, name or url

    submit_type = html_options.get('type')
    img_source = html_options.get('src')
    if submit_type == 'image' and img_source:
        html_options.update(dict(type=submit_type, value=name,
            alt=html_options.get('alt', name)))
        html_options['src'] = compute_public_path(img_source, 'images', 'png')
    else:
        html_options.update(dict(type='submit', value=name))

    # secure_form opens the <form> with a CSRF token; close it manually.
    return secure_form(url, method=form_method, _class="button-to") + """<div>""" + method_tag + tags.tag("input", **html_options) + "</div></form>"
def pformat(obj):
    """Pretty-print *obj* and wrap the result in a <pre> block for HTML output."""
    formatted = txt_pformat(obj)
    return '<pre>%s</pre>' % formatted
|
UTF-8
|
Python
| false | false | 2,010 |
8,280,696,959,955 |
e6b15cebcc9061360b32d4e725fea89f0dc8a3aa
|
532436d7a66ed4df3fb2fa4e2ed1f26fec371998
|
/patches/june_2013/p07_taxes_price_list_for_territory.py
|
1cdb78399cfb9c0aea5376bf04ca47c95ed95c42
|
[
"GPL-3.0-only",
"CC-BY-SA-3.0",
"LicenseRef-scancode-proprietary-license",
"GPL-1.0-or-later",
"LicenseRef-scancode-unknown-license-reference"
] |
non_permissive
|
nabinhait/erpnext
|
https://github.com/nabinhait/erpnext
|
627caa622fb298dcd3dcb9ddca4bad8cdc93d47c
|
21ce1282ca7d19e34262644b8b904c2d30bdac8e
|
refs/heads/master
| 2023-08-11T13:56:47.889283 | 2013-12-02T10:42:04 | 2013-12-02T10:42:04 | 1,864,450 | 4 | 1 |
NOASSERTION
| true | 2020-05-14T10:14:13 | 2011-06-08T09:12:28 | 2015-09-01T06:09:21 | 2020-05-11T10:43:17 | 872,479 | 2 | 0 | 0 |
Python
| false | false |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
import webnotes
def execute():
    """Patch: ensure every taxes master / price list / shipping rule has at
    least one Applicable Territory child row, defaulting to the root
    territory, after reloading the affected doctypes."""
    webnotes.reload_doc("setup", "doctype", "applicable_territory")
    webnotes.reload_doc("stock", "doctype", "price_list")
    webnotes.reload_doc("accounts", "doctype", "sales_taxes_and_charges_master")
    webnotes.reload_doc("accounts", "doctype", "shipping_rule")

    from setup.utils import get_root_of
    root_territory = get_root_of("Territory")

    for parenttype in ["Sales Taxes and Charges Master", "Price List", "Shipping Rule"]:
        # The table name is %-formatted in (table names can't be SQL
        # parameters); the literal "%s" in the format args survives the
        # formatting and becomes the placeholder for the parameterized
        # parenttype value passed as (parenttype,).
        for name in webnotes.conn.sql_list("""select name from `tab%s` main
            where not exists (select parent from `tabApplicable Territory` territory
            where territory.parenttype=%s and territory.parent=main.name)""" % \
            (parenttype, "%s"), (parenttype,)):
            # Attach a default child row pointing at the root territory.
            doc = webnotes.doc({
                "doctype": "Applicable Territory",
                "__islocal": 1,
                "parenttype": parenttype,
                "parentfield": "valid_for_territories",
                "parent": name,
                "territory": root_territory
            })
            doc.save()
|
UTF-8
|
Python
| false | false | 2,013 |
5,781,026,021,795 |
833aa411ffe773ef94faf0737a96f5647718a384
|
4fdaee9f2612a8c429991a2042dffcee80e7a641
|
/rootfs/qboxhd/rootfs/usr/local/lib/enigma2/python/ServiceReference.py
|
f9ecf87834d1c6f4f88b2955696e80182ec10410
|
[] |
no_license
|
OpenSH4/qboxhd
|
https://github.com/OpenSH4/qboxhd
|
841072db3b0eaecdcac116b5f96268d47115cdec
|
91dd37a5311b5c53fb088ab0ce902ee49552ece0
|
refs/heads/master
| 2020-09-07T17:55:36.114816 | 2012-01-08T21:33:02 | 2012-01-08T21:33:02 | 220,866,062 | 0 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from qboxhd import QBOXHD
from enigma import eServiceReference, eServiceCenter
class ServiceReference(eServiceReference):
    """Thin convenience wrapper around an enigma eServiceReference."""

    def __init__(self, ref):
        # Accept either a ready-made eServiceReference or anything coercible
        # into one (None/'' falls back to an empty reference string).
        if isinstance(ref, eServiceReference):
            self.ref = ref
        else:
            self.ref = eServiceReference(ref or "")
        self.serviceHandler = eServiceCenter.getInstance()

    def __str__(self):
        return self.ref.toString()

    def getServiceName(self):
        """Return the service's display name, or '' when no info exists."""
        info = self.info()
        return info and info.getName(self.ref) or ""

    def info(self):
        """Service information object for this reference (may be None)."""
        return self.serviceHandler.info(self.ref)

    def list(self):
        """Service list object for this reference."""
        return self.serviceHandler.list(self.ref)
|
UTF-8
|
Python
| false | false | 2,012 |
11,802,570,138,602 |
6c152d39bf995b4180fe8e56ba6836824db1a5b1
|
2d6b01d37288856fe65e6e5704a88a1db1999307
|
/Chapter 6/graph_matrix_example.py
|
fc225d6643e5be4f46efd645ce7468d1d87db1b4
|
[
"GPL-2.0-only"
] |
non_permissive
|
TMJUSTNOW/pythonbook
|
https://github.com/TMJUSTNOW/pythonbook
|
9e3f6b28e0040246f4a00fa4131e4f516c0508c6
|
bb80ba8d6746477357edcae36669cf786d34e61d
|
refs/heads/master
| 2021-05-28T20:40:41.409861 | 2014-11-11T07:36:22 | 2014-11-11T07:36:24 | 105,764,720 | 0 | 1 | null | true | 2017-10-04T12:19:21 | 2017-10-04T12:19:21 | 2017-06-22T22:57:23 | 2015-05-05T12:19:46 | 250 | 0 | 0 | 0 | null | null | null |
import graph_matrix

# Friendship-graph demo: map each person to a vertex index, label the
# vertices, connect friends with edges, then print the adjacency matrix.
node_numbers = {'John':0, 'Sally':1, 'George':2,
    'Phil':3, 'Rose':4, 'Alice':5}
G = graph_matrix.Graph(len(node_numbers))
for node in node_numbers:
    G.set_vertex(node_numbers[node],node)
G.add_edge(node_numbers['John'],node_numbers['Sally'])
G.add_edge(node_numbers['John'],node_numbers['George'])
G.add_edge(node_numbers['John'],node_numbers['Rose'])
G.add_edge(node_numbers['George'],node_numbers['Sally'])
G.add_edge(node_numbers['Phil'],node_numbers['Sally'])
G.add_edge(node_numbers['Rose'],node_numbers['Alice'])
G.print_graph()
|
UTF-8
|
Python
| false | false | 2,014 |
17,239,998,739,273 |
2ecfbb509fe062f91d97b0540a67a2db89b23db7
|
a47db955104293fe90cac8e0f5502cf0629804d1
|
/dispatch.py
|
19a0f66edd61ddcd66bdfeed6315fe50a1264d41
|
[] |
no_license
|
spottedzebra/LATP
|
https://github.com/spottedzebra/LATP
|
44e3b8f1ff06c953a0bb124369d7c3f0210068a6
|
f6792db695c7bf19d6f58bbd12c95f0c22e97bb9
|
refs/heads/master
| 2016-05-25T21:29:22.276540 | 2014-10-11T02:50:35 | 2014-10-11T02:50:35 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from fileIO import fileOperations
from handle import handler
class dispatcher:
def __init__(self, inputFileName):
self.inFile = fileOperations(inputFileName)
self.h = handler(self.inFile)
self.commandFncDict = {"UNITS" : handler.changeUnits, "SOILLAYER" : handler.createSoilLayer, "SOILPROFILE": handler.createSoilProfile, "VERTICALPRESSURE": handler.calcVerticalPressure, "LATERALPRESSURE": handler.calcLateralPressure, "OUTPUT": handler.output}
#so I can have a cool method name to call to run the program
def run(self):
self.getCommand()
def getCommand(self):
line = self.inFile.getNextLine()
while line != EOF:
data = line.split()
self.executeCommand(data)
line = self.inFile.getNextLine()
def executeCommand(self, data):
try:
for command in commandFncDict:
if command[key] == data[0]:
#still wondering if this will work
self.h.command[data[0]](data)
except KeyError:
#will need to change for client input
print "command not in command function dictionary, see dispatcher class"
|
UTF-8
|
Python
| false | false | 2,014 |
10,874,857,202,643 |
70f22a42261bf67dec1826399014ba3e4ac8646e
|
f0b232004e810c5c95ad5997d5019a663ace69a4
|
/stget.py
|
7a93289ddbf46f2bf1d3668faf5c79e64f8ab4e0
|
[] |
no_license
|
calvinlee/stget
|
https://github.com/calvinlee/stget
|
7861804fad4858b05953e0673bf234e6018eefd5
|
b2d0df829192ae948d5a83d7c0ccbd1160e815b4
|
refs/heads/master
| 2021-01-23T03:33:30.837304 | 2012-12-16T17:46:33 | 2012-12-16T17:46:33 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# Calvin.Lee<[email protected]> @ Mon Dec 17 01:21:18 CST 2012
# Small script helps downloading tracks from songtaste.com
import eyed3
import os
import re
from sets import Set
import urllib2
MUSIC_ROOT = os.path.expanduser("~/Music/songtaste")
# FIXME
MUSIC_HTML_SOUCE = os.path.expanduser("~/Desktop/playmusic.php")
def main():
    """Scrape mp3 URLs out of a saved songtaste player page and download
    each track into MUSIC_ROOT, renaming it from its ID3 tags."""
    if not os.path.exists(MUSIC_ROOT):
        os.makedirs(MUSIC_ROOT)
    #file = codecs.open('/home/calvin/Desktop/playmusic.php', 'r', 'utf-8')
    #p = re.compile(r"(?<=WrtSongLine.*)http://[\da-z/.]*mp3")
    # FIXME
    p = re.compile(r"http://[\da-z/.]*\.mp3")
    songuri = []
    file = open(MUSIC_HTML_SOUCE)
    for line in file:
        m = p.search(line)
        if m:
            songuri.append(m.group())
    # FIXME
    # Deduplicate: the same track URL can appear several times in the page.
    uriSet = Set(songuri)
    for url in uriSet:
        localFile = os.path.join(MUSIC_ROOT, os.path.basename(url))
        try:
            download(url, localFile)
            smartRename(localFile)
        # Python 2 except syntax; best-effort per-track error reporting.
        except Exception, e:
            print "Error downloading from %s" % url
            # Look at me: if 403 returned by server, regenerate the MUSIC_HTML_SOUCE and try again
            print e
            # os.unlink(localFile)
def download(remote, local):
    """Fetch *remote* over HTTP with browser-like headers (the server
    rejects requests without a plausible User-Agent/Referer) and write
    the body to the file *local* in binary mode."""
    print "downloading from %s..." % remote
    # Sample request/response captured from a browser session, kept for
    # reference (this bare string is a no-op statement):
    """
    GET /201212162147/5a928a315369cd293aa2dae5d6c182b4/d/dd/dd3db4d5f4f9600e5fef065377976cbc.mp3 HTTP/1.1
    Host: md.songtaste.com
    Connection: keep-alive
    User-Agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.4 (KHTML, like Gecko) Chrome/22.0.1229.94 Safari/537.4
    Accept: */*
    Referer: http://songtaste.com/song/3017342/
    Accept-Encoding: gzip,deflate,sdch
    Accept-Language: zh-CN,zh;q=0.8
    Accept-Charset: UTF-8,*;q=0.5
    Cookie: __utma=148846773.1768489944.1355665648.1355665648.1355665648.1; __utmb=148846773.2.10.1355665648; __utmc=148846773; __utmz=148846773.1355665648.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); pgv_pvi=2716645376; pgv_si=s244701184; valid=1
    HTTP/1.0 200 OK
    Expires: Fri, 11 Jan 2013 08:59:47 GMT
    Date: Wed, 12 Dec 2012 08:59:47 GMT
    Server: nginx/0.8.36
    Content-Type: audio/mpeg
    Content-Length: 15051723
    Last-Modified: Wed, 28 Mar 2012 14:47:32 GMT
    Cache-Control: max-age=2592000
    srvtag: CAIYUN-SR024
    Accept-Ranges: bytes
    Age: 362893
    Via: 1.0 wzpy186:80 (Cdn Cache Server V2.0), 1.0 jsyc76:8101 (Cdn Cache Server V2.0)
    Connection: keep-alive
    """
    headers = {
        'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.4 (KHTML, like Gecko) Chrome/22.0.1229.94 Safari/537.4',
        'Referer':'http://songtaste.com/'
    }
    request = urllib2.Request(url = remote, headers = headers)
    # NOTE(review): the whole file is buffered in memory before writing;
    # fine for mp3-sized downloads.
    response = urllib2.urlopen(request).read()
    with open(local, "wb") as musicData:
        musicData.write(response)
    #urllib.urlretrieve(url, local)
def smartRename(filePath):
    """Rename an mp3 to '<title>-<artist>.mp3' based on its ID3 tag.

    Falls back to the original basename when no usable tag is present.
    """
    # http://www.blog.pythonlibrary.org/2010/04/22/parsing-id3-tags-from-mp3s-using-python/
    audioFile = eyed3.load(filePath)
    filename, extension = os.path.splitext(filePath)
    if audioFile.tag:
        if audioFile.tag.title:
            filename = audioFile.tag.title
        if audioFile.tag.artist:
            # I want music file named like artist-title
            # filename = '%s-%s' % (filename, audioFile.tag.artist)
            # http://greaterdebater.com/blog/gabe/post/7
            # http://code.activestate.com/recipes/578333-python-string-concatenation/
            filename = ''.join([filename, "-", audioFile.tag.artist])
        elif audioFile.tag.album:
            # fallback
            # filename = audioFile.tag.album
            # NOTE(review): this fallback is a no-op (pass); the album
            # name is never actually used -- confirm intent.
            pass
    newname = "".join((filename, extension))
    # filename may now be a bare tag title, so rebuild the full path in the
    # file's original directory before renaming.
    newfilepath = os.path.join(os.path.dirname(os.path.abspath(filePath)), newname)
    os.rename(filePath, newfilepath)
def test():
    """Ad-hoc smoke test: download one known track to ./test.mp3."""
    url = "http://md.songtaste.com/201212162205/1b09794e98c77096cfd78a6c02fd84c5/d/d5/d5f8c5b40045c6908b7aa85a2086bb32.mp3"
    download(url, "test.mp3")

if __name__ == "__main__":
    main()
    #test()
UTF-8
|
Python
| false | false | 2,012 |
19,275,813,246,958 |
ba2637f309c206161020d9defb5aaaf8d8b8ae16
|
5d2784bc4ce1dc01f35c94f21f60ed5faf97d23c
|
/zinnia/views/authors.py
|
6a73a71bac10ff9023d3e42abd0788b4c6abd215
|
[
"BSD-2-Clause"
] |
permissive
|
westinedu/newertrends
|
https://github.com/westinedu/newertrends
|
db8155bbc46198d25399029db8f0c7ed9ca88a5d
|
8ef18178de65027c7f908efd3e513e6dc8a2ac2c
|
refs/heads/master
| 2020-04-07T13:54:08.584365 | 2012-07-24T08:27:50 | 2012-07-24T08:27:50 | 5,103,917 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""Views for Zinnia authors"""
from django.shortcuts import get_object_or_404
from django.views.generic.list import ListView
from django.views.generic.list import BaseListView
from zinnia.models import Author
from zinnia.settings import PAGINATION
from zinnia.views.mixins.callable_queryset import CallableQuerysetMixin
from zinnia.views.mixins.templates import EntryQuerysetTemplateResponseMixin
class AuthorList(CallableQuerysetMixin, ListView):
    """View returning a list of all published authors.

    The queryset is left as a callable so it is re-evaluated on every
    request rather than frozen at import time (see CallableQuerysetMixin).
    """
    queryset = Author.published.all
class AuthorDetail(EntryQuerysetTemplateResponseMixin, BaseListView):
    """Paginated list of the published entries written by one author."""
    model_type = 'author'
    paginate_by = PAGINATION

    def get_model_name(self):
        """Template lookup uses the author's username as the model name."""
        return self.author.username

    def get_queryset(self):
        """Resolve the author from the URL kwargs (404 on miss) and
        return that author's published entries."""
        username = self.kwargs['username']
        self.author = get_object_or_404(Author, username=username)
        return self.author.entries_published()

    def get_context_data(self, **kwargs):
        """Expose the resolved author to the template context."""
        context = super(AuthorDetail, self).get_context_data(**kwargs)
        context['author'] = self.author
        return context
|
UTF-8
|
Python
| false | false | 2,012 |
19,507,741,461,914 |
8ca13810372562a47a4b84d2f7565a9a40e22a25
|
124dbb018fa6e8c4d6d947f739b88ddb332d6c2c
|
/ploopie.py
|
b12501a54962ca46f1ebe82d2b245f7cf6292b9e
|
[] |
no_license
|
pabgn/Babyplop
|
https://github.com/pabgn/Babyplop
|
2437c953c4437074ffa65690be7bf8c48b02e40f
|
4ec903af2d79a04e13f6a438393bce5f74a34e77
|
refs/heads/master
| 2021-01-01T17:28:17.294750 | 2012-06-22T23:25:45 | 2012-06-22T23:25:45 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
class Ploopie:
    """A creature whose traits are derived from four grandparents."""
    # Class-level defaults; each is replaced with a trait object in __init__.
    eyes = False
    hair = False
    skin = False
    age = 0
    gender = False

    def __init__ (self, parentAA, parentAB, parentBA, parentBB):
        # NOTE(review): Eyes, Hair, Skin and Gender are not defined or
        # imported in this module -- presumably provided elsewhere; confirm.
        self.eyes = Eyes(parentAA, parentAB, parentBA, parentBB)
        self.hair = Hair(parentAA, parentAB, parentBA, parentBB)
        self.skin = Skin(parentAA, parentAB, parentBA, parentBB)
        self.gender = Gender(parentAA, parentAB, parentBA, parentBB)
|
UTF-8
|
Python
| false | false | 2,012 |
10,660,108,876,805 |
5db858271d0538e1567dd70fdf7d03215d7ad3f6
|
34ace45486afe6ba24739a44ac6f2500af262ead
|
/SbsOnDemand/Media.py
|
c9baf43587a6110c5f6df3f0509142afd742aef1
|
[] |
no_license
|
yec/sbsOnDemand
|
https://github.com/yec/sbsOnDemand
|
088e21fb85cfcf618d78bd691a0c1dd7e4411a54
|
31e32edf1b11cd07f96e4f262fc926de33506d3c
|
refs/heads/master
| 2021-01-15T18:15:12.657984 | 2013-01-03T11:25:03 | 2013-01-03T11:25:03 | 7,362,139 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
## @namespace SbsOnDemand::Media
# Module for managing a/v media
import urllib
import urlparse
import config
import re
# Asset-type labels exactly as they appear in the feed's plfile$assetTypes list.
TYPE_RTMP = 'RTMP secured'
TYPE_PUBLIC = 'Public'
## Raised when a video-only method is invoked on a media item whose
## contentType does not support it.
class InvalidMediaType(Exception):
    """The media's content type does not support the requested operation."""
## Raised when a method expects an RTMP URL but none is available.
class NotRTMPException(Exception):
    def __str__(self):
        message = "Not an RTMP URL"
        return message
## Represents a media (rendition) for a single video
class Media(object):
## Creates a media object
# @param params the media data (one rendition entry from the feed)
def __init__(self,params):
    # Copy each plfile$-prefixed feed field onto a plain attribute,
    # defaulting to None when the key is absent from the entry.
    feed_fields = (
        ('audioChannels', 'plfile$audioChannels'),
        ('audioSampleRate', 'plfile$audioSampleRate'),
        ('bitrate', 'plfile$bitrate'),
        ('checksums', 'plfile$checksums'),
        ('contentType', 'plfile$contentType'),
        ('duration', 'plfile$duration'),
        ('expression', 'plfile$expression'),
        ('fileSize', 'plfile$fileSize'),
        ('frameRate', 'plfile$frameRate'),
        ('format', 'plfile$format'),
        ('height', 'plfile$height'),
        ('isDefault', 'plfile$isDefault'),
        ('language', 'plfile$language'),
        ('sourceTime', 'plfile$sourceTime'),
        ('url', 'plfile$downloadUrl'),
        ('width', 'plfile$width'),
        ('assetTypes', 'plfile$assetTypes'),
        ('id', 'id'),
    )
    for attr, key in feed_fields:
        setattr(self, attr, params.get(key, None))
    # Lazily-populated caches for the SMIL document and its parsed DOM.
    self._smil = None
    self._smilDOM = None
## Get the raw url for the media
# @return a absolute url to the content or SMIL file for the content
def getUrl(self):
    # May be None when the feed entry carried no plfile$downloadUrl.
    return self.url
## Get the raw SMIL data for the media
# @return a string of raw SMIL data from the server
# @warning This function is only valid for video media, calling it on other contentTypes will trigger a InvalidMediaType exception
def getSMIL(self):
    if self.contentType != "video":
        raise InvalidMediaType();
    if self._smil is None:
        smil_uri = ''
        opener = urllib.FancyURLopener(config.PROXY)
        # Scrape the video's public web page for its og:video meta tag.
        fullurl = "{0}{1}".format(config.ONDEMAND_UI_BASE_URI,self.id)
        f = opener.open(fullurl)
        og_video = re.findall('<.*?og:video.*?>', f.read())
        if (len(og_video) < 1):
            print "Can't find the video part on the webpage. HELP!"
            pass #Need to complain loudly
        else:
            # The og:video content URL carries the SMIL release URL in a
            # query-string parameter named by config.RELEASE_URL_KEY.
            m = re.search('content="(.+?)"', og_video[0])
            videourl = m.group(1)
            p = urlparse.parse_qs(videourl)
            smil_uri = p.get(config.RELEASE_URL_KEY,[''])[0]
            if (smil_uri != ''):
                smil_uri += "&format=smil"
        if len(smil_uri) > 0:
            # Fetch and cache the SMIL document for later calls.
            f = opener.open(smil_uri)
            self._smil = f.read()
    return self._smil
## Get the SMIL data parsed by xml.dom.minidom
# @return a Document parsed by xml.dom.minidom
# @warning This function is only valid for video media, calling it on other contentTypes will trigger a InvalidMediaType exception
def getSMILDOM(self):
    if self.contentType != "video":
        raise InvalidMediaType();
    # Parse lazily; the DOM is cached on the instance.
    if self._smilDOM is None:
        self._parseSMIL()
    return self._smilDOM
## Parse the SMIL data with xml.dom.minidom
# @warning This function is only valid for video media, calling it on other contentTypes will trigger a InvalidMediaType exception
def _parseSMIL(self):
    if self.contentType != "video":
        raise InvalidMediaType();
    # Local import keeps minidom out of module start-up cost.
    import xml.dom.minidom
    self._smilDOM = xml.dom.minidom.parseString(self.getSMIL())
## Get the base url
# @return the base url (usually for rtmp streams in the form of "rtmp://server/path?auth=token")
# @warning This function is only valid for video media, calling it on other contentTypes will trigger a InvalidMediaType exception
def getBaseUrl(self):
    if self.contentType != "video":
        raise InvalidMediaType();
    if self._smilDOM is None:
        self._parseSMIL()
    # The base URL lives in a <meta base="..."> element of the SMIL body.
    # Implicitly returns None when no such element is present.
    for meta in self._smilDOM.getElementsByTagName('meta'):
        if len(meta.getAttribute('base'))>0:
            return meta.getAttribute('base')
## Get the video url
# @return the video url (usually for rtmp streams in the form of "mp4:path/video.mp4")
# @warning This function is only valid for video media, calling it on other contentTypes will trigger a InvalidMediaType exception
def getVideoUrl(self):
if self.contentType != "video":
raise InvalidMediaType();
print self.assetTypes
if self.assetTypes[0] == TYPE_PUBLIC:
return self.url
elif self.assetTypes[0] == TYPE_RTMP:
medias = self._videosFromSmil()
try:
return self._watchableUrlFromSrc(medias[self.bitrate])
except:
return medias.items().pop()[1]
## Get captions for the media
# @return an array of dict objects, each containing the src, lang, and type of the caption
# @warning This function is only valid for video media, calling it on other contentTypes will trigger a InvalidMediaType exception
def getCaptions(self):
    if self.contentType != "video":
        raise InvalidMediaType();
    if self._smilDOM is None:
        self._parseSMIL()
    captions = []
    for textstream in self._smilDOM.getElementsByTagName('textstream'):
        # Skip placeholder entries with no source file.
        if len(textstream.getAttribute('src')) == 0:
            continue
        captions.append({
            "src":textstream.getAttribute('src'),
            "lang":textstream.getAttribute('lang'),
            "type":textstream.getAttribute('type')
        })
    return captions
def _watchableUrlFromSrc(self,src):
    # Rewrite "...<bitrate>K.mp4" into the ",<bitrate>,K.mp4" form expected
    # by the player, then append the configured playback suffix.
    url = re.sub('(\d+)K.mp4', r',\1,K.mp4', src)
    return url + config.VALID_URL_SUFFIX
## Get the associated videos from smil file
# @return a dict mapping each rendition's integer bitrate to its rtmp src path
def _videosFromSmil(self):
    if self._smilDOM is None:
        self._parseSMIL()
    medias = {}
    for video in self._smilDOM.getElementsByTagName('video'):
        if len(video.getAttribute('src'))>0:
            bitrate = int(video.getAttribute('system-bitrate'))
            src = video.getAttribute('src')
            medias[bitrate] = src
            # Debug output listing each discovered rendition.
            print '{0} {1}'.format(bitrate, src)
    return medias
# Read-only property aliases for the getter methods above.
## @see getBaseUrl
baseUrl = property(getBaseUrl)
## @see getVideoUrl
videoUrl = property(getVideoUrl)
## @see getCaptions
captions = property(getCaptions)
|
UTF-8
|
Python
| false | false | 2,013 |
19,602,230,769,298 |
aa1619387ec5ef0194c020ba9af2f8b1f55c6bab
|
c2e06f868f629377150b3e161017c47e9406007d
|
/addons/source-python/packages/source-python/commands/server/command.py
|
7b30bf543f012de804195674aaa5434c1f9e6dba
|
[] |
no_license
|
MrMalina/Source.Python
|
https://github.com/MrMalina/Source.Python
|
fe0a145fa20deff7d68011b7fd5fd64506356404
|
c07cbd146a9041b2a5ef715b26e645131c02510c
|
refs/heads/master
| 2020-12-14T09:49:41.710691 | 2014-03-19T06:14:52 | 2014-03-19T06:14:52 | 17,912,771 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# ../commands/server/command.py
# =============================================================================
# >> IMPORTS
# =============================================================================
# Source.Python Imports
# Commands
from commands.command import _BaseCommand
from commands.server.manager import ServerCommandManager
# =============================================================================
# >> ALL DECLARATION
# =============================================================================
# Public API of this module.
__all__ = [
    'ServerCommand',
]
# =============================================================================
# >> CLASSES
# =============================================================================
class ServerCommand(_BaseCommand):
    '''Decorator class used to register a server command'''
    # Store the class used to (un)register server commands
    # (consumed by _BaseCommand when the decorated callback is registered).
    _manager_class = ServerCommandManager
|
UTF-8
|
Python
| false | false | 2,014 |
1,666,447,340,439 |
6bb5e8d6fdd6d3f301b1a2498425ea3fd1d960c6
|
5ad915500fdc443e9b7dc8ae098360849b840de8
|
/worklog/admin.py
|
43896eff7a827f4c02b3ec2452385b353c91f430
|
[] |
no_license
|
bmbouter/worklog
|
https://github.com/bmbouter/worklog
|
dfe9168d815197778b2ba46d368908804fb113a2
|
e8e8e35a5c4c7cb888b6a8aab49be3b74be564ef
|
refs/heads/master
| 2021-01-25T05:16:37.428161 | 2012-12-12T22:31:09 | 2012-12-12T22:31:09 | 699,060 | 7 | 2 | null | false | 2014-01-23T14:14:23 | 2010-06-02T11:59:35 | 2013-10-02T08:34:10 | 2012-12-12T22:31:33 | 509 | 6 | 2 | 3 |
Python
| null | null |
import csv
import operator
from django.contrib import admin
from django.contrib.auth.models import User
from django.http import HttpResponse
from django.conf.urls.defaults import *
from django.db.models import Sum
from worklog import timesheet
from worklog.models import WorkItem, Job, WorkLogReminder, BillingSchedule, Funding
from worklog.models import BiweeklyEmployee, Holiday, WorkPeriod
def mark_invoiced(modeladmin, request, queryset):
    """Admin action: set invoiced=True on the selected work items."""
    queryset.update(invoiced=True)
mark_invoiced.short_description = "Mark selected work items as invoiced."

def mark_not_invoiced(modeladmin, request, queryset):
    """Admin action: set invoiced=False on the selected work items."""
    queryset.update(invoiced=False)
mark_not_invoiced.short_description = "Mark selected work items as not invoiced."

def mark_invoiceable(modeladmin, request, queryset):
    """Admin action: clear the do_not_invoice flag on the selection."""
    queryset.update(do_not_invoice=False)
mark_invoiceable.short_description = "Unmark selected items as 'Do Not Invoice.'"

def mark_not_invoiceable(modeladmin, request, queryset):
    """Admin action: set the do_not_invoice flag on the selection."""
    queryset.update(do_not_invoice=True)
mark_not_invoiceable.short_description = "Mark selected items as 'Do Not Invoice.'"
class WorkItemAdmin(admin.ModelAdmin):
    """Admin for WorkItem: adds CSV export and an hours total to the changelist."""
    list_display = ('user','date','hours','text','job','invoiced','do_not_invoice')
    list_filter = ('user','date','job', 'invoiced','do_not_invoice')
    actions = [mark_invoiced, mark_not_invoiced, mark_invoiceable, mark_not_invoiceable]
    #sort the items by time in descending order
    ordering = ['-date']

    def changelist_view(self, request, extra_context=None):
        # Look for 'export_as_csv' in the HTTP Request header. If it is found,
        # we export CSV. If it is not found, defer to the super class.
        # NOTE(review): mimetype= and cl.query_set/get_query_set below are
        # old Django APIs (pre-1.7) -- this module targets that era.
        if 'export_as_csv' in request.POST:
            def getusername(item):
                if item.user.last_name:
                    return '{0} {1}'.format(item.user.first_name,item.user.last_name)
                # if no first/last name available, fall back to username
                else:
                    return item.user.username
            csvfields = [
                # Title, function on item returning value
                ('User Key',operator.attrgetter('user.pk')),
                ('User Name',getusername),
                ('Job',operator.attrgetter('job.name')),
                ('Date',operator.attrgetter('date')),
                ('Hours',operator.attrgetter('hours')),
                ('Task',operator.attrgetter('text')),
            ]
            ChangeList = self.get_changelist(request)
            # see django/contrib/admin/views/main.py for ChangeList class.
            cl = ChangeList(request, self.model, self.list_display, self.list_display_links, self.list_filter,
                self.date_hierarchy, self.search_fields, self.list_select_related, self.list_per_page, self.list_max_show_all, self.list_editable, self)
            header = list(s[0] for s in csvfields)
            rows = [header]
            # Iterate through currently displayed items.
            for item in cl.query_set:
                row = list(s[1](item) for s in csvfields)
                rows.append(row)
            response = HttpResponse(mimetype='text/csv')
            response['Content-Disposition'] = 'attachment; filename=worklog_export.csv'
            writer = csv.writer(response)
            for row in rows:
                writer.writerow(row)
            return response
        else:
            # Get total number of hours for current queryset
            ChangeList = self.get_changelist(request)
            # see django/contrib/admin/views/main.py for ChangeList class.
            cl = ChangeList(request, self.model, self.list_display, self.list_display_links, self.list_filter,
                self.date_hierarchy, self.search_fields, self.list_select_related, self.list_per_page, self.list_max_show_all, self.list_editable, self)
            if not extra_context:
                extra_context = cl.get_query_set(request).aggregate(Sum('hours'))
            else:
                extra_context.update(cl.get_query_set(request).aggregate(Sum('hours')))
            return super(WorkItemAdmin,self).changelist_view(request, extra_context)
class BillingScheduleInline(admin.StackedInline):
    # Edit a job's billing schedules inline on the Job admin page.
    model = BillingSchedule

class FundingInline(admin.StackedInline):
    # Edit a job's funding records inline on the Job admin page.
    model = Funding
class JobAdmin(admin.ModelAdmin):
    # Jobs show their invoicing window and flag; billing schedules and
    # funding are edited inline.
    list_display = ('name','open_date','close_date','do_not_invoice')
    actions = [mark_invoiceable, mark_not_invoiceable]
    inlines = [
        BillingScheduleInline,
        FundingInline,
    ]
class WorkPeriodAdmin(admin.ModelAdmin):
    # Pay periods, filterable by their date range.
    list_display = ('payroll_id', 'start_date', 'end_date',)
    list_filter = ('start_date', 'end_date',)

class HolidayAdmin(admin.ModelAdmin):
    # Holidays, filterable by their date range.
    list_display = ('description', 'start_date', 'end_date',)
    list_filter = ('start_date', 'end_date',)
# Wire the worklog models into the Django admin site.
admin.site.register(WorkItem, WorkItemAdmin)
admin.site.register(Job, JobAdmin)
admin.site.register(WorkLogReminder)
admin.site.register(BiweeklyEmployee)
admin.site.register(WorkPeriod, WorkPeriodAdmin)
admin.site.register(Holiday, HolidayAdmin)
|
UTF-8
|
Python
| false | false | 2,012 |
7,945,689,522,545 |
3b9aa0e1697a25a3adcbc3e30daedde0c0ade834
|
115e12062b6fe3e51eba8868583a0703244e0584
|
/gstranslator2-service/gstranslator2_service.py
|
415fae40b6c6ffcdde31e51898da965c2ecad410
|
[] |
no_license
|
thof/gstranslator2
|
https://github.com/thof/gstranslator2
|
49c13f0de0964bfe406413bb973057d52813cbcc
|
32bb2befaefbc04014041fff7acf8149020a9275
|
refs/heads/master
| 2016-09-15T17:49:36.363502 | 2013-03-31T12:30:42 | 2013-03-31T12:30:42 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python2
#
# main.py
# Copyright (C) 2012 thof <[email protected]>
#
# gstranslator2-service is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# gstranslator2-service is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
import urllib2
import json
import signal
from gi.repository import Gio, Gtk, Gdk
import dbus.service
import gobject
from copy import copy
from dbus.mainloop.glib import DBusGMainLoop
import textwrap
#import pprint
class Gstranslator(dbus.service.Object):
def __init__(self):
    """Register the object on the session bus and initialise state tracking."""
    bus_name = dbus.service.BusName('com.github.thof.gstranslator2', bus=dbus.SessionBus())
    dbus.service.Object.__init__(self, bus_name, '/com/github/thof/gstranslator2')
    self.translation = ''
    self.clipboardCurrent = ''
    self.clipboardPrevious = ''
    # Wrap widths for the compact and expanded renderings; may be
    # overridden from GSettings in load_config.
    self.trans_width = 80
    self.trans_expanded_width = 120
    # Watch the X PRIMARY selection (mouse selection), not CLIPBOARD.
    self.clipboard = Gtk.Clipboard.get(Gdk.SELECTION_PRIMARY)
    self.languages_list = []
    self.current_index = 0
    # states[0] = previous, states[1] = current flag vector; see setState.
    self.states = []
    self.states.append([0, 0, 0, 0, 0])
    self.states.append([0, 0, 0, 0, 0])
    self.setState()
    self.clipboardPrevious = self.clipboard.wait_for_text()
    # SIGALRM is used to abort hung HTTP requests (see handler).
    signal.signal(signal.SIGALRM, self.handler)
    self.load_config()
def getClipboard(self):
    """Fetch the PRIMARY selection and normalise it via fix_string."""
    self.clipboardCurrent = self.clipboard.wait_for_text()
    text = self.fix_string(self.clipboardCurrent)
    return text

def translateText(self, text, expanded):
    """Translate *text* and store the rendering on self.translation."""
    translation = self.getGoogleTranslation(text)
    if expanded:
        self.translation = self.parseJsonExpandedTranslation(translation)
    else:
        self.translation = self.parseJsonTranslation(translation)

def translateCustom(self, text):
    """Translate *text* and return the expanded rendering directly."""
    translation = self.getGoogleTranslation(text)
    translation = self.parseJsonExpandedTranslation(translation)
    return translation
def getGoogleTranslation(self, text_trans):
    """Request a translation from Google's unofficial translate_a endpoint
    and return the raw JSON response body as a string."""
    # Spoofed User-Agent: the endpoint rejects default urllib2 clients.
    headers = {'User-Agent' : 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'}
    text = urllib2.quote(text_trans)
    # sl/tl/hl come from the currently selected language pair.
    url = 'http://translate.google.com/translate_a/t?client=json&sl='+self.languages_list[self.current_index][0]+'&tl='+self.languages_list[self.current_index][1]+'&hl='+self.languages_list[self.current_index][1]+'&text='+text
    request = urllib2.Request(url, '', headers)
    #print("Request. Wait...")
    #signal.alarm(5)
    response = urllib2.urlopen(request)
    #signal.alarm(0)
    #print("Got Answer")
    #print response.info()
    translation = response.read()
    return translation
def parseJsonTranslation(self, translation):
    """Render the compact view: wrapped sentence translation plus, when the
    response carries a 'dict' section, each part of speech with its
    comma-separated terms."""
    term_string = ''
    dump = json.loads(translation)
    #pprint.pprint(dump)
    dict_path = dump['sentences']
    trans_string = self.parseBasicTranslation(dict_path, self.trans_width)
    try:
        dict_path = dump['dict']
    except KeyError:
        # No dictionary section: return the sentences only, dropping the
        # trailing blank line added by parseBasicTranslation.
        return trans_string[:-2]
    for d in dict_path:
        trans_string += d['pos']+"\n"
        for term in d['terms']:
            term_string += term+", "
        term_string = textwrap.fill(term_string, self.trans_width)
        trans_string += term_string[:-1]+"\n\n"
        term_string = ''
    return trans_string[:-2]
def parseJsonExpandedTranslation(self, translation):
    """Render the expanded view: sentences plus, per part of speech, each
    target word with its reverse translations."""
    term_string = ''
    dump = json.loads(translation)
    #pprint.pprint(dump)
    dict_path = dump['sentences']
    trans_string = self.parseBasicTranslation(dict_path, self.trans_expanded_width)
    try:
        dict_path = dump['dict']
    except KeyError:
        # No dictionary section: sentences only, minus the trailing blank line.
        return trans_string[:-2]
    for d in dict_path:
        trans_string += d['pos']+"\n"
        for e in d['entry']:
            term_string += e['word']+" - "
            try:
                for r in e['reverse_translation']:
                    term_string += r+", "
            except KeyError:
                # Entry has no reverse translations: drop the " - " suffix.
                term_string = term_string[:-3]
            term_string = textwrap.fill(term_string, self.trans_expanded_width)
            trans_string += term_string[:-1]+"\n"
            term_string = ''
        trans_string += "\n"
    return trans_string[:-2]
def parseBasicTranslation(self, dict_path, width):
    """Join the 'sentences' entries and wrap them to *width* columns.

    Short translations (< 50 chars) are shown as "original - translation";
    longer ones show the translation alone.  A trailing blank line is
    appended so sections separate cleanly in the rendered popup.
    """
    source = ''.join(d['orig'] for d in dict_path)
    translated = ''.join(d['trans'] for d in dict_path)
    if len(translated) < 50:
        wrapped = textwrap.fill(source + " - " + translated, width)
    else:
        wrapped = textwrap.fill(translated, width)
    return wrapped + "\n\n"
def getTranslation(self, state, text=None):
    """Dispatch a translation request by state code.

    state 0/1: default/expanded translation of the clipboard contents;
    state 2/3: default/expanded translation of the supplied *text*.
    Any other state is ignored, matching the original branch table.
    """
    if state not in (0, 1, 2, 3):
        return
    expanded = state in (1, 3)
    source = self.getClipboard() if state in (0, 1) else text
    self.translateText(source, expanded)
def openTranslation(self, state, checkClipboard=True, text=None):
    """Prepare and return a translation; "-222" signals the UI to simply
    re-show the previous popup because the clipboard is unchanged."""
    if checkClipboard:
        if self.compareClipboards(): # if contents of previous and current clipboard are the same then use previous translation
            self.setState(state)
            return "-222"
    self.getTranslation(state, text)
    self.setState(state)
    return self.translation

def closeTranslation(self, state):
    """Close the popup ("-111") when the clipboard is stale, otherwise
    return a freshly prepared translation."""
    if self.compareClipboards(): # if an old word is in the clipboard then close
        self.setState()
        return "-111"
    else: # otherwise prepare new translation
        self.getTranslation(state)
        return self.translation

def checkCond(self, defPrev, expPrev, defCurr, expCurr):
    """True when the first two flags of the previous and current state
    vectors match the given pattern (see setState for flag meanings)."""
    if self.states[0][0]==defPrev and self.states[0][1]==expPrev and self.states[1][0]==defCurr and self.states[1][1]==expCurr:
        return True
    else:
        return False
@dbus.service.method('com.github.thof.gstranslator2')
def dbusGetTranslation(self, text, state):
    """D-Bus entry point: route a request through the open/close state machine.

    The numbered comments refer to the author's state-transition table;
    flag vectors are [clip, clip-expanded, input, input-expanded,
    lang-changed] as documented in setState.
    """
    #state = int(state_string)
    # if languages were changed then prepare new translation in any case
    if self.isOpen(4):
        return self.openTranslation(state, False, text)
    # conditions regarding translation from input field
    elif state == 0 and (self.isOpen(2) or self.isOpen(3)):
        return self.closeTranslation(state)
    elif state == 2 or state == 3:
        return self.openTranslation(state, False, text)
    # conditions regarding translation from clipboard
    elif (state==0 or state==1) and self.checkCond(0,0,0,0): # 1 and 2
        return self.openTranslation(state)
    elif (state==0 and self.checkCond(0, 0, 1, 0)) or (state==1 and self.checkCond(0, 0, 0, 1)): # 3 and 6
        return self.closeTranslation(state)
    elif (state==1 and self.checkCond(0, 0, 1, 0)) or (state==0 and self.checkCond(0, 0, 0, 1)): # 4 and 5
        return self.openTranslation(state, False)
    # 3rd level
    elif state==0 and self.checkCond(1,0,0,0): # 1st
        return self.openTranslation(state)
    elif state==1 and self.checkCond(1,0,0,0): # 2nd
        return self.openTranslation(state, False)
    elif state==0 and self.checkCond(1,0,0,1): # 3rd
        return self.openTranslation(state, False)
    elif state==1 and self.checkCond(1,0,0,1): # 4th
        return self.closeTranslation(state)
    elif state==0 and self.checkCond(0,1,1,0): # 5th
        return self.closeTranslation(state)
    elif state==1 and self.checkCond(0,1,1,0): # 6th
        return self.openTranslation(state, False)
    elif state==0 and self.checkCond(0,1,0,0): # 7th
        return self.openTranslation(state, False)
    elif state==1 and self.checkCond(0,1,0,0): # 8th
        return self.openTranslation(state)
    # NOTE(review): unmatched combinations implicitly return None over D-Bus.
@dbus.service.method('com.github.thof.gstranslator2')
def getCurrentLangs(self, text=''):
    """Return the active language pair formatted as "src -> dst"."""
    return self.languages_list[self.current_index][0]+" -> "+self.languages_list[self.current_index][1]

@dbus.service.method('com.github.thof.gstranslator2')
def changeLangs(self, direction):
    """Cycle to the next (truthy direction) or previous language pair,
    wrapping around; flags state 4 so the next request re-translates."""
    self.setState(4)
    if direction:
        if(self.current_index == len(self.languages_list)-1):
            self.current_index = 0
        else:
            self.current_index = self.current_index+1
    else:
        if(self.current_index == 0):
            self.current_index = len(self.languages_list) - 1
        else:
            self.current_index = self.current_index - 1
    return self.languages_list[self.current_index][0]+" -> "+self.languages_list[self.current_index][1]
def fix_string (self, text):
    """Normalise clipboard text before sending it for translation."""
    text = text.strip()
    # For English sources, trim leading/trailing non-alphanumeric characters.
    if self.languages_list[self.current_index][0]=="en":
        for i in range(len(text)):
            if(text[i].isalnum()):
                text = text[i:]
                break
        for i in range(len(text)):
            if(text[-(i+1)].isalnum()):
                text = text[:len(text)-i]
                break
    # Undo hyphenation and hard line wrapping carried over from the copy.
    text = text.replace('-\n', '')
    text = text.replace('\n', ' ')
    # ALL-CAPS text translates poorly; lower-case it.
    if text.isupper():
        text = text.lower()
    return text

def handler (self, signum, frame):
    """SIGALRM handler used to abort a hung HTTP request."""
    print 'Signal handler called with signal', signum
    raise IOError("Could not open url!")
def compareClipboards(self):
    """Return True when the PRIMARY selection is empty or unchanged since
    the last check; otherwise remember the new contents and return False."""
    self.clipboardCurrent = self.clipboard.wait_for_text()
    if self.clipboardCurrent == None or self.clipboardCurrent == self.clipboardPrevious:
        return True
    else:
        self.clipboardPrevious = self.clipboardCurrent
        return False

def setState (self, number=None):
    """Shift the current flag vector into the 'previous' slot, reset the
    current one, and optionally set a single flag."""
    self.states[0] = copy(self.states[1])
    self.states[1][0] = 0 # translation from clipboard
    self.states[1][1] = 0 # expanded translation from clipboard
    self.states[1][2] = 0 # translation from input field
    self.states[1][3] = 0 # expanded translation from input field
    self.states[1][4] = 0 # change of languages
    if number is not None:
        self.states[1][number] = 1
    # Debug dump of both state vectors.
    print "prev: ",self.states[0][0]," ",self.states[0][1]," ",self.states[0][2]," ",self.states[0][3]," ",self.states[0][4]
    print "curr: ",self.states[1][0]," ",self.states[1][1]," ",self.states[1][2]," ",self.states[1][3]," ",self.states[1][4]
    print "\n"
def isOpen(self, state):
    """True when flag *state* is set in the current state vector."""
    return True if self.states[1][state] else False

def wasOpen(self, state):
    """True when flag *state* was set in the previous state vector."""
    return True if self.states[0][state] else False
def load_config(self):
    """Read wrap widths and the language table from the GSettings schema.

    Bug fix: the expanded width read from the 'width-wide' key was stored
    as ``trans_full_width``, but every consumer (and __init__) uses
    ``trans_expanded_width`` -- so the setting was silently ignored.  Both
    attributes are now assigned; the old name is kept as a legacy alias.
    """
    schema = 'org.gnome.shell.extensions.gstranslator2'
    settings = Gio.Settings.new(schema)
    self.trans_width = int(settings.get_string('width'))
    self.trans_expanded_width = int(settings.get_string('width-wide'))
    # Legacy alias in case any external code referenced the old attribute.
    self.trans_full_width = self.trans_expanded_width
    self.load_languages(settings.get_string('items'))
def load_languages(self, lang_list):
    """Parse a "src;dst;enabled|..." string and append each enabled
    [src, dst] pair to self.languages_list."""
    for entry in lang_list.split('|'):
        fields = entry.split(';')
        if int(fields[2]):
            self.languages_list.append([fields[0], fields[1]])
def main(self):
    # Enter the GLib main loop; D-Bus method calls are dispatched from it.
    mainloop = gobject.MainLoop()
    mainloop.run()
if __name__ == "__main__":
    # Integrate D-Bus with the GLib main loop before exporting the object.
    DBusGMainLoop(set_as_default=True)
    gstrans = Gstranslator()
    print("Ready!")
    gstrans.main()
|
UTF-8
|
Python
| false | false | 2,013 |
15,229,954,041,934 |
bf5f0238048153e24f4f2365d5a0bce266e2f163
|
c4fe6a42f979af4740ac8cda953c19a5876f20b1
|
/example/display.py
|
b2893b5525a22a1545f69054289367d17b0a8446
|
[] |
no_license
|
HyperSuprime-Cam/distEst
|
https://github.com/HyperSuprime-Cam/distEst
|
3baa3c37cd2a623ea8a56a2d7b4f7eed2a1202f1
|
eea7699e91c4db0e549384cfedcb3e7fc9fbd490
|
refs/heads/master
| 2020-06-05T11:22:16.422808 | 2014-06-27T18:08:22 | 2014-06-27T18:08:22 | 16,705,545 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
#
# LSST Data Management System
# Copyright 2008, 2009, 2010 LSST Corporation.
#
# This product includes software developed by the
# LSST Project (http://www.lsst.org/).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the LSST License Statement and
# the GNU General Public License along with this program. If not,
# see <http://www.lsstcorp.org/LegalNotices/>.
#
import math
import numpy
import matplotlib.pyplot as plt
import lsst.afw.cameraGeom as cameraGeom
import lsst.afw.geom as afwGeom
import lsst.obs.hscSim as hscSim
import lsst.pipette.config as pipConfig
import lsst.pipette.distortion as pipDist
# Number of sample points per CCD edge when tracing distorted outlines.
SAMPLE = 100
def main(camera, distortionConfig):
    """Plot the distorted outline of every CCD in *camera*.

    Each CCD edge is sampled at SAMPLE points, mapped through the
    distortion model (actualToIdeal) and drawn with matplotlib.
    """
    fig = plt.figure(1)
    fig.clf()
    ax = fig.add_axes((0.1, 0.1, 0.8, 0.8))
    # ax.set_autoscale_on(False)
    # ax.set_ybound(lower=-0.2, upper=0.2)
    # ax.set_xbound(lower=-17, upper=-7)
    ax.set_title('Distorted CCDs')
    for raft in camera:
        raft = cameraGeom.cast_Raft(raft)
        for ccd in raft:
            ccd = cameraGeom.cast_Ccd(ccd)
            size = ccd.getSize()
            # NOTE(review): pixel dimensions are hard-coded and ccd.getSize()
            # is fetched but unused -- confirm the 2048x4096 assumption.
            width, height = 2048, 4096
            dist = pipDist.createDistortion(ccd, distortionConfig)
            # Walk the four edges of the CCD outline (closed polygon).
            corners = ((0.0,0.0), (0.0, height), (width, height), (width, 0.0), (0.0, 0.0))
            for (x0, y0), (x1, y1) in zip(corners[0:4],corners[1:5]):
                if x0 == x1 and y0 != y1:
                    # Vertical edge: sample along y.
                    yList = numpy.linspace(y0, y1, num=SAMPLE)
                    xList = [x0] * len(yList)
                elif y0 == y1 and x0 != x1:
                    # Horizontal edge: sample along x.
                    xList = numpy.linspace(x0, x1, num=SAMPLE)
                    yList = [y0] * len(xList)
                else:
                    raise RuntimeError("Should never get here")
                xDistort = []; yDistort = []
                for x, y in zip(xList, yList):
                    distorted = dist.actualToIdeal(afwGeom.Point2D(x, y))
                    xDistort.append(distorted.getX())
                    yDistort.append(distorted.getY())
                ax.plot(xDistort, yDistort, 'k-')
    plt.show()
if __name__ == '__main__':
    # Drive the plot with the HSC simulation camera and its distortion class.
    camera = hscSim.HscSimMapper().camera
    config = pipConfig.Config()
    config['class'] = "hsc.meas.match.hscDistortion.HscDistortion"
    main(camera, config)
|
UTF-8
|
Python
| false | false | 2,014 |
8,220,567,451,998 |
164d472903cc6252eccdf693fe9634d4fdbbc261
|
41ee2edcd294b4edb94b10461ed3acccde83407a
|
/feature/similarity/token_reducer.py
|
7b1eece4794e0e8c78a626a7c632214f6f759dfb
|
[] |
no_license
|
ucb-stat-157/Old-Machine
|
https://github.com/ucb-stat-157/Old-Machine
|
b50a057accc3abab4e229e002affca34384f2734
|
62fbcdcd1acd3ae9888320604ae159bf9a5b496a
|
refs/heads/master
| 2016-09-06T16:21:48.735584 | 2014-12-08T21:41:00 | 2014-12-08T21:41:00 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
#authors: Junjun Qian, Xiaoqin Zhou, Shirui Ouyang, Yanan Wang
"""
input: two different lines grouped by user
output: instance with gender and age
"""
from operator import itemgetter
import sys
# Reducer state: the previous record's key, plus the token carried from a
# type-"0" record to subsequent type-"1" records sharing the same key.
current_key = None
key = None
token = None
#input comes from STDIN
for line in sys.stdin:
    # remove leading and trailing whitespace
    line = line.strip()
    # parse the input we got from nb_step3_mapper.py
    line = line.split('\t')
    key = line[0]
    if (line[1]) == "0":
        token = line[2]
    #compute and output the predition, click and impression
    if current_key == key and line[1] == "1":
        # Same key as the previous record: emit the value paired with the
        # stored token, or -1 when no token was seen.
        if token != None:
            print "%s\t%s" % (line[2], token)
        else:
            print "%s\t%s" % (line[2], -1)
    else:
        if line[1] == "1":
            print "%s\t%s" % (line[2], -1)
            token = None
        elif current_key and line[1] == "1":
            # NOTE(review): unreachable -- the branch above already matches
            # line[1] == "1"; the original indentation was likely different.
            print "%s\t%s" % (line[2], token)
    current_key = key
|
UTF-8
|
Python
| false | false | 2,014 |
16,071,767,630,915 |
13671f486196c4854cf85a5a7acb64e16ed72f41
|
7e05c7782612c358d5f2383024fc306eb12d8df0
|
/setup.py
|
22c231bd96fdf9d9adfa9b07146fb78657919a82
|
[] |
no_license
|
KLab/fluenpy
|
https://github.com/KLab/fluenpy
|
0a7c138781854be1a5e57ede1dd5bdd82a75e971
|
3da8d90498b82de4a77e78f84ceff1a93aee9c8b
|
refs/heads/master
| 2021-01-10T20:18:52.375667 | 2013-02-15T16:16:43 | 2013-02-15T16:16:43 | 4,162,029 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# Prefer setuptools; fall back to distutils on minimal installations.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# load __version__ without importing any other files.
exec(open('fluenpy/version.py').read())

setup(name="fluenpy",
      version=__version__,
      packages=['fluenpy',
                'fluenpy.plugins'
                ],
      scripts=['scripts/fluen.py'],
      install_requires=[
          'msgpack-python',
          'gevent>=1.0b2',
      ]
      )
|
UTF-8
|
Python
| false | false | 2,013 |
6,279,242,202,803 |
890c8cdcb14abaa1e80eb14880906dc4a989e09c
|
a649733d2b897bf174d6d6b6f49fe2d6e6c1885b
|
/src/frontends/console/connection.py
|
8cf258780f7fe3f4133198223b0606c91b0941ed
|
[
"MIT"
] |
permissive
|
maffe/anna
|
https://github.com/maffe/anna
|
f9279fbbca5adc44d7c3216a942bf65ad4ae43f5
|
9737607452b08e3149626319df9310171dcb2d79
|
refs/heads/master
| 2020-04-23T03:06:08.424674 | 2010-12-18T02:25:57 | 2010-12-18T02:25:57 | 1,150,995 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""This is the console frontend to the Anna bot.
It allows the user to communicate with the bot directly through stdin
and stdout but lacks certain functionality for obvious reasons (such as
group-chat support).
"""
import getpass
import os
try:
import threading as _threading
except ImportError:
import dummy_threading as _threading
import aihandler
import communication as c
import config
from frontends import BaseConnection
from frontends.console.parties import Individual
# Banner printed to stdout when the console frontend starts (Connection.run).
USAGE = u"""\
Welcome to the interactive Anna shell. Just type a message as you
normally would.
WARNING: this frontend blocks the stdout on every prompt. To prevent the
output buffer from growing too big, it should only be used alone or at
least not left without input for long periods of time while other
frontends produce lots of output.
"""
class Connection(BaseConnection, _threading.Thread):
    """Console (stdin/stdout) frontend connection, run as a daemon thread."""
    def __init__(self):
        _threading.Thread.__init__(self, name="console frontend")
        # The local OS user is the conversation partner.
        self.idnty = Individual(getpass.getuser())
        self.def_AI = config.get_conf_copy().misc["default_ai"]
    def connect(self):
        """Start the frontend's read loop in its own thread."""
        # The connection will be closed when this is set to True.
        self.halt = False
        # Exit when this is the only thread left (in particular: when the main
        # thread has exited).
        self.setDaemon(True)
        self.start()
    def disconnect(self):
        """Ask the read loop to stop before its next prompt."""
        self.halt = True
    def run(self):
        """Take over the stdin and do nifty stuff... etc.

        This method is called as a separate thread from the main script so it
        must be thread-safe.
        """
        c.stdout_block(USAGE)
        self.idnty.set_AI(aihandler.get_oneonone(self.def_AI)(self.idnty))
        try:
            while not self.halt:
                # The AI can change at run-time.
                ai = self.idnty.get_AI()
                ai.handle(c.stdin(u"<%s> " % self.idnty))
        except EOFError:
            # EOF on stdin (Ctrl-D): end the session with a clean newline.
            c.stdout_block(u"\n")
|
UTF-8
|
Python
| false | false | 2,010 |
4,002,909,556,528 |
daf4003107c9d063b649d2e51ffceb83dcef8daf
|
8f137e1fe05b16a22faf3ab5bf9e123dc765db75
|
/dj-server/services/heligated.py
|
82db872b45886fa34693d9ef4093800c17b89e82
|
[
"GPL-3.0-only"
] |
non_permissive
|
lexsos/heligate
|
https://github.com/lexsos/heligate
|
5d39e7f08f42e44a1551b9edcacf5effdaf020d6
|
032e0500278feee3dc87f0d5b31f02c306a160d7
|
refs/heads/master
| 2016-09-05T23:31:35.723718 | 2014-07-23T09:31:26 | 2014-07-23T09:31:26 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
import django_header
import sys
import os
import signal
import pika
import time
from optparse import OptionParser
from message_bus.event import (
event_system_start,
event_system_stop,
apply_system_start,
apply_system_stop,
)
from message_bus.event import apply_events
from message_bus.utils import run_events_loop
from core.log import except_hook, logger
from core.daemon import Daemon
# Global run flag; cleared by the signal handler or Ctrl-C to stop the loop.
running = True
class Heligated(Daemon):
    """Daemon that services the message bus, reconnecting to RabbitMQ with a
    linearly growing back-off (capped at 1000 seconds) whenever the
    connection drops or cannot be established."""
    def run(self):
        logger.info('starting heligated')
        event_system_start()
        apply_system_start()
        logger.info('heligated started')
        global running
        wait_timer = 0
        while running:
            try:
                logger.info('try connect to rabbitmq server')
                # Back off before retrying; zero on the first attempt.
                time.sleep(wait_timer)
                run_events_loop(apply_events)
            except pika.exceptions.ConnectionClosed:
                # Connection dropped after being established: retry at once.
                wait_timer = 0
                logger.info('rabbitmq connection closed')
            except pika.exceptions.AMQPConnectionError:
                logger.info("can't connect to rabbitmq server")
            except AttributeError:
                # NOTE(review): silently swallowed; presumably works around a
                # pika teardown quirk -- confirm before removing.
                pass
            except KeyboardInterrupt:
                running = False
            if wait_timer < 1000:
                wait_timer += 1
        event_system_stop()
        apply_system_stop()
        logger.info('heligated stopped')  # fixed typo: was 'stoped'
def sig_handler(signum, frame):
    """SIGTERM handler: clear the run flag and unwind the main thread.

    BUG FIX: the original called os.exit(0), which does not exist
    (os has only os._exit); the handler raised AttributeError instead
    of exiting. sys.exit raises SystemExit, which is the intended way
    to terminate from a signal handler (sys is imported at module top).
    """
    global running
    if running:
        running = False
    logger.debug('heligated caught siglan {0}'.format(signum))
    logger.info('heligated stopping')
    sys.exit(0)
def get_args():
    """Parse command-line options; return optparse's (options, args) pair."""
    parser = OptionParser()
    parser.add_option(
        '-a', '--action',
        action='store',
        choices=['stop', 'start', 'interactive'],
        help='[stop | start | interactive]',
    )
    return parser.parse_args()
if __name__ == '__main__':
    # Route uncaught exceptions to the project logger.
    sys.excepthook = except_hook
    # SIGTERM triggers a graceful shutdown via sig_handler.
    signal.signal(signal.SIGTERM, sig_handler)
    heligated = Heligated('/var/run/heligate/heligated.pid')
    (options, args) = get_args()
    if options.action == 'start':
        heligated.start()
    elif options.action == 'stop':
        heligated.stop()
    elif options.action == 'interactive':
        # Run in the foreground without daemonizing.
        heligated.run()
|
UTF-8
|
Python
| false | false | 2,014 |
5,428,838,668,101 |
8ead123bf27bfcb946511b54eeea3520933e2c8d
|
9a103d8d03062a5f69b80671cfb519c094b816b2
|
/heap.py
|
3b9f110df5d614c828ba50f33765f333cf77a0d9
|
[] |
no_license
|
pestretsov/algorithms
|
https://github.com/pestretsov/algorithms
|
3c94457832940013bd8e718f0658cab81070e566
|
071ee5a1fd4217eec3905cd43d10e0be19bcef08
|
refs/heads/master
| 2021-01-19T15:34:05.569432 | 2014-10-26T23:44:39 | 2014-10-26T23:44:39 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
class Heap:
    """Binary min-heap of (key, priority) pairs, ordered by item[1]."""

    def __init__(self, arguments=None):
        self.arr = []
        self.make_heap(arguments)

    def make_heap(self, arguments):
        """Seed the heap: a list is inserted element by element; any other
        non-None value is stored as a single item."""
        if arguments:
            if type(arguments) != list:
                self.arr.append(arguments)
            else:
                [self.insert(i) for i in arguments]

    def parent(self, i):
        return (i-1)//2

    def left(self, i):
        return (i*2)+1

    def right(self, i):
        return (i*2)+2

    def size(self):
        return len(self.arr)

    def min_child(self, i):
        """Index of the smaller-priority child of node i, or -1 if childless.

        BUG FIX: the original compared the left child against size()-1 and
        always indexed arr[r], so a node whose only child was the LAST array
        element was treated as childless; sift_down then left the heap
        unordered after extract_min (e.g. extracting from a 3-element heap).
        """
        l = self.left(i)
        r = self.right(i)
        n = self.size()
        if l >= n:
            return -1
        if r >= n:
            return l
        # Ties go to the left child, matching the original comparison.
        return l if self.arr[l][1] <= self.arr[r][1] else r

    def bubble_up(self, i):
        """Move arr[i] up while its parent has a larger priority."""
        p = self.parent(i)
        while (p >= 0) and (self.arr[i][1] < self.arr[p][1]):
            self.arr[i], self.arr[p] = self.arr[p], self.arr[i]
            i = p
            p = self.parent(i)

    def sift_down(self, i):
        """Move arr[i] down while a child has a smaller priority."""
        t = self.min_child(i)
        while (t > -1) and (self.arr[t][1] < self.arr[i][1]):
            self.arr[i], self.arr[t] = self.arr[t], self.arr[i]
            i = t
            t = self.min_child(i)

    def insert(self, node):
        """Append *node* (a (key, priority) pair) and restore the heap order."""
        i = self.size()
        self.arr.append(node)
        self.bubble_up(i)

    def extract_min(self):
        """Pop and return the pair with the smallest priority, or None if empty."""
        if self.empty():
            return None
        min = self.arr[0]
        self.arr[0] = self.arr[self.size()-1]
        del self.arr[self.size()-1]
        self.sift_down(0)
        return min

    def decrease_key(self, f, t):
        """Change the priority of the entry whose key equals f[0] to t."""
        i = [x[0] for x in self.arr].index(f[0])
        self.arr[i] = (f[0], t)
        self.bubble_up(i)

    def empty(self):
        return not(self.size())

    def print_heap(self):
        print(self.arr)
|
UTF-8
|
Python
| false | false | 2,014 |
18,330,920,435,392 |
4b942698cab9edae79fb900d72676401c62090ed
|
8e1c5c81dfcc338cae6edc3773501df82abef1b3
|
/senseo/httpd/helper.py
|
068c56a52ac6d935efb984d0606e11d27d369f40
|
[] |
no_license
|
showi/senseo
|
https://github.com/showi/senseo
|
2854098ec1d904704b988853ac9d22b8177e8642
|
12834d9ff72cbbd3998f89f8d46133cb0b3f9c8d
|
refs/heads/master
| 2015-08-13T05:24:19.290042 | 2014-08-31T23:00:31 | 2014-08-31T23:00:31 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from flask import request
class Helper(object):
    """Request-parsing helpers for HTTP views."""

    @classmethod
    def limit(cls):
        """Return the 'limit' query parameter clamped to (0, 1000]; 1000 by default."""
        ceiling = 1000
        raw = request.args.get('limit')
        if raw is None:
            return ceiling
        value = int(raw)
        if 0 < value <= ceiling:
            return value
        return ceiling
|
UTF-8
|
Python
| false | false | 2,014 |
14,276,471,313,494 |
baeb7bfa1284b822fb86009cdb9ab658331af785
|
2316d0c8950153f993e59cc8d90904fabecf3ce7
|
/tests/correct3/assign3.py
|
8657466bd8ae2bcf9a5daf1b71a4f761d9f05f7b
|
[] |
no_license
|
SsnL/compyler
|
https://github.com/SsnL/compyler
|
6efb1f9a09a3915faf75b2e9def426080f488454
|
fd05dc49babd90b4953088aadb026885bc8a3388
|
refs/heads/master
| 2020-04-05T22:49:55.377343 | 2013-12-14T00:57:31 | 2013-12-14T00:57:31 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# assign: basic chained assignment
# Chained assignment binds every target to the same string object (Python 2 script).
a = b = c = d = e = f = "hello world"
print a, b, c, d, e, f
|
UTF-8
|
Python
| false | false | 2,013 |
2,937,757,645,241 |
8f20920551e7205137110d154991f852b76dc61f
|
ce7c942aa6e2cbf460e3a0a2c1cb8e1f488f525a
|
/net/__init__.py
|
6a2ac12625ea4032bd78c8b1f4f5250cae863b6d
|
[
"GPL-3.0-only"
] |
non_permissive
|
thepian/thepian-lib
|
https://github.com/thepian/thepian-lib
|
a2c25609e97eadf0617d6770f83c5edac854b9de
|
5e9c057b311a87df8f3d6dc6fa25c2181531b17c
|
refs/heads/master
| 2021-01-18T14:07:27.679073 | 2011-10-10T04:34:52 | 2011-10-10T04:34:52 | 173,386 | 5 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from net import get_ip4_addresses, get_ip4_address, get_mac_addresses, get_mac_address, get_mac_address_hex, ip4_to_ip6
|
UTF-8
|
Python
| false | false | 2,011 |
1,116,691,513,124 |
ebcb26254628717d284a73eed8cd3ac1b66204a0
|
628fb2714a178ac30f35f33c0a8f27a185de640e
|
/dataDaemonScheduler/utilold.py
|
5102f9ea615c6dee223d9c6aab13c2014d0c98a6
|
[] |
no_license
|
ddccffvv/tesla
|
https://github.com/ddccffvv/tesla
|
8e7647b065577713e0d9675fee16d60ab9a968e8
|
d4b27d2c47d9474a258aa06a4614c150200283ad
|
refs/heads/master
| 2020-06-07T02:19:57.550946 | 2013-09-15T20:26:03 | 2013-09-15T20:26:03 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def get_timestamp():
    """Return the current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    import time, datetime
    now = datetime.datetime.fromtimestamp(time.time())
    return now.strftime('%Y-%m-%d %H:%M:%S')
def log(msg):
    # Print *msg* prefixed with a bracketed timestamp (Python 2 print statement).
    print "[" + get_timestamp() + "] " + str(msg)
|
UTF-8
|
Python
| false | false | 2,013 |
16,544,214,043,927 |
7c299c40095a187a13254b8644481bdbdd57a693
|
516d4ad6647805e939b09035456a22aad462a97a
|
/projects/uva/collatz/Collatz.py
|
3f0aac717b5ed032c053f331d8b7cd6f10da034c
|
[] |
no_license
|
mrhota/cs373
|
https://github.com/mrhota/cs373
|
968df984f04b42fb7059d19777537c3d0205dd34
|
8179ee8e975b07e0d2b9f75356f7890fcc9db993
|
refs/heads/master
| 2021-01-23T19:35:13.750145 | 2011-04-30T00:57:11 | 2011-04-30T00:57:11 | 1,568,183 | 1 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# ---------------------------
# projects/collatz/Collatz.py
# Copyright (C) 2011
# Glenn P. Downing
# ---------------------------
# ------------
# collatz_read
# ------------
def collatz_read (r, a) :
    """Read two positive ints from reader *r* into a[0] and a[1].

    Returns True on success, False at end of input.
    """
    line = r.readline()
    if not line:
        return False
    fields = line.split()
    a[0], a[1] = int(fields[0]), int(fields[1])
    assert a[0] > 0 and a[1] > 0
    return True
# ------------
# collatz_eval
# ------------
def _cycle_length(n):
    """Return the Collatz cycle length of n: the number of terms in the
    sequence n, f(n), f(f(n)), ... down to 1 (inclusive of both ends)."""
    assert n > 0
    c = 1
    while n > 1:
        # 3n+1 for odd n, n/2 for even n.
        n = 3 * n + 1 if n & 1 else n // 2
        c += 1
    return c

def collatz_eval (i, j) :
    """
    i is the beginning of the range, inclusive
    j is the end of the range, inclusive
    return the max cycle length in the range [i, j]

    Implements the previously-stubbed "<your code>" section; also accepts
    i > j (the endpoints are normalized), matching the UVa 100 convention.
    """
    assert i > 0
    assert j > 0
    lo, hi = min(i, j), max(i, j)
    v = max(_cycle_length(n) for n in range(lo, hi + 1))
    assert v > 0
    return v
# -------------
# collatz_print
# -------------
def collatz_print (w, i, j, v) :
    """Write "i j v" followed by a newline to the writer *w*."""
    fields = (str(i), str(j), str(v))
    w.write(" ".join(fields) + "\n")
# -------------
# collatz_solve
# -------------
def collatz_solve (r, w) :
    """Read (i, j) pairs from *r* and write "i j max_cycle_length" lines to *w*."""
    pair = [0, 0]
    while collatz_read(r, pair) :
        begin, end = pair
        collatz_print(w, begin, end, collatz_eval(begin, end))
|
UTF-8
|
Python
| false | false | 2,011 |
12,996,571,083,419 |
22c75eb725be88a711bf135a466e353cfddefce4
|
dd949f215d968f2ee69bf85571fd63e4f085a869
|
/systems/css-2011-teams/yellow/subarchitectures/planner.sa/src/python/standalone/pddl/parser.py
|
cf48bf10a99a2afe1f540038933924a07efdf90b
|
[] |
no_license
|
marc-hanheide/cogx
|
https://github.com/marc-hanheide/cogx
|
a3fd395805f1b0ad7d713a05b9256312757b37a9
|
cb9a9c9cdfeba02afac6a83d03b7c6bb778edb95
|
refs/heads/master
| 2022-03-16T23:36:21.951317 | 2013-12-10T23:49:07 | 2013-12-10T23:49:07 | 219,460,352 | 1 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#! /usr/bin/env python
# -*- coding: latin-1 -*-
class Token(object):
    """A lexical token annotated with its source line number and file name."""

    def __init__(self, token, line, file):
        self.string = token
        self.line = line
        self.file = file

    def error(self, message):
        """Abort parsing with *message* located at this token."""
        raise ParseError(self, message)

    def check_keyword(self, keyword):
        """Raise unless this token is exactly *keyword*."""
        if self.string != keyword:
            raise UnexpectedTokenError(self, "'%s'" % keyword)

    def __eq__(self, other):
        # Comparing against a plain string checks the token text only.
        if not isinstance(other, Token):
            return self.string == other
        return (self.string, self.line, self.file) == \
               (other.string, other.line, other.file)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __str__(self):
        return "%s (line:%d, file:%s)" % (self.string, self.line, self.file)
class Element(list):
    """A parse-tree node: either a terminal (bare token) or a list of children."""

    def __init__(self, token, children=None):
        self.parent = None
        self.token = token
        self.endtoken = None
        # Constructed without a children list -> terminal (leaf) node.
        self.terminal = children is None
        if not self.terminal:
            for child in children:
                self.append(child)
                child.parent = self

    def append(self, child):
        # Terminals silently ignore children.
        if not self.terminal:
            list.append(self, child)

    def end(self, token):
        # Record the ')' token that closed this element's list.
        self.endtoken = token

    def is_terminal(self):
        return self.terminal

    def line(self):
        return self.token.line

    def file(self):
        return self.token.file

    def __iter__(self):
        return ElementIterator(self)
class ElementIterator(object):
    """Python 2-style iterator over a non-terminal Element, with parser
    conveniences: expectation-checked `get`, `no_more_tokens`, `reset`,
    and access to the element's closing token."""
    def __init__(self, element):
        # Iterating a terminal element is a grammar error at the call site.
        if element.is_terminal():
            raise UnexpectedTokenError(element.token, "list")
        self.element = element
        self.it = list.__iter__(element)
    def __iter__(self):
        return self
    def next(self):
        # Python 2 iterator protocol (no __next__); delegates to the list iterator.
        return self.it.next()
    def end(self):
        # The token that closed this element's list.
        return self.element.endtoken
    def reset(self):
        # Fresh iterator over the same element, starting from the beginning.
        return ElementIterator(self.element)
    def no_more_tokens(self, message=None):
        # Raise unless the iterator is exhausted; *message* overrides the default.
        try:
            token =self.next().token
        except StopIteration:
            return
        if not message:
            raise UnexpectedTokenError(token, "')'")
        else:
            raise ParseError(token, message)
    def get(self, expected=None, message=None):
        # Return the next child, optionally validated: `list` demands a
        # non-terminal, "terminal" demands a leaf, any other value is
        # matched against the token string. Raises EndOfListError when
        # the list is exhausted.
        try:
            elem = self.it.next()
            if expected is not None:
                if expected == list and elem.is_terminal():
                    if message is None:
                        message = "'('"
                    raise UnexpectedTokenError(elem.token, message)
                elif expected == "terminal" and not elem.is_terminal():
                    if message is None:
                        message = "identifier"
                    raise UnexpectedTokenError(elem.token, message)
                elif expected != list and expected != "terminal" and elem.token.string != expected:
                    if message is None:
                        message = "'%s'" % expected
                    raise UnexpectedTokenError(elem.token, message)
            return elem
        except StopIteration:
            raise EndOfListError(self.end())
class ParseError(Exception):
    """Base parse failure carrying the offending token and a message."""

    def __init__(self, token, message):
        self.token = token
        self._message = message

    # Expose .message explicitly (the implicit Exception.message is gone
    # in modern Python); kept read/write via a property.
    def _get_message(self):
        return self._message

    def _set_message(self, message):
        self._message = message

    message = property(_get_message, _set_message)

    def __str__(self):
        return "Error in line %d of %s: %s" % (self.token.line, self.token.file, self._message)
class UnexpectedTokenError(ParseError):
    """Parse error reporting what was found versus what was expected."""

    def __init__(self, token, expected=None):
        self.token = token
        self._message = (
            "Expected %s, found '%s'" % (expected, token.string)
            if expected
            else "Unexpected token: %s" % token.string
        )
class EndOfListError(ParseError):
    """Raised when an element list is exhausted but more input was expected."""

    def __init__(self, token):
        self._message = "Unexpected end of list."
        self.token = token
class Parser(object):
    """Generic parser for LISP-like languages. Creates a parse tree
    with file/linenumber annotations"""
    # NOTE(review): mutable default `separators=[]` is shared across calls;
    # safe only as long as callers never mutate it — confirm.
    def __init__(self, lines, source=None, separators=[]):
        # Parens are always separators; callers may add language-specific ones.
        self.separators = ["(",")"] + separators
        self.source = source
        self.root = None
        tokens = self.tokenize(lines, source)
        try:
            token = tokens.next()
        except StopIteration:
            raise ParseError(Token("", 0, source), "Empty File")
        # Input must be a single top-level list.
        if token != "(":
            raise UnexpectedTokenError(token, "'('")
        self.root = self.parse(token, tokens)
        # Anything after the closing paren of the root list is an error.
        try:
            spurious = tokens.next()
            raise UnexpectedTokenError(spurious, "end of file")
        except StopIteration:
            pass
    @staticmethod
    def parse_file(filename, separators=[]):
        # Parse *filename* and return the Parser; the file is closed on
        # both success and failure (bare except re-raises after cleanup).
        f = open(filename)
        try:
            p = Parser(f, filename, separators)
        except:
            f.close()
            raise
        f.close()
        return p
    @staticmethod
    def parse_as(lines, _class, *args):
        # Parse *lines* and hand the root element to _class.parse(...).
        p = Parser(lines)
        return _class.parse(iter(p.root), *args)
    def parse(self, head, tokens):
        # Recursively build the Element tree for the list opened by *head*.
        try:
            element = Element(head, [])
            token = tokens.next()
            while token != ")":
                if token == "(":
                    element.append(self.parse(token, tokens))
                else:
                    element.append(Element(token))
                token = tokens.next()
            # Remember the closing token for error reporting.
            element.end(token)
            return element
        except StopIteration:
            raise ParseError(head, "No closing ')' before end of file")
    def tokenize(self, input, source=""):
        # Yield lowercased Tokens; ';' starts a comment, separators are
        # split out by padding them with spaces before whitespace-splitting.
        for i, line in enumerate(input):
            line = line.split(";",1)[0]
            for sep in self.separators:
                line = line.replace(sep, " "+sep+" ")
            for token in line.split():
                token = token.strip(" \t\n")
                if token != "":
                    yield Token(token.lower(), i+1, source)
def parse_typed_list(it, leftFunc, rightFunc, expectedLeft="identifiers", expectedRight="identifier", rightSideRequired = False):
    """Generator over a PDDL-style typed list: ``a b c - type1 d - type2 e``.

    Yields (left, right) pairs where *left* is the list of leftFunc-mapped
    elements preceding a '-' and *right* is the rightFunc-mapped type after
    it. Trailing untyped elements yield (left, None) unless
    rightSideRequired is set, in which case they raise.
    """
    left = []
    foundSep = False
    for elem in it:
        if elem.token == "-":
            # A '-' must follow at least one left element and cannot repeat.
            if not left or foundSep:
                raise ParseError(elem.token, "expected %s before '-'" % expectedLeft)
            foundSep = True
            continue
        if foundSep:
            # First element after '-' is the type; emit the group and reset.
            right = rightFunc(elem)
            yield (left, right)
            left = []
            foundSep = False
            continue
        left.append(leftFunc(elem))
    # Dangling '-' with no type after it.
    if foundSep:
        raise UnexpectedTokenError(it.end(), expectedRight)
    if left and rightSideRequired:
        raise UnexpectedTokenError(it.end(), "-")
    elif left:
        yield (left, None)
|
UTF-8
|
Python
| false | false | 2,013 |
13,597,866,505,062 |
1fa2cc2063ce7d7981cdedee8185ce9a795a5e64
|
a9324245d79dfc866fbb17ccfab6ffb1051603ec
|
/mysite_hipopathy/polls/models.py
|
3cd7b5e9771705b6e0523e57b11428e1f3fe8aaa
|
[] |
no_license
|
adhanani/hipopathy
|
https://github.com/adhanani/hipopathy
|
33cce4ff44dbd8aeac95c46429b418eb430b37ae
|
45d7c20008094469941f6ca494f32505a88636d2
|
refs/heads/master
| 2021-01-16T20:51:09.133316 | 2012-09-25T00:50:52 | 2012-09-25T00:50:52 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import datetime
from django.db import models
from django.utils import timezone
# Create your models here.
class Poll(models.Model):
    """A poll question with its publication date."""
    question = models.CharField(max_length=200)
    pub_date = models.DateTimeField('date published')
    # Make the output in a better format
    def __unicode__(self):
        return self.question
    # Create a better representation for the date
    def was_published_recently(self):
        # True when published within the last 24 hours (timezone-aware).
        return self.pub_date >= timezone.now() - datetime.timedelta(days=1)
    # Admin list display configuration for the method above.
    was_published_recently.admin_order_field = 'pub_date'
    was_published_recently.boolean = True
    was_published_recently.short_description = 'Published recently?'
class Choice(models.Model):
    """One selectable answer of a Poll, with its vote count."""
    poll = models.ForeignKey(Poll)
    choice = models.CharField(max_length=200)
    # NOTE(review): no default — votes must be supplied on creation; confirm.
    votes = models.IntegerField()
    def __unicode__(self):
        return self.choice
class MyArtist(models.Model):
    """Minimal artist record: display name plus an alias."""
    artist_name = models.CharField(max_length=200)
    alias = models.CharField(max_length=200)
    def __unicode__(self):
        return self.artist_name
class MySong(models.Model):
    """Minimal song record linked to a MyArtist."""
    singer = models.ForeignKey(MyArtist)
    song = models.CharField(max_length=200)
    album = models.CharField(max_length=200)
    def __unicode__(self):
        return self.song
class Artist(models.Model):
    """Artist row mapped onto the legacy 'artist' table (managed schema
    lives in the database; coordinates and dates are stored as text/date
    columns as-is)."""
    artistid = models.IntegerField(primary_key=True)
    artist = models.CharField(max_length=765)
    type = models.CharField(max_length=765)
    start = models.DateField()
    end = models.DateField()
    alias = models.TextField()
    # Current location columns.
    city = models.CharField(max_length=765)
    state = models.CharField(max_length=765)
    county = models.CharField(max_length=765)
    country = models.CharField(max_length=765)
    latitude = models.CharField(max_length=765)
    longitude = models.CharField(max_length=765)
    # Origin location columns.
    origin_city = models.CharField(max_length=765)
    origin_state = models.CharField(max_length=765)
    origin_county = models.CharField(max_length=765)
    origin_country = models.CharField(max_length=765)
    origin_latitude = models.CharField(max_length=765)
    origin_longitude = models.CharField(max_length=765)
    folderid = models.CharField(max_length=765)
    class Meta:
        db_table = u'artist'
    def __unicode__(self):
        return self.artist
class Album(models.Model):
    """Album row mapped onto the legacy 'album' table."""
    albumid = models.IntegerField(primary_key=True)
    album = models.CharField(max_length=765)
    release_date = models.DateField()
    units_sold = models.IntegerField()
    record_label = models.CharField(max_length=765)
    # Explicit legacy column/target instead of the default '<field>_id'.
    artistid = models.ForeignKey(Artist, db_column='artistid', to_field='artistid')
    folderid = models.CharField(max_length=765)
    class Meta:
        db_table = u'album'
class Song(models.Model):
    """Song row mapped onto the legacy 'song' table, linking album and artist."""
    songid = models.IntegerField(primary_key=True)
    #albumid = models.IntegerField()
    albumid = models.ForeignKey(Album, db_column='albumid', to_field='albumid')
    #theartist = models.ForeignKey(Artist)
    title = models.CharField(max_length=765)
    lyrics = models.TextField()
    omp_score = models.CharField(max_length=765)
    waveform_jpg_url = models.CharField(max_length=765)
    wordclouds_jpg_url = models.CharField(max_length=765)
    # NOTE(review): plain int, not a ForeignKey — presumably keys into a
    # smogdata table not modelled here; confirm.
    smogdataid = models.IntegerField()
    artistid = models.ForeignKey(Artist, db_column='artistid', to_field='artistid')
    song_styleid = models.IntegerField()
    typed_by = models.CharField(max_length=765)
    filenameid = models.CharField(max_length=765)
    producer = models.CharField(max_length=765)
    class Meta:
        db_table = u'song'
    def __unicode__(self):
        return self.title
class Snippet(models.Model):
    """A lyric excerpt taken from a Song, with its expected answer."""
    snippetid = models.IntegerField(primary_key=True)
    songid = models.ForeignKey(Song, db_column='songid', to_field='songid')
    snippet = models.TextField()
    answer = models.CharField(max_length=765)
    comments = models.TextField()
    def __unicode__(self):
        return self.snippet
class Userdata(models.Model):
    """A user's answer to one Snippet."""
    userdataid = models.IntegerField(primary_key=True)
    snippet = models.ForeignKey(Snippet)
    name = models.CharField(max_length=765)
    answer = models.CharField(max_length=765)
class Frame(models.Model):
    """Frame row from a legacy mixed-case table (columns mapped via db_column)."""
    frameid = models.IntegerField(primary_key=True, db_column='Frameid') # Field name made lowercase.
    name = models.CharField(max_length=240, unique=True, db_column='Name', blank=True) # Field name made lowercase.
    definition = models.TextField(db_column='Definition', blank=True) # Field name made lowercase.
    createddate = models.DateTimeField(null=True, db_column='CreatedDate', blank=True) # Field name made lowercase.
    createdby = models.CharField(max_length=120, db_column='CreatedBy', blank=True) # Field name made lowercase.
    image = models.TextField(db_column='Image', blank=True) # Field name made lowercase.
    symbolicrep = models.CharField(max_length=240, db_column='SymbolicRep', blank=True) # Field name made lowercase.
    modifieddate = models.DateTimeField(db_column='ModifiedDate') # Field name made lowercase.
    class Meta:
        db_table = u'Frame'
    def __unicode__(self):
        return self.name
class Frameelement(models.Model):
    """FrameElement row: a named role belonging to a Frame (legacy table)."""
    frameelementid = models.IntegerField(primary_key=True, db_column='FrameElementid') # Field name made lowercase.
    name = models.CharField(max_length=240, db_column='Name', blank=True) # Field name made lowercase.
    abbrev = models.CharField(max_length=120, db_column='Abbrev', blank=True) # Field name made lowercase.
    definition = models.TextField(db_column='Definition', blank=True) # Field name made lowercase.
    semrolerank = models.IntegerField(null=True, db_column='SemRoleRank', blank=True) # Field name made lowercase.
    type = models.CharField(max_length=48, db_column='Type', blank=True) # Field name made lowercase.
    core = models.CharField(max_length=3, db_column='Core', blank=True) # Field name made lowercase.
    createddate = models.DateTimeField(null=True, db_column='CreatedDate', blank=True) # Field name made lowercase.
    createdby = models.CharField(max_length=120, db_column='CreatedBy', blank=True) # Field name made lowercase.
    #frameid = models.IntegerField(null=True, db_column='Frameid', blank=True) # Field name made lowercase.
    frameid = models.ForeignKey(Frame, db_column='frameid', to_field='frameid')
    modifieddate = models.DateTimeField(db_column='ModifiedDate') # Field name made lowercase.
    class Meta:
        db_table = u'FrameElement'
    def __unicode__(self):
        return self.name
|
UTF-8
|
Python
| false | false | 2,012 |
4,217,657,914,401 |
6f97cedb667ae7a412edb21551612562e3354b40
|
ef618bab764b0ae863f9e1e61e2e960d43f979bc
|
/docker_pull
|
eddf9aa0d9e051b2f48c60c46d78a78936d926a2
|
[
"MIT"
] |
permissive
|
hughdbrown/ansible-docker-pull
|
https://github.com/hughdbrown/ansible-docker-pull
|
7da7b0f1623d106625e0ee7fd073bf9b2188364c
|
5e15bd044b5a6ab73b4f4a9834eeb56968ef6614
|
refs/heads/master
| 2020-12-25T11:21:45.008522 | 2014-08-15T13:28:53 | 2014-08-15T13:28:53 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
from hashlib import md5
from ansible.module_utils.basic import *
class DockerPuller(object):
    """Wraps `docker pull` for an Ansible module and detects image changes
    by fingerprinting the local image list before and after the pull."""

    def __init__(self, module):
        self.module = module
        self.repo = module.params['repo']
        self.tag = module.params.get('tag', 'latest')

    def pull_repo(self, docker='docker'):
        """Pull repo:tag; return ((rc, out, err), changed)."""
        before = self._get_images_checksum(docker)
        target = "{0.repo}:{0.tag}".format(self)
        cmd = [self._docker(docker), 'pull', target]
        rc, out, err = self._run(cmd)
        if rc is not None and rc != 0:
            self._fail(err, rc)
        # Some docker clients print usage text instead of failing on bad args.
        if out.strip().startswith('Usage:'):
            self._fail(
                ('Got usage output for cmd=%r:\n' % (cmd,)) + out + err, rc
            )
        after = self._get_images_checksum(docker)
        return ((rc, out, err), after != before)

    def _get_images_checksum(self, docker='docker'):
        # MD5 of `docker images -q` output: changes iff the image set changes.
        rc, out, err = self._run([self._docker(docker), 'images', '-q'])
        if rc is not None and rc != 0:
            self._fail(err, rc)
        return _md5sum(out.strip())

    def _run(self, cmd):
        return self.module.run_command(cmd)

    def _docker(self, docker):
        # Resolve the docker binary path (required=True -> fails if absent).
        return self.module.get_bin_path(docker, True)

    def _fail(self, err, rc):
        self.module.fail_json(
            repo=self.repo, tag=self.tag, msg=err, rc=rc
        )
def _md5sum(string):
return md5(string).hexdigest()
def main():
    """Ansible module entry point: pull a docker repo and report change status."""
    module = AnsibleModule(
        argument_spec={
            'state': {'default': 'present', 'choices': ['present']},
            'repo': {'required': True},
            'tag': {'default': '', 'required': False},
        },
        supports_check_mode=True
    )
    # Check mode never pulls, so nothing can change.
    if module.check_mode:
        module.exit_json(changed=False)
    puller = DockerPuller(module)
    rc, out, err = None, '', ''
    (rc, out, err), changed = puller.pull_repo()
    # Report stdout when present, otherwise stderr.
    handle, content = ('stdout', out) if out else ('stderr', err)
    result = {
        'changed': (rc is not None) and changed,
        handle: content,
    }
    module.exit_json(**result)
# include magic from lib/ansible/module_common.py
# NOTE: the marker on the next line is substituted by Ansible at deploy
# time with the common module boilerplate — do not edit or remove it.
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
if __name__ == '__main__':
    main()
|
UTF-8
|
Python
| false | false | 2,014 |
8,607,114,506,359 |
be6f3b507516eba133056dd892e9930540d7bef8
|
95352339a059ce076da46898f9da6262b455ebd1
|
/examples.py
|
00697f9c2ae386763670d2f0e7494038d13857d5
|
[] |
no_license
|
simplyali241/news-media-topics
|
https://github.com/simplyali241/news-media-topics
|
ff3b3fcb2aaa3d71c1a7d448c9054be78ba52578
|
b56f7b29953a08b01a2154afc39e7a918b7abc85
|
refs/heads/master
| 2020-12-28T22:00:45.086740 | 2014-12-09T18:50:07 | 2014-12-09T18:50:07 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from words_graph import SimpleGraphBuilder, NounPhraseGraphBuilder
from extractor import NewsScraper
import graph_cluster
import text_processing
import time
import community
def get_words_by_partition(partition):
    """
    Given a community partition of the form:
        { "word1": 2, "word2": 1, "word3": 1 .... }
    it returns the inverse dictionary:
        { 1: ["word1", "word3"], 2: ["word2"] .... }

    Rewritten with dict.setdefault: one lookup per word instead of the
    membership-test-then-index pattern; output is identical.
    """
    words_by_part = {}
    for word, part in partition.items():
        words_by_part.setdefault(part, []).append(word)
    return words_by_part
# --- Example pipeline (Python 2 script): scrape CNN, build a word graph,
# --- cluster it with Louvain / overlapping clusters, print topic keywords.
t0 = time.time()
news = NewsScraper('http://cnn.com', nthreads = 10)
news.pull()
news.scrape(10)
# Generator of article texts; consumed once by load_texts below.
texts = (article['text'] for article in news.polished())
t1 = time.time()
print "Data retrieved in %.2f sec" %(t1-t0)
# Create a graph builder
gb = SimpleGraphBuilder(text_processing.clean_punctuation_and_stopwords)
gb.load_texts(texts)
# Show texts in the builder
# for text in texts:
#     print text
#     print "##################################################"
#
# print "##################################################"
# print "TOKENIZED SENTENCES"
# print "##################################################"
# Show tokenized sentences
for text in gb.text_sentences[:1]:
    print "##################################################"
    for sentence in text:
        print sentence
# Building graph
G = gb.create_graph()
t2 = time.time()
print "Graph built in %.2f sec" %(t2-t1)
# Clustering
# ex = 2
# r = 2
# tol = 1e-3
# threshold = 1e-5
# M = graph_cluster.MCL_cluster(G,ex,r,tol,threshold)
# t3 = time.time()
# print "Graph clustered in %.2f sec" %(t3-t2)
# LOUVAIN
partition = community.best_partition(G)
words_by_part = get_words_by_partition(partition)
# OVERLAPPING
# NOTE(review): this immediately overwrites the Louvain result above —
# keep only one of the two clustering calls, or rename the variables.
words_by_part = graph_cluster.get_overlap_clusters(G, 2, 1)
# In order to get partitions in a given level of the dendogram (bigger level, smaller communities)
# although it seems that there are only usually 2 levels...
#dendogram = community.generate_dendogram(G)
#partition = community.partition_at_level(dendogram, 0)
#partition = community.partition_at_level(dendogram, 1)
# -- example using noun phrases
#
# gb = NounPhraseGraphBuilder(text_processing.clean_punctuation_and_stopwords)
# texts = (article['text'] for article in news.polished())
# gb.load_texts(texts)
# G = gb.create_graph(graphtype='occurence')
#
# partition = community.best_partition(G)
# words_by_part = get_words_by_partition(partition)
#
#
# for counter in xrange(0, len(words_by_part)):
#     print '\nTopic {}:\n----------'.format(counter)
#     H = G.subgraph(words_by_part[counter])
#     print ', '.join(graph_cluster.pagerank_top_k(H, 10))
# -- example using non dictionary words
gb = SimpleGraphBuilder(text_processing.only_non_dictionary_words, stem_words=False)
texts = (article['text'] for article in news.polished())
gb.load_texts(texts)
G = gb.create_graph()
partition = community.best_partition(G)
words_by_part = get_words_by_partition(partition)
# Print the top-10 PageRank keywords of each community as a "topic".
for counter in xrange(0, len(words_by_part)):
    print '\nTopic {}:\n----------'.format(counter)
    H = G.subgraph(words_by_part[counter])
    print ', '.join(graph_cluster.pagerank_top_k(H, 10))
|
UTF-8
|
Python
| false | false | 2,014 |
4,887,672,801,720 |
2114b06d58ed533ceff35261a900a3a8a6075ffc
|
c968e34f2e6083135fd50e80908a5464a311a3e4
|
/src/native_client/src/trusted/handle_pass/build.scons
|
22b8786bb11985308280540080002eaf4e516119
|
[
"SunPro",
"MPL-1.1",
"dtoa",
"MIT",
"LicenseRef-scancode-public-domain-disclaimer",
"LicenseRef-scancode-python-cwi",
"LGPL-2.1-or-later",
"GFDL-1.1-or-later",
"LicenseRef-scancode-nilsson-historical",
"LicenseRef-scancode-other-copyleft",
"GFDL-1.1-only",
"LGPL-2.0-or-later",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-philippe-de-muyter",
"LicenseRef-scancode-intel-osl-1993",
"HPND-sell-variant",
"ICU",
"LicenseRef-scancode-unicode",
"LGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"GCC-exception-3.1",
"W3C-19980720",
"LicenseRef-scancode-amd-historical",
"LicenseRef-scancode-x11-hanson",
"LGPL-2.1-only",
"LicenseRef-scancode-other-permissive",
"Python-2.0",
"LicenseRef-scancode-protobuf",
"Spencer-94",
"Zlib",
"Classpath-exception-2.0",
"BSD-Source-Code",
"PSF-2.0",
"LZMA-exception",
"LicenseRef-scancode-newlib-historical",
"bzip2-1.0.6",
"NCSA",
"LicenseRef-scancode-proprietary-license",
"CC0-1.0",
"GPL-2.0-or-later",
"GPL-3.0-only",
"LicenseRef-scancode-generic-exception",
"SMLNJ",
"BSD-3-Clause",
"HP-1986",
"LicenseRef-scancode-free-unknown",
"GPL-2.0-only",
"BSL-1.0",
"LGPL-2.0-only",
"LicenseRef-scancode-ietf",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"NTP",
"W3C",
"GPL-1.0-or-later",
"CPL-1.0",
"NPL-1.1",
"SAX-PD",
"CC-BY-2.5"
] |
non_permissive
|
chromium-googlesource-mirror/native_client_old_2
|
https://github.com/chromium-googlesource-mirror/native_client_old_2
|
1a97f5c1b415ae941687d1838ad1825886344b5b
|
11a465e26aaaab09f543d8306d67975e7bbd0de6
|
refs/heads/master
| 2016-08-22T15:44:35.931830 | 2012-02-07T19:18:32 | 2012-02-07T19:18:32 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- python -*-
# Copyright 2008 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
Import('env')
# These libraries are used only under Windows when integrated with Chrome.
# Build is done separately to ensure it continues to work.
if env.Bit('windows'):
    # Windows build defines (XP_WIN/WIN32/_WINDOWS) for these sources.
    env.Append(
        CPPDEFINES = ['XP_WIN', 'WIN32', '_WINDOWS'],
    )
browser_lib_inputs = [
    'browser_handle.cc',
]
ldr_lib_inputs = [
    'ldr_handle.cc',
]
# Compile sources to component objects, then package each list into its own library.
browser_lib_inputs = env.ComponentObject(browser_lib_inputs)
ldr_lib_inputs = env.ComponentObject(ldr_lib_inputs)
env.ComponentLibrary('browserhandle', browser_lib_inputs)
env.ComponentLibrary('ldrhandle', ldr_lib_inputs)
|
UTF-8
|
Python
| false | false | 2,012 |
13,735,305,430,345 |
8f9f979a9b172f8ca0f8005a23447b5374fcec74
|
3285d65ba44b3224f67adc564b2cbc39dd68bb94
|
/grammar/functions/deff.py
|
74ac3611389526afc057d8c5db2bce35a2c92a36
|
[
"MIT"
] |
permissive
|
xurxodiz/cardwalker
|
https://github.com/xurxodiz/cardwalker
|
992ee4fd1b6a1d811722592443299b13cd02f8b7
|
110e391d71854aaaa6d2b231d6c29f7d4f2d1dfa
|
refs/heads/master
| 2021-01-20T10:29:24.726342 | 2014-02-17T18:54:57 | 2014-02-17T18:54:57 | 16,876,198 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from pyparsing import *
from ..constants.punctuation.deff import COMMA
from ..constants.connectors.deff import AND, OR
def delimitedListAnd(elem):
    """Match *elem* repeated, separated by commas and/or AND."""
    separator = OneOrMore(COMMA | AND)
    return delimitedList(elem, separator)
def delimitedListOr(elem):
    """Match *elem* repeated, separated by commas and/or OR."""
    separator = OneOrMore(COMMA | OR)
    return delimitedList(elem, separator)
def oneOfNamed(ssv):
    """Build a MatchFirst of caseless keywords from a space-separated
    string, with the result named after the first keyword."""
    keywords = ssv.split()
    alternatives = [CaselessKeyword(word) for word in keywords]
    return MatchFirst(alternatives).setResultsName(keywords[0])
def named(sstr):
    """Caseless literal named after the string itself (spaces -> underscores)."""
    result_name = sstr.replace(" ", "_")
    return CaselessLiteral(sstr).setResultsName(result_name)
def loadFromFile(path):
    """Build a MatchFirst with one oneOfNamed alternative per line of *path*."""
    with open(path) as f:
        lines = f.read().splitlines()
    return MatchFirst([oneOfNamed(line.strip()) for line in lines])
def loadLinesFromFile(path):
    """Build a MatchFirst with one named literal per line of *path*."""
    with open(path) as f:
        lines = f.read().splitlines()
    return MatchFirst([named(line.strip()) for line in lines])
|
UTF-8
|
Python
| false | false | 2,014 |
3,667,902,074,964 |
7a069d00737202d83d6306fe967c57488fdf52e0
|
8776f6b1a6d226b313941fecec1efda4f5a87abf
|
/dormserv/kitchen/views.py
|
c55add4729e9bbee2bd3dbf1cd8bf40a3a1822ca
|
[] |
no_license
|
dsethan/tripping-dangerzone
|
https://github.com/dsethan/tripping-dangerzone
|
82c0676428492d64547266aae4537cf8d05c4fed
|
5dfeaa5c774a047b9ff3c8673946319678bb0c4c
|
refs/heads/master
| 2021-01-23T12:11:26.227419 | 2014-10-14T02:15:35 | 2014-10-14T02:15:35 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.shortcuts import render
from dispatch.models import Dispatch, DispatchOrder
from django.http import HttpResponse, HttpResponseRedirect
from django.template import RequestContext
from django.shortcuts import render_to_response
import django.contrib.auth
from django.contrib.auth import authenticate, login
from django.contrib.auth import logout
from django.contrib.auth.decorators import login_required
from dispatch.models import Dispatch, DispatchOrder
import datetime
from datetime import date, time, timedelta
from orders.models import Order, OrderItem
from cal.models import Entry
from item.models import Item
from django.utils import timezone
from restaurants.models import Restaurant
from googlemaps import GoogleMaps
from users.models import User, UserProfile
from drivers.models import DriverProfile
from users.forms import UserForm
from time import sleep
import random
def kitchen(request):
    """Staff-only view that groups yesterday's dispatch orders by dispatch.

    Non-staff users get a plain "No permission" response.
    """
    context = RequestContext(request)
    user = request.user
    if not user.is_staff:
        return HttpResponse("No permission")
    # NOTE(review): variable was named `today` but holds yesterday's date —
    # presumably intentional (show the previous day's dispatches); confirm.
    target_date = datetime.date.today() - datetime.timedelta(days=1)
    dispatches = {}
    for order in DispatchOrder.objects.all():
        if order.dispatch.date != target_date:
            continue
        dispatches.setdefault(order.dispatch, []).append(order)
    return render_to_response(
        'kitchen.html',
        {'dispatches': dispatches},
        context)
# Create your views here.
|
UTF-8
|
Python
| false | false | 2,014 |
8,675,833,967,730 |
9d9dbddb0a7beeb853aee5b190730a73df6468c5
|
2e09701aca3c8ac748ca1967ba10528a74ac4e55
|
/apps/hypervisor/views.py
|
fd3a238542116c3ae8dd168491d51e73a9afa6bb
|
[] |
no_license
|
cloudcache/kontrolvm
|
https://github.com/cloudcache/kontrolvm
|
f681330b0aa8ac3900f603881d2c2588bd2f3639
|
251a5503c05b3c40664a0b97d5292c121c97522d
|
refs/heads/master
| 2021-01-18T04:03:30.543431 | 2012-12-21T00:31:39 | 2012-12-21T00:31:39 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.template import RequestContext, loader
from django.shortcuts import get_object_or_404, render_to_response, redirect
from django.contrib.admin.views.decorators import staff_member_required
from django.http import HttpResponse, Http404
from apps.hypervisor.models import Hypervisor
from apps.hypervisor.forms import HypervisorForm
from django.contrib import messages
import persistent_messages
import simplejson
@staff_member_required
def index(request):
    """Render the hypervisor overview page with all known hypervisors."""
    return render_to_response(
        'hypervisor/index.html',
        {'hypervisors': Hypervisor.objects.all()},
        context_instance=RequestContext(request))
@staff_member_required
def add(request):
    """Show the add-hypervisor form; create the hypervisor on a valid POST."""
    form = HypervisorForm()
    if request.method == "POST":
        form = HypervisorForm(request.POST)
        if form.is_valid():
            data = form.cleaned_data
            (hypervisor, created) = Hypervisor.objects.get_or_create(
                name=data['name'],
                location=data['location'],
                address=data['address'],
                timeout=data['timeout'],
                node_address=data['node_address'],
                install_medium_path=data['install_medium_path']
            )
            if created:
                hypervisor.save()
            return redirect('/hypervisor/')
    # GET, or invalid POST: (re-)render the form with any validation errors.
    return render_to_response(
        'hypervisor/add.html',
        {'form': form},
        context_instance=RequestContext(request))
@staff_member_required
def edit(request):
    """AJAX endpoint that updates a single editable field on a Hypervisor.

    Expects POST keys 'pk' (object id), 'name' (field to edit) and 'value'.
    Responds with an empty JSON object on success, 404 otherwise.
    """
    if not (request.is_ajax() and request.method == 'POST'):
        raise Http404
    payload = request.POST
    # Whitelist of fields that may be edited through this endpoint.
    editable_fields = ('name', 'status', 'location', 'address', 'node_address')
    try:
        hypervisor = Hypervisor.objects.get(pk=payload['pk'])
        # Remember the original name for the audit message (it may itself change).
        orig_name = hypervisor.name
        field = payload['name']
        if field not in editable_fields:
            raise Http404
        orig_value = getattr(hypervisor, field)
        setattr(hypervisor, field, payload['value'])
        hypervisor.save()
        messages.add_message(request, persistent_messages.SUCCESS,
            'Changed Hypervisor %s %s from %s to %s' % (orig_name, payload['name'], orig_value, payload['value']))
    except Hypervisor.DoesNotExist:
        raise Http404
    return HttpResponse('{}', mimetype="application/json")
@staff_member_required
def start(request, pk):
    """Start the given hypervisor, record a success message, and redirect."""
    hv = get_object_or_404(Hypervisor, pk=pk)
    hv.start()
    messages.add_message(request, persistent_messages.SUCCESS,
        'Started Hypervisor %s' % (hv))
    return redirect('/hypervisor/')
@staff_member_required
def stop(request, pk):
    """Stop the given hypervisor, record a success message, and redirect."""
    hv = get_object_or_404(Hypervisor, pk=pk)
    hv.stop()
    messages.add_message(request, persistent_messages.SUCCESS,
        'Stopped Hypervisor %s' % (hv))
    return redirect('/hypervisor/')
@staff_member_required
def delete(request, pk):
    """Delete the given hypervisor and return to the overview page."""
    get_object_or_404(Hypervisor, pk=pk).delete()
    return redirect('/hypervisor/')
@staff_member_required
def update(request, pk):
    """Force-refresh the hypervisor's connection, then redirect to the overview.

    get_connection(True) is called purely for its side effect (presumably a
    reconnect — TODO confirm against Hypervisor.get_connection); the returned
    connection object was previously bound to an unused local `conn`.
    """
    hypervisor = get_object_or_404(Hypervisor, pk=pk)
    hypervisor.get_connection(True)  # side effect only; result intentionally discarded
    return redirect('/hypervisor/')
|
UTF-8
|
Python
| false | false | 2,012 |
7,825,430,415,645 |
6a89898f705be7d225a81ecc83252fa8f391d5f2
|
d3bdc8bb02d14ff94a15ea89bacfefc42dbd2b77
|
/madz/language/c/language.py
|
bc5f02e71f7e4ddce477d614a94f355e231d19b5
|
[] |
no_license
|
OffByOneStudios/massive-dangerzone
|
https://github.com/OffByOneStudios/massive-dangerzone
|
f51aebd6186f4bc14cb43e4e484de49d25eea85d
|
b3fd3ebb4f63957c0cb6a9f1577d8556dc554bda
|
refs/heads/master
| 2020-03-30T00:11:52.864060 | 2014-09-23T19:58:08 | 2014-09-23T19:58:08 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""language.py
@OffbyoneStudios 2013
The language object pulling togeather all of the pieces needed for a plugin of this language.
"""
import os
import glob
import re
from ...compiler import mingw_compiler, cl_compiler, clang_compiler
from ...config import *
from ...fileman import *
from .._base import language
from .._base.compiler import NewCompilerWrapper
from . import clean
from . import compiler_gcc, compiler_mingw, compiler_clang, compiler_cl
from . import wrapgen
class LanguageC(language.BaseLanguage):
    """Language object for C."""

    # Compilers compatible with C and MADZ, keyed by the name used in config.
    # BUGFIX: this dict previously opened with a bare triple-quoted string;
    # Python's implicit string-literal concatenation fused it with the
    # following "gcc" key, so the GCC entry was actually registered under
    # 'List of compatible compilers with C and MADZ.gcc' and looking up the
    # default compiler "gcc" raised KeyError.
    compilers = {
        "gcc": compiler_gcc.GCCCompiler,
        "mingw": NewCompilerWrapper(mingw_compiler.MingwCompiler),
        "clang": NewCompilerWrapper(clang_compiler.ClangCompiler),
        "cl": NewCompilerWrapper(cl_compiler.ClCompiler),
    }
    default_compiler = "gcc"

    def get_language_name(self):
        """Return the name of the language."""
        return "c"

    def make_cleaner(self):
        """Create the cleaner object."""
        return clean.Cleaner(self)

    def make_builder(self):
        """Create the builder object (the configured compiler)."""
        return self.get_compiler()

    def make_wrapper(self):
        """Create the wrapper-generator object."""
        return wrapgen.WrapperGenerator(self)

    @property
    def wrap_directory(self):
        """Directory holding the generated C wrapper sources."""
        return self.plugin_stub.directory.madz().dir("c", ".wrap-c")

    @property
    def build_directory(self):
        """Directory holding C build artifacts."""
        return self.plugin_stub.directory.madz().dir("c", ".build-c")

    def get_c_header_filename(self):
        """Path to the generated madz header."""
        return self.wrap_directory.file("madz.h")

    def get_c_code_filename(self):
        """Path to the generated C glue code."""
        return self.wrap_directory.file("_madz.c")

    def get_internal_source_files(self):
        """List of internal (generated) C source files."""
        return [self.get_c_code_filename()]

    def get_debug_files(self):
        """List of debug data files (files with a .pdb extension)."""
        return self.build_directory.list(["pdb"])

    def get_source_files(self):
        """List of the plugin's own C source files."""
        return self.plugin_stub.directory.list(["c"])
|
UTF-8
|
Python
| false | false | 2,014 |
9,234,179,718,813 |
29ff4f9ea95337ab4dd70aaa33182ca4b33a1297
|
4569d707a4942d3451f3bbcfebaa8011cc5a128d
|
/cryptoplugin/trunk/crypto/tests/web_ui.py
|
0eb4df6b73ca6f77f386a9d5f317d31a6d38238f
|
[
"BSD-3-Clause",
"GPL-1.0-or-later",
"LicenseRef-scancode-pycrypto"
] |
non_permissive
|
woochica/trachacks
|
https://github.com/woochica/trachacks
|
28749b924c897747faa411876a3739edaed4cff4
|
4fcd4aeba81d734654f5d9ec524218b91d54a0e1
|
refs/heads/master
| 2021-05-30T02:27:50.209657 | 2013-05-24T17:31:23 | 2013-05-24T17:31:23 | 13,418,837 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Steffen Hoffmann <[email protected]>
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
import shutil
import tempfile
import unittest
from trac.test import EnvironmentStub, Mock
from trac.web.chrome import Chrome
from crypto.web_ui import CommonTemplateProvider, UserCryptoPreferences
class CommonTemplateProviderTestCase(unittest.TestCase):
    """Exercise CommonTemplateProvider behavior through a concrete subclass."""

    def setUp(self):
        self.env = EnvironmentStub(enable=['trac.*', 'crypto.*'])
        self.env.path = tempfile.mkdtemp()
        # CommonTemplateProvider is abstract, test it using a subclass.
        self.crypto_up = UserCryptoPreferences(self.env)

    def tearDown(self):
        shutil.rmtree(self.env.path)

    def test_template_dir_added(self):
        providers = Chrome(self.env).template_providers
        self.assertTrue(self.crypto_up in providers)
def test_suite():
    """Collect this module's tests into a suite.

    BUGFIX: previously referenced a nonexistent `CryptoTemplateProviderTestCase`
    (NameError at runtime); the class defined above is
    `CommonTemplateProviderTestCase`.
    """
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(CommonTemplateProviderTestCase, 'test'))
    return suite
if __name__ == '__main__':
    # Run the explicitly assembled suite rather than default test discovery.
    unittest.main(defaultTest='test_suite')
|
UTF-8
|
Python
| false | false | 2,013 |
18,098,992,190,689 |
a9191d5dc4a0ef11c5fa26e8b443888f7dbe3d32
|
91d38104cb7aeb06daec1af1fb78c1e359434104
|
/flying-club.appspot.com/conf.py
|
c2d68b3ede3a0a27e15a2068332d0a26f97143b6
|
[
"GPL-2.0-only"
] |
non_permissive
|
freeflightsim/fg-flying-club
|
https://github.com/freeflightsim/fg-flying-club
|
1271a4def36c7b416faa03839f80af4d997919e2
|
4ae873da5002ab3ea39b3292cabd77fc61d3ffce
|
refs/heads/master
| 2020-05-31T10:53:42.939809 | 2010-04-24T00:55:24 | 2010-04-24T00:55:24 | 625,848 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
import os

# App Engine's dev server identifies itself with a SERVER_SOFTWARE value
# starting with "Devel" — use that to switch between local and production.
if os.environ.get('SERVER_SOFTWARE','').startswith('Devel'):
    DEBUG = True
    SERVER = 'http://localhost:8080'
else:
    DEBUG = False
    SERVER = 'http://flying-club.freeflightsim.org'

# Core application identifiers.
APP_ID = 'flying-club'
EMAIL = '[email protected]'
# RPX (Janrain) authentication API key.
RPX_API_KEY = '76e64fe2ffbcd37e983f1826d7add3151943be45'

# Site branding snippet (HTML).
tm = "<span class='tm'>FreeFlightSim</span>" #®
SITE_TITLE = "FlightGear Flying Club"
# Google Code project used for the issue tracker feed below.
GOOGLE_PROJECT = "freeflightsim"
ISSUES_FEED = 'http://code.google.com/feeds/issues/p/freeflightsim/issues/full'
## Location of Javascript libs etc on CDN
CDN = 'http://ffs-cache.appspot.com'
# Public Google Calendar rendered from the club email address.
CAL_URL = 'http://www.google.com/calendar/render?cid=%s' % EMAIL
# strftime/strptime format matching MySQL's DATETIME column format.
MYSQL_DATETIME = '%Y-%m-%d %H:%M:%S'

## Langs - TODO
"""
langs = [ {'code': 'En', 'label': 'English'},
          {'code': 'Fi', 'label': 'French'},
          {'code': 'Es', 'label': 'Spanish'},
          {'code': 'De', 'label': 'German'}
        ]
"""
|
UTF-8
|
Python
| false | false | 2,010 |
9,929,964,418,662 |
1f3b5c05a4b9cc2d7bedda25023ca861051ce086
|
f902802dc3fb35e4532cb2c255d4c7544f0e75ad
|
/spell.py
|
7882fb03b3919d5c69b5084acc0b57e219b22133
|
[] |
no_license
|
PlumpMath/Warlocks
|
https://github.com/PlumpMath/Warlocks
|
1a3a99a337460d2d1bfb2cb76b7552a64fc7f1b8
|
5ab65e0529cf9e4235e335f295c12ce399418de4
|
refs/heads/master
| 2021-01-25T06:55:38.740105 | 2012-02-09T12:04:52 | 2012-02-09T12:04:52 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
class Spell:
    """Plain data container describing a castable spell's parameters."""

    def __init__(self):
        """Initialise every attribute to its inert default."""
        # All numeric tuning values start at zero.
        for attr in ("damage", "target_knockback", "self_knockback",
                     "range", "speed", "aoe_range", "casting_time"):
            setattr(self, attr, 0)
        # Boolean behaviour flags.
        self.aoe = False
        self.targeting = False
        self.interruptable = False
        # Default visual model for the spell projectile.
        self.model = "media/spells/blockyball"
|
UTF-8
|
Python
| false | false | 2,012 |
19,009,525,253,235 |
3a4c3780e5f40c5662a5a5693eb3804dd091a4ac
|
2b42b40ae2e84b438146003bf231532973f1081d
|
/spec/mgm4456368.3.spec
|
1e60a201bb1a10a5eff9194878aafb4c9890cfe1
|
[] |
no_license
|
MG-RAST/mtf
|
https://github.com/MG-RAST/mtf
|
0ea0ebd0c0eb18ec6711e30de7cc336bdae7215a
|
e2ddb3b145068f22808ef43e2bbbbaeec7abccff
|
refs/heads/master
| 2020-05-20T15:32:04.334532 | 2012-03-05T09:51:49 | 2012-03-05T09:51:49 | 3,625,755 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
{
"id": "mgm4456368.3",
"metadata": {
"mgm4456368.3.metadata.json": {
"format": "json",
"provider": "metagenomics.anl.gov"
}
},
"providers": {
"metagenomics.anl.gov": {
"files": {
"100.preprocess.info": {
"compression": null,
"description": null,
"size": 736,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456368.3/file/100.preprocess.info"
},
"100.preprocess.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 548080,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456368.3/file/100.preprocess.passed.fna.gz"
},
"100.preprocess.passed.fna.stats": {
"compression": null,
"description": null,
"size": 310,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456368.3/file/100.preprocess.passed.fna.stats"
},
"100.preprocess.removed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 9108,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456368.3/file/100.preprocess.removed.fna.gz"
},
"100.preprocess.removed.fna.stats": {
"compression": null,
"description": null,
"size": 307,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456368.3/file/100.preprocess.removed.fna.stats"
},
"205.screen.h_sapiens_asm.info": {
"compression": null,
"description": null,
"size": 450,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456368.3/file/205.screen.h_sapiens_asm.info"
},
"299.screen.info": {
"compression": null,
"description": null,
"size": 410,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456368.3/file/299.screen.info"
},
"299.screen.passed.fna.gcs": {
"compression": null,
"description": null,
"size": 1617,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456368.3/file/299.screen.passed.fna.gcs"
},
"299.screen.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 434779,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456368.3/file/299.screen.passed.fna.gz"
},
"299.screen.passed.fna.lens": {
"compression": null,
"description": null,
"size": 381,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456368.3/file/299.screen.passed.fna.lens"
},
"299.screen.passed.fna.stats": {
"compression": null,
"description": null,
"size": 310,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456368.3/file/299.screen.passed.fna.stats"
},
"440.cluster.rna97.fna.gz": {
"compression": "gzip",
"description": null,
"size": 55553,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456368.3/file/440.cluster.rna97.fna.gz"
},
"440.cluster.rna97.fna.stats": {
"compression": null,
"description": null,
"size": 309,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456368.3/file/440.cluster.rna97.fna.stats"
},
"440.cluster.rna97.info": {
"compression": null,
"description": null,
"size": 947,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456368.3/file/440.cluster.rna97.info"
},
"440.cluster.rna97.mapping": {
"compression": null,
"description": null,
"size": 582117,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456368.3/file/440.cluster.rna97.mapping"
},
"440.cluster.rna97.mapping.stats": {
"compression": null,
"description": null,
"size": 49,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456368.3/file/440.cluster.rna97.mapping.stats"
},
"450.rna.expand.rna.gz": {
"compression": "gzip",
"description": null,
"size": 10694,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456368.3/file/450.rna.expand.rna.gz"
},
"450.rna.sims.filter.gz": {
"compression": "gzip",
"description": null,
"size": 49039,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456368.3/file/450.rna.sims.filter.gz"
},
"450.rna.sims.gz": {
"compression": "gzip",
"description": null,
"size": 1395469,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456368.3/file/450.rna.sims.gz"
},
"900.abundance.function.gz": {
"compression": "gzip",
"description": null,
"size": 4434,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456368.3/file/900.abundance.function.gz"
},
"900.abundance.lca.gz": {
"compression": "gzip",
"description": null,
"size": 38,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456368.3/file/900.abundance.lca.gz"
},
"900.abundance.md5.gz": {
"compression": "gzip",
"description": null,
"size": 6829,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456368.3/file/900.abundance.md5.gz"
},
"900.abundance.ontology.gz": {
"compression": "gzip",
"description": null,
"size": 336,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456368.3/file/900.abundance.ontology.gz"
},
"900.abundance.organism.gz": {
"compression": "gzip",
"description": null,
"size": 9133,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456368.3/file/900.abundance.organism.gz"
},
"900.loadDB.sims.filter.seq": {
"compression": null,
"description": null,
"size": 7831489,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456368.3/file/900.loadDB.sims.filter.seq"
},
"900.loadDB.source.stats": {
"compression": null,
"description": null,
"size": 89,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456368.3/file/900.loadDB.source.stats"
},
"999.done.COG.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456368.3/file/999.done.COG.stats"
},
"999.done.KO.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456368.3/file/999.done.KO.stats"
},
"999.done.NOG.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456368.3/file/999.done.NOG.stats"
},
"999.done.Subsystems.stats": {
"compression": null,
"description": null,
"size": 16,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456368.3/file/999.done.Subsystems.stats"
},
"999.done.class.stats": {
"compression": null,
"description": null,
"size": 336,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456368.3/file/999.done.class.stats"
},
"999.done.domain.stats": {
"compression": null,
"description": null,
"size": 28,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456368.3/file/999.done.domain.stats"
},
"999.done.family.stats": {
"compression": null,
"description": null,
"size": 1006,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456368.3/file/999.done.family.stats"
},
"999.done.genus.stats": {
"compression": null,
"description": null,
"size": 1065,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456368.3/file/999.done.genus.stats"
},
"999.done.order.stats": {
"compression": null,
"description": null,
"size": 493,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456368.3/file/999.done.order.stats"
},
"999.done.phylum.stats": {
"compression": null,
"description": null,
"size": 114,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456368.3/file/999.done.phylum.stats"
},
"999.done.rarefaction.stats": {
"compression": null,
"description": null,
"size": 22716,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456368.3/file/999.done.rarefaction.stats"
},
"999.done.sims.stats": {
"compression": null,
"description": null,
"size": 80,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456368.3/file/999.done.sims.stats"
},
"999.done.species.stats": {
"compression": null,
"description": null,
"size": 2796,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456368.3/file/999.done.species.stats"
}
},
"id": "mgm4456368.3",
"provider": "metagenomics.anl.gov",
"providerId": "mgm4456368.3"
}
},
"raw": {
"mgm4456368.3.fna.gz": {
"compression": "gzip",
"format": "fasta",
"provider": "metagenomics.anl.gov",
"url": "http://api.metagenomics.anl.gov/reads/mgm4456368.3"
}
}
}
|
UTF-8
|
Python
| false | false | 2,012 |
8,710,193,702,935 |
980c9e646f15adb784bb24c80f9d522719d131c9
|
f97d126de5c549cfded372228979e4d182e2fd72
|
/src/words.py
|
cc812e89a5273186ce14b37fccc4d75e928ef664
|
[
"GPL-3.0-only"
] |
non_permissive
|
luckyluke/rezzlu
|
https://github.com/luckyluke/rezzlu
|
7efc14c462d528e9f3a3ea59c9f29845f7924907
|
c37f537c95c41c6a200822bb025a8f58b03d57de
|
refs/heads/master
| 2016-09-07T18:40:45.624880 | 2013-12-29T21:44:29 | 2013-12-29T21:44:29 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#
# Copyright (C) 2013 Luca Dariz
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import rezzlu_game
class SchemaManager(object):
    """Configures the native game engine and produces GameManager instances."""

    cfg = None  # native game_config_t, set by do_config()

    def do_config(self, cfg):
        """Translate the app-level config into a native game_config_t."""
        native = rezzlu_game.game_config_t()
        native.rows = cfg.rows
        native.cols = cfg.columns
        native.solve_all = int(cfg.solve_all)
        self.cfg = native
        self.load_dict(cfg.lang)

    def get_timer(self):
        """Seconds allotted for a game: 120 for a 16-cell board, else 180."""
        return 120 if self.cfg.rows * self.cfg.cols == 16 else 180

    def get_schema(self):
        """Allocate and generate a fresh game schema via the native engine."""
        #self.wdict.build_stats() # TODO
        game = rezzlu_game.game_alloc(self.cfg)
        rezzlu_game.game_gen(game)
        return game

    def solve_schema(self, schema, progress_cb=None):
        """Solve *schema* with the native solver; progress_cb is currently unused."""
        solution = rezzlu_game.solution_t()
        rezzlu_game.solve_game(schema, solution)
        return solution

    def load_dict(self, lang):
        """Load the word dictionary for *lang* and attach it to the config."""
        self.wdict = rezzlu_game.load_dict('../dicts/'+lang+'.dict\0')
        self.cfg.dict = self.wdict

    def get_gm(self, progress_cb=None):
        """Build a GameManager populated with a fresh schema, bonuses and values."""
        gm = GameManager()
        gm.schema = self.get_schema()
        # Bonus name -> (row, col) cell position on the board.
        gm.bonus = {'dw': (1, 0),
                    'dc': (3, 2),
                    'tw': (1, 1),
                    'tc': (2, 1)}
        gm.rows = self.cfg.rows
        gm.cols = self.cfg.cols
        gm.cvalues = self.get_values()
        if self.cfg.solve_all:
            gm.sol = self.solve_schema(gm.schema, progress_cb)
        else:
            gm.wdict = self.wdict
        return gm

    def get_values(self):
        """Per-character score values; still a stub (TODO)."""
        # minv = 1/max(self.wdict.stats)
        # for k, v in self.wdict.stats.iteritems():
        #     vals[k] = int(1/(v*minv))
        return {}
class GameManager(object):
    """Game management logic: board schema, bonuses, found words and score.

    Word selection/validation has been delegated to the native ``rezzlu_game``
    module; the commented-out Python implementations are kept for reference.
    """
    def __init__(self):
        self._tmpw = []    # legacy: word being selected (now handled natively)
        self.schema = []   # native game schema (set by SchemaManager.get_gm)
        self.bonus = {}    # bonus name -> (row, col) cell
        self.rows = 0
        self.cols = 0
        self.sol = []      # precomputed solution (when solve_all is enabled)
        self.wdict = None  # word dictionary (when solve_all is disabled)
        self.cvalues = {}  # per-character score values
        self.found = []    # words already found by the player
        self.score = 0
        self.gstat = rezzlu_game.game_status_t()
    def get_cell_char(self, row, col):
        # Look up the character at (row, col) in the native schema.
        return rezzlu_game.char_get(self.schema.ch, row, col)
    def put_char(self, char):
        """Return the currently selected word after adding *char*."""
        # if self._tmpw:
        #     # stepped back by one letter
        #     if len(self._tmpw) > 1 and char is self._tmpw[-2]:
        #         self._tmpw = self._tmpw[:-1]
        #         return self._tmpw
        #     # letter already selected
        #     elif char in self._tmpw:
        #         return self._tmpw
        #     # letter too far from the last one
        #     lastc = self._tmpw[-1]
        #     if abs(lastc.row - char.row) > 1 or abs(lastc.col - char.col) > 1:
        #         return self._tmpw
        #     self._tmpw.append(char)
        # NOTE(review): passes the literal 'c' instead of `char` — looks like
        # a placeholder/bug; confirm against rezzlu_game.game_put_char.
        return rezzlu_game.game_put_char(self.gstat, 'c')
        # return self._tmpw
    def stop_word(self):
        """Finish the current word selection and return it."""
        # word = self._tmpw
        # self._tmpw = []
        # return word
        return rezzlu_game.game_stop_word(self.gstat)
    def check_word(self, word, path):
        """Check *word* against the dictionary/solution; *path* is unused here."""
        # word = word.lower()
        # if (word not in self.found):
        #     if (self.sol and word in [w for w, path in self.sol]) or\
        #        (self.wdict and self.wdict.find(word) == 0):
        #         print "Found", word, "!!!!!!"
        #         self.found.append(word)
        #         self.score += self.calc_score(word, path)
        #         return True
        # else:
        #     return False
        return rezzlu_game.game_check_word(self.gstat, word)
    def calc_score(self, word, path):
        """Score *word* traced along *path*, applying word and char bonuses.

        'dw'/'tw' double/triple the whole word; 'dc'/'tc' add extra value for
        the character landing on that cell (counted once per occurrence).
        """
        mul = 1
        summ = 1
        if self.bonus['dw'] in path:
            mul *= 2
        if self.bonus['tw'] in path:
            mul *= 3
        if self.bonus['dc'] in path:
            dci = path.index(self.bonus['dc'])
            dcc = path.count(self.bonus['dc'])
            summ += self.cvalues[word[dci]]*dcc
        if self.bonus['tc'] in path:
            tci = path.index(self.bonus['tc'])
            tcc = path.count(self.bonus['tc'])
            summ += 2*self.cvalues[word[tci]]*tcc
        return mul*(sum([self.cvalues[c] for c in word]) + summ)
|
UTF-8
|
Python
| false | false | 2,013 |
11,055,245,851,732 |
d3701ffd458151231aee93dc8362e71c1246620e
|
b1fc997484ea9c3b2be6b7151ddb6600821daea9
|
/UbiGraph-alpha-0.2.4-Linux64-Ubuntu-8.04/examples/Python/example.py
|
bf273cec1d007c687739ab3ddaa14001f9c56857
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
non_permissive
|
JLiangWaterloo/sat
|
https://github.com/JLiangWaterloo/sat
|
0e76f055fc9cd38e850f448ea49f9e730a423a46
|
df5f09cda06c8ab70ff9eacf098d8ce6bb9b91fc
|
refs/heads/master
| 2021-01-21T12:43:29.168066 | 2013-11-29T16:08:50 | 2013-11-29T16:08:50 | 13,397,989 | 2 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import xmlrpclib

# Connect to a locally running Ubigraph visualization server (Python 2 / XML-RPC).
server_url = 'http://127.0.0.1:20738/RPC2'
server = xmlrpclib.Server(server_url)
server.ubigraph.clear()

# Build a 10-vertex ring: create the vertices 0..9 ...
for vertex_id in range(0, 10):
    server.ubigraph.new_vertex_w_id(vertex_id)

# ... then join each vertex to its successor (9 wraps around to 0).
for vertex_id in range(0, 10):
    server.ubigraph.new_edge(vertex_id, (vertex_id + 1) % 10)
|
UTF-8
|
Python
| false | false | 2,013 |
13,975,823,582,927 |
4bc3e25fdad938a6ba1c665b39e9bc9a362052fc
|
9bc7235645199a7c8e38e2f2c90cfe70f5c1293d
|
/juego/juego/apps/usuario/forms.py
|
62b9fddb541824391c59d7f240e3cf0d4e1e1689
|
[] |
no_license
|
gabynaty/proyecto
|
https://github.com/gabynaty/proyecto
|
9d0a5b07021282c1863865bca832751912fc3621
|
5251b9e678e07de72aa1716c050dddd2d1022892
|
refs/heads/master
| 2021-01-20T20:57:27.365806 | 2014-10-08T04:58:39 | 2014-10-08T04:58:39 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#encoding:utf-8
from django import forms
from django.contrib.auth.models import User
from django.forms import ModelForm
from django.contrib.auth.forms import UserCreationForm
#class UsuarioForm(forms.Form):
# class Meta:
# form=User
# exclude=["username"]
class fusuario(UserCreationForm):
    """Registration form extending UserCreationForm with name and email fields."""

    username = forms.CharField(max_length=40, required=True, help_text=False, label="Nick")
    password2 = forms.CharField(help_text=False, label="Contraseña de confirmación", widget=forms.PasswordInput)
    first_name = forms.CharField(max_length=50, required=True, label="Nombre")
    last_name = forms.CharField(max_length=50, required=True, label="Apellido")
    email = forms.EmailField(max_length=100, required=True, label="Email")

    class Meta:
        model = User
        fields = ("username", "password1", "password2", "first_name", "last_name", "email")

    def save(self, commit=True):
        """Copy the extra cleaned fields onto the user before saving."""
        user = super(fusuario, self).save(commit=False)
        cleaned = self.cleaned_data
        user.first_name = cleaned.get("first_name")
        user.last_name = cleaned.get("last_name")
        user.email = cleaned.get("email")
        if commit:
            user.save()
        return user
|
UTF-8
|
Python
| false | false | 2,014 |
12,644,383,768,289 |
fc6ebc967a14a17422c984469a77e51c525fb6a8
|
8b599c4bc4e0b01ac04732731eaffab11704de53
|
/pulp_rpm/test/unit/server/test_resolve_dependencies.py
|
9925c93b0d8b6e7e09e371f8518af0f1d3964ccc
|
[] |
no_license
|
jwmatthews/pulp_rpm
|
https://github.com/jwmatthews/pulp_rpm
|
414350b360ae83c00385709d577fd31213f4cfc4
|
81643aaaa084575f4e651d3de75a86a9d31a8f49
|
refs/heads/master
| 2021-01-17T06:35:14.134732 | 2013-03-05T21:39:55 | 2013-03-05T21:39:55 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
#
# Copyright (c) 2012 Red Hat, Inc.
#
#
# This software is licensed to you under the GNU General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (GPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of GPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
import mock
import os
import shutil
import sys
import tempfile
import time
import unittest
import itertools
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)) + "/../../../plugins/importers/")
import importer_mocks
import constants
from yum_importer.importer import YumImporter
from pulp_rpm.yum_plugin import util
from pulp.plugins.model import Repository, Unit
from yum_importer.importer_rpm import RPM_TYPE_ID
import rpm_support_base
class TestResolveDeps(rpm_support_base.PulpRPMTests):
    """Integration test for YumImporter.resolve_dependencies.

    Seeds the conduit with two known RPM units (pulp-server and
    pulp-rpm-server) and checks dependency resolution between them.
    """
    def setUp(self):
        super(TestResolveDeps, self).setUp()
        # Scratch directories for the importer's working and package storage.
        self.temp_dir = tempfile.mkdtemp()
        self.working_dir = os.path.join(self.temp_dir, "working")
        self.pkg_dir = os.path.join(self.temp_dir, "packages")
        self.data_dir = os.path.abspath(os.path.join(os.path.abspath(os.path.dirname(__file__)), "../data"))
    def tearDown(self):
        super(TestResolveDeps, self).tearDown()
        self.clean()
    def clean(self):
        """Remove the temp dir plus yum's repostorage leftovers."""
        shutil.rmtree(self.temp_dir)
        # clean up dir created by yum's repostorage
        if os.path.exists("./test_resolve_deps"):
            shutil.rmtree("test_resolve_deps")
        if os.path.exists("/tmp/test_resolve_deps"):
            shutil.rmtree("/tmp/test_resolve_deps")
    def test_resolve_deps(self):
        """pulp-rpm-server should resolve exactly one dependency (pulp-server)."""
        repo = mock.Mock(spec=Repository)
        repo.working_dir = "/tmp/test_resolve_deps"
        repo.id = "test_resolve_deps"
        # Unit A: the dependency target (pulp-server).
        unit_key_a = {'id' : '','name' :'pulp-server', 'version' :'0.0.309', 'release' : '1.fc17', 'epoch':'0', 'arch' : 'noarch', 'checksumtype' : 'sha256',
                      'checksum': 'ee5afa0aaf8bd2130b7f4a9b35f4178336c72e95358dd33bda8acaa5f28ea6e9', 'type_id' : 'rpm'}
        unit_key_a_obj = Unit(RPM_TYPE_ID, unit_key_a, {}, '')
        unit_key_a_obj.metadata = constants.PULP_SERVER_RPM_METADATA
        # Unit B: the unit whose dependencies are resolved (pulp-rpm-server).
        unit_key_b = {'id' : '', 'name' :'pulp-rpm-server', 'version' :'0.0.309', 'release' :'1.fc17', 'epoch':'0','arch' : 'noarch', 'checksumtype' :'sha256',
                      'checksum': '1e6c3a3bae26423fe49d26930b986e5f5ee25523c13f875dfcd4bf80f770bf56', 'type_id' : 'rpm', }
        unit_key_b_obj = Unit(RPM_TYPE_ID, unit_key_b, {}, '')
        unit_key_b_obj.metadata = constants.PULP_RPM_SERVER_RPM_METADATA
        existing_units = []
        for unit in [unit_key_a_obj, unit_key_b_obj]:
            existing_units.append(unit)
        conduit = importer_mocks.get_dependency_conduit(type_id=RPM_TYPE_ID, existing_units=existing_units, pkg_dir=self.pkg_dir)
        config = importer_mocks.get_basic_config()
        importer = YumImporter()
        units = [Unit(RPM_TYPE_ID, unit_key_b, {}, '')]
        result = importer.resolve_dependencies(repo, units, conduit, config)
        # Exactly one resolved dependency (pulp-server) and nothing unresolved.
        self.assertEqual(len(list(itertools.chain(*result['resolved'].values()))), 1)
        self.assertEqual(len(list(itertools.chain(*result['unresolved'].values()))), 0)
|
UTF-8
|
Python
| false | false | 2,013 |
14,963,666,105,157 |
b21e5a1dfbfff54ebfb5688d8363abf9762ec98b
|
bc543ca413d627e35da81494ff9cd4d1db0849b5
|
/pagination/tests.py
|
8637b638b8b2e59cc4915e9dc033418ee9589769
|
[
"BSD-3-Clause"
] |
permissive
|
paxapy/django-clean-pagination
|
https://github.com/paxapy/django-clean-pagination
|
36210f2077c8248ee95b0b2f6e222c277bcca14f
|
e189c30ad629a7077a1e8ff46860c21a54be3afb
|
refs/heads/master
| 2021-01-18T08:52:46.408072 | 2013-05-18T20:21:45 | 2013-05-18T20:21:45 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
>>> from django.core.paginator import Paginator
>>> from pagination.templatetags.pagination_tags import paginate
>>> from django.template import Template, Context
>>> p = Paginator(range(15), 2)
>>> pg = paginate({'paginator': p, 'page_obj': p.page(1)})
>>> pg['pages']
[1, 2, 3, 4, 5, 6, 7, 8]
>>> pg['records']['first']
1
>>> pg['records']['last']
2
>>> p = Paginator(range(15), 2)
>>> pg = paginate({'paginator': p, 'page_obj': p.page(8)})
>>> pg['pages']
[1, 2, 3, 4, 5, 6, 7, 8]
>>> pg['records']['first']
15
>>> pg['records']['last']
15
>>> p = Paginator(range(17), 2)
>>> paginate({'paginator': p, 'page_obj': p.page(1)})['pages']
[1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> p = Paginator(range(19), 2)
>>> paginate({'paginator': p, 'page_obj': p.page(1)})['pages']
[1, 2, 3, 4, None, 7, 8, 9, 10]
>>> p = Paginator(range(21), 2)
>>> paginate({'paginator': p, 'page_obj': p.page(1)})['pages']
[1, 2, 3, 4, None, 8, 9, 10, 11]
# Testing orphans
>>> p = Paginator(range(5), 2, 1)
>>> paginate({'paginator': p, 'page_obj': p.page(1)})['pages']
[1, 2]
>>> p = Paginator(range(21), 2, 1)
>>> pg = paginate({'paginator': p, 'page_obj': p.page(1)})
>>> pg['pages']
[1, 2, 3, 4, None, 7, 8, 9, 10]
>>> pg['records']['first']
1
>>> pg['records']['last']
2
>>> p = Paginator(range(21), 2, 1)
>>> pg = paginate({'paginator': p, 'page_obj': p.page(10)})
>>> pg['pages']
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
>>> pg['records']['first']
19
>>> pg['records']['last']
21
>>> t = Template("{% load pagination_tags %}{% autopaginate var 2 %}{% paginate %}")
>>> from django.http import HttpRequest as DjangoHttpRequest
>>> class HttpRequest(DjangoHttpRequest):
... page = 1
>>> t.render(Context({'var': range(21), 'request': HttpRequest()}))
u'\\n\\n<div class="pagination">...
>>>
>>> t = Template("{% load pagination_tags %}{% autopaginate var %}{% paginate %}")
>>> t.render(Context({'var': range(21), 'request': HttpRequest()}))
u'\\n\\n<div class="pagination">...
>>> t = Template("{% load pagination_tags %}{% autopaginate var 20 %}{% paginate %}")
>>> t.render(Context({'var': range(21), 'request': HttpRequest()}))
u'\\n\\n<div class="pagination">...
>>> t = Template("{% load pagination_tags %}{% autopaginate var by %}{% paginate %}")
>>> t.render(Context({'var': range(21), 'by': 20, 'request': HttpRequest()}))
u'\\n\\n<div class="pagination">...
>>> t = Template("{% load pagination_tags %}{% autopaginate var by as foo %}{{ foo }}")
>>> t.render(Context({'var': range(21), 'by': 20, 'request': HttpRequest()}))
u'[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]'
"""
|
UTF-8
|
Python
| false | false | 2,013 |
16,870,631,578,907 |
74f95f9d3804b7702c3104678107b1f4bab5debb
|
93f8128c0b187cb4aeaf0d27e046d4e90c91ec71
|
/logic/experiment_builder/imbir/kamil_oddball/kamil_oddball_17_05_2011.py.fixed.py
|
5ea82038ca9c855e69a094d422e7be4e4d0400c7
|
[] |
no_license
|
FreeBCI/openbci
|
https://github.com/FreeBCI/openbci
|
56cedea748d449753daab91382acfc66887515c5
|
c9261199d99d18b09575604d075deb25e38f21a1
|
HEAD
| 2016-09-08T04:11:05.998775 | 2013-08-28T07:35:10 | 2013-08-28T07:35:10 | 12,414,484 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This experiment was created using PsychoPy2 Experiment Builder
If you publish work using this script please cite the relevant PsychoPy publications
Peirce (2007) Journal of Neuroscience Methods 162:8-1
Peirce (2009) Frontiers in Neuroinformatics, 2: 10"""
from numpy import * #many different maths functions
from numpy.random import * #maths randomisation functions
import os #handy system and path functions
from psychopy import core, data, event, visual, gui
import psychopy.log #import like this so it doesn't interfere with numpy.log
#store info about the experiment
expName='None'#from the Builder filename that created this script
expInfo={'participant':'ID01', 'session':001}
dlg=gui.DlgFromDict(dictionary=expInfo,title=expName)
if dlg.OK==False: core.quit() #user pressed cancel
expInfo['date']=data.getDateStr()#add a simple timestamp
expInfo['expName']=expName
#setup files for saving
if not os.path.isdir('data'):
os.makedirs('data')#if this fails (e.g. permissions) we will get error
filename= 'data/%s_%s' %(expInfo['participant'], expInfo['date'])
logFile=open(filename+'.log', 'w')
psychopy.log.console.setLevel(psychopy.log.WARNING)#this outputs to the screen, not a file
#setup the Window
win = visual.Window(size=[1280, 800], fullscr=True, screen=0, allowGUI=False,
monitor='testMonitor', color='white', colorSpace='rgb')
#Initialise components for routine:int1
int1Clock=core.Clock()
dsf=visual.TextStim(win=win, ori=0,
wrapWidth=2.0, bold=True, font='Courier',
text=u'Zapraszamy do udzia\u0142u w badaniu.',
pos=[0, 0], height=0.08,
color='black', colorSpace='rgb')
#Initialise components for routine:int2
int2Clock=core.Clock()
uiuiyhiu=visual.TextStim(win=win, ori=0,
wrapWidth=2.0, bold=True, font='Courier',
text=u' Twoje zadanie b\u0119dzie polega\u0142o na czytaniu i \n ocenianiu s\u0142\xf3w pojawiaj\u0105cych si\u0119 kolejno na \n ekranie. \n Wci\u015bnij klawisz Czerwony zawsze wtedy, \n kiedy prezentowane s\u0142owo b\u0119dzie zwi\u0105zane z \n emocjami, \na klawisz Zielony je\u015bli b\u0119dzie to s\u0142owo neutralne. \n Nie naciskaj \u017cadnego z klawiszy, \n je\u015bli b\u0119dzie to s\u0142owo Drewno. \n Aby przej\u015b\u0107 dalej naci\u015bnij SPACE. ',
pos=[0,0], height=0.08,
color=u'black', colorSpace=u'rgb')
#create our own class to store info from keyboard
class KeyResponse:
    """Container for keyboard-response data collected during one trial."""

    def __init__(self):
        # Nothing recorded yet; the experiment loop fills these in as
        # keyboard events arrive.
        self.keys = []      # key(s) pressed so far
        self.corr = 0       # 1 if the response was correct, else 0
        self.rt = None      # response time in seconds, once measured
        self.clock = None   # clock object used to measure the rt
#Initialise components for routine:int3
int3Clock=core.Clock()
apoidf=visual.TextStim(win=win, ori=0,
wrapWidth=2.0, bold=True, font='Courier',
text=u'Postaraj si\u0119 odpowiada\u0107 jak najszybciej potrafisz.\n Wa\u017cne jest, \u017ceby usi\u0105\u015b\u0107 w wygodnej pozycji z \n lekko przymru\u017conymi oczyma. \n Aby przej\u015b\u0107 dalej naci\u015bnij SPACE. ',
pos=[0, 0], height=0.08,
color='black', colorSpace='rgb')
#Initialise components for routine:int4
int4Clock=core.Clock()
uivgtdv=visual.TextStim(win=win, ori=0,
wrapWidth=2.0, bold=True, font='Courier',
text=u' Skupiaj swoj\u0105 uwag\u0119 na znaku + \n na \u015brodku ekranu. \n \n + \n \n Staraj si\u0119 mruga\u0107 tylko wtedy, \n gdy pojawi si\u0119 taka instrukcja. \nAby przej\u015b\u0107 dalej naci\u015bnij SPACE.',
pos=[0, 0], height=0.08,
color='black', colorSpace='rgb')
#Initialise components for routine:int5
int5Clock=core.Clock()
sodfsdlkj=visual.TextStim(win=win, ori=0,
wrapWidth=2.0, bold=True, font='Courier',
text=u' Wci\u015bnij klawisz Czerwony zawsze wtedy, \n kiedy prezentowane s\u0142owo b\u0119dzie zwi\u0105zane z \n emocjamia, \na klawisz Zielony je\u015bli b\u0119dzie to s\u0142owo neutralne. \n Nie naciskaj \u017cadnego z klawiszy, \n je\u015bli b\u0119dzie to s\u0142owo Drewno. \n Aby przej\u015b\u0107 do sesji treningowej naci\u015bnij SPACE. ',
pos=[0, 0], height=0.08,
color='black', colorSpace='rgb')
#set up handler to look after randomisation of trials etc
trialskjhk=data.TrialHandler(nReps=1, method='sequential', extraInfo=expInfo,
trialList=data.importTrialList('samples.csv'))
thisTrialskjh=trialskjhk.trialList[0]#so we can initialise stimuli with some values
#abbrieviate parameter names if possible (e.g. rgb=thisTrialskjh.rgb)
if thisTrialskjh!=None:
for paramName in thisTrialskjh.keys():
exec(paramName+'=thisTrialskjh.'+paramName)
#Initialise components for routine:test
testClock=core.Clock()
fxxxxx=visual.TextStim(win=win, ori=0,
text='+',
pos=[0, 0], height=0.1,
color='black', colorSpace='rgb')
lkhkkki=visual.TextStim(win=win, ori=0,
text=word,
pos=[0, 0], height=0.1,
color='black', colorSpace='rgb')
#Initialise components for routine:baseline_inst
baseline_instClock=core.Clock()
lkslkdlkdkdkdk=visual.TextStim(win=win, ori=0,
wrapWidth=2.0, bold=True, font='Courier',
text=u'Za chwil\u0119 na minut\u0119 na ekranie pojawi si\u0119 znak +. \nW tym czasie rozlu\u017anij si\u0119 i po prostu patrz na +. \n Po tym rozpocznie si\u0119 w\u0142a\u015bciwy eksperyment. \n Je\u015bli jeste\u015b got\xf3w, naci\u015bnij SPACE. ',
pos=[0, 0], height=0.08,
color='black', colorSpace='rgb')
#Initialise components for routine:baseline
baselineClock=core.Clock()
aaassssss=visual.TextStim(win=win, ori=0,
text='+',
pos=[0, 0], height=0.1,
color='black', colorSpace='rgb')
#Initialise components for routine:test_b
test_bClock=core.Clock()
lkhhguyu=visual.TextStim(win=win, ori=0,
wrapWidth=2.0, bold=True, font='Courier',
text=u' Mrugaj teraz. \nJe\u015bli jeste\u015b gotowy naci\u015bnij SPACE.',
pos=[0, 0], height=0.08,
color='black', colorSpace='rgb')
#Initialise components for routine:int6
int6Clock=core.Clock()
lkhoh=visual.TextStim(win=win, ori=0,
wrapWidth=2.0, bold=True, font='Courier',
text=u'Aby przej\u015b\u0107 do w\u0142a\u015bciwego badania naci\u015bnij SPACE.',
pos=[0, 0], height=0.08,
color='black', colorSpace='rgb')
#set up handler to look after randomisation of trials etc
trials=data.TrialHandler(nReps=1, method='sequential', extraInfo=expInfo,
trialList=data.importTrialList('words.csv'))
thisTrial=trials.trialList[0]#so we can initialise stimuli with some values
#abbrieviate parameter names if possible (e.g. rgb=thisTrial.rgb)
if thisTrial!=None:
for paramName in thisTrial.keys():
exec(paramName+'=thisTrial.'+paramName)
#Initialise components for routine:trial
trialClock=core.Clock()
t_fix=visual.TextStim(win=win, ori=0,
text='+',
pos=[0, 0], height=0.1,
color='black', colorSpace='rgb')
t_word=visual.TextStim(win=win, ori=0,
text=word,
pos=[0, 0], height=0.1,
color='black', colorSpace='rgb')
curr_time = 0
import time
import os
import kamil_consts, kamil_helper
kamil_helper.create_words_file()
trials=data.TrialHandler(nReps=1, method='sequential', extraInfo=expInfo,
trialList=data.importTrialList('words.csv'))
#Initialise components for routine:blink
blinkClock=core.Clock()
xxx=visual.TextStim(win=win, ori=0,
wrapWidth=2.0, bold=True, font='Courier',
text=u' Mrugaj teraz. \n \nJe\u015bli jeste\u015b gotowy naci\u015bnij SPACE.',
pos=[0, 0], height=0.08,
color='black', colorSpace='rgb')
trials_count = 0
trials_to_blink = kamil_consts.BLINK_MAX
after_blink_break = 1
#Initialise components for routine:blink_sth
blink_sthClock=core.Clock()
slsdfdkeifjfk=visual.TextStim(win=win, ori=0,
text='',
pos=[0, 0], height=0.1,
color='white', colorSpace='rgb')
#Initialise components for routine:bye
byeClock=core.Clock()
lksjwpepowkf=visual.TextStim(win=win, ori=0,
wrapWidth=2.0, bold=True, font='Courier',
text=u'Dzi\u0119kuj\u0119 za udzia\u0142 w badaniu.',
pos=[0, 0], height=0.08,
color='black', colorSpace='rgb')
#update component parameters for each repeat
#run the trial
continueInt1=True
t=0; int1Clock.reset()
while continueInt1 and (t<2.0000):
#get current time
t=int1Clock.getTime()
#update/draw components on each frame
if (0.0<= t < (0.0+2.0)):
dsf.draw()
#check for quit (the [Esc] key)
if event.getKeys(["escape"]): core.quit()
event.clearEvents()#so that it doesn't get clogged with other events
#refresh the screen
win.flip()
#end of this routine (e.g. trial)
#update component parameters for each repeat
resplkhlkljlk = KeyResponse()#create an object of type KeyResponse
#run the trial
continueInt2=True
t=0; int2Clock.reset()
while continueInt2 and (t<1000000.0000):
#get current time
t=int2Clock.getTime()
#update/draw components on each frame
if (0.0 <= t):
uiuiyhiu.draw()
if (0.0 <= t):
theseKeys = event.getKeys(keyList="['space']")
if len(theseKeys)>0:#at least one key was pressed
resplkhlkljlk.keys=theseKeys[-1]#just the last key pressed
#abort routine on response
continueInt2=False
#check for quit (the [Esc] key)
if event.getKeys(["escape"]): core.quit()
event.clearEvents()#so that it doesn't get clogged with other events
#refresh the screen
win.flip()
#end of this routine (e.g. trial)
#update component parameters for each repeat
khklhjpj = KeyResponse()#create an object of type KeyResponse
#run the trial
continueInt3=True
t=0; int3Clock.reset()
while continueInt3 and (t<1000000.0000):
#get current time
t=int3Clock.getTime()
#update/draw components on each frame
if (0.0 <= t):
apoidf.draw()
if (0.0 <= t):
theseKeys = event.getKeys(keyList="['space']")
if len(theseKeys)>0:#at least one key was pressed
khklhjpj.keys=theseKeys[-1]#just the last key pressed
#abort routine on response
continueInt3=False
#check for quit (the [Esc] key)
if event.getKeys(["escape"]): core.quit()
event.clearEvents()#so that it doesn't get clogged with other events
#refresh the screen
win.flip()
#end of this routine (e.g. trial)
#update component parameters for each repeat
respiojoi = KeyResponse()#create an object of type KeyResponse
#run the trial
continueInt4=True
t=0; int4Clock.reset()
while continueInt4 and (t<1000000.0000):
#get current time
t=int4Clock.getTime()
#update/draw components on each frame
if (0.0 <= t):
uivgtdv.draw()
if (0.0 <= t):
theseKeys = event.getKeys(keyList="['space']")
if len(theseKeys)>0:#at least one key was pressed
respiojoi.keys=theseKeys[-1]#just the last key pressed
#abort routine on response
continueInt4=False
#check for quit (the [Esc] key)
if event.getKeys(["escape"]): core.quit()
event.clearEvents()#so that it doesn't get clogged with other events
#refresh the screen
win.flip()
#end of this routine (e.g. trial)
#update component parameters for each repeat
resplkdsjwo = KeyResponse()#create an object of type KeyResponse
my_metadata = {'emotional_button':'c','non-emotional_button':'n'}
#run the trial
continueInt5=True
t=0; int5Clock.reset()
while continueInt5 and (t<1000000.0000):
#get current time
t=int5Clock.getTime()
#update/draw components on each frame
if (0.0 <= t):
sodfsdlkj.draw()
if (0.0 <= t):
theseKeys = event.getKeys(keyList="['space']")
if len(theseKeys)>0:#at least one key was pressed
resplkdsjwo.keys=theseKeys[-1]#just the last key pressed
#abort routine on response
continueInt5=False
#check for quit (the [Esc] key)
if event.getKeys(["escape"]): core.quit()
event.clearEvents()#so that it doesn't get clogged with other events
#refresh the screen
win.flip()
#end of this routine (e.g. trial)
for thisTrialskjh in trialskjhk:
#abbrieviate parameter names if possible (e.g. rgb=thisTrialskjh.rgb)
if thisTrialskjh!=None:
for paramName in thisTrialskjh.keys():
exec(paramName+'=thisTrialskjh.'+paramName)
#update component parameters for each repeat
lkhkkki.setText(word)
respkljlkj = KeyResponse()#create an object of type KeyResponse
#run the trial
continueTest=True
t=0; testClock.reset()
while continueTest and (t<3.0000):
#get current time
t=testClock.getTime()
#update/draw components on each frame
if (0.0<= t < (0.0+1.0)):
fxxxxx.draw()
if (1.0<= t < (1.0+1.0)):
lkhkkki.draw()
if (1.0<= t < (1.0+2.0)):
theseKeys = event.getKeys(keyList="['space']")
if len(theseKeys)>0:#at least one key was pressed
respkljlkj.keys=theseKeys[-1]#just the last key pressed
#check for quit (the [Esc] key)
if event.getKeys(["escape"]): core.quit()
event.clearEvents()#so that it doesn't get clogged with other events
#refresh the screen
win.flip()
#end of this routine (e.g. trial)
if len(respkljlkj.keys)>0:#we had a response
trialskjhk.addData('respkljlkj.keys',respkljlkj.keys)
#completed 1 repeats of 'trialskjhk' repeats
trialskjhk.saveAsPickle(filename+'trialskjhk')
trialskjhk.saveAsExcel(filename+'.xlsx', sheetName='trialskjhk',
stimOut=['word', 'group', 'fix_time', ],
dataOut=['n','all_mean','all_std', 'all_raw'])
psychopy.log.info('saved data to '+filename+'.dlm')
#update component parameters for each repeat
respkdlfjsdlkf = KeyResponse()#create an object of type KeyResponse
#run the trial
continueBaseline_inst=True
t=0; baseline_instClock.reset()
while continueBaseline_inst and (t<1000000.0000):
#get current time
t=baseline_instClock.getTime()
#update/draw components on each frame
if (0.0 <= t):
lkslkdlkdkdkdk.draw()
if (0.0 <= t):
theseKeys = event.getKeys(keyList="['space']")
if len(theseKeys)>0:#at least one key was pressed
respkdlfjsdlkf.keys=theseKeys[-1]#just the last key pressed
#abort routine on response
continueBaseline_inst=False
#check for quit (the [Esc] key)
if event.getKeys(["escape"]): core.quit()
event.clearEvents()#so that it doesn't get clogged with other events
#refresh the screen
win.flip()
#end of this routine (e.g. trial)
#update component parameters for each repeat
my_metadata['baseline_start_timestamp'] = repr(time.time())
my_metadata['baseline_length'] = str(60.0)
#run the trial
continueBaseline=True
t=0; baselineClock.reset()
while continueBaseline and (t<60.0000):
#get current time
t=baselineClock.getTime()
#update/draw components on each frame
if (0.0<= t < (0.0+60.0)):
aaassssss.draw()
#check for quit (the [Esc] key)
if event.getKeys(["escape"]): core.quit()
event.clearEvents()#so that it doesn't get clogged with other events
#refresh the screen
win.flip()
#end of this routine (e.g. trial)
#update component parameters for each repeat
resposoewoewoiew = KeyResponse()#create an object of type KeyResponse
#run the trial
continueTest_b=True
t=0; test_bClock.reset()
while continueTest_b and (t<1000000.0000):
#get current time
t=test_bClock.getTime()
#update/draw components on each frame
if (0.0 <= t):
lkhhguyu.draw()
if (0.0 <= t):
theseKeys = event.getKeys(keyList="['space']")
if len(theseKeys)>0:#at least one key was pressed
resposoewoewoiew.keys=theseKeys[-1]#just the last key pressed
#abort routine on response
continueTest_b=False
#check for quit (the [Esc] key)
if event.getKeys(["escape"]): core.quit()
event.clearEvents()#so that it doesn't get clogged with other events
#refresh the screen
win.flip()
#end of this routine (e.g. trial)
#update component parameters for each repeat
resplkgikh = KeyResponse()#create an object of type KeyResponse
#run the trial
continueInt6=True
t=0; int6Clock.reset()
while continueInt6 and (t<1000000.0000):
#get current time
t=int6Clock.getTime()
#update/draw components on each frame
if (0.0 <= t):
lkhoh.draw()
if (0.0 <= t):
theseKeys = event.getKeys(keyList="['space']")
if len(theseKeys)>0:#at least one key was pressed
resplkgikh.keys=theseKeys[-1]#just the last key pressed
#abort routine on response
continueInt6=False
#check for quit (the [Esc] key)
if event.getKeys(["escape"]): core.quit()
event.clearEvents()#so that it doesn't get clogged with other events
#refresh the screen
win.flip()
#end of this routine (e.g. trial)
for thisTrial in trials:
#abbrieviate parameter names if possible (e.g. rgb=thisTrial.rgb)
if thisTrial!=None:
for paramName in thisTrial.keys():
exec(paramName+'=thisTrial.'+paramName)
#update component parameters for each repeat
t_word.setText(word)
t_resp = KeyResponse()#create an object of type KeyResponse
curr_time = 0
s_st = list(kamil_consts.s_st)
s_dur = list(kamil_consts.s_dur)
#Set first variable duration time (fixation)
s_dur[0] = float(fix_time)
for i in range(1, kamil_consts.NUM_OF_VALS):
s_st[i] = float(fix_time)
#run the trial
continueTrial=True
t=0; trialClock.reset()
while continueTrial and (t<s_dur[-1]+s_st[-1]):
#get current time
t=trialClock.getTime()
#update/draw components on each frame
if (s_st[0]<= t < (s_st[0]+s_dur[0])):
t_fix.draw()
if (s_st[1]<= t < (s_st[1]+s_dur[1])):
t_word.draw()
if (s_st[2]<= t < (s_st[2]+s_dur[2])):
if t_resp.clock==None: #if we don't have one we've just started
t_resp.clock=core.Clock()#create one (now t=0)
theseKeys = event.getKeys(keyList="['c', 'n']")
if len(theseKeys)>0:#at least one key was pressed
if t_resp.keys==[]:#then this was the first keypress
t_resp.keys=theseKeys[0]#just the first key pressed
t_resp.rt = t_resp.clock.getTime()
if (s_st[1]<= t < (s_st[1]+s_dur[1])):
if curr_time == 0:
#a first frame of an image
#send trigger, save image onset data
#expand image start time (st) so that
#a time needed for sending a trigger will not
#influence image duration
before_time = time.time()
#send trigger ...
kamil_helper.send()
curr_time = time.time()
trials.addData('onset_time', str("%.5f"%curr_time))
diff_time = curr_time - before_time
for i in range(1, kamil_consts.NUM_OF_VALS):
s_st[i] = s_st[i] + diff_time
t_resp.clock=core.Clock()
#check for quit (the [Esc] key)
if event.getKeys(["escape"]): core.quit()
event.clearEvents()#so that it doesn't get clogged with other events
#refresh the screen
win.flip()
#end of this routine (e.g. trial)
if len(t_resp.keys)>0:#we had a response
trials.addData('t_resp.keys',t_resp.keys)
trials.addData('t_resp.rt',t_resp.rt)
if len(t_resp.keys)==0:
trials.addData('t_resp.keys','')
trials.addData('t_resp.rt', -1)
#update component parameters for each repeat
respkl = KeyResponse()#create an object of type KeyResponse
trials_count += 1
if not (trials_count % trials_to_blink == 0):
after_blink_break = 1
continue
else:
after_blink_break = 0
trials_count = 0
trials_to_blink = randint(kamil_consts.BLINK_MIN, kamil_consts.BLINK_MAX)
#run the trial
continueBlink=True
t=0; blinkClock.reset()
while continueBlink and (t<1000000.0000):
#get current time
t=blinkClock.getTime()
#update/draw components on each frame
if (0.0 <= t):
xxx.draw()
if (0.0 <= t):
theseKeys = event.getKeys(keyList="['space']")
if len(theseKeys)>0:#at least one key was pressed
respkl.keys=theseKeys[-1]#just the last key pressed
#abort routine on response
continueBlink=False
respkl.keys = ''
#check for quit (the [Esc] key)
if event.getKeys(["escape"]): core.quit()
event.clearEvents()#so that it doesn't get clogged with other events
#refresh the screen
win.flip()
#end of this routine (e.g. trial)
if len(respkl.keys)>0:#we had a response
trials.addData('respkl.keys',respkl.keys)
#update component parameters for each repeat
if after_blink_break == 1:
continue
#run the trial
continueBlink_sth=True
t=0; blink_sthClock.reset()
while continueBlink_sth and (t<2.0000):
#get current time
t=blink_sthClock.getTime()
#update/draw components on each frame
if (0.0<= t < (0.0+2.0)):
slsdfdkeifjfk.draw()
#check for quit (the [Esc] key)
if event.getKeys(["escape"]): core.quit()
event.clearEvents()#so that it doesn't get clogged with other events
#refresh the screen
win.flip()
#end of this routine (e.g. trial)
#completed 1 repeats of 'trials' repeats
trials.saveAsPickle(filename+'trials')
trials.saveAsExcel(filename+'.xlsx', sheetName='trials',
stimOut=['group', 'word', 'fix_time', ],
dataOut=['n','all_mean','all_std', 'all_raw'])
psychopy.log.info('saved data to '+filename+'.dlm')
#update component parameters for each repeat
respkjftess = KeyResponse()#create an object of type KeyResponse
#run the trial
continueBye=True
t=0; byeClock.reset()
while continueBye and (t<1000000.0000):
#get current time
t=byeClock.getTime()
#update/draw components on each frame
if (0.0 <= t):
lksjwpepowkf.draw()
if (0.0 <= t):
theseKeys = event.getKeys(keyList="['space']")
if len(theseKeys)>0:#at least one key was pressed
respkjftess.keys=theseKeys[-1]#just the last key pressed
#abort routine on response
continueBye=False
#check for quit (the [Esc] key)
if event.getKeys(["escape"]): core.quit()
event.clearEvents()#so that it doesn't get clogged with other events
#refresh the screen
win.flip()
#end of this routine (e.g. trial)
ff = open(filename+'.metadata.pytxt', 'w')
ff.write(str(my_metadata))
ff.close()
os.remove(filename+'.xlsx')
trials.saveAsExcel(filename+'.xlsx', sheetName='trials',
stimOut=['word', 'group', 'fix_time', ],
dataOut=['all_raw'] #dataOut=['n','all_mean','all_std', 'all_raw']
)
logFile.close()
win.close()
core.quit()
|
UTF-8
|
Python
| false | false | 2,013 |
7,327,214,209,470 |
74b2bee5f741c50e10cf93a7c9719816823b1316
|
2b5c9b82cc999d46c035d5f3d82f8a0db2743b63
|
/basil/lang/c/tests/mycmodulesetup.py
|
3060d4347a66ada479185946a82526f613345150
|
[] |
no_license
|
EvelynHf/basil
|
https://github.com/EvelynHf/basil
|
739a9de1a2ebdd0fc2d9a1c044c197d9b208cc16
|
39a2c349eab37e9f8393f49acc048cea4f3a6db3
|
refs/heads/master
| 2021-01-10T21:04:58.728618 | 2014-07-11T15:27:48 | 2014-07-11T15:27:48 | 42,280,677 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#! /usr/bin/env python
# ______________________________________________________________________
"""Script mycmodulesetup.py
Example of how to build mycmodule.c using distutils. Usage:
$ python mycmodulesetup.py build
Jonathan Riehl
"""
# ______________________________________________________________________
# Module imports
from distutils.core import setup, Extension
# ______________________________________________________________________
# Module data
# NOTE(review): distutils is deprecated since Python 3.10 and removed in
# 3.12; new code should import setup/Extension from setuptools instead.
mycmodule_ext = Extension('mycmodule', sources = ['mycmodule.c'])
# ______________________________________________________________________
# Main (distutils) routine
# Builds the C extension in place when invoked as "python mycmodulesetup.py build".
if __name__ == "__main__":
setup(name = 'mycmodule', version = '1.0',
description = 'This is a test extension package.',
ext_modules = [mycmodule_ext])
# ______________________________________________________________________
# End of mycmodulesetup.py
|
UTF-8
|
Python
| false | false | 2,014 |
19,628,000,579,846 |
92f97c42ef9265abe14100cfff5145102f9045ad
|
8dd56408ae8513838c2a62cf63291022635990be
|
/AFRQ.py
|
a8141882154376a7275e0d9f17cb8dc2c52fd3e5
|
[] |
no_license
|
yesimon/rosalind
|
https://github.com/yesimon/rosalind
|
4d043e3c87d7d42d814fd9343b0bf64cbc69c621
|
8014e9d64f804bf663a5f672407fa259301a7925
|
refs/heads/master
| 2021-01-23T03:08:19.527590 | 2013-03-14T04:39:45 | 2013-03-14T04:39:45 | 7,267,196 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# Rosalind AFRQ: for each input value faa (the fraction of a population
# that is homozygous recessive, i.e. faa = q**2 under Hardy-Weinberg),
# print the probability that a random individual carries at least one
# copy of the recessive allele: 2pq + q**2.
import sys
from numpy import roots
if __name__ == '__main__':
# One frequency per whitespace-separated token on stdin.
faas = [float(x) for x in sys.stdin.read().strip().split()]
for faa in faas:
# Recessive allele frequency: faa = q**2, so q = sqrt(faa).
q = faa ** 0.5
# Roots of x**2 + 2qx + (q**2 - 1) = (x + q)**2 - 1 are -q +/- 1;
# the larger one is 1 - q, which is p because p + q = 1.
p = max(roots([1, 2*q, q ** 2 - 1]))
# P(carrier or affected) = 2pq + q**2, space-separated on one line.
sys.stdout.write('%s ' % (2 * p * q + q ** 2))
sys.stdout.write('\n')
|
UTF-8
|
Python
| false | false | 2,013 |
3,753,801,448,859 |
2d9d80d251cf95781656c52c8f8e18fbf14790aa
|
bbe53d0171efbc78ca43f409b4a5235df51f36fa
|
/learning/djangoLearning/ddtcms/src/ddtcms/utils/uploadfile.py
|
6e5b6c4b12b438d0ff55d445304810840c2308ff
|
[] |
no_license
|
brianwang/gftop
|
https://github.com/brianwang/gftop
|
2758ec93e326ba5e801af48f951c73b5761bb25d
|
12a48eafb5114da325515fce4b97e744638e6faf
|
refs/heads/master
| 2021-01-12T08:16:43.816679 | 2012-12-12T16:25:29 | 2012-12-12T16:25:29 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os,datetime,random
from django.conf import settings
def genfilename(filext):
    """Build a pseudo-random file name with the extension *filext* appended.

    The name is a 14-digit timestamp, a zero-padded even two-digit serial,
    and a single random uppercase letter.
    """
    stamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
    serial = str(random.randrange(0, 100, 2)).rjust(2, '0')
    letter = random.choice('ABCDEFGHIJKLMNOPQRSTUVWXYZ')
    return "%s%s%s%s" % (stamp, serial, letter, filext)
def randomfilename(filename):
    """Return a pseudo-random file name that keeps *filename*'s extension.

    The generated name matches genfilename()'s shape: a 14-digit timestamp,
    a zero-padded even two-digit serial and one random uppercase letter.
    When *filename* is empty the extension falls back to ``.tmp``.

    Fixes an inconsistency in the original: the empty-filename branch
    omitted the random letter that every other generated name carries,
    producing a differently shaped (and more collision-prone) name.
    """
    # A name without an extension keeps its (empty) extension, exactly as
    # before; only a completely empty name gets the .tmp fallback.
    ext = os.path.splitext(filename)[1] if filename else '.tmp'
    stamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
    serial = str(random.randrange(0, 100, 2)).rjust(2, '0')
    letter = random.choice('ABCDEFGHIJKLMNOPQRSTUVWXYZ')
    return "%s%s%s%s" % (stamp, serial, letter, ext)
def handle_uploaded_file(f):
    """Save the uploaded file *f* under MEDIA_ROOT/attachment/YYYY/MM/DD/.

    The stored file gets a fresh random name from genfilename() so two
    uploads can never collide by name.

    Returns a ``(upfilename, diskfilename)`` tuple: the path relative to
    MEDIA_ROOT (for URLs / database storage) and the absolute on-disk path.
    """
    # One directory per day keeps any single directory from growing huge.
    UPLOAD_TO = 'attachment/%s/' % (datetime.datetime.now().strftime('%Y/%m/%d'))
    SAVE_TO = os.path.join(settings.MEDIA_ROOT, UPLOAD_TO)
    if not os.path.exists(SAVE_TO):
        os.makedirs(SAVE_TO)  # may raise on bad permissions; let it propagate
    try:
        fileext = os.path.splitext(f.name)[1]
    except Exception:
        # Upload object without a usable .name attribute.
        fileext = '.tmp'
    filename = genfilename(fileext)
    upfilename = os.path.join(UPLOAD_TO, filename)
    diskfilename = os.path.join(SAVE_TO, filename)
    # 'with' guarantees the handle is closed even if a chunk write fails;
    # the original leaked the file descriptor on error.
    with open(diskfilename, 'wb+') as destination:
        for chunk in f.chunks():
            destination.write(chunk)
    return upfilename, diskfilename
|
UTF-8
|
Python
| false | false | 2,012 |
11,269,994,234,081 |
750701b77a36c4193ffead8b72277b55c97d2a3f
|
d4bc2e38c0e1cabd2ebba68d64d0a9818c6c5f0a
|
/src/controllers/item.py
|
8431cba8e3b99fbfbc23c2734264d1fb487cb6e2
|
[] |
no_license
|
wliao008/mysteryleague
|
https://github.com/wliao008/mysteryleague
|
33d28877a96d376919b843bdf9b6b792ea0258d0
|
7cc847ed16ba6cfc5ecb118e372072b1fd46a2cc
|
refs/heads/master
| 2021-01-25T04:09:02.203794 | 2012-11-14T00:36:16 | 2012-11-14T00:36:16 | 1,784,390 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from google.appengine.ext import db, webapp
from google.appengine.ext.webapp import template
from google.appengine.api import users, memcache
import os
import cgi
import models.model
import html2text
import markdown
import helper.user_helper as user_helper
from django.utils import simplejson
VIEWS_PATH = os.path.join(os.path.dirname(__file__), '../views/')
class ItemDetail(webapp.RequestHandler):
"""Shows a single item's detail page (GET) and accepts reviews for it (POST)."""
def get(self, item_type, key):
"""Render the detail template for the datastore entity at *key*.

*item_type* picks the template: '1' article, '2' book, '3' person.
Also bumps the item's hit counter and builds a login link/message
for anonymous visitors.
"""
item_template = ''
book = None
item = None
if item_type == '1':
item_template = 'articledetail.html'
elif item_type == '2':
item_template = 'bookdetail.html'
elif item_type == '3':
item_template = 'persondetail.html'
try:
item = db.get(key)
path = os.path.join(os.path.dirname(VIEWS_PATH), item_template)
# Count the page view. NOTE(review): read-modify-write without a
# transaction, so concurrent requests can lose increments.
item.hits += 1
item.put()
#curr_user = users.get_current_user()
openid = user_helper.get_current_openid()
login_url = ""
login_msg = ""
if not openid:
# Anonymous visitor: build a login link that returns here.
#login_url = users.create_login_url(self.request.uri)
login_url = "/login?continue=" + self.request.uri
login_msg = "Please <a href=" + login_url + ">login</a> to leave comment ;)"
user = memcache.get('user')
# item.tags holds keys; db.get() dereferences them in one batch.
tags = db.get(item.tags)
tagcount = len(tags)
model = {'book': book, 'item': item, 'tags': tags, 'tagcount': tagcount, 'curr_user': openid, 'user': user, 'login_url': login_url, 'login_msg': login_msg}
self.response.out.write(template.render(path, model))
except db.Error:
# Datastore failure (bad key etc.): report 500, then bounce to
# the not-found page.
self.error(500)
self.redirect('/notfound')
#self.response.out.write('err')
def post(self, item_type, key):
"""Attach a new Review with the escaped comment text, then redirect back."""
user = memcache.get('user')
article = db.get(key)
review = models.model.Review()
# cgi.escape neutralises HTML in the user-supplied comment text.
review.content_html = cgi.escape(self.request.get('content'))
review.item = article
review.user = user
review.put()
self.redirect("/item/%(item_type)s-%(key)s" % {'item_type': item_type, 'key': key})
class ItemEdit(webapp.RequestHandler):
    """Handler for creating or editing an item through a markdown (WMD) editor."""

    def get(self, item_type, key=None):
        item_template = ''
        item = None
        if item_type == '1':
            item_template = 'articledetailedit.html'
        elif item_type == '2':
            item_template = 'articledetailedit.html'#'bookdetailedit.html'
        elif item_type == '3':
            item_template = 'persondetailedit.html'
        try:
            if key:
                # Editing an existing item; backfill the markdown source
                # from the stored HTML if it was never saved.
                item = db.get(key)
                if not item.content_wmd:
                    item.content_wmd = html2text.html2text(item.content_html)
            else:
                #TODO: make sure user is logged in first
                curr_user = users.get_current_user()
                if not curr_user:
                    login_url = "/login?continue=" + self.request.uri
                    self.redirect(login_url)
                else:
                    # New item: seed a placeholder Article for the editor.
                    title = "new"
                    summary = ""
                    usr = memcache.get('user')
                    item = models.model.Article(item_type=int(item_type),title=title, summary=summary, user=usr)
                    item.content_wmd = '**hi**'
            path = os.path.join(os.path.dirname(VIEWS_PATH), item_template)
            model = {'item': item}
            self.response.out.write(template.render(path, model))
        except db.Error:
            self.error(500)
            self.redirect('/notfound')

    def post(self, item_type, key=None):
        title = self.request.get('title')
        tags = self.request.get('tags[term][]', allow_multiple=True)
        content_wmd = self.request.get('content_wmd')
        item = None
        if key:
            item = db.get(key)
        else:
            summary = ""
            usr = memcache.get('user')
            item = models.model.Article(item_type=int(item_type),title=title, summary=summary, user=usr)
        item.title = title
        # Persist both the markdown source and the rendered HTML.
        item.content_wmd = content_wmd
        item.content_html = markdown.markdown(content_wmd, output_format='html')
        for tag in tags:
            # Reuse an existing Tag row when one matches, else create it.
            query = db.GqlQuery("SELECT * FROM Tag WHERE name = :1", tag)
            curr_tag = query.fetch(1)
            if curr_tag and curr_tag[0]:
                curr_tag[0].tagcount += 1
                curr_tag[0].put()
                item.tags.append(curr_tag[0].key())
            else:
                new_tag = models.model.Tag(name=tag)
                new_tag.put()
                item.tags.append(new_tag.key())
        #self.response.out.write(item.content_html)
        item.put()
        self.redirect('/')
class ItemReview(webapp.RequestHandler):
    """Handler for the standalone review page of an item."""

    def get(self, item_type, key):
        path = os.path.join(os.path.dirname(VIEWS_PATH), 'review.html')
        item = db.get(key)
        model = {'item': item}
        self.response.out.write(template.render(path, model))

    def post(self, item_type, key):
        # NOTE(review): creates a hard-coded test user and a brand-new
        # Article on every POST — this looks like leftover scaffolding;
        # confirm before exposing in production.
        title = self.request.get('title')
        summary = "this is a test"
        content_html = self.request.get('content')
        usr = models.model.User(email="[email protected]",nickname="wliao2")
        usr.put()
        article = models.model.Article(item_type=1,title=title, summary=summary, content_html=content_html, user=usr)
        article.put()
        self.redirect('/item/' + item_type + '-' + key)
class SearchTags(webapp.RequestHandler):
    """Return a stub JSON payload for tag-search AJAX requests."""

    def get(self, term):
        payload = {'msg': 'hello ajax'}
        self.response.out.write(simplejson.dumps(payload))
|
UTF-8
|
Python
| false | false | 2,012 |
910,533,111,901 |
18b958d0d65fcd600878e82c34d71f766d7ea250
|
4c681ee9ac0102e003d64ef3e3b8a84bf1136464
|
/cgi-bin/utiles.py
|
9661f4452c7fffcde38f01115b4ded0728f95f48
|
[] |
no_license
|
Pazitos10/Distribuidos
|
https://github.com/Pazitos10/Distribuidos
|
e3be5b256297225b7a5848b030b0352ec85c98fa
|
b1277352015c62c83d422aafecac24b40f768173
|
refs/heads/master
| 2021-01-01T19:20:18.510894 | 2014-11-03T20:01:55 | 2014-11-03T20:01:55 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from ast import literal_eval
# -*- coding: utf-8 -*-
def buscarEnArchivo(nombreDelArchivo, fcComparacion, elementoBuscado):
    """
    Search for elementoBuscado in nombreDelArchivo using fcComparacion.

    Each line of the file is treated as a Python dict literal (see the
    example formats for alumnos.txt / sessiones.txt in the project docs).
    fcComparacion(line_as_dict, elementoBuscado) decides on a match.

    Returns (found_object, line_number) on success, (None, None) when
    nothing matches or when a line cannot be parsed.

    Fixes vs. the original: the file is closed via a context manager, the
    trailing newline is stripped with rstrip('\n') instead of slicing off
    the last character (which corrupted a final line that lacked a
    newline), and ValueError — which literal_eval raises for malformed
    but syntactically valid input — is caught alongside SyntaxError.
    """
    with open(nombreDelArchivo, 'r') as archi:
        lineas = archi.readlines()
    for pos, linea in enumerate(lineas):
        try:
            registro = literal_eval(linea.rstrip('\n'))
        except (SyntaxError, ValueError):
            # Preserve the original contract: a malformed line aborts
            # the whole search with (None, None).
            return None, None
        guardarEnArchivo("log.txt", "Buscando...%s%s" % (type(registro), registro))
        if fcComparacion(registro, elementoBuscado):
            return registro, pos
    return None, None
def guardarEnArchivo(nombreDelArchivo, dato):
    '''
    Append the string form of *dato* plus a newline to nombreDelArchivo.
    '''
    # Context manager guarantees the handle is closed even if write() fails
    # (the original left the file open on an exception).
    with open(nombreDelArchivo, 'a') as arch:
        arch.write(str(dato) + '\n')
|
UTF-8
|
Python
| false | false | 2,014 |
2,018,634,638,622 |
1f324a75d4305945d0aa608ab6a6b359b513bf59
|
bc941cb5e2c75fd6767739c76b6b54b55080e40c
|
/session_scanner/ocr.py
|
3a0417dfd3b4fdee3162513e7df82bce658c584d
|
[
"GPL-2.0-only"
] |
non_permissive
|
malev/legislature-scanner
|
https://github.com/malev/legislature-scanner
|
602cf2e085333147b537dd6552a83cb922c06fe6
|
d7a19362ca93d8464bd87aeeba117d02bf0a77b3
|
refs/heads/master
| 2016-09-02T05:33:42.545025 | 2014-12-15T22:01:26 | 2014-12-15T22:01:26 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import subprocess
from pprint import pprint
class OCR(object):
    """Run tesseract over the four page parts stored in an image directory."""

    def __init__(self, image_dir):
        self.image_dir = image_dir

    def __call__(self):
        # OCR parts 0..3 and return their texts in order.
        return [self._get_text(idx) for idx in range(4)]

    def _get_text(self, index):
        # tesseract writes its result to output.txt; read it back.
        filename = '%s/part_%i.jpg' % (self.image_dir, index)
        subprocess.call(['tesseract', filename, 'output'])
        with open('output.txt', 'r') as f:
            return f.read()
if __name__ == '__main__':
    # Demo: OCR a sample session directory and dump the texts.
    scanner = OCR('build/14-10-21-21/')
    pprint(scanner())
|
UTF-8
|
Python
| false | false | 2,014 |
10,694,468,603,413 |
64c43f0bea12ed3731d8964fd3801010ba21e464
|
edd776c873b9de82d11e77ba0572293af3987bd6
|
/apps/redirect/views.py
|
bb525472a2ea0df786fe0a3073c8066f955c05d1
|
[] |
no_license
|
travistanderson/Timeshare-Rentals
|
https://github.com/travistanderson/Timeshare-Rentals
|
6d4aa0ef9ce8443c8d61dc47dde585ee9cdeab81
|
7b355a8196ce3914a1f37f5231cb439cf7df9a66
|
refs/heads/master
| 2021-05-01T04:52:32.348605 | 2012-12-03T03:55:47 | 2012-12-03T03:55:47 | 768,930 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# redirect/views.py
from django.shortcuts import render_to_response, get_object_or_404
from django.http import HttpResponseRedirect, Http404, HttpResponse
# from django.core.urlresolvers import reverse
from django.template import RequestContext
from redirect.models import Redirector
def redirecturl(request, old_url):
    """Render the interstitial redirect page for a legacy URL."""
    context = RequestContext(request)
    return render_to_response('redirector.html', {"u": old_url}, context_instance=context)
def the404(request):
    """Render the site-wide 404 page."""
    return render_to_response('404.html', context_instance=RequestContext(request))
|
UTF-8
|
Python
| false | false | 2,012 |
13,254,269,122,212 |
3637ce4e7d31641d5a7b62646edafb5a30132d06
|
af99d2e77cdd36b5def5c05a8ae063112fc9ef7b
|
/tests/datastuff/formatting/david_averager.py
|
6077b96ac3ba831269d7a02f9e981242be13e9a3
|
[] |
no_license
|
CarletonDevX/room-draw
|
https://github.com/CarletonDevX/room-draw
|
0b18044efaa2a51365a87ae340d54eb2c7936ac0
|
7230596c62c54c6e1bbc27e304f14915a7e4f4da
|
refs/heads/master
| 2016-09-02T05:17:25.102079 | 2014-08-20T07:00:09 | 2014-08-20T07:00:09 | 19,219,327 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
'''Uses data from roomdata.csv and roomtags.csv to fill in the columns NUMSUSED, AVGNUM, STDEV, HALL, and ROOMNUM in temproomtags.
David Pickart 4/28/14'''
import numpy
import namereplace
def buildDict(file):
    '''Build and return a dictionary of rooms from an open csv file.

    Every entry has the format
    room: (size, subfree, quiet, mens, womens, numsused, avgnum, stdev)
    where only NUMSUSED (index 5, a ";"-separated string of the draw
    numbers seen for that room, taken from column 3 of each row) is
    filled in here; the other slots are placeholders.'''
    next(file)  # Skip the header row.
    roomdict = {}
    for line in file:
        if line != '\n':
            # rstrip drops the \r\n at the end of each line.
            values = line.rstrip().split(",")
            roomname = values[0]
            if roomname not in roomdict:
                roomdict[roomname] = ('', '', '', '', '', values[3], 0, 0)
            else:
                # Append this draw number to the room's running list.
                numsused = roomdict[roomname][5] + ";" + values[3]
                roomdict[roomname] = ('', '', '', '', '', numsused, 0, 0)
    return roomdict
def addHallAndNum(line, roomname):
    '''Append HALL and ROOMNUM columns (derived from roomname) to a CSV row.'''
    hall = namereplace.keyToName(roomname)
    # Room number is everything after the 5-character hall prefix.
    roomnum = roomname[5:]
    return line + "," + hall + "," + roomnum + "\n"
def findAvgStdev(line, usednames, roomdict):
    '''Fill in the avg and stdev columns of one CSV row.

    Returns the rewritten row (with HALL/ROOMNUM appended) and the
    running list of room names already seen.'''
    fields = line.split(",")
    roomname = fields[0]
    usednames.append(roomname)
    if roomname in roomdict:
        numsused = roomdict[roomname][5]
        nums = [int(n) for n in numsused.split(";")]
        avg = float(sum(nums)) / len(nums)
        stdev = numpy.std(nums)
        newline = (roomname + ',' + fields[1] + ",'','','','',"
                   + numsused + ',' + str(avg) + ',' + str(stdev))
    else:
        # Room was never drawn: keep the row as-is (minus the newline).
        newline = line.rstrip()
    return addHallAndNum(newline, roomname), usednames
def columnAdder(readfile, writefile, roomdict):
    '''Write a csv with NUMSUSED, AVGNUM and STDEV columns filled in
    from *roomdict*, covering both rooms present in *readfile* and
    rooms only found in the dictionary.'''
    pieces = []
    usednames = []
    next(readfile)  # Skip header.
    # Rooms that appear in last year's tag file.
    for raw in readfile:
        row, usednames = findAvgStdev(raw.rstrip(), usednames, roomdict)
        pieces.append(row)
    #Optional separator: newtext += "END OF ROOMS DRAWN IN 2013,,,,,,,, \n"
    # Rooms not drawn last year: synthesize a row from the dictionary.
    for key in roomdict.keys():
        if key not in usednames:
            synthetic = key + "," + str(roomdict[key])[1:-1]
            row, usednames = findAvgStdev(synthetic, usednames, roomdict)
            pieces.append(row)
    header = "HOUSENAME,SIZE,SUBFREE,QUIET,MENS,WOMENS,NUMSUSED,AVGNUM,STDEV,HALL,ROOMNUM\n"
    writefile.write(header + "".join(pieces))
def main():
    """Read room data, aggregate draw numbers, and write temproomtags.csv."""
    # Context managers replace the original manual open/close pairs so the
    # files are closed even if an exception is raised mid-way.
    with open('roomdata.csv', 'r') as datafile:
        roomdict = buildDict(datafile)
    with open('roomtags.csv', 'r') as readfile, \
         open('temproomtags.csv', 'w') as writefile:
        columnAdder(readfile, writefile, roomdict)
# Script entry point: run the aggregation when executed directly.
if __name__ == "__main__":
    main()
|
UTF-8
|
Python
| false | false | 2,014 |
4,836,133,201,719 |
41889717846992f87c93b8622c235397dd5c5118
|
45aade1968b93ddad0927d6821da65153e1e067a
|
/django_site/looking_glass/forms.py
|
72eaf0f3a58e956f448022da0d39d1db4a10c29d
|
[] |
no_license
|
eskriett/comp3020
|
https://github.com/eskriett/comp3020
|
aecefa2be98afad32a19e99a792257922349eaf1
|
5b8b5da4eccee21c7071f606fc64a89f07e480d8
|
refs/heads/master
| 2016-09-11T08:06:06.625663 | 2014-04-13T07:17:31 | 2014-04-13T07:17:31 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
.. module:: forms
:synopsis: Creates the forms used in the Looking Glass application
.. moduleauthor:: Hayden Eskriett <[email protected]>
"""
from django import forms
from django.contrib.auth.models import User
from looking_glass.validators import validate_ipv6_addresses
from looking_glass.models import Host, Test
from django.forms.widgets import CheckboxSelectMultiple
class UserForm(forms.ModelForm):
    """
    Login/registration form backed by Django's built-in User model.
    """
    # Bootstrap-styled widgets; the password input masks its value.
    username = forms.CharField(required=True, widget=forms.TextInput(attrs={'class': 'form-control'}))
    password = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'form-control'}))

    class Meta:
        model = User
        fields = ['username', 'password']
class AddHostForm(forms.Form):
    """
    Form used to add a host with a name, optional description and one
    or more IPv6 addresses (validated by validate_ipv6_addresses).
    """
    textFieldWidget = forms.TextInput(attrs={'class': 'form-control'})
    name = forms.CharField(required=True, widget=textFieldWidget)
    description = forms.CharField(required=False, widget=forms.Textarea(attrs={'class': 'form-control', 'rows': '2'}))
    addresses = forms.CharField(required=True, widget=forms.Textarea(attrs={'class': 'form-control', 'rows': '5'}),
                                validators=[validate_ipv6_addresses])
class ExecuteTestForm(forms.Form):
    """
    Form used to execute tests against a host.

    The address field is built dynamically in __init__: a checkbox list
    when the host has several addresses, or a read-only single-address
    field otherwise.
    """
    def __init__(self, host, *args, **kwargs):
        super(ExecuteTestForm, self).__init__(*args, **kwargs)
        addresses = host.addresses.all()
        if len(addresses) > 1:
            self.fields['addresses'] = forms.MultipleChoiceField(choices=[(a.pk, str(a)) for a in addresses],
                                                                 required=True, widget=CheckboxSelectMultiple)
        else:
            # Single address: show it but do not let the user change it.
            self.fields['addresses'] = forms.GenericIPAddressField(label="Address", protocol='IPv6',
                                                                   initial=addresses[0], required=True)
            self.fields['addresses'].widget.attrs['readonly'] = 'True'
            self.fields['addresses'].widget.attrs['class'] = 'form-control'
class EHTestForm(forms.Form):
    """
    Form listing the available extension-header tests as checkboxes.
    """
    eh_tests = forms.ModelMultipleChoiceField(queryset=Test.objects.all(), widget=forms.CheckboxSelectMultiple(),
                                              required=True)
|
UTF-8
|
Python
| false | false | 2,014 |
18,262,200,959,231 |
251e95c954fa1c6a22ddf5646f24185e6a9c5099
|
df512781529fb270eb1123e135459f6c2c944903
|
/raw_code/scripts/xFaToMerDis.py
|
4d7f4861eb112f5298c9dfbcb215c1dfccb195bb
|
[] |
no_license
|
nimezhu/zlab
|
https://github.com/nimezhu/zlab
|
2fd37cd3b6f902979ba69d7530c934de2ed7ce21
|
078dc7b93f716725d1abf21c037637e35896f5e2
|
refs/heads/master
| 2016-08-08T12:54:14.011095 | 2013-04-26T15:20:55 | 2013-04-26T15:20:55 | 1,550,567 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
# programmer : zhuxp
# usage:
from zlab.zbed import *
from zlab.codon import *
import sys
def Main():
    # Count occurrences of every k-mer (k = argv[2]) in each sequence of
    # the FASTA file argv[1]; print one tab-separated row per sequence.
    f=open(sys.argv[1])
    mer_length=int(sys.argv[2])
    print "# Count ",mer_length,"mer of file", sys.argv[1]
    a=faReader(f)
    while 1:
        name,seq=a.next()
        if name is None: break
        name=name.strip()
        # One counter slot per possible k-mer over the 4-letter alphabet.
        count=[0]*(4**mer_length)
        for i in range(len(seq)-mer_length+1):
            mer=seq[i:i+mer_length]
            sign=0
            # Skip windows containing ambiguous bases (n/N).
            for j in mer:
                if j=="n" or j=="N":
                    sign=1
            if sign==0:
                # U.mer2number maps a k-mer string to its integer index
                # (presumably from the zlab.codon star-import — confirm).
                count[U.mer2number(mer)]+=1
        # name[1:] strips the FASTA '>' prefix.
        print name[1:],
        for i in count:
            print "\t",i,
        print
# Script entry point: run the k-mer counter when invoked directly.
if __name__=="__main__":
    Main()
|
UTF-8
|
Python
| false | false | 2,013 |
850,403,571,867 |
e2995ae3ad9ab95304522a16df159d923373e2ef
|
f05837615cec3b5968d134d07d7f0b61619e5349
|
/src/SConscript
|
8fc7b4a7c1a63e8a5c55f71938a70ad4bb1f62a1
|
[] |
no_license
|
lsst/cpptests
|
https://github.com/lsst/cpptests
|
6a986667eb8a86657ccc864a53d58b6eae81da9f
|
c1a943b35910ef06a897536bad7006e5120c55a5
|
refs/heads/master
| 2021-01-11T11:02:59.718506 | 2011-11-16T06:34:47 | 2011-11-16T06:34:47 | 23,013,295 | 0 | 24 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- python -*-
# SCons build script: compile every C++ source in this directory to an
# object file, linking against no libraries.
Import("env")
import glob

for src in glob.glob("*.cc"):
    env.Object(src, LIBS="")
|
UTF-8
|
Python
| false | false | 2,011 |
2,439,541,445,343 |
8d550ddcd18cf5b45bc03cba448859765dffe57e
|
63923062a944b4b959778e2683cbba8341ee6b55
|
/utils.py
|
6d3434cf22f7b04a424c4a1fea50126bc6da60cf
|
[] |
no_license
|
asmoore/searchfda
|
https://github.com/asmoore/searchfda
|
912c15758619e614a6ece88fc375622d9651862d
|
bef1744f44233c9a6c99dde177d5ff8a2bd1a38c
|
refs/heads/master
| 2021-01-23T21:35:47.027418 | 2014-09-16T23:23:20 | 2014-09-16T23:23:20 | 21,547,474 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
utils
--------------
Utility functions for the `searchfda` module.
"""
from collections import namedtuple
import urllib2
import json
from datetime import datetime, time, timedelta, date
import re
import os
from pyquery import PyQuery
import praw
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from whoosh.qparser import QueryParser
from whoosh.query import FuzzyTerm
from whoosh.index import open_dir
from whoosh.fields import *
import app
import settings
def fetch_search(search, page):
    """
    Fetch one page (10 hits) of fuzzy search results from the local
    Whoosh index and return (results, total_hit_count).
    """
    search_results = []
    # NOTE(review): the `test` alias is unused — looks accidental.
    root = test = os.path.dirname(os.path.realpath('__file__'))
    ix = open_dir(root + "/data/")
    with ix.searcher() as searcher:
        # FuzzyTerm allows approximate matches on the drug name.
        query = QueryParser("name", ix.schema, termclass=FuzzyTerm).parse(search)
        results = searcher.search_page(query, page, 10)
        total = results.total
        print total
        for hit in results:
            print hit
            # Expand the single-letter category codes stored in the index.
            if hit["category"] == "B":
                category = "Brand name drug"
            elif hit["category"] == "G":
                category = "Generic drug"
            else:
                category = hit["category"]
            search_results.append({"name": hit["name"].capitalize(),
                                   "category": category,
                                   "adverse": hit["adverse"],
                                   "recall": hit["recall"],
                                   "label": hit["label"]})
    #total = 2
    return (search_results, total)
def fetch_adverse_event_count(search, count):
    """
    Fetch adverse-event counts from the OpenFDA drug/event endpoint
    using a count query; returns the raw "results" list.
    """
    limit = "20"
    api_key = "QxCHqxHE1kHDwbBFj2WRh3w8y3aepivT42vgCQDH"
    openfda_url = ''.join(["https://api.fda.gov/drug/event.json?",
                           "api_key=", api_key,
                           "&search=", search,
                           "&count=", count,
                           "&limit=", limit])
    # Minimal manual URL-escaping of spaces and double quotes.
    openfda_url = openfda_url.replace(" ", "%20")
    openfda_url = openfda_url.replace('"', "%22")
    print openfda_url
    response = urllib2.urlopen(openfda_url)
    jdata = json.load(response)
    return jdata["results"]
def fetch_adverse_event(search, report):
    """
    Fetch a single adverse-event report (1-indexed *report*) from the
    OpenFDA drug/event endpoint; returns (results, total_report_count).
    """
    api_key = "QxCHqxHE1kHDwbBFj2WRh3w8y3aepivT42vgCQDH"
    # skip = report - 1 converts the 1-based report number into an offset.
    openfda_url = ''.join(["https://api.fda.gov/drug/event.json?",
                           "api_key=", api_key,
                           "&search=", search,
                           "&limit=", "1",
                           "&skip=", str(int(report) - 1)])
    jdata = json.load(urllib2.urlopen(openfda_url))
    return (jdata["results"], jdata["meta"]["results"]["total"])
def fetch_recall_count(search, count):
    """
    Fetch recall counts from the OpenFDA drug/enforcement endpoint
    using a count query; returns the raw "results" list.
    """
    limit = "20"
    api_key = "QxCHqxHE1kHDwbBFj2WRh3w8y3aepivT42vgCQDH"
    openfda_url = ''.join(["https://api.fda.gov/drug/enforcement.json?",
                           "api_key=", api_key,
                           "&search=", search,
                           "&count=", count,
                           "&limit=", limit])
    # Minimal manual URL-escaping of spaces and double quotes.
    openfda_url = openfda_url.replace(" ", "%20")
    openfda_url = openfda_url.replace('"', "%22")
    print openfda_url
    response = urllib2.urlopen(openfda_url)
    jdata = json.load(response)
    return jdata["results"]
def fetch_label(search):
    """
    Fetch the most relevant drug label from the OpenFDA drug/label
    endpoint.

    Returns (results, spl_set_id) where spl_set_id identifies the
    Structured Product Label, usable for follow-up media lookups.
    """
    # Dead commented-out experimentation code removed from the original.
    api_key = "QxCHqxHE1kHDwbBFj2WRh3w8y3aepivT42vgCQDH"
    limit = "1"
    openfda_url = ''.join(["https://api.fda.gov/drug/label.json?",
                           "api_key=", api_key,
                           "&search=", search,
                           "&limit=", limit])
    response = urllib2.urlopen(openfda_url)
    jdata = json.load(response)
    spl_set_id = jdata["results"][0]["openfda"]["spl_set_id"][0]
    return (jdata["results"], spl_set_id)
def fetch_label_media(spl_id):
    """
    Fetch the label media URL for a Structured Product Label from DailyMed.
    """
    dailymed_url = ("http://dailymed.nlm.nih.gov/dailymed/services/v2/spls/"
                    + spl_id + "/media.json")
    jdata = json.load(urllib2.urlopen(dailymed_url))
    return jdata["data"]["media"]
def fetch_description(search):
    """
    Fetch a plain-text drug description via MedlinePlus Connect: resolve
    the entry URL from the Connect service, then scrape the first <p>.
    """
    medline_url = ("http://apps.nlm.nih.gov/medlineplus/services/mpconnect_service.cfm"
                   "?mainSearchCriteria.v.cs=2.16.840.1.113883.6.88"
                   "&mainSearchCriteria.v.dn=" + search +
                   "&informationRecipient.languageCode.c=en"
                   "&knowledgeResponseType=application/json")
    jdata = json.load(urllib2.urlopen(medline_url))
    description_url = jdata["feed"]["entry"][0]["link"][0]["href"]
    page = PyQuery(description_url)
    return page("p")[0].text
def get_ae_number(drug_name):
    """
    Return the total number of OpenFDA adverse-event reports naming
    *drug_name* as the medicinal product, or 0 on any lookup failure.
    """
    limit = "1"
    api_key = "QxCHqxHE1kHDwbBFj2WRh3w8y3aepivT42vgCQDH"
    drug_name = drug_name.replace(" ", "%20")
    search = 'patient.drug.medicinalproduct:"' + drug_name + '"'
    openfda_url = ''.join(["https://api.fda.gov/drug/event.json?",
                           "api_key=", api_key,
                           "&search=", search,
                           "&limit=", limit])
    try:
        response = urllib2.urlopen(openfda_url)
        jdata = json.load(response)
        count = jdata['meta']['results']['total']
    except Exception:
        # Narrowed from a bare `except:`: any network/parse failure still
        # yields 0, but SystemExit/KeyboardInterrupt are no longer
        # swallowed.
        count = 0
    return count
def get_recall_number(drug_name):
    """
    Return the total number of OpenFDA enforcement (recall) reports for
    *drug_name* as a substance, or 0 on any lookup failure.
    """
    limit = "1"
    api_key = "QxCHqxHE1kHDwbBFj2WRh3w8y3aepivT42vgCQDH"
    drug_name = drug_name.replace(" ", "%20")
    search = 'openfda.substance_name:"' + drug_name + '"'
    openfda_url = ''.join(["https://api.fda.gov/drug/enforcement.json?",
                           "api_key=", api_key,
                           "&search=", search,
                           "&limit=", limit])
    try:
        response = urllib2.urlopen(openfda_url)
        jdata = json.load(response)
        count = jdata['meta']['results']['total']
    except Exception:
        # Narrowed from a bare `except:`: any network/parse failure still
        # yields 0, but SystemExit/KeyboardInterrupt are no longer
        # swallowed.
        count = 0
    return count
if __name__ == '__main__':
    # NOTE(review): both create_engine lines are commented out, so `engine`
    # is undefined here and this block raises NameError when run directly —
    # confirm which database URL should be restored.
    #engine = create_engine('sqlite:///games.db')
    #engine = create_engine(os.environ['DATABASE_URL'])
    Session = sessionmaker(bind=engine)
    session = Session()
    # Flask-SQLAlchemy compatibility shim on a plain SQLAlchemy session.
    session._model_changes = {}
|
UTF-8
|
Python
| false | false | 2,014 |
7,773,890,823,493 |
87e6b244b13396efc25945714616c90626cf81cd
|
7abe199dbf81125fd67197c49f28b22b7a82234e
|
/server/recorvva_start.py
|
8fcb3b2849b4f683784e1f8821e67e38ac93cc9a
|
[] |
no_license
|
jycr753/ReCoRVVA
|
https://github.com/jycr753/ReCoRVVA
|
949820ef0bbfab08a0029c37471d89cf537c78fa
|
b0d01a1c3e7294f7bbd5385b79f4dd175b243529
|
refs/heads/master
| 2021-01-14T14:02:26.930797 | 2014-05-03T18:44:54 | 2014-05-03T18:44:54 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import RPi.GPIO as GPIO
# Use physical (BOARD) pin numbering and configure pin 11 as an input.
GPIO.setmode(GPIO.BOARD)
GPIO.setup(11, GPIO.IN)

# Poll the switch forever and report its state.
# NOTE(review): this is a tight busy-loop with no sleep — it floods stdout
# and pins a CPU core; consider a short time.sleep() between reads.
while True:
    # Truthiness replaces the redundant `== True` comparison.
    if GPIO.input(11):
        print("switch ON")
    else:
        print("switch OFF")
|
UTF-8
|
Python
| false | false | 2,014 |
386,547,080,378 |
19bfaa411caf482220ca6fa6c707cd573adddce7
|
e74e2d3a8babb739b15c825bdfb14c7828af2344
|
/tests/siteparser.py
|
0828cb19d0a33111a911661f5011dad12a3c165d
|
[] |
no_license
|
fenn/skdb
|
https://github.com/fenn/skdb
|
97ea9a4cc584c4b24ea5275ac36cfdd3f34118de
|
dfb6ec81952030c2d0c1dec880eb93da1d629667
|
refs/heads/master
| 2020-05-07T21:55:52.979394 | 2014-11-15T12:33:28 | 2014-11-15T12:33:28 | 3,238,526 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import libxml2
# Parse sitetest.xml with libxml2's streaming TextReader, collecting the
# child fields of each <site id="..."> element into a dict of dicts keyed
# by the site id.
fh = open("sitetest.xml")
input = libxml2.inputBuffer(fh)
reader = input.newTextReader("")
sites = {}
reader.Read()  # advance past the initial node
siteID = ""
while reader.Read():
    if reader.Name() == "site":
        # Start of a new site: key subsequent fields by its id attribute.
        siteID = reader.GetAttribute("id")
        sites[siteID] = {}
    if reader.Name() in ("datetime", "name", "locname", "latitude", "longitude", "website", "access"):
        print "found " + reader.Name() + " node"
        sites[siteID][reader.Name()] = reader.ReadInnerXml()
print sites
|
UTF-8
|
Python
| false | false | 2,014 |
10,282,151,732,913 |
d0b6796de47e4f157dc09c14d0ef390611d1b8cf
|
6745ff9de047fc5ef5233269c7a6da4a84f2f4d5
|
/librpg/tools/mapeditor/engine.py
|
ede62cb5fd03aa0b93ae618a071f4b1550f9d431
|
[
"LGPL-3.0-only",
"GPL-3.0-only"
] |
non_permissive
|
CNXTEoE/librpg
|
https://github.com/CNXTEoE/librpg
|
a19e8fb61024176a1fe00e8fc39f2a692c228c9f
|
757c33b1a6850b744472fa086f8a10fd747a5c57
|
refs/heads/master
| 2017-06-23T17:53:37.682964 | 2010-06-16T00:29:09 | 2010-06-16T00:29:09 | 83,179,467 | 1 | 0 | null | true | 2017-02-26T03:16:18 | 2017-02-26T03:16:18 | 2015-04-20T04:30:13 | 2015-04-20T04:31:56 | 2,904 | 0 | 0 | 0 | null | null | null |
import pygame
from pygame.locals import QUIT
#import settings as set
from map import *
from tile import *
class Engine(object):
    """Minimal pygame loop for the map editor."""

    def __init__(self, startmap_xml):
        # startmap_xml is currently unused.
        pass

    def run(self):
        # NOTE(review): `set` below refers to the commented-out
        # `import settings as set` at the top of the file, so this method
        # raises NameError as written — restore that import before use.
        pygame.init()
        pygame.display.set_mode((set.resW,set.resH), set.displayFlags, set.resDepth)
        while True:
            # Exit the loop on a window-close (QUIT) event.
            if QUIT in [e.type for e in pygame.event.get()]:
                break
            #m.draw(screen, 0, 0)
            pygame.display.flip()
|
UTF-8
|
Python
| false | false | 2,010 |
10,402,410,800,962 |
e21fa8dc757d4d65a64ca26e12c22747a80da489
|
68a7949fa28aceebd1fc149e785fd9b0017773d0
|
/discourse/evaluation.py
|
f2d95f08df4328aa51ac38f658dbaeae3daea117
|
[] |
no_license
|
kedz/discourse
|
https://github.com/kedz/discourse
|
4cc15b9e94ae06bd932087becc23870ba93f2ebe
|
259a19ad690e76c363a6a807e813083c84ef827f
|
refs/heads/master
| 2021-01-10T18:49:38.822505 | 2014-04-29T03:02:31 | 2014-04-29T03:02:31 | 12,629,350 | 1 | 0 | null | false | 2014-04-29T03:02:31 | 2013-09-05T21:27:14 | 2014-04-29T03:02:31 | 2014-04-29T03:02:31 | 72,032 | 1 | 0 | 1 |
OpenEdge ABL
| null | null |
from itertools import izip
from discourse.hypergraph import s2i, recover_order
import discourse
from collections import OrderedDict
import textwrap
from discourse.models.rush import BigramCoherenceInstance
import scipy as sp
import pandas as pd
def explain_predicted(test_docs, predy, model, feats):
# Print stats for individual test instances.
for test_idx, datum in enumerate(izip(test_docs, predY), 1):
test_doc, predy = datum
print u'TEST NO. {:4}\n=============\n'.format(test_idx)
# Print Kendalls Tau and pvalue for baseline and new model
# for this test instance.
kt, pval = kendalls_tau(predy)
print u'Kendall\'s Tau: {:.3f} (pval: {:.3f})'.format(kt, pval)
# Print bigram gold sequence overlap (accuracy) for baseline
# and new model.
bg_acc = bigram_acc(predy)
print u'Bigram Accuracy: {:.3f}'.format(bg_acc)
print
def eval_against_baseline(testX, baselineY, newY, baseline_model, new_model,
                          base_feats, new_feats,
                          baseline_pred_trainY=None,
                          new_pred_trainY=None):
    """
    Evaluate differences in two models. Prints out per instance
    analysis of transitions predicted by baseline and new models.

    testX -- A list of corenlp.Document objects to evaluate on.

    baselineY -- A list of lists of discourse.hypergraph.Transition
        objects predicted by the baseline model for the documents
        in testX.

    newY -- A list of lists of discourse.hypergraph.Transition
        objects predicted by the new model for the documents
        in testX.

    baseline_model -- A discourse.perceptron.Perceptron object trained
        on the features in base_feats.

    new_model -- A discourse.perceptron.Perceptron object trained
        on the features in new_feats.

    base_feats -- A dict of feature names to boolean values,
        indicating the features active in the baseline model.

    new_feats -- A dict of feature names to boolean values,
        indicating the features active in the new model.

    baseline_pred_trainY / new_pred_trainY -- Optional predicted
        transition lists for the *training* documents; when given,
        training-set stats are printed too.
    """
    # Limit text output to 80 chars and wrap nicely.
    wrapper = textwrap.TextWrapper(subsequent_indent='\t')

    print u'OVERALL STATS FOR TEST DOCUMENTS'

    # Print macro averaged Kendall's Tau and pvalues for baseline
    # and new model.
    bl_avg_kt, bl_avg_pval = avg_kendalls_tau(baselineY)
    new_avg_kt, new_avg_pval = avg_kendalls_tau(newY)
    print u'\t | BASELINE | NEW'
    print u'{:14} {:.3f} ({:.3f}) | {:.3f} ({:.3f})\n'.format(u'Kendalls Tau',
                                                              bl_avg_kt,
                                                              bl_avg_pval,
                                                              new_avg_kt,
                                                              new_avg_pval)

    # Print bigram gold sequence overlap (accuracy) for baseline and
    # new model.
    bl_bg_acc = mac_avg_bigram_acc(baselineY)
    new_bg_acc = mac_avg_bigram_acc(newY)
    print u'\t | BASELINE | NEW'
    print u'{:12} | {:.3f} | {:.3f} \n'.format(u'bigram acc',
                                               bl_bg_acc,
                                               new_bg_acc)

    # Optional training-set stats; NaN fills in for whichever side was
    # not supplied.
    if baseline_pred_trainY is not None or new_pred_trainY is not None:
        if baseline_pred_trainY is not None:
            bl_avg_kt_train, bl_avg_pval_train = avg_kendalls_tau(
                baseline_pred_trainY)
            bl_bg_acc_train = mac_avg_bigram_acc(baseline_pred_trainY)
        else:
            bl_avg_kt_train = float('nan')
            bl_avg_pval_train = float('nan')
            bl_bg_acc_train = float('nan')
        if new_pred_trainY is not None:
            new_avg_kt_train, new_avg_pval_train = avg_kendalls_tau(
                new_pred_trainY)
            new_bg_acc_train = mac_avg_bigram_acc(new_pred_trainY)
        else:
            new_avg_kt_train = float('nan')
            new_avg_pval_train = float('nan')
            new_bg_acc_train = float('nan')

        print u'OVERALL STATS FOR TRAINING DOCUMENTS'
        print u'\t | BASELINE | NEW'
        print u'{:14} {:.3f} ({:.3f}) | {:.3f} ({:.3f})\n'.format(
            u'Kendalls Tau',
            bl_avg_kt_train,
            bl_avg_pval_train,
            new_avg_kt_train,
            new_avg_pval_train)
        print u'\t | BASELINE | NEW'
        print u'{:12} | {:.3f} | {:.3f} \n'.format(u'bigram acc',
                                                   bl_bg_acc_train,
                                                   new_bg_acc_train)

    # Print stats for individual test instances.
    for test_idx, datum in enumerate(izip(testX, baselineY, newY), 1):
        testx, baseliney, newy = datum
        print u'TEST NO. {:4}\n=============\n'.format(test_idx)

        # Print Kendalls Tau and pvalue for baseline and new model
        # for this test instance.
        bl_kt, bl_pval = kendalls_tau(baseliney)
        new_kt, new_pval = kendalls_tau(newy)
        print u'\t | BASELINE | NEW'
        print u'{:14} {:.3f} ({:.3f}) | {:.3f} ({:.3f})\n'.format(u'K. Tau',
                                                                  bl_kt,
                                                                  bl_pval,
                                                                  new_kt,
                                                                  new_pval)

        # Print bigram gold sequence overlap (accuracy) for baseline
        # and new model.
        bl_acc = bigram_acc(baseliney)
        new_acc = bigram_acc(newy)
        print u'\t | BASELINE | NEW'
        print u'{:12} | {:.3f} | {:.3f} \n'.format(u'bigram acc',
                                                   bl_acc,
                                                   new_acc)

        # Print document sentences in correct order.
        print u'GOLD TEXT\n=========\n'
        for i, s in enumerate(testx):
            print wrapper.fill(u'({:3}) {}'.format(i, unicode(s)))
        print u'\n\n'

        # Print document sentences in baseline order.
        print u'BASELINE TEXT\n=========\n'
        indices = [s2i(t.sents[0]) for t in recover_order(baseliney)[:-1]]
        for i in indices:
            print wrapper.fill(u'({}) {}'.format(i, unicode(testx[i])))
        print u'\n\n'

        # Print document sentences in new model order.
        print u'NEW MODEL TEXT\n=========\n'
        indices = [s2i(t.sents[0]) for t in recover_order(newy)[:-1]]
        for i in indices:
            print wrapper.fill(u'({}) {}'.format(i, unicode(testx[i])))
        print u'\n\n'

        # Get predicted transitions in order for both models.
        # NOTE: The predict function of the Perceptron object returns
        # the predicted transitions in no particular order.
        # When in doubt, use recover_order on any predicted output
        # if you want to iterate over it as if you were traversing the
        # graph of sentence transitions.
        baseline_trans = discourse.hypergraph.recover_order(baseliney)
        new_trans = discourse.hypergraph.recover_order(newy)

        # Map tail sentence of a transition to the transition.
        p2t_baseline = _position2transition_map(baseline_trans)
        p2t_new = _position2transition_map(new_trans)

        # For each transition leaving the same sentence, if the models
        # disagree on what the next sentence is, print analysis of
        # the model features.
        for pos, t_bl in p2t_baseline.items():
            if p2t_new[pos].sents[0] != t_bl.sents[0]:
                t_new = p2t_new[pos]

                # Print tail sentence (pos == -1 is the START marker).
                if pos > -1:
                    pos_str = unicode(testx[pos])
                else:
                    pos_str = u'START'
                print u'=' * 80
                print wrapper.fill(u'({:3}) {}'.format(pos, pos_str))
                print (u'-' * 80)
                print u' |\n V'

                # Print baseline head sentence
                if s2i(t_bl.sents[0]) is not None:
                    bl_str = unicode(testx[s2i(t_bl.sents[0])])
                else:
                    bl_str = u'END'
                print wrapper.fill(u'(OLD) {}\n'.format(bl_str)) + u'\n'

                # Print baseline model features for the predicted
                # baseline transition.
                explain(t_bl, baseline_model, new_model, testx,
                        base_feats, new_feats)

                # Print new model head sentence.
                if s2i(t_new.sents[0]) is not None:
                    new_str = unicode(testx[s2i(t_new.sents[0])])
                else:
                    new_str = 'END'
                print wrapper.fill(u'(NEW) {}\n'.format(new_str)) + u'\n'

                # Print new model features for the predicted new
                # model transition.
                explain(t_new, baseline_model, new_model, testx,
                        base_feats, new_feats)

                # Print gold head sentence, that is, the sentence the
                # models should have selected.
                if pos + 1 < len(testx):
                    gstr = u'(GLD) {}\n'.format(unicode(testx[pos + 1]))
                    print wrapper.fill(gstr) + u'\n'
                if pos + 1 == s2i(t_bl.sents[0], end=len(testx)):
                    print 'OLD MODEL IS CORRECT\n'
                if pos + 1 == s2i(t_new.sents[0], end=len(testx)):
                    print 'NEW MODEL IS CORRECT\n'
                print
def explain_transition(transition, model, testx):
    """Build a one-column DataFrame of model weights for the features
    that *transition* activates on instance *testx*.

    transition -- A discourse.hypergraph.Transition object to explain.
    model -- A trained perceptron wrapper exposing ``dsm`` (with a
        ``_vec`` vectorizer and ``joint_feature``) and ``sp`` (with the
        learned weight vector ``w``).
    testx -- The test instance the transition was predicted for.

    Returns a pandas DataFrame indexed by feature name (sorted
    alphabetically) whose single column holds each feature's learned
    weight; features the model never saw score 0.0.
    """
    # Recover a name -> weight mapping from the dense weight vector.
    weights = model.dsm._vec.inverse_transform(model.sp.w)[0]
    # Names of the features this transition fires on testx.
    feats = model.dsm._vec.inverse_transform(
        model.dsm.joint_feature(testx, [transition]))[0].keys()
    # Pair every active feature with its weight (0.0 when unseen).
    points = [(feat, weights.get(feat, 0.0)) for feat in feats]
    points.sort(key=lambda x: x[0])
    # Use concrete lists (not generators) so pandas can build the index.
    feature_names = [point[0] for point in points]
    weight_rows = [[point[1]] for point in points]
    df = pd.DataFrame(weight_rows, index=feature_names)
    return df
def compare_model(t, baseline_model, new_model, testdoc, base_feats, new_feats):
"""
Prints the features and feature scores for a transition t under a
baseline and new model.
t -- A discourse.hypergraph.Transition object to explain.
baseline_model -- A discourse.perceptron.Perceptron object trained
on the features in base_feats.
new_model -- A discourse.perceptron.Perceptron object trained
on the features in new_feats.
testdoc -- A corenlp.Document object corresponding to the test
instance that the transition t is from.
base_feats -- A dict of feature names to boolean values,
indicating the features active in the baseline model.
new_feats -- A dict of feature names to boolean values,
indicating the features active in the new model.
"""
# Create RushModels for each model feature set.
base_rmodel = RushModel(testdoc, history=2, features=base_feats)
new_rmodel = RushModel(testdoc, history=2, features=new_feats)
# Get each model's weight vector.
bl_weights = baseline_model.dsm._vec.inverse_transform(
baseline_model.sp.w)[0]
new_weights = new_model.dsm._vec.inverse_transform(
new_model.sp.w)[0]
# Get baseline and new model features for this transition t.
bl_feat = set(baseline_model.dsm._vec.inverse_transform(
baseline_model.dsm.psi(base_rmodel, [t]))[0].keys())
new_feat = set(new_model.dsm._vec.inverse_transform(
new_model.dsm.psi(new_rmodel, [t]))[0].keys())
# Print baseline model features for this transition and their score
# under the baseline and new model.
print '\tBASELINE FEATURES'
print 'FEATURE |BASELINE | NEW'
for feat in bl_feat:
bl_w = bl_weights[feat] if feat in bl_weights else '?'
new_w = new_weights[feat] if feat in new_weights else '?'
print u'{:28} | {:11} | {}'.format(feat, bl_w, new_w)
print
# Print new model features for this transition and their score
# under the baseline and new model.
print '\tNEW MODEL FEATURES'
print 'FEATURE |BASELINE | NEW'
for feat in new_feat:
bl_w = bl_weights[feat] if feat in bl_weights else '?'
new_w = new_weights[feat] if feat in new_weights else '?'
print u'{:28} | {:11} | {}'.format(feat, bl_w, new_w)
print
print
def avg_kendalls_tau(dataY):
    """
    Returns the macro averaged Kendall's tau and pvalues for a list
    of lists of predicted transitions.

    dataY -- A list of lists of discourse.hypergraph.Transition objects
        predicted by a discourse model.

    returns (avg_kt, avg_pval); (None, None) when dataY is empty.
    """
    if not dataY:
        return (None, None)
    # Score every predicted ordering, then macro-average.
    scores = [kendalls_tau(transitions) for transitions in dataY]
    kt_total = sum(score[0] for score in scores)
    pval_total = sum(score[1] for score in scores)
    n = len(dataY)
    return (float(kt_total) / n, float(pval_total) / n)
def kendalls_tau(transitions):
    """
    Compute Kendall's tau and pvalue for a list of
    discourse.hypergraph.Transition objects.

    transitions -- A list of discourse.hypergraph.Transition objects.

    returns (kt, pval)
    """
    ordered = recover_order(transitions)
    # Predicted order: head-sentence index of every transition except
    # the last one (which points at the END marker).
    predicted = [s2i(t.sentences[0]) for t in ordered[:-1]]
    # Gold order is simply the identity permutation.
    gold = [i for i in range(len(predicted))]
    kt, pval = sp.stats.kendalltau(predicted, gold)
    return kt, pval
def mac_avg_bigram_acc(dataY):
    """
    Computes the macro average bigram overlap (accuracy) for a list of
    lists of predicted Transitions.

    dataY -- A list of lists of discourse.hypergraph.Transition objects
        predicted by a discourse model.

    returns avg_acc, or None when dataY is empty.
    """
    if len(dataY) == 0:
        return None
    total = 0
    for predicted in dataY:
        total += bigram_acc(predicted)
    # Macro average: each instance contributes equally.
    return total / float(len(dataY))
def bigram_acc(transitions):
    """
    Compute the bigram overlap (accuracy) for a list of predicted
    Transitions.

    transitions -- A list of discourse.hypergraph.Transition objects.

    returns the fraction of gold bigrams recovered, or None when
    either bigram set is empty.
    """
    ntrans = len(transitions)
    # Predicted (tail, head) sentence-index pairs.
    predicted = set()
    for t in recover_order(transitions):
        pair = (s2i(t.sentences[1]), s2i(t.sentences[0], end='end'))
        predicted.add(pair)
    # Gold bigrams: consecutive indices from START (-1) through 'end'.
    gold = set((i, i + 1) for i in range(-1, ntrans - 2))
    gold.add((ntrans - 2, 'end'))
    if len(predicted) == 0 or len(gold) == 0:
        return None
    return len(predicted & gold) / float(len(gold))
def avg_oso_acc(dataY):
    """
    Macro-averaged exact-order accuracy: the fraction of instances
    whose predicted sentence order matches the gold order exactly.

    dataY -- A list of lists of discourse.hypergraph.Transition objects
        predicted by a discourse model.

    returns avg_acc, or None when dataY is empty.
    """
    if len(dataY) == 0:
        return None
    correct = sum(oso_acc(y) for y in dataY)
    return correct / float(len(dataY))
def oso_acc(transitions):
    """
    Return 1 if the predicted ordering reproduces the gold (identity)
    ordering exactly, else 0.

    transitions -- A list of discourse.hypergraph.Transition objects.
    """
    ntrans = len(transitions)
    predicted = tuple(s2i(t.sentences[0], end=ntrans - 1)
                      for t in recover_order(transitions))
    gold = tuple(range(ntrans))
    return 1 if predicted == gold else 0
def _position2transition_map(transitions):
    """
    Return an ordered dict mapping each transition's tail-sentence
    index to the transition itself, preserving input order.

    transitions -- A list of discourse.hypergraph.Transition objects.
    """
    mapping = OrderedDict()
    for transition in transitions:
        tail_index = s2i(transition.sents[1])
        mapping[tail_index] = transition
    return mapping
|
UTF-8
|
Python
| false | false | 2,014 |
4,844,723,133,642 |
e536498ee88d361a34995f54d486b2bb14f67779
|
691cc7fce7fc451542b16b2fd6b09b98c366c6e6
|
/flumotion/component/consumers/httpstreamer/wizard_gtk.py
|
db7c75f6b2d697324266526ca0c1464aeb30cfb3
|
[
"LGPL-2.0-or-later",
"LGPL-2.1-only",
"LicenseRef-scancode-unknown-license-reference"
] |
non_permissive
|
ApsOps/flumotion-orig
|
https://github.com/ApsOps/flumotion-orig
|
ad09228c4a9e06f078939ad6108990e2e1a9c0fb
|
821dc69b5275d6bfa4c3a8937be305f08be5e7e1
|
refs/heads/master
| 2020-12-11T02:14:06.272596 | 2014-05-07T05:20:52 | 2014-05-07T05:20:52 | 17,538,830 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- Mode: Python -*-
# vi:si:et:sw=4:sts=4:ts=4
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007,2008,2009 Fluendo, S.L.
# Copyright (C) 2010,2011 Flumotion Services, S.A.
# All rights reserved.
#
# This file may be distributed and/or modified under the terms of
# the GNU Lesser General Public License version 2.1 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.LGPL" in the source distribution for more information.
#
# Headers in this file shall remain intact.
"""HTTP wizard integration
This provides a step which you can chose:
- http port
- bandwidth/client limit
- mount point (eg, the url it will be accessed as)
- burst on connect
- cortado java applet
A component of type 'http-streamer' will always be created.
In addition, if you include the java applet, a 'porter' and
'http-server' will be included to share the port between the streamer
and the server and to serve an html file plus the java applet itself.
On the http-server the applet will be provided with help of a plug.
"""
import gettext
import re
import os
import gobject
from kiwi.utils import gsignal
import gtk
from twisted.internet import defer
from zope.interface import implements
from flumotion.admin.assistant.interfaces import IConsumerPlugin
from flumotion.admin.assistant.models import Consumer, Porter
from flumotion.admin.gtk.basesteps import ConsumerStep
from flumotion.configure import configure
from flumotion.common import errors, log, messages
from flumotion.common.i18n import N_, gettexter, ngettext
__version__ = "$Rev$"
_ = gettext.gettext
T_ = gettexter()
class HTTPStreamer(Consumer):
    """I am a model representing the configuration file for a
    HTTP streamer component.

    @ivar has_client_limit: If a client limit was set
    @ivar client_limit: The client limit
    @ivar has_bandwidth_limit: If a bandwidth limit was set
    @ivar bandwidth_limit: The bandwidth limit (megabits per second in
        the UI; converted to bits in getProperties)
    @ivar set_hostname: If a hostname was set
    @ivar hostname: the hostname this will be streamed on
    @ivar port: The port this server will be listening to
    """
    componentType = 'http-streamer'
    requiresPorter = True
    prefix = 'http'

    def __init__(self):
        super(HTTPStreamer, self).__init__()
        # The streamer always shares its port through a porter; start
        # from the default Flumotion HTTP stream port.
        self.setPorter(
            Porter(worker=None, port=configure.defaultHTTPStreamPort))
        self.has_client_limit = False
        self.client_limit = 1000
        self.has_bandwidth_limit = False
        self.bandwidth_limit = 500.0
        self.set_hostname = False
        self.hostname = ''
        self.port = None
        self.properties.burst_on_connect = False

    # Public

    def getURL(self):
        """Fetch the url to this stream
        @returns: the url
        """
        return 'http://%s:%d%s' % (
            self.getHostname(),
            self.getPorter().getPort(),
            self.properties.mount_point)

    def getHostname(self):
        """Fetch the hostname this stream will be published on
        @returns: the hostname
        """
        return self.hostname

    def setData(self, model):
        """
        Sets the data from another model so we can reuse it.

        @param model : model to get the data from
        @type model : L{HTTPStreamer}
        """
        self.has_client_limit = model.has_client_limit
        self.has_bandwidth_limit = model.has_bandwidth_limit
        self.client_limit = model.client_limit
        self.bandwidth_limit = model.bandwidth_limit
        self.set_hostname = model.set_hostname
        self.hostname = model.hostname
        self.properties.burst_on_connect = model.properties.burst_on_connect
        self.port = model.port

    # Component

    def getPorter(self):
        """
        Obtains this streamer's porter model, pinned to this streamer's
        worker and (when set) its port.
        """
        porter = Consumer.getPorter(self)
        # The porter must run on the same worker as the streamer.
        porter.worker = self.worker
        if self.port:
            porter.properties.port = self.port
        return porter

    def getProperties(self):
        # Assemble the component properties written to the config file.
        properties = super(HTTPStreamer, self).getProperties()
        if self.has_bandwidth_limit:
            # UI edits megabits/second; the component expects bits.
            properties.bandwidth_limit = int(self.bandwidth_limit * 1e6)
        if self.has_client_limit:
            properties.client_limit = self.client_limit

        porter = self.getPorter()
        hostname = self.getHostname()
        if hostname and self.set_hostname:
            properties.hostname = hostname
        # Slave mode: the porter owns the listening socket and hands
        # connections over via a unix socket.
        properties.porter_socket_path = porter.getSocketPath()
        properties.porter_username = porter.getUsername()
        properties.porter_password = porter.getPassword()
        properties.type = 'slave'
        # FIXME: Try to maintain the port empty when we are slave. Needed
        # for now as the adminwindow tab shows the URL based on this property.
        properties.port = self.port or self.getPorter().getProperties().port

        return properties
class HTTPSpecificStep(ConsumerStep):
    """I am a step of the configuration wizard which allows you
    to configure a stream to be served over HTTP.
    """
    section = _('Consumption')
    gladeFile = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                             'wizard.glade')

    def __init__(self, wizard):
        self.model = HTTPStreamer()
        ConsumerStep.__init__(self, wizard)

    def updateModel(self, model):
        """
        There is a previous httpstreamer step from where the data can be copied
        It will be copied to the actual model and the advanced
        tab would be hidden.

        @param model: The previous model we are going to copy.
        @type model: L{HTTPStreamer}
        """
        self.model.setData(model)
        self.expander.set_expanded(False)
        # Re-point the kiwi proxy at the refreshed model so the widgets
        # show the copied values.
        self._proxy2.set_model(self.model)

    # ConsumerStep

    def getConsumerModel(self):
        return self.model

    def getServerConsumers(self):
        # Each enabled plug line (e.g. the applet) contributes an extra
        # server-side consumer component.
        for line in self.plugarea.getEnabledLines():
            yield line.getConsumer(self.model,
                self.wizard.getScenario().getAudioProducer(self.wizard),
                self.wizard.getScenario().getVideoProducer(self.wizard))

    # WizardStep

    def setup(self):
        # Declare widget value types for the kiwi proxies below.
        self.mount_point.data_type = str
        self.bandwidth_limit.data_type = float
        self.burst_on_connect.data_type = bool
        self.client_limit.data_type = int
        self.port.data_type = int
        self.hostname.data_type = str

        self.model.properties.mount_point = self._getDefaultMountPath()
        # Proxy 1 binds component properties; proxy 2 binds model-level
        # settings (limits, hostname, port).
        self._proxy1 = self.add_proxy(self.model.properties,
                                      ['mount_point', 'burst_on_connect'])
        self._proxy2 = self.add_proxy(
            self.model, ['has_client_limit',
                         'has_bandwidth_limit',
                         'client_limit',
                         'bandwidth_limit',
                         'set_hostname',
                         'hostname',
                         'port'])
        # Grey out the entries whose enabling checkbox is off.
        self.client_limit.set_sensitive(self.model.has_client_limit)
        self.bandwidth_limit.set_sensitive(self.model.has_bandwidth_limit)
        self.hostname.set_sensitive(self.model.set_hostname)

        self.port.connect('changed', self.on_port_changed)
        self.mount_point.connect('changed', self.on_mount_point_changed)

    def workerChanged(self, worker):
        self.model.worker = worker
        # Run environment checks on the new worker, then (if they pass)
        # populate the plug area with the available HTTP consumer plugs.
        d = self._runChecks()
        d.addCallback(self._populatePlugins)
        return d

    def getNext(self):

        def setModel(next):
            # Pre-fill the next HTTP step (if any) with this step's data.
            if next and next.model.componentType == self.model.componentType:
                next.updateModel(self.model)
            return next

        d = defer.maybeDeferred(ConsumerStep.getNext, self)
        d.addCallback(setModel)
        return d

    # Private

    def _getDefaultMountPath(self):
        # e.g. '/ogg-audio-video/' built from muxer format + consumer type.
        encodingStep = self.wizard.getStep('Encoding')
        return '/%s-%s/' % (str(encodingStep.getMuxerFormat()),
                            self.getConsumerType(), )

    def _suggestMountPoint(self, mountPoint):
        # FIXME: Generalise this method and use the same in f.a.a.save module.
        # Resolve naming conflicts, using a simple algorithm
        # First, find all the trailing digits, for instance in
        # 'audio-producer42' -> '42'
        mountPoint = mountPoint.rstrip('/')
        pattern = re.compile('(\d*$)')
        match = pattern.search(mountPoint)
        trailingDigit = match.group()

        # Now if we had a digit in the end, convert it to
        # a number and increase it by one and remove the trailing
        # digits the existing component name
        if trailingDigit:
            digit = int(trailingDigit) + 1
            mountPoint = mountPoint[:-len(trailingDigit)]
        # No number in the end, use 2 the first one so we end up
        # with 'audio-producer' and 'audio-producer2' in case of
        # a simple conflict
        else:
            digit = 2
        return mountPoint + str(digit) + '/'

    def _populatePlugins(self, canPopulate):
        # canPopulate is the boolean result of _runChecks: skip when the
        # worker failed the environment checks.
        if not canPopulate:
            return

        self.plugarea.clean()

        def gotEntries(entries):
            log.debug('httpwizard', 'got %r' % (entries, ))
            for entry in entries:
                if not self._canAddPlug(entry):
                    continue

                def response(factory, entry):
                    # FIXME: verify that factory implements IHTTPConsumerPlugin
                    plugin = factory(self.wizard)
                    if hasattr(plugin, 'workerChanged'):
                        d = plugin.workerChanged(self.worker)

                        def cb(found, plugin, entry):
                            # 'found' says whether the plugin's own
                            # worker check succeeded; it becomes the
                            # plug line's enabled state.
                            self._addPlug(plugin.getPlugWizard(
                                N_(entry.description)), found)
                        d.addCallback(cb, plugin, entry)
                    else:
                        self._addPlug(plugin.getPlugWizard(
                            N_(entry.description)), True)
                d = self.wizard.getWizardPlugEntry(entry.componentType)
                d.addCallback(response, entry)

        d = self.wizard.getWizardEntries(wizardTypes=['http-consumer'])
        d.addCallbacks(gotEntries)

    def _canAddPlug(self, entry):
        # This function filters out entries which are
        # not matching the accepted media types of the entry
        muxerTypes = []
        audioTypes = []
        videoTypes = []
        for mediaType in entry.getAcceptedMediaTypes():
            kind, name = mediaType.split(':', 1)
            if kind == 'muxer':
                muxerTypes.append(name)
            elif kind == 'video':
                videoTypes.append(name)
            elif kind == 'audio':
                audioTypes.append(name)
            else:
                raise AssertionError

        encoding_step = self.wizard.getStep('Encoding')
        if encoding_step.getMuxerFormat() not in muxerTypes:
            return False

        audioFormat = encoding_step.getAudioFormat()
        videoFormat = encoding_step.getVideoFormat()
        if ((audioFormat and audioFormat not in audioTypes) or
            (videoFormat and videoFormat not in videoTypes)):
            return False
        return True

    def _addPlug(self, plugin, enabled):
        plugin.setEnabled(enabled)
        self.plugarea.addLine(plugin)

    def _runChecks(self):
        # Chain of worker-side checks: GStreamer element -> twisted.web
        # import -> hostname discovery. Each step short-circuits to
        # False on failure.
        self.wizard.waitForTask('http streamer check')

        def hostnameErrback(failure):
            failure.trap(errors.RemoteRunError)
            self.wizard.taskFinished(blockNext=True)
            return False

        def gotHostname(hostname):
            self.model.hostname = hostname
            self._proxy2.update('hostname')
            self.wizard.taskFinished()
            return True

        def getHostname(result):
            if not result:
                return False
            d = self.wizard.runInWorker(
                self.worker, 'flumotion.worker.checks.http',
                'runHTTPStreamerChecks')
            d.addCallback(gotHostname)
            d.addErrback(hostnameErrback)
            return d

        def checkImport(elements):
            # requireElements returns the list of MISSING elements; a
            # non-empty list means the worker cannot stream.
            if elements:
                self.wizard.taskFinished(blockNext=True)
                return False

            d = self.wizard.requireImport(
                self.worker, 'twisted.web', projectName='Twisted project',
                projectURL='http://www.twistedmatrix.com/')
            d.addCallback(getHostname)
            return d

        # first check elements
        d = self.wizard.requireElements(self.worker, 'multifdsink')
        d.addCallback(checkImport)
        return d

    def _checkMountPoint(self, port=None, worker=None,
                         mount_point=None, need_fix=False):
        """
        Checks whether the provided mount point is available with the
        current configuration (port, worker). It can provide a valid
        mountpoint if it is required with need_fix=True.

        @param port : The port the streamer is going to be listening.
        @type port : int
        @param worker : The worker the streamer will be running.
        @type worker : str
        @param mount_point : The desired mount point.
        @type mount_point : str
        @param need_fix : Whether the method should search for a valid
                          mount_point if the provided one is not.
        @type need_fix : bool

        @returns : True if the mount_point can be used, False if it is in use.
        @rtype : bool
        """
        self.wizard.clear_msg('http-streamer-mountpoint')
        # Fall back on the model's current values for unspecified args.
        port = port or self.model.port
        worker = worker or self.model.worker
        mount_point = mount_point or self.model.properties.mount_point
        self.wizard.waitForTask('http-streamer-mountpoint')
        if self.wizard.addMountPoint(worker, port, mount_point,
                                     self.getConsumerType()):
            self.wizard.taskFinished()
            return True
        else:
            if need_fix:
                # Keep appending/incrementing a numeric suffix until a
                # free mount point is found, then push it to the UI.
                while not self.wizard.addMountPoint(worker, port,
                                                    mount_point,
                                                    self.getConsumerType()):
                    mount_point=self._suggestMountPoint(mount_point)
                self.model.properties.mount_point = mount_point
                self._proxy1.update('mount_point')
                self.wizard.taskFinished()
                return True
            message = messages.Error(T_(N_(
                "The mount point %s is already being used for worker %s and "
                "port %s. Please correct this to be able to go forward."),
                mount_point, worker, port))
            message.id = 'http-streamer-mountpoint'
            self.wizard.add_msg(message)
            self.wizard.taskFinished(True)
            return False

    # Callbacks

    def on_mount_point_changed(self, entry):
        if not entry.get_text():
            # Empty mount point: show an error and block navigation.
            self.wizard.clear_msg('http-streamer-mountpoint')
            message = messages.Error(T_(N_(
                "Mountpoint cannot be left empty.\n"
                "Fill the text field with a correct mount point to"
                "be able to go forward.")))
            message.id = 'http-streamer-mountpoint'
            self.wizard.add_msg(message)
            self.wizard.blockNext(True)
        else:
            self._checkMountPoint(mount_point=entry.get_text())

    def on_has_client_limit_toggled(self, cb):
        self.client_limit.set_sensitive(cb.get_active())

    def on_has_bandwidth_limit_toggled(self, cb):
        self.bandwidth_limit.set_sensitive(cb.get_active())

    def on_set_hostname__toggled(self, cb):
        self.hostname.set_sensitive(cb.get_active())

    def on_port_changed(self, widget):
        # Only re-validate once the entry holds a complete number.
        if widget.get_text().isdigit():
            self._checkMountPoint(port=int(widget.get_text()))
class HTTPBothStep(HTTPSpecificStep):
    """Wizard step for an HTTP streamer carrying both audio and video."""
    name = 'HTTPStreamerBoth'
    title = _('HTTP Streamer (Audio and Video)')
    sidebarName = _('HTTP Audio/Video')
    docSection = 'help-configuration-assistant-http-streaming-both'
    docAnchor = ''
    docVersion = 'local'

    # ConsumerStep

    def getConsumerType(self):
        return 'audio-video'
class HTTPAudioStep(HTTPSpecificStep):
    """Wizard step for an audio-only HTTP streamer."""
    name = 'HTTPStreamerAudio'
    title = _('HTTP Streamer (Audio Only)')
    sidebarName = _('HTTP Audio')
    docSection = 'help-configuration-assistant-http-streaming-audio-only'
    docAnchor = ''
    docVersion = 'local'

    # ConsumerStep

    def getConsumerType(self):
        return 'audio'
class HTTPVideoStep(HTTPSpecificStep):
    """Wizard step for a video-only HTTP streamer."""
    name = 'HTTPStreamerVideo'
    title = _('HTTP Streamer (Video Only)')
    sidebarName = _('HTTP Video')
    docSection = 'help-configuration-assistant-http-streaming-video-only'
    docAnchor = ''
    docVersion = 'local'

    # ConsumerStep

    def getConsumerType(self):
        return 'video'
class HTTPGenericStep(HTTPSpecificStep):
    """Wizard step for an HTTP streamer with an arbitrary (caller
    supplied) consumer type."""
    name = 'HTTPStreamerGeneric'
    title = _('HTTP Streamer (Generic)')
    sidebarName = _('HTTP Generic')
    docSection = 'help-configuration-assistant-http-streaming-generic'
    docAnchor = ''
    docVersion = 'local'

    def __init__(self, wizard, type):
        # Unlike the audio/video/both subclasses, the consumer type is
        # provided by the caller instead of being hard-coded.
        self._consumertype = type
        HTTPSpecificStep.__init__(self, wizard)

    # ConsumerStep

    def getConsumerType(self):
        return self._consumertype
class HTTPStreamerWizardPlugin(object):
    """Entry point exposing the HTTP streamer consumption steps to the
    configuration assistant."""
    implements(IConsumerPlugin)

    def __init__(self, wizard):
        self.wizard = wizard

    def getConsumptionStep(self, type):
        """Return the wizard step matching the stream flavour *type*."""
        step_classes = {
            'video': HTTPVideoStep,
            'audio': HTTPAudioStep,
            'audio-video': HTTPBothStep,
        }
        step_class = step_classes.get(type)
        if step_class is not None:
            return step_class(self.wizard)
        # Any other flavour is handled by the generic step, which takes
        # the consumer type as a parameter.
        return HTTPGenericStep(self.wizard, type)
|
UTF-8
|
Python
| false | false | 2,014 |
19,164,144,112,545 |
d158261d5e940a7cf1f26ed5f2ef9da8a9fb60f5
|
1d550f4c6c5f95f53d4659ac713dd1fe1835367c
|
/ats/static/views.py
|
cc3d595245f10fe1fce2542dd58e7b87732afb5e
|
[] |
no_license
|
caroman/ats
|
https://github.com/caroman/ats
|
3376f180f1548efea2bf98a477e132ed1eecafd1
|
99a445ed959cbfdcee3d3ec9f6e5a786c4c80220
|
refs/heads/master
| 2016-09-05T20:29:17.239467 | 2011-06-15T01:28:57 | 2011-06-15T01:28:57 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.contrib.auth.decorators import user_passes_test
from django.http import HttpResponse, Http404
from django.core import serializers
from django.db.models import Q
from ats.static.models import *
##decorators####################################################################
# Decorator: require an authenticated user, redirecting anonymous
# visitors to the login page before the wrapped view runs.
login_needed = user_passes_test( lambda u: not u.is_anonymous()
                                ,login_url = '/ats/login/' )
#ACTIVITY#######################################################################
#@login_needed
def activity_list(request):
    """Return all prioritized Activity rows as a JSON envelope."""
    count = Activity.objects.count()
    rows = Activity.objects.filter(
        Q(priority__isnull=False)).order_by('priority', 'title')
    serialized = serializers.serialize('json', rows)
    payload = '{"total": %s, "results": %s}' % (count, serialized)
    return HttpResponse(payload, mimetype='application/json')
#MACSTATUS######################################################################
#@login_needed
def mac_status_list(request):
    """Return all prioritized MACStatus rows as a JSON envelope."""
    count = MACStatus.objects.count()
    rows = MACStatus.objects.filter(
        Q(priority__isnull=False)).order_by('priority', 'title')
    serialized = serializers.serialize('json', rows)
    payload = '{"total": %s, "results": %s}' % (count, serialized)
    return HttpResponse(payload, mimetype='application/json')
#MANDATESTATUS##################################################################
#@login_needed
def mandate_status_list(request):
    """Return all prioritized MandateStatus rows as a JSON envelope."""
    count = MandateStatus.objects.count()
    rows = MandateStatus.objects.filter(
        Q(priority__isnull=False)).order_by('priority', 'title')
    serialized = serializers.serialize('json', rows)
    payload = '{"total": %s, "results": %s}' % (count, serialized)
    return HttpResponse(payload, mimetype='application/json')
#HIRING MANAGER#################################################################
@login_needed
def hiring_manager_list(request):
    """Return all prioritized HiringManager rows as a JSON envelope."""
    count = HiringManager.objects.count()
    # Note: ordered by 'name', unlike the title-ordered lookups.
    rows = HiringManager.objects.filter(
        Q(priority__isnull=False)).order_by('priority', 'name')
    serialized = serializers.serialize('json', rows)
    payload = '{"total": %s, "results": %s}' % (count, serialized)
    return HttpResponse(payload, mimetype='application/json')
#PROFESSIONAL DESIGNATION#######################################################
@login_needed
def professional_designation_list(request):
    """Return all prioritized ProfessionalDesignation rows as JSON."""
    count = ProfessionalDesignation.objects.count()
    rows = ProfessionalDesignation.objects.filter(
        Q(priority__isnull=False)).order_by('priority', 'title')
    serialized = serializers.serialize('json', rows)
    payload = '{"total": %s, "results": %s}' % (count, serialized)
    return HttpResponse(payload, mimetype='application/json')
#MANAGMENT EXPERIENCE###########################################################
@login_needed
def managment_experience_list(request):
    """Return all prioritized ManagmentExperience rows as JSON."""
    count = ManagmentExperience.objects.count()
    rows = ManagmentExperience.objects.filter(
        Q(priority__isnull=False)).order_by('priority', 'title')
    serialized = serializers.serialize('json', rows)
    payload = '{"total": %s, "results": %s}' % (count, serialized)
    return HttpResponse(payload, mimetype='application/json')
#WORK LOCATION##################################################################
@login_needed
def work_location_list(request):
    """Return all prioritized WorkLocation rows as a JSON envelope."""
    count = WorkLocation.objects.count()
    rows = WorkLocation.objects.filter(
        Q(priority__isnull=False)).order_by('priority', 'title')
    serialized = serializers.serialize('json', rows)
    payload = '{"total": %s, "results": %s}' % (count, serialized)
    return HttpResponse(payload, mimetype='application/json')
#WORK TYPE######################################################################
@login_needed
def work_type_list(request):
    """Return all prioritized WorkType rows as a JSON envelope."""
    count = WorkType.objects.count()
    rows = WorkType.objects.filter(
        Q(priority__isnull=False)).order_by('priority', 'title')
    serialized = serializers.serialize('json', rows)
    payload = '{"total": %s, "results": %s}' % (count, serialized)
    return HttpResponse(payload, mimetype='application/json')
################################################################################
|
UTF-8
|
Python
| false | false | 2,011 |
12,902,081,807,166 |
a5ecb1175242c67663f8eca2d82a646d502cc5dd
|
fff11f7ae8a8a5c9accaea586048067f53b0d69f
|
/hw1.py
|
d0155a3ea28b9714556dcffd89c59179f0cc3283
|
[] |
no_license
|
farken24/csf
|
https://github.com/farken24/csf
|
4e103df1d565ebe9cf13c9c45530226b87dbc372
|
d7d27ee80dd84d8a71a5d7b08399dec3071d52a4
|
refs/heads/master
| 2019-01-02T08:32:04.290386 | 2013-11-14T18:47:53 | 2013-11-14T18:47:53 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Name: Kenneth Faria
# Evergreen Login: farken24
# Computer Science Foundations
# Programming as a Way of Life
# Homework 1
# You may do your work by editing this file, or by typing code at the
# command line and copying it into the appropriate part of this file when
# you are done. When you are done, running this file should compute and
# print the answers to all the problems.
import math # makes the math.sqrt function available
###
### Problem 1
###
print "Problem 1 solution follows:"
a = 1
b = -5.86
c = 8.5408
posroot = (-b + math.sqrt(b ** 2 - 4 * a * c)) / 2 * a
negroot = (-b - math.sqrt(b ** 2 - 4 * a * c)) / 2 * a
print posroot
print negroot
###
### Problem 2
###
print "Problem 2 solution follows:"
import hw1_test
print hw1_test.a
print hw1_test.b
print hw1_test.c
print hw1_test.d
print hw1_test.e
print hw1_test.f
###
### Problem 3
###
print "Problem 3 solution follows:"
print ((hw1_test.a and hw1_test.b) or (not hw1_test.c) and not (hw1_test.d or hw1_test.e or hw1_test.f))
###
### Collaboration
###
# ... Stephen and Cody
# This assignemnt and all the readings took me around 3 hours to complete.
# Did the readings, tutorials, and lecture contain everything you
# needed to know to solve this problem? Yes and no.
#The readings and lecture contained valuable info, however I found the most helpful info working with my fellow classmates.
|
UTF-8
|
Python
| false | false | 2,013 |
11,244,224,418,851 |
47febad3c887d05cf232ceff27f777938a99a400
|
732305d7fc721d0090e465bda635592f3f5e1ae7
|
/pad/pad
|
5361a009f1d92ff0a1535d4c23600542ed4c18b6
|
[
"GPL-3.0-only"
] |
non_permissive
|
cheery/language
|
https://github.com/cheery/language
|
8e2711c8f6079d2d85ea09f19bc748b9d7649cbb
|
629bfc722f5c7f64ceb786ea825dda1dc3a86ef8
|
refs/heads/master
| 2021-01-10T22:11:50.502117 | 2014-09-02T14:03:45 | 2014-09-02T14:03:45 | 19,088,666 | 3 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python3
import curses
from codecard import *
# Sentinel column index larger than any plausible line length, used to
# mean "end of line" when positioning the cursor.
high = 1 << 32
# Number of screen columns rendered per indentation level.
tabsize = 4
def main(screen):
    """Run the editor event loop until the user presses ESC."""
    # Raw-ish terminal input: no line buffering, no echo, 8-bit meta keys.
    curses.cbreak()
    curses.noecho()
    curses.meta(1)
    screen.keypad(0)

    card = Card()
    key = ''
    while True:
        if key == '\x1b':  # ESC ends the session
            break
        draw(screen, card, repr(key))
        key = screen.getkey()
        # Newline opens a fresh line buffer; anything else is appended
        # as literal text at the bottom of the card.
        if key == '\n':
            buf = TextLineBuffer(None, Line(), [], Line())
        else:
            buf = TextBuffer(None, list(key))
        card.put(card.bot, buf)
def draw(screen, card, message):
    """Repaint the card's lines plus a status message on the last row."""
    rows, _cols = screen.getmaxyx()
    screen.clear()
    row = 0
    for line in card.lines:
        text = ''.join(line.text)
        screen.addstr(row, line.indent * tabsize, text)
        row += 1
    # The status line lives on the bottom terminal row.
    screen.addstr(rows - 1, 0, message)
    screen.refresh()
#def motion(screen, pad):
# card = pad.card
# #card.y = clamp(card.y, 0, len(card.lines) - 1)
# #card.x = clamp(card.x, 0, len(card.line) - 1)
# draw(screen, card, pad.message, card.head, card.y)
# pad.message = ''
# text = screen.getkey()
# if text == '\x1b':
# pad.modes.pop(-1)
# elif text == 'i':
# pad.modes.append(insert)
# elif text == 'I':
# card.x = 0
# pad.modes.append(insert)
# elif text == 'a':
# card.x = card.head + 1
# pad.modes.append(insert)
# elif text == 'A':
# card.x = high
# pad.modes.append(insert)
# elif text == '0':
# card.x = 0
# elif text == '$':
# card.x = high
# elif text == 'h' and card.head > card.line.base:
# card.x = card.head - 1
# elif text == '\x7f' and card.head > card.line.base:
# card.x = card.head - 1
# elif text == '\x7f' and card.y > 0:
# card.x = len(card.line)
# card.y -= 1
# elif text == 'l' and card.head < card.line.tail - 1:
# card.x = card.head + 1
# elif text == 'j' or text == '\n':
# card.y = clamp(card.y+1, 0, len(card.lines) - 1)
# elif text == 'J' and card.y + 1 < len(card.lines):
# card.x = card.join_line(card.y)
# elif text == 'k':
# card.y = clamp(card.y-1, 0, len(card.lines) - 1)
# elif text == 'A':
# card.x = len(card.line)
# pad.modes.append(insert)
# elif text == 'o':
# card.y = card.insert_line(card.y+1, Line('', card.line.indent))
# card.x = 0
# pad.modes.append(insert)
# elif text == 'O':
# card.y = card.insert_line(card.y, Line('', card.line.indent))
# card.x = 0
# pad.modes.append(insert)
# elif text == '<' and card.line.indent > 0:
# card.line.indent -= 1
# elif text == '>':
# card.line.indent += 1
# elif text == '_':
# card.x = card.line.base
# elif text == 'x':
# card.line.remove(card.head)
# elif text == 'd':
# card.lines.pop(card.y)
# if len(card.lines) == 0:
# card.lines.append(Line(''))
# card.y = clamp(card.y, 0, len(card.lines) - 1)
#
#def insert(screen, pad):
# card = pad.card
# draw(screen, card, '-- insert --' + pad.message, card.index, card.y)
# text = screen.getkey()
# if text == '\x1b':
# pad.modes.pop(-1)
# elif text == '\x7f' and card.index > card.line.base:
# index = card.index - 1
# card.x = index
# card.line.remove(index)
# elif text == '\x7f' and card.index > 0:
# card.line.indent -= 1
# card.x = card.line.base
# elif text == '\x7f' and card.index == 0 and card.y > 0:
# card.x = card.join_line(card.y-1)
# card.y = card.y - 1
# elif text == '\n':
# card.lines[card.y], tail = card.line.split(card.index)
# card.y = card.insert_line(card.y+1, tail)
# card.x = card.line.base
# elif text == '\t':
# base = card.line.base
# card.line.indent += 1
# card.x = card.index + card.line.base - base
# else:
# card.x = card.line.insert(card.index, text)
# pad.message = repr(text)
#
#
#class Pad:
# def __init__(self, card, mode, message):
# self.card = card
# self.modes = [mode]
# self.message = message
#
#class Card:
# def __init__(self, lines=None, x=0, y=0):
# self.lines = lines or [Line()]
# self.x = x
# self.y = y
#
# @property
# def index(self):
# return clamp(self.x, self.line.base, self.line.tail)
#
# @property
# def head(self):
# return clamp(self.x, self.line.base, self.line.tail-1)
#
# @property
# def line(self):
# assert 0 <= self.y <= len(self.lines)
# return self.lines[self.y]
#
# def insert_line(self, y, line):
# assert isinstance(line, Line)
# self.lines.insert(y, line)
# return y
#
# def remove_line(self, y):
# return self.lines.pop(y)
#
# def join_line(self, y):
# i = self.lines[y].tail
# self.lines[y] += self.lines.pop(y+1)
# return i
#
#class Line:
# def __init__(self, text='', indent=0):
# self.text = text
# self.indent = indent
#
# @property
# def base(self):
# return self.indent * tabsize
#
# @property
# def tail(self):
# return self.indent * tabsize + len(self.text)
#
# def insert(self, i, text):
# i -= self.base
# assert 0 <= i <= len(self.text)
# self.text = self.text[:i] + text + self.text[i:]
# return i + len(text) + self.base
#
# def remove(self, i, length=1):
# i -= self.base
# assert 0 <= i <= len(self.text)
# text = self.text[i:i+length]
# self.text = self.text[:i] + self.text[i+length:]
# return text
#
# def split(self, i):
# i -= self.base
# head = Line(self.text[:i], self.indent)
# tail = Line(self.text[i:], self.indent)
# return head, tail
#
# def __add__(self, other):
# return Line(self.text + other.text, self.indent)
def clamp(x, mi, ma):
    """Clamp *x* to the inclusive range [mi, ma].

    Note: if the bounds conflict (mi > ma), mi wins — this mirrors the
    max(mi, min(ma, x)) evaluation order.
    """
    upper_bounded = min(ma, x)
    return max(mi, upper_bounded)
if __name__=='__main__':
    # Launch the curses UI; `curses` and `main` are imported/defined earlier
    # in this file (outside this excerpt). wrapper() restores the terminal
    # state even if main() raises.
    curses.wrapper(main)
|
UTF-8
|
Python
| false | false | 2,014 |
10,213,432,231,202 |
c748e0959c078793e42fc86a33d620c2895cbe95
|
ffe8e7944554622ca5a991dcb0a6215eed411de3
|
/empathica/applications/empathica/controllers/conflict.py
|
3ab441b8d9ca687320d4f4d5584e3817fee296f3
|
[
"MIT",
"LGPL-3.0-only",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.0-or-later",
"BSD-3-Clause",
"GPL-2.0-only",
"Apache-2.0",
"BSD-2-Clause"
] |
non_permissive
|
SEA000/uw-empathica
|
https://github.com/SEA000/uw-empathica
|
08785b002a62f22d8bf7f001bde7fd5216283e4c
|
084ec1c953bcb5492f866d95f50b9547a8089e0e
|
refs/heads/master
| 2020-12-24T13:36:24.339339 | 2013-12-14T17:25:17 | 2013-12-14T17:25:17 | 15,189,169 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
Conflict Controller
"""
import logging
import random
from gluon.contrib import simplejson as json
if settings.web2py_runtime_gae:
from google.appengine.api import taskqueue
@auth.requires_login()
@onerror
def new():
    """
    Creates a new Conflict and associates it with two perspectives
    and 3 security groups.

    On successful submit this:
      1. inserts the Conflict row and grants the creator admin rights,
      2. creates the two GroupPerspective rows plus member/admin auth groups,
      3. creates the four perspective maps (own view + view-of-other for
         each side) with read/update/delete permissions,
      4. queues invitation emails for every address entered (GAE only).
    """
    response.title = "New Conflict"
    form = SQLFORM.factory(db.Conflict, db.GroupTempInput, table_name="NewConflict", submit_button="Create", formstyle='divs', _id="NewConflict")
    # Accessibility
    form.element(_name='title')['_tabindex'] = 1
    form.element(_name='description')['_tabindex'] = 2
    form.element(_name='name1')['_tabindex'] = 3
    form.element(_name='name2')['_tabindex'] = 4
    form.element(_name='desc1')['_tabindex'] = 5
    form.element(_name='desc2')['_tabindex'] = 6
    form.element(_type='submit')['_tabindex'] = 9
    # Javascript validators
    form.element(_name='title')['_class'] += " validate[required,maxSize[40]]"
    form.element(_name='description')['_class'] += " validate[maxSize[2000]]"
    form.element(_name='name1')['_class'] += " validate[required,maxSize[40]]"
    form.element(_name='desc1')['_class'] += " validate[maxSize[2000]]"
    form.element(_name='name2')['_class'] += " validate[required,maxSize[40]]"
    form.element(_name='desc2')['_class'] += " validate[maxSize[2000]]"
    if form.accepts(request.vars, hideerror=True):
        cid = db.Conflict.insert(**db.Conflict._filter_fields(form.vars))
        # Set permissions
        adminGroupId = auth.add_group('conflict_%s_admin' % cid, 'Admin role for conflict_%s' % cid)
        auth.add_permission(adminGroupId, 'read', db.Conflict, cid)
        auth.add_permission(adminGroupId, 'update', db.Conflict, cid)
        auth.add_permission(adminGroupId, 'delete', db.Conflict, cid)
        auth.add_membership(adminGroupId)
        db.Conflict[cid] = dict(authorized_users=[auth.user.id])
        # Now create the two groups for the conflict
        group_one = db.GroupPerspective.insert(name=form.vars.name1, description=form.vars.desc1, id_conflict=cid)
        group_one_members = auth.add_group('group_%s_members' % group_one, 'Member role for group_%s' % group_one)
        group_one_admins = auth.add_group('group_%s_admins' % group_one, 'Admin role for group_%s' % group_one)
        auth.add_permission(group_one_members, 'read', db.GroupPerspective, group_one)
        auth.add_permission(group_one_admins, 'read', db.GroupPerspective, group_one)
        auth.add_permission(group_one_admins, 'update', db.GroupPerspective, group_one)
        auth.add_permission(group_one_admins, 'delete', db.GroupPerspective, group_one)
        group_two = db.GroupPerspective.insert(name=form.vars.name2, description=form.vars.desc2, id_conflict=cid)
        group_two_members = auth.add_group('group_%s_members' % group_two, 'Member role for group_%s' % group_two)
        group_two_admins = auth.add_group('group_%s_admins' % group_two, 'Admin role for group_%s' % group_two)
        auth.add_permission(group_two_members, 'read', db.GroupPerspective, group_two)
        auth.add_permission(group_two_admins, 'read', db.GroupPerspective, group_two)
        auth.add_permission(group_two_admins, 'update', db.GroupPerspective, group_two)
        auth.add_permission(group_two_admins, 'delete', db.GroupPerspective, group_two)
        # Finally create the required maps
        # loop through group_id, second_group_id,
        # (title, owning group, group whose perspective is represented)
        map_info = (
            [form.vars.name1 + "'s perspective", group_one, group_one],
            [form.vars.name1 + "'s perspective according to " + form.vars.name2, group_one, group_two],
            [form.vars.name2 + "'s perspective", group_two, group_two],
            [form.vars.name2 + "'s perspective according to " + form.vars.name1, group_two, group_one])
        for info in map_info:
            map_id = db.Map.insert(title=info[0], id_group = info[1], id_secondary = info[2])
            member_group_id = auth.id_group('group_%s_members' % info[1])
            admin_group_id = auth.id_group('group_%s_admins' % info[2])
            auth.add_permission(member_group_id, 'read', db.Map, map_id)
            auth.add_permission(member_group_id, 'update', db.Map, map_id)
            auth.add_permission(admin_group_id, 'read', db.Map, map_id)
            auth.add_permission(admin_group_id, 'update', db.Map, map_id)
            auth.add_permission(admin_group_id, 'delete', db.Map, map_id)
            auth.add_membership(member_group_id)
            auth.add_membership(admin_group_id)
        # Invitations: skip the creator's own address and blanks.
        # NOTE(review): assumes form.vars.users1/users2 are comma-separated
        # strings supplied by GroupTempInput — confirm against the model.
        for email in form.vars.users1.split(','):
            if(email != auth.user.email and len(email) > 0):
                user_id = db(db.auth_user.email == email).select().first()
                invite_id = db.Invite.insert(invitee_email = email, id_user = user_id, id_group = group_one, inviter_email = auth.user.email)
                if settings.web2py_runtime_gae:
                    taskqueue.add(url='/empathica/conflict/send_invite_email/%s' % (invite_id), method='GET')
        for email in form.vars.users2.split(','):
            if(email != auth.user.email and len(email) > 0):
                user_id = db(db.auth_user.email == email).select().first()
                invite_id = db.Invite.insert(invitee_email = email, id_user = user_id, id_group = group_two, inviter_email = auth.user.email)
                if settings.web2py_runtime_gae:
                    taskqueue.add(url='/empathica/conflict/send_invite_email/%s' % (invite_id), method='GET')
        redirect(URL('overview', args=[cid]))
    elif form.errors:
        response.flash = "Please make corrections to the form"
    return dict(form=form)
@auth.requires_login()
@onerror
def manage():
    """Dashboard: list the user's open and closed conflicts plus pending invites."""
    response.title = "Manage Conflicts"
    # NOTE(review): `open` shadows the builtin open() for the rest of this
    # function — harmless here but worth renaming.
    open = []
    closed = []
    conflicts = db(db.Conflict.authorized_users.contains(auth.user.id)).select()
    for conflict in conflicts:
        admin_group = auth.id_group('conflict_%s_admin' % conflict.id)
        groups = db(db.GroupPerspective.id_conflict == conflict.id).select()
        conflictD = conflict.as_dict()
        conflictD['is_admin'] = auth.has_membership(admin_group)
        record = {
            'conflict' : conflictD,
            'groups' : groups.as_dict().values()
        }
        # Bucket by open/closed state for the two dashboard sections.
        if conflict.open_conflict == True:
            open.append(record)
        else:
            closed.append(record)
    # Pending invitations addressed to this user, flattened for the view.
    invites = []
    for invite in db(db.Invite.id_user == auth.user.id).select():
        group = db.GroupPerspective[invite.id_group]
        conflict = db.Conflict[group.id_conflict]
        invite = {
            'id' : invite.id ,
            'conflict' : conflict.title,
            'invite_from' : invite.inviter_email,
            'group' : group.name
        }
        invites.append(invite)
    return dict(open=open, closed=closed, invites=invites)
@auth.requires_login()
@onerror
def overview():
    """Show one conflict's groups and their own-perspective maps.

    Raises HTTP 403 when the user is not authorized for the conflict and
    HTTP 400 when the conflict id is missing/invalid (KeyError path).
    """
    try:
        conflict_id = request.args(0)
        conflict = db.Conflict[conflict_id]
        authorized_conflicts = [c.id for c in db(db.Conflict.authorized_users.contains(auth.user.id)).select(db.Conflict.id)]
        if(conflict.id in authorized_conflicts):
            is_admin = False
            admin_group_id = auth.id_group('conflict_%s_admin' % conflict_id)
            if(auth.has_membership(admin_group_id)):
                is_admin = True
            groups = db(db.GroupPerspective.id_conflict == conflict_id).select()
            for group in groups:
                # Only each group's self-perspective map (id_group == id_secondary).
                group['maps'] = db((db.Map.id_group == group.id) & (db.Map.id_secondary == group.id)).select().as_dict().values()
            response.title = "Overview - %s" % conflict.title
            return dict(conflict=conflict.as_dict(), groups = groups.as_dict().values(), is_admin = is_admin)
        else:
            raise HTTP(403)
    except KeyError:
        raise HTTP(400)
    return dict()
@auth.requires_login()
@onerror
def correlate():
    """Show the node-correlation page for two maps of the same conflict.

    need list of all nodes in graph a
    need list of all nodes in graph b
    need list of paired nodes
    need to restrict access

    Returns the nodes of each map that are not yet paired, plus the list of
    existing pairings. Raises HTTP 400 if either map is missing or the maps
    belong to different conflicts.
    """
    id_one = int(request.args(0))
    id_two = int(request.args(1))
    # NodeMapping rows are stored with the lower map id first.
    if id_two < id_one:
        id_one, id_two = id_two, id_one
    graph_one = db.Map(id_one)
    graph_two = db.Map(id_two)
    if not graph_one or not graph_two:
        raise HTTP(400)
    if graph_one.id_group.id_conflict.id != graph_two.id_group.id_conflict.id:
        raise HTTP(400)
    conflict = db.Conflict(graph_one.id_group.id_conflict)
    graph_one_nodes = db(db.Node.id_map == graph_one).select()
    graph_two_nodes = db(db.Node.id_map == graph_two).select()
    mapping = db((db.NodeMapping.map_one == graph_one) & (db.NodeMapping.map_two == graph_two)).select()
    # Store the mapped ids in a hash table (assumes no id collisions)
    mapped_nodes = {}
    for row in mapping:
        mapped_nodes[row.node_one.id] = {}
        mapped_nodes[row.node_two.id] = {}
    # Split map one's nodes into unpaired (filtered) vs paired (annotated).
    filtered_graph_one = []
    for node in graph_one_nodes:
        if node.id not in mapped_nodes:
            filtered_graph_one.append(node)
        else:
            entry = mapped_nodes[node.id]
            entry['id_map'] = node.id_map
            entry['name'] = node.name
    filtered_graph_two = []
    for node in graph_two_nodes:
        if node.id not in mapped_nodes:
            filtered_graph_two.append(node)
        else:
            entry = mapped_nodes[node.id]
            entry['id_map'] = node.id_map
            entry['name'] = node.name
    # Flatten the pairings for the template: (map, id, name) x2 + identical flag.
    related_nodes = []
    for row in mapping:
        node_one_id = row.node_one.id
        node_two_id = row.node_two.id
        node_one = mapped_nodes[node_one_id]
        node_two = mapped_nodes[node_two_id]
        related_nodes.append((node_one['id_map'], node_one_id, node_one['name'], node_two['id_map'], node_two_id, node_two['name'], row.identical))
    return dict(conflict = conflict, a_nodes = filtered_graph_one, b_nodes = filtered_graph_two, related_nodes = related_nodes)
@auth.requires_login()
@onerror
def compare():
    """Compare two maps of the same conflict and rank concept differences.

    Builds a unified node-id space across both maps from the stored
    NodeMapping correlations, mirrors each map's node valences and edges
    into that space (negating values for nodes marked non-identical), and
    asks GraphComprehender.graph_diff for a ranked list of differences.

    Returns dict(conflict=..., ret_list=[(difference, node name), ...]).
    """
    id_one = int(request.args(0))
    id_two = int(request.args(1))
    # NodeMapping rows are stored with the lower map id first.
    if id_two < id_one:
        id_one, id_two = id_two, id_one
    graph_one = db.Map(id_one)
    graph_two = db.Map(id_two)
    conflict = graph_one.id_group.id_conflict
    lookup_table = []    # (mapping id, map one, node one, map two, node two, identical)
    opposite_nodes = []  # map-two node ids whose meaning is inverted
    one_to_harm = {}     # map-one node id -> unified id
    two_to_harm = {}     # map-two node id -> unified id
    map_one_nodes = db(db.Node.id_map == graph_one).select()
    map_two_nodes = db(db.Node.id_map == graph_two).select()
    # Establish node mappings
    max_id = 0
    not_equal = False    # set when some node lacks a correlation record
    for record in db((db.NodeMapping.map_one == graph_one) & (db.NodeMapping.map_two == graph_two)).select():
        lookup_table.append((record.id, record.map_one, record.node_one, record.map_two, record.node_two, record.identical))
        if not record.identical:
            opposite_nodes.append((record.node_two.id))
        # Keep track of the highest id
        if record.id > max_id:
            max_id = record.id
    # Make sure each node has a mapping record (synthesize one-sided records
    # for uncorrelated nodes).
    for n in map_one_nodes:
        found = False
        for record in lookup_table:
            if record[2] == n.id:
                one_to_harm[n.id] = record[0]
                found = True
                break
        if not found:
            not_equal = True
            max_id += 1
            one_to_harm[n.id] = max_id
            lookup_table.append((max_id, graph_one.id, n.id, graph_two.id, None, None))
    for n in map_two_nodes:
        found = False
        for record in lookup_table:
            if record[4] == n.id:
                two_to_harm[n.id] = record[0]
                found = True
                break
        if not found:
            not_equal = True
            max_id += 1
            two_to_harm[n.id] = max_id
            lookup_table.append((max_id, graph_one.id, None, graph_two.id, n.id, None))
    # Valence of each unified node as seen from each map.
    harm_map_one = {}
    for n in map_one_nodes:
        for record in lookup_table:
            if record[2] == n.id:
                harm_map_one[record[0]] = n.valence
    harm_map_two = {}
    for n in map_two_nodes:
        for record in lookup_table:
            if record[4] == n.id:
                multiplier = 1.0
                if n.id in opposite_nodes:
                    multiplier = -1.0
                harm_map_two[record[0]] = n.valence * multiplier
    # Nodes present in only one map get a neutral valence in the other.
    for record in lookup_table:
        if record[2] is None:
            harm_map_one[record[0]] = 0
        if record[4] is None:
            harm_map_two[record[0]] = 0
    # Mirror each map's edges into symmetric adjacency dicts keyed by unified id.
    harm_adj_one = {}
    for id in harm_map_one:
        harm_adj_one[id] = {}
    harm_adj_two = {}
    for id in harm_map_two:
        harm_adj_two[id] = {}
    for edge in db(db.Connection.id_map == graph_one).select():
        harm_adj_one[one_to_harm[edge.id_first_node]][one_to_harm[edge.id_second_node]] = edge.valence
        harm_adj_one[one_to_harm[edge.id_second_node]][one_to_harm[edge.id_first_node]] = edge.valence
    for edge in db(db.Connection.id_map == graph_two).select():
        # BUGFIX: was "multipler = 1.0" (typo) — the stale `multiplier` left
        # over from the valence loop above was reused and its sign compounded
        # across edges instead of being reset per edge.
        multiplier = 1.0
        if edge.id_first_node in opposite_nodes:
            multiplier = multiplier * -1.0
        if edge.id_second_node in opposite_nodes:
            multiplier = multiplier * -1.0
        harm_adj_two[two_to_harm[edge.id_first_node]][two_to_harm[edge.id_second_node]] = edge.valence * multiplier
        harm_adj_two[two_to_harm[edge.id_second_node]][two_to_harm[edge.id_first_node]] = edge.valence * multiplier
    GraphComprehension = local_import('GraphComprehension')
    gc = GraphComprehension.GraphComprehender()
    difference = gc.graph_diff(harm_map_one, harm_adj_one, harm_map_two, harm_adj_two)
    # Translate unified ids back to display names (prefer map one's node).
    ret_list = []
    for val, id in difference:
        for record in lookup_table:
            if id != record[0]:
                continue
            if record[2] is not None:
                ret_list.append((val, db.Node(record[2]).name))
            else:
                ret_list.append((val, db.Node(record[4]).name))
            break
    if not_equal:
        response.flash = T("For more accurate results correlate the concepts first.")
    return dict(conflict = conflict, ret_list = ret_list)
@auth.requires_login()
@onerror
def compromise():
    """Suggest compromise solutions between two maps of the same conflict.

    Uses the same unified node-id construction as compare(), then asks
    GraphComprehender.compromise for candidate solutions and the index of
    the best one.

    Returns dict(conflict, ret_list, best_sol, group1, group2) where
    ret_list entries are (score1, score2, [(node name, assigned value), ...]).
    """
    id_one = int(request.args(0))
    id_two = int(request.args(1))
    # NodeMapping rows are stored with the lower map id first.
    if id_two < id_one:
        id_one, id_two = id_two, id_one
    graph_one = db.Map(id_one)
    graph_two = db.Map(id_two)
    conflict = graph_one.id_group.id_conflict
    group1 = graph_one.id_group.name
    group2 = graph_two.id_group.name
    lookup_table = []    # (mapping id, map one, node one, map two, node two, identical)
    opposite_nodes = []  # map-two node ids whose meaning is inverted
    one_to_harm = {}     # map-one node id -> unified id
    two_to_harm = {}     # map-two node id -> unified id
    map_one_nodes = db(db.Node.id_map == graph_one).select()
    map_two_nodes = db(db.Node.id_map == graph_two).select()
    # Establish node mappings
    max_id = 0
    not_equal = False    # set when some node lacks a correlation record
    for record in db((db.NodeMapping.map_one == graph_one) & (db.NodeMapping.map_two == graph_two)).select():
        lookup_table.append((record.id, record.map_one, record.node_one, record.map_two, record.node_two, record.identical))
        if not record.identical:
            opposite_nodes.append((record.node_two.id))
        # Keep track of the highest id
        if record.id > max_id:
            max_id = record.id
    # Synthesize one-sided records for uncorrelated nodes.
    for n in map_one_nodes:
        found = False
        for record in lookup_table:
            if record[2] == n.id:
                one_to_harm[n.id] = record[0]
                found = True
                break
        if not found:
            not_equal = True
            max_id += 1
            one_to_harm[n.id] = max_id
            lookup_table.append((max_id, graph_one.id, n.id, graph_two.id, None, None))
    for n in map_two_nodes:
        found = False
        for record in lookup_table:
            if record[4] == n.id:
                two_to_harm[n.id] = record[0]
                found = True
                break
        if not found:
            not_equal = True
            max_id += 1
            two_to_harm[n.id] = max_id
            lookup_table.append((max_id, graph_one.id, None, graph_two.id, n.id, None))
    # Valence of each unified node as seen from each map.
    harm_map_one = {}
    for n in map_one_nodes:
        for record in lookup_table:
            if record[2] == n.id:
                harm_map_one[record[0]] = n.valence
    harm_map_two = {}
    for n in map_two_nodes:
        for record in lookup_table:
            if record[4] == n.id:
                multiplier = 1.0
                if(n.id in opposite_nodes):
                    multiplier = -1.0
                harm_map_two[record[0]] = n.valence * multiplier
    # Nodes present in only one map get a neutral valence in the other.
    for record in lookup_table:
        if record[2] is None:
            harm_map_one[record[0]] = 0
        if record[4] is None:
            harm_map_two[record[0]] = 0
    # Mirror each map's edges into symmetric adjacency dicts keyed by unified id.
    harm_adj_one = {}
    for id in harm_map_one:
        harm_adj_one[id] = {}
    harm_adj_two = {}
    for id in harm_map_two:
        harm_adj_two[id] = {}
    for edge in db(db.Connection.id_map == graph_one).select():
        harm_adj_one[one_to_harm[edge.id_first_node]][one_to_harm[edge.id_second_node]] = edge.valence
        harm_adj_one[one_to_harm[edge.id_second_node]][one_to_harm[edge.id_first_node]] = edge.valence
    for edge in db(db.Connection.id_map == graph_two).select():
        # BUGFIX: was "multipler = 1.0" (typo) — the stale `multiplier` from
        # the valence loop was reused and its sign compounded across edges
        # instead of being reset per edge.
        multiplier = 1.0
        if edge.id_first_node in opposite_nodes:
            multiplier = multiplier * -1.0
        if edge.id_second_node in opposite_nodes:
            multiplier = multiplier * -1.0
        harm_adj_two[two_to_harm[edge.id_first_node]][two_to_harm[edge.id_second_node]] = edge.valence * multiplier
        harm_adj_two[two_to_harm[edge.id_second_node]][two_to_harm[edge.id_first_node]] = edge.valence * multiplier
    GraphComprehension = local_import('GraphComprehension')
    gc = GraphComprehension.GraphComprehender()
    (solns, best_sol) = gc.compromise(harm_map_one, harm_adj_one, harm_map_two, harm_adj_two)
    # Map unified ids back to (map-one node, map-two node) pairs for display.
    harm_to_ab = {}
    for record in lookup_table:
        harm_to_ab[record[0]] = (record[2], record[4])
    ret_list = []
    for sol in solns:
        ret_val = []
        for c in sol[2]:
            # Prefer map one's node name; fall back to map two's.
            if harm_to_ab[c][0] != None:
                ret_val.append((db.Node(harm_to_ab[c][0]).name, sol[2][c]))
            else:
                ret_val.append((db.Node(harm_to_ab[c][1]).name, sol[2][c]))
        ret_list.append((sol[0], sol[1], ret_val))
    if not_equal:
        response.flash = T("For more accurate results correlate the concepts first.")
    return dict(conflict = conflict, ret_list = ret_list, best_sol = best_sol, group1=group1, group2=group2)
@auth.requires_login()
@onerror
def invite():
    """Invite a user by email address to join a perspective group.

    Inserts an Invite row (linking to an existing auth_user when the address
    is already registered) and queues the notification email.
    """
    group_id = request.args(0)
    group = db.GroupPerspective[group_id]
    form = FORM('Email:', INPUT(_name='invitee_email'), INPUT(_type='submit'))
    if form.accepts(request.vars):
        user_id = None
        existingUser = db(db.auth_user.email == form.vars.invitee_email).select()
        if existingUser:
            user_id = existingUser[0].id
        invite_id = db.Invite.insert(invitee_email = form.vars.invitee_email, id_user = user_id, id_group = group_id, inviter_email = auth.user.email)
        # BUGFIX: the taskqueue import/use was unconditional, which raises
        # ImportError outside Google App Engine. Guard it the same way new()
        # does; off GAE, send_invite_email can be invoked directly instead.
        if settings.web2py_runtime_gae:
            from google.appengine.api import taskqueue
            taskqueue.add(url='/empathica/conflict/send_invite_email/%s' % (invite_id), method='GET')
        redirect(URL('manage'))
    elif form.errors:
        response.flash = form.errors
    return dict(form = form, group = group.as_dict())
@auth.requires_login()
@onerror
def accept_invite():
    """Accept a pending invitation: grant group/conflict membership, then
    delete the invite. Users who are not the invite's recipient are simply
    redirected without changes."""
    invite = db.Invite[request.args(0)]
    if(auth.user.id == invite.id_user):
        # This user is the original intended recipient, all ok
        group = db.GroupPerspective[invite.id_group]
        perspective_groupid = auth.id_group('group_%s_members' % (invite.id_group))
        # Add the user to the conflict's authorized_users list.
        authorized_users = db.Conflict[group.id_conflict].authorized_users
        authorized_users.append(invite.id_user)
        db.Conflict[group.id_conflict] = dict(authorized_users=authorized_users)
        # NOTE(review): 'conflict_%s_members' is never created elsewhere in
        # this controller (new() only creates 'conflict_%s_admin') — confirm
        # this group exists before relying on the membership below.
        conflict_groupid = auth.id_group('conflict_%s_members' % (db.Conflict[group.id_conflict].id))
        auth.add_membership(conflict_groupid)
        auth.add_membership(perspective_groupid)
        del db.Invite[invite.id]
    redirect(URL('manage'))
@auth.requires_login()
@onerror
def ignore_invite():
    """Decline a pending invitation by deleting it (recipient only)."""
    invite = db.Invite[request.args(0)]
    if(auth.user.id == invite.id_user):
        # This user is the original intended recipient, all ok
        # Just delete the invite because the user does not want it.
        del db.Invite[invite.id]
    redirect(URL('manage'))
@auth.requires_login()
@onerror
def claim_token():
    """Bind an emailed invite token to the logged-in user.

    Two token kinds are handled:
      - proxy_token: a re-authorization token sent to the original invitee
        after someone else tried to claim the invite;
      - invite_token: the original token from the invitation email.
    """
    token = request.args(0)
    invite = db(db.Invite.invite_token == token).select()
    if not invite:
        # could be claiming a proxy token
        invite = db(db.Invite.proxy_token == token).select()
        if not invite:
            # Unknown token: nothing to claim.
            redirect(URL('manage'))
        invite = invite[0]
        # Only the account that originally tried to claim may finish via proxy.
        if(auth.user.email == invite.claimed_email):
            db.Invite[invite.id] = dict(id_user = auth.user.id)
            db.commit()
            redirect(URL('manage'))
    else:
        invite = invite[0]
        # we're looking at the original token
        if(auth.user.id == invite.id_user):
            # this user was already registered when the invite was sent
            redirect(URL('manage'))
        if((invite.id_user == None) & (auth.user.email == invite.invitee_email)):
            # the original email recipient matches, this user was not registered
            # when the invite was sent
            db.Invite[invite.id] = dict(id_user = auth.user.id)
            db.commit()
            redirect(URL('manage'))
        else:
            # the google account claiming the token is not who the token was sent to
            # we have to email the original account and get them to authorize again
            import uuid
            db.Invite[invite.id] = dict(proxy_token = str(uuid.uuid1()), claimed_email = auth.user.email)
            context = dict(claimed = invite.claimed_email, invitee=invite.invitee_email, invitetoken = invite.proxy_token, server = request.env.server_name, port = request.env.server_port)
            message = response.render('invite_authorize.html', context)
            mail.send(to = invite.invitee_email,
                      subject = T('You\'re a Wizard, Harry'),
                      message = message)
            db.commit()
            redirect(URL('manage'))
    return dict()
@auth.requires_login()
def call():
    """web2py service dispatcher for the @service.json methods below."""
    # forget() avoids session file locking for stateless service calls.
    session.forget()
    return service()
@service.json
def close_conflict(id):
    """
    Updates a conflict to the 'closed' state.
    Parameters:
    - id:
        The database id of the conflict to close
    Returns {success: bool}.
    """
    # NOTE(review): parameter `id` shadows the builtin of the same name.
    if not auth.has_permission('update', db.Conflict, id):
        return dict(success=False)
    db.Conflict[id] = dict(open_conflict=False)
    db.commit()
    return dict(success=True)
@service.json
def delete_conflict(id):
    '''
    Deletes a conflict so long as the current user
    has delete permissions for the conflict. Also
    removes the admin and member groups corresponding to the
    conflict.
    BUGBUG: We need to make sure that deleting a group clears permissions
    '''
    # NOTE(review): the docstring says delete permission but the code checks
    # 'update'; also 'conflict_%s_members' is never created by new() — both
    # worth confirming against intent before changing behavior.
    if(auth.has_permission('update', db.Conflict, id)):
        memberGroupId = auth.id_group('conflict_' + str(id) + '_members')
        adminGroupId = auth.id_group('conflict_' + str(id) + '_admin')
        auth.del_group(memberGroupId)
        auth.del_group(adminGroupId)
        db(db.Conflict.id == id).delete()
        db.commit()
        return dict(success=True)
    else:
        return dict(success=False)
@service.json
def edit_title(conflict_id, title):
    """Rename a conflict the user may update. Returns {success: bool}."""
    # BUGFIX: original compared against undefined `false` (NameError at
    # runtime) and returned success=False even after a successful update.
    if not auth.has_permission('update', db.Conflict, conflict_id):
        db.rollback()
        return dict(success=False)
    db.Conflict[conflict_id] = dict(title=title)
    db.commit()
    return dict(success=True)
@service.json
def edit_description(conflict_id, description):
    """Update a conflict's description. Returns {success: bool}."""
    # BUGFIX: original compared against undefined `false` (NameError at
    # runtime); express the permission check with a plain boolean test.
    if not auth.has_permission('update', db.Conflict, conflict_id):
        db.rollback()
        return dict(success=False)
    db.Conflict[conflict_id] = dict(description=description)
    db.commit()
    return dict(success=True)
@service.json
def create_group(conflict_id, name, description):
    '''
    Adds a group to a conflict.
    NB. A group my be a single person
    Returns {success: bool, group_id} on success.
    '''
    if(auth.has_permission('update', db.Conflict, conflict_id)):
        group_id = db.GroupPerspective.insert(name=name,description=description, id_conflict=conflict_id)
        # NOTE(review): auth group is named 'group_<id>_admin' here while
        # new() creates 'group_<id>_admins' (plural) — the two code paths
        # produce inconsistent group names; confirm which is canonical.
        member_group_id = auth.add_group('group_' + str(group_id) + '_members')
        admin_group_id = auth.add_group('group_' + str(group_id) + '_admin')
        auth.add_permission(member_group_id, 'read', db.GroupPerspective, group_id)
        auth.add_permission(admin_group_id, 'read', db.GroupPerspective, group_id)
        auth.add_permission(admin_group_id, 'update', db.GroupPerspective, group_id)
        auth.add_permission(admin_group_id, 'delete', db.GroupPerspective, group_id)
        auth.add_membership(member_group_id)
        auth.add_membership(admin_group_id)
        db.commit()
        return dict(success=True, group_id=group_id)
    else:
        db.rollback()
        return dict(success=False)
@service.json
def rename_group(group_id, name):
    """Rename a perspective group. Returns {success: bool}."""
    # BUGFIX: original compared against undefined `false` (NameError at
    # runtime); express the permission check with a plain boolean test.
    if not auth.has_permission('update', db.GroupPerspective, group_id):
        db.rollback()
        return dict(success=False)
    db.GroupPerspective[group_id] = dict(name=name)
    db.commit()
    return dict(success=True)
@service.json
def delete_group(group_id):
    """Delete a perspective group and its auth groups. Returns {success: bool}."""
    # BUGFIX: original compared against undefined `false` (NameError) and
    # checked permissions on `db.group`, a table that does not exist in this
    # controller — every other group operation uses db.GroupPerspective.
    if not auth.has_permission('delete', db.GroupPerspective, group_id):
        db.rollback()
        return dict(success=False)
    member_group_id = auth.id_group('group_' + str(group_id) + '_members')
    admin_group_id = auth.id_group('group_' + str(group_id) + '_admin')
    auth.del_group(member_group_id)
    auth.del_group(admin_group_id)
    del db.GroupPerspective[group_id]
    db.commit()
    return dict(success=True)
def send_invite_email():
    """Task-queue endpoint: send the invitation email for one Invite row.

    Idempotent: bails out if the invite is missing or the email was already
    sent (email_sent flag), so retried tasks do not double-send.
    """
    invite_id = request.args(0)
    invite = db.Invite[invite_id]
    if not invite:
        logging.info('Got a null invite id. Not sending anything')
        return
    if invite.email_sent == True:
        logging.info('Already sent invite %s' % invite.id)
        return
    context = dict(inviter=invite.inviter_email, invitetoken = invite.invite_token, server = request.env.server_name, port = request.env.server_port)
    message = response.render('invitation.html', context)
    mail.send(to = invite.invitee_email,
              subject = T('Invitation to Participate in Conflict Resolution'),
              message = message)
    # Mark as sent only after mail.send so a failure allows a retry.
    db.Invite[invite_id] = dict(email_sent = True)
    db.commit()
    return
@service.json
def create_map(group_id, group_secondary_id, title):
    '''
    Creates a map given two groups in a conflict.
    Returns the id of the created map
    (as {success: bool, map_id}).
    '''
    if(auth.has_permission('read', db.GroupPerspective, group_id)):
        map_id = db.Map.insert(title=title, id_group=group_id, id_secondary=group_secondary_id)
        # NOTE(review): looks up 'group_<id>_admin' while new() creates
        # 'group_<id>_admins' (plural) — for maps on groups created via new()
        # this lookup will not find the admin group; confirm intent.
        member_group_id = auth.id_group('group_' + str(group_id) + '_members')
        admin_group_id = auth.id_group('group_' + str(group_id) + '_admin')
        auth.add_permission(member_group_id, 'read', db.Map, map_id)
        auth.add_permission(admin_group_id, 'read', db.Map, map_id)
        auth.add_permission(admin_group_id, 'update', db.Map, map_id)
        auth.add_permission(admin_group_id, 'delete', db.Map, map_id)
        auth.add_membership(member_group_id)
        auth.add_membership(admin_group_id)
        db.commit()
        return dict(success=True, map_id=map_id)
    else:
        db.rollback()
        return dict(success=False)
@service.json
def delete_map(map_id):
    """Delete a map the user may delete. Returns {success: bool}."""
    if(auth.has_permission('delete', db.Map, map_id)):
        #Clear nodes and connections from the map
        # NOTE(review): only the Map row is deleted here — Node/Connection
        # cleanup presumably relies on DAL cascade; confirm.
        del db.Map[map_id]
        db.commit()
        return dict(success=True)
    else:
        db.rollback()
        return dict(success=False)
@service.json
def correlate_nodes(map_one_id, map_two_id):
    """Replace all node correlations between two maps.

    Expects a JSON array of [map_a, node_a, map_b, node_b, identical] pairs
    in the request body; rows are normalized so the lower map id is stored
    first. Returns {success: True}.
    """
    id_one = int(map_one_id)
    id_two = int(map_two_id)
    if id_two < id_one:
        id_one, id_two = id_two, id_one
    # Delete all old nodes
    db((db.NodeMapping.map_one == id_one) & (db.NodeMapping.map_two == id_two)).delete()
    pairs = json.loads(request.body.read())
    for pair in pairs:
        map1 = int(pair[0])
        node1 = int(pair[1])
        map2 = int(pair[2])
        node2 = int(pair[3])
        same = pair[4]
        # Normalize orientation: lower map id goes in the *_one columns.
        if map1 > map2:
            map1, map2 = map2, map1
            node1, node2 = node2, node1
        db.NodeMapping.insert(map_one = map1, node_one = node1, map_two = map2, node_two = node2, identical = same)
    db.commit()
    return dict(success=True)
|
UTF-8
|
Python
| false | false | 2,013 |
16,638,703,342,623 |
2d7c9f32a124af56d51d0ee0afb43f2adc98e912
|
52c4f2ab8ec3b9399391b2311eaac486567fba48
|
/__init__.py
|
77df28a4859a74d7833ca2ff31874d3d34a71d84
|
[] |
no_license
|
brianjgeiger/flaskpractice
|
https://github.com/brianjgeiger/flaskpractice
|
707a24f575d6ba76e776190200384e7897ff70ef
|
46d9091e1550c5d4ba99c1071c33dabc25fd7c65
|
refs/heads/master
| 2016-09-05T22:13:02.619821 | 2014-05-04T01:15:20 | 2014-05-04T01:15:20 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
__author__ = 'patrickgorman'
from flask import Flask, render_template
import system
import gdata.youtube
import gdata.youtube.service
import urllib2
import SQL
from database import init_db
app = Flask(__name__)
class Video_selector(object):
    """Scrapes a fixed YouTube playlist page and tracks embed URLs + titles.

    NOTE(review): video_data and videos are class-level mutables, shared by
    every instance (and mutated via instance methods) — the rest of the app
    treats them as global state, but this is fragile.
    """
    video_data = {}  # video_id -> title
    videos = []      # embed URLs ("https://www.youtube.com/embed/<id>")
    def __init__(self):
        self.unsorted_video_ids = []
        self.yt_service = gdata.youtube.service.YouTubeService()
        self.yt_service.ssl = True
        self.sorted_video_ids = [""]
        self.video_number = 5
        self.sorted_video_ids *= self.video_number
    def get_videos(self):
        """Scrape the first `video_number` video ids from the playlist HTML."""
        req = urllib2.Request('http://www.youtube.com/playlist?list=PLEA1FEF17E1E5C0DA')
        response = urllib2.urlopen(req)
        the_page = response.read()
        video_locator = "data-video-id="
        for x in range(self.video_number):
            # Chop the page up to (and including) the marker; the next 11
            # characters are assumed to be the video id.
            location = the_page.find(video_locator)
            the_page = the_page.replace(the_page[:location + 15], '')
            video_id = the_page[0:11]
            self.unsorted_video_ids.append(video_id)
            self.videos.append("https://www.youtube.com/embed/" + video_id)
        Video_selector.get_titles(self, self.unsorted_video_ids)
    def add_video(self, new_video, video_id):
        """Register one extra embed URL and refresh all titles."""
        self.videos.append(new_video)
        self.video_number += 1
        self.unsorted_video_ids.append(video_id)
        Video_selector.get_titles(self, self.unsorted_video_ids)
    def remove_video(self, video):
        # NOTE(review): does not decrement video_number or drop the id from
        # unsorted_video_ids/video_data — confirm whether that is intended.
        self.videos.remove(video)
    def get_titles(self, video_ids):
        """Fetch each video's title from the YouTube GData API."""
        for x in video_ids:
            entry = self.yt_service.GetYouTubeVideoEntry(video_id=x)
            self.video_data[x] = entry.media.title.text
    def save_titles(self):
        # Persist the id->title map via the project's SQL helper module.
        SQL.save_videos(self.video_data)
    @staticmethod
    def load_titles():
        # NOTE(review): result of SQL.load_videos() is discarded — presumably
        # meant to be returned or stored; confirm.
        SQL.load_videos()
@app.route('/<video_id>')
def load_video(video_id):
    """Route on video id: 11 chars adds/plays a video, 16 chars removes one.

    NOTE(review): Video_selector.method(Video_selector(), ...) builds a
    throw-away instance per request just to call an instance method — the
    class effectively holds global state (see Video_selector).
    """
    video = 'https://www.youtube.com/embed/' + video_id
    if video not in Video_selector.videos and len(video_id) == 11:
        # Unknown 11-char id: register it, then render its detail page.
        Video_selector.add_video(Video_selector(), video, video_id)
        print Video_selector.video_data
        print Video_selector.video_data[video_id]
        return render_template('template2.html', input=Video_selector.video_data,
                               object=Video_selector.video_data[video_id], link=video)
    elif len(video_id) == 16:
        # 16-char tokens are treated as removal requests.
        Video_selector.remove_video(Video_selector(), video)
        return render_template('template1.html', input=Video_selector.video_data)
    else:
        # Known video: just render its detail page.
        return render_template('template2.html', input=Video_selector.video_data,
                               object=Video_selector.video_data[video_id], link=video)
@app.route('/')
def template():
    """Index page: list all known videos (id -> title)."""
    return render_template('template1.html', input=Video_selector.video_data)
if __name__ == "__main__":
init_db()
Video_selector.get_videos(Video_selector())
print SQL.load_videos()
startup = False
while not startup:
try:
app.run(debug=True)
startup = True
except:
system.server_reset()
|
UTF-8
|
Python
| false | false | 2,014 |
19,404,662,277,049 |
1fff5f3802e374f22fdc63af984fd4f17c87304a
|
f206957e5664756f503540b743e0a009926da4ba
|
/src/dnspod/api/root.py
|
dd183fd2754d52f7cb6ea67307400a9a51465a4e
|
[
"MIT"
] |
permissive
|
aucampia/dnspod-int-py
|
https://github.com/aucampia/dnspod-int-py
|
3df0090bb53b7ff22b7f6a44adba543e4d849f90
|
30e341a3c740874449efc7b62fc4cf650de14e72
|
refs/heads/master
| 2017-10-30T14:09:07.735769 | 2014-09-26T23:16:55 | 2014-09-26T23:16:55 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import dnspod
import enum
import urllib.parse
import urllib.request
import urllib.response
import posixpath
def _evaluate_preference( *args, name ):
for arg in args:
if arg != None:
return arg
return None
class Polar( enum.Enum ):
    """Yes/no polarity flag used when rendering API query parameters."""
    no = 0
    yes = 1
    @classmethod
    def from_boolean( cls, boolean ):
        """Map True -> yes, False -> no; anything else (incl. None) -> None.

        Identity comparisons are deliberate: only the actual bool singletons
        are converted.
        """
        if boolean is True:
            return cls.yes
        if boolean is False:
            return cls.no
        return None
class Root( object ):
    """Client root for the DNSPod International API.

    NOTE(review): this class appears unfinished and cannot run as written —
    several names do not resolve; the specific problems are flagged inline
    rather than fixed, since the intended design is not clear from this file.
    """
    class ApiFormat( enum.Enum ):
        # Response serialization formats the API supports.
        xml = 0
        json = 1
    #class ErrorOnEmpty( enum.Enum ):
    #    yes = 0
    #    no = 1
    #class LoginRemember( enum.Enum ):
    #    yes = 0
    #    no = 1
    def __init__( self, *,
            username = None,
            password = None,
            user_id = None,
            login_code = None,
            login_remember = True,
            url="https://api.dnspod.com",
            error_on_empty = False,
            api_format = ApiFormat.json,
            verbose_writer = dnspod.VerboseWriter() ):
        # NOTE(review): the default VerboseWriter() instance is created once
        # at class-definition time and shared by all instances.
        self._username = username
        self._password = password
        self._user_id = user_id
        self._login_code = login_code
        self.login_remember = login_remember
        self._url = url
        self.error_on_empty = error_on_empty
        self.api_format = api_format
        # NOTE(review): `verbosity` and `verbose_write` are undefined here
        # (NameError on construction) — presumably both lines should use the
        # `verbose_writer` parameter; confirm intent.
        self._verbosity = verbosity
        self._verbose_write = verbose_write
        # parse URL
        # extract username + password + user_id + login_code + login_remember + error_on_empty + api_format + verbose_writer
        #self._url_parsed = urllib.parse.urlsplit( self._url )
        #self._url_qs_parsed = urllib.parse.parse_qs( self._url_parsed.query )
    def _username( self, *, username = None ):
        # NOTE(review): shadowed by the instance attribute self._username set
        # in __init__, so this method is unreachable on instances; it also
        # reads self.username/self.password/etc., which are never assigned.
        # The same applies to _password/_user_id/_login_code below.
        return _evaluate_preference( username, self.username )
    def _password( self, *, password = None ):
        return _evaluate_preference( password, self.password )
    def _user_id( self, *, user_id = None ):
        return _evaluate_preference( user_id, self.user_id )
    def _login_code( self, *, login_code = None ):
        return _evaluate_preference( login_code, self.login_code )
    def url( self, path, params ):
        """Build the request URL for *path* with *params* merged into the query."""
        url_parsed = urllib.parse.urlsplit( self._url )
        # NOTE(review): SplitResult is an immutable namedtuple — assigning to
        # .path/.query raises AttributeError; needs url_parsed._replace(...).
        # Also self._url_parsed (referenced below) is commented out in
        # __init__, and from_boolean(self.api_format) passes an ApiFormat,
        # not a bool, so it yields None.
        url_parsed.path = posixpath.normpath( posixpath.join( url_parsed.path, path ) )
        url_qs_parsed = urllib.parse.parse_qs( self._url_parsed.query )
        url_qs_parsed[ "error_on_empty" ] = Polar.from_boolean( self.error_on_empty ).name
        url_qs_parsed[ "api_format" ] = Polar.from_boolean( self.api_format ).name
        url_qs_parsed.update( params )
        url_parsed.query = urllib.parse.urlencode( url_qs_parsed, doseq=True )
        return urllib.parse.urlunsplit( url_parsed )
    def auth_parameters( self ):
        """Assemble the credential fields for an auth request."""
        tmp = {
            "username": self._username,
            "password": self._password
        };
        if self._user_id is not None:
            tmp[ "user_id" ] = self._user_id;
        if self._login_code is not None:
            tmp[ "login_code" ] = self._login_code;
        if self.login_remember is not None:
            tmp[ "login_remember" ] = self.login_remember;
        return tmp;
    def auth( self ):
        """Perform the authentication request.

        NOTE(review): `ApiCommand` and the module-level `verbose_writer` are
        undefined, and urllib.urlopen should be urllib.request.urlopen in
        Python 3 — this method cannot run as written.
        """
        request = urllib.request.Request( self.url( ApiCommand.auth, self.auth_parameters() ) )
        verbose_writer.write( 0, "request.full_url = {}".format( request.full_url ) );
        response = urllib.urlopen( request )
        response_data = response.read()
    def version( self ):
        # Placeholder: not yet implemented.
        return
    def execute( self ):
        # Placeholder: not yet implemented.
        return
|
UTF-8
|
Python
| false | false | 2,014 |
1,099,511,656,492 |
f09c1593c8ddf8160e6b4b0c3c248b7aacfd85a0
|
7bcffe889eb65eda92144827a64c355412be5883
|
/python_modules/plearn/pyplearn/OptionBase.py
|
48e65ecdfb85571611f45dac2734c98ab73cf2a6
|
[
"BSD-2-Clause",
"GPL-1.0-or-later"
] |
non_permissive
|
dreadlord1984/PLearn
|
https://github.com/dreadlord1984/PLearn
|
364fff2ca0b3be34131acfdd59134d1d9375d779
|
76684e2a2af134859be2ef621e84af0dbd838f2e
|
refs/heads/master
| 2021-01-20T19:49:47.469922 | 2011-09-23T19:52:15 | 2011-09-23T19:52:15 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""Very core classes of the pyplearn mechanism.
To be done:
- PLOptionDict should be generalized to OptionDict. This OptionDict class
should understand inner classes derived from OptionBase, e.g.
class OptionDict(dict): # Not sure for dict yet...
pass
class PyPLearnObject(OptionDict)
OptionType = PLOption
class Other(PyPLearnObject):
class KWArg(OptionBase): pass
and the MetaOptionDict mechanism should be able to find and SEQUENTIALLY
(the mechanism is actually recursive) that Other manages PLOption and KWArg
instances.
"""
import copy, inspect, re
# Method names removed from the public PyPLearnObject API; kept here so old
# callers can be detected/reported.
deprecated_methods = [ 'allow_unexpected_options', ### No more a classmethod
                       'get_members',
                       "option_names",
                       'option_pairs',
                       'to_list' ### In PyPLearnList
                       ]
# Since those methods were added to PyPLearnObject (or its subclass) using
# the lower_case_with_underscores convention, these are prone to clashing
# with plearn options. An explicit check is made to avoid this
# eventuality. This should be fixed whenever possible.
_clash_prone_methods = [ 'allow_unexpected_options',
                         'instances',
                         'classname',
                         'plearn_repr' ]
def checkForNameClashes(key, value):
    """Raise PyPLearnError when *key* would shadow an internal method.

    A non-callable assignment to one of the reserved names in
    ``_clash_prone_methods`` is an option clashing with the API.
    """
    if key in _clash_prone_methods and not callable(value):
        # BUG FIX: the original built the exception but never raised it, and
        # left the two %s placeholders unfilled.
        raise PyPLearnError("It seems you are trying to set an PLOption %s "
                            "which clashes with the %s internal method. "
                            "Contact support." % (key, key))
def inherited_options(clsinstance, OptionType):
    """Collect the option names of *OptionType* declared by the ancestors of
    *clsinstance* (the class itself and ``object`` are skipped).

    The names are read from the per-class slot attribute maintained by the
    metaclass (see OptionDictMetaClass).
    """
    inhoptions = []
    for cls in clsinstance.__mro__[1:]:
        if cls is object:
            continue
        # NOTE(review): always uses clsinstance's metaclass, assuming every
        # class in the MRO shares the same one -- confirm.
        MetaClass = clsinstance.__metaclass__
        options_slot = MetaClass._options_slot_%(cls.__name__,OptionType.__name__)
        try:
            inhoptions.extend( cls.__dict__[options_slot] )
        except KeyError, kerr:
            # Ancestor declared no options of this type.
            pass
    return inhoptions
def class_options(clsinstance, OptionType):
    """Return the option names of *OptionType* for *clsinstance*: its own
    declarations (deep-copied) followed by those inherited from ancestors.
    """
    meta = clsinstance.__metaclass__
    slot = meta._options_slot_ % (clsinstance.__name__, OptionType.__name__)
    own_options = copy.deepcopy(clsinstance.__dict__[slot])
    return own_options + inherited_options(clsinstance, OptionType)
def non_option_class_variable(clsinstance, variable_name):
# Class variable must have been defined along with the class
if not hasattr(clsinstance, variable_name):
return False
MetaClass = clsinstance.__metaclass__
candidate_slot = re.compile(MetaClass._options_slot_%(clsinstance.__name__, ".+"))
for attr_name in clsinstance.__dict__:
if candidate_slot.search(attr_name):
options_slot = getattr(clsinstance, attr_name)
if variable_name in options_slot:
return False
return True
def init_options(instance, OptionType, **overrides):
    """Expand, on *instance*, each class-level option of *OptionType* into an
    actual value by calling the option wrapper (which deep-copies or invokes
    the stored factory).  Options named in *overrides* are left alone.
    """
    for optname in class_options(instance.__class__, OptionType):
        # There is no need to manage options that are overriden, hence,
        # for sake of efficiency, we don't
        if not optname in overrides:
            optval = getattr(instance,optname)
            # Even if the list contains only option names, if the
            # option was inherited it will already have been expended
            if isinstance(optval, OptionType):
                # Calling the wrapper yields a fresh per-instance value.
                setattr(instance, optname, optval())
class OptionBase:
    """Wrapper recording an option's default value (or factory callable) and
    its declaration order.

    Calling an instance produces a fresh value: a deep copy of a plain
    default, or the result of the stored callable.  A class-level counter
    stamps each instance with ``_id`` so options sort in declaration order.
    """
    __option_id = 0
    def __init__(self, value, *args, **kwargs):
        assert not isinstance(value, OptionBase)
        # NOTE(review): this rebinds the (name-mangled) counter on self's
        # concrete class; after the first increment each subclass keeps its
        # own counter -- confirm the ordering across option types is intended.
        self.__class__.__option_id += 1
        self._id = self.__option_id
        # Optional documentation string, consumed from kwargs.
        self._doc = kwargs.pop('doc', '')
        if ( inspect.ismethod(value) or inspect.isfunction(value)
             or inspect.isroutine(value) or inspect.isclass(value) ):
            # Callable default: remember it along with its call arguments.
            self._callable = value
            self._args = args
            self._kwargs = kwargs
        else:
            # Plain default: extra arguments make no sense here.
            assert len(args) == 0 and len(kwargs) == 0, \
                   "args: %s\nkwargs: %s"%(args, kwargs)
            # Each expansion deep-copies the stored value.
            self._callable = copy.deepcopy
            self._args = [ value ]
            self._kwargs = kwargs
    def __call__(self):
        # Expand the option into an actual value.
        return self._callable(*self._args, **self._kwargs)
    def __cmp__(self, opt2):
        # Python 2 rich-comparison fallback: order by declaration id.
        assert isinstance(opt2, self.__class__), \
               "__cmp__(%s, %s): the later argument is not an instance of %s" \
               % (self, opt2, self.__class__.__name__)
        return cmp(self._id, opt2._id)
    def __str__(self):
        # Debug-oriented dump of the internal state.
        return "%s(\n    "%self.__class__.__name__ +\
               "\n    ".join([\
                   "_id=%d"%self._id,
                   "_callable=%s"%self._callable,
                   "_args=%s"%str(self._args),
                   "_kwargs=%s"%str(self._kwargs)
               ]) + ")"
def OptionDictMetaClass(OptionType, SuperMeta=type):
    """Generates a metaclass MetaOptionDict handling OptionBase-like options.
    @param OptionType: The type of the options to be managed by this metaclass.
    @param SuperMeta: The class this metaclass must inherit from. Not that
    super isn't used everywhere in the class definition. Direct reference
    to \I{type} is sometimes used instead. The reason is simple: the
    I{SuperMeta} parameter is meant to allow one to derive a
    I{MetaClassOptionDict} from another. If super was called on such a
    subclass, redundancy would occur.
    PLEASE AVOID MULTIPLE INHERITANCE WITH THIS CLASS...
    """
    assert issubclass(OptionType, OptionBase), OptionType.__class__
    assert SuperMeta is type or SuperMeta._options_slot_
    class _OptionDictMetaClass( SuperMeta ):
        # Template for the per-class attribute holding the ordered option
        # names; filled with (class name, option-type name).
        _options_slot_ = '_%s__options__%s_'
        def __new__(metacls, clsname, bases, dic):
            newcls = SuperMeta.__new__(metacls, clsname, bases, dic)
            inherited = inherited_options(newcls, OptionType)
            options_slot = metacls._options_slot_%(clsname,OptionType.__name__)
            if options_slot not in dic:
                # Pair (value, name) so the sort below orders by the option
                # values' declaration ids (OptionBase.__cmp__).
                reversed_option_pairs = [
                    (optval,optname)
                    for optname,optval in dic.iteritems()
                    if isinstance(optval, OptionType) and optname not in inherited
                ]
                reversed_option_pairs.sort()
                setattr(newcls, options_slot, [ optname for optval,optname in reversed_option_pairs ])
            return newcls
        # Direct reference to type (see docstring)
        def __getattribute__(self, name):
            value = type.__getattribute__(self, name)
            if isinstance(value, OptionBase):
                # Class-level access to an option yields its expanded value.
                return value()
            return value
        # Direct reference to type (see docstring)
        def __setattr__(self, name, value):
            checkForNameClashes(name, value)
            # Is an option: check that it is wrapped and add it to the list if needed
            if not name.startswith('_') \
               and not non_option_class_variable(self, name):
                # Ensure that it is wrapped...
                if isinstance(value, OptionBase):
                    ActualOptionType = value.__class__
                else:
                    value = OptionType(value)
                    ActualOptionType = OptionType
                # Retreive the appropriate slot
                options_slot = _OptionDictMetaClass._options_slot_%(self.__name__,ActualOptionType.__name__)
                option_list = self.__dict__[options_slot]
                # ... and add the option's name to the list if needed
                if name not in option_list:
                    option_list.append(name)
            # Call inherited __setattr__
            type.__setattr__(self, name, value)
        # Direct reference to type (see docstring)
        def __delattr__(self, name):
            attr = type.__getattribute__(self, name)
            if not name.startswith('_') and isinstance(attr, OptionBase):
                # Unregister the option name from its slot before deleting.
                options_slot = _OptionDictMetaClass._options_slot_%(self.__name__,attr.__class__.__name__)
                self.__dict__[options_slot].remove(name)
            # Actual deletion
            type.__delattr__(self, name)
    return _OptionDictMetaClass
|
UTF-8
|
Python
| false | false | 2,011 |
14,637,248,555,419 |
45f606415bc0bc7c4039946ed51f6f6b1d60a734
|
f493d0c32f8ab1cad27238f2bcda035ea1ddf21c
|
/lab1/cse337a1/a1-source/one.py
|
df642cf8d98dbb3d11765ce260e6f880c619ef9b
|
[] |
no_license
|
mjsalerno/Scripting-Languages
|
https://github.com/mjsalerno/Scripting-Languages
|
4f48086811fe430d35491ba736cdf819efc53c38
|
d8c0074136805f3c146d02a0903f77aeb0a59b1a
|
refs/heads/master
| 2021-01-16T21:50:32.142356 | 2014-05-09T02:56:48 | 2014-05-09T02:56:48 | 17,765,134 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
__author__ = 'michael'
#one
def main():
    """Read numbers until the sentinel -99, then print the second largest.

    Python 2 script: ``input()`` eval's the typed text, so entries compare
    as numbers; the set removes duplicates before ranking.  Exits with
    status 1 when fewer than two distinct numbers were entered.
    """
    s = set()
    while True:
        n = input('Enter a number: ')
        if n == -99:
            # Sentinel: stop reading (and never added to the set).
            break
        s.add(n)
    l = list(s)
    if len(l) < 2:
        # Fewer than two distinct values -> no "second largest" exists.
        print 'sorry but the list is too small'
        exit(1)
    l.sort()
    # After an ascending sort the second largest sits at index -2.
    print 'The second largest number is', l[-2]
main()
|
UTF-8
|
Python
| false | false | 2,014 |
5,403,068,887,916 |
7ce74d6b30186910648fad230270e9e6b9bc8681
|
bdf9d174a59e6bfcf9ea0d4cbdb07d4af681d9c5
|
/src/StaticFlow/render/tags/box/BoxTag.py
|
fa0a3fa209b7852f694386285f454ad0dfc5ad3e
|
[] |
no_license
|
sernst/StaticFlow
|
https://github.com/sernst/StaticFlow
|
856bcc88a6d6a601df47cba8844655407438535e
|
66c7f4ab788bf63f9a252a0319ade40dc4191499
|
refs/heads/master
| 2021-01-19T15:14:59.735896 | 2014-10-27T13:54:11 | 2014-10-27T13:54:11 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# BoxTag.py
# (C)2012-2013
# Scott Ernst
from pyaid.color.ColorValue import ColorValue
from pyaid.ArgsUtils import ArgsUtils
from StaticFlow.render.enum.GeneralSizeEnum import GeneralSizeEnum
from StaticFlow.render.attributes.LayoutAttributeParser import LayoutAttributeParser
from StaticFlow.render.enum.TagAttributesEnum import TagAttributesEnum
from StaticFlow.render.tags.MarkupBlockTag import MarkupBlockTag
#___________________________________________________________________________________________________ BoxTag
class BoxTag(MarkupBlockTag):
    """Markup tag rendering a styled rectangular container ("box").

    Parses layout attributes (scale, alignment, padding, border), an
    optional background colour, inline display and corner roundness, and
    emits the corresponding CSS styles/classes on the tag's attributes.
    """
#===================================================================================================
#                                                                                       C L A S S
    TAG             = 'box'                         # markup tag name
    TEMPLATE        = 'markup/box/default.mako'     # render template
    BLOCK_DISPLAY   = True
    STRIP_POLICY    = MarkupBlockTag.STRIP_ALL
    NEWLINE_POLICY  = MarkupBlockTag.BREAK_ON_NEWLINES
#===================================================================================================
#                                                                                   G E T / S E T
#___________________________________________________________________________________________________ GS: apiLevel
    @property
    def apiLevel(self):
        # Fixed API level for this tag.
        return 0
#===================================================================================================
#                                                                                     P U B L I C
#___________________________________________________________________________________________________ getAttributeList
    @classmethod
    def getAttributeList(cls):
        """Return the attributes this tag accepts, extending the base list.

        NOTE(review): t.COLOR appears twice in the concatenation below --
        confirm whether the duplicate is intentional.
        """
        t = TagAttributesEnum
        return MarkupBlockTag.getAttributeList() + t.COLOR + t.ALIGNMENT + t.PADDING + t.ROUNDNESS + \
            t.COLOR + t.REACH + t.SCALE + t.BORDER
#===================================================================================================
#                                                                               P R O T E C T E D
#___________________________________________________________________________________________________ _renderImpl
    def _renderImpl(self, **kwargs):
        """Parse the box's attributes and translate them into CSS styles."""
        a = self.attrs
        LayoutAttributeParser.parseScale(a, True, kwargs)
        LayoutAttributeParser.parseAlignment(a, True, kwargs)
        LayoutAttributeParser.parsePadding(
            a, True, kwargs, group=a.styleGroup, defaultValue=GeneralSizeEnum.xsmall[0])
        color = a.getAsColorValue(
            TagAttributesEnum.COLOR,
            ArgsUtils.get('colorDef', None, kwargs),
            kwargs)
        if not ArgsUtils.get('skipBorder', False, kwargs):
            # Border colour defaults to a shifted shade of the box colour.
            LayoutAttributeParser.parseBorder(
                a, True, kwargs,
                group=a.styleGroup,
                defaultColor=ArgsUtils.get(
                    'borderColorDef', color.shiftColors[1] if color else None, kwargs) )
        inline = a.getAsBool(
            TagAttributesEnum.INLINE,
            ArgsUtils.get('inlineDef', None, kwargs),
            kwargs)
        roundness = a.getAsEnumerated(
            TagAttributesEnum.ROUNDNESS,
            GeneralSizeEnum,
            ArgsUtils.get('roundnessDef', GeneralSizeEnum.none, kwargs),
            kwargs)
        #-------------------------------------------------------------------------------------------
        # BACKGROUND COLOR
        if not ArgsUtils.get('skipBackground', False, kwargs):
            if isinstance(color, ColorValue):
                a.styles.add('background-color', color.web, a.styleGroup)
            elif a.explicitAccent or a.themeChanged:
                self.useBackground()
        #-------------------------------------------------------------------------------------------
        # ROUNDNESS
        # NOTE(review): assumes getAsEnumerated yields the string codes
        # 'xxs'..'xxl' -- confirm against GeneralSizeEnum's values.
        if roundness == 'xxs':
            r = '0.13em'
        elif roundness == 'xs':
            r = '0.25em'
        elif roundness == 's':
            r = '0.5em'
        elif roundness == 'm':
            r = '0.75em'
        elif roundness == 'l':
            r = '1.0em'
        elif roundness == 'xl':
            r = '1.25em'
        elif roundness == 'xxl':
            r = '1.5em'
        else:
            r = None
        if r:
            a.styles.add('border-radius', r, a.styleGroup)
        if inline:
            a.styles.add('display', 'inline-block')
        a.classes.add('v-gvml-push', a.styleGroup)
|
UTF-8
|
Python
| false | false | 2,014 |
19,043,885,023,989 |
68b497b04c419a52caebf4b1f4ce03a42af01b47
|
4f7971c106f2d38a3ba1705c6846c5254e588839
|
/usejing.py
|
66c5dddff68ad93d8bd2fa06ad134c9e405996c3
|
[
"LicenseRef-scancode-public-domain"
] |
non_permissive
|
mcamiano/gnitpick
|
https://github.com/mcamiano/gnitpick
|
b5f340c43d2cd4e9e88bc8c0986d016cd62a7c88
|
faac90f7a81ed6a5bed1497d3cc6b591259c3f45
|
refs/heads/master
| 2021-01-01T19:28:44.938099 | 2011-12-19T19:00:22 | 2011-12-19T19:00:22 | 3,014,249 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import com.thaiopensource.relaxng.translate.util.InvalidParamsException as InvalidParamsException
import com.thaiopensource.relaxng.edit.SchemaCollection as SchemaCollection
import com.thaiopensource.relaxng.input.InputFailedException as InputFailedException
import com.thaiopensource.relaxng.input.InputFormat as InputFormat
import com.thaiopensource.relaxng.input.MultiInputFormat as MultiInputFormat
import com.thaiopensource.relaxng.input.xml.XmlInputFormat as XmlInputFormat
import com.thaiopensource.relaxng.input.dtd.DtdInputFormat as DtdInputFormat
import com.thaiopensource.relaxng.input.parse.compact.CompactParseInputFormat as CompactParseInputFormat
import com.thaiopensource.relaxng.input.parse.sax.SAXParseInputFormat as SAXParseInputFormat
import com.thaiopensource.relaxng.output.LocalOutputDirectory as LocalOutputDirectory
import com.thaiopensource.relaxng.output.OutputDirectory as OutputDirectory
import com.thaiopensource.relaxng.output.OutputFailedException as OutputFailedException
import com.thaiopensource.relaxng.output.OutputFormat as OutputFormat
import com.thaiopensource.relaxng.output.dtd.DtdOutputFormat as DtdOutputFormat
import com.thaiopensource.relaxng.output.rnc.RncOutputFormat as RncOutputFormat
import com.thaiopensource.relaxng.output.rng.RngOutputFormat as RngOutputFormat
import com.thaiopensource.relaxng.output.xsd.XsdOutputFormat as XsdOutputFormat
import com.thaiopensource.xml.sax.ErrorHandlerImpl as ErrorHandlerImpl
import com.thaiopensource.util.Localizer as Localizer
import com.thaiopensource.util.OptionParser as OptionParser
import com.thaiopensource.util.UriOrFile as UriOrFile
import com.thaiopensource.util.Version as Version
import org.xml.sax.SAXException as SAXException
import java.io.File as File
import java.io.IOException as IOException
import org.xml.sax.SAXException as SAXException
import java.lang.String as String
import java.lang.Integer as Integer
import java.util.Iterator as Iterator
import array
import sys
def suffix(s):
    """Return the extension of *s* (the text after the last dot), or an
    empty string when *s* contains no dot."""
    dot_index = s.rfind(".")
    return "" if dot_index < 0 else s[dot_index + 1:]
class filetypeError(Exception):
    """Raised when a schema file extension is not recognised."""

    def __init__(self, msg):
        # Kept on the instance (not passed to Exception.__init__, matching
        # the original behaviour) so __str__ can quote it.
        self.message = msg

    def __str__(self):
        # Quote the message like repr() does.
        return "%r" % (self.message,)
def inputTypeHandler(suffix):
    """Instantiate the Jing InputFormat matching a file extension.

    Raises filetypeError for an unknown extension.
    """
    factories = {
        'rng': SAXParseInputFormat,
        'dtd': DtdInputFormat,
        'rnc': CompactParseInputFormat,
        'xml': XmlInputFormat,
    }
    factory = factories.get(suffix)
    if factory is None:
        raise filetypeError("unrecognized input type '" + suffix + "'")
    return factory()
def outputTypeHandler(suffix):
    """Instantiate the Jing OutputFormat matching a file extension.

    Raises filetypeError for an unknown extension.
    """
    factories = {
        'dtd': DtdOutputFormat,
        'rng': RngOutputFormat,
        'rnc': RncOutputFormat,
        'xsd': XsdOutputFormat,
    }
    factory = factories.get(suffix)
    if factory is None:
        raise filetypeError("unrecognized output type '" + suffix + "'")
    return factory()
class schemaxlator:
    """ Adapted from James Clark's Trang tool, specifically Driver.java
    Convert a schema in RNG, RNC, or DTD format, or an XML sample instance,
    into RNG, RNC, DTD, or WXS format, using the Jing api.
    """
    # Rendering constants handed to LocalOutputDirectory.
    OUTPUT_ENCODING = "UTF-8"
    LINE_LENGTH = 72
    INDENT = 2
    def __init__(self,inuri, outuri):
        """Convert *inuri* to *outuri*; formats inferred from extensions.

        Jython 2 code: IO/SAX errors are reported through Jing's error
        handler.  NOTE(review): filetypeError from the handler lookups is
        NOT caught here and propagates to the caller.
        """
        self.errorhandler = ErrorHandlerImpl()
        try:
            # Pick input/output handlers from the file extensions.
            inputType = suffix(inuri).lower()
            inFormat = inputTypeHandler( inputType )
            outputType = suffix(outuri).lower()
            outFormat = outputTypeHandler( outputType )
            # Empty Java String[] of extra parameters.
            inputParamArray = array.array(String, [] )
            self.schemacollection = inFormat.load( UriOrFile.toUri(inuri), inputParamArray, outputType, self.errorhandler)
            outputdir = LocalOutputDirectory( self.schemacollection.getMainUri(),
                                              File( outuri ),
                                              outputType,
                                              self.OUTPUT_ENCODING,
                                              self.LINE_LENGTH,
                                              self.INDENT)
            outputParamArray = array.array(String, [] )
            outFormat.output( self.schemacollection, outputdir, outputParamArray, inputType.lower(), self.errorhandler)
        except IOException, e:
            self.errorhandler.printException(e)
        except SAXException, e:
            self.errorhandler.printException(e)
# Stand-alone Unit Execution
if __name__ == "__main__":
    if ( len(sys.argv) == 1 ): # Convert RelaxNG XML syntax into compact syntax
        # Demo round-trip: XML instance -> RNG -> RNC -> RNG.
        foo = schemaxlator( "menulayout.xml", "foo.rng" )
        bar = schemaxlator( "foo.rng", "bar.rnc" )
        foobar = schemaxlator( "bar.rnc", "foobar.rng" )
        # foobar.rng and foo.rng should be equivalent
        print dir( bar.schemacollection.getSchemaDocumentMap() )
        # NOTE(review): 'iter' shadows the builtin of the same name.
        iter = bar.schemacollection.getSchemaDocumentMap().entrySet().iterator()
        while iter.hasNext():
            entry = iter.next()
            print "Key=" + entry.getKey() + ", value="
            vectr = entry.getValue().getPattern() # .getFollowingElementAnnotations()
            # NOTE(review): 'vectr.class' is a syntax error in CPython
            # ('class' is a keyword); presumably relies on Jython's Java
            # attribute access -- confirm on the target interpreter.
            print vectr.class
            print vectr.getComponents().size()
            # navigate to the component patterns of the schema
            i = 0
            # NOTE(review): 'len' shadows the builtin in this scope.
            len = vectr.getComponents().size()
            while i < len:
                c = vectr.getComponents().get(i)
                print "Name: " + c.getName()
                print "Body: " + c.getBody().toString()
                print dir(c.attributeAnnotations)
                i=i+1
    elif ( len(sys.argv) == 3): # convert fromfile into tofile, inferring the format from the file extension
        jv = schemaxlator( sys.argv[1], sys.argv[2] )
    else:
        print "Wrong number of arguments ( " + Integer( len( sys.argv ) ).toString() + ") "
|
UTF-8
|
Python
| false | false | 2,011 |
11,278,584,123,992 |
93b6daca0cc78347deba519e4edf93f15cd229b5
|
ca423d9436a6fc2d80ddc8d0242c76643a3fd611
|
/blog/models.py
|
e1c4ec443070ac436a25b11c7e25b847af4aaa8f
|
[] |
no_license
|
paul8620/django-blog
|
https://github.com/paul8620/django-blog
|
73fead8d9e3849523dc68ee2e2b3f59187700dbf
|
9fb027f3181ae51abe4518cf635cbc43882b8bd5
|
refs/heads/master
| 2015-08-09T06:23:51.321877 | 2013-10-25T06:11:22 | 2013-10-25T06:11:22 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.db import models
from taggit.managers import TaggableManager
class Post(models.Model):
    """A blog entry with free-form tags (django-taggit)."""
    title=models.CharField(max_length=100)   # headline, at most 100 chars
    body=models.TextField()                  # full article text
    create=models.DateTimeField()            # NOTE(review): no auto_now_add -- callers must supply the timestamp
    tags =TaggableManager()                  # taggit tag manager
    def __unicode__(self):
        # Python 2 display name (admin/shell) -- the post's title.
        return self.title
|
UTF-8
|
Python
| false | false | 2,013 |
11,871,289,607,292 |
aaed6011c54ed3181c6bb26c22644610137429d8
|
eaaed8b58643ad3d9ca3dffb479962ca8a907fed
|
/lib/ixle/agents/body.py
|
f6cea47a4bcbc02d60287ea5a5898fd73b954e20
|
[] |
no_license
|
mattvonrocketstein/ixle
|
https://github.com/mattvonrocketstein/ixle
|
bbe27bc7500d7ef4eaf1e11d5564611672dab31b
|
f5e894716e0ca7cf9d09017ec7272b9a23928a79
|
refs/heads/master
| 2021-01-01T17:46:48.547020 | 2014-03-31T23:34:47 | 2014-03-31T23:34:47 | 9,435,583 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
""" ixle.agents.body
"""
from report import report
from ixle.python import ope
from .base import ItemIterator
import pyPdf
#print getPDFContent("test.pdf").encode("ascii", "ignore")
def getPDFContent(path):
    """Extract the text of every page of the PDF at *path*.

    Returns a single whitespace-normalised string (non-breaking spaces
    replaced, runs of whitespace collapsed to single spaces).
    """
    content = ""
    # BUG FIX: the original opened the file with ``file(path, "rb")`` and
    # never closed it; the context manager guarantees the handle is released.
    # The reader must stay inside the ``with`` block because pyPdf reads
    # lazily from the handle.
    with open(path, "rb") as fhandle:
        pdf = pyPdf.PdfFileReader(fhandle)
        # Extract text from each page and add to content.
        for i in range(pdf.getNumPages()):
            content += pdf.getPage(i).extractText() + "\n"
    # Collapse whitespace.
    content = " ".join(content.replace(u"\xa0", " ").strip().split())
    return content
def item2text(item):
    """Return a file-like object holding the plain-text contents of *item*,
    or None when the file is missing or the extension is unsupported.
    """
    report('reading file')
    if not ope(item.abspath):
        return None
    if item.fext in 'txt'.split():
        # BUG FIX: the original returned the open handle from inside the
        # ``with`` block, so callers received a file that the context
        # manager had already closed.  Read the contents first and hand
        # back an in-memory buffer instead.
        import StringIO
        with open(item.abspath, 'r') as fhandle:
            return StringIO.StringIO(fhandle.read())
    elif item.fext == 'pdf':
        import StringIO
        return StringIO.StringIO(getPDFContent(item.abspath))
    return None
#scanner
class Body(ItemIterator):
    """Agent that extracts plain text from document items and stores it as a
    'body.txt' attachment, then flags the item with ``has_body``."""
    nickname = 'body'            # agent name within the ixle framework
    covers_fields = ['body']     # fields this agent maintains
    def callback(self, item, fname=None, **kargs):
        """Per-item hook: attach extracted text to document items.

        Skips non-documents, missing files and already-processed items
        (unless ``self.force`` is set).
        """
        report(item.fname)
        if item.file_type=='document':
            if self.force or not item.has_body:
                if not item.exists():
                    report('doesnt exist')
                    self.complain_missing(item.abspath)
                    return
                # NOTE(review): file_type=='document' is re-checked although
                # the outer branch already guarantees it.
                if item.file_type=='document':
                    success = self.set_attachment(item)
                    if success:
                        item.has_body = True
                        report('finished setting body.')
                        self.save(item)
                    else:
                        report("no success")
                else:
                    report('Not a document')
            else:
                report('already handled')
        else:
            report(str(item.file_type))
    def set_attachment(self, item):
        """Extract the item's text and store it as the 'body.txt' attachment.

        Returns True on success, False otherwise.  Any previous attachment
        of the same name is deleted first.
        """
        contents = item2text(item)
        report('saving attachment: ')
        report.console.draw_line()
        # Python 2 print statement: echo the extracted text for the operator.
        print contents
        report.console.draw_line()
        attachment_filename = 'body.txt'
        r = self.database.delete_attachment(item, attachment_filename)
        try:
            self.database.put_attachment(
                item, contents,
                filename=attachment_filename,
                content_type=None, # depends on filename ^
                )
            report('set attachment')
            return True
        except Exception,e:
            # Best-effort: report the failure rather than abort the run.
            report("could not save body."+str(e))
            return False
|
UTF-8
|
Python
| false | false | 2,014 |
17,368,847,760,787 |
18debb12d1b77ff16bea30bee7e851a43ce7fe0b
|
41fc660a538742eb9b0608f087ea4f6a09a630ca
|
/gireoan/repo/File.py
|
77a23a8b519ba5043c41bc731face2e17824f61c
|
[
"MIT"
] |
permissive
|
saeschdivara/GitRepoAnalyser
|
https://github.com/saeschdivara/GitRepoAnalyser
|
751a67f493956382b45eb2c8bd87fc8d1c5858c3
|
d27fef56bfc6fa588f4285369dda74fe8cabf735
|
refs/heads/master
| 2016-09-05T19:30:39.644363 | 2014-06-16T07:07:04 | 2014-06-16T07:07:04 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
class File(object):
    """A repository file tracked by the analyser: its path plus accumulated
    commit and line-count statistics."""

    @classmethod
    def get_ending(cls, file_path):
        """Return the text after the last '.' in *file_path* (the whole path
        when it contains no dot, '' for a trailing dot)."""
        # rpartition yields exactly what split('.')[-1] would, in one pass.
        return file_path.rpartition('.')[2]

    def __init__(self, path):
        """Create a fresh record for *path* with zeroed statistics."""
        self.path = path
        self.ending = ''        # filled in later from the path
        self.code_lines = 0     # running line-count total
        self.commits = []       # commits that touched this file
|
UTF-8
|
Python
| false | false | 2,014 |
4,578,435,150,400 |
e0f9dde6780106b5c2eea5ed1f6cdd0143cdbf98
|
dc13636c35adefbf1579c93705a155781c071d5c
|
/settings.py
|
149f0635066988bbc4ed948231a8d590cf0e1a34
|
[
"GPL-3.0-only",
"GPL-3.0-or-later"
] |
non_permissive
|
rosix-ru/barbaris
|
https://github.com/rosix-ru/barbaris
|
289047d19a6712d54210190498958425f5de94f0
|
1d300a65ef62285c54e748a8fec8cef32a5848ba
|
refs/heads/master
| 2021-01-10T04:26:21.554920 | 2014-06-07T01:46:31 | 2014-06-07T01:46:31 | 44,802,688 | 1 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
"""
###############################################################################
# Copyright 2012 Grigoriy Kramarenko.
###############################################################################
# This file is part of Barbaris.
#
# Barbaris is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Barbaris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Barbaris. If not, see <http://www.gnu.org/licenses/>.
#
# Этот файл — часть Barbaris.
#
# Barbaris - свободная программа: вы можете перераспространять ее и/или
# изменять ее на условиях Стандартной общественной лицензии GNU в том виде,
# в каком она была опубликована Фондом свободного программного обеспечения;
# либо версии 3 лицензии, либо (по вашему выбору) любой более поздней
# версии.
#
# Barbaris распространяется в надежде, что она будет полезной,
# но БЕЗО ВСЯКИХ ГАРАНТИЙ; даже без неявной гарантии ТОВАРНОГО ВИДА
# или ПРИГОДНОСТИ ДЛЯ ОПРЕДЕЛЕННЫХ ЦЕЛЕЙ. Подробнее см. в Стандартной
# общественной лицензии GNU.
#
# Вы должны были получить копию Стандартной общественной лицензии GNU
# вместе с этой программой. Если это не так, см.
# <http://www.gnu.org/licenses/>.
###############################################################################
"""
# Django settings for this project.
from django.utils.translation import ugettext_lazy as _
import os
PROJECT_PATH = os.path.abspath(os.path.dirname(__file__))
def abspath(*paths):
    """Resolve *paths* relative to PROJECT_PATH as an absolute path,
    normalised to forward slashes (Windows compatibility)."""
    joined = os.path.join(PROJECT_PATH, *paths)
    return os.path.abspath(joined).replace('\\', '/')
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Grigoriy Kramarenko', '[email protected]'),
)
MANAGERS = ADMINS
# Load the author list from the AUTHORS file; fall back to placeholders
# when it is missing or unreadable.
try:
    # Context manager guarantees the handle is closed even when readlines
    # fails (the original leaked it on error).
    with open(abspath('AUTHORS'), 'rb') as f:
        AUTHORS = f.readlines()
except (IOError, OSError):
    # Narrowed from a bare ``except``: only file-access problems should be
    # silenced, not e.g. a NameError in abspath.
    AUTHORS = ('Webmaster Name', 'Manager Name')
COPYRIGHT = u'Григорий Крамаренко / www.rosix.ru'
COPYRIGHT_YEAR = 2010 # start year of copyright
PROJECT_NAME = u'АИС «Барбарис»'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': abspath('sqlite.db'), # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Asia/Vladivostok' #'Europe/Moscow'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'ru-ru'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
LOCALE_PATHS = (
# abspath('locale'),
)
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = abspath('..', 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = abspath('..', 'static')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
#abspath('static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'generate-this-unique-key!!!'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'project.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'project.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
abspath("templates"),
abspath("app","templates"),
)
TEMPLATE_CONTEXT_PROCESSORS = ('django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
# append:
'django.core.context_processors.request'
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
#~ 'project.auth_fix',
'project.app',
'pytils',
)
# Settings for applications:
START_YEAR = 2011
DEFAULT_ANALYSE_DAYS = 7
STATE_ORDER_CREATE = 1
STATE_ORDER_ACCEPT = 2
STATE_ORDER_CLOSE = 3
STATE_ORDER_CANCEL = 4
STATE_ORDER_TRASH = 5
STATE_ORDER_CHOICES = (
(STATE_ORDER_CREATE, u'Создан'),
(STATE_ORDER_ACCEPT, u'Принят'),
(STATE_ORDER_CLOSE, u'Закрыт'),
(STATE_ORDER_CANCEL, u'Отменён'),
(STATE_ORDER_TRASH, u'Удалён'),
)
SELECT_LIST_ORDERS = [1,2,3,4]
SELECT_WORK_ORDERS = [2,3]
STATE_INVOICE_CREATE = 1
STATE_INVOICE_AVANCE = 2
STATE_INVOICE_PAYMENT = 3
STATE_INVOICE_TRASH = 4
STATE_INVOICE_CHOICES = (
(STATE_INVOICE_CREATE, u'Создан'),
(STATE_INVOICE_AVANCE, u'Аванс'),
(STATE_INVOICE_PAYMENT, u'Оплачен'),
(STATE_INVOICE_TRASH, u'Удалён'),
)
SELECT_INVOICES = [1,2,3]
SELECT_WORK_INVOICES = [1,2,3]
SELECT_CASH_INVOICES = [2,3]
PAYMENT_INVOICE_CASH = 1
PAYMENT_INVOICE_CASHLESS = 2
PAYMENT_INVOICE_CARD = 3
PAYMENT_INVOICE_CHOICES = (
(PAYMENT_INVOICE_CASH, u'Наличный'),
(PAYMENT_INVOICE_CASHLESS, u'Безналичный'),
(PAYMENT_INVOICE_CARD, u'Карта банка'),
)
DIVIDER_DAY = 1
DIVIDER_HOUR = 2
DIVIDER_MONTH = 3
DIVIDER_PRICE_CHOICES = (
(DIVIDER_DAY, u'Сутки'),
(DIVIDER_HOUR, u'Час'),
(DIVIDER_MONTH, u'Месяц'),
)
CATEGORY_CHOICES = (
(u'Hotel',u'Гостиница'),
(u'Sauna',u'Сауна'),
(u'Kitchen',u'Кухня'),
(u'Parking',u'Автостоянка'),
)
DOCUMENT_CHOICES = (
('act', u'Акт'),
('invoice', u'Счёт'),
('person', u'Карточка персоны'),
('org', u'Карточка организации'),
)
STATE_ROOM_FREE = u'Свободен сейчас и в дальнейшем.'
STATE_ROOM_NONFREE = u'Свободен сейчас, но заказан в дальнейшем.'
STATE_ROOM_RELEASED_FREE = u'Освобождается сегодня, свободен в дальнейшем.'
STATE_ROOM_RELEASED_NONFREE = u'Освобождается сегодня, но заказан в дальнейшем.'
STATE_ROOM_NONRELEASED_FREE = u'Занят, нет дальнейших заказов.'
STATE_ROOM_NONRELEASED_NONFREE = u'Занят, есть дальнейшие заказы.'
BUTTON_CLASSES_STATE_ROOM = {
STATE_ROOM_FREE: 'btn btn-success',
STATE_ROOM_NONFREE: 'btn',
STATE_ROOM_RELEASED_FREE: 'btn btn-info',
STATE_ROOM_RELEASED_NONFREE: 'btn btn-warning',
STATE_ROOM_NONRELEASED_FREE: 'btn btn-primary',
STATE_ROOM_NONRELEASED_NONFREE: 'btn btn-danger',
}
ESTIMATED_TIME = True
ESTIMATED_TIME_HOUR = 12
ESTIMATED_TIME_MINUTE = 0
GROUP_ADMINS = u'Администраторы'
GROUP_MANAGERS = u'Менеджеры'
GROUP_OPERATORS = u'Операторы'
SELECT_WORK_GROUPS = (
GROUP_MANAGERS, GROUP_OPERATORS,
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# This import re-definition current top settings,
# e.g. DATABASES, SECRET_KEY, etc.
# Default path: ../securesettings.py
# outer from project paths and unavailable in Mercurial repository.
try:
    from securesettings import *
except:
    # Deliberate best-effort: a missing securesettings.py falls back to the
    # defaults above.  NOTE(review): the bare except also hides genuine
    # errors inside securesettings.py -- consider narrowing to ImportError.
    pass
|
UTF-8
|
Python
| false | false | 2,014 |
13,804,024,897,688 |
a7d69ee77dfa69aab69f62e389f3a5c0003333a1
|
4ec116afe2949c961677d9597dfcc2fb7ae926e2
|
/Problem_2.py
|
ba6159e13f4299730030678bf9f96960b471e95b
|
[] |
no_license
|
jwshumaker/ProjectEuler
|
https://github.com/jwshumaker/ProjectEuler
|
3a46422beefefd0de8ba0d108c381a6f08f77547
|
3877eeba461bd5009748d9f24ea93c578f6b5865
|
refs/heads/master
| 2021-01-20T10:42:03.500074 | 2014-05-06T03:42:07 | 2014-05-06T03:42:07 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
'''
Created on Apr 7, 2014
@author: JWShumaker
Problem ID 2
Each new term in the Fibonacci sequence is generated by adding the previous two terms. By starting with 1 and 2, the first 10 terms will be:
1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ...
By considering the terms in the Fibonacci sequence whose values do not exceed four million, find the sum of the even-valued terms.
'''
# Again, let's make this a function
# Note that our fib series begins with 1 and 2 and that sum begins at 2
def SumEvenFibs(max, term1 = 1, term2 = 2, sum = 2):
    """Return the sum of the even Fibonacci terms not exceeding max.

    The sequence starts 1, 2, ... and 'sum' starts at 2 because the
    initial term2 (= 2) is even and already counted; with the default
    arguments the result is therefore only meaningful for max >= 2.

    Bug fixed: the original loop tested the *previous* term against max
    and then added the freshly generated term, so an even term greater
    than max could be included (e.g. SumEvenFibs(7) wrongly returned 10).
    """
    while True:
        # Advance the sequence: the old term2 becomes term1,
        # and the new term2 is the sum of the previous two.
        term1, term2 = term2, term1 + term2
        # Stop before counting any term that exceeds the limit.
        if term2 > max:
            break
        # Add the new term iff it is even.
        if not term2 % 2:
            sum += term2
    return sum
print(SumEvenFibs(4000000))
|
UTF-8
|
Python
| false | false | 2,014 |
16,131,897,208,473 |
c04a6de8f0fc5862a29ae46047115ac39cfa0d19
|
efe0ac69166623292c4eea1e5445b3d0e44ecbb1
|
/testauth.py
|
adf31c7fb8a89b8e3cbb0fc719125c9ad3d1ab10
|
[] |
no_license
|
cloudaice/sinaapp
|
https://github.com/cloudaice/sinaapp
|
a48c8cef87495dfc62ae37434dd27f142a0533e4
|
4f7cd9775893c7177f4ef1fef1f3add33a5a7758
|
refs/heads/master
| 2021-01-25T10:29:33.851197 | 2013-01-23T02:55:49 | 2013-01-23T02:55:49 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#-*- coding: utf-8 -*-
# Manual OAuth2 smoke test for the Sina Weibo API: builds the provider's
# authorize URL, opens it in a browser, and reads the returned "code"
# query parameter from stdin (Python 2: raw_input).
# NOTE(review): real app keys/secrets are hard-coded below — they should
# not live in version control.
import urllib,urllib2
from weibo import APIClient
#APP_KEY = "3302956248"
#APP_SECRET="a3d8968cc21fd8115ad6e90994755cfc"
APP_KEY = "202088835"
APP_SECRET="9567e3782151dcfd1a9bd2dd099d957f"
# OAuth2 redirect target; must match the callback registered for the app.
CALLBACK_URL = "https://api.weibo.com/oauth2/default.html"
#CALLBACK_URL = "http://me.cloudaice.com"
#CALLBACK_URL = "http://www.baidu.com"
# Legacy OAuth 1.0 access-token endpoint; unused below — TODO confirm.
DOMAIN = "http://api.t.sina.com.cn/oauth/access_token"
client = APIClient(app_key = APP_KEY,app_secret = APP_SECRET,redirect_uri = CALLBACK_URL)
url = client.get_authorize_url(redirect_uri = CALLBACK_URL)
#print url
import webbrowser
webbrowser.open(url)
# The user pastes the "code" parameter from the redirect URL here.
code = raw_input("input code: ")
#access_token = "2.00FvIAQCsnrWbD4018a02070fVoPjD"
|
UTF-8
|
Python
| false | false | 2,013 |
979,252,588,330 |
7177bfa0f1884d1bb6585e492fd085049220337a
|
94accea2a6e3c0235d119bc5c418e496dd3c982f
|
/board.py
|
5cb2d8a6f0c19a7d4c1c595a796dd392facbbdf6
|
[] |
no_license
|
christian-oudard/logic-puzzle-toolkit
|
https://github.com/christian-oudard/logic-puzzle-toolkit
|
a24378454ad43bf4339d7438129a7552293642c6
|
6f728a1aa6947bed64436a2b15474f4dc7ba7edd
|
refs/heads/master
| 2016-09-05T10:14:53.115183 | 2013-01-04T00:28:36 | 2013-01-04T00:28:36 | 532,141 | 6 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from copy import copy
from constants import (
BLACK,
WHITE,
UNKNOWN,
CONTRADICTION,
GIVENS,
DEBUG,
is_success,
)
from utility import mdist
class Board(object):
    """
    Implements solving and assumption tracking logic.
    Every non-abstract subclass of board must implement an is_valid function.
    This function returns False if the current state is certainly invalid, or
    True if it is valid or potentially valid.
    """
    def __init__(self):
        # Position of the most recent conclusion; steers the search
        # toward nearby cells (see priority()).
        self.last_conclusion = None # used for search heuristics
        # Optional cap on the number of is_valid() evaluations before
        # solve() gives up and returns None.
        self.limit = None

    def solve(self, max_depth=1, verify_unique=False):
        """Solve the board in place.

        max_depth bounds how deeply trial assumptions may be nested;
        verify_unique keeps searching instead of adopting the first
        complete solution found. Returns True if fully solved, False if
        contradictory/incomplete, or None if self.limit was exceeded.
        """
        if not self.is_valid():
            return False
        # Search-wide bookkeeping is kept on the class so that board
        # copies made during the search share the same counters.
        Board.early_solution = None
        Board.verify_unique = verify_unique
        Board.max_depth = max_depth
        Board.depth_reached = 0
        Board.is_valid_count = 0
        solve_thread = self.solve_thread(depth=0)
        result = None
        for result in solve_thread:
            if self.limit and Board.is_valid_count > self.limit:
                return
            if result is True:
                # An early complete solution was found on a copy; adopt
                # its cell data into this board.
                self.data = Board.early_solution.data
                return True
            if DEBUG(1):
                if is_success(result):
                    print self
                    print
                elif result == False:
                    print 'board unsolvable'
        if result is False or len(self.unknown_positions) > 0:
            return False # incomplete
        else:
            return True # fully solved

    def solve_thread(self, depth):
        """Cooperative generator driving deduction at the given depth.

        Yields None to pass control, (position, color) for each concluded
        cell, True when an early solution is recorded, or False when the
        board becomes contradictory.
        """
        if depth > Board.max_depth:
            return
        if depth > Board.depth_reached:
            Board.depth_reached = depth
        yield None
        while True:
            for result in self.conclusion_thread(depth):
                if result is None:
                    yield None
                elif result is True:
                    yield True
                    return
                else:
                    # Apply the concluded cell, then re-check validity.
                    position, color = result
                    self.last_conclusion = position
                    self.set_value(position, color)
                    Board.is_valid_count += 1
                    if not self.is_valid(position, color):
                        yield False
                        return
                    yield result
                    if not Board.verify_unique and len(self.unknown_positions) == 0:
                        Board.early_solution = self
                        yield True
                        return
                    break # restart while loop, continue searching
            else:
                return # conclusion thread found nothing, stop searching

    def conclusion_thread(self, depth):
        """Round-robin over one assumption thread per (cell, color) pair.

        Steps every thread once per cycle; yields the first conclusion or
        solution any of them reaches, or None to pass control.
        """
        assumption_threads = []
        for pos in self.prioritized_positions():
            for color in (BLACK, WHITE):
                assumption_threads.append(self.assumption_thread(pos, color, depth))
        while assumption_threads:
            finished_threads = []
            for at in assumption_threads:
                result = None
                try:
                    result = at.next()
                except StopIteration:
                    finished_threads.append(at)
                if result is None:
                    pass
                elif result is True:
                    yield True
                    return
                else:
                    yield result
                    return
            # Drop exhausted threads only after the full pass, so the
            # list is not mutated while being iterated.
            for ft in finished_threads:
                assumption_threads.remove(ft)
            yield None # now that all threads have gone once, pass control

    def assumption_thread(self, position, color, depth):
        """Try 'color' at 'position'; a contradiction concludes the opposite.

        Yields (position, opposite_color) when the assumption is refuted,
        True when it leads to a full solution, or None to pass control.
        """
        # Cheap single-cell check first, before recursing on a board copy.
        self.set_value(position, color)
        Board.is_valid_count += 1
        valid = self.is_valid(position, color)
        self.set_value(position, UNKNOWN)
        if not valid:
            yield (position, opposite_color(color))
        yield None
        # Deeper search happens on a copy so this board stays untouched.
        assumption_board = self.copy()
        assumption_board.set_value(position, color)
        assumption_board.last_conclusion = position
        for result in assumption_board.solve_thread(depth + 1):
            if result is None:
                yield None
            elif result is False:
                yield (position, opposite_color(color))
            elif result is True:
                yield True
                return

    def is_valid(self, position=None, color=None):
        """
        Determine whether a board has a legal or illegal position.
        Each subclass must provide a validity_checks list.
        The position and color arguments that are passed on to each validity
        function are the position and color of the most changed cell, and are
        only used for optimization, not for correctness.
        """
        for valid_func in self.validity_checks:
            if not valid_func(self, position, color):
                return False
        return True

    # optimization #
    def prioritized_positions(self):
        """Return the unknown cells ordered most promising first."""
        priority_dict = {}
        for pos in self.unknown_positions:
            priority_dict[pos] = self.priority(pos)
        position_list = list(self.unknown_positions)
        return sorted(position_list, key=priority_dict.__getitem__, reverse=True)

    def priority(self, position):
        """Heuristic score for a cell: prefer cells near the last
        conclusion and cells with many already-decided neighbours."""
        score = 0
        if self.last_conclusion is not None:
            dist = mdist(position, self.last_conclusion)
            score += max(5 - dist, 0)
        for adj in self.adjacencies[position]:
            if not self.is_unknown(adj):
                score += 1
        return score

    def update_color_caches(self, pos, value):
        """Keep the black/white/unknown position sets in sync after a write."""
        if pos in self.black_positions:
            self.black_positions.remove(pos)
        if pos in self.white_positions:
            self.white_positions.remove(pos)
        if pos in self.unknown_positions:
            self.unknown_positions.remove(pos)
        if value == BLACK:
            self.black_positions.add(pos)
        elif value == WHITE or value in GIVENS:
            self.white_positions.add(pos)
        elif value == UNKNOWN:
            self.unknown_positions.add(pos)

    # grid overrides #
    def _in_bounds(self, x, y):
        """Determine whether a particular point is within the hexagonal boundary of the board."""
        # Default stub; grid subclasses override with a real bounds test.
        return False

    def _adjacencies(self, pos):
        """Return all in-bounds adjacencies of the given position."""
        # Default stub; grid subclasses override with real adjacency lists.
        return []

    def __str__(self):
        return repr(self)

    def copy(self):
        """Shallow-copy the board along with its cell data and color caches."""
        #TODO: implement more precise copying, less shotgun approach.
        new_board = copy(self)
        new_board.data = copy(self.data)
        new_board.positions = copy(self.positions)
        new_board.black_positions = copy(self.black_positions)
        new_board.white_positions = copy(self.white_positions)
        new_board.unknown_positions = copy(self.unknown_positions)
        return new_board

    def clear_data(self):
        """Reset every cell on the board to UNKNOWN."""
        for pos in self.positions:
            self.set_unknown(pos)

    def is_black(self, pos):
        return self[pos] == BLACK
    def is_white(self, pos):
        value = self[pos]
        return value == WHITE or value in GIVENS # givens are white
    def is_unknown(self, pos):
        return self[pos] == UNKNOWN
    def set_black(self, pos):
        self.set_value(pos, BLACK)
    def set_white(self, pos):
        self.set_value(pos, WHITE)
    def set_unknown(self, pos):
        self.set_value(pos, UNKNOWN)

    def set_value(self, pos, value):
        """Write a cell value (off-board positions are ignored) and
        update the color caches only when the value actually changes."""
        if pos in self.positions:
            if self[pos] != value:
                self[pos] = value
                self.update_color_caches(pos, value)

    def __getitem__(self, key):
        return self.data[key]
    def __setitem__(self, key, value):
        self.data[key] = value
    def __eq__(self, other):
        return self.data == other.data
    def __ne__(self, other):
        return not (self == other)
return not (self == other)
def opposite_color(color):
    """Return the complementary solver color (WHITE <-> BLACK).

    Any other value, e.g. UNKNOWN, falls through and yields None.
    """
    if color == BLACK:
        return WHITE
    if color == WHITE:
        return BLACK
|
UTF-8
|
Python
| false | false | 2,013 |
12,996,571,083,996 |
7b315604ec706f906e46be2b2cd66ed4e29a6e4e
|
015795a84ce6e76ac436eda5a546a439faf16bfe
|
/app/filters.py
|
865e5d29552b7713931937adbeaefb2b9dcfc954
|
[
"LicenseRef-scancode-proprietary-license"
] |
non_permissive
|
Blender3D/hwswap
|
https://github.com/Blender3D/hwswap
|
12e3fd8e4e2d54434123578aa86d1d64dc479cd0
|
d61b0bd44d4c29158c00b7a26acabc542944862b
|
refs/heads/master
| 2016-09-06T21:30:29.271776 | 2013-07-14T20:31:34 | 2013-07-14T20:31:34 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from app import app

@app.template_filter('isinstance')
def template_isinstance(obj, name):
    # Jinja filter: {{ value|isinstance('list') }} -> True/False, where
    # 'name' is the name of a builtin type.
    # NOTE(review): __builtins__ is only a dict in *imported* modules (it
    # is the builtins module itself in __main__), so subscripting it here
    # is CPython-implementation-dependent — confirm / consider the
    # builtins module instead.
    return isinstance(obj, __builtins__[name])
|
UTF-8
|
Python
| false | false | 2,013 |
15,479,062,166,415 |
3cdbf8f52a80f4aeed495e327fb2bf15ba91794e
|
15077b8f0787e87758f85cd6777f1495cf9475a6
|
/requirements/R500/configuration_scripts/file_hash_test_config_2.py
|
6c9b4e9c51936c98950d3990bcbf6378ca230e87
|
[] |
no_license
|
jan-cerny/ValidationTestSuite
|
https://github.com/jan-cerny/ValidationTestSuite
|
126c8f99a0843a172de7b984e7ea3e33fa223932
|
42984c64e28881f282ad9870684af5e3fe819f71
|
refs/heads/master
| 2020-12-25T18:23:04.095612 | 2014-12-16T11:53:03 | 2014-12-16T11:53:03 | 28,082,788 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
__author__ = 'matt.kerr'
__copyright__ = "Copyright 2011, G2, Inc."
__credits__ = ["Matt Kerr"]
__license__ = "TODO"
__version__ = "TODO"
__maintainer__ = 'matt.kerr'
__email__ = "TODO"
__status__ = "Alpha"
import os
def performConfig(workingDir=None):
    """Create the directory layout and seed file for the ind_file_hash OVAL test."""
    # Build C:/scap_validation_content/ind_file_hash plus its 'e' and '1e'
    # subdirectories, creating only the pieces that do not already exist.
    root = r"C:/scap_validation_content"
    if not os.path.exists(root):
        os.makedirs(root)
    root = os.path.join(root, 'ind_file_hash')
    if not os.path.exists(root):
        os.makedirs(root)
    for subdir in (r'e', r'1e'):
        target = os.path.join(root, subdir)
        if not os.path.exists(target):
            os.makedirs(target)
    # NOTE(review): the seed file is written via an absolute Windows path
    # rather than the paths assembled above — presumably intentional.
    create_file(r"C:\scap_validation_content\ind_file_hash\e\test.bak", 'OVAL Test')
    return
def create_file(filename, content):
    """Create (or overwrite) *filename* with the given text *content*."""
    with open(filename, 'w') as handle:
        handle.write(content)
if __name__=="__main__":
performConfig()
|
UTF-8
|
Python
| false | false | 2,014 |
10,883,447,161,361 |
ce8995ffae623f7a61829adf17885c6b451f206c
|
162d9a54fddc636dadebd7ccba5124e10171e0d2
|
/src/listener.py
|
9bf75a7c8874ed98f1446c5da01b69d8f794a747
|
[] |
no_license
|
michalneoral/clopema_model_by_vision
|
https://github.com/michalneoral/clopema_model_by_vision
|
2b1380b0940549dd38adf10bba2ca1862797c609
|
178fc00438783667dc2b2f6a3a5fae2fc6565bf0
|
refs/heads/master
| 2020-05-18T20:49:36.441242 | 2013-11-18T10:35:04 | 2013-11-18T10:35:04 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
import roslib; roslib.load_manifest('beginner_tutorials')
import rospy
import rosbag
from sensor_msgs.msg import *
bag = rosbag.Bag('test2.bag', 'w')
def callback(data):
#rospy.loginfo(rospy.get_name() + ": I heard %s" % data.name[1])
print data
try:
bag.write('/joint_states', data.message)
finally:
bag.close()
def listener():
    """Subscribe to /joint_states and block until the node shuts down."""
    # anonymous=True appends a random suffix to the node name so several
    # listeners can run at once without name collisions.
    rospy.init_node('listener', anonymous=True)
    rospy.Subscriber("/joint_states", JointState, callback)
    # Keep the process alive; callbacks fire from here until shutdown.
    rospy.spin()
if __name__ == '__main__':
listener()
|
UTF-8
|
Python
| false | false | 2,013 |
15,281,493,649,288 |
306ce4a899d67cc9b2293e248b68524cb431235b
|
cafff971232c04ae181085bf2b86ba80df3f0e19
|
/sgp/db_repository/versions/025_migration.py
|
e319f207d0eb3e95e7c5f60e585c1b9b549b2dbd
|
[] |
no_license
|
maurovera/sgp
|
https://github.com/maurovera/sgp
|
a1f96e028a56898700db32ba4d07bfd17412c161
|
17efc392f2457f1b7542d848159e5cacdae06d84
|
refs/heads/master
| 2021-01-01T05:42:38.321970 | 2013-07-05T03:09:21 | 2013-07-05T03:09:21 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from sqlalchemy import *
from migrate import *
from migrate.changeset import schema
# Separate metadata for the schema before (pre) and after (post) this migration.
pre_meta = MetaData()
post_meta = MetaData()
# Target shape of the 'relacion' table: the new idProyecto and idFase
# columns are appended to the existing ones.
relacion = Table('relacion', post_meta,
    Column('idRelacion', Integer, primary_key=True, nullable=False),
    Column('tipo', String(length=45), nullable=False),
    Column('idSucesor', Integer),
    Column('idAntecesor', Integer),
    Column('idProyecto', Integer),
    Column('idFase', Integer),
)
def upgrade(migrate_engine):
    """Add the idFase and idProyecto columns to the 'relacion' table."""
    # Upgrade operations go here. Don't create your own engine; bind
    # migrate_engine to your metadata
    pre_meta.bind = migrate_engine
    post_meta.bind = migrate_engine
    post_meta.tables['relacion'].columns['idFase'].create()
    post_meta.tables['relacion'].columns['idProyecto'].create()
def downgrade(migrate_engine):
    """Drop the idFase and idProyecto columns added by upgrade()."""
    # Operations to reverse the above upgrade go here.
    pre_meta.bind = migrate_engine
    post_meta.bind = migrate_engine
    post_meta.tables['relacion'].columns['idFase'].drop()
    post_meta.tables['relacion'].columns['idProyecto'].drop()
|
UTF-8
|
Python
| false | false | 2,013 |
5,978,594,505,100 |
2aa4115ec27a405e8e9ab7902e7ec4efeb47f7fe
|
595f46cf48ae7b643d1e7e78a41fdc23979e4fb4
|
/problem032.py
|
3602fda8b834c0dce482ed0f39757283ac90cc70
|
[] |
no_license
|
ibno/euler
|
https://github.com/ibno/euler
|
d9164312c844b913ddaedec6a5e5570caef011ed
|
5596b5d53965bacb66ee89d6e10f3ea2ddf4086a
|
refs/heads/master
| 2016-09-02T06:37:05.209364 | 2012-10-15T10:42:59 | 2012-10-15T11:05:07 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
# Euler Project Problem 32
"""Benchmark
Intel Core2 Duo CPU P8400 @ 2.26GHz
real 0m0.844s
user 0m0.792s
sys 0m0.044s
"""
import itertools
pandigitals = [x for x in itertools.permutations(range(1, 10))]
products = set()
for p in pandigitals:
c = p[0]*(1000*p[1] + 100*p[2] + 10*p[3] + p[4])
d = (10*p[0] + p[1])*(100*p[2] + 10*p[3] + p[4])
ab = 1000*p[5] + 100*p[6] + 10*p[7] + p[8]
if c == ab or d == ab:
products.add(ab)
print 'Answer to problem 32:',sum(products)
|
UTF-8
|
Python
| false | false | 2,012 |
4,234,837,798,415 |
46cc026e70044032ef4338589832fedc220aecc5
|
b795acde532d5a17b0b9806058a6b110519a1aad
|
/util.py
|
5cee0bd290fa406e16432d383bd5cee123c5118c
|
[] |
no_license
|
wavebeem/cakewm
|
https://github.com/wavebeem/cakewm
|
703dc45416c6a40882154b3bf12acfbca4106239
|
5e0254391dcd408f345eb27b1b5a7a5c682cd9d7
|
refs/heads/master
| 2020-04-11T10:49:19.413789 | 2011-08-26T01:04:19 | 2011-08-26T01:04:19 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
import sys
import itertools
import pypixel
# from conf import Conf
def clear():
    """Clear the terminal by shelling out to clear(1)."""
    command = "clear"
    os.system(command)
def echo(*args):
    "print wrapper"
    # Joins all arguments with single spaces (Python 2 print statement).
    print " ".join(map(str, args))
def invert_dict(d):
    """Map each element of d's iterable values back to its key.

    A value appearing under several keys keeps the last key seen.
    """
    inverted = {}
    for key in d:
        for element in d[key]:
            inverted[element] = key
    return inverted
def rgb_assert(rgb):
    """Abort via color_error() if any RGB channel is outside 0-255."""
    if any(not (0 <= channel <= 255) for channel in rgb):
        color_error()
def hsl_assert(hsl):
    """Abort via color_error() unless hue is 0-360 and the other two 0-100."""
    hue, saturation, lightness = hsl
    in_range = (0 <= hue <= 360
                and 0 <= saturation <= 100
                and 0 <= lightness <= 100)
    if not in_range:
        color_error()

# The ranges and order are the same for HSL and HSV colors
hsv_assert = hsl_assert
def color_error():
    """Exit with a fatal message describing the accepted color ranges."""
    messages = (
        "Bad color value",
        "Hex colors must be 3 or 6 digits",
        "RGB colors must be in the range 0-255",
        "As for HSL and HSV colors,",
        "Hue must be in the range 0-360",
        "Saturation, lightness, and value must all be in the range 0-100"
    )
    errors(*messages)
def clamp(x, a, b):
    """Return x limited to the closed interval [a, b]."""
    lower_bounded = max(x, a)
    return min(lower_bounded, b)
def clamp2(x, q):
    """Return x limited to the half-open index range [0, q - 1]."""
    # Inlines clamp(x, 0, q - 1).
    return min(max(x, 0), q - 1)
def wrap(x, n):
    """Wrap x into the range [0, n) using Python's modulo semantics."""
    return divmod(x, n)[1]
def between(x, a, b):
    '''Returns if a number is in the range [a, b]'''
    return a <= x <= b
def between2(x, a):
    '''Returns if a number is in the range [0, a)'''
    return 0 <= x < a
def swap(ary, i, j):
    '''Swaps the values at locations i and j in the list ary'''
    temp = ary[i]
    ary[i] = ary[j]
    ary[j] = temp
def doc(d):
    '''Takes a dictionary of functions and docstrings, assigning them
    appropriately'''
    # .items() instead of the Python-2-only .iteritems(): identical
    # behaviour on Python 2, and keeps the helper working on Python 3.
    for function, docstring in d.items():
        function.__doc__ = docstring
def lazy(f):
    '''Wrap f so calling it only captures arguments; the returned thunk
    runs f(*args, **kwargs) when it is finally invoked.

    Example:
        dispatcher = {"a": obj.add(1, 2)}   # nothing happens yet
        dispatcher["a"]()                   # runs obj.add(1, 2)
    '''
    def capture(*args, **kwargs):
        def thunk():
            return f(*args, **kwargs)
        return thunk
    return capture
# Global switch for diagnostic output.
_use_debug = True

def debug(*args):
    """Write one "cakewm: "-prefixed line to stdout, unless disabled."""
    if not _use_debug:
        return
    text = " ".join(str(part) for part in args)
    sys.stdout.write("cakewm: " + text + "\n")
def debugs(*args):
    """Emit each argument on its own debug() line."""
    for item in args:
        debug(item)
def error(message):
    """Report a fatal error on stderr and terminate with exit status 1."""
    line = "cakewm: error: %s\n" % message
    sys.stderr.write(line)
    sys.exit(1)
def errors(message0, *messages):
    """Report a multi-line fatal error on stderr and exit with status 1."""
    sys.stderr.write("cakewm: error: %s\n" % message0)
    # Subsequent lines are padded with dashes to line up under "error:".
    padding = "-" * len("error:")
    for extra in messages:
        sys.stderr.write("cakewm: %s %s\n" % (padding, extra))
    sys.exit(1)
# Endless iterator over evenly spaced hues (every 20 degrees around the
# color wheel) converted to RGB at 50% saturation / 40% lightness.
# NOTE: relies on the Python-2-only xrange and the external pypixel helper.
INFINITE_RAINBOW = itertools.cycle(
    map(
        lambda hue: pypixel.hsl2rgb((hue, 50, 40)),
        xrange(0, 360, 20)
    )
)
|
UTF-8
|
Python
| false | false | 2,011 |
13,185,549,636,128 |
50f2756f2495d005b0c8d13f36a26af0e84a233a
|
758dfc6fe20b382ecead7c02c2e597d4b74488db
|
/server/batch/voicetotemplate.py
|
152a1c1aaafb9570fe5a9907584d28ee9b9f0796
|
[] |
no_license
|
manjushamohan/imagequick-interface
|
https://github.com/manjushamohan/imagequick-interface
|
0945e330864d19699bccb7676ffd8e73c2746be3
|
409dfe19f61f001f7b0af3957df7ccb386454015
|
refs/heads/master
| 2021-01-19T07:18:36.956660 | 2013-11-28T18:40:45 | 2013-11-28T18:40:45 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from common import database
import os
def station_frequency_position(format,voice):
    """Grant *voice* station + frequency rights on every template of
    *format*, and position rights where position words exist.

    Both documents are looked up by name to obtain their uids.
    NOTE: the parameter name 'format' shadows the builtin; kept for API
    compatibility.
    """
    format_id = str(database.db.formats.find_one({'name':format['name']})['uid'])
    voice_id = str(database.db.voices.find_one({'name':voice['name']})['uid'])
    #database.backup('templates')
    # Templates with blank posWords get only station/frequency voice ids;
    # the rest additionally get the position voice id.
    database.db.templates.update({"formatids":format_id,"posWords":{"$in":[" ",""]}},{"$addToSet": {"statVoiceids":voice_id,"freVoiceids":voice_id } },multi=True)
    database.db.templates.update({"formatids":format_id,"posWords":{"$nin":[" ",""]}},{"$addToSet": {"statVoiceids":voice_id,"freVoiceids":voice_id,"posVoiceids":voice_id } },multi=True)
    return True
def station_frequency(format,voice):
    """Grant *voice* station + frequency rights on every template of *format*."""
    format_id = str(database.db.formats.find_one({'name':format['name']})['uid'])
    voice_id = str(database.db.voices.find_one({'name':voice['name']})['uid'])
    #database.backup('templates')
    # $addToSet keeps the voice lists duplicate-free across repeated runs.
    database.db.templates.update({"formatids":format_id},{"$addToSet": {"statVoiceids":voice_id,"freVoiceids":voice_id } },multi=True)
    return True
def station(format,voice):
    """Grant *voice* station-announcement rights on every template of *format*."""
    format_id = str(database.db.formats.find_one({'name':format['name']})['uid'])
    voice_id = str(database.db.voices.find_one({'name':voice['name']})['uid'])
    #database.backup('templates')
    database.db.templates.update({"formatids":format_id},{"$addToSet": {"statVoiceids":voice_id} },multi=True)
    return True
def frequency(format,voice):
    """Grant *voice* frequency-announcement rights on every template of *format*.

    Removed a leftover Python-2 debug ``print format_id,voice_id`` that
    none of the sibling functions have (and which breaks under Python 3).
    """
    format_id = str(database.db.formats.find_one({'name':format['name']})['uid'])
    voice_id = str(database.db.voices.find_one({'name':voice['name']})['uid'])
    #database.backup('templates')
    database.db.templates.update({"formatids":format_id},{"$addToSet": {"freVoiceids":voice_id} },multi=True)
    return True
def position(format,voice):
    """Grant *voice* position-announcement rights on the templates of
    *format* that actually have position words (posWords not blank)."""
    format_id = str(database.db.formats.find_one({'name':format['name']})['uid'])
    voice_id = str(database.db.voices.find_one({'name':voice['name']})['uid'])
    #database.backup('templates')
    database.db.templates.update({"formatids":format_id,"posWords":{"$nin":[" ",""]}},{"$addToSet": {"posVoiceids":voice_id} },multi=True)
    return True
|
UTF-8
|
Python
| false | false | 2,013 |
16,518,444,259,156 |
370ea7a5f9d32886d0ed43f2f7c046fb8109c718
|
7404eb6fd13a2bfeaa9ab349d2441b8c020c07e4
|
/ckanext/spatial/model.py
|
b242845e98c124b63dfc7eba50001c0246517d25
|
[] |
no_license
|
kindly/ckanext-spatial
|
https://github.com/kindly/ckanext-spatial
|
7e61c1e46070af3e958e1cfa6afde26ad6082c84
|
65fe19442824ff7950212e0607000463abbf7290
|
refs/heads/master
| 2016-09-06T10:18:10.581077 | 2011-09-30T17:09:21 | 2011-09-30T17:09:21 | 2,559,082 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from ckan.lib.base import config
from ckan.model import Session
from ckan.model.meta import *
from ckan.model.domain_object import DomainObject
from geoalchemy import *
from geoalchemy.postgis import PGComparator
db_srid = int(config.get('ckan.spatial.srid', '4326'))
package_extent_table = Table('package_extent', metadata,
Column('package_id', types.UnicodeText, primary_key=True),
GeometryExtensionColumn('the_geom', Geometry(2,srid=db_srid)))
class PackageExtent(DomainObject):
    """Spatial extent (geometry) associated with a CKAN package."""
    def __init__(self, package_id=None, the_geom=None):
        self.package_id = package_id
        self.the_geom = the_geom
# Map PackageExtent to its table, exposing 'the_geom' as a GeoAlchemy
# geometry column with PostGIS comparison operators.
mapper(PackageExtent, package_extent_table, properties={
    'the_geom': GeometryColumn(package_extent_table.c.the_geom,
        comparator=PGComparator)})

# enable the DDL extension
GeometryDDL(package_extent_table)

# Fallback SRID used by setup() when none is supplied.
DEFAULT_SRID = 4326
def setup(srid=None):
    """Create the package_extent table with a PostGIS geometry column.

    srid defaults to DEFAULT_SRID (4326). AddGeometryColumn is used so the
    column is registered in PostGIS's geometry_columns catalogue; the srid
    is passed as a bound query parameter, not interpolated into the SQL.
    """
    if not srid:
        srid = DEFAULT_SRID
    srid = str(srid)
    connection = Session.connection()
    connection.execute('CREATE TABLE package_extent(package_id text PRIMARY KEY)')
    connection.execute('SELECT AddGeometryColumn(\'package_extent\',\'the_geom\', %s, \'GEOMETRY\', 2)',srid)
    Session.commit()
|
UTF-8
|
Python
| false | false | 2,011 |
2,267,742,739,721 |
67d4b7b5baafef6171f01393da5cf0bf0dbc239a
|
2caf9b7b4235ba4ba8d9530ea135cbe9837142ab
|
/archeobases/branches/1.9.x/PetroglyphsMap/petroglyphsmap/model/rock.py
|
46a6c9906f260f991382d28c9f082ab8a96a2285
|
[
"AGPL-3.0-only"
] |
non_permissive
|
lazaret/archeobases
|
https://github.com/lazaret/archeobases
|
ffaf4802e7b9c5baae262c6e358bbddcae74e1e6
|
99e019ee7c743db6ae8f8e49506b6675762a65ce
|
refs/heads/master
| 2021-01-21T16:32:18.964693 | 2014-04-16T13:29:45 | 2014-04-16T13:29:45 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from sqlalchemy import *
from sqlalchemy.orm import *
from geoalchemy import *
from petroglyphsmap.model.meta import Base
class Rock(Base):
    """Table for the geographical rocks - POINTS - Lambert93"""
    __tablename__ = 'rock'

    rock_id = Column(Integer, primary_key=True)
    rock_number = Column(Unicode(10), nullable=False)
    group_id = Column(Integer, ForeignKey('group.group_id')) # Many-to-one relationship
    point_x = Column(Float, nullable=False)
    point_y = Column(Float, nullable=False)
    point_z = Column(Float)
    # 2D point stored in Lambert-93 (srid 2154), with a spatial index.
    geo_point = GeometryColumn(Point(2, srid=2154, spatial_index=True))
    year = Column(Integer)
    # Relationship between figures and rocks
    rock_figures = relationship('Figure', backref='rock')
    def __init__(self, zone, group, rock, x, y, z):
        # NOTE(review): 'self.rock' is not a mapped column — this looks
        # like it should be 'self.rock_number'; confirm before relying on it.
        self.rock = rock
        # NOTE(review): groupid_from_zg is not defined or imported in this
        # module as far as visible here — confirm where it comes from.
        self.group_id = groupid_from_zg(zone, group)
        self.point_x = x
        self.point_y = y
        self.point_z = z
        # String concatenation implies x and y arrive as strings even though
        # the mapped columns are Float — TODO confirm with the callers.
        wkt = "POINT(" + x + " " + y + ")"
        self.geo_point = WKTSpatialElement(wkt) # Geometric object (2D)
|
UTF-8
|
Python
| false | false | 2,014 |
4,810,363,397,310 |
2434068fe1c99afddc5ad6494a0f99172a770324
|
8d13760ca9819002aac69ca881ae4f3c140aeade
|
/news/views.py
|
d4407af703339f721c4af75a73bf19ce374f0aa6
|
[
"BSD-3-Clause"
] |
permissive
|
jokey2k/ShockGsite
|
https://github.com/jokey2k/ShockGsite
|
9b60a7eece4f24d5a814f1b7f6114ca64e046ef6
|
d3944d515fa2a430be5cd4b4367299b860f443f7
|
refs/heads/master
| 2016-09-05T22:40:37.546924 | 2011-08-02T21:29:13 | 2011-08-02T21:29:13 | 1,974,892 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.utils.safestring import mark_safe
from django.shortcuts import render, get_object_or_404
from news.models import NewsItem
# Create your views here.
def recent_entries(request, entrycount=4, template='news/recents.html', offset=None):
    """Render the *entrycount* most recent news items, optionally skipping
    the first *offset* of them."""
    start = offset if offset is not None else 0
    entries = NewsItem.objects.all()[start:start + entrycount]
    return render(request, template, {'entries': entries})
|
UTF-8
|
Python
| false | false | 2,011 |
6,906,307,417,987 |
983885365cd32f133cb9d119fca15d006cb7527f
|
828d1a6c9bc82a746dcd41dfb500f2fdcd732665
|
/setup.py
|
5d5be4e37fe2109dba2625af1027609cef7966cc
|
[] |
no_license
|
timguo/CoolBall
|
https://github.com/timguo/CoolBall
|
0ac07cbe6ef6c403bd028dd638d11467ab7f7d70
|
9789688c33228a2f3847195a7f146931f21bac30
|
refs/heads/master
| 2021-01-18T00:26:52.946932 | 2012-06-11T02:50:59 | 2012-06-11T02:50:59 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from distutils.core import setup
import py2exe
# Build a standalone Windows executable for CoolBall via py2exe.
setup(
    options = {
        "py2exe": {
            # Exclude the VC++ runtime DLL from the bundle; it must be
            # present on the target machine instead — TODO confirm.
            "dll_excludes": ["MSVCP90.dll"],
            # Force-include sip, which py2exe's static import analysis
            # misses (presumably pulled in dynamically by PyQt — confirm).
            "includes":["sip"],
        }
    },
    # GUI application (no console window), entry script CoolBall.py.
    windows=[{"script": "CoolBall.py"}]
)
|
UTF-8
|
Python
| false | false | 2,012 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.