__id__
int64 3.09k
19,722B
| blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
256
| content_id
stringlengths 40
40
| detected_licenses
list | license_type
stringclasses 3
values | repo_name
stringlengths 5
109
| repo_url
stringlengths 24
128
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
42
| visit_date
timestamp[ns] | revision_date
timestamp[ns] | committer_date
timestamp[ns] | github_id
int64 6.65k
581M
⌀ | star_events_count
int64 0
1.17k
| fork_events_count
int64 0
154
| gha_license_id
stringclasses 16
values | gha_fork
bool 2
classes | gha_event_created_at
timestamp[ns] | gha_created_at
timestamp[ns] | gha_updated_at
timestamp[ns] | gha_pushed_at
timestamp[ns] | gha_size
int64 0
5.76M
⌀ | gha_stargazers_count
int32 0
407
⌀ | gha_forks_count
int32 0
119
⌀ | gha_open_issues_count
int32 0
640
⌀ | gha_language
stringlengths 1
16
⌀ | gha_archived
bool 2
classes | gha_disabled
bool 1
class | content
stringlengths 9
4.53M
| src_encoding
stringclasses 18
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | year
int64 1.97k
2.01k
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
10,015,863,768,419 |
33fe8a5b8e1f4b8bfb49ce84bfe28d4ccca50ae0
|
a4f1d49c80297d2a72e3b5cae16743cf2013afe5
|
/artwark/models.py
|
28db9dc2979964d0205a7d5b760ceec243a0010f
|
[] |
no_license
|
raony/twtrivia
|
https://github.com/raony/twtrivia
|
843c4202fedc11fa565bb5e96a27383c5ce11544
|
97b3cc273cbe5c9e65070b553714977ca9c1a63b
|
refs/heads/master
| 2021-01-11T00:47:27.628736 | 2013-12-05T06:40:02 | 2013-12-05T06:40:02 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import string
from django.db import models
# Create your models here.
class ArtWark(models.Model):
    """A rectangular artwork grid, altura (height) x largura (width) tiles."""
    altura = models.IntegerField()
    largura = models.IntegerField()
    current = models.BooleanField(default=False)

    def save(self, *args, **kwargs):
        """Persist the artwork; on first save, create one Imagem per cell."""
        result = super(ArtWark, self).save(*args, **kwargs)
        # Only populate the grid when no tiles exist yet (first save).
        if not self.imagens.all().count():
            for row in range(1, self.altura + 1):
                for col in range(1, self.largura + 1):
                    Imagem.objects.create(artwark=self, x=col, y=row)
        return result
class Imagem(models.Model):
    """One tile of an ArtWark grid, addressed by 1-based (x, y) coordinates."""
    artwark = models.ForeignKey(ArtWark, related_name='imagens')
    arquivo = models.ImageField(null=True, upload_to=lambda instance, filename: 'artwark/%d/%d_%d.png'%(instance.artwark.pk, instance.x, instance.y))
    x = models.IntegerField()
    y = models.IntegerField()
    locked = models.BooleanField(default=False)

    @property
    def coluna(self):
        """Spreadsheet-style column letter for this tile (x=1 -> 'A')."""
        return string.ascii_uppercase[self.x - 1]

    @property
    def linha(self):
        """1-based row number of this tile."""
        return self.y
class Participante(models.Model):
    """Event participant: contact information plus a role (papel)."""
    # (db value, display label) choices for `papel`.
    PAPEIS = (
        (0, 'CIO/CTO'),
        (1, 'Consultor'),
        (2, 'Gerente'),
        (3, 'Diretor'),
        (4, 'VP'),
        (5, 'Outro'),
    )
    nome = models.CharField(max_length=255)
    sobrenome = models.CharField(max_length=255)
    email = models.EmailField(max_length=255)
    telefone = models.CharField(max_length=255)
    cidade = models.CharField(max_length=255)
    empresa = models.CharField(max_length=255)
    papel = models.IntegerField(choices=PAPEIS)
    # Image counter — presumably incremented elsewhere; TODO confirm semantics.
    imagens = models.IntegerField(default=0)
    def __unicode__(self):
        # Python 2 string representation (pre-Django-2 era code).
        return self.nome_completo
    @property
    def nome_completo(self):
        # Full name: first name followed by surname.
        return u'%s %s'%(self.nome, self.sobrenome)
|
UTF-8
|
Python
| false | false | 2,013 |
19,396,072,327,086 |
75baa6c3b3bea7c8e6365a87f46cb6f3a6149fe8
|
0f1690953d025c3232dcef4a49e43d6eaf8300cf
|
/turtlehack.py
|
d48cf7743b93ef2d6ee4658eee82f8d59edf772e
|
[
"MIT"
] |
permissive
|
silshack/fall2013turtlehack
|
https://github.com/silshack/fall2013turtlehack
|
fd9f339299d442b14bffaa73f685792b954988f6
|
e0ffcf5930b2170eec1f38f5b06d907d942d67ad
|
refs/heads/master
| 2021-01-02T22:51:35.279928 | 2013-10-14T23:25:49 | 2013-10-14T23:25:49 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import turtle
import random
# A function that takes a turtle, a radius, a color, and an optional
# thickness and draws a circle
def colored_circle(turtle, radius, color, thickness=1):
    """Draw a circle of the given radius and color with optional pen thickness.

    Note: the `turtle` parameter intentionally shadows the imported module;
    any object exposing the turtle drawing API works here.
    """
    turtle.width(thickness)
    turtle.color(color)
    turtle.circle(radius)
# A function that takes a side length and a color and makes a square.
def colored_square(turtle, side_length, color):
    """Draw a square with the given side length and color.

    NOTE(review): a fifth left(90) follows the square, leaving the heading
    rotated 90 degrees from where it started — kept for compatibility.
    """
    turtle.color(color)
    for _ in range(4):
        turtle.forward(side_length)
        turtle.left(90)
    turtle.left(90)
# A function that take a turtle, dot size, color, and number of dots in
# line to make a dotted line
def dotted_line(turtle, number_of_dots, dot_size, color):
    """Draw a dotted line: `number_of_dots` dots spaced two dot-widths apart."""
    for _ in range(number_of_dots):
        turtle.dot(dot_size, color)
        # Lift the pen between dots so only the dots are drawn.
        turtle.penup()
        turtle.forward(dot_size * 2)
        turtle.pendown()
# A function that takes a number and makes that many random sized circles
def random_circle(turtle, number_of_circles, max_size=100):
    """Draw `number_of_circles` circles with random radii in [0, max_size].

    BUG FIX: the original used randint(0, max_size + 1); randint is
    inclusive on both ends, so radii could exceed max_size by one.
    """
    for _ in range(number_of_circles):
        turtle.circle(random.randint(0, max_size))
# A function that changes the turtle's color to a random color
def random_color():
    """Return a random color as a '#rrggbb' hex string."""
    # 0xFFFFFF == 16777215, the largest 24-bit RGB value.
    return "#{:06x}".format(random.randint(0, 0xFFFFFF))
# A function that takes a turtle and a pair of numbers and sets the turtle
# to a random location from x to -x and y to -y
def random_location(turtle, x, y, relative=False):
    """Move the turtle to a random location within the +/-x, +/-y bounds.

    If `relative` is True the random offset is applied to the current
    position; if False the new position is absolute. Raises Exception
    for any non-boolean `relative`.

    BUG FIX: the original used two independent `if` statements instead of
    if/elif/else, so the relative=False path moved the turtle and then
    fell into the `else` of the second `if` and raised Exception.
    """
    if relative is True:
        random_x = turtle.xcor() + random.randint(-x, x)
        random_y = turtle.ycor() + random.randint(-y, y)
    elif relative is False:
        random_x = random.randint(-x, x)
        random_y = random.randint(-y, y)
    else:
        raise Exception("expected boolean")
    turtle.setpos(random_x, random_y)
# A function that makes n random colored and located dots inside x, y
# Thanks to Stacey Mantooth for the idea
def pox(turtle, x, y, n=10):
    """Scatter n randomly colored dots within +/-x, +/-y of the current spot.

    The pen is lifted while scattering and the turtle returns to its
    starting position afterwards. (Idea credit: Stacey Mantooth.)
    """
    start = (turtle.xcor(), turtle.ycor())
    turtle.penup()
    for _ in range(n):
        random_location(turtle, x, y, True)
        turtle.dot(random.randint(3, 10), random_color())
    turtle.setpos(*start)
    turtle.pendown()
# A function that draws an n-sided polygon
def n_sided_polygon(
        turtle,
        n,
        color="#FFFFFF",
        line_thickness=1,
        line_length=80):
    """Draw an n-sided regular polygon.

    input: turtle, number of sides, line color, line thickness, line length
    Each side is drawn, then the turtle turns by the exterior angle 360/n.
    NOTE(review): the extra turn after the loop (kept from the original)
    leaves the heading rotated by one step, not at the original heading.
    """
    turtle.degrees()
    turtle.pensize(line_thickness)
    exterior_angle = 360 / n
    for _ in range(n):
        turtle.color(color)
        turtle.pendown()
        turtle.forward(line_length)
        turtle.penup()
        turtle.left(exterior_angle)
    turtle.left(exterior_angle)
    return 0
|
UTF-8
|
Python
| false | false | 2,013 |
1,254,130,461,897 |
d8c0c37bbef633c6e91387be12883232c8e4d079
|
3114430ce15c18281117459e26eea4b774e3998a
|
/day4/accounts/views.py
|
ac974ad2c6722f09ca12464b9bef04ef7bfdff88
|
[
"MIT"
] |
permissive
|
Joseamica/Easily-written-Django
|
https://github.com/Joseamica/Easily-written-Django
|
c02e7333e84ca2257b7b8bfae3f6732898c5000a
|
0b746638751702c453db9490fe29ef6d34e4a3bc
|
refs/heads/master
| 2021-05-27T20:25:41.341149 | 2014-05-25T08:25:53 | 2014-05-25T08:25:53 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.shortcuts import render, RequestContext, render_to_response
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.contrib.auth import login, authenticate, logout
from .forms import AccountForm, AccountAuthForm
# Create your views here.
def login_view(request):
    """Authenticate a POSTed login form and redirect to the site root.

    On invalid credentials, falls through to re-rendering with the bound
    form so validation errors can be displayed.
    """
    if request.method == 'POST':
        # BUG FIX: the original referenced the undefined name
        # `AuthenticateForm`; the auth form imported from .forms is
        # AccountAuthForm.
        form = AccountAuthForm(data=request.POST)
        if form.is_valid():
            login(request, form.get_user())
            # Success. BUG FIX: `redirect` was never imported in this
            # module; use the imported HttpResponseRedirect instead.
            return HttpResponseRedirect('/')
        else:
            # Failure: re-render with the bound form.
            # NOTE(review): `index` is not defined or imported in this
            # module — confirm which view should render the login errors.
            return index(request, auth_form=form)
    return HttpResponseRedirect('/')
def logout_view(request):
    """Log the current user out and redirect to the site root."""
    logout(request)
    # BUG FIX: `redirect` was never imported in this module; use the
    # imported HttpResponseRedirect instead.
    return HttpResponseRedirect('/')
def join(request):
    """Render the signup page; create the account when the form validates."""
    form = AccountForm(request.POST or None)
    context = RequestContext(request)
    if form.is_valid():
        # `commit=False`: before save it to database, just keep it in memory
        save_it = form.save(commit=False)
        save_it.save()
        # NOTE(review): `messages` is not imported in this module, so this
        # line raises NameError on a valid submission. Needs
        # `from django.contrib import messages` at the top of the file.
        messages.success(request, 'Thank you for joining')
        #return HttpResponseRedirect(reverse('articles:all'))
        return render_to_response("join.html", locals(), context_instance=context)
    return render_to_response("join.html", locals(), context_instance=context)
|
UTF-8
|
Python
| false | false | 2,014 |
6,906,307,444,297 |
4309daa3a958bccc9ee7033b0933718cafae8276
|
5e4630d7d4ce8ca15513bc4e709af306f9b12800
|
/application/config.py
|
314ef3acea11a0d19e17bf17b84d23969d2f28aa
|
[] |
no_license
|
luckypool/lucky-flask
|
https://github.com/luckypool/lucky-flask
|
81ab9b000f4ad814633587a2573ab4cc94de3428
|
ac8dad316762f7af345055c6bb9934fa5c634bf8
|
refs/heads/master
| 2020-05-17T12:46:48.491790 | 2013-01-24T11:40:27 | 2013-01-24T11:40:27 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
class Config(object):
    """Base configuration shared by every environment."""
    DEBUG = False
    TESTING = False
    ROOT = '0.0.0.0'
class ProductConfig(Config):
    """Production settings: inherits the safe defaults unchanged."""
    pass
class DevelopConfig(Config):
    """Development settings: same as Config but with debugging enabled."""
    DEBUG = True
|
UTF-8
|
Python
| false | false | 2,013 |
19,610,820,692,392 |
446ddcdc8434e4338496a79fa68a176202f07882
|
ca10f9a2a5b8342ce8d95bd4b2a6e3644b1bd13e
|
/hddinfo/create_file.py
|
b8c260c9d5f4976065eb3ed007ec0ded37da7cfc
|
[] |
no_license
|
jenkokov/MyPyScripts
|
https://github.com/jenkokov/MyPyScripts
|
97a4b88e1a0c0661e95b7a5ec0739bb7a32075b2
|
700418f6db5fe9e58ed0aeadfbdb4316a8f79df3
|
refs/heads/master
| 2021-01-01T17:16:14.188830 | 2014-09-24T14:16:53 | 2014-09-24T14:16:53 | 5,236,498 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
import comparegames
import pymysql
import mysqlwork
import socket
import sys
def delfolder(folder):
    # Delete `folder` from the database for the current club via mysqlwork.
    # NOTE: relies on module-level globals `club` and `comp`, which are only
    # set when this file runs as a script (see the __main__ block below).
    mysqlwork.del_folder(club, comp, folder)
    print 'Successful deleting folder \'{0}\' for club #{1}'.format(folder, club)
def add_new_folder(folder):
    # Register a D:/Games/<folder> directory in the database, or refresh its
    # stored size if it is already present for this club.
    if not os.path.exists(u'D:/Games/' + folder):
        print 'Can not find folder {0}!'.format(folder)
        return
    # Size as reported by comparegames.get_size — assumed to be bytes;
    # TODO confirm against comparegames.
    size = comparegames.get_size(u'D:/Games/' + folder)
    if mysqlwork.check_inbase(folder, club) == 1:
        # Folder already known for this club: just update the size.
        mysqlwork.update_size(club, folder, size)
        print 'Update folder {0} in database for club #{1}!'.format(folder.upper(), club)
    else:
        mysqlwork.write_folder(folder.lower(), size, club, comp, 1, 1)
        print 'Adds folder {0} in database for club #{1}!'.format(folder.upper(), club)
def write2sql():
    # Rebuild the size database: drop existing rows for this club/comp, then
    # insert one row per directory under D:/Games.
    # NOTE(review): database credentials are hard-coded in source — move them
    # to configuration.
    mysqlwork.drop_comp(club, '0')
    d_dict = os.listdir('D:/Games')
    conn = pymysql.connect(host='172.16.10.189', port=3306, user='hdd_datauser', passwd='oknej1984', db='hdd_data')
    cur = conn.cursor()
    for word in d_dict:
        word = word.lower()
        print 'Calculate and write size to DB for ' + u'D:/Games/' + word
        size = comparegames.get_size(u'D:/Games/' + word)
        # NOTE(review): SQL is built with str.format; parameterized queries
        # would be safer even for local folder names.
        cur.execute("INSERT INTO hdd_space(Folder, Size, accuracy, Club, Comp, Status,InRange) "
                    "VALUES ('{0}','{1}',100,'{2}', '{3}',1,1)".format(word, size, club, comp))
        #cur.execute("UPDATE hdd_space SET Folder='{0}' WHERE 1".format(word))
    cur.close()
    conn.close()
if __name__ == '__main__':
    # Interactive entry point: pick a club id, then delete, renew-all, or
    # add/refresh a single folder.
    ip = socket.gethostbyname(socket.gethostname())
    comp = '0'
    club = raw_input('Club ID (empty for this club): ')
    if club == '':
        # Default club id is the third octet of this machine's IP —
        # presumably the subnet encodes the club; TODO confirm.
        club = ip.split('.')[2]
    ques = raw_input('Deleting folder? (\'Y\' for deleting, something other for add): ')
    if ques == 'y' or ques == 'Y':
        dfolder = raw_input('Input folder for deleting: ')
        delfolder(dfolder.lower())
        sys.exit()
    nfolder = raw_input('New folder in D:\\Games (for renew all folders type NEW): ')
    if nfolder == 'NEW':
        write2sql()
    else:
        add_new_folder(nfolder.lower())
|
UTF-8
|
Python
| false | false | 2,014 |
10,144,712,801,646 |
27ee6497eb1128c4e391f560201af18ec3b3b538
|
fef98dab7c41fe5730d5a909418c95c28ff34bc8
|
/bpython/_internal.py
|
bb5d1aad3f77180e4c55f5174dc089717c22c652
|
[
"MIT"
] |
permissive
|
charles-dyfis-net/bpython
|
https://github.com/charles-dyfis-net/bpython
|
d530b6c3a7478a777859819328f4117e9735b417
|
424b7275e82e6ca0f3f75fd3d0ee9089f613512c
|
refs/heads/master
| 2017-10-30T15:20:55.968754 | 2008-08-28T20:38:44 | 2008-08-28T20:38:44 | 47,077 | 1 | 1 |
MIT
| false | 2018-10-05T23:33:48 | 2008-08-28T17:47:40 | 2016-05-08T12:32:39 | 2008-08-28T20:55:19 | 168 | 2 | 3 | 1 |
Python
| false | null |
import pydoc
import textwrap
import sys
import cStringIO
# window has to be a global so that the main bpython.py can load it and
# alter its state and share it with the interpreter being used for the
# actual user input, I couldn't think of another way of doing this.
window = None
def _help(obj):
    """Wrapper for the regular help() function but with a ghetto
    PAGER since curses + less = :(
    As per the vanilla help(), this function special-cases for str,
    so you can do help('isinstance') or help(isinstance) and get the
    same result.
    """
    io = cStringIO.StringIO()
    doc = pydoc.TextDoc()
    helper = pydoc.Helper(None, io)
    # Page geometry: leave room for the "press any key" prompt and avoid
    # writing in the last column.
    rows, columns = window.getmaxyx()
    rows -= 3
    columns -= 1
    output = None
    # Copied and pasted from Lib/pydoc.py and fiddled with
    # so it works fine with bpython. As far as I can tell
    # the bpython help is no compliant with the vanilla help.
    # Please let me know if you find this to be untrue.
    if type(obj) is type(''):
        if obj == 'help':
            helper.intro()
        elif obj == 'keywords':
            helper.listkeywords()
        elif obj == 'topics':
            helper.listtopics()
        elif obj == 'modules':
            helper.listmodules()
        elif obj[:8] == 'modules ':
            # BUG FIX: the original called the undefined global split();
            # split the query string itself to extract the search keyword.
            helper.listmodules(obj.split()[1])
        elif obj in helper.keywords:
            helper.showtopic(obj)
        elif obj in helper.topics:
            helper.showtopic(obj)
        elif obj:
            output = doc.document(eval(obj))
            #######################
            if not output:
                # NOTE(review): bails without displaying the message —
                # kept as in the original; probably meant to print it.
                output = "No help found for %s" % obj
                return
    else:
        output = doc.document(obj)
    if output is None:
        # The helper.* branches wrote into `io` instead of returning text.
        output = io.getvalue()
        io.close()
    if not output:
        return
    output = output.replace('\t', '        ')
    if '\n' in output:
        output = output.replace('\n\n', '\n')
        output = output.split('\n')
    else:
        output = [output]
    paragraphs = []
    for o in output:
        paragraphs.append(textwrap.wrap(o, columns))
    i = 0
    for j, paragraph in enumerate(paragraphs):
        for line in paragraph:
            sys.stdout.write(line + '\n')
            i += 1
            # This is a little unclear, but it just waits for a
            # keypress when the a page worth of text has been
            # displayed and returns if 'q' is pressed:
            if not i % rows and not wait_for_key():
                return
def wait_for_key():
    """Block until a key is pressed for the ghetto paging.

    Returns False when the key was 'q' (cancel), True otherwise.
    """
    window.addstr("Press any key, q to cancel.")
    pressed = None
    # Spin until getch() reports a (truthy) key code.
    while not pressed:
        pressed = window.getch()
    clear_line()
    return pressed != ord('q')
def clear_line():
    """Erase the cursor's current line, leaving the cursor in column 0."""
    row, _col = window.getyx()
    window.move(row, 0)
    window.clrtoeol()
# vim: sw=4 ts=4 sts=4 ai et
|
UTF-8
|
Python
| false | false | 2,008 |
2,259,152,829,208 |
7aa17f12aaab3a6f8668b5b86ccd152fd25c82b9
|
08c2cbb81e4c4c0973c0eb53d7d50d51a97595ae
|
/replace_numeral.py
|
f865a34af460f5762f76341cac6865e31128519e
|
[] |
no_license
|
ckesurf/NLP_1
|
https://github.com/ckesurf/NLP_1
|
b3154ffe36c5ac59a1d6589a67c8ffdc521c625b
|
45fa077e2ef2d306dbdf9fa9ea1d863900eac924
|
refs/heads/master
| 2020-05-04T07:28:55.326132 | 2013-09-29T00:01:55 | 2013-09-29T00:01:55 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#! /usr/bin/python
__author__="Daniel Bauer <[email protected]>"
__date__ ="$Sep 12, 2011"
import sys
from collections import defaultdict
import math
import shutil
"""
Count n-gram frequencies in a CoNLL NER data file and write counts to
stdout.
"""
def is_number(s):
    """Return True if `s` parses as a float, False otherwise."""
    try:
        float(s)
    except ValueError:
        return False
    return True
def replace_numeral(name):
    """Rewrite a CoNLL training file, replacing infrequent words.

    Words occurring fewer than 5 times are replaced by _NUMERAL_ when they
    parse as numbers, otherwise by _RARE_. The result is written to
    "<name>.replaced_numeral".
    """
    # First pass: count word frequencies (word is the first space-separated
    # field of each nonempty line).
    try:
        src = open(name, "r")
    except IOError:
        # BUG FIX: the original formatted this message with the undefined
        # name `arg`, raising NameError instead of reporting the file.
        sys.stderr.write("ERROR: Cannot read inputfile %s.\n" % name)
        sys.exit(1)
    word_count = defaultdict(int)
    with src:
        for raw in src:
            line = raw.strip()
            if line:  # Nonempty line
                fields = line.split(" ")
                word_count[fields[0]] += 1
    # Second pass: rewrite lines, replacing rare words.
    # NOTE: the output is opened in append mode, as in the original, so
    # re-running on the same input appends a second copy.
    new_training = name + ".replaced_numeral"
    with open(name, "r") as src, open(new_training, "a") as out:
        for raw in src:
            line = raw.strip()
            if line:  # Nonempty line
                fields = line.split(" ")
                if word_count[fields[0]] < 5:
                    if is_number(fields[0]):
                        line = "_NUMERAL_ " + fields[1]
                    else:
                        line = "_RARE_ " + fields[1]
            # Regardless, write the (stripped) line to the new file.
            out.write(line + '\n')
def usage():
    # Print command-line usage (Python 2 print statement).
    print """
    python replace_numeral.py [input_file]
    Read in a named entity tagged training input file, replace rares to _RARE_ and numerals to _NUMERAL_,
    and rewrite to input_file.replaced_numeral
    """
if __name__ == "__main__":
    if len(sys.argv)!=2: # Expect exactly one argument: the training data file
        usage()
        sys.exit(2)
    # Replace numerals to _NUMERAL_ and infrequent words (where Count(x) < 5) to _RARE_
    replace_numeral(sys.argv[1])
|
UTF-8
|
Python
| false | false | 2,013 |
7,662,221,689,947 |
fd1b666f92e795ad4795f01be484b517f9d44482
|
924e4ba149eaa286297534a977bfbafc831ff9d5
|
/test/CastTest.py
|
89e10023431ff3fa81d7544cd5577509066c8ab7
|
[
"MIT"
] |
permissive
|
4144/cAST
|
https://github.com/4144/cAST
|
5a556b032e59b15f30426fb3e1a1e1aa74127ea0
|
a947e20cd76a9cf41b7a5e4998fe1eec31183655
|
refs/heads/master
| 2023-03-16T00:37:07.500643 | 2012-09-21T19:20:40 | 2012-09-21T19:20:40 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import unittest, os, subprocess, re
from cast.pp_Parser import pp_Parser
from cast.c_Parser import c_Parser
from cast.ppLexer import ppLexer
from cast.cLexer import cLexer
from cast.ParserCommon import TokenStream, AstPrettyPrintable, ParseTreePrettyPrintable
from cast.PreProcessor import Factory as PreProcessorFactory
from cast.SourceCode import SourceCode, SourceCodeString
directory = 'test/cases'
class CastTest(unittest.TestCase):
def __init__(self, path=None, expectedPath=None, actualFunc=None):
super().__init__()
self.__dict__.update(locals())
self.maxDiff = None
def runTest(self):
fp = open(self.expectedPath)
expected = fp.read().rstrip()
fp.close()
self.assertEqual(expected, self.actualFunc(), 'failed to match %s (%s)' % (self.path, self.expectedPath))
class CastVersusGccTest(unittest.TestCase):
    """Verifies cAST preprocesses a case exactly like `gcc -E` does."""

    def __init__(self, arg=None, filepath=None):
        super().__init__(arg)
        # Assign explicitly instead of self.__dict__.update(locals()),
        # which also stored a circular `self.self` reference
        # (same fix as in CastTest).
        self.arg = arg
        self.filepath = filepath
        self.maxDiff = None

    def getCastOutput(self):
        """Tokenize source.c with cAST's own preprocessor."""
        cPPFactory = PreProcessorFactory()
        cPP = cPPFactory.create([], [self.filepath])
        filepath = os.path.join(self.filepath, 'source.c')
        sourcecode = SourceCode(filepath, open(filepath))
        cT, symbols = cPP.process( sourcecode, dict() )
        actualTokens = list(map(mapFuncSimple, list(cT)))
        return '\n'.join(actualTokens)

    def getGccOutput(self):
        """Tokenize `gcc -E` output (linemarkers stripped) with cLexer."""
        # Strip gcc's `# <line> "<file>"` linemarkers before lexing.
        regex = re.compile(r'^\#.*$', re.M)
        null = open('/dev/null', 'w')
        filepath = os.path.join(self.filepath, 'source.c')
        gcc = subprocess.check_output(['gcc', '-std=c99', '-E', filepath], stderr=null)
        null.close()
        gcc = gcc.decode('ascii')
        gcc = regex.sub('', gcc)
        sourcecode = SourceCodeString('<string>', gcc)
        cL = cLexer(sourcecode)
        actualTokens = list(map(mapFuncSimple, list(cL)))
        return '\n'.join(actualTokens)

    def test_doesCastPreprocessExactlyLikeGccDoes(self):
        filepath = os.path.join(directory, self.filepath)
        self.assertEqual(self.getGccOutput(), self.getCastOutput(), \
            "File %s didn't parse the same in GCC and cAST" % (filepath) )
def mapFunc(x):
    """Render a token via its own long-form toString."""
    # BUG FIX: the original had a second, unreachable return statement
    # after this one (a leftover detailed formatter); it has been removed.
    return x.toString('long')
def mapFuncSimple(x):
    """Render a token as 'TERMINAL|string' with newlines escaped."""
    escaped = x.getString().replace('\n', '\\n')
    return "%s|%s" % (x.getTerminalStr(), escaped)
def pptok(sourcecode, skipIncludes=False):
    """Tokenize sourcecode with the preprocessor lexer, one token per line."""
    lexer = ppLexer(sourcecode)
    return '\n'.join(map(mapFunc, list(lexer)))
def ppparse(sourcecode, skipIncludes=False):
    """Parse the preprocessor token stream and pretty-print the parse tree."""
    tree = pp_Parser().parse(TokenStream(ppLexer(sourcecode)))
    return str(ParseTreePrettyPrintable(tree))
def ppast(sourcecode, skipIncludes=False):
    """Parse the preprocessor token stream and pretty-print its AST."""
    ast = pp_Parser().parse(TokenStream(ppLexer(sourcecode))).toAst()
    return str(AstPrettyPrintable(ast))
def ctok(sourcecode, skipIncludes=False):
    """Fully preprocess sourcecode and return its C tokens, one per line."""
    factory = PreProcessorFactory()
    preprocessor = factory.create([], [os.path.dirname(sourcecode.resource)], skipIncludes=skipIncludes)
    tokens, _symbols = preprocessor.process(sourcecode, dict())
    return '\n'.join(map(mapFunc, list(tokens)))
def cparse(sourcecode, skipIncludes=False):
    """Preprocess sourcecode, parse the C tokens, pretty-print the tree."""
    factory = PreProcessorFactory()
    preprocessor = factory.create([], [os.path.dirname(sourcecode.resource)], skipIncludes=skipIncludes)
    tokens, _symbols = preprocessor.process(sourcecode, dict())
    tree = c_Parser().parse(TokenStream(tokens))
    return str(ParseTreePrettyPrintable(tree))
def cast(sourcecode, skipIncludes=False):
    """Preprocess and parse sourcecode, then pretty-print its AST."""
    factory = PreProcessorFactory()
    preprocessor = factory.create([], [os.path.dirname(sourcecode.resource)], skipIncludes=skipIncludes)
    tokens, _symbols = preprocessor.process(sourcecode, dict())
    ast = c_Parser().parse(TokenStream(tokens)).toAst()
    return str(AstPrettyPrintable(ast))
def preprocessed(sourcecode, skipIncludes=False):
    """Return the preprocessed token stream rendered back to source text."""
    factory = PreProcessorFactory()
    preprocessor = factory.create([], [os.path.dirname(sourcecode.resource)], skipIncludes=skipIncludes)
    tokens, _symbols = preprocessor.process(sourcecode, dict())
    return tokens.toString()
# (golden-file name, transform function) pairs; load_tests below creates
# one CastTest per pair for every numbered case directory.
transformations = [
    ('pptok', pptok),
    ('ppparse', ppparse),
    ('ppast', ppast),
    ('ctok', ctok),
    ('cparse', cparse),
    ('cast', cast),
    ('preprocessed', preprocessed)
]
def load_tests(loader, tests, pattern):
    """unittest hook: build the suite from numbered dirs in test/cases.

    For each case: optionally a gcc-comparison test, then one CastTest per
    transformation. A missing golden file is generated from the current
    output, so first runs self-bless their expectations.
    """
    testDirectories = os.listdir(directory)
    suite = unittest.TestSuite()
    for path in testDirectories:
        # Only directories whose name is an integer are test cases.
        try:
            int(path)
        except ValueError:
            continue
        path = os.path.join(directory, path)
        sourcePath = os.path.join(path, 'source.c')
        sourcecode = SourceCode(sourcePath, open(sourcePath))
        # A leading `// ...` comment in source.c carries per-case options.
        options = []
        if sourcecode.sourceCode[:2] == '//':
            options = sourcecode.sourceCode[2:sourcecode.sourceCode.find('\n')].strip().split(' ')
        skipIncludes = False
        if 'no-includes' in options:
            skipIncludes = True
        if 'no-gcc' not in options:
            suite.addTest(CastVersusGccTest('test_doesCastPreprocessExactlyLikeGccDoes', path))
        for (expected, transformFunction) in transformations:
            expectedPath = os.path.join(path, expected)
            # Closure factory: binds the loop variables by value so each
            # test keeps its own transform (avoids late-binding pitfall).
            def func(sourcecode, transformFunction, skipIncludes):
                def ret():
                    return transformFunction(sourcecode, skipIncludes).rstrip()
                return ret
            if not os.path.exists(expectedPath):
                # Golden file missing: bless current output as expected.
                actual = transformFunction(sourcecode, skipIncludes).rstrip()
                fp = open(expectedPath, 'w')
                fp.write(actual + '\n')
                fp.close()
            suite.addTest( CastTest(path, expectedPath, func(sourcecode,transformFunction, skipIncludes)) )
    return suite
|
UTF-8
|
Python
| false | false | 2,012 |
3,350,074,503,061 |
5884a43225a7404f70731e12f8c2d5d303e81525
|
cd7c6da245c167cf6f3dcdf7b7629d0d3dbbb895
|
/editor/structdata/__init__.py
|
7983585e2fedba81dc66e3df1b5ef2d6060fbc45
|
[
"MIT"
] |
permissive
|
develersrl/rooms
|
https://github.com/develersrl/rooms
|
029c6e310d014e54115369954640f46215250fb2
|
064d9c047df57bcc6d24ff2702cee9b925a6fb5d
|
refs/heads/master
| 2021-01-15T18:46:15.436146 | 2014-10-24T11:09:29 | 2014-10-24T11:09:29 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""Package in cui sono inserite tutte le classi necessarie per i dati.
Le classi base per rappresentare i dati sono:
- Room
- Area
- Event
- Image
- Item
- ItemRequirement
- Param
- Var
- VarRequirement
Ogni classe rappresenta un tag necessario per rappresentare l'informazione.
Nella classe Origin si ha la funzione dictionary che ritorna un dizionario
le cui coppie (chiave, valore) sono rispettivamente gli attributi i valori
di ogni tag. In ogni classe si ha una variabile di classe che da il nome
del tag che la classe rappresenta.
Le informazioni di tutto il progetto sono contenute nella classe Project.
Importando il modulo project.py si puo' utilizzare la variabile globale
g_project che contiene tutte le informazioni
g_project e' un singleton che punta sempre al progetto corrente e esiste
sempre un solo progetto corrente
E' possibile utilizzare un dizionario chiamato class_tag in cui tutti gli
elementi sono le classi del progetto. In particolare la coppia chiave valore
del dizionario e' data da (tag_name_classe, classe)
E' possibile importare le singole classi del progetto direttamente da
struct data
"""
# Maps each model class's tag_name to the class itself; populated at the
# bottom of this module once all model classes are imported.
class_tag = {}
# BUG FIX: the original listed the dict object `class_tag` itself as the
# first element of __all__; __all__ entries must be strings naming the
# module's public attributes.
__all__ = ["class_tag", "Room", "Area", "Action", "Image", "Item",
           "ItemRequirement", "Param", "Var", "VarRequirement", "World",
           "DialogStep", "Dialog", "DialogLink", "g_project"]
from origin import OriginData
from action import Action
from area import Area
from event import Event
from image import Image
from item import Item
from itemRequirement import ItemRequirement
from param import Param
from room import Room
from var import Var
from varRequirement import VarRequirement
from world import World
from dialog import Dialog
from dialoglink import DialogLink
from dialogstep import DialogStep
from project import g_project
from inspect import isclass
# Auto-register every OriginData subclass imported above under its tag name,
# so a tag read from XML can be mapped back to its model class.
for key, cls in globals().items():
    if isclass(cls) and issubclass(cls, OriginData) and cls != OriginData:
        class_tag[cls.tag_name] = cls
|
UTF-8
|
Python
| false | false | 2,014 |
12,446,815,225,129 |
6f657da6dd9e60f30ad0e9a4455286ba3c9fa5bd
|
3196460db64eded2daa77457643c8dd1ed1ba99e
|
/codeforces/steve/141-A-worst.py
|
97ba68ea70cc92ddcabf3764bbff19c4eff06e40
|
[] |
no_license
|
prototypemagic/proto-mastery
|
https://github.com/prototypemagic/proto-mastery
|
94c649958792f00ea2a057b63ed0f7717b5ab05d
|
45f7ef2e998fa7dbc071f5c42217a83fd9340f51
|
refs/heads/master
| 2020-05-28T08:55:45.769199 | 2012-09-10T22:12:00 | 2012-09-10T22:12:00 | 3,097,117 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# Steve Phillips / elimisteve
# 2012.01.08
# Codeforces 141-A: the shuffled pile must be an exact anagram of
# guest name + host name (Python 2 script: raw_input / print statement).
name = raw_input()
host = raw_input()
both = raw_input()
# Compare sorted character multisets of (name + host) and the pile.
name_host = list(name+host)
name_host.sort()
both = list(both)
both.sort()
if name_host == both:
    print "YES"
else:
    print "NO"
|
UTF-8
|
Python
| false | false | 2,012 |
6,382,321,410,839 |
12e58e51d24ac7d58cb7ecbd7e6e49103f23011a
|
068a6b8247e9668eed1956efb6e68e06a96ffba6
|
/plugins/misc.py
|
e755164a94d826654bba07555fc5fd43fd31afbb
|
[] |
no_license
|
f1redead/lupinebot
|
https://github.com/f1redead/lupinebot
|
1e6441a8f1d1a304f25179d65971ceda078b9bee
|
330055a41377a11c54b0726cc7a26fcfe21bd89f
|
refs/heads/master
| 2021-01-10T18:59:13.445892 | 2014-12-09T19:03:13 | 2014-12-09T19:03:13 | 19,583,792 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# *-* encoding: utf-8 *-*
# This is example cBot plugin
def ping(event, body, args, bot, message_type):
    # Liveness check: reply "pong!" to the sender.
    bot.reply(event, "pong!")
def say(event, body, args, bot, message_type):
    # Echo the given text as the bot itself, without addressing a nickname.
    bot.reply(event, body[1], with_nickname = False)
def join(event, body, args, bot, message_type):
    # Join a MUC room; silently ignores calls without exactly two args
    # (conference JID and nickname).
    if len(args) == 2:
        conference, nickname = args
        bot.join_muc(conference, nickname)
# Command registry consumed by the bot core: maps each command name to its
# handler, description (Russian, shown to users), privilege level, aliases,
# and whether the command prefix is required.
metainfo = {
    'ping': {'function': ping, 'descr': "Понг!", 'privlevel': 1, 'aliases': ['пинг'], 'need_prefix': False},
    'say': {'function': say, 'descr': "Отправит сообщение от имени бота", 'privlevel': 100, 'aliases': ['сказать'], 'need_prefix': True},
    'join': {'function': join, 'descr': "Зайти в комнату", 'privlevel': 100, 'aliases': ['зайти'], 'need_prefix': True}
}
|
UTF-8
|
Python
| false | false | 2,014 |
16,441,134,824,834 |
8dc7649ea9faf9777be829f2b2d9927988e4af80
|
68edb933c25c7231d7ee6b14e44181f8ea928efa
|
/src/pageobjects/components.py
|
c05492ea77b77ae19ccc79548a24b4b0618e2106
|
[] |
no_license
|
esusekov/tech-testing-ha2
|
https://github.com/esusekov/tech-testing-ha2
|
3575a25b42e3a5b472eba85f68f534cce4e436b8
|
0b4bbc3657c41864c915984e90711bee5fc0d810
|
refs/heads/master
| 2020-12-28T22:23:35.041329 | 2014-10-21T22:10:45 | 2014-10-21T22:10:45 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from selenium.webdriver import ActionChains
from selenium.webdriver.support.ui import Select, WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.common.keys import Keys
import os
root_path = os.path.dirname(os.path.abspath(__file__))
class Component(object):
    """Base page-object component; holds the shared WebDriver instance."""
    def __init__(self, driver):
        self.driver = driver
class AuthForm(Component):
    """Login form: credential inputs, a domain selector, and submit."""

    LOGIN = '#id_Login'
    PASSWORD = '#id_Password'
    DOMAIN = '#id_Domain'
    SUBMIT = '#gogogo>input'

    def set_login(self, login):
        """Type the login into the username field."""
        field = self.driver.find_element_by_css_selector(self.LOGIN)
        field.send_keys(login)

    def set_password(self, pwd):
        """Type the password into the password field."""
        field = self.driver.find_element_by_css_selector(self.PASSWORD)
        field.send_keys(pwd)

    def set_domain(self, domain):
        """Pick a domain from the drop-down by its visible label."""
        dropdown = self.driver.find_element_by_css_selector(self.DOMAIN)
        Select(dropdown).select_by_visible_text(domain)

    def submit(self):
        """Click the login button."""
        self.driver.find_element_by_css_selector(self.SUBMIT).click()
class TopMenu(Component):
    """Top navigation bar; exposes the logged-in user's e-mail."""

    EMAIL = '#PH_user-email'

    def get_email(self):
        """Wait up to 30s for the e-mail label and return its text."""
        waiter = WebDriverWait(self.driver, 30, 0.1)
        return waiter.until(
            lambda d: d.find_element_by_css_selector(self.EMAIL).text
        )
class Slider(Component):
    """Price-slider handle; supports horizontal drag by pixel offset."""

    SLIDER = '.price-slider__begunok'

    def move(self, offset):
        """Drag the slider handle `offset` pixels horizontally."""
        handle = WebDriverWait(self.driver, 30, 0.1).until(
            lambda d: d.find_element_by_css_selector(self.SLIDER)
        )
        chain = ActionChains(self.driver)
        chain.click_and_hold(handle).move_by_offset(offset, 0).perform()
class BaseSettings(Component):
    """Campaign 'base settings' panel: product type and campaign name."""
    GAME_RADIOBOX = '#product-type-5208'
    MM_GAME_RADIOBOX = '#pad-mail_mir_abstract'
    CAMPAIGN_NAME_INPUT = '.base-setting__campaign-name__input'
    MM_LABEL = '.base-setting__pads-item__label'
    def choose_game(self):
        """Select the game product-type radio button."""
        game_radiobox = WebDriverWait(self.driver, 30, 0.1).until(
            lambda d: d.find_element_by_css_selector(self.GAME_RADIOBOX)
        )
        game_radiobox.click()
    def choose_mm(self):
        """Select the 'mail_mir' game placement radio button."""
        mm_radiobox = WebDriverWait(self.driver, 30, 0.1).until(
            lambda d: d.find_element_by_css_selector(self.MM_GAME_RADIOBOX)
        )
        mm_radiobox.click()
    def set_campaign_name(self, name):
        """Clear the campaign-name input and type the new name."""
        campaign = WebDriverWait(self.driver, 30, 0.1).until(
            lambda d: d.find_element_by_css_selector(self.CAMPAIGN_NAME_INPUT)
        )
        campaign.clear()
        campaign.send_keys(name)
    def get_mm_label_text(self):
        """Return the 'mail_mir' placement label text."""
        return WebDriverWait(self.driver, 30, 0.1).until(
            lambda d: d.find_element_by_css_selector(self.MM_LABEL).text
        )
    def is_right_platforms_opened(self):
        """True when the MM game and both OK placement controls are visible."""
        OK_PROFILES_CHECKBOX = '#pad-odkl_profiles_abstract'
        OK_APPS_CHECKBOX = '#pad-odkl_games_abstract'
        wait = WebDriverWait(self.driver, 30, 0.1)
        mm_game = wait.until(
            lambda d: d.find_element_by_css_selector(self.MM_GAME_RADIOBOX)
        )
        ok_profiles = wait.until(
            lambda d: d.find_element_by_css_selector(OK_PROFILES_CHECKBOX)
        )
        ok_apps = wait.until(
            lambda d: d.find_element_by_css_selector(OK_APPS_CHECKBOX)
        )
        return mm_game.is_displayed() and ok_profiles.is_displayed() and ok_apps.is_displayed()
class BannerForm(Component):
    """Banner creation form: image upload, title, text, url, and submit."""
    # %s is filled with the input's data-name attribute value.
    BANNER_INPUT = '.banner-form__input[data-name="%s"]'
    FILE_PATH = root_path + '/ya.png'
    SUBMIT_BANNER = '.banner-form__save-button'
    def set_image(self):
        """Upload the bundled test image and confirm the crop dialog."""
        SUBMIT_IMAGE = '.image-cropper__save'
        image = WebDriverWait(self.driver, 30, 0.1).until(
            lambda d: d.find_element_by_css_selector('.banner-form__img-file')
        )
        image.send_keys(self.FILE_PATH)
        #self.driver.execute_script("var xxx = document.querySelector('.jcrop-holder').children[0]; xxx.style.width = xxx.style.height = '100px';")
        submit_button = WebDriverWait(self.driver, 30, 0.1).until(
            lambda d: d.find_element_by_css_selector(SUBMIT_IMAGE)
        )
        submit_button.click()
    def set_title(self, title_text):
        """Type the banner title."""
        title = WebDriverWait(self.driver, 30, 0.1).until(
            lambda d: d.find_element_by_css_selector(self.BANNER_INPUT % 'title')
        )
        title.send_keys(title_text)
    def set_text(self, text_value):
        """Type the banner body text."""
        text = WebDriverWait(self.driver, 30, 0.1).until(
            lambda d: d.find_element_by_css_selector(self.BANNER_INPUT % 'text')
        )
        text.send_keys(text_value)
    def set_url(self, url_text):
        """Type the target URL.

        NOTE(review): uses find_elements (plural) and writes into the
        second match — presumably the visible one of two url inputs;
        confirm against the page markup.
        """
        url = WebDriverWait(self.driver, 30, 0.1).until(
            lambda d: d.find_elements_by_css_selector(self.BANNER_INPUT % 'url')
        )
        url[1].send_keys(url_text)
    def submit(self):
        """Wait for a preview image, then click save via an action chain."""
        wait = WebDriverWait(self.driver, 30, 0.1)
        banner_preview = BannerPreview(self.driver)
        wait.until(
            banner_preview.wait_for_image
        )
        submit_btn = wait.until(expected_conditions.element_to_be_clickable((By.CSS_SELECTOR, self.SUBMIT_BANNER)))
        ac = ActionChains(self.driver)
        ac.move_to_element(submit_btn).click(submit_btn).perform()
class BannerPreview(Component):
    """Read-only accessors for the live banner preview block."""

    PREVIEW_BLOCK = '.added-banner'
    PREVIEW_TITLE = '.banner-preview__title'
    PREVIEW_TEXT = '.banner-preview__text'
    PREVIEW_IMAGE = '.banner-preview__img'

    def set_preview_block(self):
        # NOTE(review): rebinds self.driver to the preview WebElement so the
        # getters below search inside the block; after this call self.driver
        # is no longer the WebDriver itself.
        self.driver = WebDriverWait(self.driver, 30, 0.1).until(
            expected_conditions.visibility_of_element_located((By.CSS_SELECTOR, self.PREVIEW_BLOCK))
        )

    def get_title(self):
        """Return the preview title text (retries while it is empty/falsy)."""
        return WebDriverWait(self.driver, 30, 0.1).until(
            lambda d: d.find_element_by_css_selector(self.PREVIEW_TITLE).text
        )

    def get_text(self):
        """Return the preview body text (retries while it is empty/falsy)."""
        return WebDriverWait(self.driver, 30, 0.1).until(
            lambda d: d.find_element_by_css_selector(self.PREVIEW_TEXT).text
        )

    def get_image(self):
        """Return the first *displayed* preview image element."""
        return WebDriverWait(self.driver, 30, 0.1).until(
            self.wait_for_image
        )

    def wait_for_image(self, driver):
        """Expected-condition callable: first displayed image, else None.

        Returning None (falsy) makes WebDriverWait keep polling.
        """
        images = driver.find_elements_by_css_selector(self.PREVIEW_IMAGE)
        for image in images:
            if image.is_displayed():
                return image

    def get_image_size(self):
        """Return the image's (width, height) as CSS property strings."""
        image = self.get_image()
        width = image.value_of_css_property("width")
        height = image.value_of_css_property("height")
        return width, height
class IncomeBlock(Component):
    """Targeting-settings block for the audience income group."""

    # Checkbox element ids for the three income levels.
    ABOVE_AVERAGE = '#income_group-9288'
    AVERAGE = '#income_group-9287'
    BELOW_AVERAGE = '#income_group-9286'
    INCOME_BLOCK = '.all-settings__item[data-name="income_group"]'
    UNCOLLAPSE_BTN = '.campaign-setting__value'
    INPUT = '.campaign-setting__input'

    def set_income_block(self):
        # NOTE(review): rebinds self.driver to the income-group WebElement so
        # all later lookups are scoped to this block.
        self.driver = WebDriverWait(self.driver, 30, 1).until(
            expected_conditions.visibility_of_element_located((By.CSS_SELECTOR, self.INCOME_BLOCK))
        )

    def uncollapse_income_group(self):
        """Expand the collapsed income-group setting."""
        uncollapse = WebDriverWait(self.driver, 30, 1).until(
            lambda d: d.find_element_by_css_selector(self.UNCOLLAPSE_BTN)
        )
        uncollapse.click()

    def check_above_avg(self):
        """Toggle the "above average" checkbox."""
        above_avg = WebDriverWait(self.driver, 30, 0.1).until(
            lambda d: d.find_element_by_css_selector(self.ABOVE_AVERAGE)
        )
        above_avg.click()

    def check_avg(self):
        """Toggle the "average" checkbox."""
        avg = WebDriverWait(self.driver, 30, 0.1).until(
            lambda d: d.find_element_by_css_selector(self.AVERAGE)
        )
        avg.click()

    def check_below_avg(self):
        """Toggle the "below average" checkbox."""
        below_avg = WebDriverWait(self.driver, 30, 0.1).until(
            lambda d: d.find_element_by_css_selector(self.BELOW_AVERAGE)
        )
        below_avg.click()

    def what_is_checked(self):
        """Return the selection state as {'above_avg'|'avg'|'below_avg': bool}.

        Relies on the inputs appearing in above/average/below DOM order.
        """
        items = WebDriverWait(self.driver, 30, 0.1).until(
            lambda d: d.find_elements_by_css_selector(self.INPUT)
        )
        data = {
            'above_avg': False,
            'avg': False,
            'below_avg': False
        }
        if items[0].is_selected():
            data['above_avg'] = True
        if items[1].is_selected():
            data['avg'] = True
        if items[2].is_selected():
            data['below_avg'] = True
        return data
class DateBlock(Component):
    """Campaign start/end date settings block."""

    DATE_BLOCK = '.all-settings__item[data-name="date"]'
    UNCOLLAPSE_BTN = '.campaign-setting__value'
    DATEPICKER = '.hasDatepicker[data-name="%s"]'  # %s = data-name of the target input

    def set_date_block(self):
        # NOTE(review): rebinds self.driver to the date-block WebElement.
        self.driver = WebDriverWait(self.driver, 30, 1).until(
            expected_conditions.visibility_of_element_located((By.CSS_SELECTOR, self.DATE_BLOCK))
        )

    def uncollapse_datepickers(self):
        """Expand the collapsed date setting."""
        uncollapse = WebDriverWait(self.driver, 30, 1).until(
            lambda d: d.find_element_by_css_selector(self.UNCOLLAPSE_BTN)
        )
        uncollapse.click()

    def open_datepicker(self, type, driver):
        """Click the input named *type* and return a DatePicker over *driver*.

        NOTE: the parameter name shadows the builtin ``type``; kept
        unchanged for caller (keyword-argument) compatibility.
        """
        date_picker = WebDriverWait(self.driver, 30, 0.1).until(
            lambda d: d.find_element_by_css_selector(self.DATEPICKER % type)
        )
        date_picker.click()
        return DatePicker(driver)

    def get_datepicker_value(self, type):
        """Return the current value of the input named *type*."""
        date_picker = WebDriverWait(self.driver, 30, 0.1).until(
            lambda d: d.find_element_by_css_selector(self.DATEPICKER % type)
        )
        return date_picker.get_attribute('value')

    def set_datepicker_value(self, type, date):
        """Type *date* into the input named *type* and confirm with Enter."""
        date_picker = WebDriverWait(self.driver, 30, 1).until(
            lambda d: d.find_element_by_css_selector(self.DATEPICKER % type)
        )
        date_picker.send_keys(date)
        date_picker.send_keys(Keys.RETURN)
class DatePicker(Component):
    """Driver for the jQuery-UI datepicker popup."""

    MONTH = '.ui-datepicker-month'
    YEAR = '.ui-datepicker-year'
    NEXT = '.ui-datepicker-next'
    PREV = '.ui-datepicker-prev'
    DAY = '[data-handler="selectDay"]'

    def set_month(self, month):
        """Select *month* by its visible name in the month dropdown."""
        dropdown = self.driver.find_element_by_css_selector(self.MONTH)
        Select(dropdown).select_by_visible_text(month)

    def set_year(self, year):
        """Select *year* by its visible text in the year dropdown."""
        dropdown = self.driver.find_element_by_css_selector(self.YEAR)
        Select(dropdown).select_by_visible_text(year)

    def pick_day(self, day):
        """Click the *day*-th selectable day cell (1-based)."""
        cells = WebDriverWait(self.driver, 30, 0.1).until(
            lambda drv: drv.find_elements_by_css_selector(self.DAY)
        )
        cells[day - 1].click()

    def next_month(self):
        """Advance the calendar one month forward."""
        self._wait_and_click(self.NEXT)

    def prev_month(self):
        """Move the calendar one month back."""
        self._wait_and_click(self.PREV)

    def _wait_and_click(self, selector):
        # Shared wait-then-click helper for the two arrow buttons.
        button = WebDriverWait(self.driver, 30, 0.1).until(
            lambda drv: drv.find_element_by_css_selector(selector)
        )
        button.click()
class CampaignInfo(Component):
    """Reads the campaign name and id from the campaign header."""

    CAMPAIGN = '.campaign-title__name'
    CAMPAIGN_ID = '.campaign-title__id'

    def get_campaign_name(self):
        """Return the displayed campaign name (retries while empty)."""
        waiter = WebDriverWait(self.driver, 30, 0.1)
        return waiter.until(
            lambda drv: drv.find_element_by_css_selector(self.CAMPAIGN).text
        )

    def get_campaign_id(self):
        """Return the campaign id text with its final character stripped."""
        waiter = WebDriverWait(self.driver, 30, 0.1)
        return waiter.until(
            lambda drv: drv.find_element_by_css_selector(self.CAMPAIGN_ID).text[:-1]
        )
class CampaignActions(Component):
    """Edit/delete controls of a campaign row."""

    EDIT = '.control__link_edit'
    DELETE = '.control__preset_delete'

    def edit_campaign(self):
        """Click the row's "edit" control."""
        waiter = WebDriverWait(self.driver, 30, 0.1)
        waiter.until(
            lambda drv: drv.find_element_by_css_selector(self.EDIT)
        ).click()

    def delete_campaign(self):
        """Click the row's "delete" control."""
        waiter = WebDriverWait(self.driver, 30, 0.1)
        waiter.until(
            lambda drv: drv.find_element_by_css_selector(self.DELETE)
        ).click()
class CreateCampaign(Component):
    """The "create campaign" main button."""

    CREATE = '.main-button__label'

    def create(self):
        """Click the create-campaign button."""
        waiter = WebDriverWait(self.driver, 30, 0.1)
        waiter.until(
            lambda drv: drv.find_element_by_css_selector(self.CREATE)
        ).click()
|
UTF-8
|
Python
| false | false | 2,014 |
8,340,826,504,405 |
75a2c03416fe26f2c26e6188c137e6bf48b0caee
|
bbe53d0171efbc78ca43f409b4a5235df51f36fa
|
/mySuanming/src/_1004_qq_num_jixiong_f/qq_num_jixiong.py
|
6567e397fba21e6712a2be8fa49cde1a5a0bcc82
|
[] |
no_license
|
brianwang/gftop
|
https://github.com/brianwang/gftop
|
2758ec93e326ba5e801af48f951c73b5761bb25d
|
12a48eafb5114da325515fce4b97e744638e6faf
|
refs/heads/master
| 2021-01-12T08:16:43.816679 | 2012-12-12T16:25:29 | 2012-12-12T16:25:29 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#coding=utf-8
import csv,codecs,random
"""
高飞 于2009年5月11日
"""
def getDicts():
    """Parse qq_num_jixiong.db (CSV) into two global lookup dictionaries.

    Each cell has the form "key|text".  Rows 5-14 feed dictExplanation
    (keyed by a single digit); rows from 16 onward feed dictAnalysis
    (keyed by the remainder).  Python 2 code.
    """
    myfile=open('qq_num_jixiong.db','r')
    reader=csv.reader(myfile)
    seqLine=0 # tracks which CSV row is currently being read
    global dictExplanation # the "detailed explanation" texts
    dictExplanation={}
    global dictAnalysis # the "analysis" texts
    dictAnalysis={}
    for row in reader:
        #print row  -- cannot display the Chinese text directly
        #print type(row)  -- row is a list
        #print 'row length %d'%len(row)
        """
        Assign into the two dictionaries
        """
        for item in row: # explanations occupy rows 5-15; analyses run from row 16 to the end (row 97)
            #print '%d\t%s'%(seqLine,item)
            posBranch=item.find('|')
            if seqLine in range(5,15):
                dictExplanation[item[:posBranch]]=item[posBranch:]
            elif seqLine>15:
                dictAnalysis[item[:posBranch]]=item[posBranch:]
        # NOTE(review): indentation was lost in this dump; seqLine is placed
        # at per-row level to match its "current row" meaning -- confirm.
        seqLine=seqLine+1
    # # debug: dump both dictionaries
    # #print dictExplanation
    # print '----dictionary holding the explanations----'
    # for k,v in dictExplanation.items():
    #     print k,v
    # print '\n\n----dictionary holding the analyses----'
    # for k,v in dictAnalysis.items():
    #     print k,v
    myfile.close()
def getRemainder():
    """Pick a random QQ number and derive the two global lookup keys.

    Sets factOfExplanation (digit sum of QQNum % 80, wrapped into 0-9)
    and factOfAnalysis (QQNum % 80 itself); also returns both.
    Python 2 code (print statement, integer division).
    """
    #QQNum=int(raw_input('请输入您的QQ号码:'))
    QQNum=random.randrange(11111,9999999999)
    #QQNum=170579652
    remainder=QQNum%80
    print '系统随机生成的QQ号码是',QQNum#,'余数是',remainder
    shiweishu=remainder/10 # tens digit of the remainder (Py2 floor division)
    geweishu=remainder%10 # ones digit of the remainder
    #print 'ones digit %d, tens digit %d'%(shiweishu,geweishu)
    global factOfExplanation # key into dictExplanation
    factOfExplanation=shiweishu+geweishu
    if factOfExplanation>9: # explanation keys run 0-9, so wrap larger sums
        factOfExplanation=factOfExplanation-10
    #print 'explanation key: %d'%factOfExplanation
    global factOfAnalysis # key into dictAnalysis
    factOfAnalysis=remainder
    #print 'analysis key: %d'%factOfAnalysis
    return factOfAnalysis,factOfExplanation
def getResults(factOfExplanation,factOfAnalysis):
    """Print the explanation and analysis texts for the given keys.

    Keys are converted to strings because the dicts are keyed by the
    text read from the CSV file.  Python 2 code.
    """
    print '详解:%s'%dictExplanation[str(factOfExplanation)]
    print '分析:%s'%dictAnalysis[str(factOfAnalysis)]
# Script entry point: load the tables, draw a random QQ number, then print
# its reading (getRemainder sets the two global keys consumed below).
getDicts()
getRemainder()
getResults(factOfExplanation, factOfAnalysis)
|
UTF-8
|
Python
| false | false | 2,012 |
2,370,821,977,642 |
9ed02632a21c8c5e8c2ead3550b4aac93df921f8
|
7a38b004824ca1176d3de15339258e6fbe5d0d9e
|
/dr_lib/hlr_process_ref_data.py
|
02acdcc77b89c8ceb5eff130c4ffe57ff7c15d00
|
[] |
no_license
|
ornl-ndav/HLRedux
|
https://github.com/ornl-ndav/HLRedux
|
5464ef21097fc44b6e051800e86abb27bcecedaa
|
74c0949fbf7fcdf0d22e217c069d56dbe6a8069e
|
refs/heads/master
| 2021-01-10T06:03:37.456779 | 2011-03-17T20:48:19 | 2011-03-17T20:48:19 | 49,585,147 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# High-Level Reduction Functions
# A part of the SNS Analysis Software Suite.
#
# Spallation Neutron Source
# Oak Ridge National Laboratory, Oak Ridge TN.
#
#
# NOTICE
#
# For this software and its associated documentation, permission is granted
# to reproduce, prepare derivative works, and distribute copies to the public
# for any purpose and without fee.
#
# This material was prepared as an account of work sponsored by an agency of
# the United States Government. Neither the United States Government nor the
# United States Department of Energy, nor any of their employees, makes any
# warranty, express or implied, or assumes any legal liability or
# responsibility for the accuracy, completeness, or usefulness of any
# information, apparatus, product, or process disclosed, or represents that
# its use would not infringe privately owned rights.
#
# $Id$
def process_ref_data(datalist, conf, signal_roi_file, bkg_roi_file=None,
                     no_bkg=False, **kwargs):
    """
    This function combines Steps 1 through 6 in section 2.4.5 of the data
    reduction process for Reflectometers (without Monitors) as specified by
    the document at
    U{http://neutrons.ornl.gov/asg/projects/SCL/reqspec/DR_Lib_RS.doc}. The
    function takes a list of file names, a L{hlr_utils.Configure} object,
    signal and background region-of-interest (ROI) files and an optional flag
    about background subtraction and processes the data accordingly.

    @param datalist: The filenames of the data to be processed
    @type datalist: C{list} of C{string}s

    @param conf: Object that contains the current setup of the driver
    @type conf: L{hlr_utils.Configure}

    @param signal_roi_file: The file containing the list of pixel IDs for the
                            signal region of interest.
    @type signal_roi_file: C{string}

    @param bkg_roi_file: The file containing the list of pixel IDs for the
                         (possible) background region of interest.
    @type bkg_roi_file: C{string}

    @param no_bkg: (OPTIONAL) Flag which determines if the background will be
                   calculated and subtracted.
    @type no_bkg: C{boolean}

    @param kwargs: A list of keyword arguments that the function accepts:

    @keyword inst_geom_dst: Object that contains the instrument geometry
                            information.
    @type inst_geom_dst: C{DST.getInstance()}

    @keyword dataset_type: The practical name of the dataset being processed.
                           The default value is I{data}.
    @type dataset_type: C{string}

    @keyword tof_cuts: Time-of-flight bins to remove (zero) from the data
    @type tof_cuts: C{list} of C{string}s

    @keyword no_tof_cuts: Flag to stop application of the TOF cuts
    @type no_tof_cuts: C{boolean}

    @keyword timer: Timing object so the function can perform timing
                    estimates.
    @type timer: C{sns_timer.DiffTime}

    @return: Object that has undergone all requested processing steps
    @rtype: C{SOM.SOM}
    """
    import common_lib
    import dr_lib
    import hlr_utils

    # Check keywords
    try:
        dataset_type = kwargs["dataset_type"]
    except KeyError:
        dataset_type = "data"

    # Only "data" and "norm" are meaningful below (they pick conf attributes).
    if dataset_type != "data" and dataset_type != "norm":
        raise RuntimeError("Please use data or norm to specify the dataset "\
                           +"type. Do not understand how to handle %s." \
                           % dataset_type)

    try:
        t = kwargs["timer"]
    except KeyError:
        t = None

    try:
        i_geom_dst = kwargs["inst_geom_dst"]
    except KeyError:
        i_geom_dst = None

    try:
        tof_cuts = kwargs["tof_cuts"]
    except KeyError:
        tof_cuts = None

    no_tof_cuts = kwargs.get("no_tof_cuts", False)

    so_axis = "time_of_flight"

    # Step 0: Open data files and select signal (and possible background) ROIs
    if conf.verbose:
        print "Reading %s file" % dataset_type

    # Normalization runs may carry their own data paths.
    if len(conf.norm_data_paths) and dataset_type == "norm":
        data_path = conf.norm_data_paths.toPath()
    else:
        data_path = conf.data_paths.toPath()

    (d_som1, b_som1) = dr_lib.add_files_bg(datalist,
                                           Data_Paths=data_path,
                                           SO_Axis=so_axis,
                                           dataset_type=dataset_type,
                                           Signal_ROI=signal_roi_file,
                                           Bkg_ROI=bkg_roi_file,
                                           Verbose=conf.verbose,
                                           Timer=t)

    if t is not None:
        t.getTime(msg="After reading %s " % dataset_type)

    if i_geom_dst is not None:
        i_geom_dst.setGeometry(conf.data_paths.toPath(), d_som1)

    # Calculate delta t over t
    if conf.verbose:
        print "Calculating delta t over t"

    dtot = dr_lib.calc_deltat_over_t(d_som1[0].axis[0].val)

    # Calculate delta theta over theta
    if conf.verbose:
        print "Calculating delta theta over theta"

    dr_lib.calc_delta_theta_over_theta(d_som1, dataset_type)

    # Step 1: Sum all spectra along the low resolution direction
    # Set sorting
    (y_sort,
     cent_pixel) = hlr_utils.get_ref_integration_direction(conf.int_dir,
                                                           conf.inst,
                                                  d_som1.attr_list.instrument)

    if dataset_type == "data":
        d_som1.attr_list["ref_sort"] = y_sort

    d_som1A = dr_lib.sum_all_spectra(d_som1, y_sort=y_sort, stripe=True,
                                     pixel_fix=cent_pixel)

    # Large spectrum objects are deleted as soon as they are consumed to
    # keep the peak memory footprint down.
    del d_som1

    if b_som1 is not None:
        b_som1A = dr_lib.sum_all_spectra(b_som1, y_sort=y_sort, stripe=True,
                                         pixel_fix=cent_pixel)
        del b_som1
    else:
        b_som1A = b_som1

    # Set the TOF cuts
    if no_tof_cuts:
        tof_cut_min = None
        tof_cut_max = None
    else:
        tof_cut_min = conf.tof_cut_min
        tof_cut_max = conf.tof_cut_max

    # Cut the spectra if necessary
    d_som2 = dr_lib.cut_spectra(d_som1A, tof_cut_min, tof_cut_max)
    del d_som1A
    if b_som1A is not None:
        b_som2 = dr_lib.cut_spectra(b_som1A, tof_cut_min, tof_cut_max)
        del b_som1A
    else:
        b_som2 = b_som1A

    # Fix TOF cuts to make them list of integers
    try:
        tof_cuts = [int(x) for x in tof_cuts]
    # This will trigger if tof_cuts is None
    except TypeError:
        pass

    d_som3 = dr_lib.zero_bins(d_som2, tof_cuts)
    del d_som2
    if b_som2 is not None:
        b_som3 = dr_lib.zero_bins(b_som2, tof_cuts)
        del b_som2
    else:
        b_som3 = b_som2

    if conf.dump_specular:
        # When cuts were skipped above, apply them just for the dump file.
        if no_tof_cuts:
            d_som3_1 = dr_lib.cut_spectra(d_som3, conf.tof_cut_min,
                                          conf.tof_cut_max)
        else:
            d_som3_1 = d_som3
        hlr_utils.write_file(conf.output, "text/Spec", d_som3_1,
                             output_ext="sdc",
                             extra_tag=dataset_type,
                             verbose=conf.verbose,
                             data_ext=conf.ext_replacement,
                             path_replacement=conf.path_replacement,
                             message="specular TOF information")
        del d_som3_1

    # Steps 2-4: Determine background spectrum
    if conf.verbose and not no_bkg:
        print "Determining %s background" % dataset_type

    # dataset_type was validated at the top, so peak_excl is always bound.
    if dataset_type == "data":
        peak_excl = conf.data_peak_excl
    elif dataset_type == "norm":
        peak_excl = conf.norm_peak_excl

    # Prefer an explicit background ROI; otherwise estimate the background
    # from the signal data itself using the peak-exclusion range.
    if b_som3 is not None:
        B = dr_lib.calculate_ref_background(b_som3, no_bkg, conf.inst, None,
                                            aobj=d_som3)
    else:
        B = dr_lib.calculate_ref_background(d_som3, no_bkg, conf.inst,
                                            peak_excl)
    if t is not None:
        t.getTime(msg="After background determination")

    if not no_bkg and conf.dump_bkg:
        if no_tof_cuts:
            B_1 = dr_lib.cut_spectra(B, conf.tof_cut_min, conf.tof_cut_max)
        else:
            B_1 = B
        hlr_utils.write_file(conf.output, "text/Spec", B_1,
                             output_ext="bkg",
                             extra_tag=dataset_type,
                             verbose=conf.verbose,
                             data_ext=conf.ext_replacement,
                             path_replacement=conf.path_replacement,
                             message="background TOF information")
        del B_1

    # Step 5: Subtract background spectrum from data spectra
    if not no_bkg:
        d_som4 = dr_lib.subtract_bkg_from_data(d_som3, B,
                                               verbose=conf.verbose,
                                               timer=t,
                                               dataset1="data",
                                               dataset2="background")
    else:
        d_som4 = d_som3

    del d_som3

    if not no_bkg and conf.dump_sub:
        if no_tof_cuts:
            d_som4_1 = dr_lib.cut_spectra(d_som4, conf.tof_cut_min,
                                          conf.tof_cut_max)
        else:
            d_som4_1 = d_som4
        hlr_utils.write_file(conf.output, "text/Spec", d_som4_1,
                             output_ext="sub",
                             extra_tag=dataset_type,
                             verbose=conf.verbose,
                             data_ext=conf.ext_replacement,
                             path_replacement=conf.path_replacement,
                             message="subtracted TOF information")
        del d_som4_1

    # Store the average delta t over t as a dataset attribute.
    dtot_int = dr_lib.integrate_axis_py(dtot, avg=True)
    param_key = dataset_type+"-dt_over_t"
    d_som4.attr_list[param_key] = dtot_int[0]

    if conf.store_dtot:
        d_som4.attr_list["extra_som"] = dtot

    # Step 6: Scale by proton charge
    pc = d_som4.attr_list[dataset_type+"-proton_charge"]
    pc_new = hlr_utils.scale_proton_charge(pc, "C")
    d_som5 = common_lib.div_ncerr(d_som4, (pc_new.getValue(), 0.0))
    del d_som4

    return d_som5
|
UTF-8
|
Python
| false | false | 2,011 |
10,007,273,831,974 |
db1992e32d8e4f9980c0d7bb960dc2d02a657bc1
|
b8fbf8a00e82dcdc3495ea84fbed61dd6341ea4f
|
/solutions/projecteuler-p15.py
|
cd6edf970caf994dca8184235ea71d3d6844d282
|
[] |
no_license
|
handleart/projecteuler
|
https://github.com/handleart/projecteuler
|
ee4368646e462dd0dac9610116eae6d91de20459
|
be1c0a689946d12bf90ba14c0d9539743c6dd384
|
refs/heads/master
| 2021-01-18T16:27:57.173938 | 2014-10-02T00:59:41 | 2014-10-02T00:59:41 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#Project Euler, Problem 15
#How many (such) routes are there through a 20x20 grid?
# A monotonic route through an m x n lattice makes exactly m + n moves,
# n of which are in one direction, so the count is the binomial
# coefficient C(m + n, n) = (m + n)! / (m! * n!).
import math


def lattice_paths(m, n):
    """Return the number of monotonic lattice routes through an m x n grid.

    Generalizes the original hard-coded 20x20 computation.  Uses
    math.comb so the result is an exact int; the original expression
    ``mn_fac / (n_fac * m_fac)`` produced a float under Python 3's true
    division.
    """
    return math.comb(m + n, n)


n = 20
m = 20
print(lattice_paths(m, n))
|
UTF-8
|
Python
| false | false | 2,014 |
10,934,986,784,970 |
195b614648b5e3953b7c0afc30e03e78585eb1cf
|
4abec4e2939ca8098c5676de20b30ed7f02cfaf5
|
/lib/python/Plugins/SystemPlugins/VFDGiga/plugin.py
|
10d05d0a61c7ca4dd1429c1c0c95f121ccffb532
|
[
"LicenseRef-scancode-unknown-license-reference",
"GPL-2.0-only"
] |
non_permissive
|
SIFTeam/enigma2
|
https://github.com/SIFTeam/enigma2
|
e8d06a06236990487d907b79fa6deb40a18f7b01
|
359f50594a1469a52f7c42d9577188d17b793f19
|
refs/heads/master
| 2021-01-19T05:36:32.462012 | 2013-12-26T13:02:34 | 2013-12-26T13:02:34 | 2,660,535 | 3 | 5 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from Screens.Screen import Screen
from Plugins.Plugin import PluginDescriptor
from Components.Console import Console
from Components.Button import Button
from Components.ActionMap import ActionMap
from Components.ConfigList import ConfigList
from Components.config import config, configfile, ConfigSubsection, ConfigEnableDisable, \
getConfigListEntry, ConfigInteger, ConfigSelection, ConfigYesNo
from Components.ConfigList import ConfigListScreen
from Tools.Directories import resolveFilename, SCOPE_PLUGINS
from enigma import evfd, iPlayableService, eServiceCenter
from os import system
from Plugins.Plugin import PluginDescriptor
from Components.ServiceEventTracker import ServiceEventTracker
from Components.ServiceList import ServiceList
from Screens.InfoBar import InfoBar
# Persistent plugin settings: led behaviour, clock display and 12/24h mode.
config.plugins.VFD_Giga = ConfigSubsection()
config.plugins.VFD_Giga.showClock = ConfigEnableDisable(default = True)
config.plugins.VFD_Giga.setLed = ConfigYesNo(default = True)
# Front-panel led colour codes as passed to evfd.vfd_led().
led = {"0":"None","1":"Blue","2":"Red","3":"Purple"}
config.plugins.VFD_Giga.ledRUN = ConfigSelection(led, default = "1")
config.plugins.VFD_Giga.ledSBY = ConfigSelection(led, default = "2")
config.plugins.VFD_Giga.ledREC = ConfigSelection(led, default = "3")
config.plugins.VFD_Giga.timeMode = ConfigSelection(default = "24h", choices = [("12h"),("24h")])
class Channelnumber:
    """Writes the current channel number to /proc/vfd on every zap."""

    def __init__(self, session):
        self.session = session
        self.onClose = [ ]
        # Re-run __eventInfoChanged whenever the playing service's event
        # info updates.
        self.__event_tracker = ServiceEventTracker(screen=self,eventmap=
            {
                iPlayableService.evUpdatedEventInfo: self.__eventInfoChanged
            })

    def __eventInfoChanged(self):
        """Push the (new) channel number to the front-panel display."""
        service = self.session.nav.getCurrentService()
        info = service and service.info()
        if info is None:
            chnr = "---"
        else:
            chnr = self.getchannelnr()
        # Drop the enigma references before the proc write.
        info = None
        service = None
        open("/proc/vfd", "w").write(chnr + '\n')

    def getchannelnr(self):
        """Return the current channel number as a string ("---" if unknown).

        Marker entries in the bouquet are skipped so the number matches
        the on-screen channel list.  NOTE(review): indentation was lost in
        this dump; the post-loop renumbering is placed at method level so
        chnr is always bound -- confirm against upstream.
        """
        if InfoBar.instance is None:
            chnr = "---"
            return chnr
        MYCHANSEL = InfoBar.instance.servicelist
        markersOffset = 0
        myRoot = MYCHANSEL.getRoot()
        mySrv = MYCHANSEL.servicelist.getCurrent()
        chx = MYCHANSEL.servicelist.l.lookupService(mySrv)
        if not MYCHANSEL.inBouquet():
            pass
        else:
            serviceHandler = eServiceCenter.getInstance()
            mySSS = serviceHandler.list(myRoot)
            SRVList = mySSS and mySSS.getContent("SN", True)
            # Count marker entries ("64" service type) before the current
            # position; they occupy list slots but carry no channel number.
            for i in range(len(SRVList)):
                if chx == i:
                    break
                testlinet = SRVList[i]
                testline = testlinet[0].split(":")
                if testline[1] == "64":
                    markersOffset = markersOffset + 1
        chx = (chx - markersOffset) + 1
        rx = MYCHANSEL.getBouquetNumOffset(myRoot)
        chnr = str(chx + rx)
        return chnr
ChannelnumberInstance = None

def initVFD():
    """Restore led colour and clock display from /etc/vfdled at startup.

    /etc/vfdled holds str() of a list written by VFD_GigaSetup.save():
    [setLed, ledRUN, ledSBY, ledREC, showClock, timeMode] (all strings).
    """
    # BUG FIX: the file object itself was passed to eval(), which raises
    # TypeError; eval() needs the file's *contents*.  eval of this local
    # config file is kept for format compatibility, but note it executes
    # whatever the file contains.
    forledx = file('/etc/vfdled','r')
    forled = eval(forledx.read())
    forledx.close()
    if forled[0] == 'True':
        evfd.getInstance().vfd_led(str(forled[1]))
    else:
        evfd.getInstance().vfd_led(str(0))
    if forled[4] == 'True':
        # Clock enabled: re-assert the RUN led and switch the clock on.
        evfd.getInstance().vfd_led(str(forled[1]))
        forcmd = '1'
    else:
        evfd.getInstance().vfd_led(str(0))
        forcmd = '0'
    cmd = 'echo '+str(forcmd)+' > /proc/stb/fp/display_clock'
    res = system(cmd)
class VFD_GigaSetup(ConfigListScreen, Screen):
    """Setup screen for the front-panel led colours and clock display."""

    def __init__(self, session, args = None):
        self.skin = """
            <screen position="100,100" size="500,210" title="VFD_Giga Setup" >
                <widget name="config" position="20,15" size="460,150" scrollbarMode="showOnDemand" />
                <ePixmap position="40,165" size="140,40" pixmap="skin_default/buttons/green.png" alphatest="on" />
                <ePixmap position="180,165" size="140,40" pixmap="skin_default/buttons/red.png" alphatest="on" />
                <widget name="key_green" position="40,165" size="140,40" font="Regular;20" backgroundColor="#1f771f" zPosition="2" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
                <widget name="key_red" position="180,165" size="140,40" font="Regular;20" backgroundColor="#9f1313" zPosition="2" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
            </screen>"""
        Screen.__init__(self, session)
        self.onClose.append(self.abort)
        self.list = []
        ConfigListScreen.__init__(self, self.list)
        self.createSetup()
        self.Console = Console()
        self["key_red"] = Button(_("Cancel"))
        self["key_green"] = Button(_("Save"))
        self["key_yellow"] = Button(_("Update Date/Time"))
        self["setupActions"] = ActionMap(["SetupActions"],
            {
                "save": self.save,
                "cancel": self.cancel,
                "ok": self.save,
            }, -2)

    def createSetup(self):
        """(Re)build the config list; applies led/clock state immediately."""
        self.list = []
        self.list.append(getConfigListEntry(_("Enable led"), config.plugins.VFD_Giga.setLed))
        self.ledenable = config.plugins.VFD_Giga.setLed.value
        if self.ledenable == True:
            self.list.append(getConfigListEntry(_("Led state RUN"), config.plugins.VFD_Giga.ledRUN))
            self.list.append(getConfigListEntry(_("Led state Standby"), config.plugins.VFD_Giga.ledSBY))
            self.list.append(getConfigListEntry(_("Led state Record"), config.plugins.VFD_Giga.ledREC))
            evfd.getInstance().vfd_led(str(config.plugins.VFD_Giga.ledRUN.value))
        else:
            evfd.getInstance().vfd_led("0")
        self.list.append(getConfigListEntry(_("Show clock"), config.plugins.VFD_Giga.showClock))
        if config.plugins.VFD_Giga.showClock.value == True:
            self.list.append(getConfigListEntry(_("Time mode"), config.plugins.VFD_Giga.timeMode))
            cmd = 'echo 1 > /proc/stb/fp/display_clock'
        else:
            cmd = 'echo 0 > /proc/stb/fp/display_clock'
            evfd.getInstance().vfd_led("0")
        res = system(cmd)
        self["config"].list = self.list
        self["config"].l.setList(self.list)

    def newConfig(self):
        """React to a value change on the highlighted config entry."""
        if self["config"].getCurrent()[0] == 'Enable led':
            self.ledenable = config.plugins.VFD_Giga.setLed.value
            self.createSetup()
        if self["config"].getCurrent()[0][:3] == 'Led':
            # Preview the chosen RUN colour right away.
            evfd.getInstance().vfd_led(str(config.plugins.VFD_Giga.ledRUN.value))
        if self["config"].getCurrent()[0] == 'Show clock':
            self.createSetup()

    def keyLeft(self):
        ConfigListScreen.keyLeft(self)
        self.newConfig()

    def keyRight(self):
        ConfigListScreen.keyRight(self)
        self.newConfig()

    def abort(self):
        print "aborting"

    def save(self):
        """Persist all entries and mirror them into /etc/vfdled for initVFD."""
        for x in self["config"].list:
            x[1].save()
        configfile.save()
        # /etc/vfdled layout: [setLed, ledRUN, ledSBY, ledREC, showClock, timeMode]
        forfile = []
        forfile.append(str(config.plugins.VFD_Giga.setLed.value))
        forfile.append(str(config.plugins.VFD_Giga.ledRUN.value))
        forfile.append(str(config.plugins.VFD_Giga.ledSBY.value))
        forfile.append(str(config.plugins.VFD_Giga.ledREC.value))
        forfile.append(str(config.plugins.VFD_Giga.showClock.value))
        forfile.append(str(config.plugins.VFD_Giga.timeMode.value))
        fp = file('/etc/vfdled','w')
        fp.write(str(forfile))
        fp.close()
        self.close()

    def cancel(self):
        """Revert all unsaved changes and close the screen."""
        for x in self["config"].list:
            x[1].cancel()
        self.close()
class VFD_Giga:
    """Runtime driver: applies the RUN led and installs the channel tracker."""

    def __init__(self, session):
        print "VFD_Giga initializing"
        self.session = session
        self.service = None
        self.onClose = [ ]
        self.Console = Console()
        evfd.getInstance().vfd_led(str(config.plugins.VFD_Giga.ledRUN.value))
        global ChannelnumberInstance
        # Create the /proc/vfd channel tracker only once per process.
        if ChannelnumberInstance is None:
            ChannelnumberInstance = Channelnumber(session)

    def shutdown(self):
        self.abort()

    def abort(self):
        print "VFD_Giga aborting"
def main(menuid):
    """Menu hook: contribute a "VFD_Giga" entry to the system menu only."""
    if menuid == "system":
        return [(_("VFD_Giga"), startVFD, "VFD_Giga", None)]
    return [ ]
def startVFD(session, **kwargs):
    """Open the VFD_Giga setup screen (menu-entry callback)."""
    session.open(VFD_GigaSetup)
# Shared state between the autostart/sessionstart hooks and controlgigaVfd().
gigaVfd = None    # active VFD_Giga instance (None while stopped)
gReason = -1      # last autostart reason: 0 = start, 1 = shutdown
mySession = None  # enigma session captured at WHERE_SESSIONSTART
def controlgigaVfd():
    """Start or stop the VFD driver based on the captured global state."""
    global gigaVfd
    global gReason
    global mySession
    if gReason == 0 and mySession != None and gigaVfd == None:
        print "Starting VFD_Giga"
        gigaVfd = VFD_Giga(mySession)
    elif gReason == 1 and gigaVfd != None:
        print "Stopping VFD_Giga"
        import time
        # Write local wall-clock time into the front-panel RTC on shutdown;
        # tm_isdst selects the correct UTC offset (timezone vs. altzone).
        if time.localtime().tm_isdst == 0:
            forsleep = int(time.time())-time.timezone
        else:
            forsleep = int(time.time())-time.altzone
        try:
            open("/proc/stb/fp/rtc", "w").write(str(forsleep))
        except IOError:
            print "setRTCtime failed!"
        gigaVfd = None
def sessionstart(reason, **kwargs):
    """Autostart hook: capture the session or the reason, then re-evaluate.

    Enigma calls this both for WHERE_SESSIONSTART (with a "session"
    kwarg) and WHERE_AUTOSTART (with just the reason); controlgigaVfd()
    acts once both pieces of state are known.
    """
    print "AutoStarting VFD_Giga"
    global gigaVfd
    global gReason
    global mySession
    if kwargs.has_key("session"):
        mySession = kwargs["session"]
    else:
        gReason = reason
    controlgigaVfd()
def Plugins(**kwargs):
    """Plugin registry: autostart/sessionstart hook plus the setup menu item."""
    return [ PluginDescriptor(where=[PluginDescriptor.WHERE_AUTOSTART, PluginDescriptor.WHERE_SESSIONSTART], fnc=sessionstart),
             PluginDescriptor(name="VFD_Giga", description="Change VFD display settings",where = PluginDescriptor.WHERE_MENU, fnc = main) ]
|
UTF-8
|
Python
| false | false | 2,013 |
14,293,651,194,718 |
e7d54f94eb388e028173788ac90e30070e89dfbe
|
14e30a875a6fee930414cda4d85de5c3f7089c6a
|
/python/wicow_stats/graph_years.py
|
43e824267d060c4d957688424ffde3044338e918
|
[] |
no_license
|
collielester86/think-link
|
https://github.com/collielester86/think-link
|
e874481919822572c6a209e23193611d6fd9ecbe
|
3ab7c8e6ec49bcdb1ed85a49628f68f5cacd634d
|
refs/heads/master
| 2017-12-01T05:33:47.421559 | 2010-07-12T21:00:27 | 2010-07-12T21:00:27 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# encoding: utf-8
"""
Generate a graph showing how the popularity of different nouns rose and fell over the years.
"""
|
UTF-8
|
Python
| false | false | 2,010 |
9,277,129,407,033 |
0400998542bc7305cbf48b41df5915d6e0cc24e6
|
63c89d672cb4df85e61d3ba9433f4c3ca39810c8
|
/python/testdata/launchpad/lib/lp/translations/browser/tests/test_product_view.py
|
e1e7bc40aa7a3b510041c3449f2823f4a26d8ce8
|
[
"AGPL-3.0-only",
"AGPL-3.0-or-later"
] |
non_permissive
|
abramhindle/UnnaturalCodeFork
|
https://github.com/abramhindle/UnnaturalCodeFork
|
de32d2f31ed90519fd4918a48ce94310cef4be97
|
e205b94b2c66672d264a08a10bb7d94820c9c5ca
|
refs/heads/master
| 2021-01-19T10:21:36.093911 | 2014-03-13T02:37:14 | 2014-03-13T02:37:14 | 17,692,378 | 1 | 3 |
AGPL-3.0
| false | 2020-07-24T05:39:10 | 2014-03-13T02:52:20 | 2018-01-05T07:03:31 | 2014-03-13T02:53:59 | 24,904 | 0 | 3 | 1 |
Python
| false | false |
# Copyright 2009 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
__metaclass__ = type
from soupmatchers import (
HTMLContains,
Tag,
)
from testtools.matchers import Not
from lp.app.enums import (
InformationType,
PUBLIC_PROPRIETARY_INFORMATION_TYPES,
ServiceUsage,
)
from lp.registry.interfaces.series import SeriesStatus
from lp.services.webapp import canonical_url
from lp.services.webapp.servers import LaunchpadTestRequest
from lp.testing import (
celebrity_logged_in,
login_person,
person_logged_in,
TestCaseWithFactory,
)
from lp.testing.layers import (
DatabaseFunctionalLayer,
LaunchpadZopelessLayer,
)
from lp.testing.views import (
create_initialized_view,
create_view,
)
from lp.translations.browser.product import ProductView
from lp.translations.publisher import TranslationsLayer
class TestProduct(TestCaseWithFactory):
    """Test Product view in translations facet."""

    layer = LaunchpadZopelessLayer

    def test_primary_translatable_with_package_link(self):
        # Create a product that uses translations.
        product = self.factory.makeProduct()
        series = product.development_focus
        product.translations_usage = ServiceUsage.LAUNCHPAD
        view = ProductView(product, LaunchpadTestRequest())
        # If development focus series is linked to
        # a distribution package with translations,
        # we do not try to show translation statistics
        # for the package.
        sourcepackage = self.factory.makeSourcePackage()
        sourcepackage.setPackaging(series, None)
        sourcepackage.distroseries.distribution.translations_usage = (
            ServiceUsage.LAUNCHPAD)
        self.factory.makePOTemplate(
            distroseries=sourcepackage.distroseries,
            sourcepackagename=sourcepackage.sourcepackagename)
        # assertIsNone replaces the deprecated assertEquals(None, ...) alias.
        self.assertIsNone(view.primary_translatable)

    def test_untranslatable_series(self):
        # Create a product that uses translations.
        product = self.factory.makeProduct()
        product.translations_usage = ServiceUsage.LAUNCHPAD
        view = ProductView(product, LaunchpadTestRequest())
        # New series are added, one for each type of status
        series_experimental = self.factory.makeProductSeries(
            product=product, name='evo-experimental')
        series_experimental.status = SeriesStatus.EXPERIMENTAL
        series_development = self.factory.makeProductSeries(
            product=product, name='evo-development')
        series_development.status = SeriesStatus.DEVELOPMENT
        series_frozen = self.factory.makeProductSeries(
            product=product, name='evo-frozen')
        series_frozen.status = SeriesStatus.FROZEN
        series_current = self.factory.makeProductSeries(
            product=product, name='evo-current')
        series_current.status = SeriesStatus.CURRENT
        series_supported = self.factory.makeProductSeries(
            product=product, name='evo-supported')
        series_supported.status = SeriesStatus.SUPPORTED
        series_obsolete = self.factory.makeProductSeries(
            product=product, name='evo-obsolete')
        series_obsolete.status = SeriesStatus.OBSOLETE
        series_future = self.factory.makeProductSeries(
            product=product, name='evo-future')
        series_future.status = SeriesStatus.FUTURE
        # The series are returned in alphabetical order and do not
        # include obsolete series.
        series_names = [series.name for series in view.untranslatable_series]
        self.assertEqual([
            u'evo-current',
            u'evo-development',
            u'evo-experimental',
            u'evo-frozen',
            u'evo-future',
            u'evo-supported',
            u'trunk'], series_names)
class TestCanConfigureTranslations(TestCaseWithFactory):
    """Permission and visibility checks for the translations config UI."""

    layer = DatabaseFunctionalLayer

    def test_cannot_configure_translations_product_no_edit_permission(self):
        product = self.factory.makeProduct()
        view = create_view(product, '+translations', layer=TranslationsLayer)
        self.assertEqual(False, view.can_configure_translations())

    def test_can_configure_translations_product_with_edit_permission(self):
        product = self.factory.makeProduct()
        login_person(product.owner)
        view = create_view(product, '+translations', layer=TranslationsLayer)
        self.assertEqual(True, view.can_configure_translations())

    def test_rosetta_expert_can_configure_translations(self):
        product = self.factory.makeProduct()
        with celebrity_logged_in('rosetta_experts'):
            view = create_view(product, '+translations',
                               layer=TranslationsLayer)
            self.assertEqual(True, view.can_configure_translations())

    def test_launchpad_not_listed_for_proprietary(self):
        # The LAUNCHPAD usage option is withheld for private products.
        product = self.factory.makeProduct()
        with person_logged_in(product.owner):
            for info_type in PUBLIC_PROPRIETARY_INFORMATION_TYPES:
                product.information_type = info_type
                view = create_initialized_view(
                    product, '+configure-translations',
                    layer=TranslationsLayer)
                if product.private:
                    self.assertNotIn(
                        ServiceUsage.LAUNCHPAD,
                        view.widgets['translations_usage'].vocabulary)
                else:
                    self.assertIn(
                        ServiceUsage.LAUNCHPAD,
                        view.widgets['translations_usage'].vocabulary)

    @staticmethod
    def getViewContent(view):
        # Render the view while logged in as the request's principal.
        with person_logged_in(view.request.principal):
            return view()

    @staticmethod
    def hasLink(url):
        # soupmatchers matcher: page contains an <a href=url>.
        return HTMLContains(Tag('link', 'a', attrs={'href': url}))

    @classmethod
    def getTranslationsContent(cls, product):
        view = create_initialized_view(product, '+translations',
                                       layer=TranslationsLayer,
                                       principal=product.owner)
        return cls.getViewContent(view)

    def test_no_sync_links_for_proprietary(self):
        # Proprietary products don't have links for synchronizing
        # productseries.
        product = self.factory.makeProduct()
        content = self.getTranslationsContent(product)
        series_url = canonical_url(
            product.development_focus, view_name='+translations',
            rootsite='translations')
        manual_url = canonical_url(
            product.development_focus, view_name='+translations-upload',
            rootsite='translations')
        automatic_url = canonical_url(
            product.development_focus, view_name='+translations-settings',
            rootsite='translations')
        self.assertThat(content, self.hasLink(series_url))
        self.assertThat(content, self.hasLink(manual_url))
        self.assertThat(content, self.hasLink(automatic_url))
        with person_logged_in(product.owner):
            product.information_type = InformationType.PROPRIETARY
        content = self.getTranslationsContent(product)
        self.assertThat(content, Not(self.hasLink(series_url)))
        self.assertThat(content, Not(self.hasLink(manual_url)))
        self.assertThat(content, Not(self.hasLink(automatic_url)))
|
UTF-8
|
Python
| false | false | 2,014 |
16,930,761,123,983 |
8a3902ce6ab133d8cdcbc108c9e727ba897525ba
|
53b7428662a59bb850043b25ad9346852786f059
|
/gen_compile.py
|
c8dc861a0db36348124682ac04d29c5594e56be6
|
[] |
no_license
|
zachaysan/r
|
https://github.com/zachaysan/r
|
dee2e5b05ead5508e196552e64663344fefbe02f
|
ed007f037a6a406b96b844efadd7f83f24cbac4d
|
refs/heads/master
| 2020-06-09T17:24:04.312123 | 2013-03-28T14:24:50 | 2013-03-28T14:24:50 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Accumulates the generated bash script text, starting with the shebang.
base = "#!/bin/bash\n\n"
# C++ translation units to compile besides main.cpp.
files = ["Logger/logger.cpp",
         "Timer/timer.cpp",
         "Input/keyboard.cpp",
         "Input/mouse.cpp",
         "Drawer/drawer.cpp",
         "Units/building.cpp",
         "Grid/grid.cpp"]
def compile_file(filename):
    """Return one shell line compiling *filename* to an object file,
    chained with '&&' so a failed compile aborts the generated script."""
    return "g++ {0} -c -Wall &&\n".format(filename)
# One compile line per source file, plus main.cpp.
for f in files:
    base += compile_file(f)
base += compile_file("main.cpp")
# Link line: main.o plus every object produced above (Foo/bar.cpp -> bar.o).
base += "g++ main.o "
for f in files:
    cpp = f.split("/")[-1]
    o = cpp.split(".cpp")[0] + ".o"
    base += o
    base += " "
base += "-o main -lSDL -lSDL_image -lSDL_ttf -Wall"
# Python 2 print statement: the generated script goes to stdout.
print base
|
UTF-8
|
Python
| false | false | 2,013 |
6,167,573,072,174 |
e948c35d4a676ee757ef3cdcdef74eeee574f5ae
|
00e4daea7daff5867bbe9ceb6f4d9273ae2bf550
|
/utils/gst_check.py
|
5a3304445c97d2c48a885ac2655a72da259679b5
|
[
"GPL-3.0-only"
] |
non_permissive
|
sequoiar/scenic
|
https://github.com/sequoiar/scenic
|
7c0aa0fbbf690e6de34cc63cab07a1f1c0c66027
|
23d7cf4d8403c58b3d4746b5afbf13f25a634ec2
|
refs/heads/master
| 2020-04-10T21:07:03.164036 | 2011-05-17T00:42:59 | 2011-05-17T00:42:59 | 1,770,235 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2008 Société des arts technologiques (SAT)
# http://www.sat.qc.ca
# All rights reserved.
#
# This file is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Sropulpof is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sropulpof. If not, see <http:#www.gnu.org/licenses/>.
#
""" Checks this host to ensure all gstreamer plugins needed by milhouse
are installed """
import glob
import os
import sys
import re
# Abort early with a clear message if the gst-python bindings are missing.
try:
    import pygst
    pygst.require('0.10')
    import gst
except ImportError:
    print("import failed, please install gst-python")
    sys.exit(1)
#FIXME: not very pythonic
# Verbose mode when the last CLI argument is --verbose or -v.
VERBOSE = sys.argv[-1] == '--verbose' or sys.argv[-1] == '-v'
# Get the full path to cpp files relative to the script location
cwd = os.path.dirname(os.path.realpath(__file__))
cpp_files = glob.glob(os.path.realpath(cwd + "/../src/gst") + "/*.cpp")
if (len(cpp_files) == 0):
    sys.stderr.write("No cpp files found. Make sure the script is located within source directory \"utils\".")
    sys.exit(2)
""" List of matches
codec_ = Pipeline::Instance()->makeElement("theoraenc", NULL);
if (source_ == "videotestsrc")
if (sink_ == "xvimagesink")
*coder(pipeline, "mad")
"""
# Regexes for the C++ call sites shown above; each consumes everything up
# to (and including) the opening double quote of the element name.
matches = [
    re.compile(r"^.*makeElement\(\""),
    re.compile(r"^.*source_ == \""),
    re.compile(r"^.*sink_ == \""),
    re.compile(r"^.*coder\(pipeline, \""),
    ]
# Consumes from the closing double quote to end of line.
end = re.compile(r"\".*$")
gst_plugins = []      # element names referenced by the C++ sources
missing_plugins = []  # required elements not installed on this host
for source_file in cpp_files:
try:
f = open(source_file)
for line in f:
for m in matches:
if (m.search(line) is not None):
"""
We want to push the element name in the gst_plugins list:
1) We strip the line
2) We substitute the match with the empty string
3) We strip all characters after the double quote
"""
gst_plugins.append((end.sub("", m.sub("", line.strip()))))
except IOError, e:
sys.stderr.write(e)
finally:
f.close()
# Deduplicate the collected element names and keep a stable order.
gst_plugins = sorted(set(gst_plugins))
# 'sharedvideosink' is built in-tree, not an installable plugin, so drop
# it from the check list.  list.remove raises ValueError when the name is
# absent; catch exactly that instead of the original bare `except:`.
try:
    gst_plugins.remove('sharedvideosink')
except ValueError:
    pass
# Plugins that only produce a warning when absent.
optional_plugins = ["dc1394src", "dv1394src", "dvdemux", "dvdec", "alsasrc", "alsasink", "pulsesrc", "pulsesink", "theoraenc", "theoradec", "lamemp3enc", "mp3parse", "mad", "x264enc", "ffenc_mpeg4", "ffenc_h263p", "celtenc", "celtdec"]
# Probe the gst registry for each referenced element; only *required*
# missing elements are appended to missing_plugins.
for plugin in gst_plugins:
    if gst.element_factory_find(plugin) is None:
        if plugin in optional_plugins:
            print("Warning: optional plugin " + plugin + " is NOT installed")
        else:
            print("Error: required plugin " + plugin + " is NOT installed")
            missing_plugins.append(plugin)
    else:
        if VERBOSE:
            print(plugin + " installed")
if VERBOSE:
    print("-------------------------------")
# Exit status: 0 when nothing required is missing, 1 otherwise.
if len(missing_plugins) == 0:
    if VERBOSE:
        print("All " + str(len(gst_plugins)) + " necessary plugins installed")
    sys.exit(0)
else:
    # NOTE(review): missing_plugins only ever contains non-optional names
    # (see append above), so missing_critical is always True here; the
    # loop is redundant but harmless.
    missing_critical = False
    for plugin in missing_plugins:
        if plugin not in optional_plugins:
            missing_critical = True
    print("You may have to install the corresponding development headers \
(i.e. lib<MODULE>-dev)")
    print("before building the missing gstreamer plugins")
    if missing_critical:
        sys.exit(1)
    else:
        sys.exit(0)
|
UTF-8
|
Python
| false | false | 2,011 |
9,749,575,802,767 |
309d38b2dd989a804a682e0aa08503f50277ada3
|
0e4467f56414689caeafa33e81ea6b9ff1429cbd
|
/amcat/scripts/actions/run_scraper.py
|
cf09b984b14ba9099daec0e37adb49d0b9c96a3e
|
[
"AGPL-3.0-only"
] |
non_permissive
|
larsmans/amcat
|
https://github.com/larsmans/amcat
|
a7f950d8a5e425f59376232a99f65da2aef3b02f
|
c9c0dbf77f0a929e7b3e7e7e96296b2ba54b5382
|
refs/heads/master
| 2021-01-12T22:07:50.279012 | 2014-01-15T10:17:42 | 2014-01-15T10:17:42 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
###########################################################################
# (C) Vrije Universiteit, Amsterdam (the Netherlands) #
# #
# This file is part of AmCAT - The Amsterdam Content Analysis Toolkit #
# #
# AmCAT is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Affero General Public License as published by the #
# Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# AmCAT is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public #
# License for more details. #
# #
# You should have received a copy of the GNU Affero General Public #
# License along with AmCAT. If not, see <http://www.gnu.org/licenses/>. #
###########################################################################
"""
Script run a scraper from the DB
"""
import logging; log = logging.getLogger(__name__)
from django import forms
from amcat.scripts.script import Script
from amcat.models.scraper import Scraper
from amcat.scraping.scraper import DBScraperForm
from amcat.scraping.controller import RobustController
class RunScraperForm(forms.Form):
    """Form selecting which registered scraper to run and for which date."""
    # Any Scraper row in the database is a valid choice.
    scraper = forms.ModelChoiceField(queryset=Scraper.objects.all())
    # Passed verbatim to Scraper.get_scraper(date=...); expected format is
    # whatever the chosen scraper accepts -- TODO confirm.
    date = forms.CharField()
class AddProject(Script):
    """Run a scraper from the database for a given date.

    NOTE(review): the original docstring said "Add a project to the
    database", a copy-paste error -- the script instantiates the selected
    scraper and runs it; the class name is equally misleading but is kept
    because external callers may reference it.
    """
    options_form = RunScraperForm
    output_type = None

    def run(self, _input=None):
        # Build the concrete scraper for the requested date and drive it
        # through a RobustController (which handles per-unit failures).
        scraper = self.options["scraper"].get_scraper(date=self.options["date"])
        controller = RobustController()
        controller.scrape(scraper)
if __name__ == '__main__':
    # Turn on debug logging for the scraping controller, then hand over
    # to the generic AmCAT command-line runner (parses args, calls run()).
    from amcat.tools import amcatlogging
    amcatlogging.debug_module("amcat.scraping.controller")
    from amcat.scripts.tools import cli
    cli.run_cli()
|
UTF-8
|
Python
| false | false | 2,014 |
16,234,976,380,483 |
674d480d2f7002ee3c3fc9e3e875cac011be0b8a
|
57f3f1c4368fbe790041d792ca025c138a07e934
|
/setup.py
|
2d6a6a3bb8fc1637e4cd4be68afdf0a5cb3ddc73
|
[] |
no_license
|
eelkeh/sylvester
|
https://github.com/eelkeh/sylvester
|
c51441a89d55b03355ceafa17e225d43db2c04d8
|
5ee58f3b37e6c026afd005fdc40fa63bfc6d2e05
|
refs/heads/master
| 2016-09-05T12:18:52.729918 | 2013-11-28T09:28:35 | 2013-11-28T09:28:35 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from setuptools import setup
import sys, os
version = '0.1'
settings = dict()
settings.update(
name='sylvester',
version=version,
description="High volume Twitter API client",
long_description="""\
""",
classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='twitter oauth api',
author='Eelke Hermens',
author_email='[email protected]',
url='',
license='MIT',
packages=['sylvester'],
zip_safe=False,
install_requires=[
# -*- Extra requirements: -*-
],
entry_points="""
# -*- Entry points: -*-
""",
)
setup(**settings)
|
UTF-8
|
Python
| false | false | 2,013 |
10,428,180,639,277 |
c72c3fe17c8cb2328289b1254e34045bd2fc1b8c
|
315f19dfeb181341b5692abdcf295a9113bf5cf9
|
/src/club_comments_scrape.py
|
813504c86b965984d37181bf9f320f4b188638b3
|
[] |
no_license
|
nil-/quiet-discourse
|
https://github.com/nil-/quiet-discourse
|
e554a4feb95a9da911d9d0cc2855e6460e48a878
|
45742c32140976f3ae30b3e804c66831ff7a369e
|
refs/heads/master
| 2020-05-17T07:14:41.215252 | 2014-08-03T10:38:40 | 2014-08-03T10:38:40 | 14,981,267 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python2
# This script scrapes all club comments.
from bs4 import BeautifulSoup
import re
import requests
import sys
# Python 2 hack so writing scraped unicode to files doesn't raise
# UnicodeEncodeError; reload() restores the setdefaultencoding attribute
# that site.py deletes.
reload(sys)
sys.setdefaultencoding('utf-8')
# Obtain the html for the forum frontpage
url = "http://myanimelist.net/clubs.php?id=40791&action=view&t=comments"
# NOTE(review): the prompt says "API key" but the value is sent as the
# User-Agent header -- confirm that is what the site expects.
headers = {
    'User-Agent': raw_input("Enter your API key: ")
}
html = requests.get(url, headers=headers).text
# Find the number of pages to go through
soup = BeautifulSoup(html, "lxml")
# The page count appears in parentheses inside this div's text.
num_pages = int(soup.find("div", attrs={'class': 'borderClass spaceit'}).text.split('(')[1].split(')')[0])
# Zero-pad output filenames so they sort lexicographically.
num_digits = len(str(num_pages))
for i in range(num_pages):
    # Page 0 was already fetched above; later pages paginate via &show=.
    if i > 0:
        url = 'http://myanimelist.net/clubs.php?id=40791&action=view&t=comments&show=' + str(i*20)
        html = requests.get(url, headers=headers).text
    f = open('club-comments/page_' + str(i+1).zfill(num_digits) + '.html', 'w')
    f.write(html)
    f.close()
    # Progress bar
    print 'Page ' + str(i+1) + '/' + str(num_pages) + ': True'
|
UTF-8
|
Python
| false | false | 2,014 |
14,637,248,581,278 |
a88a52f6334abc6d2eead2dcfc55f82ebd57c3e7
|
36b9916367278dfa4f512e5a5bc0d84970973816
|
/thief_reduction.py
|
6a9fc4151f07c9c0d594822e387d5377ce024ed7
|
[
"GPL-1.0-or-later",
"GPL-3.0-only"
] |
non_permissive
|
brianhouse/joyride
|
https://github.com/brianhouse/joyride
|
221f2b3abc5fcb529e6fb47706ebcf9b82a0c78a
|
e56b8649cc99168776883de1c0d4019e09391b20
|
refs/heads/master
| 2016-08-05T05:07:13.560843 | 2013-03-04T04:41:10 | 2013-03-04T04:41:10 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
import json, time, random, datetime, csv, urllib, cv
import numpy as np
from pprint import pprint
from housepy import net, science, drawing, util, log
from openpaths_video import *
# Column indices into each point record read from pulled_points.txt.
LON = 0
LAT = 1
T = 2
X = 3
Y = 4
ZOOM = 0.03
# NOTE(review): this immediately overrides the 0.03 above, so the outlier
# filter below effectively accepts every point -- confirm intended.
ZOOM = 10000
# Counter used by get_image() to name saved panorama stitches.
image_id = 0
if not os.path.isdir("sv_images"):
    os.mkdir("sv_images")
points = []
for i, line in enumerate(open("pulled_points.txt").readlines()):
    if i % 2 == 0: # things are double in the list for some reason
        points.append([(float(p) if p != "None" else None) for p in line.strip().split(' ')])
points = np.array(points)
# Drop points far from the median location (outlier rejection).
median_lon = np.median(points[:,0])
median_lat = np.median(points[:,1])
points = np.array([point for point in points if abs(point[0] - median_lon) < ZOOM and abs(point[1] - median_lat) < ZOOM])
max_lon = np.max(points[:,0])
min_lon = np.min(points[:,0])
max_lat = np.max(points[:,1])
min_lat = np.min(points[:,1])
points = list(points)
# Fill columns X/Y with the lon/lat normalized to [0, 1] for drawing.
for point in points:
    point[X] = util.scale(point[LON], min_lon, max_lon)
    point[Y] = util.scale(point[LAT], min_lat, max_lat)
def get_streetview(heading, point=None, panoid=None):
    """Fetch Streetview panorama metadata for a location or panorama id,
    save a stitched image of the view, and return where to go next.

    heading -- desired bearing in degrees
    point   -- sequence indexed by the module LON/LAT constants
    panoid  -- Streetview panorama id (used when point is None)
    Returns ((lon, lat), next_panoid) on success, None when the metadata
    download or parsing fails.
    """
    if point is not None:
        panoid_url = "http://cbk0.google.com/cbk?output=json&ll=%s,%s" % (point[LAT], point[LON])
    elif panoid is not None:
        panoid_url = "http://cbk0.google.com/cbk?output=json&panoid=%s" % panoid
    else:
        # Previously this fell through with panoid_url unbound (NameError).
        raise ValueError("get_streetview() needs either point or panoid")
    # print(panoid_url)
    try:
        # BUG FIX: the original called urllib2.urlopen, but only urllib is
        # imported at the top of this file, so every call raised NameError.
        connection = urllib.urlopen(panoid_url)
        json_data = json.loads(''.join(connection.readlines()))
        panoid = json_data['Location']['panoId']
        lon, lat = float(json_data['Location']['lng']), float(json_data['Location']['lat'])
        # Panorama orientation, plus the opposite direction along the road.
        yaw = float(json_data['Projection']['pano_yaw_deg'])
        alt_yaw = (yaw + 180) % 360
        links = json_data['Links']
    except Exception as e:
        print("JSON download failed: %s" % panoid_url)
        print(log.exc(e))
        return None
    get_image(panoid, heading, yaw, alt_yaw)
    return (lon, lat), get_closest_link(links, heading)
def get_closest_link(links, heading):
    """Pick the Streetview link whose yaw is closest to *heading*.

    Returns that link's panoId, or None when even the best candidate is
    more than 90 degrees off course (or links is empty).
    """
    best_difference = 1000
    best_panoid = None
    best_heading = None
    for link in links:
        link_heading = float(link['yawDeg'])
        difference = science.angular_difference(link_heading, heading)
        # print("--> %f (%f)" % (link_heading, difference))
        if difference < best_difference:
            best_difference = difference
            best_heading = link_heading
            best_panoid = link['panoId']
    print("LINK HEADING: %s" % best_heading)
    return None if best_difference > 90 else best_panoid
def get_image(panoid, heading, yaw, alt_yaw):
    """Download the two panorama tiles facing *heading*, stitch them into
    a 1024x512 image, save it under an incrementing id, and display it.

    The tile pair aligned with the panorama yaw or its opposite is chosen,
    whichever is closer to the requested heading.
    """
    yaw_dist = science.angular_difference(yaw, heading)
    alt_dist = science.angular_difference(alt_yaw, heading)
    if yaw_dist <= alt_dist:
        url_left = "http://cbk0.google.com/cbk?output=tile&panoid=%s&zoom=3&x=2&y=1" % panoid
        url_right = "http://cbk0.google.com/cbk?output=tile&panoid=%s&zoom=3&x=3&y=1" % panoid
    else:
        url_left = "http://cbk0.google.com/cbk?output=tile&panoid=%s&zoom=4&x=12&y=3" % panoid
        url_right = "http://cbk0.google.com/cbk?output=tile&panoid=%s&zoom=4&x=0&y=3" % panoid
    # BUG FIX: the temp filenames used the stale module-level loop variable
    # `i` left over from the point-loading loop; key them on the panorama
    # id instead so the names are meaningful and collision-free.
    filepath_left = "sv_images/%s_left.jpg" % panoid
    filepath_right = "sv_images/%s_right.jpg" % panoid
    try:
        urllib.urlretrieve(url_left, filepath_left)
        urllib.urlretrieve(url_right, filepath_right)
    except Exception as e:
        print("Image download failed")
        return None, None
    # Stitch the two 512x512 tiles side by side.
    image_left = Image.open(filepath_left)
    image_right = Image.open(filepath_right)
    image = Image.new('RGB', (1024, 512))
    image.paste(image_left, (0, 0))
    image.paste(image_right, (512, 0))
    os.remove(filepath_left)
    os.remove(filepath_right)
    # Persist the stitched frame under a sequential id.
    global image_id
    filepath = "sv_images/%s.png" % image_id
    image.save(filepath, 'PNG')
    image_id += 1
    cv.ShowImage("streetview", drawing.pil_to_ipl(image))
    cv.WaitKey(5)
    print(url_left)
    print(url_right)
# Drawing surface for the map visualisation.
ctx = drawing.Context(1000, 1000, relative=True, flip=True, hsv=True)
while True:
    ctx.clear()
    # Outline every loaded point.
    for p, point in enumerate(points):
        ctx.arc(point[X], point[Y], 3 / ctx.width, thickness=1.0, stroke=(0.0, 0.0, 0.0))
        pass
    path_index = 0
    # Walk the recorded path segment by segment, following Streetview
    # links from each origin toward the next destination.
    while True:
        print("----------")
        origin = points[path_index]
        destination = points[path_index + 1]
        ctx.arc(origin[X], origin[Y], 3 / ctx.width, thickness=1.0, fill=(0.0, 0.0, 0.0))
        ctx.arc(destination[X], destination[Y], 3 / ctx.width, thickness=1.0, fill=(0.33, 1.0, 1.0))
        ctx.line(origin[X], origin[Y], destination[X], destination[Y], stroke=(0.0, 0.0, 0.5))
        ctx.frame()
        time.sleep(1)
        heading = science.heading((origin[X], origin[Y]), (destination[X], destination[Y]))
        real_heading = heading
        print("REAL HEADING: %s" % heading)
        # str() of visited fake points, used to detect loops below.
        fake_points = []
        result = get_streetview(heading, point=origin)
        if result is not None:
            fake_point, next_panoid = result
            fake_points.append(str(fake_point))
            # Keep hopping panoramas until no suitable link remains.
            while next_panoid is not None:
                print("--")
                x = util.scale(fake_point[LON], min_lon, max_lon)
                y = util.scale(fake_point[LAT], min_lat, max_lat)
                ctx.arc(x, y, 3 / ctx.width, thickness=0.0, fill=(0.55, 1.0, 1.0))
                ctx.frame()
                # time.sleep(1)
                fake_heading = science.heading((x, y), (destination[X], destination[Y]))
                print((x, y))
                print((destination[X], destination[Y]))
                print("FAKE HEADING: %s" % fake_heading)
                # NOTE(review): get_streetview can return None here, which
                # would make this unpack raise TypeError -- confirm.
                fake_point, next_panoid = get_streetview(fake_heading, panoid=next_panoid)
                # Stop if we revisit an earlier (non-previous) panorama.
                if str(fake_point) in fake_points and fake_points.index(str(fake_point)) != len(fake_points) - 1:
                    break
                fake_points.append(str(fake_point))
        path_index += 1
        if path_index == len(points) - 1:
            break
        ctx.frame()
    break
|
UTF-8
|
Python
| false | false | 2,013 |
3,255,585,212,534 |
82520f1153404051e846aa1af47f96a1c5705065
|
f3af2b2aaaa0c14980250bfdaadffd5c03418975
|
/homework2/hw2.py
|
9afd1b5f326bdcb11ee12900c2719074c422fd13
|
[] |
no_license
|
schkyl10/csf
|
https://github.com/schkyl10/csf
|
6bccbd99a3d57df1ee0e23738fce08c5b17df289
|
8ed5fa5a90da2a7fdb04e52bbcf2b9e7e9cd5424
|
refs/heads/master
| 2016-09-06T10:56:56.376612 | 2014-01-21T07:43:02 | 2014-01-21T07:43:02 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Name: Kyle Schaefer
# Evergreen Login: schkyl10
# Computer Science Foundations
# Programming as a Way of Life
# Homework 2
#problem 1 for gauss's problem
## This is the function used to solve problem 1
# This function adds consecutive numbers up to the amount entered using a while loop
def gauss(gauss_prob):
    """Return the sum 1 + 2 + ... + gauss_prob.

    gauss_prob -- how many consecutive natural numbers to add
    return -- their sum; 0 when gauss_prob < 1 (BUG FIX: the original
    loop initialised total to 1 and therefore returned 1 for zero or
    negative input)
    """
    if gauss_prob < 1:
        return 0
    total = 1
    p = 2
    while gauss_prob > 1:
        gauss_prob = gauss_prob - 1
        total = total + p
        p = p + 1
    return total
## This is the function used to solve problem 2
#this function takes a number and prints the reciprocals
def recip(recip_range):
    # NOTE(review): range(1, recip_range) stops at recip_range - 1, so the
    # reciprocal of recip_range itself is never printed -- confirm intended.
    for i in range(1,recip_range):
        # The parameter is reused as a scratch variable; range() already
        # captured the bound, so this is safe -- just confusing.
        recip_range = 1.0 / i
        print recip_range
## This is the function used to solve problem 3
## This function calculates the triangular numbers
def triang(triang_num):
    """Return the triang_num-th triangular number: 1 + 2 + ... + triang_num."""
    return sum(range(1, triang_num + 1))
## This is the function used to solve problem 4
## This function calculates factorials
def fact(factorial):
    """Return factorial! -- the product 1 * 2 * ... * factorial (1 for 0)."""
    product = 1  # multiplicative identity
    for factor in range(2, factorial + 1):
        product = product * factor
    return product
## This is the function used to solve problem 5
## This function prints each factorial down from whatever the input is
def revfact(fact_range):
    # NOTE(review): because of this increment the outer loop runs n+1
    # times for input n, so the run ends by printing fact(0) == 1 and the
    # value 1 appears twice at the end -- confirm intended.
    fact_range = fact_range + 1
    for i in range(1, fact_range + 1):
        fact_range = fact_range - 1
        # Recompute the factorial of the current (decremented) value.
        # The inner loop shadows the outer `i`; harmless in Python since
        # the outer range was already materialised.
        total = 1
        for i in range(1, fact_range + 1):
            total = total * i
        print total
## This is the function used to solve problem 6
## This function sums the reciprocals of the factorials 0! through n!
def sumrecipfact(total_range):
    """Return 1 + sum(1/k! for k = 1..total_range); approaches e."""
    factorial_so_far = 1
    running_sum = 1  # accounts for the 1/0! term
    for k in range(1, total_range + 1):
        factorial_so_far = factorial_so_far * k
        running_sum = running_sum + 1.0 / factorial_so_far
    return running_sum
## Satisfies the import requirement of the first problem: n is imported
## from hw2_test and solved; the "Problem 1" section below then solves the
## same problem for any user-supplied n.
from hw2_test import n
hw2_test = gauss(n)
print "\nThis is the answer received by importing n from mod hw2_test:", hw2_test
##
## Problem 1
##
# NOTE(review): Python 2 input() evaluates what the user types (like
# eval) -- float()/int() on raw_input() would be safer throughout.
print "\nProblem 1 solution follows:"
natural_num = input("\nWhat range of natural numbers do you wanted added: ")
natural_num = gauss(natural_num)
print "\nThe total sum of these natural numbers: ", natural_num
##
## Problem 2
##
print "\nProblem 2 solution follows:"
recip_num = input("\nEnter the range of numbers in which you want the reciprocal of: ")
print "\nThese are the reciprocals:"
recip_num = recip(recip_num)
##
## Problem 3
##
print "\nProblem 3 solution follows:"
triang_num = input("\nSWhat triangular number would you like: ")
triang_num = triang(triang_num)
print "\nThe triangular number is:", triang_num
##
## Problem 4
##
print "\nProblem 4 solution follows:"
num_factori = input("\nWhat factorial would you like calculated: ")
num_factori = fact (num_factori)
print "\nThe factorial Is:", num_factori
##
## Problem 5
##
print "\nProblem 5 solution follows:"
test = input("\nThis is to get the factorials of every number up to what you enter: ")
test = revfact(test)
##
## Problem 6
##
print "\nProblem 6 solution follows:"
recipfact = input("\nEnter the number you want the sum of reciprocals of factorials: ")
recipfact = sumrecipfact(recipfact)
print "\nThe sum of all the reciprocals of all the factorials you wanted is:", recipfact
print "\n\nThanks for looking at my homework!"
|
UTF-8
|
Python
| false | false | 2,014 |
9,354,438,776,251 |
4ac3f73a8ce5764e5c6447dc58867156f39fd13a
|
aa349f9257e45933a682a38df9ef41e0ae28c730
|
/procedureTest.py
|
b05d331f02edd682b2ad9a5bfe1da0b8935cc399
|
[] |
no_license
|
lcbasu/Pyhthon-Search-Engine
|
https://github.com/lcbasu/Pyhthon-Search-Engine
|
4f01d417e6e69268584ac7303db7f67df68959a8
|
988b36dea82f2c125b820562ad29b1bd35166b40
|
refs/heads/master
| 2021-05-27T09:45:45.082514 | 2014-03-28T10:53:44 | 2014-03-28T10:53:44 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def rest_of_string(s):
    """Return the input string with its first character removed
    (the empty string stays empty)."""
    tail = s[1:len(s)]
    return tail
# Demonstration: prints 'udacity'.
print rest_of_string('audacity')
# NOTE(review): this shadows the builtin sum() and has no return
# statement, so it prints its trace and returns None (see the call below).
def sum(a,b):
    print 'a is',a
    a=a+b
    print 'a is',a
    print 'exit without return statement, so the result is :'
# Prints the trace, then 'None' because sum() returns nothing.
print sum(1,2)
def sumModified(a,b):
    """Return a + b; works for any operands supporting +, e.g. strings."""
    return a + b
# String arguments concatenate: prints 'hellolokesh'.
print sumModified('hello','lokesh')
|
UTF-8
|
Python
| false | false | 2,014 |
13,228,499,282,043 |
1b359269d4cce3f440762c2b78c5838a9d36a6f3
|
f38012f86e65141559519d7a359bae37b0e3a151
|
/r_xmlrpclib/xmlrpclib_exception.py
|
fa232320cfb17ffbda20d0a27d01f0b410db889f
|
[] |
no_license
|
rApeNB/PyMOTW
|
https://github.com/rApeNB/PyMOTW
|
e73607444c86afabafd23d0dab7e4fdd0f60cbf1
|
166a0098ab68863161a518a668e33f543288599d
|
refs/heads/master
| 2020-04-13T11:40:39.880083 | 2014-12-14T15:13:31 | 2014-12-14T15:13:31 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
__author__ = 'rApeNB'
# -*- coding: utf-8 -*-
import xmlrpclib
# Connect to the companion demo XML-RPC server on localhost.
server = xmlrpclib.ServerProxy('http://localhost:9000')
try:
    server.raise_exception('A message')
except Exception as err:
    # Server-side errors surface as xmlrpclib.Fault objects carrying a
    # numeric fault code and the marshalled message string.
    print 'Fault code:', err.faultCode
    print 'Message:', err.faultString
|
UTF-8
|
Python
| false | false | 2,014 |
3,478,923,548,308 |
e8bf7df99ddb0ec551cfd6c3cc8f6e8f696b1e2d
|
cbe9fa9a03876814cb1abdcf04c1be94d12ccde6
|
/src/utils/computeFeatures.py
|
a6f5be5a30166d2c11ec3680e6f281826de6d43d
|
[] |
no_license
|
rohitgirdhar-cmu-experimental/MCGRegressSegment
|
https://github.com/rohitgirdhar-cmu-experimental/MCGRegressSegment
|
4dc459e7e5c7517a84738ea3288828a88ea4aa9f
|
436d82b0ccbecf38251197882670598e533b7ab9
|
refs/heads/master
| 2020-06-03T11:41:46.455262 | 2014-11-24T20:46:01 | 2014-11-24T20:46:01 | 26,454,073 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python2.7
import numpy as np
import scipy, scipy.io
import matplotlib.pyplot as plt
import argparse
import os, errno
import sys
import gc
import pdb # for debugging
def main():
caffe_root = '/exports/cyclops/software/vision/caffe/'
sys.path.insert(0, caffe_root + 'python')
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--imagesdir', type=str, required=True,
help='VOC images to process')
parser.add_argument('-r', '--resdir', type=str, required=True,
help='Results directory')
parser.add_argument('-f', '--feature', type=str, default='prediction',
help='could be prediction/fc7/pool5 etc')
args = parser.parse_args()
IMGS_DIR = args.imagesdir
FEAT = args.feature
RES_DIR = args.resdir
OUT_DIR = os.path.join(RES_DIR, 'features', FEAT)
SEL_MAT_PATH = os.path.join(RES_DIR, 'selProposals.mat')
if not os.path.exists(OUT_DIR):
mkdir_p(OUT_DIR)
sel = scipy.io.loadmat(SEL_MAT_PATH)
gc.collect() # required, loadmat is crazy with memory usage
import caffe
# Set the right path to your model definition file, pretrained model weights,
# and the image you would like to classify.
MODEL_FILE = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'deploy.prototxt')
mean = np.load(caffe_root + 'python/caffe/imagenet/ilsvrc_2012_mean.npy')
# convert into image for visualization and processing
meanImg = mean.swapaxes(0,1).swapaxes(1,2)
PRETRAINED = os.path.join(caffe_root, 'models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel')
net = caffe.Classifier(MODEL_FILE, PRETRAINED,
mean=mean, channel_swap=(2,1,0), raw_scale=255, image_dims=(256, 256))
net.set_phase_test()
net.set_mode_cpu()
nImgs = np.shape(sel['imgs'][0])[0]
topimgs = []
imgslist = sel['imgs'][0]
for i in range(nImgs):
topimgs.append(imgslist[i][0])
if not os.path.isdir(OUT_DIR):
mkdir_p(OUT_DIR)
count = 0
for topimg in topimgs:
count += 1
fpath = os.path.join(IMGS_DIR, topimg + '.jpg')
out_fpath = os.path.join(OUT_DIR, str(count) + '.txt')
lock_fpath = os.path.join(OUT_DIR, str(count) + '.lock')
if os.path.exists(lock_fpath) or os.path.exists(out_fpath):
print('Some other working on/done for %s\n' % fpath)
continue
mkdir_p(lock_fpath)
input_image = caffe.io.load_image(fpath)
seg_image = sel['masks'][0][count - 1]
idx = (seg_image == 0)
bbox = sel['bboxes'][count - 1]
input_image_crop = input_image[bbox[0] : bbox[2], bbox[1] : bbox[3]]
idx = idx[bbox[0] : bbox[2], bbox[1] : bbox[3]]
# WITHOUT MASKING!!!
# mean_temp = scipy.misc.imresize(meanImg, np.shape(idx))
# input_image_crop[idx] = meanImg[idx]
# input_image_final = scipy.misc.imresize(input_image_crop, (256, 256))
try:
input_image_res = scipy.misc.imresize(input_image_crop, (256, 256))
prediction = net.predict([input_image_res])
except:
print 'Unable to do for', topimg
rmdir_noerror(lock_fpath)
np.savetxt(out_fpath, [])
continue
if FEAT == 'prediction':
feature = prediction.flat
else:
feature = net.blobs[FEAT].data[0]; # Computing only 1 crop, by def is center crop
feature = feature.flat
np.savetxt(out_fpath, feature, '%.7f')
rmdir_noerror(lock_fpath)
print 'Done for %s (%d / %d)' % (topimg, count, len(topimgs))
def mkdir_p(path):
    """Create *path* and any missing parents, like `mkdir -p`: silently
    succeed when the directory already exists, re-raise anything else."""
    try:
        os.makedirs(path)
    except OSError as exc: # Python >2.5
        # Only swallow the "already exists as a directory" case.
        if exc.errno != errno.EEXIST or not os.path.isdir(path):
            raise
def rmdir_noerror(path):
    """Remove the directory *path*, ignoring a missing directory.

    BUG FIX: the original compared against errno.EEXIST, an error
    os.rmdir never raises, so *every* OSError (including the missing-
    directory case this helper exists for) was re-raised.  A missing
    directory (ENOENT) is now ignored; other errors such as ENOTEMPTY
    still propagate.
    """
    try:
        os.rmdir(path)
    except OSError as exc:
        if exc.errno != errno.ENOENT:
            raise
# Standard script entry point.
if __name__ == '__main__':
    main()
|
UTF-8
|
Python
| false | false | 2,014 |
1,133,871,377,925 |
f73fada5e8c5da3aee91d764fb821f602ad5d7de
|
01fddd91ce6823a4a6b40eb1c543d421b4982bda
|
/wiki/admin.py
|
54d6a3be3db8f358d58223fd1881b9b37b60155d
|
[
"MIT"
] |
permissive
|
MatteoNardi/wikiwik
|
https://github.com/MatteoNardi/wikiwik
|
8e076280d56e8408e7bbe235e05dd42546e4242e
|
723336363841d657c815910a506f4a60531c9b9e
|
refs/heads/master
| 2016-09-05T10:39:39.020024 | 2013-11-09T10:15:43 | 2013-11-09T10:15:43 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.contrib import admin
from wiki.models import Page, Picture
class PageAdmin(admin.ModelAdmin):
    """Admin configuration for wiki pages."""
    # Field order here controls the layout of the admin edit form;
    # `author` is excluded because it is set automatically below.
    fields = ('title', 'slug', 'content', 'css', 'mod_date', )

    def save_model(self, request, obj, form, change):
        """Stamp the page with the logged-in user as author before saving."""
        obj.author = request.user
        obj.save()
class PictureAdmin(admin.ModelAdmin):
    """Admin configuration for uploaded pictures."""
    fields = ['title', 'picture']
    # Only the title column is shown in the change-list view.
    list_display = ('title',)
# Expose both wiki models in the Django admin with the options above.
admin.site.register(Page, PageAdmin)
admin.site.register(Picture, PictureAdmin)
|
UTF-8
|
Python
| false | false | 2,013 |
13,700,945,681,595 |
864b41d5fed6a548f20dd64afe0b307cfc3c91b0
|
d9f0f2d88c97a242e39a222da434b3631b27c009
|
/setup.py
|
b7768e7a33f0aab442e20f65a973a911e8167355
|
[] |
no_license
|
stania/mdview
|
https://github.com/stania/mdview
|
7a3ac02a8cd600db621ed21a454efeb148784e63
|
f4f3e433407a43eff7cc21fbfb5a51f861352b31
|
refs/heads/master
| 2021-01-01T15:12:52.654198 | 2012-10-16T06:24:31 | 2012-10-16T06:24:31 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/cygdrive/c/Python27/python.exe
from distutils.core import setup
import py2exe
import resource
# Regenerate the _reslist helper module before py2exe collects modules
# (it is force-included via `includes` below).
resource.gen_reslist_py()
py2exe_options = dict(
    includes=['_reslist'],
    packages=["encodings", "email", "BaseHTTPServer", "httplib", "Cookie"],
    excludes=[#'_ssl', # Exclude _ssl
              'pyreadline', 'doctest', 'locale',
              'pickle', 'calendar'], # Exclude standard library
    dll_excludes=['msvcr71.dll', "w9xpopen.exe", 'mswsock.dll', 'powrprof.dll'], # Exclude msvcr71
    compressed=True, # Compress library.zip
    bundle_files=1,
    dist_dir="bin",
    )
# Build a single-file windowed executable with the app's Win32 resources
# attached; zipfile=None bundles the library into the exe itself.
setup(windows=[{
    'script': "mdview.py",
    'other_resources': resource.py2exe_list()
    }],
    zipfile=None,
    options={'py2exe': py2exe_options},)
|
UTF-8
|
Python
| false | false | 2,012 |
15,917,148,800,416 |
58184eee53f139291671c8b16f3257e52645e956
|
35c07d36820759a0557d50f227456a415fe72e71
|
/app/filters.py
|
0743dc12c3980c4aa7fa472b4bdd041895ab3144
|
[] |
no_license
|
damichael/python_blog
|
https://github.com/damichael/python_blog
|
38646dcab0f36cbc0a14f51bf8f665984c10ebc3
|
7d23d408b4f81cff199bac8a44993bc14cd8abfc
|
refs/heads/master
| 2018-10-29T18:20:19.573280 | 2014-11-12T04:38:27 | 2014-11-12T04:38:27 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
__author__ = 'damichael'
from flask import current_app
@current_app.template_filter()
def dateformat(date, date_format):
    """Template filter: render *date* via strftime with *date_format*.

    Falsy dates (e.g. None) pass through as None so templates can apply
    the filter without guarding against missing values.
    """
    return date.strftime(date_format) if date else None
|
UTF-8
|
Python
| false | false | 2,014 |
7,121,055,781,035 |
7f11b21330010bc6b6ffb81204f3d6bd3c74ce41
|
b0a7213814fbd51a092d560ef2030408436754be
|
/Lab 2/jack.py
|
6d65fc8acdbef21d25bc4bc2325abd9930775865
|
[] |
no_license
|
jguarni/Python-Labs-Project
|
https://github.com/jguarni/Python-Labs-Project
|
aa33609340717d43921e5d3fbe2d89b64a782d15
|
85c9aa076cbe12813648f09b49d6a1b1c798bcff
|
refs/heads/master
| 2021-01-13T01:50:03.562851 | 2014-12-08T03:46:47 | 2014-12-08T03:46:47 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from cisc106 import *
def total_weight2(liquidtype, volume):
    """ This function will calculate the mass of a liquid when given
    the type and volume of that specific liquid.

    liquidtype -- string: one of "water", "milk", or "gas"
    volume -- number
    return -- number (density * volume), or None for an unknown type

    (Docstring fixed: it previously documented a "density" parameter
    that does not exist and omitted liquidtype.)
    """
    WATER_DENSITY = 1.0
    MILK_DENSITY = 1.03
    GASOLINE_DENSITY = 0.7
    # Dispatch table replaces the if/elif chain; the behavior is identical,
    # including the implicit None for an unrecognized liquid type.
    densities = {
        "water": WATER_DENSITY,
        "milk": MILK_DENSITY,
        "gas": GASOLINE_DENSITY,
    }
    density = densities.get(liquidtype)
    if density is None:
        return None
    return density * volume
assertEqual(total_weight2('gas', 5), 3.5)
assertEqual(total_weight2('milk', 6), 6.18)
assertEqual(total_weight2('water', 10), 10.0)

# All three prompts now use float(): the original mixed eval() and float(),
# and eval() executes arbitrary expressions typed by the user (security risk).
testwater = float(input("What is the volume of your water?: "))
print('The mass of your water is:', total_weight2("water", testwater))
testmilk = float(input("What is the volume of your milk?: "))
print('The mass of your milk is:', total_weight2("milk", testmilk))
testgas = float(input("What is the volume of your gas?: "))
print('The mass of your gas is:', total_weight2("gas", testgas))
|
UTF-8
|
Python
| false | false | 2,014 |
13,185,549,620,148 |
b31c588fc050b324f6bd0b49b4c45f04382e466f
|
2182412d0673aa70214fa0d4687c78332bff8514
|
/hello.py
|
0d5150e04bf608d9494c1f717ae14fbdd1bd20d1
|
[] |
no_license
|
WeirdSeven/CS3240_temp1
|
https://github.com/WeirdSeven/CS3240_temp1
|
f149865c85b5b19646b0db17609cfab25b350520
|
fec6a4adad86fe8c08b175cafe03685c71010c21
|
refs/heads/master
| 2015-08-13T17:40:29.561772 | 2014-09-29T22:39:24 | 2014-09-29T22:39:24 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
__author__ = 'HaoBai'

from helper import greeting

# Fixed: the guard was '__main' (missing trailing underscore), so greeting()
# never ran when the script was executed directly.
if __name__ == '__main__':
    greeting('hello')
|
UTF-8
|
Python
| false | false | 2,014 |
3,393,024,206,399 |
4256e1ee5e7f1a8058b6e8296333a7b4b49687be
|
daef9724ae8e3dc8a79daf74af6b2d1aea6f7d83
|
/thrift/pystressor.py
|
cf5b4df0a746d53c440c5eb3b918d4026ed9d6d3
|
[] |
no_license
|
timiblossom/KucooQGen
|
https://github.com/timiblossom/KucooQGen
|
ae4dd72c98cfb1a87b741ec0aeda433e1b6e40e5
|
a482c4ccbdc1e03bd787cb579ba353accace6dfa
|
refs/heads/master
| 2021-01-19T10:08:07.036665 | 2013-03-13T05:20:23 | 2013-03-13T05:20:23 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
import sys
sys.path.append('./gen-py')
from kucoo import QGen
from kucoo.ttypes import *
from thrift import Thrift
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
import threading
import time
from datetime import datetime
from datetime import timedelta
class ThreadClass(threading.Thread):
    # Worker thread: opens its own Thrift connection to the local QGen
    # service and requests questions for one list of films.
    # (Python 2 syntax throughout: print statements, `except X, e`.)
    def __init__(self, films):
        threading.Thread.__init__(self)
        self.films = films  # list of Film objects passed to getQuestions
    def run(self):
        # Each thread opens and closes its own transport.
        try:
            transport = TSocket.TSocket('localhost', 9090)
            transport = TTransport.TBufferedTransport(transport)
            protocol = TBinaryProtocol.TBinaryProtocol(transport)
            client = QGen.Client(protocol)
            transport.open()
            # 20 is presumably the number of questions requested --
            # TODO confirm against the QGen service IDL.
            result = client.getQuestions(20, self.films, None)
            print result
            print len(result)
            transport.close()
        except Thrift.TException, tx:
            print '%s' % (tx.message)
#################################################################################
if __name__ == "__main__":
    # Build a pool of Film objects and fire one stressor thread per film list.
    film1 = Film(title='Gladiator')
    film2 = Film(title='Big Boss')
    film3 = Film(title='Rush Hour')
    film4 = Film(title='The Jerk')
    film5 = Film(title='Terminator 3: Rise of the Machines')
    film6 = Film(title='Cliffhanger')
    film7 = Film(title='Wild Hogs')
    film8 = Film(title='The Punisher')
    film9 = Film(title='Swordfish')
    film10 = Film(title='The Pink Panther')
    # NOTE(review): `list` shadows the builtin of the same name.
    list = []
    list.append([film1, film2])
    list.append([film2, film3, film4])
    list.append([film4, film5, film6])
    list.append([film7, film8, film9])
    list.append([film8, film9, film10])
    list.append([film1, film5, film10])
    list.append([film2, film4, film9, film10])
    list.append([film3, film5, film8])
    list.append([film4, film5, film7])
    list.append([film1, film7, film8, film10])
    #print list
    # One concurrent request per film list.
    for al in list :
        t = ThreadClass(al)
        t.start()
    # Wait for every worker thread to finish before exiting.
    for thread in threading.enumerate():
        if thread is not threading.currentThread():
            thread.join()
|
UTF-8
|
Python
| false | false | 2,013 |
1,975,685,003,438 |
e7cbaf18071d3eb83cbfe9fe5d9063679b74e3f1
|
97b74b89a4785cb18c05c8cc3e2be414048e0434
|
/Flask-MVC/app/mod_test/models.py
|
ffea1712f8a40a7a7c8415e9baf30cab7d943cc8
|
[] |
no_license
|
paultheb/Flask-MVC
|
https://github.com/paultheb/Flask-MVC
|
0f502dbd03a18432742d759af22d60f3bec014a8
|
4e266a64bf6ff7e0cc48cef9c226228ed66c633f
|
refs/heads/master
| 2016-09-06T13:54:35.169610 | 2014-10-09T19:25:13 | 2014-10-09T19:25:13 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Model for this module
|
UTF-8
|
Python
| false | false | 2,014 |
7,249,904,835,684 |
c6aebb4629d8e3bc1f4ef5ce878d02ad32fd286d
|
4b1aa7f90bc0936b855a33c1aca2068072d5bb80
|
/app/router.py
|
ccca6331640de77cd7ecfa519fd7ab52d8eb6830
|
[] |
no_license
|
pombredanne/stomper
|
https://github.com/pombredanne/stomper
|
4bb9a9fe4174b59aeb482e5031b6fabc157f0071
|
2cc4fb92cd150c83e53bac9b11159524b5147a10
|
refs/heads/master
| 2021-01-18T04:47:36.160769 | 2013-09-26T22:19:59 | 2013-09-26T22:19:59 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from flask import Flask
from .auth import get_user
from .websocket import handle_websocket
# from .patches import patch
# patch()
flask_app = Flask(__name__)
# NOTE(review): hard-coded secret key and debug=True are unsafe outside
# development -- confirm this module is never deployed as-is.
SECRET_KEY = 'secret'
flask_app.secret_key = SECRET_KEY
flask_app.debug = True
def router(environ, start_response):
    """WSGI entry point: hand authenticated /websocket requests to the
    websocket handler; everything else falls through to the Flask app."""
    user = get_user(environ)
    if environ["PATH_INFO"].startswith("/websocket") and user:
        handle_websocket(user, environ["wsgi.websocket"])
    else:
        return flask_app(environ, start_response)
from . import views
views # pyflakes
|
UTF-8
|
Python
| false | false | 2,013 |
17,291,538,342,016 |
ef3d5dc2d0e1690ad604f3c9ac3a910cbb2b820c
|
e13273b66d4b7715f5dd09105a1b42d93cfe8773
|
/setup.py
|
f3cfe1b85000277a3f20f66af1a71a4d8933114d
|
[
"Apache-2.0"
] |
permissive
|
gatoatigrado/plcd
|
https://github.com/gatoatigrado/plcd
|
922dcb00dfdd65f7d6a95766ea222086cb488197
|
b9df160ef5343024aea9d60b233abcd507c2cbfc
|
refs/heads/master
| 2020-05-25T10:24:01.889782 | 2014-09-16T06:45:33 | 2014-09-16T06:45:33 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from setuptools import find_packages
from setuptools import setup
setup(
name="plcd",
version="0.1.0",
provides=["plcd"],
author="gatoatigrado",
author_email="[email protected]",
url="https://github.com/gatoatigrado/plcd",
description=(
'This is a python library for facilitating '
'pre-loaded compression dictionary usage.'
),
classifiers=[
"Programming Language :: Python",
"Operating System :: OS Independent",
"License :: OSI Approved :: Apache Software License",
"Topic :: Software Development :: Libraries :: Python Modules",
"Intended Audience :: Developers",
"Development Status :: 3 - Alpha",
],
install_requires=[],
packages=find_packages(exclude=['tests*']),
entry_points={
'console_scripts': [
'plcd_generate = plcd.generate:main',
],
},
long_description="""Facilitates use of pre-loaded compression dictionaries.
Pre-loaded compression means that you "seed" a compressor with some
sample data, which is representative of future data you wish to encode.
This is primarily useful for encoding small messages, which have repeated
information with some sample small messages.
"""
)
|
UTF-8
|
Python
| false | false | 2,014 |
3,917,010,190,084 |
3fda3fb1456340b1ad78e5b9e62e4c2bd34ac539
|
19b0fc3a0d1aaa9a09ce6971984a2a146fb9b420
|
/store/signals.py
|
c3056c209828f0cd32b5a05aff240c670604d881
|
[] |
no_license
|
j0die/fakecoin
|
https://github.com/j0die/fakecoin
|
9b3b8ada09fb9bc1e4ef28aaa69315e4cd7f72d5
|
f73208e3c255ed611213e808684f0ac3831b7a6d
|
refs/heads/master
| 2017-05-03T05:57:32.502210 | 2014-06-01T20:27:43 | 2014-06-01T20:27:43 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.core.mail import send_mail
from django.db.models.signals import pre_save
from django.dispatch import Signal, receiver
from django.template import Context, loader
from django.contrib.auth.models import User
from store.models import Category
# Custom signal fired when an item is sold.
# NOTE(review): the handlers below also read kwargs['site'], which is not
# declared in providing_args -- confirm all senders pass it.
item_was_sold = Signal(providing_args=['order'])
@receiver(item_was_sold)
def send_item_sold(sender, **kwargs):
    # Notify every superuser by email that an item was sold.
    recipient_list = User.objects.filter(is_superuser=True).values_list('email', flat=True)
    template = loader.get_template('store/item_sold_email.txt')
    context = Context({
        'user': kwargs['order'].customer,
        'full_name': kwargs['order'].customer.get_profile(),  # profile used as display name
        'item': kwargs['order'].item.title,
        'amount': kwargs['order'].amount,
        'link': kwargs['order'].get_admin_url(),
        'site': kwargs['site']
    })
    send_mail('Item was sold', template.render(context), None, recipient_list)
@receiver(item_was_sold)
def send_thank_you(sender, **kwargs):
    # Send a "Thank You" email to the user that bought the item.
    template = loader.get_template('store/thank_you_email.txt')
    # NOTE(review): reads the private _customer_cache attribute rather than
    # kwargs['order'].customer.email -- confirm this is intentional.
    recipient = [kwargs['order']._customer_cache.email]
    context = Context({
        'full_name': kwargs['order'].customer.get_profile(),
    })
    send_mail('Thank you for using our site', template.render(context), None, recipient)
@receiver(pre_save, sender=Category)
def unmark_previous_home(sender, **kwargs):
    """Before saving a Category flagged as home, clear the flag everywhere
    else so at most one home category exists."""
    instance = kwargs['instance']
    if instance.is_home:
        Category.objects.filter(is_home=True).update(is_home=False)
|
UTF-8
|
Python
| false | false | 2,014 |
16,045,997,826,786 |
f241870b19717eeffbb28089caeb689c17a54b74
|
2c3b521fe51422d7c51c01455a5a47df1ec54cf0
|
/toxmail/contacts.py
|
c2836beee6c50e291ef60c86374901017a7bef26
|
[] |
no_license
|
chaoallsome/toxmail
|
https://github.com/chaoallsome/toxmail
|
a57380415c971913aea237f4feb8a97ff7f0a126
|
60ab7aa377852d62bda4cce84f9837ff7a5e4603
|
refs/heads/master
| 2021-01-17T21:13:52.244228 | 2014-08-15T22:25:45 | 2014-08-15T22:25:45 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# dead simple flat file database
# to lookup client_id given an e-mail
import shelve
class Contacts(object):
    """Dead-simple flat-file (shelve-backed) contact store, used to look up
    a client_id given an e-mail address."""

    def __init__(self, path):
        self.path = path
        self._db = shelve.open(path, writeback=True)

    def add(self, email, **data):
        """Store (or overwrite) the record for *email* as a plain dict."""
        self._db[email] = data

    def delete_first(self, **filter):
        """Remove the first entry matching *filter*, if any."""
        entry = self.first(**filter)
        if entry:
            self.remove(entry['email'])

    def first(self, **filter):
        """Return the first stored entry whose fields all match *filter*
        (with its e-mail added under the 'email' key), or None.

        client_id values are compared on their first 64 characters only.
        """
        for email, entry in self._db.items():
            entry['email'] = email
            match = True
            for key, value in filter.items():
                entry_value = entry.get(key)
                if key == 'client_id':
                    # Fixed: entries without a client_id used to raise
                    # TypeError here (None[:64]); treat them as non-matches.
                    if entry_value is None:
                        match = False
                        break
                    value = value[:64]
                    entry_value = entry_value[:64]
                if entry_value != value:
                    match = False
                    break
            if match:
                return entry
        return None

    def get(self, email):
        """Return the stored record for *email*, or None if absent."""
        return self._db.get(email)

    def remove(self, email):
        """Delete the record for *email*, if present.

        Fixed: the original tested the literal string 'email' for membership
        (`if 'email' in self._db`) instead of the argument, so records were
        never actually removed.
        """
        if email in self._db:
            del self._db[email]

    def save(self):
        """Flush pending writes to disk, then reopen the shelve."""
        self._db.sync()
        self._db.close()
        self._db = shelve.open(self.path, writeback=True)
|
UTF-8
|
Python
| false | false | 2,014 |
5,042,291,628,547 |
215a1f5c59b87c712cb5d1645c36811182f09035
|
62f8934c74740da33ccd11f22454127e60fe9fa4
|
/minesweeper.py
|
fc764a46f3fa40bc5a85a9037f432767cb4add7d
|
[] |
no_license
|
contactjiayi/CodeEval
|
https://github.com/contactjiayi/CodeEval
|
ad8d467e72be79758bdcb6cf3e248a2a8087c4f4
|
362cd7f0e9da26579c94e1ebfe385b7b3752c703
|
refs/heads/master
| 2021-01-18T19:58:23.532866 | 2014-01-14T23:18:55 | 2014-01-14T23:18:55 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Created by JiaYi Lim on 14/1/2014
import sys
def build_matrix(num_rows, num_cols, items):
    """Build a num_rows x num_cols grid from the flat cell string *items*,
    placing '*' mines, then fill the remaining cells with adjacency counts."""
    matrix = [['.'] * num_cols for _ in range(num_rows)]
    for idx, cell in enumerate(items):
        if cell == '*':
            # Flat index -> (row, column) in row-major order.
            matrix[idx // num_cols][idx % num_cols] = cell
    return set_counts(matrix)
def num_mines(matrix, row_num, col_num):
    """Count '*' cells in row *row_num* lying within one column of *col_num*.
    Rows outside the matrix contribute zero."""
    if not (0 <= row_num < len(matrix)):
        return 0
    return sum(1 for idx, cell in enumerate(matrix[row_num])
               if abs(col_num - idx) <= 1 and cell == '*')
def set_counts(matrix):
    """Replace every '.' cell with the count of mines in the 3x3 neighborhood
    (rows above, at, and below; num_mines handles the column window)."""
    for r in range(len(matrix)):
        for c in range(len(matrix[r])):
            if matrix[r][c] == '.':
                matrix[r][c] = (num_mines(matrix, r - 1, c)
                                + num_mines(matrix, r, c)
                                + num_mines(matrix, r + 1, c))
    return matrix
def print_matrix(matrix):
    # Pretty-print the grid row by row (Python 2 print statements).
    for i in range(len(matrix)):
        for j in range(len(matrix[i])):
            print matrix[i][j],
        print
def print_string(matrix):
    # Emit the whole grid as one flat row-major string (Python 2 print).
    print ''.join([str(matrix[i][j]) for i in range(len(matrix)) for j in range(len(matrix[i]))])
def start():
    """Read 'rows,cols;cells' grid specs from the file named on the command
    line, solve each one, and print the result as a flat string."""
    filepath = sys.argv[1]
    with open(filepath) as f:
        for raw in f:
            raw = raw.strip()
            if not raw:
                continue
            parts = raw.split(';')
            rows, cols = parts[0].split(',')
            matrix = build_matrix(int(rows), int(cols), parts[1])
            print_string(matrix)
# Script entry point.
if __name__ == '__main__':
    start()
|
UTF-8
|
Python
| false | false | 2,014 |
9,302,899,164,772 |
4ad7f2e876f2e9da561648bb10631ee719f10fc4
|
304c1e4e4ddc8047951218a940ac455a94ad74c3
|
/addsource/addsource.py
|
94275eab51911ab4f876cb28fe98e689f9f7e852
|
[
"BSD-2-Clause"
] |
permissive
|
t-brandt/acorns-adi
|
https://github.com/t-brandt/acorns-adi
|
953372986f52febc9984813326f3b513924e8dae
|
6645fae7878a1801beeda0c6604b01e61f37ca15
|
refs/heads/master
| 2020-05-06T20:34:50.607227 | 2013-04-15T14:35:49 | 2013-04-15T14:35:49 | 5,718,451 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
#
# Original filename: addsource.py
#
# Author: Tim Brandt
# Email: [email protected]
# Date: Feb 2012
#
# Summary: inject a fake source into an ADI sequence
#
import numpy as np
import random
from scipy import ndimage
from parallel import *
def addsource(flux, pos, pa, psf, norm=1, jitter=0):
    """
    Inject a fake source PSF into a centered ADI frame or frame sequence.

    addsource assumes the frames are centered.  It will add a source
    at position [y, x] relative to the center.

    addsource takes four arguments:
    1.  A 2D or 3D flux array.  If 3D, the first index should be the
        frame number.
    2.  The position [y, x], relative to the center, at which to
        add the source.  This can be a list of positions, one for
        each frame.
    3.  The position angle (in radians) at which to add the source.
        If there is more than one frame, this should be a list or array.
    4.  The source PSF, as a 2D array

    Optional arguments:
    5.  The normalization of the additional source's PSF
    6.  The random positional jitter, in pixels (both horizontal
        and vertical) of the additional source

    Returns the input flux array, modified in place, with the normalized
    (and optionally jittered) PSF added at the rotated position.
    """

    ####################################################################
    # Rotate to the appropriate position angle
    ####################################################################

    # Rotate [y, x] by pa and shift to absolute pixel coordinates
    # (integer image center via floor division).
    x = pos[1] * np.cos(pa) - pos[0] * np.sin(pa) + flux.shape[-1] // 2
    y = pos[1] * np.sin(pa) + pos[0] * np.cos(pa) + flux.shape[-2] // 2
    normpsf = psf * norm

    # NOTE(review): the try/except below dispatches between the sequence
    # case (x has .size / .astype) and the single-image case, but the bare
    # `except:` will also swallow unrelated errors raised mid-loop --
    # consider narrowing to AttributeError.
    try:
        ################################################################
        # Case 1: image sequence (flux is a 3D array)
        ################################################################
        for i in range(x.size):
            x[i] += random.gauss(0, jitter)
            y[i] += random.gauss(0, jitter)

        ################################################################
        # x, y > 0.  Decompose into integer, fractional parts.
        ################################################################
        xdiff = x % 1
        x = x.astype(int)
        ydiff = y % 1
        y = y.astype(int)
        x1 = np.zeros(x.shape, int)
        x2 = np.zeros(x.shape, int)
        y1 = np.zeros(x.shape, int)
        y2 = np.zeros(x.shape, int)
        z1 = np.zeros(x.shape, int)
        z2 = np.zeros(x.shape, int)
        w1 = np.zeros(x.shape, int)
        w2 = np.zeros(x.shape, int)
        xref = np.arange(psf.shape[1])
        yref = np.arange(psf.shape[0])
        xref, yref = np.meshgrid(xref - 0., yref - 0.)

        ################################################################
        # Interpolate the template PSF onto the fractional part, add
        # it to the correct part of the array.
        ################################################################
        for i in range(x.size):
            # Clip the paste window to the frame boundaries; z/w are the
            # matching window inside the PSF stamp.
            x1[i] = x[i] - min([x[i], psf.shape[1] // 2])
            x2[i] = x[i] + min([flux.shape[-1] - x[i], psf.shape[1] // 2])
            y1[i] = y[i] - min([y[i], psf.shape[0] // 2])
            y2[i] = y[i] + min([flux.shape[-2] - y[i], psf.shape[0] // 2])
            z1[i] = x1[i] + psf.shape[1] // 2 - x[i]
            z2[i] = x2[i] + psf.shape[1] // 2 - x[i]
            w1[i] = y1[i] + psf.shape[0] // 2 - y[i]
            w2[i] = y2[i] + psf.shape[0] // 2 - y[i]
            newpsf = ndimage.map_coordinates(normpsf, [yref - ydiff[i], xref - xdiff[i]], order=3)
            flux[i, y1[i]:y2[i], x1[i]:x2[i]] += newpsf[w1[i]:w2[i], z1[i]:z2[i]]
        return flux

    except:
        ################################################################
        # Same algorithm for a single image.
        ################################################################
        xdiff = x % 1
        x = x.astype(int)
        ydiff = y % 1
        y = y.astype(int)
        x1 = x - min([x, psf.shape[1] // 2])
        x2 = x + min([flux.shape[-1] - x, psf.shape[1] // 2])
        y1 = y - min([y, psf.shape[0] // 2])
        y2 = y + min([flux.shape[-2] - y, psf.shape[0] // 2])
        z1 = x1 + psf.shape[1] // 2 - x
        z2 = x2 + psf.shape[1] // 2 - x
        w1 = y1 + psf.shape[0] // 2 - y
        w2 = y2 + psf.shape[0] // 2 - y
        xref = np.arange(psf.shape[1])
        yref = np.arange(psf.shape[0])
        xref, yref = np.meshgrid(xref - xdiff, yref - ydiff)
        newpsf = ndimage.map_coordinates(normpsf, [yref, xref], order=3)
        flux[y1:y2, x1:x2] += newpsf[w1:w2, z1:z2]
        return flux
|
UTF-8
|
Python
| false | false | 2,013 |
7,988,639,206,885 |
c56d272d8eefae970d0682d51b2aa4d466145f69
|
9372fd804dd3a6c473b7913d92cd3992e4c85586
|
/build_hanzi_dict_with_radical_cangjie_kFreq.py
|
c03c7d7be2f4d7c018ae87a3ec18dbae121bb640
|
[] |
no_license
|
heitorchang/gwoyeu-romatzyh-dict
|
https://github.com/heitorchang/gwoyeu-romatzyh-dict
|
60fdf818f64cc8067169ea27bb07314517d68570
|
a33762450e9d549e589ac0f752049fe8e8b61bfa
|
refs/heads/master
| 2016-03-23T03:00:44.384560 | 2014-09-24T01:42:15 | 2014-09-24T01:42:15 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import pickle

# Load the set of Unicode code points (characters) we care about.
in_hanzi_ucp_set=open('hanzi_ucp_set.pickle','rb')
hanzi_ucp_set=pickle.load(in_hanzi_ucp_set)
in_hanzi_ucp_set.close()

class Hanzi:
    # Per-character record; the class attributes act as shared defaults
    # that are shadowed per instance when a Unihan field is present.
    kDefinition="[no English definition]"
    kMandarin="[no Mandarin pronunciation]"
    kRadical=0
    kCangjie=""
    kFrequency=6  # 6 = least frequent / unranked default

hanzi_dict={}

# Pass 1: definitions and Mandarin readings from Unihan_Readings.txt
# (tab-separated: codepoint, field name, value).
in_unihan_readings=open('Unihan_Readings.txt','r',encoding='utf8')
for line in in_unihan_readings:
    tokens = line.rstrip().split('\t',3)
    if tokens[0] in hanzi_ucp_set:
        if tokens[0] not in hanzi_dict:
            hanzi_dict[tokens[0]]=Hanzi()
        if tokens[1] == 'kMandarin':
            hanzi_dict[tokens[0]].kMandarin=tokens[2]
        elif tokens[1] == 'kDefinition':
            hanzi_dict[tokens[0]].kDefinition=tokens[2]
in_unihan_readings.close()

# Pass 2: radical number from the kRSUnicode field (radical.strokes).
in_unihan_radical=open('Unihan_RadicalStrokeCounts.txt','r',encoding='utf8')
for line in in_unihan_radical:
    tokens = line.rstrip().split('\t',3)
    if tokens[0] in hanzi_dict:
        if tokens[1] == 'kRSUnicode':
            hanzi_dict[tokens[0]].kRadical=int(tokens[2].split('.')[0].strip("'"))
in_unihan_radical.close()

# string translation from romatzyh to hantzyh
# Maps each Cangjie letter code to its corresponding key character.
inlst = ["A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","Q","R","S","T","U","V","W","X","Y","Z"]
outlst= ["日","月","金","木","水","火","土","竹","戈","十","大","中","一","弓","人","心","手","口","尸","廿","山","女","田","難","卜","重"]

# Pass 3: Cangjie codes (transliterated to key characters, wrapped in
# single quotes) and frequency rank.
# NOTE(review): in_unihan_dictlike is never closed below -- confirm intended.
in_unihan_dictlike=open('Unihan_DictionaryLikeData.txt','r',encoding='utf8')
for line in in_unihan_dictlike:
    tokens = line.rstrip().split('\t',3)
    if tokens[0] in hanzi_dict:
        if tokens[1] == 'kCangjie':
            outstr = "'"
            for c in tokens[2]:
                outstr += outlst[inlst.index(c)]
            outstr += "'"
            hanzi_dict[tokens[0]].kCangjie=outstr
        elif tokens[1] == 'kFrequency':
            hanzi_dict[tokens[0]].kFrequency=int(tokens[2])
|
UTF-8
|
Python
| false | false | 2,014 |
5,317,169,514,431 |
33ee075423ba710822f6c1a76ed8f5bc68f2e498
|
e7fa457df22c9e4b5b27295a0b74daaa8daf51d6
|
/cython_tests/integral_image/setup.py
|
bd0bbe783a2ffc75710d6116eb0b80a9bf9a9425
|
[] |
no_license
|
danoan/faces-rec
|
https://github.com/danoan/faces-rec
|
ee1aceaffd5ac6be0bba2e4ea27248ea39b0fb13
|
3be0f065f3e5048d60ef45d5db903b6cbf6a5690
|
refs/heads/master
| 2021-03-12T19:43:42.539702 | 2014-08-12T13:17:35 | 2014-08-12T13:17:35 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#coding:utf-8
from distutils.core import setup, Extension

# C extension linked against ImageMagick's MagickWand library.
module = Extension('integral_image',
        sources=['integral_image.c'],
        libraries=['MagickWand-6.Q16']
        )

setup( name="IntegralImage",
    version="1.0",
    description="First Package",
    ext_modules=[module])
|
UTF-8
|
Python
| false | false | 2,014 |
12,111,807,806,986 |
f82f6d2459822fafb84f10a153bc3cf29539d996
|
8c7425c253310fea1ff8d738bf17cf2d434f2f12
|
/apps/picket/urls.py
|
8e04a729d55f524dea01949b8759b0f359377843
|
[
"GPL-3.0-or-later",
"GPL-3.0-only"
] |
non_permissive
|
sebastienhatton/picket
|
https://github.com/sebastienhatton/picket
|
86654ca20cc9ce8d876cc8aedf78c11d425351f9
|
cd014d8d7330d5b1706f277b7c6510531dfac726
|
refs/heads/master
| 2020-12-25T09:09:02.613140 | 2011-07-02T18:57:41 | 2011-07-02T18:57:41 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
Copyright 2008-2010 Serge Matveenko
This file is part of Picket.
Picket is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Picket is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Picket. If not, see <http://www.gnu.org/licenses/>.
"""
from django.conf.urls.defaults import *

# URL routes for the picket app, resolved against <package>.views.
# NOTE(review): old-style patterns()/string-view syntax from pre-1.8 Django.
urlpatterns = patterns('%s.views' % __package__,
    (r'^admin/', include('%s.admin.urls' % __package__)),
    (r'^$', 'index', {}, 'picket-index'),
    (r'^new/$', 'new_issue', {}, 'picket-issue-new'),
    (r'^(?P<issue_number>\d+)/$', 'issue', {}, 'picket-issue'),
    (r'^issues/$', 'issues', {}, 'picket-issues'),
)
|
UTF-8
|
Python
| false | false | 2,011 |
901,943,146,766 |
9eac9b68816d21be2b6dc6c768ae112a0d6586d6
|
e95f62655c7179067a5987b5ad8f72139f32db45
|
/sample_python/gpaSort2 (2013).py
|
37eb1c2da7d5b7c29549cfd93a260817319ccbbb
|
[] |
no_license
|
noahadelstein/mathemagic
|
https://github.com/noahadelstein/mathemagic
|
8bbf576997b41069cb8590f098f00bf29cbaec86
|
2ab749a94eb97b5ab78053e8f30d67c3879f5269
|
refs/heads/master
| 2021-01-12T21:06:14.023153 | 2013-06-11T03:46:28 | 2013-06-11T03:46:28 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#-------------------------------------------------------------------------------
# Name: gpaSort2
# Purpose: creates a decorated list that will sort students by GPA
#
# Author: Adrienne Sands
#
# Created: 19/05/2013
# Copyright: (c) Adrienne Sands 2013
#-------------------------------------------------------------------------------
#Test data - D:\From Adrienne's Computer\Programming\Python\Chapter 11\sample data sets\gpaTest.txt
from gpaSort import *
def dataSort2(gui, gpaList):
    """Sorts data by gpa, name, or credits.

    Activates the three sort buttons, waits for the user to click one,
    then returns gpaList ordered by the chosen key.
    """
    gui.setText("How would you like to sort?: ")
    sort_buttons = [gui.gpaButton, gui.nameButton, gui.creditsButton]
    for button in sort_buttons:
        button.activate()
    # Block until one of the three sort buttons is clicked.
    while True:
        p = gui.win.getMouse()
        if any(button.clicked(p) for button in sort_buttons):
            break
    # Build the sort key for each student from the clicked button.
    if gui.gpaButton.clicked(p):
        keys = [student.getGPA() for student in gpaList]
    elif gui.nameButton.clicked(p):
        keys = [student.getName() for student in gpaList]
    else:
        keys = [student.getQPoints() for student in gpaList]
    decorated = list(zip(keys, gpaList))
    # Deactivate the buttons before returning control to the caller.
    for button in sort_buttons:
        button.deactivate()
    # Decorate-sort-undecorate on the chosen key.
    decorated.sort(key=lambda pair: pair[0])
    return [student for _, student in decorated]
def main():
    """Entry point: drive the GUI sort loop until the user clicks Quit."""
    print("This program sorts student grade information by GPA, name, or credits.")
    # create graphics window
    win = GraphWin()
    win.setCoords(-20, -20, 20, 20)
    # create graphical interface
    gui = GUI(win)
    p = win.getMouse()
    # Run until the user clicks the quit button.  (Removed a redundant
    # `if gui.quitCheck(p): break` that opened the loop body: the while
    # condition already guarantees quitCheck(p) is False there, assuming
    # quitCheck is a pure hit-test on the point.)
    while not gui.quitCheck(p):
        if gui.submitButton.clicked(p):
            gui.quitButton.deactivate()
            data = gui.dataRead()
            gui.dataOutput(gui.dataOrder(dataSort2(gui, data)))
            gui.quitButton.activate()
        p = win.getMouse()
    win.close()
# Script entry point.
if __name__ == '__main__':
    main()
|
UTF-8
|
Python
| false | false | 2,013 |
1,984,274,896,495 |
59060982f4fc007facda04a7266d2e2eba6ccd82
|
98c6ea9c884152e8340605a706efefbea6170be5
|
/examples/data/Assignment_9/mrkpet004/question3.py
|
51fe89fbb88564fa535fe58bef8be5116898d1b5
|
[] |
no_license
|
MrHamdulay/csc3-capstone
|
https://github.com/MrHamdulay/csc3-capstone
|
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
|
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
|
refs/heads/master
| 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""program to check if a complete Sudoku grid is valid or not
peter m muriuki
16/05/14"""

# Read the nine rows of the grid, one line of nine characters each.
grid = [list(input("")) for _ in range(9)]

# Assume valid until a duplicate is found.  (The original reset its flag
# between the row and column checks, silently discarding the row result,
# and its "sub-grid" check counted duplicates within rows rather than
# within the 3x3 blocks -- both fixed below.)
valid = True

# Each row must contain no repeated value.
for row in grid:
    if len(set(row)) != len(row):
        valid = False

# Each column must contain no repeated value.
for column in zip(*grid):
    if len(set(column)) != len(column):
        valid = False

# Each of the nine non-overlapping 3x3 sub-grids must have no repeats.
for block_row in range(0, 9, 3):
    for block_col in range(0, 9, 3):
        block = [grid[r][c]
                 for r in range(block_row, block_row + 3)
                 for c in range(block_col, block_col + 3)]
        if len(set(block)) != len(block):
            valid = False

# Report the verdict (messages unchanged from the original).
if valid:
    print("Sudoku grid is valid")
else:
    print("Sudoku grid is not valid")
|
UTF-8
|
Python
| false | false | 2,014 |
9,088,150,803,221 |
e408192dd630cbbbe865766304957f30a32f12d0
|
260682e6d3f684487c5b12ee94d79a5c4f8c6165
|
/bin/check_rm_cluster.py
|
1787f5a237e82ca0406ff998218fb6a512f73f40
|
[] |
no_license
|
cshukla/MurSatRep1
|
https://github.com/cshukla/MurSatRep1
|
f27bf75e3e231a09eb47c0536638d9af80e43eb6
|
b034cfa5cd217e9acb826e6ba2d076f258ae67f7
|
refs/heads/master
| 2016-08-04T18:35:13.860254 | 2014-08-26T17:40:54 | 2014-08-26T17:40:54 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
from optparse import OptionParser
import math, os, subprocess, sys, tempfile, glob
################################################################
# check_rm_cluster.py
#
# This script takes the gff output of RepeatMasker and checks
# which input repeats clusters like MurSatRep1. The output is
# a scatter plot of distance vs identity; as well as a distance
# histogram. We also output the intersection with HMMER KZNF's as
# well as intersection with lincRNAs. Only repeats with more than
# 500 hits in the genome are considered for clustering. The
# mergebed distance to make clusters is set at a default of 500kb.
################################################################
def main():
    # Pipeline driver (Python 2): filter a RepeatMasker gff, split per
    # repeat, keep repeats with >=500 hits and high KZNF overlap, then
    # plot distance/identity scatter and distance histograms per repeat.
    usage = 'usage:%prog [options] <gff_file>'
    parser = OptionParser(usage)
    parser.add_option('--kf', dest='krab_file', help='Path of the bed file with KRAB motifs on the system')
    parser.add_option('--kc', dest='krab_count', type='int', help='Number of lines in the KRAB bed file. Optional')
    parser.add_option('--if', dest='intersect_fraction', type='float', default=0.3, help='Filter to check intersectBed with KRAB domains')
    parser.add_option('--ws', dest='control_data', help='Concatatenated water_scatter Output File for MurSatRep1 and LINE/L1')
    parser.add_option('--gf', dest='genome_file', help='Path of the genome fasta file on the system')
    parser.add_option('--go', dest='gap_open', type='float', default=10.0, help='Gap opening penalty for Smith Waterman Alignment')
    parser.add_option('--ge', dest='gap_extend', type='float', default=0.5, help='Gap extension penalty for Smith Waterman Alignment')
    (options, args) = parser.parse_args()
    if len(args) != 1:
        parser.error(usage)
    else:
        mask_file = args[0]
    input_dir = '/'.join(args[0].split('/')[:-1])
    input_filename = args[0].split('/')[-1].split('.')[0]
    # Step 1: Remove low complexity and satellite repeats from the
    # output file. Only retain repeats in the RepeatScout library.
    filtered_mask_file = 'library_repeats_' + input_filename + '.gff'
    filtered_mask_file = '/'.join([input_dir, filtered_mask_file])
    # NOTE(review): the nested single quotes around R= below do not parse --
    # this line is a SyntaxError as written; the inner quotes need escaping.
    subprocess.call('grep -i 'R=' %s > %s' %(mask_file, filtered_mask_file), shell=True)
    print >> sys.stderr, 'Finished filtering the input mask file. Here is the filtered file: %s' %(filtered_mask_file)
    # Step 2: Take the filtered mask file and split into 1 file for
    # each repeat. This will be a bed file with only the chromosome
    # name, start and end site. Delete all the files which have < 500
    # lines.
    output_dir = rmsk_split(filtered_mask_file)
    repeat_files = '/'.join([output_dir, '*.bed'])
    repeats_line_count = '/'.join([output_dir, 'repeats_line_count.txt'])
    subprocess.call('wc -l %s > %s' %(repeat_files, repeats_line_count) , shell=True)
    count = 0
    for line in open(repeats_line_count):
        line = line.strip().split(' ')
        repeat_file_name = line[1]
        repeat_count = int(line[0])
        if repeat_count < 500:
            subprocess.call('rm %s' %(repeat_file_name), shell=True)
        else:
            count += 1
            repeat_name = repeat_file_name.split('/')[-1].split('.')[0]
            print >> sys.stderr, '%s has more than 500 hits' %(repeat_name)
    print >> sys.stderr, '%d repeats have more than 500 hits' %(count)
    # Step 3: Out of the remaining repeat files, check which ones
    # intersect with >50% HMMER KZNFs. Remove all the other files.
    # Print the %intersection with HMMER KZNFS to a summary file.
    repeat_files = glob.glob('/'.join([output_dir, '*.bed']))
    krab_file = options.krab_file # parser.add_option
    if options.krab_count:
        krab_count = int(options.krab_count) # parser.add_option
    else:
        # Count lines of the KRAB bed file when --kc was not supplied.
        p = subprocess.Popen('wc -l %s' %(krab_file), shell=True, stdout=subprocess.PIPE)
        for line in p.stdout:
            krab_count = int(line.strip().split(' ')[0])
    print >> sys.stderr, 'Your input KRAB bed file has %d lines' %(krab_count)
    summary_file = open('/'.join([output_dir, 'intersect_summary.txt']), 'w')
    print >> summary_file, 'Repeat_Name\tKZNF_Intersection'
    count = 0
    for bed_file in repeat_files:
        repeat_name = bed_file.split('/')[-1].split('.')[0]
        bed_count = 0
        p = subprocess.Popen('intersectBed -u -a %s -b %s' %(krab_file, bed_file), shell=True, stdout=subprocess.PIPE)
        for line in p.stdout:
            bed_count += 1
        intersect_fraction = float(bed_count)/krab_count
        if intersect_fraction < options.intersect_fraction: # parser.add_option()
            subprocess.call('rm %s' %(bed_file), shell=True)
            print >> sys.stderr, '%s had less than 0.3 intersection with KRAB domains. So, %s has been deleted' %(repeat_name, bed_file)
        else:
            out_line = '\t'.join([repeat_name, str(intersect_fraction)])
            print >> summary_file, out_line
            count +=1
    print >> sys.stderr, 'Finished deleting files with less than 0.5 intersection with KZNFs'
    print >> sys.stderr, '%d repeats have more than %f intersection with KZNFs' %(count, intersect_fraction)
    print >> sys.stderr, 'Now, we will make scatter plots for the filtered repeat files'
    print >> sys.stderr, 'This will take some time. Please be patient...'
    # Step 4: For each remaining repeat file, make a gtf file.
    # The gtf file will be used to make the fasta file. We can
    # make the distance vs identity scatter plot with the fasta
    # file. We can also make a histogram showing distribution of
    # intra-repeat distances.
    repeat_files = glob.glob('/'.join([output_dir, '*.bed']))
    scatter_out = '/'.join([output_dir, 'scatter_plots'])
    if not os.path.exists(scatter_out):
        os.makedirs(scatter_out)
    histogram_out = '/'.join([output_dir, 'distance_histograms'])
    if not os.path.exists(histogram_out):
        os.makedirs(histogram_out)
    for bed_file in repeat_files:
        repeat_name = bed_file.split('/')[-1].split('.')[0]
        fasta_file = make_fasta(bed_file)
        repeat_data = water_scatter(fasta_file)
        control_data = options.control_data # parser.add_option
        joint_file_fd, joint_file = tempfile.mkstemp()
        # NOTE(review): mursatrep1_data is undefined (likely meant
        # control_data), and this subprocess.call lacks shell=True.
        subprocess.call('cat %s %s > %s' %(repeat_data, mursatrep1_data, joint_file))
        scatter_name = repeat_name + '.pdf'
        scatter_path = '/'.join([scatter_out, scatter_name])
        subprocess.call('R --slave --args %s %s < repeat_scatter.r' %(joint_file, scatter_path), shell=True)
        # NOTE(review): rebinding `distances` to the call result shadows the
        # name being called -- confirm a distances() helper exists elsewhere.
        distances = distances(bed_file)
        histogram_name = repeat_name + '_distances.pdf'
        histogram_path = '/'.join([histogram_out, histogram_name])
        subprocess.call('R --slave --args %s %s < repeat_distance.r' %(distances, histogram_path), shell=True)
        # NOTE(review): missing comma after sys.stderr -- SyntaxError as written.
        print >> sys.stderr 'Generated histogram and scatter plot for %s' %(repeat_name)
        os.close(joint_file_fd)
        os.remove(joint_file)
def rmsk_split(filtered_mask_file):
    """Split a filtered RepeatMasker GTF file into one BED file per repeat family.

    Each output line is ``chrom<TAB>start<TAB>end<TAB>strand``.  Files are written
    to a directory named after the input file; that directory's path is returned.
    Note: Python 2 module (uses ``print >>``).
    """
    input_file = open(filtered_mask_file, 'r')
    input_dir = '/'.join(filtered_mask_file.split('/')[:-1])
    input_filename = filtered_mask_file.split('/')[-1].split('.')[0]
    output_dir = '/'.join([input_dir, input_filename])
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # Map: repeat family name -> accumulated BED text for that family.
    repeats = {}
    for line in input_file:
        line = line.strip().split('\t')
        # The family name is the first quoted value of the last GTF column.
        # assumes that value contains a ':' (e.g. class:family=name) -- TODO confirm;
        # ':' and '=' are removed so the name is usable as a filename.
        repeat_family = line[-1].split('"')[1]
        repeat_family = repeat_family.split(':')[1].split('=')
        repeat_family = ''.join(repeat_family)
        chrom = line[0]
        start = line[3]
        end = line[4]
        strand = line[6]
        out_line = '\t'.join([chrom, start, end, strand, '\n'])
        if repeat_family not in repeats:
            repeats[repeat_family] = ''
        repeats[repeat_family] += out_line
    print >> sys.stderr, 'Finished reading all the repeats into the hash table'
    for key in repeats.keys():
        out_file = open('/'.join([output_dir, key]) + '.bed', 'w')
        print >> out_file, repeats[key]
        out_file.close()
    print >> sys.stderr, 'Finished splitting the filtered mask file into 1 file for each repeat'
    print >> sys.stderr, 'The split files can be found in this directory: %s' %(output_dir)
    return output_dir
def make_fasta(bed_file):
    """Build a FASTA file of repeat sequences from a BED file.

    A throwaway GTF with one "exon" record per BED interval is generated with
    awk, then ``gtf_to_fasta`` extracts the sequences from the genome supplied
    on the command line (``options.genome_file``).  Returns the FASTA path.
    """
    input_dir = '/'.join(bed_file.split('/')[:-1]+[''])
    bed_file_name = bed_file.split('/')[-1].split('.')[0]
    gtf_file = input_dir + bed_file_name + '.gtf'
    # Bug fix: the awk program is kept in its own variable so the shell command
    # can be assembled without the unbalanced quoting the inline version had
    # (the original line was not even valid Python syntax).
    awk_program = '{OFS="\\t"}{print $1, "RepeatMasker", "exon", $2, $3, ".", $4, ".", "\\"Exon_"NR"\\"" }'
    subprocess.call("awk '%s' %s > %s" % (awk_program, bed_file, gtf_file), shell=True)
    genome_file = options.genome_file # parser.add_option
    fasta_file = input_dir + bed_file_name + '.fa'
    subprocess.call('gtf_to_fasta %s %s %s' %(gtf_file, genome_file, fasta_file), shell=True)
    return fasta_file
def water_scatter(fasta_file):
    """Collect (distance, identity) points for same-chromosome repeat pairs.

    For every pair of sequences in *fasta_file* that lie on the same chromosome,
    align them with EMBOSS ``water`` and record the genomic distance and percent
    identity (as a fraction) in a tab-separated ``<repeat>.water`` file, whose
    path is returned.  Python 2 module (uses ``print >>``).
    """
    input_dir = '/'.join(fasta_file.split('/')[:-1] + [''])
    repeat_name = fasta_file.split('/')[-1].split('.')[0]
    repeat_data = input_dir + repeat_name + '.water'
    repeat_data_file = open(repeat_data, 'w')
    sequences = {}
    information = {}
    # Parse the gtf_to_fasta output: headers look like
    # '>N "Exon_i" chrom start-end' (assumed -- TODO confirm); sequence bodies
    # may span multiple lines.
    for line in open(fasta_file, 'r'):
        if line[0] == '>':
            seq = '>' + line.split(' ')[1].strip('"')
            sequences[seq] = []
            chrom = line.split(' ')[2][:-1]
            location = line.split(' ')[3].split('-')
            information[seq] = [chrom, location]
        else:
            sequences[seq].append(line.strip())
    for key in sequences.keys():
        sequences[key] = ''.join(sequences[key])
    # For each sequence pair on the same chromosome, get the Smith-Waterman
    # identity of the alignment together with the distance between them.
    identities = {}
    for i in range(1, len(sequences.keys())):
        id1 = '>Exon_' + str(i)
        for line in open(fasta_file):
            if line[0] == '>':
                id2 = '>' + line.split(' ')[1].strip('"')
                if information[id1][0] == information[id2][0]:
                    start1 = int(information[id1][1][0])
                    end1 = int(information[id1][1][1])
                    start2 = int(information[id2][1][0])
                    end2 = int(information[id2][1][1])
                    if end1 < start2:
                        distance = start2 - end1
                    else:
                        distance = start1 - end2
                    if distance != 0:
                        # NOTE(review): the three temp files go to three different
                        # directories (~/research/scratch, default tmp, ~/scratch);
                        # this looks accidental -- confirm which scratch area is meant.
                        asequence_fd, asequence_file = tempfile.mkstemp(dir='%s/research/scratch' % os.environ['HOME'])
                        a_file = open(asequence_file, 'w')
                        bsequence_fd, bsequence_file = tempfile.mkstemp()
                        b_file = open(bsequence_file, 'w')
                        water_out_fd, water_out_file = tempfile.mkstemp(dir='%s/scratch' % os.environ['HOME'])
                        print >> a_file, id1
                        print >> a_file, sequences[id1]
                        a_file.close()
                        print >> b_file, id2
                        print >> b_file, sequences[id2]
                        b_file.close()
                        subprocess.call('water %s %s %s -gapopen %f -gapextend %f -auto' %(asequence_file, bsequence_file, water_out_file, options.gap_open, options.gap_extend), shell=True)
                        # Bug fix: this loop previously iterated the undefined
                        # name 'needle_out_file' (leftover from a needle-based
                        # version) and raised NameError on first use.
                        for line in open(water_out_file):
                            if line[0:10] == '# Identity':
                                identities[distance] = float(line.strip().split('(')[1][:-2])/100
                        os.close(asequence_fd)
                        os.remove(asequence_file)
                        os.close(bsequence_fd)
                        os.remove(bsequence_file)
                        os.close(water_out_fd)
                        os.remove(water_out_file)
    # NOTE(review): identities is keyed by distance, so two pairs at the same
    # distance keep only the last identity -- confirm this is acceptable.
    for key in identities.keys():
        print >> repeat_data_file, '\t'.join([str(key), str(identities[key]), repeat_name])
    # Bug fix: close (and thereby flush) the data file before returning; the
    # caller immediately 'cat's this file, and the original left it open, so
    # buffered rows could be lost.  (Replaces a no-op trailing 'print >> f,'.)
    repeat_data_file.close()
    return repeat_data
def distances(bed_file):
    """Write pairwise intra-chromosome distances between repeat midpoints.

    For every ordered pair of BED records on the same chromosome with distinct
    midpoints, the absolute distance between midpoints is written on its own
    line to ``<repeat>.dist`` next to the input; the path is returned.

    Fixes vs. original: the BED file is parsed once instead of being re-opened
    and re-read for every line (O(n^2) file reads); blank lines (which the
    rmsk_split output ends with) are skipped instead of raising IndexError;
    the output file is closed before returning so it is fully flushed.
    """
    input_dir = '/'.join(bed_file.split('/')[:-1] + [''])
    repeat_name = bed_file.split('/')[-1].split('.')[0]
    dist_path = input_dir + repeat_name + '.dist'
    # Parse once: list of (chromosome, midpoint) records.
    records = []
    for line in open(bed_file, 'r'):
        if not line.strip():
            continue  # tolerate the trailing blank line rmsk_split emits
        contents = line.strip().split('\t')
        records.append((contents[0], (float(contents[1]) + float(contents[2])) / 2))
    distances_file = open(dist_path, 'w')
    for chromosome, mean_position in records:
        for chromosome1, mean_position1 in records:
            # Same ordering/duplication as the original: each unordered pair
            # is emitted twice; identical midpoints are skipped.
            if chromosome == chromosome1 and mean_position != mean_position1:
                distances_file.write(str(math.fabs(mean_position1 - mean_position)) + '\n')
    distances_file.close()
    return dist_path
################################################################
# main()
################################################################
# Standard script entry point: delegate to main() defined above.
if __name__ == '__main__':
    main()
|
UTF-8
|
Python
| false | false | 2,014 |
4,595,615,014,025 |
501432858a94cd0965ed3c3ce031b40666cb3dbc
|
2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8
|
/pardus/playground/serbulent/system/base/rsyslog/comar/service.py
|
58896d96b1443d2d8be6202d9f1df382bfb8b409
|
[] |
no_license
|
aligulle1/kuller
|
https://github.com/aligulle1/kuller
|
bda0d59ce8400aa3c7ba9c7e19589f27313492f7
|
7f98de19be27d7a517fe19a37c814748f7e18ba6
|
refs/heads/master
| 2021-01-20T02:22:09.451356 | 2013-07-23T17:57:58 | 2013-07-23T17:57:58 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from comar.service import *
serviceType = "local"
serviceDesc = _({"en": "System Message Logger",
"tr": "Sistem Mesajları Kütüğü"})
serviceDefault = "on"
@synchronized
def start():
    """Start rsyslogd (compatibility level 3, detached) via its pid file."""
    startService(command="/usr/sbin/rsyslogd",
                 args="-c3",
                 pidfile="/var/run/rsyslogd.pid",
                 detach=True)
    # Block until the syslog socket appears so dependents can log immediately.
    waitBus("/dev/log", stream=False)
@synchronized
def stop():
    """Stop rsyslogd via its pid file and notify COMAR."""
    stopService(pidfile="/var/run/rsyslogd.pid",
                donotify=True)
def status():
    """Return whether rsyslogd's pid file points at a running process."""
    return isServiceRunning("/var/run/rsyslogd.pid")
|
UTF-8
|
Python
| false | false | 2,013 |
5,016,521,824,235 |
4d979a84979251d174ce50cbd827f237761946ac
|
cbb952b9ccab54c29a0489efab1ee6011606f9f3
|
/cluster-setup/fabfile.py
|
018522dae4036087119007d0aa66682d60e5a79e
|
[
"Apache-2.0"
] |
permissive
|
mzasada/solr-at
|
https://github.com/mzasada/solr-at
|
286ee5fc84f2f8edc82d4697df9721f7868b446f
|
1193af8cddf6ce2d0a820f998e0a03edcd0d5568
|
refs/heads/master
| 2020-05-16T21:18:02.279877 | 2014-10-26T14:39:48 | 2014-10-26T14:39:48 | 25,462,405 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# coding=utf-8
from fabric.api import task
from fabric.context_managers import cd
from fabric.operations import run
from fabtools.vagrant import vagrant
from fabtools import require, oracle_jdk
SOLR_4_6_0_DOWNLOAD_URL = 'http://archive.apache.org/dist/lucene/solr/4.6.0/solr-4.6.0.tgz'
SOLR_4_6_0_LOCATION = "~/solr-4.6.0"
SOLR_4_6_0_DD_LOCATION = "~/solr-4.6.0-dd"
@task
def provision():
    """Install dependencies, fetch Solr, and bootstrap both SolrCloud clusters."""
    install_dependencies()
    download_solr_distro()
    bootstrap_cluster(SOLR_4_6_0_LOCATION, 8000)
    # Bug fix: the second cluster belongs in the '-dd' copy on port 8100, as in
    # restart_clusters(); bootstrapping SOLR_4_6_0_LOCATION twice made the
    # second call 'rm -rf' the node directories the first cluster was using.
    bootstrap_cluster(SOLR_4_6_0_DD_LOCATION, 8100)
@task
def restart_clusters():
    """Kill every running JVM on the box and re-bootstrap both clusters."""
    # '|| exit 0' keeps fabric from aborting when no java process exists yet.
    run('killall -9 java || exit 0')
    bootstrap_cluster(SOLR_4_6_0_LOCATION, 8000)
    bootstrap_cluster(SOLR_4_6_0_DD_LOCATION, 8100)
def install_dependencies():
    """Install the Oracle JDK and dtach (used to detach long-running JVMs)."""
    oracle_jdk.install_from_oracle_site()
    require.deb.package('dtach', update=True)
def download_solr_distro():
    """Download and unpack the Solr 4.6.0 tarball into the home directory."""
    with cd('~/'):
        # Bug fix: the quiet flag was typed with a Unicode en dash ('–q'),
        # which wget treats as an extra URL argument, not as an option.
        run('wget -o solr-download.log -q {0} || exit 0'.format(SOLR_4_6_0_DOWNLOAD_URL))
        # NOTE(review): tar's second positional argument names a member to
        # extract, not a destination directory -- confirm the intent here.
        run('tar xf solr-4.6.0.tgz {0}'.format(SOLR_4_6_0_LOCATION))
@task
def mv_solr():
    """Replace the '-dd' Solr home with a fresh copy of /vagrant/example."""
    # Same three remote commands as before, in the same order.
    for command in ('rm -rf {}', 'mkdir -p {}', 'mv /vagrant/example {}'):
        run(command.format(SOLR_4_6_0_DD_LOCATION))
def bootstrap_cluster(solr_home, port_base):
    """Create and start a fresh two-node SolrCloud cluster under *solr_home*.

    node-1 runs an embedded ZooKeeper (-DzkRun) and uploads the config set;
    node-2 joins it.  Jetty listens on port_base+1 and port_base+2.
    """
    with cd(solr_home):
        # Start from pristine copies of the stock 'example' directory.
        run('rm -rf node-1')
        run('rm -rf node-2')
        run('cp -r example node-1')
        run('cp -r example node-2')
        with cd('node-1'):
            run('cp /vagrant/jetty/jetty.xml etc/jetty.xml')
            run_in_bg('java '
                      '-Djetty.port={} '
                      '-DzkRun '
                      '-DnumShards=2 '
                      '-Dbootstrap_confdir=./solr/collection1/conf '
                      '-Dcollection.configName=myconf '
                      '-jar start.jar '
                      '>& /dev/null < /dev/null'.format(port_base + 1))
        with cd('node-2'):
            run('cp /vagrant/jetty/jetty.xml etc/jetty.xml')
            # zkHost is node-1's jetty port + 1000, i.e. port_base + 1001.
            run_in_bg('java '
                      '-Djetty.port={} '
                      '-DzkHost=localhost:{} '
                      '-jar start.jar '
                      '>& /dev/null < /dev/null'.format(port_base + 2, port_base + 1001))
def run_in_bg(cmd):
    """Run *cmd* detached in a throwaway dtach session so it survives the SSH session."""
    session = "`mktemp -u /tmp/dtach.XXXX`"
    run("dtach -n {} {}".format(session, cmd))
|
UTF-8
|
Python
| true | false | 2,014 |
3,521,873,206,089 |
95f9ea7418b53e34d186c481c5074d36041bfd8c
|
35b7c30fa1553eefcada108d5aa559a9c06e32ad
|
/A3/connect-4/include/abstractscene_basis.py
|
6d346501b8ea5c6d53a0dc8d17dbba1df147f1f1
|
[] |
no_license
|
lukefrasera/cs382ai
|
https://github.com/lukefrasera/cs382ai
|
1a1c2810f5ce7f3ac4748b136dff1984b4ec3403
|
5fd5de3901615c9f7dd97d0ceddcfd7e88d7a26a
|
refs/heads/master
| 2022-06-14T23:20:18.467996 | 2013-10-18T05:12:07 | 2013-10-18T05:12:07 | 261,259,330 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from OpenGL.GLUT import *
from OpenGL.GL import *
from OpenGL.GLU import *
import sys
class abstractScene:
    """Minimal GLUT scene: fixed camera looking at a stretched wire cube.

    Subclasses override render/update and the input handlers for real scenes.
    """
    def __init__(self):
        # Initial window size in pixels.
        self.w = 500
        self.h = 500
    def initialize(self):
        """One-time GL state setup (called after the window exists)."""
        glClearColor(0.0,0.0,0.0,0.0)
        glShadeModel (GL_FLAT)
    def update(self):pass  # idle callback; no-op in the base scene
    def render(self):
        """Draw one frame: clear, position the camera, draw a scaled wire cube."""
        glClear(GL_COLOR_BUFFER_BIT)
        glLoadIdentity()
        gluLookAt (0.0, 0.0, 5.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0)
        glScalef (1.0, 2.0, 1.0)
        glutWireCube (1.0)
        glFlush()
    def resize(self, w, h):
        """GLUT reshape callback: reset viewport and projection frustum."""
        glViewport (0, 0, w, h)
        glMatrixMode (GL_PROJECTION)
        glLoadIdentity ()
        glFrustum (-1.0, 1.0, -1.0, 1.0, 1.5, 20.0)
        glMatrixMode (GL_MODELVIEW)
    def glutKeyPress(self, key, x, y):
        """Keyboard callback: ESC (code 27) exits the application."""
        if key == chr(27):
            sys.exit(0)
    def glutKeyRelease(self, key, x, y):pass  # no-op key-release handler
    def glutMouse(self, x, y):pass            # no-op passive-motion handler
    def glutMouseClick(self, button, state, x, y):pass  # no-op click handler
class glapplication:
    """Thin GLUT bootstrap: creates the window, wires a scene's callbacks,
    and enters the GLUT main loop (which never returns)."""
    def __init__(self, scene):
        self.scene = scene
        self.initializeScene()
        self.initializeCallbacks()
        # glutMainLoop() does not return, so cleanupScene() is never called.
        glutMainLoop()
    def initializeScene(self):
        """Create the GLUT window and let the scene set up its GL state."""
        glutInit(sys.argv)
        glutInitDisplayMode(GLUT_SINGLE | GLUT_DEPTH | GLUT_RGBA)
        glutInitWindowSize(self.scene.w, self.scene.h)
        glutCreateWindow("Connect 4")
        self.scene.initialize()
    def cleanupScene(self):pass  # placeholder; see note in __init__
    def initializeCallbacks(self):
        """Register the scene's methods as GLUT callbacks."""
        glutDisplayFunc(self.scene.render)
        glutReshapeFunc(self.scene.resize)
        glutIdleFunc(self.scene.update)
        glutKeyboardFunc(self.scene.glutKeyPress)
        glutKeyboardUpFunc(self.scene.glutKeyRelease)
        glutMouseFunc(self.scene.glutMouseClick)
        glutPassiveMotionFunc(self.scene.glutMouse)
def main():
    """Build the default scene and run it under the GLUT application shell."""
    glapplication(abstractScene())


if __name__ == '__main__':
    main()
|
UTF-8
|
Python
| false | false | 2,013 |
1,769,526,572,796 |
b86e3b1098636403243ba091a59b051fcb155b43
|
5dd149c84d747db5b412110dccf75b68cb7725e4
|
/test/src/testGensim.py
|
278a473ff9c31388f004e9dc80ddd71af081e831
|
[] |
no_license
|
whzhcahzxh/test
|
https://github.com/whzhcahzxh/test
|
22dbcc051f556dc08c69ef1618af092a26bb2a28
|
e31d8a5df0571c16ba48c83b737546af50164722
|
refs/heads/master
| 2016-08-05T01:16:08.083784 | 2014-01-23T03:42:02 | 2014-01-23T03:42:02 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#-*- coding:UTF-8 -*-
'''
Created on Dec 24, 2013
@author: administrator
'''
import jieba
from gensim import corpora, models, similarities
# import pymongo
# conn = pymongo.Connection('localhost', 27017);
# db = conn.test
# dbCollection = db.classifier
# Demo pipeline: tokenize five Chinese microblog posts with jieba, build a
# gensim dictionary/TF-IDF/LSI model, then rank documents by cosine similarity
# to the first post.  Python 2 script (uses print statements).
sentences = []
sentences.append("【我国乙肝疫苗接种13年 接种后死亡188例】昨天下午,国家食药监总局、卫生计生委联合召开发布会,就社会关注的乙肝疫苗疑似致死事件进行通报。根据中国疾控中心数据,从2000年到今年12月,接种乙肝疫苗后死亡的疑似异常反应病例已上报188例,最终确定为疫苗异常反应的18例。")
sentences.append("【呵护微笑天使】江豚,目前我国淡水水域中唯一的胎生哺乳动物,由于栖息地的丧失,生存水质被污染和破坏,2006年到2012年,长江江豚平均每72小时就消失一头,任此趋势发展下去,长江江豚最快将在10年内灭绝。保护江豚,别让江豚微笑告别。[心]大家一起转发努力。")
sentences.append("【步Facebook和Twitter后尘,伊朗政府屏蔽微信】伊朗政府负责监管网络内容的部门发言称,将在全国范围内屏蔽微信。Global Voices通过对伊朗当地网络用户的确认,证实微信在当地已经无法使用。当然还是可以翻墙。Facebook和Twitter此前同样遭到屏蔽。")
sentences.append("【台统派:大陆是台湾希望 强盛做得好统一会愈快】台湾中国统一联盟桃竹苗荣誉分会长阎中杰说,中国一定强,台湾才有希望,美国对台湾的友善是假的,利用我们买飞机大炮。大陆强盛做得好,台湾更认同,更有利两岸统一,可以领导台湾,大陆愈改愈好,相信两岸统一会愈快。")
sentences.append("HR吐血整理的面试技巧宝典 一起学习下,争取明年找个好工作!")
# Tokenize each document with jieba.
words=[]
for doc in sentences:
    words.append(list(jieba.cut(doc)))
# print words
# Build the id <-> token mapping.
dic = corpora.Dictionary(words)
# print dic
# print dic.token2id
# for word,index in dic.token2id.iteritems():
# print word +" 编号为:"+ str(index)
# Corpus is simply an object which, when iterated over, returns its documents represented as sparse vectors.
# An element like (9, 2) means the word with id 9 occurs twice in that document.
corpus = [dic.doc2bow(text) for text in words]
# print corpus
tfidf = models.TfidfModel(corpus)
# vec = [(0, 1), (4, 1)]
# print tfidf[vec]
corpus_tfidf = tfidf[corpus]
# for doc in corpus_tfidf:
# print doc
# index = similarities.SparseMatrixSimilarity(tfidf[corpus], num_features=10)
# sims = index[tfidf[vec]]
# print list(enumerate(sims))
# Train the LSI model
lsi = models.LsiModel(corpus_tfidf, id2word=dic, num_topics=3)
lsiout=lsi.print_topics(2)
# print lsiout[0]
# print lsiout[1]
# dbCollection.insert({"classifier":"lsiModel","content":lsiout})
corpus_lsi = lsi[corpus_tfidf]
# for doc in corpus_lsi:
# print doc
# # Train the LDA model
# lda = models.LdaModel(corpus_tfidf, id2word=dic, num_topics=3)
# ldaOut=lda.print_topics(2)
# print ldaOut[0]
# print ldaOut[1]
# corpus_lda = lda[corpus_tfidf]
# for doc in corpus_lda:
# print doc
# Similarity search: project the query into LSI space and rank documents.
index = similarities.MatrixSimilarity(lsi[corpus])
query = sentences[0]
query_bow = dic.doc2bow(list(jieba.cut(query)))
print query_bow
print tfidf[query_bow]
query_lsi = lsi[query_bow]
print query_lsi
# Compute its cosine similarity to each document in the index:
sims = index[query_lsi]
print list(enumerate(sims))
sort_sims = sorted(enumerate(sims), key=lambda item: -item[1])
print sort_sims
|
UTF-8
|
Python
| false | false | 2,014 |
16,192,026,741,254 |
0ebe319cde5390fb478ebe8fbe87074994a8cbcc
|
270ee31292cb7b045f9848393a05eaf09ce5fc79
|
/bioscope/scope_rules.py
|
6cc86dcf074b721788f42d6a564289d02144575d
|
[] |
no_license
|
likaiguo/pln_inco
|
https://github.com/likaiguo/pln_inco
|
bdcad55b2ccdb6cf472ac23699a8e41ee092a9b6
|
255ed3bcf9a89db171867fc4e2f6c51e6da79b5c
|
refs/heads/master
| 2021-01-16T22:13:46.038862 | 2014-12-01T02:04:15 | 2014-12-01T02:04:15 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
import nltk,nltk.tokenize,xml.etree
import os,codecs,fnmatch,re,types, copy,shutil
import pickle
from sys import *
from pln_inco import graphviz,penn_treebank,stanford_parser,genia_tagger
from string import *
import pln_inco.bioscope.util
import time
import sqlite3
import random
def apply_sr0 (dbname, scope_table_name, rule_field_name):
    """
    Given an attribute table and a target column, fill that column according to
    scope rule 0.

    Scope rule 0: if a token's lemma is 'suggest' and it is a hedge cue (its
    token_num equals hc_start), its scope runs from the word 'suggest' up to the
    word just before the final period.  (Deliberately imprecise, per the original
    author: 'suggest' may appear as 'has been suggested', whose scope differs.)
    If the sentence has no final period, no scope end is assigned.

    @arg dbname: name of the SQLite database file
    @type dbname: C{string}
    @arg scope_table_name: scope table to update
    @type scope_table_name: C{string}
    @arg rule_field_name: column of the table receiving the rule's result
    @type rule_field_name: C{string}
    """
    # Set up the connection
    conn= sqlite3.connect(dbname)
    conn.text_factory = str
    conn.row_factory=sqlite3.Row
    c=conn.cursor()
    # First add the column, in case it does not exist yet
    try:
        c.execute('alter table '+scope_table_name+' add column '+ rule_field_name+' text default \'O\'')
    except sqlite3.OperationalError:
        pass
    conn.commit()
    # Reset every row to 'O' (in case the table already existed)
    c.execute('update '+scope_table_name+' set '+ rule_field_name+'=\'O\'')
    conn.commit()
    # Now walk the table and apply the rule, grouping rows by
    # (document, sentence, hedge-cue start).
    scope_positions=[]
    c.execute('select * from '+ scope_table_name +' order by document_id,sentence_id,hc_start,token_num')
    last_sentence=''
    last_hc_start=''
    row=c.fetchone()
    while (row):
        # Load the tokens of the current sentence/cue group into a list
        last_sentence=row['sentence_id']
        last_hc_start=row['hc_start']
        sentence_tokens=[]
        sentence_tokens.append(row)
        while 1:
            row=c.fetchone()
            if row:
                if row['sentence_id'] == last_sentence and row['hc_start']==last_hc_start:
                    sentence_tokens.append(row)
                    # Remember document_id/sentence_id/hc_start (identical for all rows of the group)
                    document_id=row['document_id']
                    sentence_id=row['sentence_id']
                    hc_start=row['hc_start']
                else:
                    break
            else:
                break
        # sentence_tokens now holds one (sentence, hedge-cue) group.
        # NOTE(review): document_id/sentence_id/hc_start are only assigned when a
        # group has at least two rows; a single-row group reuses the previous
        # group's ids (NameError on a leading single-row group) -- confirm.
        first=-1
        last=-1
        hc_token=[(x['token_num'],x['lemma'],x['hc_start']) for x in sentence_tokens if x['lemma']=='suggest' and x['token_num']==x['hc_start']]
        if hc_token:
            #print 'Encontré un caso de la regla en la oración',sentence_id, 'comienzo de hc ',hc_token
            first=hc_token[0][0]
            punto_final=[x['token_num'] for x in sentence_tokens if x['lemma']=='.']
            if punto_final:
                last=punto_final[0]-1
            else:
                last=-1
        # Record the (first, last) scope pair for this sentence
        if first >=0 or last >=0:
            scope_positions.append((document_id,sentence_id,hc_start,first,last))
    # Finally, perform the corresponding updates: 'F' marks the scope's first
    # token, 'L' its last.
    for s in scope_positions:
        if s[3] != -1:
            #print 'Actualizo ',scope_table_name, rule_field_name, s[0],s[1],s[2], 'en la posición ',s[3]
            c.execute('update '+scope_table_name+' set '+rule_field_name+'=\'F\' where document_id=? and sentence_id=? and hc_start=? and token_num=?',(s[0],s[1],s[2],s[3]))
        if s[4] != -1:
            #print 'Actualizo L',scope_table_name, rule_field_name, s[0],s[1],s[2], 'en la posición ',s[4]
            c.execute('update '+scope_table_name+' set '+rule_field_name+'=\'L\' where document_id=? and sentence_id=? and hc_start=? and token_num=?',(s[0],s[1],s[2],s[4]))
    conn.commit()
|
UTF-8
|
Python
| false | false | 2,014 |
19,318,762,934,096 |
aa48596fdd5afb508dc4dde35442222e0fdd38ef
|
8e7a81a1fd4096b9dcefb6ef293c27a9cb1a22e9
|
/configuration/datatypes.py
|
b4f37072789d5908c67423014977a2d8569b755a
|
[
"GPL-3.0-only"
] |
non_permissive
|
bitsworking/blink-cocoa
|
https://github.com/bitsworking/blink-cocoa
|
61a2526f3c3d2e90c4ed6b2d34e9accf37d97faa
|
0f282283fd4a775fead0d518000a4b51d8ddf6cb
|
refs/heads/master
| 2021-01-25T07:27:36.110564 | 2014-04-25T15:05:54 | 2014-04-25T15:05:54 | 22,062,521 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Copyright (C) 2010 AG Projects. See LICENSE for details.
#
"""
Definitions of datatypes for use in settings extensions.
"""
__all__ = ['Digits', 'AccountSoundFile', 'AnsweringMachineSoundFile', 'AccountTLSCertificate', 'SoundFile', 'UserDataPath', 'UserIcon', 'UserSoundFile','HTTPURL', 'LDAPdn', 'LDAPusername', 'NightVolume']
from Foundation import NSLocalizedString
import ldap
import os
import hashlib
import urlparse
from application.python.descriptor import WriteOnceAttribute
from resources import ApplicationData, Resources
from sipsimple.configuration.datatypes import Hostname
## PSTN datatypes
class Digits(str):
    """A string of dialed digits (plain str subclass used as a settings type)."""
    pass
## Path datatypes
class UserDataPath(unicode):
    """A path stored relative to the application data directory."""
    def __new__(cls, path):
        # Normalize and strip the ApplicationData prefix so only the relative
        # part is persisted.
        path = os.path.expanduser(os.path.normpath(path))
        if path.startswith(ApplicationData.directory+os.path.sep):
            path = path[len(ApplicationData.directory+os.path.sep):]
        return unicode.__new__(cls, path)
    @property
    def normalized(self):
        # Absolute path inside the application data directory.
        return ApplicationData.get(self)
class SoundFile(object):
    """A sound file bundled with the application, plus a 0-100 volume level.

    Serialized as u'<path>,<volume>'; paths are stored relative to Resources.
    """
    def __init__(self, path, volume=100):
        self.path = path
        self.volume = int(volume)
        if self.volume < 0 or self.volume > 100:
            raise ValueError(NSLocalizedString("Illegal volume level: %d", "Preference option error") % self.volume)
    def __getstate__(self):
        return u'%s,%s' % (self.__dict__['path'], self.volume)
    def __setstate__(self, state):
        # A bare path (no trailing ',volume') uses the default volume.
        try:
            path, volume = state.rsplit(u',', 1)
        except ValueError:
            self.__init__(state)
        else:
            self.__init__(path, volume)
    def __repr__(self):
        return '%s(%r, %r)' % (self.__class__.__name__, self.path, self.volume)
    def _get_path(self):
        # Absolute path inside the Resources directory.
        return Resources.get(self.__dict__['path'])
    def _set_path(self, path):
        # Store the path relative to Resources when it lives there.
        path = os.path.normpath(path)
        if path.startswith(Resources.directory+os.path.sep):
            path = path[len(Resources.directory+os.path.sep):]
        self.__dict__['path'] = path
    path = property(_get_path, _set_path)
    del _get_path, _set_path
class NightVolume(object):
    """Volume level to apply between start_hour and end_hour (0-23 each).

    Serialized as u'<start_hour>,<end_hour>,<volume>'.
    """
    def __init__(self, start_hour=22, end_hour=8, volume=10):
        self.start_hour = int(start_hour)
        self.end_hour = int(end_hour)
        self.volume = int(volume)
        if self.volume < 0 or self.volume > 100:
            raise ValueError(NSLocalizedString("Illegal volume level: %d", "Preference option error") % self.volume)
        if self.start_hour < 0 or self.start_hour > 23:
            raise ValueError(NSLocalizedString("Illegal start hour value: %d", "Preference option error") % self.start_hour)
        if self.end_hour < 0 or self.end_hour > 23:
            raise ValueError(NSLocalizedString("Illegal end hour value: %d", "Preference option error") % self.end_hour)
    def __getstate__(self):
        return u'%s,%s,%s' % (self.start_hour, self.end_hour, self.volume)
    def __setstate__(self, state):
        # NOTE(review): the fallback passes the whole state string as
        # start_hour, which int() will reject for any non-numeric state --
        # confirm what legacy format this was meant to accept.
        try:
            start_hour, end_hour, volume = state.split(u',')
        except ValueError:
            self.__init__(state)
        else:
            self.__init__(start_hour, end_hour, volume)
    def __repr__(self):
        return '%s(%r, %r, %r)' % (self.__class__.__name__, self.start_hour, self.end_hour, self.volume)
class AccountSoundFile(object):
    """A per-account sound: either a concrete SoundFile or a reference to a
    global default setting (serialized as u'default:<setting path>')."""
    class DefaultSoundFile(object):
        """Marker holding the dotted path of the global setting to defer to."""
        def __init__(self, setting):
            self.setting = setting
        def __repr__(self):
            return 'AccountSoundFile.DefaultSoundFile(%s)' % self.setting
        __str__ = __repr__
    def __init__(self, sound_file, *args, **kwargs):
        if isinstance(sound_file, self.DefaultSoundFile):
            self._sound_file = sound_file
            if args or kwargs:
                raise ValueError("other parameters cannot be specified if sound file is instance of DefaultSoundFile")
        else:
            self._sound_file = SoundFile(sound_file, *args, **kwargs)
    def __getstate__(self):
        if isinstance(self._sound_file, self.DefaultSoundFile):
            return u'default:%s' % self._sound_file.setting
        else:
            return u'file:%s' % self._sound_file.__getstate__()
    def __setstate__(self, state):
        type, value = state.split(u':', 1)
        if type == u'default':
            self._sound_file = self.DefaultSoundFile(value)
        elif type == u'file':
            self._sound_file = SoundFile.__new__(SoundFile)
            self._sound_file.__setstate__(value)
    @property
    def sound_file(self):
        # Resolve a DefaultSoundFile by walking its dotted path through the
        # global SIPSimpleSettings instance.
        if isinstance(self._sound_file, self.DefaultSoundFile):
            from sipsimple.configuration.settings import SIPSimpleSettings
            setting = SIPSimpleSettings()
            for comp in self._sound_file.setting.split('.'):
                setting = getattr(setting, comp)
            return setting
        else:
            return self._sound_file
    def __repr__(self):
        if isinstance(self._sound_file, self.DefaultSoundFile):
            return '%s(%r)' % (self.__class__.__name__, self._sound_file)
        else:
            return '%s(%r, volume=%d)' % (self.__class__.__name__, self._sound_file.path, self._sound_file.volume)
    def __unicode__(self):
        if isinstance(self._sound_file, self.DefaultSoundFile):
            return u'DEFAULT'
        else:
            return u'%s,%d' % (self._sound_file.path, self._sound_file.volume)
class AnsweringMachineSoundFile(object):
    """Answering machine announcement: either a UserSoundFile or a reference
    to a bundled default (serialized as u'default:<resource path>')."""
    class DefaultSoundFile(object):
        """Marker holding the Resources-relative path of the bundled default."""
        def __init__(self, setting):
            self.setting = setting
        def __repr__(self):
            return 'AnsweringMachineSoundFile.DefaultSoundFile(%s)' % self.setting
        __str__ = __repr__
    def __init__(self, sound_file, volume=100):
        if isinstance(sound_file, self.DefaultSoundFile):
            self._sound_file = sound_file
        else:
            self._sound_file = UserSoundFile(sound_file, volume)
    def __getstate__(self):
        if isinstance(self._sound_file, self.DefaultSoundFile):
            return u'default:%s' % self._sound_file.setting
        else:
            return u'file:%s' % self._sound_file.__getstate__()
    def __setstate__(self, state):
        type, value = state.split(u':', 1)
        if type == u'default':
            self._sound_file = self.DefaultSoundFile(value)
        elif type == u'file':
            self._sound_file = UserSoundFile.__new__(UserSoundFile)
            self._sound_file.__setstate__(value)
    @property
    def sound_file(self):
        # The default resolves to the bundled resource, wrapped as a UserSoundFile.
        if isinstance(self._sound_file, self.DefaultSoundFile):
            return UserSoundFile(Resources.get(self._sound_file.setting))
        else:
            return self._sound_file
    def __repr__(self):
        if isinstance(self._sound_file, self.DefaultSoundFile):
            return '%s(%r)' % (self.__class__.__name__, self._sound_file)
        else:
            return '%s(%r, volume=%d)' % (self.__class__.__name__, self._sound_file.path, self._sound_file.volume)
    def __unicode__(self):
        if isinstance(self._sound_file, self.DefaultSoundFile):
            return u'DEFAULT'
        else:
            return u'%s,%d' % (self._sound_file.path, self._sound_file.volume)
class UserSoundFile(SoundFile):
    """A SoundFile whose path is resolved against the application data
    directory instead of the bundled Resources."""
    def _get_path(self):
        return ApplicationData.get(self.__dict__['path'])
    def _set_path(self, path):
        # Store the path relative to ApplicationData when it lives there.
        path = os.path.normpath(path)
        if path.startswith(ApplicationData.directory+os.path.sep):
            path = path[len(ApplicationData.directory+os.path.sep):]
        self.__dict__['path'] = path
    path = property(_get_path, _set_path)
    del _get_path, _set_path
class AccountTLSCertificate(object):
    """Per-account TLS certificate path, or the application default.

    The default is represented by an empty DefaultTLSCertificate marker and
    serialized as u'default'; user paths are stored relative to ApplicationData.
    """
    class DefaultTLSCertificate(unicode): pass
    def __init__(self, path):
        if not path or path.lower() == u'default':
            path = self.DefaultTLSCertificate()
        self.path = path
    def __getstate__(self):
        if isinstance(self.__dict__['path'], self.DefaultTLSCertificate):
            return u'default'
        else:
            return self.path
    def __setstate__(self, state):
        self.__init__(state)
    def __unicode__(self):
        if isinstance(self.__dict__['path'], self.DefaultTLSCertificate):
            return u'Default'
        else:
            return self.__dict__['path']
    def _get_path(self):
        # NOTE(review): for the default, this is Resources.get('') (the marker
        # is an empty unicode) -- confirm that yields the intended bundle path.
        if isinstance(self.__dict__['path'], self.DefaultTLSCertificate):
            return Resources.get(self.__dict__['path'])
        else:
            return ApplicationData.get(self.__dict__['path'])
    def _set_path(self, path):
        if not path or path.lower() == u'default':
            path = self.DefaultTLSCertificate()
        if not isinstance(path, self.DefaultTLSCertificate):
            path = os.path.normpath(path)
            if path.startswith(ApplicationData.directory+os.path.sep):
                path = path[len(ApplicationData.directory+os.path.sep):]
        self.__dict__['path'] = path
    path = property(_get_path, _set_path)
    del _get_path, _set_path
    @property
    def normalized(self):
        return self.path
## Miscellaneous datatypes
class HTTPURL(object):
    """A validated http/https URL; component attributes proxy to the parsed URL."""
    url = WriteOnceAttribute()
    def __init__(self, value):
        url = urlparse.urlparse(value)
        if url.scheme not in (u'http', u'https'):
            raise ValueError(NSLocalizedString("Illegal HTTP URL scheme (http and https only): %s", "Preference option error") % url.scheme)
        # check port and hostname
        Hostname(url.hostname)
        if url.port is not None:
            if not (0 < url.port < 65536):
                raise ValueError(NSLocalizedString("Illegal port value: %d", "Preference option error") % url.port)
        self.url = url
    def __getstate__(self):
        return unicode(self.url.geturl())
    def __setstate__(self, state):
        self.__init__(state)
    def __getitem__(self, index):
        return self.url.__getitem__(index)
    def __getattr__(self, attr):
        # Expose the standard urlparse components as attributes of this object.
        if attr in ('scheme', 'netloc', 'path', 'params', 'query', 'fragment', 'username', 'password', 'hostname', 'port'):
            return getattr(self.url, attr)
        else:
            raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, attr))
    def __unicode__(self):
        return unicode(self.url.geturl())
class LDAPdn(str):
    """An LDAP distinguished name, validated with ldap.dn.str2dn at creation."""
    def __new__(cls, value):
        value = str(value)
        try:
            ldap.dn.str2dn(value)
        except ldap.DECODING_ERROR:
            raise ValueError(NSLocalizedString("Illegal LDAP DN format: %s", "Preference option error") % value)
        return value
class LDAPusername(str):
    """An LDAP username; values containing ',' are validated as a DN."""
    def __new__(cls, value):
        value = str(value)
        # Plain usernames pass through; only comma-containing values are
        # treated (and validated) as distinguished names.
        if "," in value:
            try:
                ldap.dn.str2dn(value)
            except ldap.DECODING_ERROR:
                raise ValueError(NSLocalizedString("Illegal LDAP DN format for username: %s", "Preference option error") % value)
        return value
class UserIcon(object):
    """Icon file reference plus the etag it was stored with.

    Serialized as u'<path>,<etag>'.
    """
    def __init__(self, path, etag=None):
        self.path = path
        self.etag = etag
    def __getstate__(self):
        # NOTE(review): when etag is None this serializes as u'<path>,None'
        # and __setstate__ restores etag as the string 'None', not None --
        # confirm whether callers rely on that round-trip.
        return u'%s,%s' % (self.path, self.etag)
    def __setstate__(self, state):
        # A bare path (no trailing ',etag') leaves etag as None.
        try:
            path, etag = state.rsplit(u',', 1)
        except ValueError:
            self.__init__(state)
        else:
            self.__init__(path, etag)
    def __eq__(self, other):
        if isinstance(other, UserIcon):
            return self.path==other.path and self.etag==other.etag
        return NotImplemented
    def __ne__(self, other):
        equal = self.__eq__(other)
        return NotImplemented if equal is NotImplemented else not equal
    def __repr__(self):
        return '%s(%r, %r)' % (self.__class__.__name__, self.path, self.etag)
|
UTF-8
|
Python
| false | false | 2,014 |
12,163,347,431,011 |
e088973701956a8a651c3d78076075a8c7fb9b40
|
f76e7afdb14f9e91a4e12c7228016023f686f429
|
/media_planning/management/commands/check_omniturecode.py
|
fd21f0779cc2c914b041383fee9a3a0e45c20cca
|
[] |
no_license
|
sunshine027/MediaChooser
|
https://github.com/sunshine027/MediaChooser
|
5d581edd5760ae8b0540566f3e48c935a9e7048f
|
f5d52e0e14fc2bd988638c8c9eeece0ef8660bdd
|
refs/heads/master
| 2016-09-06T08:58:29.359080 | 2014-11-28T08:57:11 | 2014-11-28T08:57:11 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#! /usr/bin/env python
#coding=utf-8
from MediaChooser.ad.models import Ad
from MediaChooser.media_planning.models import Flight, DE_ClickData
from MediaChooser.media_planning.views import check_omniturecode
from django.core.management.base import BaseCommand
from django.core.mail import EmailMessage
from django.conf import settings
import pyodbc
import datetime
class Command(BaseCommand):
    """Daily management command that checks ad landing pages for Omniture code.

    Builds an HTML report of landing-page URLs missing the Omniture tracking
    code or failing to open, and mails it to the configured recipients.
    """
    def handle(self, *args, **options):
        # Bug fix: removed a leftover bare 'pass' statement that preceded the
        # real body (dead code from the command stub).
        today = datetime.date.today()
        nosc_urls_dict, cantopen_url_dict = check_omniturecode(today)
        # mail2note = ['[email protected]', '[email protected]']
        # mail2note = ['[email protected]']
        if len(nosc_urls_dict) > 0:
            html_content = '<h3>今日,以下URL中无Omniture监测代码,请及时查找原因!</h3>'
            for url in nosc_urls_dict:
                html_content += '<p>' + str(url) + '</p>'
                html_content += '<ul>'
                for flight in nosc_urls_dict[url]:
                    html_content += '<li>' + flight.ad.name.encode('utf8') + ': ' + flight.media.c_name.encode('utf8') + '->' + flight.channel.c_name.encode('utf8') + '->' + flight.media_ad_info.adform.encode('utf8') + '</li>'
                html_content += '</ul>'
        else:
            html_content = '<h3>今日,所有投放的联想广告所对应的landing page都添加了Omniture监测代码!</h3>'
        if len(cantopen_url_dict) > 0:
            html_content += '今日,以下广告投放的landing page无法打开:'
            for url in cantopen_url_dict:
                html_content += '<p>' + str(url) + '</p>'
                html_content += '<ul>'
                for flight in cantopen_url_dict[url]:
                    html_content += '<li>' + flight.ad.name.encode('utf8') + ': ' + flight.media.c_name.encode('utf8') + '->' + flight.channel.c_name.encode('utf8') + '->' + flight.media_ad_info.adform.encode('utf8') + '</li>'
                html_content += '</ul>'
        msg = EmailMessage('以下联想广告目标页无Omniture监测代码', html_content, '[email protected]', settings.CHECK_OMNITURECODE_MAIL_LIST)
        msg.content_subtype = 'html'
        msg.send()
|
UTF-8
|
Python
| false | false | 2,014 |
1,382,979,470,537 |
bedc7fa98932847b93bf020ac6c1fb04741e7ecc
|
cef91fdcadd54ee8ec3923e32e873cf72d393810
|
/faq/views.py
|
bb1c44a8cd1287725904eb59160259ab79eeb06b
|
[] |
no_license
|
kl4us/django-bootstrap-registration-faq
|
https://github.com/kl4us/django-bootstrap-registration-faq
|
76a6ca6d719ac5c3b262e339ee01d5eb1a8129a7
|
2556421d666371ce97ff6429ff3f0f1f727c8d25
|
refs/heads/master
| 2021-01-16T01:01:34.455733 | 2013-08-06T10:40:57 | 2013-08-06T10:40:57 | 11,921,412 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.shortcuts import get_object_or_404, render_to_response
from django.http import HttpResponseRedirect, HttpResponse
from django.core.urlresolvers import reverse
from django.template import RequestContext
from django.core.paginator import InvalidPage, EmptyPage
from django.utils import timezone
from django.core.context_processors import csrf
from django.conf import settings
from faq.models import Faq
from django.views.generic import DetailView
def faqs_list(request, slug=None, faq_id=None):
    """Render the list of published FAQs (drafts are excluded)."""
    published = Faq.objects.filter(draft=False)
    return render_to_response(
        'faq/faq_list.html',
        {'faqs': published},
        context_instance=RequestContext(request),
    )
class FaqDetailView(DetailView):
    """Detail page for a single FAQ entry (GET only)."""
    # Restrict the view to GET requests.
    methods = ['get']
    def get(self, request, *args, **kwargs):
        # Standard DetailView flow: resolve the object, build the context,
        # then render.
        self.object = self.get_object()
        context = self.get_context_data(
            object=self.object,
        )
        return self.render_to_response(context)
|
UTF-8
|
Python
| false | false | 2,013 |
7,310,034,387,914 |
6a9c6f45b54359dcf790c74695f59ac5546a9f8f
|
e1fe0e9b5961df76724a690117a81a527339dae1
|
/src/example_simple.py
|
6da3d72726da80e8fcb5c66a633b9d2a21f4e216
|
[
"GPL-1.0-or-later",
"LicenseRef-scancode-unknown-license-reference",
"GPL-3.0-only"
] |
non_permissive
|
InspectorB/3Dami-python
|
https://github.com/InspectorB/3Dami-python
|
4019d29e09ca67b25c94f00f0a6de151c9799923
|
f331210432c7cd8eb95311f34fc8b814343c6f0b
|
refs/heads/master
| 2020-05-17T05:33:41.350631 | 2014-09-05T10:59:41 | 2014-09-05T10:59:41 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
"""
In this example we count the number of operators that have been observed during
the 3Dami summer studio.
The design of the library is based on a Publisher/Subscriber pattern. That way
we can read the data once, and run it through several analysis chains. It was
designed to allow easy implementation of new feature extractors and reuse of
several common features. It has no facilities for complex analysis flows.
In this example we only make use of one thread, and we only need to read the
headers, and not the actual Thrift objects. See the MPI example for how to do
analysis in parallel.
"""
# Import the necessary classes for reading the data
from raw.load import DataLocation, Loader, Header
# Import the TypeCounter from the features packages
from features.counters import TypeCounter
# Import the TypeInfo utility class
from utils.types import TypeInfo
# Set data path. This is the path where the data resides. In this case
# we make use of the raw data, and not the data split into sessions.
# NOTE(review): machine-specific absolute path -- adjust before running.
dl_path = "/Users/vakkermans/data/TOC_3Dami/data/"
# Get the code for the operator type. The argument corresponds
# to the field names of the Thrift 'Data' struct. Other valid
# arguments are, for example, 'assignment', 'sessionStart',
# 'sessionEnd', etc.
wmop_code = TypeInfo.get_data_type_number('wmOp')
# Create the DataLocation that we will pass to the Loader instance
dl = DataLocation(dl_path)
print 'There are %s files at the specified data path' % dl.count_files()
# Create the Loader instance and pass it the DataLocation instance and
# a specification for what to load from the data files. There are
# several possible options:
# 1. Passing Loader.PUBLISH_BOTH will cause the Loader to load both the
# header and the Thrift object. It will publish them as a tuple
# (<header>, <message>) to any of its subscribers.
# 2. Passing Loader.PUBLISH_HEADER will cause the Loader to load only
# the header, and it will publish a tuple (<header>, None).
# 3. Passing Loader.PUBLISH_NONE will cause the Loader to only load the
# header, but it will publish nothing. It still needs to load the header
# to know how much to skip forward in the file (past the Thrift object).
# 4. Passing a function will cause the Loader to execute that function
# to decide what to publish. The function has to return either
# Loader.PUBLISH_BOTH, Loader.PUBLISH_HEADER, or Loader.PUBLISH_NONE.
# This function takes as an argument only a header Tuple. It can, for
# example, be used to only publish objects of a certain type.
# We won't use this, but this is just an example of how to only publish
# the headers for 'operator' observations.
def selector(header):
    """Example Loader selector: publish only operator ('wmOp') headers.

    Returns Loader.PUBLISH_HEADER for operator observations and
    Loader.PUBLISH_NONE for everything else.
    """
    # Bug fix: the original compared against the undefined name
    # 'wmev_code'; the operator type code computed above is 'wmop_code',
    # matching the surrounding comment ("only publish the headers for
    # 'operator' observations").
    if Header.type_data(header) == wmop_code:
        return Loader.PUBLISH_HEADER
    else:
        return Loader.PUBLISH_NONE
# loader = Loader(dl, selector)
# Instead, we use the simple form
loader = Loader(dl, Loader.PUBLISH_HEADER)
# So now we've set up the loader to only publish the headers for
# and it does so for any kind of observation. It publishes these as
# a (<header>, None) tuple to its subscribers. That means that in
# order to do anything useful we'll need to create an Instance of
# some sort of subscriber. Here we'll be using a simple one called
# TypeCounter().
# We create the typecounter and subscribe it to the loader.
tc = TypeCounter()
loader.subscribe(tc)
# To start reading the data and to start counter observation types
# we tell the loader to start reading.
# NOTE(review): read() appears to process every file before returning;
# confirm against the Loader implementation.
loader.read()
# We can get the results from the TypeCounter by calling get_result() on it.
tc_data = tc.get_result()['data']
# Let's print the results
for key,val in tc_data.items():
    print '%s %s' % (key.ljust(15), val)
|
UTF-8
|
Python
| false | false | 2,014 |
15,625,091,049,024 |
4ea07983b77ecbc4e00c6f4f8c8d9fc93e3b457c
|
dca91c476ec3a4dfc4e3c0c68b99a7e9a70724ee
|
/Version 0.5/mainWindow.py
|
bb5c6cda09324e411860bcd37ad3a52df92f7741
|
[] |
no_license
|
rriem/debitse
|
https://github.com/rriem/debitse
|
6d1038fc14e0875f21ce8abfcc9a75cc0a455e39
|
e69f4a51f7a41de13307cf95260aab0c2403b7fb
|
refs/heads/master
| 2021-01-13T14:29:37.371021 | 2009-03-08T21:24:59 | 2009-03-08T21:24:59 | 32,127,578 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/Users/rriem/Desktop/Version 0.5/mainWindow.ui'
#
# Created: Sat Feb 14 17:31:36 2009
# by: PyQt4 UI code generator 4.4.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
class Ui_DebitSE(object):
    """pyuic4-generated UI definition for the DebitSE main window.

    NOTE(review): this class was generated by the PyQt4 UI compiler from
    mainWindow.ui ("All changes made in this file will be lost!") --
    prefer editing the .ui file and regenerating.
    """
    def setupUi(self, DebitSE):
        """Build all widgets, menus, toolbar and actions on *DebitSE*."""
        DebitSE.setObjectName("DebitSE")
        DebitSE.resize(550, 400)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap("icons/debit.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
        DebitSE.setWindowIcon(icon)
        DebitSE.setAutoFillBackground(False)
        DebitSE.setUnifiedTitleAndToolBarOnMac(False)
        # Central widget.
        self.centralwidget = QtGui.QWidget(DebitSE)
        self.centralwidget.setGeometry(QtCore.QRect(0, 66, 550, 312))
        self.centralwidget.setObjectName("centralwidget")
        DebitSE.setCentralWidget(self.centralwidget)
        # Menu bar and its four menus (File / Edit / Calc / Help).
        self.menubar = QtGui.QMenuBar(DebitSE)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 550, 22))
        self.menubar.setObjectName("menubar")
        self.menuFile = QtGui.QMenu(self.menubar)
        self.menuFile.setObjectName("menuFile")
        self.menuEdit = QtGui.QMenu(self.menubar)
        self.menuEdit.setObjectName("menuEdit")
        self.menuCalc = QtGui.QMenu(self.menubar)
        self.menuCalc.setTearOffEnabled(False)
        self.menuCalc.setObjectName("menuCalc")
        self.menuAide = QtGui.QMenu(self.menubar)
        self.menuAide.setObjectName("menuAide")
        DebitSE.setMenuBar(self.menubar)
        # Status bar and toolbar.
        self.statusbar = QtGui.QStatusBar(DebitSE)
        self.statusbar.setGeometry(QtCore.QRect(0, 378, 550, 22))
        self.statusbar.setObjectName("statusbar")
        DebitSE.setStatusBar(self.statusbar)
        self.toolBar = QtGui.QToolBar(DebitSE)
        self.toolBar.setGeometry(QtCore.QRect(0, 22, 550, 44))
        self.toolBar.setObjectName("toolBar")
        DebitSE.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar)
        # Actions (each with its icon where applicable).
        self.actionAbout_Debit_SE = QtGui.QAction(DebitSE)
        self.actionAbout_Debit_SE.setShortcutContext(QtCore.Qt.WidgetShortcut)
        self.actionAbout_Debit_SE.setVisible(True)
        self.actionAbout_Debit_SE.setMenuRole(QtGui.QAction.AboutRole)
        self.actionAbout_Debit_SE.setIconVisibleInMenu(False)
        self.actionAbout_Debit_SE.setObjectName("actionAbout_Debit_SE")
        self.actionPreferences = QtGui.QAction(DebitSE)
        self.actionPreferences.setEnabled(False)
        self.actionPreferences.setMenuRole(QtGui.QAction.PreferencesRole)
        self.actionPreferences.setIconVisibleInMenu(False)
        self.actionPreferences.setObjectName("actionPreferences")
        self.actionQuit = QtGui.QAction(DebitSE)
        icon1 = QtGui.QIcon()
        icon1.addPixmap(QtGui.QPixmap("icons/quit.PNG"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionQuit.setIcon(icon1)
        self.actionQuit.setShortcutContext(QtCore.Qt.WindowShortcut)
        self.actionQuit.setMenuRole(QtGui.QAction.TextHeuristicRole)
        self.actionQuit.setObjectName("actionQuit")
        self.actionPrint = QtGui.QAction(DebitSE)
        icon2 = QtGui.QIcon()
        icon2.addPixmap(QtGui.QPixmap("icons/print.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionPrint.setIcon(icon2)
        self.actionPrint.setObjectName("actionPrint")
        self.actionUndo = QtGui.QAction(DebitSE)
        self.actionUndo.setEnabled(False)
        icon3 = QtGui.QIcon()
        icon3.addPixmap(QtGui.QPixmap("icons/undo.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionUndo.setIcon(icon3)
        self.actionUndo.setObjectName("actionUndo")
        self.actionCut = QtGui.QAction(DebitSE)
        icon4 = QtGui.QIcon()
        icon4.addPixmap(QtGui.QPixmap("icons/cut.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionCut.setIcon(icon4)
        self.actionCut.setObjectName("actionCut")
        self.actionCopy = QtGui.QAction(DebitSE)
        icon5 = QtGui.QIcon()
        icon5.addPixmap(QtGui.QPixmap("icons/copy.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionCopy.setIcon(icon5)
        self.actionCopy.setObjectName("actionCopy")
        self.actionPaste = QtGui.QAction(DebitSE)
        icon6 = QtGui.QIcon()
        icon6.addPixmap(QtGui.QPixmap("icons/paste.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionPaste.setIcon(icon6)
        self.actionPaste.setObjectName("actionPaste")
        self.actionPatient_Data = QtGui.QAction(DebitSE)
        icon7 = QtGui.QIcon()
        icon7.addPixmap(QtGui.QPixmap("icons/patient.PNG"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionPatient_Data.setIcon(icon7)
        self.actionPatient_Data.setObjectName("actionPatient_Data")
        self.actionDrug_Data = QtGui.QAction(DebitSE)
        icon8 = QtGui.QIcon()
        icon8.addPixmap(QtGui.QPixmap("icons/drug.PNG"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionDrug_Data.setIcon(icon8)
        self.actionDrug_Data.setObjectName("actionDrug_Data")
        self.actionCalc = QtGui.QAction(DebitSE)
        icon9 = QtGui.QIcon()
        icon9.addPixmap(QtGui.QPixmap("icons/calculator.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionCalc.setIcon(icon9)
        self.actionCalc.setObjectName("actionCalc")
        self.actionExpress_Calc = QtGui.QAction(DebitSE)
        icon10 = QtGui.QIcon()
        icon10.addPixmap(QtGui.QPixmap("icons/calculator2.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionExpress_Calc.setIcon(icon10)
        self.actionExpress_Calc.setObjectName("actionExpress_Calc")
        self.actionTable = QtGui.QAction(DebitSE)
        icon11 = QtGui.QIcon()
        icon11.addPixmap(QtGui.QPixmap("icons/table.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionTable.setIcon(icon11)
        self.actionTable.setObjectName("actionTable")
        # Wire actions into the menus.
        self.menuFile.addAction(self.actionAbout_Debit_SE)
        self.menuFile.addSeparator()
        self.menuFile.addAction(self.actionPreferences)
        self.menuFile.addSeparator()
        self.menuFile.addAction(self.actionPrint)
        self.menuFile.addSeparator()
        self.menuFile.addAction(self.actionQuit)
        self.menuEdit.addAction(self.actionUndo)
        self.menuEdit.addSeparator()
        self.menuEdit.addAction(self.actionCut)
        self.menuEdit.addAction(self.actionCopy)
        self.menuEdit.addAction(self.actionPaste)
        self.menuCalc.addAction(self.actionPatient_Data)
        self.menuCalc.addAction(self.actionDrug_Data)
        self.menuCalc.addSeparator()
        self.menuCalc.addAction(self.actionCalc)
        self.menuCalc.addSeparator()
        self.menuCalc.addAction(self.actionExpress_Calc)
        self.menuCalc.addAction(self.actionTable)
        self.menubar.addAction(self.menuFile.menuAction())
        self.menubar.addAction(self.menuEdit.menuAction())
        self.menubar.addAction(self.menuCalc.menuAction())
        self.menubar.addAction(self.menuAide.menuAction())
        # Wire actions into the toolbar.
        self.toolBar.addAction(self.actionQuit)
        self.toolBar.addSeparator()
        self.toolBar.addAction(self.actionPrint)
        self.toolBar.addSeparator()
        self.toolBar.addAction(self.actionCut)
        self.toolBar.addAction(self.actionCopy)
        self.toolBar.addAction(self.actionPaste)
        self.toolBar.addSeparator()
        self.toolBar.addAction(self.actionPatient_Data)
        self.toolBar.addAction(self.actionDrug_Data)
        self.toolBar.addAction(self.actionCalc)
        self.toolBar.addSeparator()
        self.toolBar.addAction(self.actionExpress_Calc)
        self.toolBar.addAction(self.actionTable)
        self.retranslateUi(DebitSE)
        QtCore.QMetaObject.connectSlotsByName(DebitSE)
    def retranslateUi(self, DebitSE):
        """Install all user-visible strings (French UI texts, tooltips,
        status tips and keyboard shortcuts)."""
        DebitSE.setWindowTitle(QtGui.QApplication.translate("DebitSE", "Débit SE", None, QtGui.QApplication.UnicodeUTF8))
        DebitSE.setToolTip(QtGui.QApplication.translate("DebitSE", "Débit SE", None, QtGui.QApplication.UnicodeUTF8))
        DebitSE.setStatusTip(QtGui.QApplication.translate("DebitSE", "Débit SE", None, QtGui.QApplication.UnicodeUTF8))
        DebitSE.setWhatsThis(QtGui.QApplication.translate("DebitSE", "This is the main window of the application", None, QtGui.QApplication.UnicodeUTF8))
        DebitSE.setAccessibleName(QtGui.QApplication.translate("DebitSE", "Débit SE", None, QtGui.QApplication.UnicodeUTF8))
        DebitSE.setAccessibleDescription(QtGui.QApplication.translate("DebitSE", "Débit SE", None, QtGui.QApplication.UnicodeUTF8))
        self.menubar.setStatusTip(QtGui.QApplication.translate("DebitSE", "The Menu bar", None, QtGui.QApplication.UnicodeUTF8))
        self.menuFile.setToolTip(QtGui.QApplication.translate("DebitSE", "File menu: print or quit", None, QtGui.QApplication.UnicodeUTF8))
        self.menuFile.setStatusTip(QtGui.QApplication.translate("DebitSE", "File menu", None, QtGui.QApplication.UnicodeUTF8))
        self.menuFile.setTitle(QtGui.QApplication.translate("DebitSE", "Fichier", None, QtGui.QApplication.UnicodeUTF8))
        self.menuEdit.setToolTip(QtGui.QApplication.translate("DebitSE", "Edit menu", None, QtGui.QApplication.UnicodeUTF8))
        self.menuEdit.setStatusTip(QtGui.QApplication.translate("DebitSE", "Edit menu", None, QtGui.QApplication.UnicodeUTF8))
        self.menuEdit.setTitle(QtGui.QApplication.translate("DebitSE", "Édition", None, QtGui.QApplication.UnicodeUTF8))
        self.menuCalc.setStatusTip(QtGui.QApplication.translate("DebitSE", "Calculation menu", None, QtGui.QApplication.UnicodeUTF8))
        self.menuCalc.setTitle(QtGui.QApplication.translate("DebitSE", "Calculs", None, QtGui.QApplication.UnicodeUTF8))
        self.menuAide.setTitle(QtGui.QApplication.translate("DebitSE", "Aide", None, QtGui.QApplication.UnicodeUTF8))
        self.statusbar.setToolTip(QtGui.QApplication.translate("DebitSE", "This is the <i>Status bar</i> of the application.", None, QtGui.QApplication.UnicodeUTF8))
        self.statusbar.setWhatsThis(QtGui.QApplication.translate("DebitSE", "This is the <i>Status bar</i> of the application", None, QtGui.QApplication.UnicodeUTF8))
        self.toolBar.setWindowTitle(QtGui.QApplication.translate("DebitSE", "toolBar", None, QtGui.QApplication.UnicodeUTF8))
        self.actionAbout_Debit_SE.setText(QtGui.QApplication.translate("DebitSE", "À propos de Débit SE", None, QtGui.QApplication.UnicodeUTF8))
        self.actionAbout_Debit_SE.setStatusTip(QtGui.QApplication.translate("DebitSE", "À propos de Débit SE", None, QtGui.QApplication.UnicodeUTF8))
        self.actionPreferences.setText(QtGui.QApplication.translate("DebitSE", "Préférences", None, QtGui.QApplication.UnicodeUTF8))
        self.actionQuit.setText(QtGui.QApplication.translate("DebitSE", "Quitter", None, QtGui.QApplication.UnicodeUTF8))
        self.actionQuit.setStatusTip(QtGui.QApplication.translate("DebitSE", "Quitter Débit SE", None, QtGui.QApplication.UnicodeUTF8))
        self.actionQuit.setShortcut(QtGui.QApplication.translate("DebitSE", "Ctrl+Q", None, QtGui.QApplication.UnicodeUTF8))
        self.actionPrint.setText(QtGui.QApplication.translate("DebitSE", "Imprimer...", None, QtGui.QApplication.UnicodeUTF8))
        self.actionPrint.setToolTip(QtGui.QApplication.translate("DebitSE", "Open the <i>Print diaglog...</i>", None, QtGui.QApplication.UnicodeUTF8))
        self.actionPrint.setStatusTip(QtGui.QApplication.translate("DebitSE", "Print dialog", None, QtGui.QApplication.UnicodeUTF8))
        self.actionPrint.setShortcut(QtGui.QApplication.translate("DebitSE", "Ctrl+P", None, QtGui.QApplication.UnicodeUTF8))
        self.actionUndo.setText(QtGui.QApplication.translate("DebitSE", "Annuler", None, QtGui.QApplication.UnicodeUTF8))
        self.actionUndo.setStatusTip(QtGui.QApplication.translate("DebitSE", "Annule la dernière commande.", None, QtGui.QApplication.UnicodeUTF8))
        self.actionUndo.setShortcut(QtGui.QApplication.translate("DebitSE", "Ctrl+Z", None, QtGui.QApplication.UnicodeUTF8))
        self.actionCut.setText(QtGui.QApplication.translate("DebitSE", "Couper", None, QtGui.QApplication.UnicodeUTF8))
        self.actionCut.setStatusTip(QtGui.QApplication.translate("DebitSE", "Coupe la sélection dans le presse papier.", None, QtGui.QApplication.UnicodeUTF8))
        self.actionCut.setShortcut(QtGui.QApplication.translate("DebitSE", "Ctrl+X", None, QtGui.QApplication.UnicodeUTF8))
        self.actionCopy.setText(QtGui.QApplication.translate("DebitSE", "Copier", None, QtGui.QApplication.UnicodeUTF8))
        self.actionCopy.setStatusTip(QtGui.QApplication.translate("DebitSE", "Copie la sélection dans le presse papier.", None, QtGui.QApplication.UnicodeUTF8))
        self.actionCopy.setShortcut(QtGui.QApplication.translate("DebitSE", "Ctrl+C", None, QtGui.QApplication.UnicodeUTF8))
        self.actionPaste.setText(QtGui.QApplication.translate("DebitSE", "Coller", None, QtGui.QApplication.UnicodeUTF8))
        self.actionPaste.setStatusTip(QtGui.QApplication.translate("DebitSE", "Colle le presse papier", None, QtGui.QApplication.UnicodeUTF8))
        self.actionPaste.setShortcut(QtGui.QApplication.translate("DebitSE", "Ctrl+V", None, QtGui.QApplication.UnicodeUTF8))
        self.actionPatient_Data.setText(QtGui.QApplication.translate("DebitSE", "Données patients...", None, QtGui.QApplication.UnicodeUTF8))
        self.actionPatient_Data.setToolTip(QtGui.QApplication.translate("DebitSE", "Open the <i>Patient data</i> entry form", None, QtGui.QApplication.UnicodeUTF8))
        self.actionPatient_Data.setStatusTip(QtGui.QApplication.translate("DebitSE", "Patient data entry form", None, QtGui.QApplication.UnicodeUTF8))
        self.actionPatient_Data.setWhatsThis(QtGui.QApplication.translate("DebitSE", "Open the <i>Patient data</i> entry form", None, QtGui.QApplication.UnicodeUTF8))
        self.actionDrug_Data.setText(QtGui.QApplication.translate("DebitSE", "Données médicament...", None, QtGui.QApplication.UnicodeUTF8))
        self.actionDrug_Data.setToolTip(QtGui.QApplication.translate("DebitSE", "Open the <i>Drug data</i> entry form", None, QtGui.QApplication.UnicodeUTF8))
        self.actionDrug_Data.setStatusTip(QtGui.QApplication.translate("DebitSE", "Drug data entry form", None, QtGui.QApplication.UnicodeUTF8))
        self.actionDrug_Data.setWhatsThis(QtGui.QApplication.translate("DebitSE", "Open the <i>Drug data</i> entry form", None, QtGui.QApplication.UnicodeUTF8))
        self.actionCalc.setText(QtGui.QApplication.translate("DebitSE", "Calcul", None, QtGui.QApplication.UnicodeUTF8))
        self.actionExpress_Calc.setText(QtGui.QApplication.translate("DebitSE", "Calculs express", None, QtGui.QApplication.UnicodeUTF8))
        self.actionTable.setText(QtGui.QApplication.translate("DebitSE", "Tableau", None, QtGui.QApplication.UnicodeUTF8))
        self.actionTable.setStatusTip(QtGui.QApplication.translate("DebitSE", "Table tool", None, QtGui.QApplication.UnicodeUTF8))
if __name__ == "__main__":
    # Stand-alone preview: create a main window, apply the generated UI
    # and run the Qt event loop until the window is closed.
    import sys
    app = QtGui.QApplication(sys.argv)
    main_window = QtGui.QMainWindow()
    ui = Ui_DebitSE()
    ui.setupUi(main_window)
    main_window.show()
    sys.exit(app.exec_())
|
UTF-8
|
Python
| false | false | 2,009 |
7,464,653,182,909 |
677a1c1b495067d2c24ee62154d6cb25b54a18a7
|
95ed79b041f63ab911f9ce9957feeecf12482773
|
/data_miner
|
acc84e5dca4a66ec6ebf09de7e7eae6af01206fc
|
[] |
no_license
|
mftb/IniciacaoCientifica
|
https://github.com/mftb/IniciacaoCientifica
|
4028debcc80e7f396d324da1f59f0aebdd742fdd
|
f03bafc20feec94c9c0db4cc8756a42bba6b258e
|
refs/heads/master
| 2016-09-06T19:15:40.839203 | 2013-07-13T17:50:25 | 2013-07-13T17:50:25 | 3,156,942 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# -*- coding: latin1 -*-
"""Sum the timing values of '  sc_' entries found in a log file.

Usage: data_miner <logfile>

For every line containing "<ws><ws>sc_" the first "<float>" token
surrounded by whitespace is extracted and accumulated; the total is
printed at the end.
"""
import os, sys, re

cmdline = sys.argv[1:]

if len(cmdline) > 1:
    print("Too much args. Exiting...")
    sys.exit(1)

# Bug fix: the original only rejected *extra* arguments; with no
# argument at all it crashed with IndexError on cmdline[0] below.
if len(cmdline) == 0:
    print("Missing log file argument. Exiting...")
    sys.exit(1)

if not os.path.isfile(cmdline[0]):
    print("File does not exists. Exiting...")
    sys.exit(1)

# NOTE(review): the whole file is loaded at once; very large logs could
# exhaust memory.  Kept as-is, but the handle is now always closed.
with open(cmdline[0], 'r') as f:
    data = f.read()

data = data.split('\n')

# searches for "*whitespace**whitespace*sc_"
prog = re.compile(r'\s\ssc_')
# searches for "*whitespace**number**dot**number**whitespace*"
num_prog = re.compile(r'\s(\d+\.\d+)\s')

total = 0.0
for line in data:
    if prog.findall(line):
        numbers = num_prog.findall(line)
        if numbers:
            total += float(numbers[0])

print("Total time: " + str(total))
|
UTF-8
|
Python
| false | false | 2,013 |
13,683,765,838,470 |
4dbc0b1a29f4022ca88005e4aba83cf698ca18ba
|
daa61e4f94bbd151fc83e422c034f0fdddf658df
|
/examples/set_researcher.py
|
88ec3b6d31880fea6fabc96ad1f345e30f44af45
|
[] |
no_license
|
lordzappo/GenoLogics-LIMS-Python
|
https://github.com/lordzappo/GenoLogics-LIMS-Python
|
e9386cb2e626d01db1006fc86314409e536afa4b
|
7a62d83fb15c6f8348d401fea3c89273756744d5
|
refs/heads/master
| 2021-01-17T22:50:59.019051 | 2012-06-27T13:41:49 | 2012-06-27T13:41:49 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""Python interface to GenoLogics LIMS via its REST API.
Example usage: Set an externalid for a researcher.
XXX Don't understand how this works? The URI was set, but only in parts,
and it couldn't be retrieved at the /v1/external/perka address?
NOTE: You need to set the BASEURI, USERNAME AND PASSWORD.
Per Kraulis, Science for Life Laboratory, Stockholm, Sweden.
"""
from xml.etree import ElementTree
from genologics.lims import *
# Login parameters for connecting to a LIMS instance.
# NOTE: Modify according to your setup.
from genologics.site_cloud import BASEURI, USERNAME, PASSWORD
# Create the LIMS interface instance, and check the connection and version.
lims = Lims(BASEURI, USERNAME, PASSWORD)
lims.check_version()
researchers = lims.get_researchers(username='pkraulis')
# NOTE(review): assumes the username query matched at least one
# researcher; an empty result raises IndexError on the next line.
researcher = researchers[0]
print researcher, researcher.name, researcher.externalids
# Dead experiment kept for reference: attach an external id by editing
# the researcher's XML directly, then PUT it back.
## ElementTree.SubElement(researcher.root,
##                        nsmap('ri:externalid'),
##                        id='perka',
##                        uri='http://gmail.com/')
## researcher.put()
|
UTF-8
|
Python
| false | false | 2,012 |
7,610,682,062,873 |
952ec59c14a7110bcee818ec3be1fdb17db4fa5c
|
00bef670b18795c25e5cbacdca0d2d83903d4b03
|
/update/meng2/deploy/deploy.py
|
07b65c1964c2f0a76f8939cfccffdf1a8c748d69
|
[] |
no_license
|
jianghuiliang/python-hello
|
https://github.com/jianghuiliang/python-hello
|
4bfd892aed18f4a5ee01782f7372724b37f26c73
|
52aca5860b0ecc3dda5e7bdc0ef3d2caa9e6bfca
|
refs/heads/master
| 2016-03-30T19:54:02.820118 | 2013-02-26T13:03:35 | 2013-02-26T13:03:35 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
import pexpect
import os,sys,signal
import dircache,time,dircache
import threading
import paramiko
import ConfigParser
from termcolor import colored
# Module-wide state shared between parseOption(), loadConfig() and main().
configfile=""   # path of the tab-separated host list (-f/--configfile)
isdebug=False   # debug flag (-D/--debug)
config={}       # ip -> {name, ip, username, password, port}; filled by loadConfig()
spath=""        # local file to upload (-s/--sourcpath)
distpath=""     # remote staging directory (-d/--distpath)
def helpFunc():
    """Print the usage banner and terminate with exit status 3."""
    usage = "putsfile.py -f|--file (conf file) [-s|--split split_string default=|] [-D|--debug]"
    sys.stdout.write(usage + "\n")
    sys.stdout.write("\n")
    sys.exit(3)
def verFunc():
    """Print the program version and terminate with exit status 3."""
    sys.stdout.write("Ver 0.0.1" + "\n")
    sys.exit(3)
def Log(var1, var2):
    """Append line *var2* (newline added) to file *var1* under the
    module-level ``mutex``.

    Parameters: var1 -- log file path; var2 -- message text.
    """
    global mutex
    mutex.acquire()
    try:
        # 'with' guarantees the handle is closed and, via the finally
        # below, the mutex released even if the write fails; the
        # original leaked the handle and kept the mutex held forever
        # on error.
        with open(var1, "a+") as outfile:
            outfile.write(var2 + "\n")
    finally:
        mutex.release()
def SignalHandler(sig, id):
    """React to process signals: announce USR1/HUP, exit cleanly on TERM."""
    if sig == signal.SIGTERM:
        sys.stdout.write('received SIGTERM, shutting down' + "\n")
        sys.exit(0)
    elif sig == signal.SIGHUP:
        sys.stdout.write('received signal HUP' + "\n")
    elif sig == signal.SIGUSR1:
        sys.stdout.write('received signal USR1' + "\n")
def parseOption():
    """Parse command-line options into the module-level globals.

    Recognised flags: -f/--configfile, -s/--sourcpath, -d/--distpath,
    -D/--debug, plus -h/--help and -v/--version which print and exit
    via helpFunc()/verFunc().
    """
    from optparse import OptionParser
    global configfile
    global spath
    global distpath
    global isdebug
    parser = OptionParser(add_help_option=0)
    # Bug fix: optparse invokes callbacks as cb(option, opt_str, value,
    # parser); passing the zero-argument helpFunc/verFunc directly made
    # -h and -v raise TypeError.  Wrap them so the extra arguments are
    # discarded.
    parser.add_option("-h", "--help", action="callback",
                      callback=lambda option, opt, value, p: helpFunc())
    parser.add_option("-v", "--version", action="callback",
                      callback=lambda option, opt, value, p: verFunc())
    parser.add_option("-f", "--configfile", action="store", type="string", dest="configfile", default="config.txt")
    parser.add_option("-s", "--sourcpath", action="store", type="string", dest="sourcepath", default="./voolestream.tar.gz")
    parser.add_option("-d", "--distpath", action="store", type="string", dest="distpath", default="/root/voolestream/")
    parser.add_option("-D", "--debug", action="store_true", dest="debug", default=False)
    (options, args) = parser.parse_args()
    configfile = options.configfile
    spath = options.sourcepath
    distpath = options.distpath
    isdebug = options.debug
def loadConfig():
    """Parse the tab-separated host list *configfile* into *config*.

    Expected line format: name<TAB>ip<TAB>password[<TAB>port]; the SSH
    user is always 'root' and the port defaults to "22".  Lines with a
    different field count are skipped silently.
    """
    global configfile
    global isdebug
    global config
    f=open(configfile,"r")
    for e in f:
        body=e.split("\t")
        # Only 3- or 4-field lines are accepted.
        if len(body) != 3 and len(body) != 4:
            continue
        ip = body[1].strip();
        config[ip] = {}
        config[ip]["name"] = body[0].strip()
        config[ip]["ip"] = body[1].strip()
        config[ip]["username"] = "root"
        config[ip]["password"] = body[2].strip()
        if (len(body) == 4) :
            config[ip]["port"] = body[3].strip()
        else :
            config[ip]["port"] = "22"
        print config[ip]
    f.close()
#print config
def downloadFile(hostname, port, username, passwd, filename, dstfilename) :
    """Fetch remote *dstfilename* from *hostname* into local *filename*
    over SFTP.

    Returns 0 on success, -1 on any failure.
    """
    t = None
    try:
        t = paramiko.Transport((hostname, port))
        t.connect(username=username, password=passwd)
        sftp = paramiko.SFTPClient.from_transport(t)
        print('Beginning to download file %s from %s:%s ' % (filename, hostname, dstfilename))
        print('Downloading file: %s' % filename)
        sftp.get(dstfilename, filename)
        print('Download file success')
        t.close()
    except Exception as e:
        print("download file error!:" + str(e))
        # Bug fix: the original called t.close() unconditionally and
        # raised NameError when the Transport constructor itself failed.
        if t is not None:
            t.close()
        return -1
    return 0
def uploadFile(hostname, port, username, passwd, filename, dstfilename) :
    """Upload local *filename* to *dstfilename* on *hostname* over SFTP.

    Returns 0 on success, -1 on any failure.
    """
    t = None
    try:
        t = paramiko.Transport((hostname, port))
        t.connect(username=username, password=passwd)
        sftp = paramiko.SFTPClient.from_transport(t)
        print('Beginning to upload file %s to %s:%s' % (filename, hostname, dstfilename))
        print('uploading file: %s' % filename)
        sftp.put(filename, dstfilename)
        print('upload file success ')
        t.close()
    except Exception as e:
        print("upload file error!:" + str(e))
        # Bug fix: the original called t.close() unconditionally and
        # raised NameError when the Transport constructor itself failed.
        if t is not None:
            t.close()
        return -1
    return 0
def changeConfig(filename, sec, key, value) :
    """Rewrite ``key = value`` inside section ``[sec]`` of an INI-style
    file.

    The file is copied line by line to ``<filename>.tmp`` with the
    matching key line replaced, then the temporary file atomically
    replaces the original.  Returns 0 on success, -1 on error.
    """
    tmpfile = filename + ".tmp"
    try :
        # Track whether the current line lies inside the target section.
        # Bug fix: the original left 'insec' unset until the first
        # section header and raised NameError (then returned -1 from a
        # broken cleanup path) if a key line appeared before any header.
        insec = 0
        with open(filename, "r") as fp, open(tmpfile, "a+") as fp1:
            for e in fp :
                if (e.find("["+sec+"]") != -1) :
                    insec = 1
                elif (e.find("[") != -1 and e.find("]") != -1) :
                    insec = 0
                elif (e.find(key) != -1) :
                    arr = e.split("=")
                    if (len(arr) > 1 and arr[0].strip() == key and insec == 1) :
                        e = arr[0] + " = " + value + "\n"
                fp1.write(e)
    except Exception as e:
        print(colored("changeConfig error:"+str(e), "red"))
        return -1
    # os.rename is synchronous and not shell-injectable, unlike the
    # original os.popen("mv -f ...") which interpolated 'filename' into
    # a shell command and returned before the move completed.
    os.rename(tmpfile, filename)
    return 0
def testThread(host, port, username, passwd, hostname):
    """Deploy the voolestream package to one host over SSH/SFTP.

    Despite the name this runs synchronously (the threaded caller in
    main() is commented out).  Steps: connect, recreate the remote
    staging dir *distpath*, upload the tarball *spath* and
    stream_deploy.sh, run the script, then start checkrun.sh.
    Returns 0 on success, 1 on any failure; outcomes are appended to
    deploylog.txt plus deploy_fail.txt / deploy_succ.txt.
    NOTE(review): the SSH client is never closed explicitly.
    """
    global spath
    global distpath
    client = paramiko.SSHClient()
    client.load_system_host_keys()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    print colored("开始部署..." + hostname + " " + host + "...", "yellow");
    print "连接" + hostname + " " + "[" + host + "]" + "..."
    try :
        client.connect(host,port=int(port),username=username,password=passwd)
    except Exception, e:
        print "SSH Connect fail: " + str(e)
        Log("deploylog.txt", "Host " + hostname + ":" + host + " 连接失败!" + str(e))
        return 1
    try :
        # Any output on stderr from a remote command is treated as
        # failure for the setup steps below.
        print "远程删除临时目录 " + distpath
        rcmd="rm -Rf "+distpath
        stdin,stdout,stderr = client.exec_command(rcmd)
        print ("cmd:"+rcmd)
        rcmdret = stderr.readlines()
        if (len(rcmdret) > 0) :
            msg = "部署失败...rcmd=[%s] ret=%s" %(rcmd, rcmdret)
            print colored(msg, "red");
            Log("deploylog.txt", "Host " + hostname + ":" + host + "部署失败!")
            Log("deploy_fail.txt", hostname + "\t" + host + "\t" + passwd)
            return 1
        print "远程建立临时目录 " + distpath
        rcmd="mkdir -p "+distpath
        stdin,stdout,stderr = client.exec_command(rcmd)
        print rcmd
        rcmdret = stderr.readlines()
        if (len(rcmdret) > 0) :
            msg = "部署失败...rcmd=[%s] ret=%s" %(rcmd, rcmdret)
            print colored(msg, "red");
            Log("deploylog.txt", "Host " + hostname + ":" + host + "部署失败!")
            Log("deploy_fail.txt", hostname + "\t" + host + "\t" + passwd)
            return 1
        print "远程复制 " + spath + "到 " + distpath
        status = uploadFile(host, int(port), username, passwd, spath, distpath+spath)
        if (status != 0) :
            msg = "部署失败...status is %d" %(status)
            print colored(msg, "red");
            Log("deploylog.txt", "Host " + hostname + ":" + host + "部署失败!")
            Log("deploy_fail.txt", hostname + "\t" + host + "\t" + passwd)
            return 1
        print "远程复制 " + "stream_deploy.sh" + "到 " + distpath
        status = uploadFile(host, int(port), username, passwd, "./stream_deploy.sh", distpath+"./stream_deploy.sh")
        if (status != 0) :
            msg = "部署失败...status is %d" %(status)
            print colored(msg, "red");
            Log("deploylog.txt", "Host " + hostname + ":" + host + "部署失败!" + status)
            Log("deploy_fail.txt", hostname + "\t" + host + "\t" + passwd)
            return 1
        # Dead code kept for reference: download/patch/re-upload of the
        # remote voolestream.conf was disabled.
#        print "下载配置文件并修改..."
#        localfilename = "./tmp/" + host + "vooleatream.conf"
#        distfilename = "/opt/voolestream/voolestream.conf"
#        status = downloadFile(host, int(port), username, passwd, localfilename, distfilename)
#        if (status != 0) :
#            msg = "下载配置文件失败...status is %d" %(status)
#            print colored(msg, "red");
#            Log("deploylog.txt", "Host " + hostname + ":" + host + "部署失败!")
#            Log("deploy_fail.txt", hostname + "\t" + "host" + "\t" + passwd)
#            return 1
#        changeConfig(localfilename, "Probe System Generic Option", "Max threads", "300");
#        status = uploadFile(host, int(port), username, passwd, localfilename, distfilename)
#        if (status != 0) :
#            msg = "上传配置文件失败...status is %d" %(status)
#            print colored(msg, "red");
#            Log("deploylog.txt", "Host " + hostname + ":" + host + "部署失败!" + status)
#            Log("deploy_fail.txt", hostname + "\t" + "host" + "\t" + passwd)
#            return 1
        print "执行部署脚本"
        rcmd = "cd " + distpath + ";chmod +x stream_deploy.sh"
        stdin,stdout,stderr = client.exec_command(rcmd)
        rcmdret = stderr.readlines()
        if (len(rcmdret) > 0) :
            msg = "部署失败...rcmd=[%s] ret=%s" %(rcmd, rcmdret)
            print colored(msg, "red");
            Log("deploylog.txt", "Host " + hostname + ":" + host + "部署失败!")
            Log("deploy_fail.txt", hostname + "\t" + host + "\t" + passwd)
            return 1
        # The deploy script reports errors on stdout, so failure is
        # detected by scanning for its error marker instead of stderr.
        rcmd = "sh " + distpath + "/stream_deploy.sh"
        stdin,stdout,stderr = client.exec_command(rcmd)
        print rcmd
        rcmdret = stdout.readlines();
        for out in rcmdret :
            if (out.find("deploy voolestream error") != -1) :
                print colored("部署失败..." + hostname + " " + host, "red");
                Log("deploylog.txt", "Host " + hostname + ":" + host + "部署失败!" + out)
                Log("deploy_fail.txt", hostname + "\t" + host + "\t" + passwd)
                return 1
        # Fire-and-forget: start the service check script; its output is
        # not inspected.
        rcmd = "cd /opt/voolestream;./checkrun.sh"
        stdin,stdout,stderr = client.exec_command(rcmd)
        print rcmd
        print colored("成功部署..." + hostname + " " + host, "green");
        Log("deploy_succ.txt", hostname + "\t" + host + "\t" + passwd)
    except Exception, e:
        Log("deploylog.txt", "Host " + hostname + ":" + host + " 执行指令失败!" + str(e))
        print colored("部署失败..." + hostname + " " + host + "...:" + str(e), "red");
        return 1
    return 0
def main():
    """Program entry point: parse options, load the host list, then
    deploy to each host sequentially."""
    global configfile
    global distpath
    global spath
    global isdebug
    global config
    global mutex
    threads = []
    # Recreate the local scratch directory used for downloaded configs.
    os.popen("rm -rf ./tmp;mkdir tmp")
    signal.signal(signal.SIGUSR1, SignalHandler)
    signal.signal(signal.SIGHUP, SignalHandler)
    signal.signal(signal.SIGTERM, SignalHandler)
    signal.signal(signal.SIGINT, SignalHandler)
    # Shared by Log() across (potential) worker threads.
    mutex = threading.Lock()
    parseOption()
    loadConfig()
    for each in config.keys():
        host=config[each]["ip"]
        username=config[each]["username"]
        passwd=config[each]["password"]
        port=config[each]["port"]
        hostname=config[each]["name"]
        # Deployment currently runs host by host; the threaded variant
        # below was disabled.
        testThread(host, port, username, passwd, hostname)
#        t = threading.Thread(target=testThread, args=(host, port, username, passwd, hostname))
#        threads.append(t)
#    for t in threads:
#        t.start()
#    print "start complete"
#    for t in threads:
#        t.join()
    print "run ok"
# Script entry point.
if __name__=='__main__':
    main()
|
GB18030
|
Python
| false | false | 2,013 |
7,275,674,620,725 |
9a07c1aa2897436c1515317664d76c21b4be93e0
|
ad3b68d45ab4b90178264cebfe8dba5157138c22
|
/InClass7/InClass7.py
|
7822717386683b6bf62da507e9c11760b05906b9
|
[] |
no_license
|
Seancord/Stat-3654
|
https://github.com/Seancord/Stat-3654
|
2211098e4895f097b758eb8e13e2128634e58666
|
b3607fb26d513026c94b9a78308054aeb121cb27
|
refs/heads/master
| 2021-01-25T10:21:00.273037 | 2014-12-08T17:48:04 | 2014-12-08T17:48:04 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# coding: utf-8
# NOTE(review): this is an IPython-notebook export.  Bare names such as
# title(), savefig() and array() come from the notebook's %pylab namespace
# and are undefined when run as a plain script; `import matplotlib as plt`
# is also misnamed (plt.scatter/plt.title need matplotlib.pyplot).
# Bare expressions like `carData` were notebook cell outputs; as a script
# they evaluate and are discarded.
# In[1]:
#Sean Cordrey
#In Class 7
#Part 1
# In[1]:
import pandas as pd
import numpy as np
import matplotlib as plt
# In[25]:
#2
# Load the vehicles data set (comma-separated despite read_table).
carData = pd.read_table('vehicles.csv', header=False, sep=',')
# In[26]:
carData
# In[60]:
#3
#1
# Histogram of fuel consumption in barrels.
carData['barrels08'].hist(bins=15)
title('barrels08')
savefig('histogram.png', dpi=400, bbox_inches='tight')
# In[66]:
#2
# Kernel-density estimate of the same column.
carData['barrels08'].plot(kind='kde')
title('Barrel Consumption Frequency')
savefig('density.png', dpi=400, bbox_inches='tight')
# In[65]:
#3
# Bin city MPG into 8-mpg categories and plot category counts.
city08 = carData['city08']
mpg = [0, 8, 16, 24, 32, 40]
categoricalMPG = pd.cut(city08, mpg)
carData['categoricalMPG']=categoricalMPG
categoricalFreq=pd.value_counts(categoricalMPG)
pd.value_counts(categoricalMPG)
pd.value_counts(categoricalMPG).plot(kind='bar')
title('MPG by Efficiency')
savefig('bar.png', dpi=400, bbox_inches='tight')
# In[67]:
#4
# Relative frequencies, shown as a stacked horizontal bar chart.
categoricalFreq_rel=categoricalFreq.div(categoricalFreq.sum(1).astype(float))
categoricalFreq_rel
categoricalFreq_rel.plot(kind='barh', stacked=True)
title('MPG by Efficiency(stacked)')
savefig('stacked', dpi=400, bbox_inches='tight')
# In[64]:
#5
plt.scatter(carData['barrels08'], carData['highway08'])
plt.title('Barrel Consumption vs Highway MPG')
# In[2]:
#Part 2
import scipy as sp
import sklearn as sk
# In[3]:
#2
medicalData=pd.read_table('Medical.csv', header=False, sep=',')
medicalData
# In[11]:
#4
# Feature matrix: age and HgA1C; target: literacy category.
X=array(medicalData[['Age', 'HgA1C']])
X
# In[10]:
#4
y=array(medicalData['A Literacy Category'])
y
# In[15]:
#5
# 75/25 train/test split with a fixed seed for reproducibility.
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test= train_test_split(X, y, test_size=0.25, random_state=33)
# In[17]:
#6
# Standardize features using statistics from the training set only.
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler().fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
# In[18]:
#7
# Linear classifier trained by stochastic gradient descent.
from sklearn.linear_model import SGDClassifier
clf = SGDClassifier()
clf.fit(X_train,y_train)
# In[19]:
clf.coef_
# In[20]:
clf.intercept_
# In[21]:
#equation
# -27.6459 + 5.0897 * x1 + -16.8182 * x2 = 0
# In[22]:
#8
# Accuracy on the training data.
from sklearn import metrics
y_train_pred = clf.predict(X_train)
metrics.accuracy_score(y_train, y_train_pred)
# In[23]:
#9
# Accuracy on the held-out test data.
y_pred = clf.predict(X_test)
print metrics.accuracy_score(y_test, y_pred)
# In[25]:
#10
metrics.confusion_matrix(y_test, y_pred)
# In[26]:
# 2 individuals were correctly placed by our classifier and 11 were incorrectly placed
#Our classifier in this data set is very weak. It did well in the train set because that it where it was derived from
#When used on our train set it did very poorly, so this specific classifier would be a good one to use.
# (NOTE(review): given the preceding sentences, "would NOT be a good one to use" was presumably intended.)
# In[ ]:
|
UTF-8
|
Python
| false | false | 2,014 |
1,554,778,199,326 |
bfed513a4326c66f2da8a417bcd7d849a4fe5b21
|
04880e9245b1d4aae779f90e9570719cef4c6d66
|
/cut_emails.py
|
8789760dc05777009e8ec7be6302b01c756e046b
|
[] |
no_license
|
invisible-college/scripts
|
https://github.com/invisible-college/scripts
|
505dadc5fdb8a583a557b39a5509cb498b3376ff
|
a0b503e74c568153f11d70160ff4792a9c29d47e
|
refs/heads/master
| 2020-05-17T03:41:25.236078 | 2014-06-18T07:08:56 | 2014-06-18T07:08:56 | 15,642,356 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import sys

# NOTE(review): despite the old comment claiming "first argument", the
# filename is read from stdin (raw_input), not sys.argv -- behavior kept.
filename = raw_input()

# Collect the third whitespace-separated token of each line (the email
# address column) into one comma-separated string.  `with` guarantees the
# file handle is closed -- the original leaked it -- and the renamed
# variables no longer shadow the builtins `file` and `string`.
emails = ""
with open(filename, "r") as infile:
    for line in infile:
        tokens = line.split()  # no separator arg == split on any whitespace
        if len(tokens) > 2:
            emails += tokens[2] + ", "
# Trailing ", " is preserved to match the original output exactly.
print(emails)
|
UTF-8
|
Python
| false | false | 2,014 |
15,960,098,502,679 |
cbf9f11ab6394e105fd95439e7773f4f6b5ff145
|
5d1c61bad8f9a785f021b092ace2ac8b716f3ae7
|
/fabfile.py
|
9a47b1b74d096b7057061bb064d82e4a702408a8
|
[] |
no_license
|
hustlzp/yprogrammer
|
https://github.com/hustlzp/yprogrammer
|
37ae9f50a288aa0ea66fbd814b5633b3daa1fe99
|
8b9d12617fdf4f7b7fdf20828f9cf74f8093e05e
|
refs/heads/master
| 2016-09-11T02:09:22.610089 | 2014-01-28T13:24:20 | 2014-01-28T13:24:20 | 11,084,039 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from fabric.api import run, env, cd
host_string = "[email protected]"
def first():
    """One-time server setup: install the nginx site and the supervisor job.

    NOTE(review): runs in /var/www/lishengchun while deploy() below uses
    /var/www/yprogrammer -- confirm the path is intentional.
    """
    env.host_string = host_string
    with cd('/var/www/lishengchun'):
        # nginx: publish the site config, enable it, reload the server
        run('cp nginx.conf /etc/nginx/sites-available/yp')
        run('ln -sf /etc/nginx/sites-available/yp /etc/nginx/sites-enabled/')
        run('nginx -s reload')
        # supervisor: register the app process and pick up the new config
        run('cp supervisor.conf /etc/supervisor/conf.d/yp.conf')
        run('supervisorctl reread')
        run('supervisorctl update')
def restart():
    """Restart the supervised app process on the remote host."""
    env.host_string = host_string
    run('supervisorctl restart yp')
def deploy():
    """Pull the latest code on the server and restart the app."""
    env.host_string = host_string
    with cd('/var/www/yprogrammer'):
        run('git pull')
        run('supervisorctl restart yp')
|
UTF-8
|
Python
| true | false | 2,014 |
128,849,040,564 |
c701061a06207630f5b2e253ba8172f9ac04c865
|
153ecce57c94724d2fb16712c216fb15adef0bc4
|
/zopeorg.theme/trunk/zopeorg/theme/__init__.py
|
db2725e149e9cacc93e6a226d453eee49cd04ab4
|
[] |
no_license
|
pombredanne/zope
|
https://github.com/pombredanne/zope
|
10572830ba01cbfbad08b4e31451acc9c0653b39
|
c53f5dc4321d5a392ede428ed8d4ecf090aab8d2
|
refs/heads/master
| 2018-03-12T10:53:50.618672 | 2012-11-20T21:47:22 | 2012-11-20T21:47:22 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from zope.i18nmessageid import MessageFactory as msgfac
from Products.Archetypes.public import process_types
from Products.Archetypes.public import listTypes
from Products.CMFCore.utils import ContentInit
from zopeorg.theme.config import PROJECTNAME
MessageFactory = msgfac("zopeorg.theme")
def initialize(context):
    """Zope 2 product initializer: register this product's Archetypes content.

    Called by Zope at startup with the product context; registers one
    ContentInit per Archetypes content type declared under PROJECTNAME.
    """
    import zopeorg.theme.content
    from zopeorg.theme import permissions
    listOfTypes=listTypes(PROJECTNAME)
    (content_types, constructors, ftis)=process_types(listOfTypes, PROJECTNAME)
    allTypes = zip(content_types, constructors)
    for atype, constructor in allTypes:
        kind = "%s: %s" % (PROJECTNAME, atype.archetype_name)
        ContentInit(
            kind,
            content_types = (atype,),
            # Add permissions look like perms.Add{meta_type}
            permission = getattr(permissions, "Add%s" % atype.meta_type),
            extra_constructors = (constructor,),
            fti = ftis,
        ).initialize(context)
|
UTF-8
|
Python
| false | false | 2,012 |
17,102,559,781,359 |
7b8bcad6dabc9673a7af4ee32c8df2e1aa2c43f2
|
985909ab99bf9f7948dfce04dafd9a04ba3a23c3
|
/lipstr/lists/urls.py
|
da640d2c18dcf932f85abfac2d3c837a55d04c36
|
[] |
no_license
|
rcarton/lipstr.com
|
https://github.com/rcarton/lipstr.com
|
daf57a465d4e9fcefd3d8cab516452ee5bf28e21
|
46b9b42bb4f9b10897c57c9475270effe13156d5
|
refs/heads/master
| 2021-01-20T07:45:33.496119 | 2013-03-24T20:14:33 | 2013-03-24T20:14:33 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.conf.urls.defaults import patterns, url
# URL routes for the lists app; string view names are resolved inside
# the 'lists.views' module by Django's (pre-1.8) patterns() prefix.
urlpatterns = patterns('lists.views',
    url(r'^login$', 'login', name='login'),
    url(r'^preferences$', 'preferences', name='preferences'),
    url(r'^signup$', 'signup', name='signup'),
    url(r'^logout$', 'disconnect', name='disconnect'),
    url(r'^list$', 'list', name='list'),
    url(r'^actions$', 'actions', name='actions'),
    url(r'^404$', 'error404', name='error404'),
)
|
UTF-8
|
Python
| false | false | 2,013 |
14,989,435,863,780 |
f2ea098fd0be857e6906a4bc1c2c4093e156f3c4
|
69eff1269f320adf7d38130c4610c30e1ed3d99d
|
/im/views/zmqsender.py
|
a3b7d591c862a5f1c691ccc55b717d04cb5da7f6
|
[] |
no_license
|
xiasheng/qim
|
https://github.com/xiasheng/qim
|
4a76c94236769878b03dc92e524d73d3e2c213bf
|
c72d165feca71179bb5ea75c5060223c6f2dcc34
|
refs/heads/master
| 2020-04-09T02:24:13.926615 | 2014-08-22T08:39:33 | 2014-08-22T08:39:33 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import zmq
class ZMQSender():
    """Process-wide ZeroMQ REQ client.

    NOTE(review): the context/socket are created and connected as a side
    effect of importing this module -- confirm that is intended.
    """
    instance = None
    context = zmq.Context()
    socket = context.socket(zmq.REQ)
    socket.connect("tcp://localhost:5557")
    @staticmethod
    def SendMsg(msg):
        # REQ sockets are strict send/recv lockstep: block for the reply.
        ZMQSender.socket.send_string(msg)
        res = ZMQSender.socket.recv()
        return res
def AddUser2Group(user, group):
    """Ask the remote worker to add `user` to ejabberd shared roster `group`."""
    command = 'ejabberdctl srg_user_add %s d-connected.com %s d-connected.com' %(user, group)
    ZMQSender.SendMsg(command)


if __name__ == '__main__':
    AddUser2Group('13022222222', 'test')
|
UTF-8
|
Python
| false | false | 2,014 |
13,640,816,179,488 |
1fedc99af9d1d5baf67c70de7aea883fbd9b9582
|
58895b2a69d8430b599d95a36750298610143b74
|
/code/yahoo_placemaker.py
|
55d785b5beeccc44ea4a5e75e4e30b3858cbe850
|
[] |
no_license
|
markhuberty/stackexchange
|
https://github.com/markhuberty/stackexchange
|
585e6c0b32b4c2843e985f8bb0dfbcf449f777d1
|
f70f47e3871e0edc7972ae2b0abcc6f61f19b4d7
|
refs/heads/master
| 2020-05-22T13:58:33.818206 | 2013-03-26T13:24:43 | 2013-03-26T13:24:43 | 3,125,649 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import urllib2
import urllib
import os
import csv
import re
import time
## This is a test comment
os.chdir('/home/markhuberty/Documents/stackexchange/data')
baseurl = 'http://wherein.yahooapis.com/v1/document'
# NOTE(review): API key committed in source -- consider loading it from an
# environment variable or an untracked config file instead.
apikey = 'D_LXiILV34FiY77NdugJQa._sV3kNPewot_0KsO.ZrNvX_iqWUmEGQ4UhCspZXzL6khK'
documentType = 'text/plain'
requests = []
## Initiate the locations and read in the CSV file
locations = []
def load_locations(location_csv):
    """Read a CSV of locations and return its rows as lists of strings.

    location_csv: path to the CSV file.
    Returns: list of rows, each row a list of column values.
    """
    # Fix: open in text mode.  Python 3's csv.reader rejects bytes input
    # ('rb' broke it); on POSIX, 'r' is equivalent for Python 2 as well.
    locations = []
    with open(location_csv, 'r') as f:
        for row in csv.reader(f):
            locations.append(row)
    return locations
def format_request(locations, apikey):
    """Build one Placemaker POST payload per location.

    Each location row is (id, text); only the text at index 1 is sent.
    """
    return [
        {'documentContent': place[1],
         'documentType': 'text/plain',
         'appid': apikey}
        for place in locations
    ]
def geocode_request(locations, requests, baseurl, sleep_interval=1):
    """POST each prepared payload to the Placemaker service (Python 2 only).

    Pairs each location with its payload, issues the HTTP POST, and collects
    {'location', 'result'} dicts.  URL errors are printed and the item is
    skipped, so the output may be shorter than the input.
    """
    geocoded_location = []
    for l, r, n in zip(locations, requests, range(len(requests))):
        print n
        try:
            encoded_request = urllib.urlencode(r)
            req = urllib2.Request(baseurl, encoded_request)
            response = urllib2.urlopen(req)
        except urllib2.URLError, e:
            if hasattr(e, 'reason'):
                print 'Reason', e.reason
            elif hasattr(e, 'code'):
                print 'Error code', e.code
        else:
            result = response.read()
            geocoded_location.append({'location':l[1], 'result':result})
            # Rate-limit between successful calls.
            time.sleep(sleep_interval)
    return(geocoded_location)
## End function definition
#################################################################
## Get the locations and convert to encoded requests
locations = load_locations('unique.locations.csv')
requests = format_request(locations, apikey)
#test_results = geocode_request(locations[1:10], requests[1:10], baseurl)
geocoded_results = geocode_request(locations, requests, baseurl, sleep_interval=0.1)
## Write out the results to a csv file
fieldnames = ('location', 'xml.location')
with open('geocoded_results.csv', 'wt') as f:
    writer = csv.DictWriter(f, fieldnames = fieldnames)
    # Write the header row manually (DictWriter.writeheader needs py >= 2.7).
    headers = dict( (n,n) for n in fieldnames)
    writer.writerow(headers)
    for result in geocoded_results:
        writer.writerow({'location': result['location'],
                         'xml.location': result['result']
                         }
                        )
quit()
|
UTF-8
|
Python
| false | false | 2,013 |
15,083,925,180,191 |
2cf7520e42055c321b799bebe62ccdd087890949
|
7b29700c45bcaa00e5ae8c034c782febcfa285ca
|
/CMGTools/WMass/cfg/test_mu_JPsiToMuMu_2011_cfg.py
|
72bea7ad41b04d30d7fc541a4964a835ce79c056
|
[] |
no_license
|
rmanzoni/tH
|
https://github.com/rmanzoni/tH
|
e29c09a247dd0795fce24d9bafbf5280df679b10
|
05f0baefed4d0b64dc121c43e4420ff7f1ac8e22
|
refs/heads/master
| 2016-09-03T07:21:59.444230 | 2014-06-09T19:48:52 | 2014-06-09T19:48:52 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import copy
import os
import CMGTools.RootTools.fwlite.Config as cfg
from CMGTools.RootTools.fwlite.Config import printComps
from CMGTools.WMass.triggerMap import pathsAndFilters
# --- Analyzer chain configuration (CMGTools framework) ---------------------
# Each cfg.Analyzer names an analyzer class plus its keyword parameters.
jsonAna = cfg.Analyzer(
    'JSONAnalyzer',
    )
triggerAna = cfg.Analyzer(
    'TriggerAnalyzer',
    keepFailingEvents = False
    )
vertexAna = cfg.Analyzer(
    'VertexAnalyzer',
    fixedWeight = 1.,
    keepFailingEvents = False
    )
# W selection (not in the active sequence below).
WAna = cfg.Analyzer(
    'WAnalyzer',
    recoilcut = 1000,
    pfmetcut = 0,
    jetptcut = 1000,
    pt = 30,
    eta = 2.1,
    iso = 0.5,
    savegenp = False,
    verbose = True,
    triggerMap = pathsAndFilters,
    keepFailingEvents = False
    )
WtreeProducer = cfg.Analyzer(
    'WTreeProducer'
    )
# J/psi -> mumu selection (re-uses the Z analyzer slot; ZAnalyzer kept
# commented out for reference).
ZAna = cfg.Analyzer(
#    'ZAnalyzer',
    'JPsiAnalyzer',
    recoilcut = 1000,
    pfmetcut = 0,
    jetptcut = 1000,
    pt = 30,
    eta = 2.1,
    iso = 0.5,
    savegenp = True,
    verbose = True,
    triggerMap = pathsAndFilters,
    keepFailingEvents = False
    )
ZtreeProducer = cfg.Analyzer(
    'ZTreeProducer'
    )
genAna = cfg.Analyzer(
    'GenParticleAnalyzerFSR',
    src = 'genParticlesPruned'
    )
# Analyzers run in this order; the W chain is disabled.
sequence = cfg.Sequence( [
    genAna,
    jsonAna,
    triggerAna,
    vertexAna,
#    WAna,
#    WtreeProducer,
    ZAna,
    ZtreeProducer
    ] )
# NOTE(review): TTJets is imported and renamed JPsiMM, then its file list
# and triggers are overwritten for the J/psi sample -- confirm intended.
from CMGTools.H2TauTau.proto.samples.ewk import TTJets as JPsiMM
from CMGTools.H2TauTau.proto.samples.getFiles import getFiles
JPsiMM.files = getFiles('/JPsiToMuMu_2MuPEtaFilter_7TeV-pythia6-evtgen/Fall11-PU_S6_START44_V9B-v1/AODSIM/V5_B/PAT_CMG_5_6_0_B', 'cmgtools', '.*root')
JPsiMM.triggers = ["HLT_Dimuon10_Jpsi_Barrel_v1",
                   "HLT_Dimuon10_Jpsi_Barrel_v2",
                   "HLT_Dimuon10_Jpsi_Barrel_v3",
                   "HLT_Dimuon10_Jpsi_Barrel_v5",
                   "HLT_Dimuon10_Jpsi_Barrel_v6",
                   "HLT_Dimuon10_Jpsi_Barrel_v9",
                   "HLT_Dimuon10_Jpsi_Barrel_v10",
                   ]
selectedComponents = [JPsiMM]
JPsiMM.splitFactor = 750
# TEST
# NOTE(review): test overrides left active -- restricts the job to 2 files
# and a single split; remove before a production run.
JPsiMM.splitFactor = 1
JPsiMM.files = JPsiMM.files[0:2]
config = cfg.Config( components = selectedComponents,
                     sequence = sequence )
printComps(config.components, True)
|
UTF-8
|
Python
| false | false | 2,014 |
10,599,979,292,258 |
566f4d3af3d4decbfb4ceb96fcb86edb4ecc2e91
|
866d598b753071c5f12a3715d8669a2f3ad4b2d6
|
/scripts/submit.py
|
cac52805bf96ddd73f953e8e07bd937c960c6fad
|
[] |
no_license
|
ralesi/ase-gaussian
|
https://github.com/ralesi/ase-gaussian
|
4e7dce854221ff0fe0c092fcddefa8e6fd44fbd3
|
9bbfc9d0aa1fe32deac8af3fcbd7ce84a88eef03
|
refs/heads/master
| 2021-01-23T12:21:19.338022 | 2011-07-28T21:06:59 | 2011-07-28T21:06:59 | 2,115,134 | 4 | 3 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
"""
Submission with preferred commands to gaussian or jacapo based on file type and settings.
Uses other aspects of RCA (convert and watch) to fit to required method and ensure that file is not already being run.
"""
|
UTF-8
|
Python
| false | false | 2,011 |
3,917,010,204,113 |
9ae4ec8f498af10019030379095d726881a035b0
|
96516344438acfd2b17bd526e749487904d3babd
|
/list_postag_types.py
|
35d599da793a7243e4fb6ee19d2f56668be2a212
|
[] |
no_license
|
gkovacs/translationsense
|
https://github.com/gkovacs/translationsense
|
80807e88cbdcf91afdc218fe286dde66b683c110
|
11d7df9ffd33e392cac9e8804827dd7542195342
|
refs/heads/master
| 2021-01-10T00:54:23.543454 | 2012-12-13T05:36:04 | 2012-12-13T05:36:04 | 6,636,150 | 3 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
# Python 2 hack: reload() restores the setdefaultencoding attribute that
# site.py deletes so UTF-8 strings print without explicit encoding.  This
# is a well-known anti-pattern; kept as-is for behavior.
reload(sys)
sys.setdefaultencoding("utf-8")
import json
if __name__ == '__main__':
    # Input maps sentence -> list of (word, POS-tag) pairs, as unpacked below.
    d = json.load(open('tagged_chinese_sentences.json'))
    tags = []
    for sentence,tagged in d.iteritems():
        for word,tag in tagged:
            tags.append(tag)
    # Print the de-duplicated, sorted POS tag inventory.
    print sorted(list(set(tags)))
|
UTF-8
|
Python
| false | false | 2,012 |
738,734,385,305 |
244376325ecf335a41bf114e599048e7c8965241
|
63c09a10077b219d09eb252f298f60bc5ea68d0c
|
/scraper/iowa_salaries/items.py
|
25d697f41a2c93b6a2853cc6b90d8e6bc360066c
|
[
"MIT"
] |
permissive
|
austinlyons/iowa_salaries
|
https://github.com/austinlyons/iowa_salaries
|
bcb822de8587d11f706af37f9dfc1505814539a0
|
9c30559648527e81de51f41559a8a0da16dd0d14
|
refs/heads/master
| 2021-01-01T19:35:02.398537 | 2013-09-09T12:38:44 | 2013-09-09T12:38:44 | 12,630,464 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/topics/items.html
from scrapy.item import Item, Field
class IowaSalaryItem(Item):
    """Scrapy item: one scraped Iowa public-employee salary record."""
    year = Field()
    salary = Field()
    employee = Field()
    department = Field()
    position = Field()
    county = Field()
    sex = Field()
    base_salary = Field()
    extra_money = Field()
|
UTF-8
|
Python
| false | false | 2,013 |
5,695,126,638,787 |
0b7f07dc6fc154fc024f1c9c7abf44ce2eaf2b78
|
dd118f77a47ae545bdba9f08dd490532d129a976
|
/preventista/MMC/data/python/preventista/dsv.py
|
3562ae36edcbbb9db84f6875b464633dcd58b524
|
[] |
no_license
|
pointtonull/S60Salesman
|
https://github.com/pointtonull/S60Salesman
|
2e86a918060d7d3e5378ef9c3a66a8f07d918314
|
0572a61125cd22ae92e6523e1f06735cb9066acf
|
refs/heads/master
| 2020-06-12T13:57:35.699158 | 2011-07-07T01:39:42 | 2011-07-07T01:39:42 | 1,349,021 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
#-*- coding: UTF-8 -*-
"""
Smart interface for reading and writing dsv files. Will recive and use
generator for avoid memory exhaustion.
"""
from debug import debug
class Data_manager(object):
    """Read/write delimiter-separated files via the DSV helper module.

    NOTE(review): `DSV` is referenced throughout but no `import DSV` is
    visible in this file (only `from debug import debug`) -- confirm it is
    injected elsewhere.  Reading decodes as UTF-8 while writing encodes as
    latin-1; presumably intentional for the target system, but verify.
    """
    def __init__(self, delimiter=";", qualifier="'", hasheader=True,
                 quoteall=True, newline=None):
        # None for qualifier/delimiter/hasheader/newline means "guess from
        # the file" in fromfile().
        self._delimiter = delimiter
        self._qualifier = qualifier
        self._hasheader = hasheader
        self._quoteall = quoteall
        self._newline = newline
    def fromfile(self, filename):
        """Parse `filename` and return a list of {header: unicode value} dicts.

        NOTE(review): the file handle is never closed, and when
        hasheader is false the integer column numbers are passed to
        unicode(item, "utf8") which would raise TypeError -- verify.
        """
        data = open(filename).read()
        # Normalize line endings to "\n", detecting them if not configured.
        if self._newline is None:
            for posible in ("\r\n", "\n\r", "\n"):
                if posible in data:
                    self._newline = posible
                    data = data.replace(self._newline, "\n")
                    break
        else:
            data = data.replace(self._newline, "\n")
        if self._qualifier is None:
            self._qualifier = DSV.guessTextQualifier(data)
        data = DSV.organizeIntoLines(data, textQualifier=self._qualifier)
        if self._delimiter is None:
            self._delimiter = DSV.guessDelimiter(data)
        if self._hasheader is None:
            self._hasheader = DSV.guessHeaders(data)
        dsvlistoflists = DSV.importDSV(data, delimiter=self._delimiter,
            textQualifier=self._qualifier)
        columns = len(dsvlistoflists[0])
        # Use the first row as headers, or 1..N column numbers otherwise.
        if self._hasheader:
            headers = dsvlistoflists.pop(0)
        else:
            headers = range(1, columns + 1)
        for index, item in enumerate(headers):
            headers[index] = unicode(item, "utf8", "replace")
        # Map every data row onto its header names.
        dsvlistofdicts = []
        for row in dsvlistoflists:
            mapped_row = {}
            for column in xrange(columns):
                mapped_row[headers[column]] = unicode(row[column], "utf8",
                    "replace")
            dsvlistofdicts.append(mapped_row)
        return dsvlistofdicts
    def tofile(self, filename, data, columns_order=None, append=False):
        """Write `data` (list of dicts, as fromfile() returns) to `filename`.

        If `columns_order` is given it fixes the column order; otherwise the
        (effectively arbitrary) dict key order of the first row is used.
        With append=True no header row is written and rows are appended
        without erasing previous content.

        NOTE(review): when columns_order is passed, the caller's list is
        mutated in place (headers are encoded into it) -- verify acceptable.
        """
        if columns_order:
            headers = columns_order
        else:
            headers = data[0].keys()
        for index, item in enumerate(headers):
            headers[index] = item.encode("latin-1", "replace")
        listoflists = []
        if not append:
            listoflists.append(headers)
        for dictrow in data:
            listrow = []
            for key in headers:
                listrow.append(dictrow[key].encode("latin-1", "replace"))
            listoflists.append(listrow)
        as_string = DSV.exportDSV(listoflists, self._delimiter, self._qualifier,
            self._quoteall) + self._newline
        as_lines = as_string.splitlines(True)
        if not append:
            # Strip qualifier quotes from the header line only.
            as_lines[0] = as_lines[0].replace("'", "")
            file = open(filename, "w")
        else:
            file = open(filename, "a")
        file.writelines(as_lines)
        file.close()
        return filename
if __name__ == "__main__":
data = Data_manager()
clientes = data.fromfile("../input/clientes.csv")
print(clientes)
header = ["COD_CLI", "NRO_ZON", "APNBR_CLI"]
print(data.tofile("../output/prueba.csv", clientes, header))
|
UTF-8
|
Python
| false | false | 2,011 |
13,391,708,071,209 |
625baf1e1edb44b31596b0447c3073822f9d49ec
|
2e6ca53f74b2b4c2fef8ea8d62c979ced4826626
|
/addons/sgr/component_registry_detail.py
|
015e31bc809168d7dd073d17bce402d96ee01a40
|
[
"AGPL-3.0-or-later"
] |
non_permissive
|
agrihold/sagi
|
https://github.com/agrihold/sagi
|
80f5a1485f6afc9592b157fa5324a8362a871feb
|
c1c5673cb8febfb4e9818be5ddb53483fce72066
|
refs/heads/master
| 2021-01-11T07:34:58.283827 | 2014-02-12T19:22:32 | 2014-02-12T19:22:32 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
##############################################################################
#
# SGR
# Copyright (C) 2013 Grupo ADHOC
# No email
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import re
from openerp import netsvc
from openerp.osv import osv, fields
class component_registry_detail(osv.osv):
    """SGR component registry line (OpenERP 7 osv model).

    Links one component product to a registry, with its finalities,
    formulated products, suppliers, emergency info, CQQ and packaging.
    """
    _name = 'sgr.component_registry_detail'
    _description = 'component_registry_detail'
    # Field domains/contexts restrict product pickers to the proper
    # function_type ('component' vs 'formulated').
    _columns = {
        'component_id': fields.many2one('product.product', string='Component', help=u"""Filtrar por productos del tipo component""", required=True, context={'default_function_type':'component'}, domain=[('function_type','=','component')]),
        'component_registry_id': fields.many2one('sgr.component_registry', string='Registry', ondelete='cascade', required=True),
        'component_registry_finality_ids': fields.many2many('sgr.component_registry_finality', 'sgr___component_registry_finality_ids_rel', 'component_registry_detail_id', 'component_registry_finality_id', string='Finality', required=True),
        'formulated_product_ids': fields.many2many('product.product', 'sgr_formulated_product_ids___rel', 'component_registry_detail_id', 'product_id', string='Formulated Products', context={'default_function_type':'formulated'}, domain=[('function_type','=','formulated')], required=True),
        'uses_ids': fields.many2many('sgr.formulated_product_registry', 'sgr_component_registry_detail_ids_uses_ids_rel', 'component_registry_detail_id', 'formulated_product_registry_id', string='Uses', readonly=True),
        'supplier_ids': fields.many2many('res.partner', 'sgr___supplier_ids_rel', 'component_registry_detail_id', 'partner_id', string='Suppliers', context={'default_supplier':'True','default_is_company':'True'}, domain=[('supplier','=',True)]),
        'emergency_information_id': fields.many2one('sgr.emergency_information', string='Emergency Information'),
        'cqq_ids': fields.one2many('sgr.cqq', 'component_registry_detail_id', string='CQQ'),
        'packaging_ids': fields.one2many('sgr.packaging', 'component_registry_detail_id', string='Packaging'),
    }
    # Default registry comes from the caller's context (e.g. a one2many view).
    _defaults = {
        'component_registry_id': lambda self, cr, uid, context=None: context and context.get('component_registry_id', False),
    }
    _constraints = [
    ]
component_registry_detail()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
UTF-8
|
Python
| false | false | 2,014 |
4,234,837,791,356 |
1f82f7441c7bd216ca20e584874fa08428649266
|
918ef23dee010ab808a808201266e457ef1a356f
|
/document/__init__.py
|
c555f518d89ce701a7b64a47190957dabb079726
|
[] |
no_license
|
techiev2/Mordor
|
https://github.com/techiev2/Mordor
|
17966de615ed2854d863e4441e0905c1d33a89c5
|
b86b28e5356fe41825a52b089907fa1f2515eb0a
|
refs/heads/master
| 2016-04-01T04:19:58.148724 | 2014-11-18T14:53:40 | 2014-11-18T14:53:40 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# coding=utf-8
from .document import MorDoc
__all__ = ('MorDoc',)
|
UTF-8
|
Python
| false | false | 2,014 |
13,280,038,899,938 |
5abe41ac128aa011fc368c463856cda78d6b57b9
|
76c39646a920cba97aef0bde1edb6a9f7e6d53e3
|
/trello.py
|
0669cc82f183b0a36beea126cfde87b877388843
|
[] |
no_license
|
drewtempelmeyer/trello
|
https://github.com/drewtempelmeyer/trello
|
50978f4a78b70faada731901868f32100dbcb87e
|
6294da553ed28e05984dcd793c637f6fec47808e
|
refs/heads/master
| 2016-09-11T07:07:27.941668 | 2013-01-08T17:21:52 | 2013-01-08T17:21:52 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
import argparse
import json
import os
import requests
from colorama import init, Fore
# Trello API endpoint and application identity.
# NOTE(review): the application key is committed in source; Trello app keys
# are public, but verify nothing secret is stored here.
API_KEY = 'a65c7d23c318237578a5f27c76c74f8e'
API_URL = 'https://api.trello.com/1/'
APP_NAME = 'trello-cmd'
# Per-user token file, written by TrelloClient.setup().
CONFIG = os.path.join(os.environ['HOME'], '.trello')
class NoConfigException(Exception):
    """Raised when the ~/.trello configuration file is missing."""
    pass
class TrelloClient:
def __init__(self):
self._config = {}
self._boards = {}
self._orgs = {}
def read_config(self):
if os.path.isfile(CONFIG):
config_file = open(CONFIG, 'r')
self._config = json.loads(config_file.read())
else:
raise NoConfigException('Configuration file does not exists.')
def list_boards(self, org=None):
if not org:
url = 'members/my/boards?filter=open&key=%s&token=%s' % (API_KEY,
self._config['token'])
else:
url = 'organization/%s/boards?filter=open&key=%s&token=%s' % (org,
API_KEY, self._config['token'])
r = requests.get('%s%s' % (API_URL, url))
print Fore.GREEN + 'Boards' + Fore.RESET
for board in r.json():
print ' ' + board['name'] + ' (' + \
self.get_org(board['idOrganization'])['displayName'] + ')'
def list_orgs(self, should_print=True):
self._orgs = {}
r = requests.get('%smembers/my/organizations?key=%s&token=%s' % (
API_URL, API_KEY, self._config['token']))
if should_print:
print Fore.GREEN + 'Organizations' + Fore.RESET
print ' %-15s %s' % ('Board Name', 'Board Display Name')
print ' %-15s %s' % ('----------', '------------------')
for org in r.json():
self._orgs[org['id']] = {
'name': org['name'],
'displayName': org['displayName']
}
if should_print:
print ' %-15s %s' % (org['name'], org['displayName'])
return self._orgs
def get_org(self, org_id=None):
try:
return self._orgs[org_id]
except KeyError:
r = requests.get('%sorganizations/%s?key=%s&token=%s' % (API_URL,
org_id, API_KEY, self._config['token']))
org = r.json()
self._orgs[org['id']] = {
'name': org['name'],
'displayName': org['displayName']
}
return self._orgs[org['id']]
def setup(self):
"""Set up the client for configuration"""
if os.path.isfile(CONFIG):
os.remove(CONFIG)
auth_url = '%sauthorize?key=%s&name=%s&expiration=never&response_type='\
'token&scope=read,write' % (API_URL, API_KEY, APP_NAME)
print 'Open %s in your web browser' % auth_url
token = raw_input('Paste the token: ')
config_file = open(CONFIG, 'w')
config_file.write(json.dumps({'token': token}))
config_file.close()
print Fore.GREEN + 'Your config is ready to go!' + Fore.RESET
def run(self):
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(help='commands')
board_parser = subparsers.add_parser('boards', help='Board operations')
board_parser.add_argument('-o', '--org', action='store', help='''List
boards for specific organizations''')
board_parser.set_defaults(which='board')
org_parser = subparsers.add_parser('orgs', help='List organizations')
org_parser.set_defaults(which='org')
config_parser = subparsers.add_parser('reconfig',
help='Reconfigure the client')
config_parser.set_defaults(which='reconfig')
options = parser.parse_args()
if not os.path.isfile(CONFIG) or options.which is 'reconfig':
self.setup()
elif options.which is 'board':
self.read_config()
self.list_boards()
elif options.which is 'org':
self.read_config()
self.list_orgs()
if __name__ == '__main__':
    init()  # initialize colorama so Fore colors render on all platforms
    client = TrelloClient()
    client.run()
|
UTF-8
|
Python
| false | false | 2,013 |
16,870,631,560,758 |
c71eba1e2512ed4b208f1bd40f91d38c8f82be97
|
7f3e34fb4a62d9a738094a072765533916a2f2dc
|
/blog/urls.py
|
9a3b08bde27b47df3e034f34fa0fc4bbed8d6acf
|
[] |
no_license
|
mcourageous/personal
|
https://github.com/mcourageous/personal
|
1e571db13715450a8d442d853eca9da66c49b758
|
c181366713a22402241f83d72f20f68546124d1f
|
refs/heads/master
| 2021-01-23T20:14:20.373813 | 2014-07-07T09:18:02 | 2014-07-07T09:18:02 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.conf.urls import patterns, include, url
from django.views.generic import ListView, DetailView
from blog.models import Lyrics
from blog.views import Lyric, LyricDetail, Poem, PoemDetail, Stories, StoryDetail, BlogList, BlogDetail
from django.contrib import admin
# Populate the admin registry before wiring URLs.
admin.autodiscover()
urlpatterns = patterns('',
    url(r'^admin/', include(admin.site.urls)),
    url(r'^$', 'blog.views.home', name='home'),
    url(r'^about', 'blog.views.about', name='about'),
    # Class-based list/detail view pairs for each content type; detail
    # routes must precede their list routes (list patterns are unanchored).
    url(r'^lyrics/(?P<pk>\d+)$', LyricDetail.as_view(), name='detail'),
    url(r'^lyrics', Lyric.as_view(), name='listing'),
    url(r'^poems/(?P<pk>\d+)$', PoemDetail.as_view(), name="poem_detail"),
    url(r'^poems', Poem.as_view(), name='poem_listing'),
    url(r'^stories/(?P<pk>\d+)$', StoryDetail.as_view(), name="story_detail"),
    url(r'^stories',Stories.as_view(), name="story_list"),
    url(r'^blog/(?P<pk>\d+)$', BlogDetail.as_view(), name="blog_detail"),
    url(r'^blog', BlogList.as_view(), name="blog_list"),
)
|
UTF-8
|
Python
| false | false | 2,014 |
10,213,432,258,200 |
b45555018991c942d292e205bf3c49d2cf191439
|
6c25652067ff5654ca32f24ad4fc28b0a978aef8
|
/Code/InfinityJSONParser/tohaa_symbiont.py
|
c1779e9800357d21f3b167c20eea0ece7addf71e
|
[] |
no_license
|
khariq/InfinityTroopCards
|
https://github.com/khariq/InfinityTroopCards
|
ce71b274af1adfeedfafe05760b76f194ffa86c6
|
299f78709c2bf5053656f35574dc61112b77a892
|
refs/heads/master
| 2021-01-23T12:48:11.243305 | 2014-01-03T18:58:08 | 2014-01-03T18:58:08 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
__author__ = 'u0064666'
import json
# Registry of unit name -> first alternate profile, filled by parseTohaa().
SymbiontArmors = {}


def parseTohaa(rootDirectory):
    """Populate SymbiontArmors from the Tohaa unit data file.

    Reads <rootDirectory>Code\\InfinityJSONParser\\Data\\tohaa.json and, for
    every named unit whose 'spec' list contains 'Symbiont Armour', records
    the unit's first alternate profile ('altp'), keeping only the first
    occurrence of each unit name.
    """
    # Fix: `with` guarantees the handle is closed; the original leaked it.
    # (The path literal's value is unchanged -- only escaping normalized.)
    with open(rootDirectory + 'Code\\InfinityJSONParser\\Data\\tohaa.json', 'r') as f:
        units = json.load(f)['Tohaa']
    for unit in units:
        if 'altp' in unit and 'spec' in unit and 'Symbiont Armour' in unit['spec']:
            name = unit.get('name', '')
            if name != '':
                profile = unit.get('altp', [{}])[0]
                # First occurrence wins; the dict is mutated in place, so no
                # `global` declaration is needed.
                if SymbiontArmors.get(name) is None:
                    SymbiontArmors[name] = profile
|
UTF-8
|
Python
| false | false | 2,014 |
19,069,654,801,799 |
f026bff9c2a6f64986d2e6d347779ab1b8abb6fb
|
98520eae7544b61e8432e654bd707df59ad74b76
|
/src/berk/plugins.py
|
6242627174690db29208d65ecdff867aeeff59a5
|
[] |
no_license
|
vstojkovic/berk
|
https://github.com/vstojkovic/berk
|
a1f15b28c583b4085578738531c15c0771a4c49c
|
fd11dbd5e9ab0fc201d7f1f6b8d2bee1a7135e01
|
refs/heads/master
| 2020-06-26T08:04:13.545253 | 2013-03-21T01:50:16 | 2013-03-21T01:50:16 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import sys
import imp
import importlib
import os.path
import schema
import yaml
from collections import namedtuple, defaultdict, OrderedDict
from semantic_version import Version, Spec
from pygraph.classes.digraph import digraph
from pygraph.algorithms.traversal import traversal
from pygraph.algorithms.accessibility import accessibility
from pygraph.algorithms.sorting import topological_sorting
def relaxed_spec(spec_str):
    """Turn a bare version like '1.2' into the caret-style range '>=1.2,<2.0.0'.

    Specs that are empty or start with an explicit operator (<, >, = or !)
    are passed through to Spec unchanged.
    """
    if not spec_str or spec_str[0] in '<>=!':
        return Spec(spec_str)
    min_ver = Version(spec_str, partial=True)
    return Spec('>=%s' % spec_str, '<%d.0.0' % (min_ver.major + 1))
# Schema for one entry of a manifest's `required_plugins` list.
dependency_schema = {'id': schema.required(str),
    'version': schema.optional(schema.chain(str, relaxed_spec))}
PluginDependency = namedtuple('PluginDependency', dependency_schema.keys())
# Coerces a raw dict into a validated PluginDependency.
dependency_coercer = schema.chain(schema.dict(dependency_schema),
    schema.kwargs(PluginDependency))
# Schema for a whole plugin manifest file.
manifest_schema = {'id': schema.required(str),
    'version': schema.required(schema.chain(str, Version)),
    'name': schema.required(str), 'author': schema.optional(str),
    'description': schema.optional(str), 'main_module': schema.optional(str),
    'src_path': schema.optional(str),
    'required_plugins': schema.optional(schema.list(dependency_coercer)),
    'exported_modules': schema.optional(schema.list(str))}
# NOTE(review): namedtuple fields come from dict .keys(), whose order is
# arbitrary on older Pythons; safe here only because construction goes
# through schema.kwargs (keyword arguments), not positional args.
PluginManifest = namedtuple('PluginManifest', manifest_schema.keys())
manifest_coercer = schema.chain(schema.dict(manifest_schema),
    schema.kwargs(PluginManifest))
PluginInfo = namedtuple('PluginInfo', ('active', 'manifest', 'path'))
class PluginModuleLoader(object):
    """PEP 302 loader wrapping the classic imp.find_module() result triple."""
    def __init__(self, file, pathname, description):
        self.file = file
        self.pathname = pathname
        self.description = description
    def load_module(self, full_name):
        """Import the module; always close the open file handle afterwards."""
        try:
            return imp.load_module(full_name, self.file, self.pathname,
                self.description)
        finally:
            if self.file: self.file.close()
class PluginModuleFinder(object):
    """sys.meta_path finder that resolves imports from one plugin directory.

    Every module name it successfully resolves is recorded in
    ``plugin_modules`` so the plugin manager can later decide which entries
    of sys.modules belong to the plugin.
    """
    def __init__(self, plugin_path):
        self.plugin_path = plugin_path
        self.plugin_modules = []

    def find_module(self, full_name, path=None):
        """Return a loader for ``full_name`` if it lives under the plugin dir."""
        # Never shadow modules that are already imported.
        if full_name in sys.modules: return
        package, _, name = full_name.rpartition('.')
        # Map the dotted package onto the plugin directory.  FIX: the original
        # used ``reduce(os.path.join, parts, base)`` -- ``reduce`` is a
        # Python-2-only builtin; ``os.path.join(base, *parts)`` is the exact
        # equivalent and works on both Python 2 and 3.
        pkg_path = os.path.normpath(
            os.path.join(self.plugin_path, *package.split('.')))
        try:
            module_info = imp.find_module(name, [pkg_path])
        except ImportError: return
        self.plugin_modules.append(full_name)
        return PluginModuleLoader(*module_info)
class DependencyProblem(object):
    """Records that ``plugin_id`` has an unsatisfiable ``dependency``."""
    def __init__(self, plugin_id, dependency):
        self.plugin_id = plugin_id
        self.dependency = dependency
class MissingDependency(DependencyProblem):
    """The required plugin is not registered at all."""
class IncorrectVersion(DependencyProblem):
    """The required plugin exists but its version does not satisfy the spec."""
class IndirectDependency(DependencyProblem):
    """The required plugin was itself dropped for its own dependency problems."""
class DisabledDependency(DependencyProblem):
    """The required plugin is registered but not enabled."""
class CyclicDependency(DependencyProblem):
    """The plugin participates in a dependency cycle; no single dependency
    can be blamed, so ``dependency`` is always None."""
    def __init__(self, plugin_id):
        super(CyclicDependency, self).__init__(plugin_id, None)
class PluginManagerError(Exception):
    """Base class for all plugin-manager errors."""
    pass
class LoadDependencyError(PluginManagerError):
    """Raised when a plugin is loaded before one of its dependencies."""
    def __init__(self, plugin_id, dependency):
        # BUG FIX: the original called ``super(DependencyError, self)`` --
        # ``DependencyError`` does not exist anywhere, so raising this
        # exception itself raised NameError.
        super(LoadDependencyError, self).__init__(
            'Plugin dependency not loaded (id=%r, dependency=%r, version=%r)' %
            (plugin_id, dependency.id, str(dependency.version)))
        self.plugin_id = plugin_id
        self.dependency = dependency
class UnloadDependencyError(PluginManagerError):
    """Raised when a plugin is unloaded while others still depend on it."""
    def __init__(self, plugin_id, dependent_id):
        # BUG FIX: same undefined ``DependencyError`` super() call, and the
        # message formatted ``dependency.id`` although this constructor only
        # receives ``dependent_id``.
        super(UnloadDependencyError, self).__init__(
            'Dependent plugin should be unloaded (id=%r, dependent=%r)' %
            (plugin_id, dependent_id))
        self.plugin_id = plugin_id
        self.dependent_id = dependent_id
class PluginManager(object):
    """Registry that resolves, loads and unloads plugins.

    Plugins live in subdirectories of ``base_dir`` and are described by
    YAML manifests.  Dependency resolution builds a digraph with edges
    (dependency -> dependent), records per-plugin problems and computes a
    topological load order.  Modules imported by a plugin are refcounted so
    modules shared between plugins stay in sys.modules until the last user
    is unloaded.
    """
    def __init__(self, base_dir):
        self.base_dir = base_dir
        self.manifests = {}                      # plugin id -> PluginManifest
        self.enabled_plugins = set()             # ids eligible for loading
        self.plugins = OrderedDict()             # id -> imported main module
        self.plugin_modules = {}                 # id -> module names it owns
        self.module_refcount = defaultdict(int)  # module name -> user count
        self.mark_dirty()

    def plugin_path(self, plugin_id):
        """Default source directory of a plugin."""
        return os.path.join(self.base_dir, plugin_id)

    def load_manifest(self, filename):
        """Read and validate a YAML manifest file into a PluginManifest."""
        with open(filename, 'r') as f:
            manifest_dict = yaml.safe_load(f)
        return manifest_coercer(manifest_dict)

    def add_manifest(self, manifest, enabled=True):
        """Register a validated manifest; duplicate ids raise ValueError."""
        if manifest.id in self.manifests:
            # BUG FIX: the original formatted an undefined name ``plugin_id``
            # here, turning the intended ValueError into a NameError.
            raise ValueError('Duplicate plugin ID: %s' % manifest.id)
        self.manifests[manifest.id] = manifest
        if enabled:
            self.enabled_plugins.add(manifest.id)
        self.mark_dirty()

    def add_plugin(self, plugin_id, enabled=True, manifest_path=None):
        """Register a plugin by reading its manifest.yml from disk."""
        if not manifest_path:
            manifest_path = os.path.join(self.plugin_path(plugin_id),
                'manifest.yml')
        self.add_manifest(self.load_manifest(manifest_path), enabled=enabled)

    def plugin_dependencies(self, plugin_id):
        """PluginDependency entries declared by the plugin (possibly empty)."""
        manifest = self.manifests[plugin_id]
        return manifest.required_plugins or []

    @property
    def dirty(self):
        # Resolution results are invalidated whenever manifests change.
        return self.dependency_graph is None

    def mark_dirty(self):
        """Invalidate all cached dependency-resolution results."""
        self.dependency_graph = None
        self._dependency_problems = None
        self._load_order = None

    def resolve_plugin_dependencies(self):
        """Rebuild the dependency graph, problem map and load order."""
        graph = digraph()
        problems = defaultdict(list)
        def check_plugin_dependencies(plugin_id):
            # Returns True when every declared dependency is satisfiable.
            ok = [True]  # mutable cell: Python 2 closures cannot rebind outer names
            def add_problem(problem_type, plugin_id, dependency):
                problems[plugin_id].append(problem_type(plugin_id, dependency))
                # BUG FIX: the original assigned a *local* ``result = False``
                # inside this nested function, so the outer result was never
                # updated and every plugin appeared dependency-clean.
                ok[0] = False
            for dependency in self.plugin_dependencies(plugin_id):
                if dependency.id not in self.manifests:
                    add_problem(MissingDependency, plugin_id, dependency)
                elif dependency.version:
                    # BUG FIX: the original read ``manifests[required_id]`` --
                    # both names are undefined in this scope.
                    if self.manifests[dependency.id].version not in dependency.version:
                        add_problem(IncorrectVersion, plugin_id, dependency)
                elif dependency.id not in graph:
                    # Node already removed (dependency had its own problems)
                    # or never added (the plugin is disabled).
                    if dependency.id in self.enabled_plugins:
                        add_problem(IndirectDependency, plugin_id, dependency)
                    else:
                        add_problem(DisabledDependency, plugin_id, dependency)
            return ok[0]
        def remove_dependents(plugin_id):
            # Drop the failing plugin and everything that transitively
            # depends on it, recording why each dependent was dropped.
            for node in traversal(graph, plugin_id, 'pre'):
                for dependent in graph[node]:
                    edge = node, dependent
                    problems[dependent].append(IndirectDependency(dependent,
                        graph.get_edge_properties(edge)['dependency']))
                graph.del_node(node)
        graph.add_nodes(self.enabled_plugins)
        for plugin_id in self.enabled_plugins:
            if check_plugin_dependencies(plugin_id):
                for dependency in self.plugin_dependencies(plugin_id):
                    edge = dependency.id, plugin_id  # dependency loads first
                    graph.add_edge(edge)
                    graph.set_edge_properties(edge, dependency=dependency)
            else:
                remove_dependents(plugin_id)
        # A node is part of a cycle when one of its dependents can also reach
        # it; such plugins can never be ordered, so drop them from the graph.
        transitive_deps = accessibility(graph)
        cycle_nodes = [
            node
            for node in graph
            if any(
                (node in transitive_deps[dependent])
                for dependent in transitive_deps[node]
                if dependent != node)]
        for node in cycle_nodes:
            problems[node].append(CyclicDependency(node))
            graph.del_node(node)
        self.dependency_graph = graph
        self._dependency_problems = problems
        self._load_order = topological_sorting(graph)

    @property
    def dependency_problems(self):
        """Mapping plugin id -> list of DependencyProblem (lazily computed)."""
        if self.dirty:
            self.resolve_plugin_dependencies()
        return self._dependency_problems

    @property
    def load_order(self):
        """Plugin ids ordered so every dependency precedes its dependents."""
        if self.dirty:
            self.resolve_plugin_dependencies()
        return self._load_order

    def dependent_plugins(self, plugin_id):
        """Ids of plugins that directly depend on ``plugin_id``."""
        if self.dirty:
            self.resolve_plugin_dependencies()
        return self.dependency_graph[plugin_id]

    def _load_plugin(self, plugin_id, path=None):
        """Import the plugin's main module through a temporary meta-path finder."""
        manifest = self.manifests[plugin_id]
        path = path or manifest.src_path or self.plugin_path(plugin_id)
        imp.acquire_lock()  # serialize against concurrent importers
        try:
            finder = PluginModuleFinder(path)
            sys.meta_path.append(finder)
            main_module = manifest.main_module or plugin_id
            result = importlib.import_module(main_module)
            plugin_exports = [main_module]
            plugin_modules = []
            if manifest.exported_modules:
                plugin_exports.extend(manifest.exported_modules)
            # Keep only exported modules importable by others; the plugin's
            # private modules are evicted from sys.modules again.
            for module_name in finder.plugin_modules:
                if hasattr(sys.modules[module_name], '__path__'):
                    # A package survives when any export lives inside it.
                    pkg_prefix = module_name + '.'
                    should_remove = not any(
                        name.startswith(pkg_prefix) for name in plugin_exports)
                else:
                    should_remove = module_name not in plugin_exports
                if should_remove:
                    sys.modules.pop(module_name, None)
                else:
                    plugin_modules.append(module_name)
                    self.module_refcount[module_name] += 1
            self.plugin_modules[plugin_id] = plugin_modules
            return result
        finally:
            sys.meta_path.remove(finder)
            imp.release_lock()

    def load_plugin(self, plugin_id, path=None, recursive=False):
        """Load one plugin; with recursive=True its dependencies load first.

        Raises LoadDependencyError when a dependency is not loaded and
        ``recursive`` is False.
        """
        for dependency in self.plugin_dependencies(plugin_id):
            if dependency.id not in self.plugins:
                if not recursive:
                    raise LoadDependencyError(plugin_id, dependency)
                else:
                    # BUG FIX: the original passed the undefined name
                    # ``dependency_id`` here.
                    self.load_plugin(dependency.id, recursive=True)
        plugin = self._load_plugin(plugin_id, path)
        self.plugins[plugin_id] = plugin
        return plugin

    def load_all(self):
        """Load every enabled, resolvable plugin in dependency order."""
        for plugin_id in self.load_order:
            self.load_plugin(plugin_id)

    def _unload_plugin(self, plugin_id):
        # Release this plugin's module references; a shared module leaves
        # sys.modules only when its refcount drops to zero.
        for module_name in self.plugin_modules[plugin_id]:
            self.module_refcount[module_name] -= 1
            if not self.module_refcount[module_name]:
                sys.modules.pop(module_name, None)
        del self.plugin_modules[plugin_id]
        del self.plugins[plugin_id]

    def unload_plugin(self, plugin_id, recursive=False):
        """Unload a plugin; with recursive=True its dependents unload first."""
        for dependent in self.dependent_plugins(plugin_id):
            if dependent in self.plugins:
                if not recursive:
                    raise UnloadDependencyError(plugin_id, dependent)
                else:
                    self.unload_plugin(dependent, recursive=True)
        self._unload_plugin(plugin_id)

    def enable_plugin(self, plugin_id):
        """Mark a registered plugin as eligible for loading."""
        if plugin_id in self.enabled_plugins: return
        self.enabled_plugins.add(plugin_id)
        self.mark_dirty()

    def disable_plugin(self, plugin_id, unload_dependents=False):
        """Disable a plugin, unloading it (and optionally dependents) first."""
        if plugin_id not in self.enabled_plugins: return
        if plugin_id in self.plugins:
            self.unload_plugin(plugin_id, recursive=unload_dependents)
        self.enabled_plugins.remove(plugin_id)
        self.mark_dirty()
|
UTF-8
|
Python
| false | false | 2,013 |
5,884,105,235,361 |
ac1a23334cdc65e630b51a481a7e6340fb0177e7
|
4addfb999c40edad291e1dffb5dc5651ad0cebba
|
/mm/composer_xls.py
|
c92f57e6d320daa95f07bfddef385c2718b65f84
|
[] |
no_license
|
namitkewat/mm
|
https://github.com/namitkewat/mm
|
a931a8b3a16802f83ad22ba48e994960cb3fbc52
|
ca5707140265086178d54091bccd781cf27374af
|
refs/heads/master
| 2018-05-30T12:54:21.569850 | 2013-01-02T21:16:57 | 2013-01-02T21:16:57 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from composer_base import ComposerBase
import lib.xlwt_0_7_2 as xlwt
from lib.font_data.core import get_string_width
from lib.xldate.convert import to_excel_from_C_codes
import logging
import StringIO
import model_base
import style_base
log = logging.getLogger(__name__)
def get_string_width_from_style(char_string, style):
    """Estimate the column width (in xlwt units) needed to display
    *char_string* with the font carried by *style*."""
    font = style.font
    # xlwt stores the font height as points * 0x14; undo that scaling first.
    return int(get_string_width(font.name, font.height / 0x14, char_string) * 50)
class styleXLS(style_base.StyleBase):
    """Style holder for the XLS composer: defaults used when a serialized
    style string does not override them."""
    # NOTE: the xlwt objects below are created once at class-definition time
    # and shared by every instance that does not override them.
    font_name = "Times New Roman"
    is_bold = False
    font_points = 12
    text_align = xlwt.Alignment()
    pattern = xlwt.Pattern()
    border = xlwt.Borders()
class ComposerXLS(ComposerBase):
    """Composer that renders the document grid into an Excel (.xls) workbook
    via xlwt and returns the serialized file contents."""

    def convert_style(self, stylestr):
        """Translate a serialized style string into an ``xlwt.XFStyle``."""
        parsed = styleXLS()
        parsed.style_from_string(stylestr)
        font = xlwt.Font()
        font.name = parsed.font_name
        font.bold = parsed.is_bold
        font.height = parsed.font_points * 0x14  # xlwt keeps height as points * 0x14
        xf = xlwt.XFStyle()
        xf.font = font
        xf.alignment = parsed.text_align
        xf.pattern = parsed.pattern
        xf.borders = parsed.border
        return xf

    def cell_to_value(self, cell):
        """Map a model cell to an (excel value, xlwt style) pair."""
        style = self.convert_style(self.document.config.row_styles[0])
        cell_type = type(cell)
        if cell_type == model_base.HeaderFieldType:
            return cell.data, self.convert_style(self.document.config.header_style)
        if cell_type in (model_base.IntFieldType, model_base.StringFieldType):
            return cell.data, style
        if cell_type == model_base.DateTimeFieldType:
            style.num_format_str = self.document.config.get('datetime_format', 'M/D/YY h:mm')
            return cell.data, style
        if cell_type == model_base.DateFieldType:
            date_format = self.document.config.get('date_format', 'M/D/YY')
            if cell.format:
                # A per-cell C-style format overrides the document default.
                date_format = to_excel_from_C_codes(cell.format, self.document.config)
            style.num_format_str = date_format
            return cell.data, style
        return "", style  # unknown cell types render as an empty string

    def start_new_row(self, id):
        """No per-row setup is needed for xlwt sheets."""
        pass

    def end_row(self, id):
        """No per-row teardown is needed for xlwt sheets."""
        pass

    def write_cell(self, row_id, col_id, cell):
        """Write one cell into the sheet, then run width adjustment."""
        value, style = self.cell_to_value(cell)
        self.sheet.write(row_id, col_id, value, style)
        self.done_write_cell(row_id, col_id, cell, value, style)

    def done_write_cell(self, row_id, col_id, cell, value, style):
        """Grow the column, when configured, so the written value fits."""
        if not self.document.config.get('adjust_all_col_width', False):
            return
        cell_type = type(cell)
        if cell_type == model_base.StringFieldType:
            current_width = self.sheet.col_width(col_id)
            log.info("current width is %s" % current_width)
            new_width = get_string_width_from_style(value, style)
        elif cell_type == model_base.DateTimeFieldType:
            current_width = self.sheet.col_width(col_id)
            log.info("current width is %s" % current_width)
            new_width = 5000 #todo: different date formats
        else:
            return
        if new_width > current_width:
            log.info("setting col #%s form width %s to %s" % (col_id,current_width,new_width))
            self.sheet.col(col_id).width = new_width

    def set_option(self, key):
        """Apply one config option to the sheet (freeze_col / freeze_row)."""
        val = getattr(self.document.config, key)
        if key == 'freeze_col' and val and val > 0:
            self.sheet.panes_frozen = True
            self.sheet.vert_split_pos = val
        elif key == 'freeze_row' and val and val > 0:
            self.sheet.panes_frozen = True
            self.sheet.horz_split_pos = val
        else:
            log.info("Nothing to be done for %s" % key)
            return
        log.info("Set option %s" % key)

    def run(self):
        """Compose the workbook and return the .xls file contents as a string."""
        self.w = xlwt.Workbook()
        self.sheet = self.w.add_sheet('Sheet 1')
        if self.document.config.headers:
            self.write_header()
        self.iterate_grid()
        self.finish()
        # Serialize the workbook into an in-memory buffer.
        buf = StringIO.StringIO()
        self.w.save(buf)
        try:
            return buf.getvalue()
        finally:
            buf.close()
|
UTF-8
|
Python
| false | false | 2,013 |
16,398,185,163,043 |
ac7ff908be532a342a0c75ca3cc229d155bc9fed
|
1ccc56352cbaffe2787308697ea246ebe58e47ea
|
/scripts/la_grid_analysis/src/map.py
|
0c47e90f2093b3d12d06bb7f72207735227cb551
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
non_permissive
|
nbir/gambit-scripts
|
https://github.com/nbir/gambit-scripts
|
05c073eb2d6e113f9df93781a582b01950bfe1e2
|
49bcf1c364614ad5a6478b7d4e582a8c7ef5db86
|
refs/heads/master
| 2020-12-24T13:53:08.373726 | 2013-11-05T22:18:39 | 2013-11-05T22:18:39 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Gambit scripts
#
# Copyright (C) USC Information Sciences Institute
# Author: Nibir Bora <[email protected]>
# URL: <http://cbg.isi.edu/>
# For license information, see LICENSE
import os
import sys
import csv
import math
import anyjson
import psycopg2
import shapefile
import matplotlib
import jsbeautifier as jsb
import matplotlib.pyplot as plt
from pylab import *
from PIL import Image
from pprint import pprint
from matplotlib import cm
from datetime import datetime, timedelta
from mpl_toolkits.basemap import Basemap
from matplotlib.collections import LineCollection
from matplotlib.collections import PolyCollection
import settings as my
sys.path.insert(0, os.path.abspath('..'))
#
# TWEET ACTIVITY ON MAP
#
def plot_grid():
    '''Draw the analysis grid over the LA base map and persist its geometry.

    Renders the BBOX tick lines on top of data/<DATA_FOLDER>/map.png, saves
    the figure as grid.png, then writes grid.json mapping each cell index to
    its (south, west, north, east) bounds.  Python 2 module (print
    statement); depends on module-level ``my`` settings.
    '''
    lat1, lng1, lat2, lng2 = my.BBOX
    # Longitude ticks: arange can stop short of lng2, so append it explicitly.
    xticks = np.arange(lng1, lng2, my.LNG_DELTA).tolist()
    xticks.append(lng2)
    print xticks
    # Latitude ticks: extends the arange stop value instead of appending.
    # NOTE(review): inconsistent with the xticks construction above -- the
    # two approaches only agree when (lat2 - lat1) is an exact multiple of
    # LAT_DELTA; confirm for the configured BBOX.
    yticks = np.arange(lat1, lat2 + my.LAT_DELTA, my.LAT_DELTA).tolist()
    fig=plt.figure(figsize=(18,13))
    fig.set_tight_layout(True)
    ax=fig.add_subplot(111)
    # NOTE(review): the Basemap instance is never used after construction.
    m = Basemap(llcrnrlon=lng1, llcrnrlat=lat1,
            urcrnrlon=lng2, urcrnrlat=lat2,
            projection='mill')
    ax.set_xlim(lng1, lng2)
    ax.set_ylim(lat1, lat2)
    ax.set_xticks(xticks)
    ax.set_yticks(yticks)
    #ax.grid(ls='-')
    ax.grid(ls='--', lw=1.25)
    plt.setp(plt.xticks()[1], rotation=90)
    bg = matplotlib.image.imread('data/' + my.DATA_FOLDER + 'map.png')
    ax.imshow(bg, aspect='auto', extent=(lng1, lng2, lat1, lat2), alpha=0.9)
    plt.savefig('data/' + my.DATA_FOLDER + 'grid' + '.png')
    # Cells are numbered row-major from the top-left, so walk the latitudes
    # from north to south.
    yticks.reverse()
    grid = {
        'bbox' 			: my.BBOX,
        'lat_delta' 	: my.LAT_DELTA,
        'lng_delta' 	: my.LNG_DELTA,
        'xticks'		: [round(i, 3) for i in xticks],
        'yticks'		: [round(i, 3) for i in yticks],
        'rows'			: len(yticks) - 1,
        'columns'		: len(xticks) - 1,
        'cells'			: (len(yticks) - 1) * (len(xticks) - 1),
        'grid'			: {},
        #'grid_lookup'	: {}
        }
    i = 0
    for r in range(len(yticks) - 1):
        #grid['grid_lookup'][round(yticks[r+1], 3)] = {}
        for c in range(len(xticks) - 1):
            # Per-cell bounds: (south, west, north, east).
            grid['grid'][i] = (	round(yticks[r+1], 3),
                round(xticks[c], 3),
                round(yticks[r], 3),
                round(xticks[c+1], 3))
            #grid['grid_lookup'][round(yticks[r+1], 3)][round(xticks[c], 3)] = i
            i += 1
    with open('data/' + my.DATA_FOLDER + 'grid.json', 'wb') as fp:
        fp.write(jsb.beautify(anyjson.dumps(grid)))
|
UTF-8
|
Python
| false | false | 2,013 |
5,282,809,784,130 |
032799abe268eb599b5c5ba2e01fb5a1e553ca8f
|
e2097927194ea849da410e67709d832e4312f209
|
/src/pyanim/__main__.py
|
c8f9e507108f378eff006b54e9111a8516c6ed6c
|
[] |
no_license
|
dj-foxxy/scalefreenetworks
|
https://github.com/dj-foxxy/scalefreenetworks
|
555afc3aedc10d691b029074041bd8279bad44f7
|
5983eac8b6c2a4af054fa9fe80776830f5f77fc1
|
refs/heads/master
| 2020-05-18T15:15:22.885293 | 2012-05-08T16:07:26 | 2012-05-08T16:07:26 | 4,223,857 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from argparse import ArgumentParser
import sys
from pyanim import Frames
def build_argument_parser():
    """Create the command-line parser: positional animation file and
    output directory."""
    parser = ArgumentParser()
    parser.add_argument('animation')
    parser.add_argument('outdir')
    return parser
def main(argv=None):
    """Entry point: load the animation and render its frames into outdir.

    Returns 0 on success; ``argv`` defaults to sys.argv.
    """
    argv = sys.argv if argv is None else argv
    args = build_argument_parser().parse_args(args=argv[1:])
    Frames.from_file(args.animation)(args.outdir)
    return 0
if __name__ == '__main__':
    # FIX: use sys.exit instead of the site-injected exit() builtin, which
    # is absent under ``python -S`` and in frozen/embedded interpreters.
    sys.exit(main())
|
UTF-8
|
Python
| false | false | 2,012 |
19,430,432,069,393 |
050c4d742b932422cdbc6b748d48d6049f859033
|
4e8057973e834b393710ab61d73666e18f9905d6
|
/condiciones/admin.py
|
0913222ceea4e65496fd554ad12571015880564a
|
[] |
no_license
|
mditamo/gigsAndVenues
|
https://github.com/mditamo/gigsAndVenues
|
4d86f61720ba0938cd18f99453f1bd2484b454e3
|
3da1635de9b28bc3aa050c094ce44c2e9b672cd3
|
refs/heads/master
| 2016-09-06T11:29:20.947716 | 2012-10-29T15:28:57 | 2012-10-29T15:28:57 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.contrib import admin
from condiciones.models import Tipo_Condicion
# Expose Tipo_Condicion in the Django admin site with the default
# ModelAdmin options.
admin.site.register(Tipo_Condicion)
|
UTF-8
|
Python
| false | false | 2,012 |
15,539,191,709,767 |
d258fee93278ef51890946645b6b3113075faecb
|
3f516bada11c5264b1f54b1b2e6e7d43310da581
|
/projects/parser/DashboardParser.py
|
3cc7bbb6b5e1614e6d1c78465b835cb93b4c14d4
|
[
"MIT"
] |
permissive
|
vhatgithub/projects-python-wrappers
|
https://github.com/vhatgithub/projects-python-wrappers
|
2adc9fb89e980cef4e74e71c73f8a2514fe8e3ca
|
33e9f6bccba16a581b115c582033a93d43bb159c
|
refs/heads/master
| 2021-01-12T05:51:15.822295 | 2014-11-06T07:01:46 | 2014-11-06T07:01:46 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#$Id$
from projects.model.Activity import Activity
from projects.model.Status import Status
class DashboardParser:
    """Turns dashboard JSON responses into Activity and Status objects."""

    def get_activities(self, resp):
        """Parse ``resp['activities']`` into a list of Activity objects.

        Args:
            resp(dict): Dictionary containing the json object for activities.
        Returns:
            list of instance: List of activity objects.
        """
        return [self.get_activity(entry) for entry in resp['activities']]

    def get_activity(self, resp):
        """Build a single Activity from its json dictionary.

        Args:
            resp(dict): Dictionary containing the json object for one activity.
        Returns:
            instance: Activity object.
        """
        activity = Activity()
        # Each setter fires only when its key is present in the response.
        field_setters = (('id', activity.set_id),
                         ('state', activity.set_state),
                         ('activity_for', activity.set_activity_for),
                         ('name', activity.set_name),
                         ('activity_by', activity.set_activity_by),
                         ('time_long', activity.set_time_long),
                         ('display_time', activity.set_display_time),
                         ('time', activity.set_time))
        for key, setter in field_setters:
            if key in resp:
                setter(resp[key])
        return activity

    def get_statuses(self, resp):
        """Parse ``resp['statuses']`` into a list of Status objects.

        Args:
            resp(dict): Response containing the json object for statuses.
        Returns:
            list of instance: List of status objects.
        """
        return [self.get_status(entry) for entry in resp['statuses']]

    def get_status(self, resp):
        """Build a single Status from its json dictionary.

        Args:
            resp(dict): Dictionary containing the json response for one status.
        Returns:
            instance: Status object.
        """
        status = Status()
        field_setters = (('id', status.set_id),
                         ('content', status.set_content),
                         ('posted_by', status.set_posted_by),
                         ('posted_person', status.set_posted_person),
                         ('posted_time', status.set_posted_time),
                         ('posted_time_long', status.set_posted_time_long))
        for key, setter in field_setters:
            if key in resp:
                setter(resp[key])
        return status

    def to_json(self, status):
        """Serialize a Status into its json dict form.

        Only a non-empty content string is emitted.

        Args:
            status(instance): Status object.
        Returns:
            dict: Dictionary containing the json object for the status.
        """
        data = {}
        if status.get_content() != "":
            data['content'] = status.get_content()
        return data
|
UTF-8
|
Python
| false | false | 2,014 |
3,942,780,000,500 |
80d234c89835b3f3f58194059ddd9ed5e9938685
|
ad2b906c08debf12775be19cecad596caec97952
|
/redef.py
|
9b0d4640f8e3ec7e7196638fec90c65a0d1bf7a4
|
[] |
no_license
|
msabramo/redef
|
https://github.com/msabramo/redef
|
152902889246332e3ed2636c0139fa0258cbb7e3
|
d81d086eaa2aad52a16ea18d1cfcc85ed9e24594
|
refs/heads/master
| 2023-06-09T16:55:35.579452 | 2012-07-10T06:57:44 | 2012-07-10T06:57:44 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
'''
redef.py
@author [email protected]
Test module that redefines attributes of a module or class
When the test goes out of scope, your redefined attribute goes back to normal behavior.
'''
import types
import inspect
class CallableWrapper:
    '''Captures information on a redefined function'''
    # Call count and the most recently seen positional/keyword arguments.
    called = 0
    method_args = None
    named_method_args = None
    def capture(self, args, kwargs):
        '''Store the input to the captured function'''
        self.called = self.called + 1
        self.method_args = args
        self.named_method_args = kwargs
    def reset(self):
        '''Set the wrapper to a base state where the function was never called'''
        self.called = 0
        self.method_args = None
        self.named_method_args = None
    def __init__(self, rd):
        '''Take a Redef object and wrap the function you want to redefine'''
        # don't keep any references to the Redef object around
        # or else the __del__ function will not work correctly
        # NOTE: ismethod() is False for plain functions and for staticmethods
        # looked up on a class, so those fall into the "class method" path.
        is_class_method = not inspect.ismethod(rd.old_value)
        func = rd.value
        def tocall(*args, **kwargs):
            self.capture(args, kwargs)
            # pop off the redef class variable to keep the
            # faked out function signature the same
            if is_class_method:
                args = args[1:]
            return func(*args, **kwargs)
        # @staticmethods need to return a function bound
        # to the class
        if is_class_method:
            tocall = types.MethodType(tocall, rd.obj)
        self.wrapped = tocall
class Redef(object):
    '''An object that when deleted puts the redefined object back to normal'''
    def __init__(self, obj, key, value):
        '''Make sure you keep the returned Redef object in a variable so that the __del__ function is not called immediately
        Good: >>> rd_somefunc = Redef(SomeClass, "attr", lambda s, x: "something else")
        Bad: >>> Redef(SomeClass, "attr", lambda s, x: "something else")
        '''
        self.key = key
        self.obj = obj
        self.old_value = getattr(self.obj, self.key)
        self.value = value
        # value=None means "spy only": keep the original behavior while the
        # wrapper still records every call.
        if value is None:
            self.value = self.old_value
        self.wrapper = CallableWrapper(self)
        # Callables are replaced by the recording wrapper; plain attributes
        # are simply overwritten with the supplied value.
        if callable(self.value):
            setattr(self.obj, self.key, self.wrapper.wrapped)
        else:
            setattr(self.obj, self.key, self.value)
    def __del__(self):
        '''Can be called explicitly or implied when gone out of scope'''
        setattr(self.obj, self.key, self.old_value)
    def called(self):
        '''ask the wrapper how many times the redef has been called'''
        return self.wrapper.called
    def method_args(self):
        '''ask the wrapper for the most recent non-named args'''
        return self.wrapper.method_args
    def named_method_args(self):
        '''ask the wrapper for the most recent named args'''
        return self.wrapper.named_method_args
    def reset(self):
        '''ask the wrapper to forget all wrapped information'''
        self.wrapper.reset()
def redef(obj, key, value):
    '''Convenience factory; equivalent to constructing Redef(obj, key, value).'''
    return Redef(obj, key, value)
class WriteCapturer:
    '''File-like object that records everything written to it while also
    invoking a stored callable and remembering its return value.'''
    def __init__(self, func, *args, **kwargs):
        self.output = ''
        self.func = func
        self.args = args
        self.kwargs = kwargs
    def capture(self):
        '''Call the stored function and keep what it returned.'''
        self.returned = self.func(*self.args, **self.kwargs)
    def write(self, *args):
        '''Append the written values (space-joined) to the output string.'''
        self.output += ' '.join(str(piece) for piece in args)
def capture_output_(output_type, func, *args, **kwargs):
    '''Run *func* while sys.<output_type> is replaced by a WriteCapturer.

    Returns the capturer with .output and .returned filled in.
    '''
    import sys
    capturer = WriteCapturer(func, *args, **kwargs)
    # Keep the Redef alive for the duration of capture(); dropping it when
    # this function returns restores the real stream.
    patched = redef(sys, output_type, capturer)
    capturer.capture()
    return capturer
def stdout_of(func, *args, **kwargs):
    # Capture everything *func* writes to sys.stdout; returns the capturer.
    return capture_output_('stdout', func, *args, **kwargs)
def stderr_of(func, *args, **kwargs):
    # Same as stdout_of, but intercepts sys.stderr.
    return capture_output_('stderr', func, *args, **kwargs)
|
UTF-8
|
Python
| false | false | 2,012 |
1,245,540,560,621 |
a5ba44d6ec530e52daafd7a51698fe5b4b51ca76
|
d3521f9f404fe5163d86e54f0d18ee59a7960e5f
|
/SL/blog/views.py
|
605a05e11a2a9d392a08a507fdb40464ac2c5893
|
[] |
no_license
|
yuluhuang/SL
|
https://github.com/yuluhuang/SL
|
4d89666ba1e9d59921184fa93c7732638382fb91
|
095ce3c1bfebf24e7595b55470440bb35db0db7c
|
refs/heads/master
| 2021-01-10T20:28:52.688408 | 2014-08-02T12:12:39 | 2014-08-02T12:12:39 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#coding:utf-8
from django.shortcuts import render_to_response
from django.http import HttpResponse,HttpResponseRedirect
from django.template import RequestContext
from SL.blog.models import Users,Theme,Task,Collection,Info,Item,Note,Tel,Nu,ProxyIp
from django.conf import settings#must path
from django.views.decorators.csrf import csrf_exempt
import uuid
import os
from django.core import serializers
import json
#settings.MEDIA_ROOT sl/sl
#BASEDIR=os.path.dirname(__file__)#Return the directory name of pathname path.
# Create your views here.
'''
'''
@csrf_exempt
def index(req):
    # Landing page; csrf_exempt so the view accepts POSTs without a token.
    return render_to_response('index.html',{})
'''
'''
@csrf_exempt
def loginhtml(req):
    # Render the login form page.
    return render_to_response('login.html',{})
'''
登录处理
'''
@csrf_exempt
def login(req):
    """Check the POSTed credentials and start a session.

    On a match, stores the user id in the session and responds with
    ``[{"code":"1","user":[...]}]``; otherwise echoes the submitted user id.
    """
    response=HttpResponse()
    response['Content-type']="text/plain"
    userId = req.POST.get('username')# pulled from the POSTed form data
    password = req.POST.get('password')
    # NOTE(review): plaintext password comparison -- the Users.salt field is
    # never used here; confirm whether hashing was intended.
    u=Users.objects.filter(userId__exact=userId,password__exact=password)
    '''
    [{"fields": {"introduction": "1", "phone": "1", "password": "a", "identity": "1", "salt": "11", "icon": "1", "userId": "a", "Email": "[email protected]", "motto": "1", "name": "1", "qq": "1"}, "pk": 1, "model": "blog.users"}]
    '''
    if u:
        req.session['username']=userId
        # NOTE(review): serializes the user with hard-coded Id="1" instead of
        # the matched record ``u`` -- looks like a bug; confirm intent.
        return HttpResponse('[{"code":"1","user":['+Users.objects.get(Id="1").toJSON()+']}]', mimetype='application/javascript')#response
    else:
        return HttpResponse(userId,mimetype='application/javascript')
'''
'''
@csrf_exempt
def logout(req):
    """Drop the logged-in username from the session.

    Writes ``[{"code":"logout"}]`` after removing the key and
    ``[{"code":"0"}]`` if the session access raises KeyError.
    NOTE(review): when no user is logged in the response body stays empty --
    confirm whether a status code should be written in that case too.
    """
    response=HttpResponse()
    response['Content-type']='text/plain'
    try:
        if 'username' in req.session:
            del req.session['username']
            response.write('[{"code":"logout"}]')
    except KeyError:
        response.write('[{"code":"0"}]')
    return response
'''
是否登录状态
'''
@csrf_exempt
def islogin(req):
    """Report whether the current session is logged in.

    Responds with ``[{"code":"1","username":"..."}]`` when a username is
    stored in the session and ``[{"code":"0"}]`` otherwise.
    """
    response=HttpResponse()
    response['Content-type']="text/plain"
    # BUG FIX: ``req.session['username']`` raised KeyError for anonymous
    # sessions, so the "not logged in" branch below was unreachable;
    # .get() makes both branches reachable.
    username = req.session.get('username')
    if username:
        response.write('[{"code":"1","username":"'+username+'"}]')
    else:
        response.write('[{"code":"0"}]')
    return response
'''
note编写页面
'''
@csrf_exempt
def notehtml(req):
    # Render the note-editing page.
    return render_to_response('note.html',{})
@csrf_exempt
def collecthtml(req):
    # NOTE(review): both branches render the same template, making the
    # method check redundant -- confirm before simplifying.
    if req.method=='POST':
        return render_to_response('mycollect.html',{})
    else:
        #collection=Collection.objects.get(collectId="1")
        #return HttpResponse(collection.collectId, mimetype='application/javascript')#response
        return render_to_response('mycollect.html',{})
@csrf_exempt
def getcollect(req):
    # Return collection "1" serialized through the model's toJSON helper.
    # NOTE(review): the collect id is hard-coded -- confirm intent.
    collection=Collection.objects.get(collectId="1")
    #return HttpResponse(json.dumps(collection,ensure_ascii = False), mimetype='application/javascript')#response object
    return HttpResponse('[{"collect":['+collection.toJSON()+']}]', mimetype='application/javascript')#response
    #return HttpResponse(serializers.serialize("json", Collection.objects.get(collectId="1")), mimetype='application/javascript')#response
@csrf_exempt
def myhomehtml(req):
    # Render the "my home" page.
    return render_to_response('myhome.html',{})
@csrf_exempt
def mydetailshtml(req):
    # Render the "my details" page.
    return render_to_response('mydetails.html',{})
@csrf_exempt
def upload_img(request):
    """Handle uploadify image uploads.

    POST: saves each uploaded file under MEDIA_ROOT/static/uploads/s/ with a
    UUID-based name and returns the public path of the last saved file.
    GET: renders the upload page.
    """
    if request.method == 'POST':
        for field_name in request.FILES:
            uploaded_file = request.FILES[field_name]#name
            file_ext = (request.FILES['Filedata'].name.split('.')[-1])# file extension
            file_name=str(uuid.uuid1())# collision-safe new base name
            newFileName=file_name+'.'+file_ext# new file name
            path=os.path.join(settings.MEDIA_ROOT,'./static/uploads/s/')
            if not os.path.exists(path):
                os.mkdir(path)
            destination_path =os.path.join(path,'%s'% newFileName)
            # BUG FIX: use a context manager so the destination handle is
            # closed even when a chunk write raises (the original leaked the
            # open file on error).
            with open(destination_path, 'wb+') as destination:
                for chunk in uploaded_file.chunks():
                    destination.write(chunk)
        return HttpResponse('/static/uploads/s/'+newFileName)
    else:
        return render_to_response('uploadify.html',{})
'''
前台传值x(左边距),y(上边距),w(切图的图宽),h(切图的图高)div_w(用来显示图片的框框的大小,用来计算比例)
box的四个参数为左上角和右下角距(0,0)的距离
'''
@csrf_exempt
def cutimage(request):
    """Crop an uploaded image to the rectangle selected in the browser.

    The client POSTs x/y/w/h (the selection in on-screen pixels), the
    displayed container size (div_w/div_h, used to compute the scale) and
    the file path; the selection is scaled back to the original image
    dimensions before cropping.  ``box`` is (left, upper, right, lower)
    measured from (0, 0).
    """
    response=HttpResponse()
    response['Content-type']="text/plain"
    path=''
    marginTop=0
    marginLeft=0
    width=0
    height=0
    if request.POST.get('x'):
        marginLeft=int(request.POST.get('x'))
    if request.POST.get('y'):
        marginTop=int(request.POST.get('y'))
    if request.POST.get('w'):
        width=int(request.POST.get('w'))
    if request.POST.get('h'):
        height=int(request.POST.get('h'))
    if request.POST.get('filepath'):
        path=request.POST.get('filepath')
    if request.POST.get('div_w'):# size of the div showing the image on the page, not the image size
        div_w=int(request.POST.get('div_w'))
    if request.POST.get('div_h'):
        div_h=int(request.POST.get('div_h'))
    #filepath=settings.MEDIA_ROOT+path
    filepath=settings.MEDIA_ROOT+path
    #filepath='/home/yuluhuang/python/two/two/resource/upload/c231dea2-4619-11e3-8ae3-000c29176c6b.jpg'
    houz=filepath[filepath.rfind('.'):]# file extension (NOTE(review): unused below)
    pathnofilename=filepath[0:filepath.rfind('\\')]# path without the file name (NOTE(review): unused below)
    from PIL import Image
    f = Image.open(settings.MEDIA_ROOT+path)
    xsize,ysize=f.size# original image width/height
    bilix=float(xsize)/float(div_w)
    biliy=float(ysize)/float(div_h)
    # box is a 4-tuple (left, upper, right, lower), all measured from (0,0);
    # scale the on-screen selection back to original-image coordinates.
    x=int(marginLeft*bilix)
    y=int(marginTop*biliy)
    w=int((marginLeft+width)*bilix)
    h=int((marginTop+height)*biliy)
    box=(x,y,w,h)
    import random
    import re
    f.crop(box).save(settings.MEDIA_ROOT+"/static/uploads/z/"+repr(random.randrange(1000))+".jpg")
    #f.crop(box).save(filepath)
    response.write(str(marginLeft)+","+str(marginTop)+","+str(x)+","+str(y)+","+str(w)+","+str(h))
    return response
'''
post note
'''
@csrf_exempt
def note(req):
    """Create a Note for the logged-in user from POSTed fields.

    Writes ``[{"code":"1"}]`` on success, ``[{"code":"0"}]`` on any failure.
    """
    response=HttpResponse()
    response['Content-type']="text/plain"
    if req.method=='POST' and req.session["username"]:
        try:
            content=str(req.POST.get('content',''))
            tag=str(req.POST.get('tag',''))
            time=str(req.POST.get('time',''))
            title=str(req.POST.get('title',''))
            url=str(req.POST.get('url',''))
            # objects.create() both builds and saves the row, hence the
            # commented-out explicit save() below.
            n=Note.objects.create(noteTitle=title,noteUrl=url,noteContent=content,noteTime=time,noteTag=tag,userId=req.session['username'])
            #n.save()
            response.write('[{"code":"1"}]')
        except Exception:
            response.write('[{"code":"0"}]')
    return response
'''
noteSearchByUsername
'''
@csrf_exempt
def noteSearchByUsername(req):
    """Return the logged-in user's notes serialized as JSON.

    On success writes ``[{"code":"1","notes":[...]}]``; on failure writes a
    code payload carrying the error message.
    """
    response=HttpResponse()
    response['Content-type']="text/plain"
    if req.method=='POST' and req.session["username"]:
        try:
            notes=Note.objects.filter(userId__exact=req.session['username'])
            response.write('[{"code":"1","notes":'+serializers.serialize("json", notes)+'}]')
        except Exception as e:
            # BUG FIX: the original concatenated the Exception *class* into a
            # str ('...'+Exception+'...'), which itself raised TypeError;
            # report the caught exception's message instead.
            response.write('[{"code":"%s"}]' % e)
    return response
'''
爬虫spider
'''
import time
@csrf_exempt
def splice(req):
    """Kick off the phone-number spider for one batch (POST only).

    Delegates the crawl to ``bug``; failures are reported in the response
    body instead of crashing the view.
    """
    response=HttpResponse()
    response['Content-type']="text/plain"
    if req.method=='POST':
        try:
            bug(response)
        except Exception as e:
            # BUG FIX: the original concatenated the Exception *class* into a
            # str ('...'+Exception+'...'), which itself raised TypeError;
            # report the caught exception's message instead.
            response.write('[{"code":"%s"}]' % e)
    return response
import urllib.request
import os
import re
from random import choice
import uuid
def bug(response):
    """Scrape one phone number from the Sogou number-lookup API.

    Increments the persistent counter stored in Nu row "1" to derive the
    next phone number, queries the API through a randomly chosen HTTP proxy,
    stores successful lookups as Tel rows, and recurses to continue the
    crawl (backing off 10s and rolling the counter back on HTTP code 403).

    NOTE(review): recursion on both the success and 403 paths grows the
    Python stack without bound on long crawls -- a loop would be safer.
    """
    urlString=[]
    nu=Nu.objects.get(NId="1")
    #response.write(nu.NT)
    #return response
    pc=int(nu.NT)
    nu.NT=pc+1
    nu.save()
    #response.write(serializers.serialize("json", nu))
    #return response
    # Overwrite the tail of the base number with the current counter value.
    phone='18267833656'
    length=len(str(pc))
    #response.write(length)
    #return response
    phone=phone[0:len(str(phone))-length]+str(pc)
    #response.write(phone)
    #return response
    url1=r'http://data.haoma.sogou.com/vrapi/query_number.php?number={0}&type=json&callback=show'.format(phone)
    time.sleep(1)
    # Static proxy pool, extended with every ProxyIp row from the database.
    iplists=['211.138.121.38:80','118.187.37.254:80','211.162.39.98:80','61.158.173.179:9999',
        '183.57.82.74:80','119.184.120.133:6015','115.238.164.208:8080','218.64.58.122:9999']
    ips=ProxyIp.objects.all()
    for ip in ips:
        iplists.append(ip.ip[:-1])
    #response.write(iplists)
    #return response
    ip=choice(iplists)
    #ip=''
    headers={
        "GET":url1,
        #"HOST":"",
        "Referer":"http://www.python.org/",
        "User-Agent":"Mozilla/5.0",
        }
    req=urllib.request.Request(url1)
    for key in headers:
        #return response
        req.add_header(key,headers[key])
    proxy_handler = urllib.request.ProxyHandler({'http': 'http://'+ip})
    #proxy_auth_handler = urllib.request.ProxyBasicAuthHandler()
    # proxy_auth_handler.add_password('realm', 'host', 'username', 'password')
    opener = urllib.request.build_opener(proxy_handler)
    urllib.request.install_opener(opener)
    #response.write(ip)
    #return response
    # [5:-1] strips the JSONP wrapper "show(" ... ")" (callback=show above).
    html=urllib.request.urlopen(req).read().decode('utf-8')[5:-1]
    #html=json.dumps(html)
    #response.write(html)
    #return response
    # Crude field extraction: split on double quotes; txt[3] is the name and
    # txt[6][1:-1] the status code of the JSON payload.
    txt=html.split('"')
    #response.write((txt[3]).encode('ascii'))
    #return response
    if txt[6][1:-1]=="0":
        #response.write(html)
        #return response
        tel=Tel.objects.create(text=html,telName=txt[3],telContent=phone,code=txt[6][1:-1])
        tel.save()
        bug(response)
    if txt[6][1:-1]=="403":
        #response.write(html)
        #return response
        # Rate-limited: wait, undo the counter increment and retry.
        time.sleep(10)
        nu=Nu.objects.get(NId="1")
        pc=int(nu.NT)
        nu.NT=pc-1
        nu.save()
        bug(response)
    #response.write(html)
    #return response
    #response.write(str(html))
    #return response
    #txt.write(str(html))
@csrf_exempt
def showTel(req):
    """Return every Tel record whose name contains '6570', serialized
    as JSON inside a one-element wrapper list."""
    resp = HttpResponse()
    resp['Content-type'] = "text/plain"
    matches = Tel.objects.filter(telName__contains='6570')#
    payload = serializers.serialize("json", matches)
    resp.write('[{"tel":' + payload + '}]')
    return resp
@csrf_exempt
def showtelhtml(req):
    """Render the phone-number listing page."""
    context = {}
    return render_to_response('showTel.html', context)
'''
爬代理ip
'''
@csrf_exempt
def spiderIp(req):
    """Scrape the public proxy list at cn-proxy.com and store each
    entry as a ProxyIp row.

    The page is fetched through a hard-coded bootstrap proxy; table
    cells are extracted with a regex and grouped as
    (ip, port, location, last-checked).  The stored value is
    "ip:port:" -- the trailing colon is expected by readers such as
    bug(), which strip it with ip[:-1].
    """
    response = HttpResponse()
    response['Content-type'] = "text/plain"
    url = r'http://cn-proxy.com/'
    pattern = r'<td>(.*?)</td>'
    time.sleep(1)  # crude rate limiting
    iplists = ['182.254.129.123:80']
    ip = choice(iplists)
    headers = {
        "GET": url,
        #"HOST":"",
        "Referer": "http://www.python.org/",
        "User-Agent": "Mozilla/5.0",
    }
    request = urllib.request.Request(url)
    for key in headers:
        request.add_header(key, headers[key])
    proxy_handler = urllib.request.ProxyHandler({'http': 'http://' + ip})
    opener = urllib.request.build_opener(proxy_handler)
    urllib.request.install_opener(opener)
    html = urllib.request.urlopen(request).read().decode('utf-8')
    cells = re.findall(pattern, html)
    # The first five cells are the header row, but the header labels
    # also repeat mid-table.  BUG FIX: the original list.remove() only
    # dropped ONE occurrence of each label, so any repeat shifted the
    # 4-column grouping below and corrupted every following record.
    header_labels = ('服务器地址', '端口', '位置', '速度', '上次检查')
    cells = [cell for cell in cells[5:] if cell not in header_labels]
    ip = ""
    for count, cell in enumerate(cells):
        column = count % 4
        if column < 2:
            # ip then port; keeps the trailing ':' format described above
            ip += cell + ":"
        if column == 2:
            # location column: the record is complete, persist it
            proxy = ProxyIp.objects.create(ip=ip, time=time.time())
            proxy.save()
            ip = ""
        # column == 3 is the "last checked" timestamp; ignored
    response.write(cells)
    return response
'''
['182.254.129.123', '80', '广东 深圳', '2014-07-31 14:39:23', '121.10.120.135', '8001', '广东 ', '2014-07-31 14:35:34', '115.236.59.194', '3128', '浙江 杭州', '2014-07-31 14:35:36', '211.138.121.37', '84', '浙江 ', '2014-07-31 14:39:49', '211.152.50.70', '80', '上海 ', '2014-07-31 14:35:52', '218.89.170.110', '8888', '四川 攀枝花', '2014-07-31 14:39:52', '111.206.125.74', '8080', '北京 ', '2014-07-31 14:39:03', '218.204.131.250', '3128', '江西 南昌', '2014-07-31 14:37:06', '202.120.83.18', '9000', '上海 ', '2014-07-31 14:36:02', '115.28.213.143', '8000', '北京 ', '2014-07-31 14:36:48', '111.206.125.76', '8080', '北京 ', '2014-07-31 14:39:18', '111.206.125.77', '8080', '北京 ', '2014-07-31 14:39:16', '116.226.61.108', '8080', '上海 ', '2014-07-31 14:36:39', '117.79.73.166', '8080', '北京 ', '2014-07-31 14:37:12', '222.87.129.29', '80', '贵州 六盘水', '2014-07-31 14:35:03', '111.1.36.25', '83', '浙江 温州', '2014-07-31 14:35:12', '123.138.184.19', '8888', '陕西 西安', '2014-07-31 14:37:28', '211.138.121.37', '83', '浙江 ', '2014-07-31 14:39:39', '182.254.129.124', '80', '广东 深圳', '2014-07-31 14:39:25', '202.103.150.70', '8088', '广东 深圳', '2014-07-31 14:37:33', '61.174.9.96', '8080', '浙江 金华', '2014-07-31 14:35:57', '114.80.136.112', '7780', '上海 ', '2014-07-31 14:35:08', '116.236.203.238', '8080', '上海 ', '2014-07-31 14:38:41', '115.29.168.245', '18080', '北京 ', '2014-07-31 14:37:23', '61.135.153.22', '80', '北京 ', '2014-07-31 14:38:44', '218.16.99.253', '8081', '广东 东莞', '2014-07-31 14:37:10', '182.118.23.7', '8081', '河南 ', '2014-07-31 14:37:41', '123.138.68.172', '8000', '陕西 西安', '2014-07-31 14:36:51', '111.205.122.222', '80', '北京 ', '2014-07-31 14:38:18', '116.228.55.217', '8003', '上海 ', '2014-07-31 14:36:21', '119.188.46.42', '8080', '山东 ', '2014-07-31 14:36:37', '121.196.141.249', '80', '北京 ', '2014-07-31 14:35:55', '116.228.55.217', '8000', '上海 ', '2014-07-31 14:36:27', '183.129.212.180', '82', '浙江 杭州', '2014-07-31 14:39:59', '125.215.37.81', '3128', '上海 ', '2014-07-31 14:37:21', '183.57.42.79', '81', '广东 佛山', 
'2014-07-31 14:36:46', '183.136.221.6', '3128', '浙江 ', '2014-07-31 14:35:46', '121.199.59.43', '80', '北京 ', '2014-07-31 14:37:55', '115.29.225.229', '80', '北京 ', '2014-07-31 14:37:48', '122.227.8.190', '80', '浙江 金华', '2014-07-31 14:37:01', '218.75.155.242', '8888', '湖南 常德', '2014-07-31 14:39:35', '115.29.164.195', '8081', '北京 ', '2014-07-31 14:35:36', '115.29.28.137', '8090', '北京 ', '2014-07-31 14:36:46', '115.29.184.17', '82', '北京 ', '2014-07-31 14:37:19', '183.63.149.103', '80', '广东 广州', '2014-07-31 14:39:14', '116.236.216.116', '8080', '上海 ', '2014-07-31 14:36:14', '117.59.217.237', '83', '重庆 ', '2014-07-31 14:36:07', '111.206.125.75', '8080', '北京 ', '2014-07-31 14:35:49', '115.28.15.118', '82', '北京 ', '2014-07-31 14:37:02', '211.151.76.25', '80', '北京 ', '2014-07-31 14:36:36', '服务器地址', '端口', '位置', '速度', '上次检查', '211.151.59.251', '80', '北京 ', '2014-07-31 14:38:36', '210.73.220.18', '8088', '上海 ', '2014-07-31 14:35:49', '210.14.138.102', '8080', '北京 ', '2014-07-31 14:35:50', '61.234.123.64', '8080', '广东 珠海', '2014-07-31 14:36:54', '120.198.230.31', '80', '广东 ', '2014-07-31 14:37:36', '111.1.36.26', '83', '浙江 温州', '2014-07-31 14:40:03', '111.1.36.21', '80', '浙江 温州', '2014-07-31 14:36:30', '111.1.36.162', '80', '浙江 温州', '2014-07-31 14:39:20', '120.198.230.93', '80', '广东 ', '2014-07-31 14:38:12', '111.1.36.22', '80', '浙江 温州', '2014-07-31 14:36:00', '111.1.36.26', '82', '浙江 温州', '2014-07-31 14:40:01', '111.1.36.25', '85', '浙江 温州', '2014-07-31 14:35:07', '111.1.36.163', '80', '浙江 温州', '2014-07-31 14:37:25', '111.1.36.26', '84', '浙江 温州', '2014-07-31 14:35:06', '111.1.36.165', '80', '浙江 温州', '2014-07-31 14:37:26', '120.198.230.30', '80', '广东 ', '2014-07-31 14:37:35', '111.1.36.25', '80', '浙江 温州', '2014-07-31 14:35:39', '111.1.36.23', '80', '浙江 温州', '2014-07-31 14:35:31', '111.1.36.26', '85', '浙江 温州', '2014-07-31 14:35:29', '111.1.36.164', '80', '浙江 温州', '2014-07-31 14:37:56', '120.198.230.31', '81', '广东 ', '2014-07-31 14:39:53', '111.1.36.26', '80', '浙江 温州', '2014-07-31 
14:35:37', '222.74.6.10', '8000', '内蒙古 呼和浩特', '2014-07-31 14:35:59', '120.198.230.31', '82', '广东 ', '2014-07-31 14:35:09', '222.89.155.62', '9000', '河南 驻马店', '2014-07-31 14:35:11', '120.198.243.130', '80', '广东 ', '2014-07-31 14:37:58', '211.151.50.179', '81', '北京 ', '2014-07-31 14:35:54', '211.138.121.38', '80', '浙江 ', '2014-07-31 14:36:08', '115.28.54.149', '80', '北京 ', '2014-07-31 14:35:12', '211.138.121.36', '80', '浙江 ', '2014-07-31 14:39:47', '211.138.121.37', '82', '浙江 ', '2014-07-31 14:39:41', '211.138.121.37', '80', '浙江 ', '2014-07-31 14:39:45', '211.138.121.36', '82', '浙江 ', '2014-07-31 14:39:36', '211.138.121.36', '81', '浙江 ', '2014-07-31 14:39:43', '211.138.121.38', '84', '浙江 ', '2014-07-31 14:36:11', '111.1.36.133', '80', '浙江 温州', '2014-07-31 14:36:19', '211.138.121.37', '81', '浙江 ', '2014-07-31 14:37:08', '218.240.156.82', '80', '福建 福州', '2014-07-31 14:35:32', '211.138.121.38', '81', '浙江 ', '2014-07-31 14:37:06', '114.112.91.116', '90', '江苏 ', '2014-07-31 14:37:04', '61.235.249.165', '80', '辽宁 沈阳', '2014-07-31 14:36:52', '124.238.238.50', '80', '河北 廊坊', '2014-07-31 14:38:37', '114.112.91.114', '90', '江苏 ', '2014-07-31 14:37:30', '114.112.91.115', '90', '江苏 ', '2014-07-31 14:37:04', '183.57.78.124', '8080', '广东 佛山', '2014-07-31 14:39:27', '61.155.169.11', '808', '江苏 苏州', '2014-07-31 14:37:14', '180.153.32.93', '8088', '上海 ', '2014-07-31 14:35:47', '110.232.64.93', '8080', '北京 ', '2014-07-31 14:37:10', '202.98.123.126', '8080', '四川 成都', '2014-07-31 14:35:05', '116.228.55.217', '80', '上海 ', '2014-07-31 14:36:29']
'''
'''
noteSearchByNoteId
'''
@csrf_exempt
def noteSearchByNoteId(req):
    """POST endpoint: look up notes by note id for the logged-in user.

    Responds with pseudo-JSON; code "1" on success.  On failure the
    exception message is reported as the code.
    """
    response = HttpResponse()
    response['Content-type'] = "text/plain"
    # .get avoids a KeyError when no user is logged in (the original
    # indexed req.session["username"] directly).
    if req.method == 'POST' and req.session.get("username"):
        try:
            noteid = str(req.POST.get('id', ''))
            note = Note.objects.filter(noteId__exact=noteid)
            response.write('[{"code":"1","note":' + serializers.serialize("json", note) + '}]')
        except Exception as e:
            # BUG FIX: original concatenated the Exception class (a
            # TypeError) and omitted the quotes around the code value.
            response.write('[{"code":"' + str(e) + '"}]')
    return response
|
UTF-8
|
Python
| false | false | 2,014 |
12,025,908,428,837 |
05c86f2a810a330db51ef4aeb33cb4dc4d0fe3ef
|
91e9641667d0fffa2478d8a1225805f9bee09f41
|
/twist/nlp/libSVMInput.py
|
e83af445a78186b65b2f0e6ed1871ed86482a282
|
[] |
no_license
|
rahmaniacc/twist
|
https://github.com/rahmaniacc/twist
|
5c3563145498490650772168c66b591da2161403
|
fcc0243986462b73682d4390582c1abd7c6b09b5
|
refs/heads/master
| 2019-05-14T12:37:58.326638 | 2012-11-13T22:56:55 | 2012-11-13T22:56:55 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
'''
Created on Nov 9, 2012
@author: sonali
'''
import sqlite3 as lite
def createfile(db_path='/Users/sonali/Documents/Ischool/BigDataTwitter/twist.db',
               out_path='inputfile.txt'):
    """Dump the Documents/GlobalDict tables as a libSVM training file.

    Each output line is "<catid> <wordid>:<tfidf> <wordid>:<tfidf> ...\\n"
    for one distinct TWEETID.

    db_path  -- sqlite database holding Documents and GlobalDict
                (defaults preserve the original hard-coded location)
    out_path -- file the libSVM input is written to

    The output file is now opened with a context manager so it is
    closed even if a query fails (the original leaked the handle), and
    the per-row debug prints were removed.
    """
    con = lite.connect(db_path)
    with con:
        cur = con.cursor()
        cur.execute("SELECT DISTINCT TWEETID FROM Documents")
        docrows = cur.fetchall()
        with open(out_path, 'w') as f:
            for i in docrows:
                # a tweet belongs to exactly one category
                cur.execute("SELECT DISTINCT(CATID) FROM Documents WHERE TWEETID=?", (i[0],))
                catid = cur.fetchone()
                f.write(str(catid[0]) + " ")
                cur.execute("SELECT WORDID FROM Documents WHERE TWEETID=?", (i[0],))
                wordrows = cur.fetchall()
                for j in wordrows:
                    cur.execute("SELECT TFIDF FROM GlobalDict WHERE TWEETID=? AND WORDID=?", (i[0], j[0]))
                    tfidf = cur.fetchone()
                    f.write(str(j[0]) + ":" + str(tfidf[0]) + " ")
                f.write("\n")
if __name__ == "__main__":
    # Build the libSVM input file when run as a script.
    createfile()
|
UTF-8
|
Python
| false | false | 2,012 |
18,081,812,350,885 |
55793124c3fdc5bb35a3c5525f94ab9a938b624e
|
a394a802db11fb3b590094a445e6a9ad9df9ad65
|
/script/ncr.py
|
1afee54d6f4a217def3197b7df9d284092187286
|
[] |
no_license
|
gtmanfred/Euler
|
https://github.com/gtmanfred/Euler
|
86b897d964071a284022aa81d98fa5e9fc53e8d0
|
3b22b58a4be9317075d2667d72ff2b41bd50b19d
|
refs/heads/master
| 2016-09-06T15:34:20.053309 | 2012-04-25T10:30:21 | 2012-04-25T10:30:21 | 1,621,157 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def ncrrow(row, lc=-1):
    """Walk along one row of Pascal's triangle.

    Returns C(row, lc) when lc selects a single entry, or the whole
    row as a list when lc is left at its default (or points past the
    end of the row).  Prints coarse progress every 10000 entries.
    """
    if lc == -1:
        lc = row + 1
    # binomial symmetry: C(row, lc) == C(row, row - lc); use the
    # smaller index so fewer multiplications are needed
    if lc < row + 1 and lc > row // 2:
        lc = row - lc
        print(lc)
    row_vals = [1]
    k = 1
    next_report = 10000
    while True:
        if len(row_vals) > next_report:
            print(lc, len(row_vals) / (lc), end='\r')
            next_report += 10000
        # multiplicative recurrence: C(n,k) = C(n,k-1)*(n-k+1)/k
        row_vals.append(row_vals[-1] * (row + 1 - k) // k)
        if len(row_vals) > lc:
            return row_vals[-1]
        if row_vals[-1] == 1:
            return row_vals
        k += 1
def ncr(n):
    """Return rows 0 .. n+1 of Pascal's triangle as a list of lists."""
    triangle = [[1]]
    for _ in range(n + 1):
        prev = triangle[-1]
        # each entry is the sum of the two entries above it
        triangle.append([a + b for a, b in zip([0] + prev, prev + [0])])
    return triangle
|
UTF-8
|
Python
| false | false | 2,012 |
4,045,859,202,344 |
9bb9db708ca9cf1790cc4b7f24522e12fe3b1215
|
f9ad953cc547960eda33fead24d4ad29b1fc9471
|
/turbion/bits/markup/fields.py
|
af4ca64d624df6304b435a5762ec36d996b1437c
|
[
"BSD-3-Clause"
] |
permissive
|
strogo/turbion
|
https://github.com/strogo/turbion
|
49c3ed454f24319463948288424e18c7541a5bb1
|
b9b9c95e1a4497e6c4b64f389713a9a16226e425
|
refs/heads/master
| 2021-01-13T02:08:05.997563 | 2011-01-04T13:29:19 | 2011-01-04T13:29:19 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.db import models
from django.utils.translation import ugettext_lazy as _
from turbion.bits.markup.filters import Filter
class MarkupField(models.CharField):
    """CharField storing the name of a markup filter.

    Choices are built from the registered filters, optionally limited
    to a whitelist and/or to filters that declare themselves safe.
    """
    __metaclass__ = models.SubfieldBase

    def __init__(self, safe=True, limit_choices_to=None, *args, **kwargs):
        self.limit_choices_to = limit_choices_to
        self.safe = safe

        def _allowed(name, filter):
            # must be on the whitelist (when one is given) and, when
            # only safe filters are wanted, must declare itself safe
            whitelisted = not limit_choices_to or name in limit_choices_to
            return whitelisted and (not safe or filter.is_safe())

        options = [(name, name)
                   for name, filter in Filter.manager.all()
                   if _allowed(name, filter)]
        defaults = dict(choices=options, max_length=50, default="markdown")
        defaults.update(kwargs)
        super(MarkupField, self).__init__(*args, **defaults)
class MarkupTextField(models.TextField):
    """TextField whose markup is rendered to HTML when the model saves.

    contribute_to_class attaches two companion fields to the model:
    a hidden TextField (default name "<field>_html") holding the
    rendered HTML, and a MarkupField ("<field>_filter") naming the
    markup filter to apply.
    """
    def __init__(self, *args, **kwargs):
        # Pop the custom options so the base TextField never sees them.
        self.processing = kwargs.pop("processing", False)
        self.html_name = kwargs.pop("html_name", None)
        self.filter_field_name = kwargs.pop("filter_field_name", None)
        self.limit_choices_to = kwargs.pop("limit_choices_to", None)
        self.safe = kwargs.pop('safe', True)
        super(MarkupTextField, self).__init__(*args, **kwargs)

    def contribute_to_class(self, cls, name):
        """Attach this field plus its generated html/filter companions."""
        super(MarkupTextField, self).contribute_to_class(cls, name)
        # derive companion field names from this field's name if unset
        if self.html_name is None:
            self.html_name = "%s_html" % name
        if self.filter_field_name is None:
            self.filter_field_name = "%s_filter" % name
        models.TextField(
            editable=False, blank=True
        ).contribute_to_class(cls, self.html_name)
        MarkupField(
            verbose_name=_("markup filter"),
            limit_choices_to=self.limit_choices_to,
            safe=self.safe
        ).contribute_to_class(cls, self.filter_field_name)

    def pre_save(self, model_instance, add):
        """Render the markup and stash the HTML on the companion field.

        Returns the (possibly template-processed) source text, which is
        what gets stored in this field's column.
        """
        value = super(MarkupTextField, self).pre_save(model_instance, add)
        if self.processing:
            # optional template-style processing of the raw source
            from turbion.bits.markup import processing
            value = processing.render_string(value)
        filter = getattr(model_instance, self.filter_field_name)
        setattr(
            model_instance,
            self.html_name,
            Filter.manager.get(filter).to_html(value)
        )
        return value
|
UTF-8
|
Python
| false | false | 2,011 |
68,719,504,743 |
8c968a87882524012156c2dc5876ab1fe9ee70d3
|
9be1d53623ada2db83be37c9887fdc9422fc53b8
|
/suit/core/keynodes.py
|
7545ba25d7d8d168cc82039dc329d9825ece70d5
|
[] |
no_license
|
sosnovitch/pyUI
|
https://github.com/sosnovitch/pyUI
|
e276f93cd8eaf59112c8520b6a4100adf460fe0f
|
67a890be00c502915de83d6393e2291f6c49a6b9
|
refs/heads/master
| 2021-01-18T19:48:48.249853 | 2012-12-18T10:27:00 | 2012-12-18T10:27:00 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
-----------------------------------------------------------------------------
This source file is part of OSTIS (Open Semantic Technology for Intelligent Systems)
For the latest info, see http://www.ostis.net
Copyright (c) 2010 OSTIS
OSTIS is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OSTIS is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with OSTIS. If not, see <http://www.gnu.org/licenses/>.
-----------------------------------------------------------------------------
"""
'''
Created on 20.11.2009
@author: Denis Koronchik
'''
import kernel
session = kernel.Kernel.session()
for _segment in (u"/etc/com_keynodes", u"/etc/questions"):
    session.open_segment(_segment)

# getting system keynodes (arc-attribute nodes 1_ .. 10_)
n_1 = session.find_keynode_full_uri(u"/proc/keynode/1_")
n_2 = session.find_keynode_full_uri(u"/proc/keynode/2_")
n_3 = session.find_keynode_full_uri(u"/proc/keynode/3_")
n_4 = session.find_keynode_full_uri(u"/proc/keynode/4_")
n_5 = session.find_keynode_full_uri(u"/proc/keynode/5_")
n_6 = session.find_keynode_full_uri(u"/proc/keynode/6_")
n_7 = session.find_keynode_full_uri(u"/proc/keynode/7_")
n_8 = session.find_keynode_full_uri(u"/proc/keynode/8_")
n_9 = session.find_keynode_full_uri(u"/proc/keynode/9_")
n_10 = session.find_keynode_full_uri(u"/proc/keynode/10_")

# same keynodes, addressable by zero-based index
attr = dict(
    (index, session.find_keynode_full_uri(u"/proc/keynode/%d_" % (index + 1)))
    for index in range(10)
)
# semantic keynodes
class info:
    """Keynodes of the /info segment: element constancy classes and
    structure-type classification nodes."""
    sc_unknown = session.find_keynode_full_uri(u"/info/sc_unknown")
    sc_const = session.find_keynode_full_uri(u"/info/sc_const")
    sc_var = session.find_keynode_full_uri(u"/info/sc_var")
    sc_meta = session.find_keynode_full_uri(u"/info/sc_meta")
    # structure type
    stype_element = session.find_keynode_full_uri(u"/info/stype_element")
    stype_sheaf = session.find_keynode_full_uri(u"/info/stype_sheaf")
    stype_pair_noorient = session.find_keynode_full_uri(u"/info/stype_pair_noorient")
    stype_pair_orient = session.find_keynode_full_uri(u"/info/stype_pair_orient")
    stype_nopair_sheaf = session.find_keynode_full_uri(u"/info/stype_nopair_sheaf")
    stype_struct = session.find_keynode_full_uri(u"/info/stype_struct")
    stype_concept = session.find_keynode_full_uri(u"/info/stype_concept")
    stype_relation = session.find_keynode_full_uri(u"/info/stype_relation")
    stype_bin_noorient_rel = session.find_keynode_full_uri(u"/info/stype_bin_noorient_rel")
    stype_bin_orient_rel = session.find_keynode_full_uri(u"/info/stype_bin_orient_rel")
    stype_bin_orient_role_rel = session.find_keynode_full_uri(u"/info/stype_bin_orient_role_rel")
    stype_bin_orient_norole_rel = session.find_keynode_full_uri(u"/info/stype_bin_orient_norole_rel")
    stype_nobin_noorient_rel = session.find_keynode_full_uri(u"/info/stype_nobin_noorient_rel")
    stype_nobin_orient_rel = session.find_keynode_full_uri(u"/info/stype_nobin_orient_rel")
    stype_concept_norel = session.find_keynode_full_uri(u"/info/stype_concept_norel")
    stype_struct_class = session.find_keynode_full_uri(u"/info/stype_struct_class")
    stype_ext_obj_class = session.find_keynode_full_uri(u"/info/stype_ext_obj_class")
    stype_ext_info_type1_class = session.find_keynode_full_uri(u"/info/stype_ext_info_type1_class")
    stype_ext_obj = session.find_keynode_full_uri(u"/info/stype_ext_obj")
    stype_ext_obj_abstract = session.find_keynode_full_uri(u"/info/stype_ext_obj_abstract")
    stype_ext_obj_real = session.find_keynode_full_uri(u"/info/stype_ext_obj_real")
    stype_ext_info_constr = session.find_keynode_full_uri(u"/info/stype_ext_info_constr")
    stype_ext_noinfo_obj_real = session.find_keynode_full_uri(u"/info/stype_ext_noinfo_obj_real")
    stype_pair_time = session.find_keynode_full_uri(u"/info/stype_pair_time")
# keynodes for user interface
class ui:
    """Keynodes used by the user-interface core (/ui/core segment):
    windows, translation, command/argument nodes, mouse events and
    content formats."""
    viewer = session.find_keynode_full_uri(u"/ui/core/просмотрщик")
    editor = session.find_keynode_full_uri(u"/ui/core/редактор")
    translator = session.find_keynode_full_uri(u"/ui/core/транслятор")
    main_window = session.find_keynode_full_uri(u"/ui/core/главное окно")
    sc_window = session.find_keynode_full_uri(u"/ui/core/sc-окно")
    set_output_windows = session.find_keynode_full_uri(u"/ui/core/ui_output_window_set")
    translate_langs = session.find_keynode_full_uri(u"/ui/core/ui_all_translations")
    translate_lang_current = session.find_keynode_full_uri(u"/ui/core/ui_used_translation")
    nrel_set_of_supported_formats = session.find_keynode_full_uri(u"/ui/core/множество поддерживаемых форматов*")
    nrel_set_of_supported_input_formats = session.find_keynode_full_uri(u"/ui/core/множество поддерживаемых входных форматов*")
    nrel_set_of_supported_output_formats = session.find_keynode_full_uri(u"/ui/core/множество поддерживаемых выходных форматов*")
    nrel_child_window = session.find_keynode_full_uri(u"/ui/core/дочернее окно*")
    nrel_set_of_output_windows = session.find_keynode_full_uri(u"/ui/core/множество окон для вывода ответа*")
    # command-argument keynodes
    arg_cur_window = session.find_keynode_full_uri(u"/ui/core/ui_arg_cur_window")
    arg_set = session.find_keynode_full_uri(u"/ui/core/ui_arg_set")
    arg_set_only = session.find_keynode_full_uri(u"/ui/core/ui_arg_set_only")
    arg_all_el = session.find_keynode_full_uri(u"/ui/core/ui_arg_all_el")
    arg_1 = session.find_keynode_full_uri(u"/ui/core/ui_arg_1")
    arg_2 = session.find_keynode_full_uri(u"/ui/core/ui_arg_2")
    arg_3 = session.find_keynode_full_uri(u"/ui/core/ui_arg_3")
    arg_4 = session.find_keynode_full_uri(u"/ui/core/ui_arg_4")
    # user-command lifecycle states (base = elementary commands)
    base_user_cmd = session.find_keynode_full_uri(u"/ui/core/элементарная пользовательская команда")
    init_base_user_cmd = session.find_keynode_full_uri(u"/ui/core/инициированная элементарная пользовательская команда")
    active_base_user_cmd = session.find_keynode_full_uri(u"/ui/core/активная элементарная пользовательская команда")
    finish_base_user_cmd = session.find_keynode_full_uri(u"/ui/core/завершенная элементарная пользовательская команда")
    user_cmd = session.find_keynode_full_uri(u"/ui/core/ui_user_command")
    init_user_cmd = session.find_keynode_full_uri(u"/ui/core/ui_initiated_user_command")
    active_user_cmd = session.find_keynode_full_uri(u"/ui/core/ui_active_user_command")
    finish_user_cmd = session.find_keynode_full_uri(u"/ui/core/ui_finished_user_command")
    nrel_template_user_cmd = session.find_keynode_full_uri(u"/ui/core/обобщенная формулировка команды*")
    # mouse events and buttons
    cmd_mouse_move_obj = session.find_keynode_full_uri(u"/ui/core/ui_cmd_mouse_move_obj")
    cmd_mouse_button_press = session.find_keynode_full_uri(u"/ui/core/ui_cmd_mouse_button_press")
    cmd_mouse_button_release= session.find_keynode_full_uri(u"/ui/core/ui_cmd_mouse_button_release")
    mouse_button_left = session.find_keynode_full_uri(u"/ui/core/mouse_button_left")
    mouse_button_right = session.find_keynode_full_uri(u"/ui/core/mouse_button_right")
    mouse_button_middle = session.find_keynode_full_uri(u"/ui/core/mouse_button_middle")
    # format
    format = session.find_keynode_full_uri(u"/ui/core/формат")
    format_sc = session.find_keynode_full_uri(u"/ui/core/SC")
    format_scgx = session.find_keynode_full_uri(u"/ui/core/SCGx")
    format_geomx = session.find_keynode_full_uri(u"/ui/core/GEOMx")
    format_jpg = session.find_keynode_full_uri(u"/ui/core/JPG")
    format_jpeg = session.find_keynode_full_uri(u"/ui/core/JPEG")
    format_bmp = session.find_keynode_full_uri(u"/ui/core/BMP")
    format_png = session.find_keynode_full_uri(u"/ui/core/PNG")
    format_string = session.find_keynode_full_uri(u"/ui/core/STRING")
    format_term = session.find_keynode_full_uri(u"/ui/core/TERM")
    format_int = session.find_keynode_full_uri(u"/ui/core/INT")
    format_real = session.find_keynode_full_uri(u"/ui/core/REAL")
    format_wmv = session.find_keynode_full_uri(u"/ui/core/WMV")
    format_avi = session.find_keynode_full_uri(u"/ui/core/AVI")
    format_mp4 = session.find_keynode_full_uri(u"/ui/core/MP4")
    format_flv = session.find_keynode_full_uri(u"/ui/core/FLV")
    format_mpg = session.find_keynode_full_uri(u"/ui/core/MPG")
    format_html = session.find_keynode_full_uri(u"/ui/core/HTML")
    format_swf = session.find_keynode_full_uri(u"/ui/core/SWF")
    format_midmif = session.find_keynode_full_uri(u"/ui/core/MIDMIF")
    format_objx = session.find_keynode_full_uri(u"/ui/core/OBJx")
    format_graph = session.find_keynode_full_uri(u"/ui/core/GRAPH")
    format_space = session.find_keynode_full_uri(u"/ui/core/SPACEx")
    format_logic = session.find_keynode_full_uri(u"/ui/core/LOGICx")
    # command keynodes
    atom_command = session.find_keynode_full_uri(u"/ui/core/атомарная команда")
    noatom_command = session.find_keynode_full_uri(u"/ui/core/неатомарная команда")
    question_command = session.find_keynode_full_uri(u"/ui/core/команда вопрос")
    user = session.find_keynode_full_uri(u"/ui/core/пользователь")
    active_user = session.find_keynode_full_uri(u"/ui/core/активный пользователь*")
class common:
    """General-purpose keynodes from the /etc/com_keynodes segment
    (common relations and identification helpers)."""
    nrel_decomposition = session.find_keynode_full_uri(u"/etc/com_keynodes/декомпозиция*");
    nrel_identification = session.find_keynode_full_uri(u"/etc/com_keynodes/идентификация*");
    nrel_authors = session.find_keynode_full_uri(u"/etc/com_keynodes/авторы*")
    nrel_base_order = session.find_keynode_full_uri(u"/etc/com_keynodes/базовая последовательность*")
    nrel_value = session.find_keynode_full_uri(u"/etc/com_keynodes/значение*")
    nrel_explanation = session.find_keynode_full_uri(u"/etc/com_keynodes/пояснение*")
    rrel_russian_text = session.find_keynode_full_uri(u"/etc/com_keynodes/русский текст_");
    rrel_english_text = session.find_keynode_full_uri(u"/etc/com_keynodes/английский текст_");
    rrel_dec_number = session.find_keynode_full_uri(u"/etc/com_keynodes/десятичное число_")
    #group_russian_language = session.find_keynode_full_uri(u"/etc/com_keynodes/Русский язык")
    group_image = session.find_keynode_full_uri(u"/etc/com_keynodes/изображение")
    user_name = session.find_keynode_full_uri(u"/etc/com_keynodes/имя пользователя")
    user_password = session.find_keynode_full_uri(u"/etc/com_keynodes/пароль")
class questions:
    """Keynodes describing the question (query) lifecycle and its
    relations, from the /etc/questions segment."""
    question = session.find_keynode_full_uri(u"/etc/questions/вопрос")
    initiated = session.find_keynode_full_uri(u"/etc/questions/инициированный вопрос")
    atom = session.find_keynode_full_uri(u"/etc/questions/атомарный вопрос")
    noatom = session.find_keynode_full_uri(u"/etc/questions/неатомарный вопрос")
    active = session.find_keynode_full_uri(u"/etc/questions/активный вопрос")
    finished = session.find_keynode_full_uri(u"/etc/questions/отработанный вопрос")
    succesful = session.find_keynode_full_uri(u"/etc/questions/успешный вопрос")
    _class = session.find_keynode_full_uri(u"/etc/questions/класс вопроса")
    nrel_action_area = session.find_keynode_full_uri(u"/etc/questions/область действия вопроса*")
    nrel_key_fragment = session.find_keynode_full_uri(u"/etc/questions/ключевой фрагмент вопроса*")
    nrel_answer = session.find_keynode_full_uri(u"/etc/questions/ответ*")
    nrel_general_formulation= session.find_keynode_full_uri(u"/etc/questions/обобщенная формулировка вопроса*")
|
UTF-8
|
Python
| false | false | 2,012 |
15,951,508,554,903 |
90e0d2893ca47e5b3d9eb579315079a1ca4671fd
|
15e11a7c56a1986bbe989adf6e7e83c225f39bfd
|
/gibthon/gibson/forms.py
|
4abadcfec952c9bc03bfe38a93df753435dcfa4a
|
[] |
no_license
|
Gibthon/Gibthon
|
https://github.com/Gibthon/Gibthon
|
9fd28d451b15756bfc2f43d91e80ede265f6b05f
|
d24f69b80e7917c7b78ede04548d5944f6e86e77
|
refs/heads/master
| 2020-03-30T13:05:49.297529 | 2012-11-04T12:50:19 | 2012-11-04T12:50:19 | 5,396,956 | 6 | 1 | null | false | 2012-11-04T13:00:30 | 2012-08-13T09:12:55 | 2012-11-04T13:00:30 | 2012-11-04T13:00:28 | 312 | null | 5 | 4 |
JavaScript
| null | null |
# gibson.forms
#
# contains forms for use with the gibthon app, along with classes for radio
# buttons that are compatible with jQuery buttonsets. These should probably be
# moved somewhere a bit more global at some point
from django import forms
from models import *
from gibthon import formfields
# very basic form for changing settings
class SettingsForm(forms.ModelForm):
    """ModelForm for editing a construct's Settings; the owning
    construct is fixed, so it is excluded from the form."""
    class Meta:
        model = Settings
        exclude = ['construct']
# for changing the detail of the form
class ConstructForm(forms.ModelForm):
    """ModelForm for editing a Construct's basic details.

    The genbank/fragments/settings/owner relations are managed
    elsewhere and are excluded; shape is rendered as a
    jQuery-buttonset-compatible radio group.
    """
    description = forms.CharField(widget=forms.Textarea)
    shape = forms.ChoiceField(
        widget=forms.RadioSelect(renderer = formfields.BetterRadioFieldRenderer),
        choices=SHAPE_CHOICES,
        initial='c'  # default shape; presumably circular -- see SHAPE_CHOICES
    )
    class Meta:
        model = Construct
        exclude = ['genbank', 'fragments', 'settings', 'owner']
# for generating the content of the accordion used to manipulate ConstructFragments
class FeatureListForm(forms.Form):
    """Form for choosing the feature sub-region and direction of a
    ConstructFragment.

    The start/finish feature choices are limited to the features of the
    fragment being edited, and the initial values reflect the fragment's
    current state.
    """
    DIRECTION_CHOICES = (
        ('f', 'Forward'),
        ('r', 'Reverse'),
    )
    start_feature = forms.ModelChoiceField('fragment.Feature', None, label='')
    finish_feature = forms.ModelChoiceField('fragment.Feature', None, label='')
    direction = forms.ChoiceField(widget=forms.RadioSelect(renderer = formfields.BetterRadioFieldRenderer), choices=DIRECTION_CHOICES)

    def __init__(self, _constructFragment, _construct, *args, **kwargs):
        """Configure per-instance querysets and initial values.

        BUGFIX: the previous implementation mutated ``self.base_fields``,
        which is the CLASS-level field dict shared by every instance of
        the form, so one fragment's queryset/initial values leaked into
        all other forms.  ``super().__init__`` makes a per-instance deep
        copy in ``self.fields``; we configure that copy instead.
        """
        super(FeatureListForm, self).__init__(*args, **kwargs)
        features = _constructFragment.fragment.features.all()
        sf = self.fields['start_feature']
        ff = self.fields['finish_feature']
        sf.queryset = features
        ff.queryset = features
        # keep the widget's rendered choices in sync with the queryset
        sf.widget.choices = sf.choices
        ff.widget.choices = ff.choices
        sf.initial = _constructFragment.start_feature
        ff.initial = _constructFragment.end_feature
        self.fields['direction'].initial = _constructFragment.direction
|
UTF-8
|
Python
| false | false | 2,012 |
249,108,109,740 |
d5092c6dd3bd44689bc264891bca340710387288
|
a7fb2c89a00531aa7a2c56f7f7abffd25181bc00
|
/l_mirror/__init__.py
|
6ab450eef33e3a74451697ccdf31cfddd7a1ec3c
|
[
"GPL-3.0-only",
"GPL-3.0-or-later"
] |
non_permissive
|
rbtcollins/lmirror
|
https://github.com/rbtcollins/lmirror
|
af92b5cb6b5a00c357af940218686c99ffe30903
|
98f2e46beeb72adc8cc24eef4da193a3150c5a12
|
refs/heads/master
| 2016-09-05T22:04:57.378200 | 2014-09-16T02:34:22 | 2014-09-16T02:34:22 | 23,956,584 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#
# LMirror is Copyright (C) 2010 Robert Collins <[email protected]>
#
# LMirror is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
#
# In the LMirror source tree the file COPYING.txt contains the GNU General Public
# License version 3.
#
"""The l_mirror library.
This library is divided into some broad areas.
The arguments package contains command argument parsing facilities.
The commands package contains the main user entry points into the application.
The ui package contains various user interfaces.
The journals module contains logic for working with individual journals.
The mirrorset module contains logic for working with a mirrorset - a thing to
be mirrored around.
The tests package contains tests and test specific support code.
"""
# Names this module deliberately exports.
__all__ = ['version', '__version__']
import pbr.version
# same format as sys.version_info: "A tuple containing the five components of
# the version number: major, minor, micro, releaselevel, and serial. All
# values except releaselevel are integers; the release level is 'alpha',
# 'beta', 'candidate', or 'final'. The version_info value corresponding to the
# Python version 2.0 is (2, 0, 0, 'final', 0)." Additionally we use a
# releaselevel of 'dev' for unreleased under-development code.
#
# If the releaselevel is 'alpha' then the major/minor/micro components are not
# established at this point, and setup.py will use a version of next-$(revno).
# If the releaselevel is 'final', then the tarball will be major.minor.micro.
# Otherwise it is major.minor.micro~$(revno).
# Uncomment when pbr 0.11 is released.
#_version = pbr.version.VersionInfo('lmirror').semantic_version()
#__version__ = _version.version_tuple()
#version = _version.release_string()
# NOTE: hard-coded until the pbr-based derivation above is enabled.
__version__ = (0, 0, 4, 'alpha', 0)
|
UTF-8
|
Python
| false | false | 2,014 |
15,109,694,997,740 |
85fa82659fd3d40c35806e3d5a066a0f71106c86
|
26d5dbef76d7c640b12ccbaa2a33d2cee1b10d0f
|
/scripts/pylab/https.py
|
213646a552baf0d0ce5b0c15281f095942190871
|
[] |
no_license
|
wb670/stonelab
|
https://github.com/wb670/stonelab
|
41dfefc3a75c9e3550877c1b9b78981d3e65cd24
|
846c493124354da4e787b5172c05061c694b98f0
|
refs/heads/master
| 2021-01-01T05:44:28.372620 | 2014-01-21T03:27:54 | 2014-01-21T03:27:54 | 55,951,211 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#coding:utf-8
'''
Created on Jun 15, 2012
@author: stone
'''
from httplib import HTTPSConnection
con = HTTPSConnection('www.alibaba-inc.com', cert_file=u'/Users/stone/Tmp/li.jinl.pem')
con.request('get', '/')
res = con.getresponse()
print res.status
res.close()
con.request('get', '/welcome.nsf/pages/welcome')
res = con.getresponse()
print res.status
res.close()
con.close()
|
UTF-8
|
Python
| false | false | 2,014 |
6,871,947,714,184 |
d90385b1eddb27b5db20003cc74f07656f6c3646
|
963456073e8debf5fa14c658bc573a3c25829116
|
/project/apps/auth/views.py
|
f82a3296866007531ef16342c54302c6b07abf81
|
[] |
no_license
|
victorliun/alliely
|
https://github.com/victorliun/alliely
|
0f97d369f7bba8bbf5cd72d2545a78f661910b39
|
6aa12024f2b76f7dc7867da6198b817e2dfe4cc5
|
refs/heads/master
| 2021-01-21T13:11:06.694316 | 2014-04-16T05:15:16 | 2014-04-16T05:15:16 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Create your views here.
from django.http import HttpResponseRedirect
from django.shortcuts import render,redirect
from django.template import RequestContext
from django.contrib.auth import authenticate, login, logout
from django.core.urlresolvers import reverse
def login_view(request):
    """Authenticate and log in a user (superseded by Django's built-in view).

    Any existing session is cleared first.  On successful authentication
    of an active user, redirects to the ``next`` POST parameter, falling
    back to the ``home`` URL; on failure (or GET) the login page is
    rendered again.
    """
    logout(request)
    username = password = ''
    if request.POST:
        username = request.POST['username']
        password = request.POST['password']
        user = authenticate(username=username, password=password)
        if user is not None:
            if user.is_active:
                login(request, user)
                # BUGFIX: the previous `assert request.POST['next']` raised
                # KeyError (or AssertionError) whenever "next" was missing
                # or empty, defeating the fallback below.  Asserts must not
                # validate request input (they vanish under python -O).
                redirect_url = request.POST.get("next", reverse("home"))
                return HttpResponseRedirect(redirect_url)
    return render(request, 'auth/login.html')
def logout_view(request):
    """Terminate the current session and send the user to the home page."""
    logout(request)
    home_url = reverse("home")
    return HttpResponseRedirect(home_url)
|
UTF-8
|
Python
| false | false | 2,014 |
13,391,708,077,234 |
def17942673f0f42dbcecb37c510dbbea637ec1e
|
d8a295fe9c2595dc50ee557bda46373fedfeed57
|
/Crawler-example.py
|
09004f1192b24cdc862b076994d1ba8751e19ca2
|
[] |
no_license
|
Mr-Dai/PythonCrawler
|
https://github.com/Mr-Dai/PythonCrawler
|
6b431723c271fc0b4c6eaf14855c0dfd5635756e
|
4ff36e199b41dc8943958cc705667f7b10b91a8d
|
refs/heads/master
| 2016-08-04T00:53:14.565258 | 2014-09-25T08:03:12 | 2014-09-25T08:03:12 | 24,449,059 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Stdlib imports.  BUGFIX: random, string and re are used below but were
# never imported, so the script died with NameError at runtime.
import random
import re
import string

import requests
import BeautifulSoup

# Coursera login endpoint and credentials (fill in before running).
signin_url = "https://accounts.coursera.org/api/v1/login"
logininfo = {"email": "...",
             "password": "...",
             "webrequest": "true"
             }

# Pretend to be a desktop Chrome browser.
user_agent = ("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_4) "
              "AppleWebKit/537.36 (KHTML, like Gecko) "
              "Chrome/36.0.1985.143 Safari/537.36")


def randomString(length):
    """Return a random alphanumeric string of the given length."""
    return ''.join(random.choice(string.letters + string.digits) for i in xrange(length))
XCSRF2Cookie = 'csrf2_token_%s' % ''.join(randomString(8))
XCSRF2Token = ''.join(randomString(24))
XCSRFToken = ''.join(randomString(24))
cookie = "csrftoken=%s; %s=%s" % (XCSRFToken, XCSRF2Cookie, XCSRF2Token)
post_headers = {"User-Agent": user_agent,
"Referer": "https://accounts.coursera.org/signin",
"X-Requested-With": "XMLHttpRequest",
"X-CSRF2-Cookie": XCSRF2Cookie,
"X-CSRF2-Token": XCSRF2Token,
"X-CSRFToken": XCSRFToken,
"Cookie": cookie
}
coursera_session = requests.Session()
login_res = coursera_session.post(signin_url,
data=logininfo,
headers=post_headers,
)
if login_res.status_code == 200:
print "Login Successfully!"
else:
print login_res.text
# Parse the course resource page and print the download links per lecture.
# NOTE(review): `content` is never defined in this file, and BeautifulSoup
# was imported as a *module*, so this should presumably be
# BeautifulSoup.BeautifulSoup(login_res.text) (bs3) or a bs4 import —
# confirm; as written this line raises at runtime.
soup = BeautifulSoup(content)

chapter_list = soup.find_all("div", class_="course-item-list-header")
lecture_resource_list = soup.find_all("ul", class_="course-item-list-section-list")

# Resource links are recognised by file extension in the href.
ppt_pattern = re.compile(r'https://[^"]*\.ppt[x]?')
pdf_pattern = re.compile(r'https://[^"]*\.pdf')

for lecture_item, chapter_item in zip(lecture_resource_list, chapter_list):
    # weekly title
    chapter = chapter_item.h3.text.lstrip()
    for lecture in lecture_item:
        lecture_name = lecture.a.string.lstrip()
        # get resource link
        # NOTE(review): any of these finds may return None for lectures
        # missing a resource type; the prints below would then raise.
        ppt_tag = lecture.find(href=ppt_pattern)
        pdf_tag = lecture.find(href=pdf_pattern)
        srt_tag = lecture.find(title="Subtitles (srt)")
        mp4_tag = lecture.find(title="Video (MP4)")
        print ppt_tag["href"], pdf_tag["href"]
        print srt_tag["href"], mp4_tag["href"]
|
UTF-8
|
Python
| false | false | 2,014 |
4,475,355,957,860 |
f6ca00fa55f01484b7db904ab985c60975ab7b1c
|
aec8cd51cb682db71c9212a68cd4018c2232811e
|
/net/thornet/testsuitecreator/suiteCreator.py
|
daf0475a10c08949386a7e38fd5957452a8b27b5
|
[] |
no_license
|
proofek/UnittestConverter
|
https://github.com/proofek/UnittestConverter
|
d0b98ee28fa9c134c3dad9fb067da8154af6424d
|
1d4aba3cc3d19ea070962cebfff0c5dc3261f049
|
refs/heads/master
| 2021-01-19T05:03:16.434172 | 2009-10-23T09:32:56 | 2009-10-23T09:32:56 | 347,029 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
TestSuite creator
"""
import os
import re
from net.thornet.converter.testFinder import UnittestFinder
from Cheetah.Template import Template
from net.thornet.testsuitecreator.PHPUnitSuite import PHPUnitSuite
class TestSuiteCreator:
testFinder = None
def __init__(self, arguments):
self.testFinder = UnittestFinder(arguments['workingDir'])
self.testFinder.fetchDirList()
def extractClassNames(self, fileName, classNames):
p = re.compile('class(.+)extends.+PHPUnit_Framework_TestCase')
unitTestFile = open(fileName, 'r')
for line in unitTestFile:
m = p.search(line)
if m is not None:
className = m.group(1).strip()
if className is not None:
classNames.append(className)
unitTestFile.close()
def saveSuite(self, dirName, content):
newSuite = open(dirName + '/AllTests.php', 'w')
newSuite.write(content)
newSuite.close()
def run(self):
dirList = self.testFinder.getDirList()
for dirName in dirList:
self.testFinder.clearFileList()
self.testFinder.fetchFileList(dirName, False)
print 'Creating suite for ' + os.path.basename(dirName)
template = PHPUnitSuite()
template.dirName = os.path.basename(dirName)
template.tests = self.testFinder.getFileList()
template.classNames = []
for fileName in self.testFinder.getFileList():
self.extractClassNames(fileName, template.classNames)
self.saveSuite(dirName, str(template))
|
UTF-8
|
Python
| false | false | 2,009 |
8,667,244,004,642 |
abb7c6c60ae26bf6be58e6a3ca8725cc886ea021
|
92091031f3b57913d410f068d3f7343ef9e742d4
|
/source/python/loadROI.py
|
5ecf1288cc43421f298c92f105e1460b396c2ba0
|
[
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-or-later",
"GPL-1.0-or-later",
"GPL-2.0-only",
"GPL-2.0-or-later"
] |
non_permissive
|
decode-india/Parking_lot
|
https://github.com/decode-india/Parking_lot
|
d7045fb164ceb86f0950505e0e7a5dbd7dd6b4f1
|
cbb495b0194410bcc17ba3e79ffe8ae74338991a
|
refs/heads/master
| 2020-03-19T17:36:03.631250 | 2013-12-09T19:52:38 | 2013-12-09T19:52:38 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Parking-lot occupancy detector: compares Canny edges of each ROI in the
# live video against the same ROI in an image of the empty lot; a large
# edge difference marks the spot occupied.
import numpy
import cv2
import pickle
import os
import sys

if len(sys.argv) < 4:
    print 'usage: loadROI.py <input ROI file> <empty lot image> <video file>'
    sys.exit(2);

#operating system flag
#There are different modes of operation depending on OS.
# On Windows results are drawn on the frame; elsewhere they are printed.
Win = False
if os.name == 'nt':
    Win = True

#Read in ROIs from file (a pickled list of (x1, y1, x2, y2) tuples)
f = open(sys.argv[1], 'r');
ROI = pickle.load(f);
f.close();

#Read in image of empty parkinglot
origIMG = cv2.imread(sys.argv[2], 1);

#Read in video file to analyze
video = cv2.VideoCapture(sys.argv[3])
#uncomment this line to capture video from available cameras
#video = cv2.VideoCapture(0)

##uncomment these lines to output a video file.
#if Win:
    #code to produce output video file
    #height , width , layers = origIMG.shape
    #outvideo = cv2.VideoWriter(sys.argv[3][0:len(sys.argv[3])-4] + '_garbage.avi', -1, 3, (width, height));

templateEdges = [];
#Preload templates and precompute template edges (empty-lot reference)
for r in ROI:
    (x1,y1,x2,y2) = r
    template = origIMG[y1:y2,x1:x2, :]
    templateEdges.append(cv2.Canny(template, 100, 200))

#main loop
i = 0;
while True:
    #get the next frame
    # NOTE(review): when the stream ends, ret is False and img is None,
    # but img is used before the break test at the bottom of the loop —
    # confirm whether the final iteration can crash in Canny/slicing.
    ret, img = video.read()
    #we only want to look at every 120th frame
    if i % 120 == 0:
        #for every parking spot
        for spot in range(0,len(ROI)):
            (x1,y1,x2,y2) = ROI[spot]
            new = img[y1:y2,x1:x2, :]
            #extract Canny edges
            newEdges = cv2.Canny(new, 100, 200)
            #calculate the difference between template and new edges
            sub = cv2.absdiff(templateEdges[spot], newEdges)
            #normalize the difference normSub is array of 1's and zeros
            normSub = sub/255;
            # flag the spot when more than threshPercent of pixels differ
            threshPercent = 10;
            threshold = normSub.size*threshPercent/100;
            #if there is greater than a 10% change between template and new edges
            #declare the parking spot occupied
            if sum(sum(normSub)) > threshold:
                if Win:
                    #draw a red rectangle to indicate an occupied spot
                    cv2.rectangle(img, (x1,y1), (x2,y2), [0, 0, 255], 2)
                else:
                    #or print occupied
                    print 'Parking spot # ' + str(spot) + ': occupied'
            else:
                if Win:
                    #draw a green rectangle to indicate an unoccupied spot
                    cv2.rectangle(img, (x1,y1), (x2,y2), [0, 255, 0], 2)
                else:
                    #or print vacant
                    print 'Parking spot # ' + str(spot) + ': vacant'
        if Win:
            #show the next frame in the image
            cv2.imshow('video', img)
            cv2.imwrite('imageROIs.jpg', img);
            #outvideo.write(img)
        else:
            #clear the screen
            os.system('clear')
    # exit on ESC (27) or when the video stream has ended
    if (0xFF & cv2.waitKey(5) == 27) | (ret == False):
        break
    i = i+1

if Win:
    cv2.destroyAllWindows()
#outvideo.release()
|
UTF-8
|
Python
| false | false | 2,013 |
2,276,332,706,072 |
48aa4bb93318d236746eacda2edbde32d9c83a9e
|
50e39231d8bea2a01a9d5db69aeb5c1a8054642b
|
/wafer/launchall.py
|
8a10d49ea4a45d2c638c56aa4122067db3a1c728
|
[] |
no_license
|
leecrest/wafer
|
https://github.com/leecrest/wafer
|
eb09e96d79e149cfee4d6fc40270996618bdea6c
|
58b148d03dc18dcfdf6bac1c5ed410f1fe112ad3
|
refs/heads/master
| 2020-05-18T18:16:41.566961 | 2014-07-15T13:37:31 | 2014-07-15T13:37:31 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#coding=utf-8
"""
@author : leecrest
@time   : 14-1-29 10:40
@brief  : Server-group launcher; running this file starts all servers in order.
"""
# The master server runs in the main process; all other servers are
# launched in child processes.
import json
import subprocess
import traceback
import wafer


if __name__ == "__main__":
    try:
        # NOTE(review): the config file handle from open() is never
        # closed; harmless for a short-lived launcher but worth a `with`.
        sConfigFile = "config/server.json"
        dConfig = json.load(open(sConfigFile, "r"))
        # Launch every non-master server as a child process.
        for sName in dConfig.iterkeys():
            if sName == "master":
                continue
            cmd = "python %s %s %s" % ("launch.py", sName, sConfigFile)
            subprocess.Popen(cmd)
        # Start the master server in this (main) process; blocks here.
        app = wafer.CreateServer("master", dConfig["master"])
        app.Start()
    except Exception, e:
        print "="*20, "Error", "="*20
        print e
        print traceback.format_exc()
|
UTF-8
|
Python
| false | false | 2,014 |
11,012,296,167,038 |
ecca7a956c5a8f71d9a4b8c4e9731fdafbbe8ea0
|
2735c5f1a9b1f1a3d2468f0838fc0f20725cbe31
|
/usr/lib/pymodules/python2.7/numpy/core/tests/test_multiarray.py
|
58655afb03edca70c1cded51e4f8060652866838
|
[] |
no_license
|
sanyaade-iot/rpi-sysroot
|
https://github.com/sanyaade-iot/rpi-sysroot
|
f202b9188fd99c372d28b59ebe1b8fcabbfb7a67
|
6e13f05b5b2b44b29ead66c96230a17f077d5198
|
refs/heads/master
| 2020-04-08T16:14:25.745147 | 2014-03-21T06:27:54 | 2014-03-21T09:47:02 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
/usr/share/pyshared/numpy/core/tests/test_multiarray.py
|
UTF-8
|
Python
| false | false | 2,014 |
15,315,853,400,866 |
a175266ffa6a9627e4801a1de594a47bcef9222e
|
bba8d21052c8fa0e6f4be490b9e091bf5bd754cc
|
/13th-chapter/5.py
|
ebad735535c1de9ad3ac1a34088211e1b4e4148f
|
[] |
no_license
|
binoytv9/Think-Python-by-Allen-B-Downey--Exercises
|
https://github.com/binoytv9/Think-Python-by-Allen-B-Downey--Exercises
|
cd8250753d2377a2ff00077b2eb98a10496e38e8
|
b4ed0d5c758014d70cf2db1dac24af1175bb17c6
|
refs/heads/master
| 2021-01-01T05:40:43.633619 | 2014-11-29T18:04:02 | 2014-11-29T18:04:02 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def histogram(lst):
    """Map each distinct element of lst to the number of times it occurs."""
    counts = {}
    for item in lst:
        if item in counts:
            counts[item] += 1
        else:
            counts[item] = 1
    return counts
def choose_from_hist(h):
    """Normalize a histogram: return a new dict mapping each key of h to
    its relative frequency (its count divided by the total of all counts).

    Note: despite the name, this does not pick a random key — it returns
    the probability distribution induced by the histogram.
    """
    # IDIOM FIX: the original accumulated the total into a local named
    # `sum`, shadowing the builtin; use the builtin instead.  float()
    # preserves true division under Python 2 (the original started at 0.0).
    total = float(sum(h.values()))
    return dict((key, h[key] / total) for key in h)
# Demo: build a histogram of the sample list, then normalize it.
t = ['a', 'a', 'b']
hist = histogram(t)
print hist  # raw counts, e.g. {'a': 2, 'b': 1}
hist = choose_from_hist(hist)
print hist  # relative frequencies, e.g. {'a': 0.666..., 'b': 0.333...}
|
UTF-8
|
Python
| false | false | 2,014 |
4,243,427,713,957 |
9d65d6a633c37b7c81c9da1a92184dffa019b081
|
963b1359b2f5ecf6c0eca974d005d530fa22ccf8
|
/ex31.py
|
87ef5a52801adedd194f237ecbbe21f3186051eb
|
[] |
no_license
|
mwctahoe/pythonHardWay
|
https://github.com/mwctahoe/pythonHardWay
|
5502a8fee33d94594d7baf19a372b007c0591fbf
|
8661b54eae7ba257853984e5eda43669598a2516
|
refs/heads/master
| 2020-05-17T05:10:01.268822 | 2013-01-10T02:15:59 | 2013-01-10T02:15:59 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# decisions decisions — a two-level branching text adventure driven by
# raw_input choices (Learn Python the Hard Way, exercise 31).
print """
You enter a dark room.
Across the abyss you seen the slimmest sliver of light beneath two doors.
Which door do you take, #1 or #2
"""

door = raw_input("> ")

# Door 1: the bear encounter.
if door == "1":
    print "THERE IS A FUCKING BEAR IN THE ROOM! He has cake, what do you do?"
    print "1. take the cake"
    print "2. Scream at the bear."

    bear = raw_input("> ")

    if bear == "1":
        print "The Cake is a Lie. Bear mauls you to death for your ignorance."
    elif bear == "2":
        print "Bear finds your feminine scream annoying. He slaughters your family."
    else:
        # any other input falls through to this ending
        print "You have died of dysentery. Oregon Trail was such a good game, Bear starts playing it."
# Door 2: the Cthulhu encounter.
elif door == "2":
    print "The light was eminating from Cthulhu's eyes."
    print "1. Bluberries"
    print "2. Yellow jacket clothespins."
    print "3. UNderstanding revolvers yelling melodies."

    insanity = raw_input("> ")

    if insanity == "1" or insanity == "2":
        print "Your body survives powered by a mind of jello. Good Job!"
    else:
        print "Insanity melts your face, raiders of the lost ark style. You have fucked up now."
# Anything other than "1" or "2" at the first prompt.
else:
    print "You wander through the endless darkness forever. You suck at this game, User."
|
UTF-8
|
Python
| false | false | 2,013 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.