__id__
int64 3.09k
19,722B
| blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
256
| content_id
stringlengths 40
40
| detected_licenses
list | license_type
stringclasses 3
values | repo_name
stringlengths 5
109
| repo_url
stringlengths 24
128
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
42
| visit_date
timestamp[ns] | revision_date
timestamp[ns] | committer_date
timestamp[ns] | github_id
int64 6.65k
581M
⌀ | star_events_count
int64 0
1.17k
| fork_events_count
int64 0
154
| gha_license_id
stringclasses 16
values | gha_fork
bool 2
classes | gha_event_created_at
timestamp[ns] | gha_created_at
timestamp[ns] | gha_updated_at
timestamp[ns] | gha_pushed_at
timestamp[ns] | gha_size
int64 0
5.76M
⌀ | gha_stargazers_count
int32 0
407
⌀ | gha_forks_count
int32 0
119
⌀ | gha_open_issues_count
int32 0
640
⌀ | gha_language
stringlengths 1
16
⌀ | gha_archived
bool 2
classes | gha_disabled
bool 1
class | content
stringlengths 9
4.53M
| src_encoding
stringclasses 18
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | year
int64 1.97k
2.01k
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2,241,972,965,061 |
9930d5ba40b24d45af63b4d0969cabe77a782ba3
|
d05d0f4f0db27233f20ad96a5bf22ae6c64e3a54
|
/tracker/views.py
|
9e9e2011f1b7f41a89c3c7065508657e4bb48a0e
|
[] |
no_license
|
Apkawa/simplebtt
|
https://github.com/Apkawa/simplebtt
|
8a1030e0ec514d695e7c71d2b219deb8d09af35a
|
64ae84fed9e32f75e3cf5ba8b41763dd541e9a9d
|
refs/heads/master
| 2021-01-23T22:06:15.281916 | 2009-01-17T20:47:46 | 2009-01-17T20:47:46 | 104,719 | 8 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.http import HttpResponse, HttpResponseNotFound, HttpResponseRedirect
from django.core.exceptions import ObjectDoesNotExist
from django.utils.encoding import DjangoUnicodeDecodeError
from django.shortcuts import render_to_response
from django.conf import settings
from simplebtt.recaptcha import captcha
from simplebtt.tracker.models import Torrent, User, Client, Stat, Category# TorrentForm
from simplebtt.tracker.forms import TorrentAddForm
from urllib import unquote
from re import findall
def search( request ):
    """Search torrents by name.

    The query string is split on whitespace and each word becomes one
    alternative of a case-insensitive regex match on the torrent name.
    An empty or missing query lists all torrents.

    Fixes: the original referenced ``query`` in the render call even when
    no 'search' parameter was supplied (NameError), and left a debug print.
    """
    if request.method == 'GET':
        query = request.GET.get('search') or ''
        if query:
            import re
            # NOTE(review): joining with '!|' (not plain '|') looks odd --
            # it matches "word!" or the next word.  Kept as-is; confirm intent.
            regex = '!|'.join(re.sub('([\s]+|\n+)',' ', query).split(' '))
            finder = Torrent.objects.filter( name__iregex = r'(%s)'%regex)
        else:
            finder = Torrent.objects.all()
        t_t = [ {
            't': f,
            'leech': f.clients.exclude(left = 0).count(),
            'seed' : f.clients.filter( left = 0 ).count(),
        } for f in finder]
        return render_to_response('tracker/torrent_search.html', {'torrents': t_t, 'query': query})
def torrent_list( request, category=None , page=1):
    """Render one page (10 items) of torrents, optionally filtered by category.

    For each torrent the current leecher/seeder counts are computed from the
    related Client rows (a client with left == 0 is a seeder).
    """
    # NOTE(review): list_category is computed but never used below --
    # possibly meant to be passed to the template.  Confirm before removing.
    list_category = Category.objects.all()
    step = 10
    page = int(page)
    prev_page = (page-1)*step
    next_page = page*step
    if category:
        temp = Torrent.objects.filter( category__name = category ).order_by('-creation_date')[prev_page:next_page]
    else:
        temp = Torrent.objects.all().order_by('-creation_date')[prev_page:next_page]
    t_t = [ {
        't': t,
        'leech': t.clients.exclude(left = 0).count(),
        'seed' : t.clients.filter( left = 0 ).count(),
    } for t in temp]
    # Tracker-wide totals shown in the page footer/header.
    stat ={
        's':Stat.objects.get(id=1),
        'leechs': Client.objects.exclude(left=0).count(),
        'seeds' : Client.objects.filter(left=0).count(),
    }
    return render_to_response('tracker/torrent_list.html',
        {
            'torrents': t_t,
            'stat': stat,
            'category': category,
            'page': page,
            'step': step,
            # NOTE(review): temp.count() counts the *sliced* queryset, so this
            # is the page size, not the total number of torrents -- verify the
            # template's pagination expects that.
            'num_page': { 'page':page, 'step':step, 'count': temp.count(), 'category': category, },
        })
def torrent_info( request, _id ):
    """Show the detail page for a single torrent, or 404 if it doesn't exist.

    announce_list is stored '|'-separated; file_list is stored as JSON.
    """
    _i = Torrent.objects.filter(id=_id)
    if _i:
        _i = _i[0]
        if _i.announce_list:
            announce_list = _i.announce_list.split('|')
        else:
            announce_list = None
        if _i.file_list:
            from json import loads
            file_list = loads(_i.file_list)
        else:
            file_list = None
        info = { 't': _i,
            'leech': _i.clients.exclude(left = 0).count(),
            # fixed: compare against the integer 0, consistent with every
            # other left-count in this module (was the string '0')
            'seed' : _i.clients.filter( left = 0 ).count(),
        }
        return render_to_response('tracker/torrent_info.html', {'i': info, 'announce_list': announce_list, 'file_list': file_list })
    else:
        return HttpResponseNotFound('<h1>Page not found</h1>')
def torrent_add( request ):
    """Handle the torrent upload form, guarded by a reCAPTCHA check.

    On success redirects to the torrent's info page; a duplicate upload
    (no id after save) re-renders the form with an error message.
    """
    nocaptcha = 0          # set truthy to bypass the captcha (debug aid)
    error = None
    if request.method == 'POST':
        if not nocaptcha:
            check_captcha = captcha.submit( request.POST['recaptcha_challenge_field'],
                request.POST['recaptcha_response_field'],
                settings.RECAPTCHA_PRIVATE_KEY, request.META['REMOTE_ADDR'])
            if not check_captcha.is_valid:
                # reject suspected bots outright (removed a dead `pass`
                # that preceded this return in the original)
                return HttpResponseNotFound('<h1>You bot?</h1>')
        form = TorrentAddForm( request.POST, request.FILES)
        if form.is_valid():
            anon = User.objects.get(id=1)      # all uploads attributed to the anonymous user
            instance = form.save( commit=False)
            instance.author = anon
            instance.save()                    # (return value was stored but never used)
            _id = instance.id
            if _id:
                return HttpResponseRedirect('/info/%i'%_id)
            else:
                error = "This torrent file is already in the database"
    else:
        form = TorrentAddForm()
    html_captcha = captcha.displayhtml(settings.RECAPTCHA_PUB_KEY)
    return render_to_response('tracker/torrent_add.html', {'form': form, 'recaptcha': html_captcha, 'error': error})
#-------------------------------------------------------------
'''
from django import forms
class UploadFileForm(forms.Form):
title = forms.CharField(max_length=50)
file = forms.FileField()
from django.shortcuts import render_to_response
def upload_torrent( request ):
if request.method == 'POST':
form = UploadFileForm(request.POST, request.FILES)
#form = TorrentForm(request.POST)#UploadTorrentForm( request.POST )
print request.FILES.keys()
#_add_torrent(request.FILES['file'])
if form.is_multipart():
pass
#return HttpResponseRedirect('/success/url/')
else:
form = UploadFileForm()
# form = TorrentForm()
return render_to_response('add_torrent.html', {'form': form})
def _add_torrent(file):
save = open('media/torrent/%s'%f.name, 'w')
torrent = f.read()
info_hash = _get_info_torrent(torrent)
print info_hash
try:
t = Torrent.objects.get( info_hash = info_hash )
except ObjectDoesNotExist:
pass
# t = Torrent.objects.create()
def _get_info_torrent(torrent):
from hashlib import sha1
torrent = hunnyb.decode(torrent)
_hash = sha1(hunnyb.encode(torrent['info']))
hash_base64 = base64.b64encode(_hash.digest())
return hash_base64
'''
|
UTF-8
|
Python
| false | false | 2,009 |
6,322,191,910,634 |
261df60022396d947130b4c628b26514f3fc4203
|
e54b4d56eab2365411337250179d203dc9156045
|
/src/knesset/user/models.py
|
32169c8a570d93ec845d2bb0b6c16189101fa2db
|
[
"GPL-2.0-only"
] |
non_permissive
|
borisd/Open-Knesset
|
https://github.com/borisd/Open-Knesset
|
fefd0795fef88e1373fab14cdff9c7f134f011fe
|
86d32d32b95e25e8de94542996b0291e7c064f1d
|
refs/heads/master
| 2020-12-25T13:23:58.717310 | 2010-08-21T14:02:44 | 2010-08-21T14:02:44 | 850,770 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#encoding: utf-8
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.db.models.signals import post_save
from django.contrib.auth.models import User
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from actstream import follow
from actstream.models import Follow
from knesset.mks.models import Party, Member
class UserProfile(models.Model):
    '''
    This model is extending the builtin user model.
    The extension includes a list of followed parties and members.
    >>> daonb = User.objects.create(username='daonb')
    >>> profile = daonb.get_profile()
    >>> legalize = Party.objects.create(name='legalize')
    >>> follow(daonb, legalize)
    <Follow: daonb -> legalize>
    >>> legalize == daonb.get_profile().parties[0]
    True
    >>> dbg = Member.objects.create(name='david ben gurion')
    >>> follow(daonb, dbg)
    <Follow: daonb -> david ben gurion>
    >>> dbg == daonb.get_profile().members[0]
    True
    '''
    # one-to-one link back to the django auth User this profile extends
    user = models.ForeignKey(User, unique=True)

    @property
    def members(self):
        """Members this user follows, via actstream Follow rows."""
        #TODO: there has to be a faster way (one query per access)
        return map(lambda x: x.actor,
            Follow.objects.filter(user=self.user, content_type=ContentType.objects.get_for_model(Member)))

    @property
    def parties(self):
        """Parties this user follows, via actstream Follow rows."""
        #TODO: there has to be a faster way (one query per access)
        return map(lambda x: x.actor,
            Follow.objects.filter(user=self.user, content_type=ContentType.objects.get_for_model(Party)))

    @models.permalink
    def get_absolute_url(self):
        return ('public-profile', (), {'object_id': self.user.id})
def handle_user_save(sender, created, instance, **kwargs):
    """post_save hook: auto-create a UserProfile for each new User.

    Only fires for newly created users saved to the 'default' database.
    """
    if not created:
        return
    if instance._state.db == 'default':
        UserProfile.objects.create(user=instance)

post_save.connect(handle_user_save, sender=User)
|
UTF-8
|
Python
| false | false | 2,010 |
3,599,182,622,318 |
127bbb97a759ec4efda295f13e835670a0cb1329
|
e8bda1dc15587704b292690671a7e354e06c6b8b
|
/blast_scripts/localBlastAnalysis.py
|
7b1257cb14695daef10eb67f97fca6c294946d74
|
[] |
no_license
|
kokonech/utils
|
https://github.com/kokonech/utils
|
69914bd194cd0dca9cacf23403b4a35f74783648
|
23805ccb437ae90fbbcc05574ce4e63f5ba29682
|
refs/heads/master
| 2020-03-30T12:36:33.954743 | 2013-01-07T09:37:16 | 2013-01-07T09:37:16 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import re
import sys
import argparse
import os.path
import time
import tempfile
import os
from Bio import Entrez
from Bio import SeqIO
from Bio.Blast.Applications import NcbiblastnCommandline
from Bio.Blast import NCBIXML
# Status markers for sequences.
# NOTE(review): neither constant is referenced in this file's visible code;
# they may belong to the unfinished `missing` bookkeeping below -- confirm.
SEQ_BLASTED = 0
SEQ_NOT_FOUND = -1
# Number of sequences submitted to BLAST per batch ("bunch").
MAX_BUF_SIZE = 100
def parseBlastResult(fileName):
    """Parse a BLAST XML result file.

    Returns a list of tuples (query_id, hit_id, max_identity_percent,
    hit_description), one tuple per alignment.  A query without any
    alignment contributes a single ("-", 0, "-") placeholder row.

    Fixes: the file handle was never closed; the unused locals `evalue`
    and `score` were removed.
    """
    results = []
    with open(fileName) as handle:
        for record in NCBIXML.parse(handle):
            rec_id = str(record.query)
            if len(record.alignments) == 0:
                results.append( (rec_id, "-", 0, "-") )
                continue
            for algn in record.alignments:
                # best identity over all HSPs of this alignment, as a percent
                ids = [hsp.identities / float(hsp.align_length)
                       for hsp in algn.hsps]
                max_identity = int(max(ids)*100)
                results.append( (rec_id, algn.hit_id, max_identity, algn.hit_def) )
    return results
def analyzeSeq(seqBuf, outputPath, db_name ):
    """Megablast a batch of sequences against a local BLAST database.

    seqBuf     -- list of (seq_id, sequence) pairs
    outputPath -- where the XML result is written (outfmt=5)
    db_name    -- local BLAST database name

    The sequences are written to a temporary FASTA query file.  On BLAST
    failure any partial output file is removed so the batch is retried on
    the next run.  Fixes: the temporary query file was leaked; the bare
    `except` was narrowed; the unconditional os.remove could itself raise.
    """
    (fd, qname) = tempfile.mkstemp()
    try:
        with os.fdopen(fd, "w") as tmp:
            for seq_id, seq_data in seqBuf:
                tmp.write(">%s\n%s\n" % (seq_id, seq_data))
        cline = NcbiblastnCommandline(query=qname, db=db_name, task="megablast",
                                      evalue=0.001, outfmt=5, out=outputPath)
        try:
            cline()
        except Exception:
            # best-effort cleanup: drop the partial result file, if any
            if os.path.exists(outputPath):
                os.remove(outputPath)
    finally:
        os.remove(qname)
if __name__ == "__main__":
    descriptionText = "The script reads each sequence from an input .csv file and blasts it for specified organism. The accession id is then extracted from blast result."
    parser = argparse.ArgumentParser(description = descriptionText,formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument("seqFile", help="Input file with list of sequences")
    parser.add_argument("dbName", help="BLAST database name")
    args = parser.parse_args()
    seqFileName = args.seqFile
    dbName = args.dbName
    print "Input sequences file is ", seqFileName
    # Load sequences from CSV: column 0 is the id, column 1 is the sequence
    # wrapped in double quotes (hence the split on '"').
    seqFile = open(seqFileName)
    sequences = {}
    # NOTE(review): `missing` is printed at the end but never appended to --
    # rows without a sequence are only reported, not recorded.  Confirm intent.
    missing = []
    for line in seqFile:
        items = line.strip().split(",")
        seq_id = items[0]
        try:
            seq = items[1].split("\"")[1]
            sequences[seq_id] = seq
        except:
            print "No sequence available, skipping ", seq_id
        #print seq_id, seq
    # Work directory name encodes the input file and batch size so a rerun
    # with the same parameters can resume from existing bunch_*.xml files.
    workDirName = "local_analysis_%s_%d" % (seqFileName, MAX_BUF_SIZE)
    if not os.path.exists(workDirName):
        os.makedirs(workDirName)
    print "Workdir name is ", workDirName
    outputFileName = workDirName + "/" + "resume.txt"
    output = open(outputFileName, "w")
    # NOTE(review): header has 5 columns but each data row below writes 4
    # fields (no Url column) -- confirm which is intended.
    output.write("#Seq id\tGenbank id\tUrl\tMax identity(%)\tDescription\n")
    total = len(sequences)
    bunch_count = 1
    i = 0
    buf = []
    # Batch the sequences into bunches of MAX_BUF_SIZE; an existing
    # bunch_<n>.xml means that batch was already blasted and is only parsed.
    for seq_id in sorted(sequences):
        outputPath = workDirName + "/" + "bunch_" + str(bunch_count) +".xml";
        #print i, seq_id
        if not os.path.exists(outputPath):
            i += 1
            buf.append ( (seq_id, sequences[seq_id]) )
            if len(buf) == MAX_BUF_SIZE or i == total:
                print time.ctime()
                res = analyzeSeq(buf, outputPath, dbName)
                print time.ctime()
                buf = []
            else:
                # batch not full yet: keep accumulating
                continue
        else:
            # batch result already on disk: skip ahead a full batch
            i += MAX_BUF_SIZE
        bunch_count += 1
        res = parseBlastResult(outputPath)
        for r in res:
            #print r
            output.write( "%s\t%s\t%d\t%s\n" % ( r[0], r[1], r[2], r[3] ) )
        print "Analyzed %d out of %d sequences..." % ( i , total)
    print "\n\n"
    print "Missing sequences:"
    print missing
|
UTF-8
|
Python
| false | false | 2,013 |
2,156,073,627,717 |
be96a385aaa9d099e0293f1db6e6901ce1d6eaf2
|
4e2241d9f6ab0a1f4231e14d01fc96df9edd0576
|
/bairrosbh/bairros_noroeste.py
|
f8429cea015548eba43cbd3c891c3a60ae5e10a0
|
[] |
no_license
|
leandrocosta/bairrosbh
|
https://github.com/leandrocosta/bairrosbh
|
2a1e65725d4181ecd7593ffcf4edb9075cc96903
|
7cb1e2c101f53a78fce345c328c5c7ce1567e0b9
|
refs/heads/master
| 2020-06-03T05:28:10.233778 | 2014-06-05T22:56:57 | 2014-06-05T22:56:57 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#coding=utf-8
# Names of the neighbourhoods (bairros) in the Noroeste region of
# Belo Horizonte, exposed as module-level string constants.
BAIRRO_ALTO_CAICARAS = 'Alto Caiçaras'
BAIRRO_ALTO_DOS_PINHEIROS = 'Alto dos Pinheiros'
BAIRRO_ALVARO_CAMARGOS = 'Álvaro Camargos'
BAIRRO_APARECIDA = 'Aparecida'
BAIRRO_APARECIDA_SETIMA_SECAO = 'Aparecida, Sétima Seção'
BAIRRO_BOM_JESUS = 'Bom Jesus'
BAIRRO_BONFIM = 'Bonfim'
BAIRRO_CAICARA_ADELAIDE = 'Caiçara-Adelaide'
BAIRRO_CAICARAS = 'Caiçaras'
BAIRRO_CALIFORNIA = 'Califórnia'
BAIRRO_CARLOS_PRATES = 'Carlos Prates'
BAIRRO_CONJUNTO_CALIFORNIA_I = 'Conjunto Califórnia I'
BAIRRO_CONJUNTO_CALIFORNIA_II = 'Conjunto Califórnia II'
BAIRRO_CONJUNTO_JARDIM_FILADELFIA = 'Conjunto Jardim Filadélfia'
BAIRRO_CONJUNTO_NOVO_DOM_BOSCO = 'Conjunto Novo Dom Bosco'
BAIRRO_COQUEIROS = 'Coqueiros'
BAIRRO_CORACAO_EUCARISTICO = 'Coração Eucarístico'
BAIRRO_DELTA = 'Delta'
BAIRRO_DOM_BOSCO = 'Dom Bosco'
BAIRRO_DOM_CABRAL = 'Dom Cabral'
BAIRRO_ERMELINDA = 'Ermelinda'
BAIRRO_GLORIA = 'Glória'
BAIRRO_INCONFIDENCIA = 'Inconfidência'
BAIRRO_JARDIM_MONTANHES = 'Jardim Montanhês'
BAIRRO_JOAO_PINHEIRO = 'João Pinheiro'
BAIRRO_LAGOINHA = 'Lagoinha'
BAIRRO_LORENA = 'Lorena'
BAIRRO_MARMITEIROS = 'Marmiteiros'
BAIRRO_MINAS_BRASIL = 'Minas Brasil'
BAIRRO_MONSENHOR_MESSIAS = 'Monsenhor Messias'
BAIRRO_NOVA_CACHOEIRINHA = 'Nova Cachoeirinha'
BAIRRO_NOVA_ESPERANCA = 'Nova Esperança'
BAIRRO_NOVO_GLORIA = 'Novo Glória'
BAIRRO_OESTE = 'Oeste'
BAIRRO_PADRE_EUSTAQUIO = 'Padre Eustáquio'
BAIRRO_PEDREIRA_PRADO_LOPES = 'Pedreira Prado Lopes'
BAIRRO_PINDORAMA = 'Pindorama'
BAIRRO_SANTO_ANDRE = 'Santo André'
BAIRRO_SAO_CRISTOVAO = 'São Cristóvão'
BAIRRO_SAO_FRANCISCO_DAS_CHAGAS = 'São Francisco das Chagas'
BAIRRO_SAO_SALVADOR = 'São Salvador'
BAIRRO_SENHOR_DOS_PASSOS = 'Senhor dos Passos'
BAIRRO_SUMARE = 'Sumaré'
BAIRRO_VILA_CALIFORNIA = 'Vila Califórnia'
BAIRRO_VILA_COQUEIRAL = 'Vila Coqueiral'
BAIRRO_VILA_DAS_OLIVEIRAS = 'Vila das Oliveiras'
BAIRRO_VILA_MALOCA = 'Vila Maloca'
BAIRRO_VILA_NOVA_CACHOEIRINHA_PRIMEIRA_SECAO = 'Vila Nova Cachoeirinha Primeira Seção'
BAIRRO_VILA_NOVA_CACHOEIRINHA_SEGUNDA_SECAO = 'Vila Nova Cachoeirinha Segunda Seção'
BAIRRO_VILA_PUC = 'Vila PUC'
BAIRRO_VILA_SUMARE = 'Vila Sumaré'
BAIRRO_VILA_TRINTA_E_UM_DE_MARCO = 'Vila Trinta e Um de Março'
|
UTF-8
|
Python
| false | false | 2,014 |
3,925,600,121,780 |
d25dd59d667be0d705f307e4cc03f66865dbfb97
|
7c9b04d0e74d76b0b5fe2130040e97c5c1241ebd
|
/bin/statuspi.py
|
2d7fc8f80996f8abc722248ce94eca3bcc730c23
|
[
"GPL-3.0-only",
"GPL-1.0-or-later",
"LicenseRef-scancode-unknown-license-reference"
] |
non_permissive
|
h2g2guy/statuspi.py
|
https://github.com/h2g2guy/statuspi.py
|
0ae773448a4b658b72d65d8c9325674a0c161873
|
a16682c17b7c0632116f216e15cf05b25ebef648
|
refs/heads/master
| 2016-09-06T01:52:19.144360 | 2014-05-22T04:32:29 | 2014-05-22T04:32:29 | 17,426,778 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# This file is part of statuspi.py, available at
# <github.com/h2g2guy/statuspi.py>.
# statuspi.py is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import RPi.GPIO as GPIO
import subprocess, time, sys
def usage():
    """Print the command-line help, release the GPIO pins and exit."""
    help_lines = (
        "usage: sudo python statuspi.py <cfg_file>",
        " the first line of the configuration file will be executed",
        " as a python script every five seconds; on a return code of",
        " 1, the LED will illuminate. otherwise, the LED will turn off.",
    )
    for text in help_lines:
        print(text)
    GPIO.cleanup()
    sys.exit()
GPIO.setmode(GPIO.BCM)

if len(sys.argv) != 2:
    usage()

# Read up to two status-check scripts from the config file.
# Fixes: the file handle was never closed; readline() returns "" (never
# None) at EOF; and the original compared strings with `is not`, which
# tests object identity rather than equality.
with open(sys.argv[1]) as f:
    line = f.readline().rstrip("\n")    # script driving the LED on pin 18
    line2 = f.readline().rstrip("\n")   # optional script for the LED on pin 23

# connect LED on pin 18 (and optional second LED on pin 23)
GPIO.setup(18, GPIO.OUT)
GPIO.setup(23, GPIO.OUT)

while True:
    # exit status 1 from the configured script turns the LED on
    if subprocess.call(["python", line]) == 1:
        GPIO.output(18, True)
    else:
        GPIO.output(18, False)
    if line2:
        if subprocess.call(["python", line2]) == 1:
            GPIO.output(23, True)
        else:
            GPIO.output(23, False)
    time.sleep(5)
|
UTF-8
|
Python
| false | false | 2,014 |
1,082,331,769,170 |
47324c08538be5d24c9590f3f64922079514d954
|
55ff9a75f27263bcd0e7f87b78425ddb083dccd5
|
/menu/admin.py
|
614a85b7ab2f0a805d05185688f6c06ce79018be
|
[] |
no_license
|
wd5/log-house
|
https://github.com/wd5/log-house
|
fec5036c96b02edc35478ce9a590ba1de9413eb2
|
90bfb82f00175d083b8df90d47cb196d3cb51f6d
|
refs/heads/master
| 2021-01-24T00:14:07.055758 | 2010-11-08T17:31:37 | 2010-11-08T17:31:37 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
from django.contrib import admin
from loghouse.menu.models import Category, Menu

# Expose the menu models in the default django admin with stock options.
admin.site.register(Category)
admin.site.register(Menu)
|
UTF-8
|
Python
| false | false | 2,010 |
4,621,384,854,109 |
a58c17f03edb7a46e2eb5802fcf71bc154c441d8
|
76de4fc4f00a04c8c9acc1e9e4a5fae12cf0c08a
|
/tags/release-0.8.1/pyformex/plugins/mesh.py
|
44d1eed192e9db3cdcfc21f7e894b8e185a40353
|
[
"GPL-3.0-only"
] |
non_permissive
|
BackupTheBerlios/pyformex-svn
|
https://github.com/BackupTheBerlios/pyformex-svn
|
ec2361b1b9967918be65e892217a691a6f8b145d
|
f5404809095711334bbb938d9d119a69ad8fc260
|
refs/heads/master
| 2020-12-24T13:20:47.422165 | 2011-11-15T11:52:23 | 2011-11-15T11:52:23 | 40,749,266 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# $Id$
##
## This file is part of pyFormex 0.8.1 Release Wed Dec 9 11:27:53 2009
## pyFormex is a tool for generating, manipulating and transforming 3D
## geometrical models by sequences of mathematical operations.
## Homepage: http://pyformex.org (http://pyformex.berlios.de)
## Copyright (C) Benedict Verhegghe ([email protected])
## Distributed under the GNU General Public License version 3 or later.
##
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see http://www.gnu.org/licenses/.
##
"""mesh.py
Definition of the Mesh class for describing discrete geometrical models.
And some useful meshing functions to create such models.
"""
from numpy import *
from coords import *
from formex import *
from connectivity import *
import elements
from plugins.fe import mergeModels
from utils import deprecation
# This should probably go to formex or coords module
def vectorPairAreaNormals(vec1,vec2):
    """Compute area of and normals on parallellograms formed by vec1 and vec2.

    vec1 and vec2 are (n,3) shaped arrays holding collections of vectors.
    The result is a tuple of two arrays:
    - area (n) : the area of the parallellogram formed by vec1 and vec2.
    - normal (n,3) : (normalized) vectors normal to each couple (vec1,2).
    These are calculated from the cross product of vec1 and vec2, which indeed
    gives area * normal.
    Note that where two vectors are parallel, an area zero will results and
    an axis with components NaN.

    Fix: removed a stray debug print of the intermediate results.
    """
    normal = cross(vec1,vec2)
    area = vectorLength(normal)
    # dividing by a zero area yields NaN components, as documented above
    normal /= area.reshape((-1,1))
    return area,normal
def vectorPairCosAngles(vec1,vec2,normalized=False):
    """Return the cosine of the angles between two vectors.

    vec1: an (nvector,3) shaped array of floats
    vec2: an (nvector,3) shaped array of floats
    normalized: can be set True if the vectors are already normalized.
    Return value: an (nvector,) shaped array of floats
    """
    if normalized:
        return dotpr(vec1, vec2)
    # normalize first, then take the dot product
    return dotpr(normalize(vec1), normalize(vec2))
def vectorPairAngles(vec1,vec2,normalized=False,angle_spec=Deg):
    """Return the angles between vec1 and vec2, in units given by angle_spec."""
    cosines = vectorPairCosAngles(vec1, vec2, normalized)
    return arccos(cosines) / angle_spec
def vectorRotation(vec1,vec2,upvec=[0.,0.,1.]):
    """Return axis and angle to rotate vectors in a parallel to b

    vectors in a and b should be unit vectors.
    The returned axis is the cross product of a and b. If the vectors
    are already parallel, a random vector normal to a is returned.
    """
    # NOTE(review): this function looks unfinished.  It prints every
    # intermediate, raises a bare `raise` (no exception object) when any
    # cross product degenerates, and falls off the end without returning
    # the axis/angle promised by the docstring -- yet sweepCoords unpacks
    # its result as (angles, normals).  Also note the mutable default
    # argument upvec.  TODO: confirm intended behavior before use.
    u = normalize(vec1)
    u1 = normalize(vec2)
    v = normalize(upvec)
    v1 = v
    w = cross(u,v)
    w1 = cross(u1,v)
    # indices where the cross products vanish (parallel vectors)
    wa = where(length(w) == 0)
    wa1 = where(length(w1) == 0)
    print(u)
    print(u1)
    print(v)
    print(v1)
    print(w)
    print(w1)
    if len(wa) > 0 or len(wa1) > 0:
        print(wa,wa1)
        raise
    ## if len(wa1)
    ## = normalize(random.random((len(w),3)))
# Should probably be made a Coords method
# But that would make the coords module dependent on a plugin
def sweepCoords(self,path,origin=[0.,0.,0.],normal=0,upvector=None,avgdir=False,enddir=None):
    """ Sweep a Coords object along a path, returning a series of copies.

    origin and normal define the local path position and direction on the mesh.
    At each point of the curve, a copy of the Coords object is created, with
    its origin in the curve's point, and its normal along the curve's direction.
    In case of a PolyLine, directions are pointing to the next point by default.
    If avgdir==True, average directions are taken at the intermediate points.
    Missing end directions can explicitely be set by enddir, and are by default
    taken along the last segment.
    If the curve is closed, endpoints are treated as any intermediate point,
    and the user should normally not specify enddir.
    The return value is a sequence of the transformed Coords objects.
    """
    points = path.coords
    if avgdir:
        directions = path.avgDirections()
    else:
        directions = path.directions()
    # pad the direction list so there is one direction per path point
    missing = points.shape[0] - directions.shape[0]
    if missing == 1:
        lastdir = (points[-1] - points[-2]).reshape(1,3)
        directions = concatenate([directions,lastdir],axis=0)
    elif missing == 2:
        lastdir = (points[-1] - points[-2]).reshape(1,3)
        firstdir = (points[1] - points[0]).reshape(1,3)
        directions = concatenate([firstdir,directions,lastdir],axis=0)
    # explicit end directions override the computed ones
    if enddir:
        for i,j in enumerate([0,-1]):
            if enddir[i]:
                directions[j] = Coords(enddir[i])
    directions = normalize(directions)
    print(directions )
    if type(normal) is int:
        normal = unitVector(normal)
    # NOTE(review): vectorRotation above never returns a value, so this
    # unpacking would fail at runtime -- this path appears to be work in
    # progress; confirm before relying on it.
    angles,normals = vectorRotation(directions,normal)
    print(angles,normals)
    base = self.translate(-Coords(origin))
    if upvector is None:
        sequence = [
            base.rotate(a,-v).translate(p)
            for a,v,p in zip(angles,normals,points)
        ]
    else:
        if type(upvector) is int:
            upvector = Coords(unitVector(upvector))
        # extra rotation to keep the upvector aligned along the sweep
        uptrf = [ upvector.rotate(a,v) for a,v in zip(angles,normals) ]
        uangles,unormals = vectorRotation(uptrf,upvector)
        print(uangles,unormals)
        sequence = [
            base.rotate(a,v).rotate(ua,uv).translate(p)
            for a,v,ua,uv,p in zip(angles,normals,uangles,unormals,points)
        ]
    return sequence
# Map from element plexitude (number of nodes per element) to the name of
# the default element type of that plexitude.
_default_eltype = {
    1 : 'point',
    2 : 'line2',
    3 : 'tri3',
    4 : 'quad4',
    6 : 'wedge6',
    8 : 'hex8',
}

def defaultEltype(nplex):
    """Default element type for a mesh with given plexitude.

    Returns None when no default is defined for nplex.
    """
    try:
        return _default_eltype[nplex]
    except KeyError:
        return None
class Mesh(object):
    """A mesh is a discrete geometrical model consisting of nodes and elements.

    In the Mesh geometrical data model, coordinates of all points are gathered
    in a single twodimensional array 'coords' with shape (ncoords,3) and the
    individual geometrical elements are described by indices into the 'coords'
    array.
    This model has some advantages over the Formex data model, where all
    points of all element are stored by their coordinates:
    - compacter storage, because coordinates of coinciding points do not
      need to be repeated,
    - faster connectivity related algorithms.
    The downside is that geometry generating algorithms are far more complex
    and possibly slower.
    In pyFormex we therefore mostly use the Formex data model when creating
    geometry, but when we come to the point of exporting the geometry to
    file (and to other programs), a Mesh data model may be more adequate.
    The Mesh data model has at least the following attributes:
    - coords: (ncoords,3) shaped Coords array,
    - elems: (nelems,nplex) shaped array of int32 indices into coords. All
      values should be in the range 0 <= value < ncoords.
    - prop: array of element property numbers, default None.
    - eltype: string designing the element type, default None.
    """

    def __init__(self,coords=None,elems=None,prop=None,eltype=None):
        """Create a new Mesh from the specified data.

        data is either a tuple of (coords,elems) arrays, or an object having
        a 'toMesh()' method, which should return such a tuple.
        """
        self.coords = None
        self.elems = None
        self.prop = prop
        if elems is None:
            if hasattr(coords,'toMesh'):
                # initialize from a single object
                coords,elems = coords.toMesh()
            elif type(coords) is tuple:
                coords,elems = coords
        try:
            self.coords = Coords(coords)
            self.elems = Connectivity(elems)
            # validate: coords (n,3), elems 2-D with indices inside coords
            if coords.ndim != 2 or coords.shape[-1] != 3 or elems.ndim != 2 or \
               elems.max() >= coords.shape[0] or elems.min() < 0:
                raise ValueError,"Invalid mesh data"
        except:
            raise ValueError,"Invalid initialization data"
        if eltype is None:
            self.eltype = defaultEltype(self.nplex())
        else:
            self.eltype = eltype

    def copy(self):
        """Return a copy using the same data arrays"""
        return Mesh(self.coords,self.elems,self.prop,self.eltype)

    def toFormex(self):
        """Convert a Mesh to a Formex.

        The Formex inherits the element property numbers and eltype from
        the Mesh. Node property numbers however can not be translated to
        the Formex data model.
        """
        return Formex(self.coords[self.elems],self.prop,eltype=self.eltype)

    def data(self):
        """Return the mesh data as a tuple (coords,elems)"""
        return self.coords,self.elems

    def nelems(self):
        """Return the number of elements."""
        return self.elems.shape[0]

    def nplex(self):
        """Return the plexitude (number of nodes per element)."""
        return self.elems.shape[1]

    def ncoords(self):
        """Return the number of nodes."""
        return self.coords.shape[0]

    # alias for ncoords
    npoints = ncoords

    def shape(self):
        """Return the shape of the elems array: (nelems, nplex)."""
        return self.elems.shape

    def bbox(self):
        """Return the bounding box of the mesh coordinates."""
        return self.coords.bbox()

    def nedges(self):
        """Return the number of edges.

        Currently, the edges are not fused!
        """
        try:
            el = getattr(elements,self.eltype.capitalize())
            return self.nelems() * len(el.edges)
        except:
            # unknown element type: report no edges
            return 0

    def centroids(self):
        """Return the centroids of all elements of the Formex.

        The centroid of an element is the point whose coordinates
        are the mean values of all points of the element.
        The return value is a Coords object with nelems points.
        """
        return self.coords[self.elems].mean(axis=1)

    def report(self):
        """Return a short human readable description of the mesh."""
        bb = self.bbox()
        # NOTE(review): 'BBox: %s, %s' is filled with (bb[1], bb[0]) --
        # max before min -- confirm this ordering is intended.
        return """
Shape: %s nodes, %s elems, plexitude %s
BBox: %s, %s
Size: %s
""" % (self.ncoords(),self.nelems(),self.nplex(),bb[1],bb[0],bb[1]-bb[0])

    def compact(self):
        """Renumber the mesh and remove unconnected nodes."""
        nodes = unique1d(self.elems)
        # NOTE(review): this only compacts when the highest used node number
        # exceeds the count of used nodes; otherwise the mesh is returned
        # unchanged -- verify that is the intended condition.
        if nodes[-1] >= nodes.size:
            coords = self.coords[nodes]
            elems = reverseUniqueIndex(nodes)[self.elems]
            return Mesh(coords,elems,eltype=self.eltype)
        else:
            return self

    def extrude(self,n,step=1.,dir=0,autofix=True):
        """Extrude a Mesh in one of the axes directions.

        Returns a new Mesh obtained by extruding the given Mesh
        over n steps of length step in direction of axis dir.
        The returned Mesh has double plexitude of the original.
        This function is usually used to extrude points into lines,
        lines into surfaces and surfaces into volumes.
        By default it will try to fix the connectivity ordering where
        appropriate. If autofix is switched off, the connectivities
        are merely stacked, and the user may have to fix it himself.
        Currently, this function correctly transforms: point1 to line2,
        line2 to quad4, tri3 to wedge6, quad4 to hex8.
        """
        nplex = self.nplex()
        coord2 = self.coords.translate(dir,n*step)
        M = connectMesh(self,Mesh(coord2,self.elems),n)
        if autofix and nplex == 2:
            # fix node ordering for line2 to quad4 extrusions
            M.elems[:,-nplex:] = M.elems[:,-1:-(nplex+1):-1].copy()
        if autofix:
            M.eltype = defaultEltype(M.nplex())
        return M

    def sweep(self,path,autofix=True,**kargs):
        """Sweep a mesh along a path, creating an extrusion

        Returns a new Mesh obtained by sweeping the given Mesh
        over a path.
        The returned Mesh has double plexitude of the original.
        The operation is similar to the extrude() method, but the path
        can be any 3D curve.
        This function is usually used to extrude points into lines,
        lines into surfaces and surfaces into volumes.
        By default it will try to fix the connectivity ordering where
        appropriate. If autofix is switched off, the connectivities
        are merely stacked, and the user may have to fix it himself.
        Currently, this function correctly transforms: point1 to line2,
        line2 to quad4, tri3 to wedge6, quad4 to hex8.
        """
        nplex = self.nplex()
        seq = sweepCoords(self.coords,path,**kargs)
        ML = [ Mesh(x,self.elems) for x in seq ]
        M = connectMeshSequence(ML)
        if autofix and nplex == 2:
            # fix node ordering for line2 to quad4 extrusions
            M.elems[:,-nplex:] = M.elems[:,-1:-(nplex+1):-1].copy()
        if autofix:
            M.eltype = defaultEltype(M.nplex())
        return M

    def convert(self,fromtype,totype):
        """Convert a mesh from element type fromtype to type totype.

        Currently defined conversions:
        'quad4' -> 'tri3'
        """
        fromtype = fromtype.capitalize()
        totype = totype.capitalize()
        try:
            conv = getattr(elements,fromtype).conversion[totype]
        except:
            raise ValueError,"Don't know how to convert from '%s' to '%s'" % (fromtype,totype)
        elems = self.elems[:,conv].reshape(-1,len(conv[0]))
        # debug output left in by the author
        print(elems.shape)
        return Mesh(self.coords,elems)

    @classmethod
    def concatenate(clas,ML):
        """Concatenate a list of meshes of the same plexitude and eltype"""
        if len(set([ m.nplex() for m in ML ])) > 1 or len(set([ m.eltype for m in ML ])) > 1:
            raise ValueError,"Meshes are not of same type/plexitude"
        coords,elems = mergeModels([(m.coords,m.elems) for m in ML])
        elems = concatenate(elems,axis=0)
        return Mesh(coords,elems,eltype=ML[0].eltype)
def connectMesh(mesh1,mesh2,n=1,n1=None,n2=None,eltype=None):
    """Connect two meshes to form a hypermesh.

    mesh1 and mesh2 are two meshes with same topology (shape).
    The two meshes are connected by a higher order mesh with n
    elements in the direction between the two meshes.
    n1 and n2 are node selection indices permitting a permutation of the
    nodes of the base sets in their appearance in the hypermesh.
    This can e.g. be used to achieve circular numbering of the hypermesh.
    """
    # For compatibility, allow meshes to be specified as tuples
    if type(mesh1) is tuple:
        mesh1 = Mesh(mesh1)
    if type(mesh2) is tuple:
        mesh2 = Mesh(mesh2)
    if mesh1.shape() != mesh2.shape():
        raise ValueError,"Meshes are not compatible"
    # compact the node numbering schemes
    mesh1 = mesh1.copy().compact()
    mesh2 = mesh2.copy().compact()
    # Create the interpolations of the coordinates
    x = Coords.interpolate(mesh1.coords,mesh2.coords,n).reshape(-1,3)
    nnod = mesh1.ncoords()
    nplex = mesh1.nplex()
    if n1 is None:
        n1 = range(nplex)
    if n2 is None:
        n2 = range(nplex)
    # element connectivity of one layer: mesh1 nodes followed by
    # mesh2 nodes (offset by nnod), then replicated for each of n layers
    e1 = mesh1.elems[:,n1]
    e2 = mesh2.elems[:,n2] + nnod
    et = concatenate([e1,e2],axis=-1)
    e = concatenate([et+i*nnod for i in range(n)])
    return Mesh(x,e,eltype=eltype)
def connectMeshSequence(ML,loop=False,**kargs):
    """Connect a sequence of meshes pairwise into one hypermesh.

    Each consecutive pair of meshes in ML is connected with
    connectMesh() (extra keyword arguments are passed through) and the
    resulting meshes are concatenated into a single Mesh.  If loop is
    True, the last mesh is additionally connected back to the first.
    """
    if loop:
        sources = ML
        targets = ML[1:] + [ML[0]]
    else:
        sources = ML[:-1]
        targets = ML[1:]
    pieces = []
    for src, tgt in zip(sources, targets):
        pieces.append(connectMesh(src, tgt, **kargs))
    return Mesh.concatenate(pieces)
########### Deprecated #####################
@deprecation("\nUse mesh.connectMesh instead.")
def createWedgeElements(S1,S2,div=1):
"""Create wedge elements between to triangulated surfaces.
6-node wedge elements are created between two input surfaces (S1 and S2).
The keyword div determines the number of created wedge element layers.
Layers with equal thickness are created when an integer value is used for div.
div can also be specified using a list, that defines the interpolation between the two surfaces.
Consequently, this can be used to create layers with unequal thickness.
For example, div=2 gives the same result as [0.,0.5,1.]
"""
#check which surface lays on top
n = S1.areaNormals()[1][0]
if S2.coords[0].distanceFromPlane(S1.coords[0],n) < 0:
S = S2.copy()
S2 = S1.copy()
S1 = S
#determine the number of layers of wedge elements
if type(div) == int:
nlayers = div
else:
nlayers = shape(div)[0] - 1
#create array containing the nodes of the wedge elements
C1 = S1.coords
C2 = S2.coords
coordsWedge = Coords.interpolate(C1,C2,div).reshape(-1,3)
#create array containing wedge connectivity
ncoords = C1.shape[0]
elems = S1.getElems()
elemsWedge = array([]).astype(int)
for i in range(nlayers):
elemsLayer = append(elems,elems+ncoords,1).reshape(-1)
elemsWedge = append(elemsWedge,elemsLayer,0)
elems += ncoords
return coordsWedge,elemsWedge.reshape(-1,6)
@deprecation("\nUse mesh.sweepMesh instead.")
def sweepGrid(nodes,elems,path,scale=1.,angle=0.,a1=None,a2=None):
""" Sweep a quadrilateral mesh along a path
The path should be specified as a (n,2,3) Formex.
The input grid (quadrilaterals) has to be specified with the nodes and
elems and can for example be created with the functions gridRectangle or
gridBetween2Curves.
This quadrilateral grid should be within the YZ-plane.
The quadrilateral grid can be scaled and/or rotated along the path.
There are three options for the first (a1) / last (a2) element of the path:
1) None: No corresponding hexahedral elements
2) 'last': The direction of the first/last element of the path is used to
direct the input grid at the start/end of the path
3) specify a vector: This vector is used to direct the input grid at the
start/end of the path
The resulting hexahedral mesh is returned in terms of nodes and elems.
"""
nodes = Formex(nodes.reshape(-1,1,3))
n = nodes.shape()[0]
s = path.shape()[0]
sc = scale-1.
a = angle
if a1 != None:
if a1 == 'last':
nodes1 = nodes.rotate(rotMatrix(path[0,1]-path[0,0])).translate(path[0,0])
else:
nodes1 = nodes.rotate(rotMatrix(a1)).translate(path[0,0])
else:
nodes1 = Formex([[[0.,0.,0.]]])
for i in range(s-1):
r1 = vectorNormalize(path[i+1,1]-path[i+1,0])[1][0]
r2 = vectorNormalize(path[i,1]-path[i,0])[1][0]
r = r1+r2
nodes1 += nodes.rotate(angle,0).scale(scale).rotate(rotMatrix(r)).translate(path[i+1,0])
scale = scale+sc
angle = angle+a
if a2 != None:
if a2 == 'last':
nodes1 += nodes.rotate(angle,0).scale(scale).rotate(rotMatrix(path[s-1,1]-path[s-1,0])).translate(path[s-1,1])
else:
nodes1 += nodes.rotate(angle,0).scale(scale).rotate(rotMatrix(a2)).translate(path[s-1,1])
if a1 == None:
nodes1 = nodes1[1:]
s = s-1
if a2 == None:
s = s-1
elems0 = elems
elems1 = append(elems0,elems+n,1)
elems = elems1
for i in range(s-1):
elems = append(elems,elems1+(i+1)*n,0)
if s == 0:
elems = array([])
return nodes1[:].reshape(-1,3),elems
# End
|
UTF-8
|
Python
| false | false | 2,011 |
15,479,062,140,905 |
24e0d0e202042f08b97d0833923b50290b78a2f3
|
9e72150c8d6ff633afa2a454238be5516e620e82
|
/src/games/paint.py
|
e45008ccd06f84d8b1e97b5fbece7d17f03a3da8
|
[
"AGPL-3.0-only"
] |
non_permissive
|
smab/playhouse-web
|
https://github.com/smab/playhouse-web
|
327047125e3a597e2e5972303a235ccf915b2fa7
|
54da5a685ca54231ddb602440ef6ecb3fcfd5ea9
|
refs/heads/master
| 2020-05-20T16:31:26.414748 | 2014-10-06T09:53:22 | 2014-10-06T09:53:22 | 16,112,955 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Playhouse: Making buildings into interactive displays using remotely controllable lights.
# Copyright (C) 2014 John Eriksson, Arvid Fahlström Myrman, Jonas Höglund,
# Hannes Leskelä, Christian Lidström, Mattias Palo,
# Markus Videll, Tomas Wickman, Emil Öhman.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import random
import lightgames
def create(client):
    """Factory entry point used by the framework: build a Paint game."""
    print("Creating paint game")
    game = Paint(client)
    return game
class Paint(lightgames.Game):
    """Free-form collaborative painting on the lamp grid.

    Each connected player gets a random RGB colour assigned in sync();
    clicking a cell paints it with that colour on every client's board
    and on the physical lamps.  There is no win condition.
    """

    config_file = "paintconfig.html"
    template_file = "paint.html"

    def __init__(self, client):
        super().__init__(client)

        self.template_vars['module_name'] = 'paint.html'
        self.template_vars['title'] = 'Paint'
        # get_grid_size() returns (rows, cols): index 1 is the width.
        self.template_vars['grid_x'] = lightgames.get_grid_size()[1]
        self.template_vars['grid_y'] = lightgames.get_grid_size()[0]

    def reset(self):
        """Clear all painted cells and switch every lamp off."""
        self.playerColors = {}
        # board[y][x] holds an (r, g, b) tuple, or -1 for "never painted".
        self.board = [[-1 for _ in range(self.template_vars['grid_x'])]
                            for _ in range(self.template_vars['grid_y'])]
        self.reset_lamp_all()

    def sync(self, handler):
        """Initialise a new client: assign a colour and replay the board."""
        self.set_description(handler)
        self.playerColors[handler] = (
            random.randint(0,255),
            random.randint(0,255),
            random.randint(0,255)
        )
        print("Connection %s, color %s" % (handler, self.playerColors[handler]))

        # Send every already-painted cell so the new client catches up.
        for y in range(len(self.board)):
            for x in range(len(self.board[y])):
                if self.board[y][x] != -1:
                    lightgames.send_msg(handler, {'x':x, 'y':y, 'color':self.board[y][x]})

    @lightgames.validate_xy
    def on_message(self, handler, coords):
        """Paint the clicked cell with the sender's colour and broadcast it."""
        x = coords['x']
        y = coords['y']
        color = self.playerColors[handler]

        if x<0 or x>=self.template_vars['grid_x'] or \
           y<0 or y>=self.template_vars['grid_y']:
            print('error: out of bounds!')
            return

        self.board[y][x] = color
        # Broadcast to all connected players.  Fix: the original loop
        # reused the name 'handler', shadowing the sender parameter.
        for client in self.playerColors:
            lightgames.send_msg(client, {'x':x, 'y':y, 'color':color})
        self.send_lamp(x, y, { 'bri': 255, 'rgb': color })

    def on_close(self, handler):
        """Forget the colour assigned to a disconnecting client."""
        # Normalized to zero-argument super() for consistency with __init__.
        super().on_close(handler)
        if handler in self.playerColors:
            del self.playerColors[handler]

    def set_description(self, handler):
        """Send the static rules/description text to one client."""
        message = '<p><p><b>Name:</b> Paint</p><p><b>Players:</b> Any</p><p><b>Rules & Goals:</b> No rules, no goals, only paint. By clicking a cell the player will fill it with their assigned colour, thus painting the grid. Refresh the page to get a new colour.</p></p>'
        lightgames.send_msg(handler, {'rulemessage': (message)})
|
UTF-8
|
Python
| false | false | 2,014 |
14,972,256,010,286 |
129c673b08a97041ff51e672f056b82e0d16f758
|
ffd0ff0492c4190097283dcfa1d51de70c84c1d1
|
/Misc/python/timer.py
|
91ee88c52a2f0e476cbecdbcae0a977504b6706e
|
[] |
no_license
|
keithbrown/Miscellaneous
|
https://github.com/keithbrown/Miscellaneous
|
c8b6b6db97fb0b077e77d46017f75dfedd14514b
|
6d2d44f67fe33fb24674cc8b016df0814cdda753
|
refs/heads/master
| 2021-01-11T01:30:31.928471 | 2014-06-05T15:37:40 | 2014-06-05T15:37:40 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#time test -- measure how close time.sleep(z) comes to z seconds using
#wall-clock minute/second strings (Python 2 script).
z = input('type an integer increment of time')   # NOTE(review): Py2 input() eval()s its input -- unsafe for untrusted users
import time
x1 = time.strftime('%M')   # minutes before sleeping
y1 = time.strftime('%S')   # seconds before sleeping
time.sleep(z)
x2 = time.strftime('%M')   # minutes after sleeping
y2 = time.strftime('%S')   # seconds after sleeping
# Borrow a minute when the seconds counter wrapped past :59.
if int(y2) - int(y1) < 0:
    y2 = int(y2) + 60
    x2 = int(x2) - 1
fy = int(y2) - int(y1)     # elapsed seconds component
sfx = int(x2) - int(x1)    # elapsed minutes component
# NOTE(review): an hour boundary is not handled -- sfx goes negative if
# the sleep crosses minute 59 -> 00; result is then wrong.
fx = sfx * 60
final = fy + fx            # total elapsed seconds
print final
|
UTF-8
|
Python
| false | false | 2,014 |
2,448,131,405,335 |
8ac2b3df20c8b01eb49b38646d7c9dbbf7f4d727
|
16a1a47cb4b608e998dfed3d83ea57793b81da3b
|
/YUMI_check.py
|
ed2806049a2d89d84a4cc23db5aa44b9c6ea43d0
|
[
"GPL-2.0-only",
"LGPL-2.0-or-later",
"LicenseRef-scancode-philippe-de-muyter",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"LicenseRef-scancode-unrar",
"LGPL-2.1-or-later",
"GPL-3.0-only"
] |
non_permissive
|
alesgc/yumi
|
https://github.com/alesgc/yumi
|
3685dde3f89e3f74dfe184504a628d25de9d79c8
|
775d2933c9b5dcd9efce838621a951b5d2c8d635
|
refs/heads/master
| 2023-04-06T09:42:39.800263 | 2013-02-25T23:56:48 | 2013-02-25T23:56:48 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/bin/env python
#
# Data in RemoveDistro.nsh
# ${ElseIf} $DistroName == "Archlinux" ;
# ${AndIf} ${FileExists} $BootDir\$SomeFile2Check
# RMDir /R "$BootDir\multiboot\archlinux"
# ${DeleteMenuEntry} "$BootDir\multiboot\menu\$Config2Use" "label Archlinux" "APPEND /multiboot/menu/archlinux.cfg"
# Delete "$BootDir\multiboot\menu\archlinux.cfg"
# Data in InstallDistro.nsh
# ${ElseIf} $DistroName == "Archlinux"
# ExecWait '"$PLUGINSDIR\7zG.exe" x "$ISOFile" -x![BOOT] -o"$BootDir\multiboot\archlinux\" -y'
# ${AndIf} ${FileExists} $BootDir\$SomeFile2Check
# ${WriteToFile} "label Archlinux$\r$\nmenu label Archlinux$\r$\nMENU INDENT 1$\r$\nkernel vesamenu.c45$\r$\nAPPEND /multiboot/menu/archlinux.cfg" $R0
# SetShellVarContext all
# InitPluginsDir
# File /oname=$PLUGINSDIR\archlinux.cfg "Menu\archlinux.cfg"
# CopyFiles "$PLUGINSDIR\archlinux.cfg" "$BootDir\multiboot\menu\archlinux.cfg"
# Data in YUMI.nsi
# !insertmacro FileNames "Archlinux" multiboot\archlinux\arch\boot\i686\archiso.img
# !insertmacro SetISOFileNames "Archlinux" archlinux-2011.08.19-core-dual.iso "http://mirrors.us.kernel.org/archlinux/iso/2011.08.19/archlinux-2011.08.19-core-dual.iso" archlinux-2011.08.19-core-dual.iso "673" linux.cfg multiboot\archlinux\arch\boot\i686\archiso.img "http://www.archlinux.org" "Archlinux"
import sys, string, os, re
# Cross-checks the distro lists in YUMI's NSIS sources: every distro
# registered in YUMI.nsi must have matching install/remove handlers and
# consistent labels in InstallDistro.nsh / RemoveDistro.nsh.
# (Python 2 script.)

# Regexes for pulling distro names and menu labels out of the NSIS sources.
p1 = re.compile('.*DistroName.*"(.*)".*')
p2 = re.compile('.*"label (.*)" .*')
p3 = re.compile('.*label (.*)\$.*\$.*menu label (.*)\$.*\$.*MENU INDENT.*')
p4 = re.compile('\!insertmacro FileNames "(.+)" ("?.+"?)+')
# NOTE(review): the p4 assignment above is dead code -- it is
# immediately overwritten by the simpler pattern below.
p4 = re.compile(r'\!insertmacro FileNames \"(.+?)\" ');
p5 = re.compile(r'\!insertmacro SetISOFileNames \"(.+?)\" ');

ydistros = []     # distros from "FileNames" macros in YUMI.nsi
isodistros = []   # distros from "SetISOFileNames" macros in YUMI.nsi
idistros = []     # DistroName values found in InstallDistro.nsh
ildistros = []    # labels found in InstallDistro.nsh
rdistros = []     # DistroName values found in RemoveDistro.nsh
rldistros = []    # labels found in RemoveDistro.nsh

# Pass 1: collect distro names from YUMI.nsi (skip blanks and comments).
in_file = open("YUMI.nsi","r")
for in_line in in_file.readlines():
    in_line = string.strip(in_line[:-1])
    if (len(in_line) < 2): continue
    if (in_line[0] == ";"): continue
    if (in_line[0] == "#"): continue
    m4 = p4.match(in_line)
    if m4:
        s = p4.split(in_line)
        # skip entries whose second macro argument is too short to be a path
        if (len(s[2]) < 5): continue
        ydistros.append(s[1])
    m5 = p5.match(in_line)
    if m5:
        s = p5.split(in_line)
        isodistros.append(s[1])
#    pieces = [i.strip('"').strip("'") for i in re.split(r'(\s+|(?<!\\)".*?(?<!\\)"|(?<!\\)\'.*?(?<!\\)\')', in_line) if i.strip()]
#    pieces = [p for p in re.split("( |\\\".*?\\\"|'.*?')", in_line) if p.strip()]
#    print pieces
in_file.close()

# Report set differences between the two YUMI.nsi distro lists.
print "\nChecking YUMI.nsi"
print "\nMenu Item Distros not in SetISOFileNames\n"
for x in ydistros:
    cnt = isodistros.count(x)
    if (cnt == 0): print "   %s" % (x)
print "\nSetISOFileNames Distros not in Menu Item Distros\n"
for x in isodistros:
    cnt = ydistros.count(x)
    if (cnt == 0): print "   %s" % (x)
print "\nDone checking YUMI.nsi\n"

# Pass 2: collect distro names and labels from InstallDistro.nsh and
# report label / menu-label mismatches.
print "\nChecking InstallDistro.nsh\n"
print "\nLabel vs Menu Label [Mismatches]\n"
in_file = open("InstallDistro.nsh","r")
for in_line in in_file.readlines():
    in_line = string.strip(in_line[:-1])
    if (len(in_line) < 2): continue
    if (in_line[0] == ";"): continue
    if (in_line[0] == "#"): continue
    m1 = p1.match(in_line)
    if m1:
        s = p1.split(in_line)
        idistros.append(s[1])
        idist = s[1]
    m3 = p3.match(in_line)
    if m3:
        s = p3.split(in_line)
        if (s[1] != s[2]): print "   %-45s %-45s" % (s[1],s[2])
        ildistros.append(s[1])
#        if (s[2] != idist): print "   %-45s != %-45s" % (distro,s[2])
in_file.close()

# Pass 3: collect distro names and labels from RemoveDistro.nsh; a
# label should match the enclosing DistroName section.
print "\nChecking RemoveDistro.nsh"
print "\nRemove Distros [DistroName != Label]\n"
in_file = open("RemoveDistro.nsh","r")
rdist = ""
for in_line in in_file.readlines():
    in_line = string.strip(in_line[:-1])
    if (len(in_line) < 2): continue
    if (in_line[0] == ";"): continue
    if (in_line[0] == "#"): continue
    m1 = p1.match(in_line)
    if m1:
        s = p1.split(in_line)
        rdistros.append(s[1])
        rdist = s[1]
    m2 = p2.match(in_line)
    if m2:
        s = p2.split(in_line)
        if (s[1] != rdist): print "   %-45s %-45s" % (rdist,s[1])
        rldistros.append(s[1])
in_file.close()

# Final cross-file set differences.
print "\nDistros in Remove not in Install\n"
for x in rdistros:
    cnt = idistros.count(x)
    if (cnt == 0): print "   %s" % (x)
print "\nDistros in Install not in Remove\n"
for x in idistros:
    cnt = rdistros.count(x)
    if (cnt == 0): print "   %s" % (x)
print "\nLabels in Install not found in Remove\n"
for x in ildistros:
    cnt = rldistros.count(x)
    if (cnt == 0): print "   %s" % (x)
print "\nLabels in Remove not found in Install\n"
for x in rldistros:
    cnt = ildistros.count(x)
    if (cnt == 0): print "   %s" % (x)
|
UTF-8
|
Python
| false | false | 2,013 |
15,874,199,156,612 |
1f7a99ea5f5f2ab98b05de981aa2f3edb1fdc756
|
058542f733b55c001bc486f10c93ccbc520f6bd2
|
/platforms/python/edn_format/hash-keyword.py
|
c1b4fd9cd2e7f38f51ced03728b501248fd19733
|
[] |
no_license
|
shaunxcode/edn-tests
|
https://github.com/shaunxcode/edn-tests
|
ff5843ea4665564d128e8e5401f98dbbc2c85fb3
|
88f5f3bb4f03af64a1faa4c0337c0b56365acd47
|
refs/heads/master
| 2020-05-27T13:44:03.652700 | 2013-11-01T19:32:53 | 2013-11-01T19:32:53 | 10,677,289 | 3 | 3 | null | false | 2013-11-01T19:32:53 | 2013-06-13T22:27:34 | 2013-11-01T19:32:53 | 2013-11-01T19:32:53 | 804 | null | 2 | 2 |
PHP
| null | null |
edn_format.Keyword("#foo")
|
UTF-8
|
Python
| false | false | 2,013 |
7,490,422,965,111 |
8f676da310e9664a70e581f995b5bb540eb8675b
|
aa721af1e23e0b86d08eeaa6d8fe257383659e3e
|
/gardenpath/gardener.py
|
3bf67cf29eebc96264015b6eb53dc742f2208228
|
[] |
no_license
|
kmcintyre/gardenpath
|
https://github.com/kmcintyre/gardenpath
|
4a31f8ac5e7f7ddf5eb96a1c70dca3cbd5fcaf68
|
889fbdd082213fa6651a4f32eb835ce28f9defd6
|
refs/heads/master
| 2021-01-10T20:32:56.791420 | 2013-10-06T16:29:28 | 2013-10-06T16:29:28 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from cookielib import CookieJar
from urlparse import urljoin
from urlparse import urlparse
from twisted.web.http_headers import Headers
from twisted.web.client import Agent, CookieAgent, HTTPConnectionPool
from twisted.web.xmlrpc import XMLRPC
from twisted.internet import reactor
from twisted.python import log
import logging
import sys
class HeaderException(Exception):
    """Exception that carries the HTTP headers collected before the failure."""

    def setHeader(self, h):
        """Attach the (possibly partial) header dict to this exception."""
        self.header = h
class TooManyHopsException(HeaderException):
    """Raised when a redirect chain exceeds the configured max_hops limit."""
    pass
class Gardener():
    """Follows a URL down its HTTP redirect chain (the "garden path"),
    collecting the response headers of every hop.

    Built on Twisted's Agent: supports cookies, an optional persistent
    connection pool, a per-request timeout and a bound on the number of
    redirect hops.  (Python 2 / Twisted code -- note ``iteritems`` below.)
    """

    # Keys for the pseudo-headers added to each gathered header dict.
    HTTP_REASON_PHRASE = 'Reason-Phrase'
    HTTP_VERSION = 'Version'
    HTTP_STATUS_CODE = 'Status-Code'
    HTTP_URI = 'URI'
    DNS = 'DNS'
    http_content_type = 'content-type'
    http_header_location = 'location'
    previous = 'previous'        # key linking a hop to the previous hop's headers
    text_html = 'text/html'

    @staticmethod
    def get_header(headers, header):
        """Case-insensitive lookup of *header* in a header dict; None if absent."""
        for h, v in headers.iteritems():
            if h.lower() == header.lower():
                return v
        return None

    '''
    '''
    def __init__(self, common_headers = None, hang_up = True, use_cookies = True, pool = True, dns = True, max_hops = 5, connection_timeout = 10, verbose = False):
        # pool: reuse persistent connections; otherwise one-shot connections.
        if pool:
            self.connection_pool = HTTPConnectionPool(reactor, persistent=True)
        else:
            self.connection_pool = HTTPConnectionPool(reactor, persistent=False)
        # use_cookies: wrap the Agent so cookies survive across redirects.
        if use_cookies:
            cookieJar = CookieJar()
            self.agent = CookieAgent(Agent(reactor, pool = self.connection_pool), cookieJar)
        else:
            self.agent = Agent(reactor, pool = self.connection_pool)
        if verbose:
            log.startLogging(sys.stdout)
        # NOTE(review): the 'dns' argument is accepted but never used.
        self.hang_up = hang_up
        self.common_headers = common_headers
        self.max_hops = max_hops
        self.connection_timeout = connection_timeout

    def _request_error(self, err, url, prev = None):
        """Errback: log the failure for *url* and re-raise it."""
        log.msg('request_error: {0} for {1}'.format(err.value.message, url), logLevel=logging.CRITICAL)
        raise err

    def _gather_headers(self, reply, url, timer = None, prev = None):
        """Callback: turn a Twisted response into a plain header dict.

        Adds pseudo-headers (URI, Status-Code, Version, Reason-Phrase)
        and a 'previous' link to the prior hop's headers.  Stops the
        body producer, since only the headers are of interest.  Raises
        HeaderException (carrying whatever was collected) on any error.
        """
        # Cancel the per-request timeout now that the response arrived.
        if timer is not None and not timer.called:
            timer.cancel()
        headers = {}
        if prev:
            headers[self.previous] = prev
        try:
            headers[self.HTTP_URI] = url
            for header, value in reply.headers.getAllRawHeaders():
                headers[header] = value[0]
            try:
                headers[self.HTTP_STATUS_CODE] = reply.code
            except:
                log.msg('no code', logLevel=logging.DEBUG)
                raise Exception("Bad Response:" + url + " no " + self.HTTP_STATUS_CODE)
            try:
                headers[self.HTTP_VERSION] = reply.version
            except:
                log.msg('no version', logLevel=logging.DEBUG)
                raise Exception("Bad Response:" + url + " no " + self.HTTP_VERSION)
            try:
                headers[self.HTTP_REASON_PHRASE] = reply.phrase
            except:
                log.msg('no phrase', logLevel=logging.DEBUG)
                raise Exception("Bad Response:" + url + " no " + self.HTTP_REASON_PHRASE)
            try:
                # We never read the body: stop the transport producing.
                if reply._transport:
                    log.msg( 'stop producing: {0}'.format(url), logLevel=logging.DEBUG)
                    reply._transport.stopProducing()
                    #if reply._transport._producer:
                    #    print 'Producer', reply._transport._producer.__class__.__name__
                    #    reply._transport._producer.loseConnection()
            except Exception as e:
                log.msg('bad reply?: {0}'.format(e), logLevel=logging.CRITICAL)
                raise Exception("bad reply?" + url)
        except Exception as e:
            # Wrap any failure so callers still get the partial headers.
            he = HeaderException(e)
            he.setHeader(headers)
            raise he
        return headers

    def been_to(self, url, headers):
        """True if *url* already appears anywhere in the hop chain (loop guard)."""
        if url == headers[self.HTTP_URI]:
            return True
        elif self.previous in headers:
            return self.been_to(url, headers[self.previous])
        else:
            return False

    def _follow_(self, headers):
        """Callback: follow a 3xx redirect, or return the final headers."""
        if str(headers[self.HTTP_STATUS_CODE])[:1] == '3' and Gardener.get_header(headers, self.http_header_location):
            moved_to = Gardener.get_header(headers, self.http_header_location)
            log.msg('{0} moved: {1}'.format(headers[self.HTTP_URI], moved_to), logLevel=logging.DEBUG)
            # Resolve relative Location values against the current URI.
            if not urlparse(moved_to).scheme:
                moved_to = urljoin(headers[self.HTTP_URI], moved_to)
            if not self.been_to(moved_to, headers):
                log.msg('chase {0}'.format(moved_to), logLevel=logging.INFO)
                return self.get_url(moved_to, headers)
            else:
                # Redirect loop detected: fail with the collected chain.
                he = HeaderException('Code: ' + str(headers[self.HTTP_STATUS_CODE]) + ' Location and URI resolve to same:' + headers[self.HTTP_URI] + ' ' + moved_to)
                he.setHeader(headers)
                raise he
        elif headers[self.HTTP_STATUS_CODE] == 302 and Gardener.get_header(headers, self.http_content_type) and self.text_html in Gardener.get_header(headers, self.http_content_type):
            # NOTE(review): unreachable in practice -- a 302 with a
            # Location header is handled by the branch above; this only
            # triggers for a 302 *without* Location.
            log.msg('acceptable 302 found', logLevel=logging.DEBUG)
            return headers
        else:
            return headers

    def timeout_request(self, timed_deferred, url):
        """Cancel a request deferred that has not completed in time."""
        if not timed_deferred.called:
            log.msg('cancel request to {0}'.format(url), logLevel=logging.INFO)
            timed_deferred.cancel()
        if timed_deferred.paused:
            # A paused deferred ignores cancel(); retry after another timeout.
            def check_paused(paused_deferred):
                log.msg('paused deferred {0}'.format(paused_deferred), logLevel=logging.INFO)
                paused_deferred.cancel()
            reactor.callLater(self.connection_timeout, check_paused, timed_deferred)

    def _hang_up(self, answer, url):
        """Close any cached pool connections, then pass *answer* through."""
        log.msg('hang up {0}'.format(self.connection_pool._connections.keys()), logLevel=logging.INFO)
        if self.connection_pool._connections or self.connection_pool._timeouts:
            d = self.connection_pool.closeCachedConnections()
            d.addBoth(lambda ign: answer)
            return d
        else:
            log.msg('no hang up necessary: {0}'.format(url), logLevel=logging.DEBUG)
            return answer

    def get_url(self, url, prev = None):
        """Fetch *url* and return a deferred firing with the hop's headers.

        prev is the header dict of the previous hop (None for the first
        request); it is used both for loop detection and to enforce the
        max_hops limit.  Raises TooManyHopsException when the chain is
        too long.
        """
        # Default to http:// for schemeless URLs.
        if not urlparse(url).scheme:
            log.msg('add http:// to {0}'.format(url), logLevel=logging.DEBUG)
            url = "http://" + url
        log.msg('url: {0}'.format(url), logLevel=logging.INFO)
        def previousCount(p):
            # Length of the 'previous' chain hanging off header dict p.
            if p is None:
                return 0
            elif self.previous in p:
                return 1 + previousCount(p[self.previous])
            else:
                return 1
        if previousCount(prev) > self.max_hops:
            log.msg('Too Many Hops {0}'.format(url), logLevel=logging.WARN)
            ex = TooManyHopsException('Too Many Hops')
            ex.setHeader(prev)
            raise ex
        request = self.agent.request('GET', url, Headers(self.common_headers))
        # Arm the per-request timeout; _gather_headers disarms it.
        timer = reactor.callLater(self.connection_timeout, self.timeout_request, request, url)
        request.addCallback(self._gather_headers, url, timer, prev)
        request.addCallback(self._follow_)
        request.addErrback(self._request_error, url, prev)
        # Only the outermost request (hop 0) closes pooled connections.
        if self.hang_up and previousCount(prev) == 0:
            request.addBoth(lambda answer: self._hang_up(answer, url))
        return request
class GardenPathXMLRPCServer(Gardener, XMLRPC):
    """Expose Gardener.get_url over XML-RPC as the 'path' method.

    NOTE(review): Gardener.__init__ is never invoked here, so attributes
    such as self.agent / self.connection_pool are presumably set up
    elsewhere -- verify before relying on this class.
    """
    def __init__(self):
        XMLRPC.__init__(self, allowNone=True)

    def xmlrpc_path(self, url):
        """XML-RPC endpoint: follow *url* and return the headers deferred."""
        log.msg('xmlrpc_path {0}'.format(url), logLevel=logging.DEBUG)
        return self.get_url(url)
|
UTF-8
|
Python
| false | false | 2,013 |
1,185,411,018,242 |
5b58b74ba3bf2b31c33d6cdea2403bd008ac76b6
|
c4cfe7b67a2980cd99b5a1315fc73f77f1f76f2f
|
/synapse/handlers/events.py
|
93dcd40324d8f5ddadc0eaba143f241d1f8de8f9
|
[
"Apache-2.0"
] |
permissive
|
uroborus/synapse
|
https://github.com/uroborus/synapse
|
a271a4cc9d69e34e8d7bfb985f5b171699e367d5
|
270825ab2a3e16bb8ffcdbcea058efd28a38e8e1
|
refs/heads/master
| 2021-01-17T17:12:55.371236 | 2014-09-19T10:41:49 | 2014-09-19T10:41:49 | 24,245,059 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2014 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.internet import defer
from synapse.util.logutils import log_function
from ._base import BaseHandler
import logging
logger = logging.getLogger(__name__)
class EventStreamHandler(BaseHandler):
    """Serves the long-polling /events stream for a user.

    Tracks how many streams each user has open so that presence-style
    "started"/"stopped" signals fire only on the first connect and only
    after a grace period following the last disconnect.
    """

    def __init__(self, hs):
        super(EventStreamHandler, self).__init__(hs)

        # Count of active streams per user
        self._streams_per_user = {}
        # Grace timers per user to delay the "stopped" signal
        self._stop_timer_per_user = {}

        self.distributor = hs.get_distributor()
        self.distributor.declare("started_user_eventstream")
        self.distributor.declare("stopped_user_eventstream")

        self.clock = hs.get_clock()

        self.notifier = hs.get_notifier()

    @defer.inlineCallbacks
    @log_function
    def get_stream(self, auth_user_id, pagin_config, timeout=0):
        """Block (up to *timeout*) for new events visible to the user.

        Returns a dict with 'chunk' (serialized events) plus 'start'
        and 'end' stream tokens for the next poll.
        """
        auth_user = self.hs.parse_userid(auth_user_id)

        try:
            # First stream for this user: either cancel a pending
            # "stopped" grace timer or fire the "started" signal.
            if auth_user not in self._streams_per_user:
                self._streams_per_user[auth_user] = 0
                if auth_user in self._stop_timer_per_user:
                    self.clock.cancel_call_later(
                        self._stop_timer_per_user.pop(auth_user))
                else:
                    self.distributor.fire(
                        "started_user_eventstream", auth_user
                    )
            self._streams_per_user[auth_user] += 1

            # NOTE(review): this is a no-op (assigns None to a value
            # that is already None) -- presumably a default token was
            # intended here.
            if pagin_config.from_token is None:
                pagin_config.from_token = None

            rm_handler = self.hs.get_handlers().room_member_handler
            room_ids = yield rm_handler.get_rooms_for_user(auth_user)

            events, tokens = yield self.notifier.get_events_for(
                auth_user, room_ids, pagin_config, timeout
            )

            chunks = [self.hs.serialize_event(e) for e in events]

            chunk = {
                "chunk": chunks,
                "start": tokens[0].to_string(),
                "end": tokens[1].to_string(),
            }

            defer.returnValue(chunk)

        finally:
            self._streams_per_user[auth_user] -= 1
            if not self._streams_per_user[auth_user]:
                del self._streams_per_user[auth_user]

                # Grace period to allow the client to reconnect
                # before we think they're gone (code uses 30s; an
                # earlier comment claimed 10s).
                def _later():
                    logger.debug(
                        "_later stopped_user_eventstream %s", auth_user
                    )
                    self.distributor.fire(
                        "stopped_user_eventstream", auth_user
                    )
                    del self._stop_timer_per_user[auth_user]

                logger.debug("Scheduling _later: for %s", auth_user)
                self._stop_timer_per_user[auth_user] = (
                    self.clock.call_later(30, _later)
                )
class EventHandler(BaseHandler):

    @defer.inlineCallbacks
    def get_event(self, user, event_id):
        """Retrieve a single specified event.

        Args:
            user (synapse.types.UserID): The user requesting the event
            event_id (str): The event ID to obtain.
        Returns:
            dict: An event, or None if there is no event matching this ID.
        Raises:
            SynapseError if there was a problem retrieving this event, or
            AuthError if the user does not have the rights to inspect this
            event.
        """
        event = yield self.store.get_event(event_id)

        if not event:
            defer.returnValue(None)
            # NOTE(review): dead code -- defer.returnValue raises, so
            # this bare return is never reached.
            return

        if hasattr(event, "room_id"):
            # Room events carry access control: require membership.
            yield self.auth.check_joined_room(event.room_id, user.to_string())

        defer.returnValue(event)
|
UTF-8
|
Python
| false | false | 2,014 |
12,506,944,801,314 |
ff5e06a9555fe27de0cb9cc2b36a2d91711eb029
|
9d8a4b4c7076733d070dc8b0dc0026028366b735
|
/checks/zope2
|
f72ecc31ded8b42df924d4a9c1aa1cd632d97f9e
|
[
"GPL-1.0-or-later",
"GPL-2.0-only"
] |
non_permissive
|
RedTurtle/check-mk-zope
|
https://github.com/RedTurtle/check-mk-zope
|
b0fa4481cf059acf8c8565dd2e3f731b6037e614
|
abf7a67d3f9b04d0e8e64af7953f5e80199c5e40
|
refs/heads/master
| 2021-01-22T05:33:53.868642 | 2012-07-05T13:07:32 | 2012-07-05T13:07:32 | 48,167,208 | 0 | 0 | null | true | 2015-12-17T10:10:17 | 2015-12-17T10:10:17 | 2013-11-03T23:38:28 | 2012-07-05T13:08:39 | 124 | 0 | 0 | 0 | null | null | null |
#!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Biodec 2010 [email protected] |
# +------------------------------------------------------------------+
#
# Example of plugin output
#I zopeinstance=status:OK
#I zopecache=total_objs:116799.0,total_objs_memory:628.0,target_number:20000.0
#I zodbactivity=total_load_count:0.0,total_store_count:0.0,total_connections:67.0
#I zopethreads=total_threads:3.0,free_threads:0.0
#I zopememory=VmPeak:447668224,VmSize:447668224,VmLck:0,VmHWM:160661504,VmRSS:160661504,VmData:170082304,VmStk:393216,VmExe:2215936,VmLib:9814016,VmPTE:663552
## USE factory_settings (since 1.1.11i2)
## For each numeric check, define 4 levels:
## - below the first, is CRITICAL
## - below the second, is WARNING
## - above the third, is WARNING
## - above the fourth, is CRITICAL
factory_settings["zope2zopeinstance_default_levels"] = {}
factory_settings["zope2zopecache_default_levels"] = { 'total_objs' : (None, None, None, None),
'total_objs_memory' : (None, None, None, None),
'target_numer' : (None, None, None, None),
}
factory_settings["zope2zodbactivity_default_levels"] = { 'total_load_count' : (None, None, None, None),
'total_store_count' : (None, None, None, None),
'total_connections' : (None, None, None, None),
}
factory_settings["zope2zopethreads_default_levels"] = {'total_threads' : (None, 0, 6, None),
'free_threads' : (None, None, None, None),
}
factory_settings["zope2zopememory_default_levels"] = { 'VmPeak' : (None, None, None, None),
'VmSize' : (None, None, None, None),
'VmLck' : (None, None, None, None),
'VmHWM' : (None, None, None, None),
'VmRSS' : (None, None, None, None),
'VmData' : (None, None, None, None),
'VmStk' : (None, None, None, None),
'VmExe' : (None, None, None, None),
'VmLib' : (None, None, None, None),
'VmPTE' : (None, None, None, None),
}
def zconf(line):
    """Return the config path from a '[[[path]]]' marker line, else None.

    The agent output separates per-instance sections with lines of the
    form ``[[[<path to zope.conf>]]]``; this extracts the path from such
    a marker and returns None for every other line.
    """
    m = re.match(r'^\[\[\[(.*)\]\]\]$', line)
    if m is None:
        return None
    return m.group(1)
# the inventory function, checks the agent output and create a list of items
def inventory_zope2_factory(plugin):
    """Build the Check_MK inventory function for one zope2 sub-check.

    The returned function scans the agent output for ``[[[zope.conf]]]``
    section markers and registers one item per Zope instance whose
    section contains a '<plugin>=...' line.
    """
    def inventory_zope2(info):
        items = []
        current = None
        for line in info:
            marker = zconf(line[0])
            if marker:
                current = marker
            elif current and line[1].split('=')[0] == plugin:
                items.append((current, "", "zope2%s_default_levels" % plugin))
        return items
    return inventory_zope2
# the check function, process the output
def check_zope2_factory(plugin):
    """Build the Check_MK check function for one zope2 sub-check.

    The returned function locates the [[[zope.conf]]] section matching
    *item* in the agent output, finds the '<plugin>=...' line inside it
    and evaluates each metric against the 4-tuple levels in *params*.
    (Python 2 code: uses dict.has_key.)
    """
    def check_zope2(item, params, info):
        conf = None
        for line in info:
            # Track the current [[[zope.conf]]] section.
            if zconf(line[0]):
                conf = zconf(line[0])
            elif conf:
                # Payload lines look like 'name=metric:val,metric:val,...'
                k, values = line[1].split('=')
                results = [row.split(':') for row in values.split(',')]
#                v = int(float(v))
                if conf==item and k == plugin:
                    ## If plugin is zopeinstane returns direct agent results
                    if plugin == 'zopeinstance':
                        res, val = results[0]
                        if val == 'OK':
                            return (0, "OK - %s" % values, results)
                        else:
                            return (2, "CRIT - %s" % values, results)
                    ## For other plugins, with multiple results returns the worst
                    for res, val in results:
                        if type(params) == dict:
                            # check below the first (CRIT)
                            if params.has_key(res) and params[res][0] and float(val) <= params[res][0]:
                                return (2, "CRIT - %s" % values, results)
                            # check above the fourth (CRIT)
                            elif params.has_key(res) and params[res][3] and float(val) >= params[res][3]:
                                return (2, "CRIT - %s" % values, results)
                            # check below the second (WARN)
                            elif params.has_key(res) and params[res][1] and float(val) <= params[res][1]:
                                return (1, "WARN - %s" % values, results)
                            # check above the third (WARN)
                            elif params.has_key(res) and params[res][2] and float(val) >= params[res][2]:
                                return (1, "WARN - %s" % values, results)
                    # otherwise is in range (OK)
                    return (0, "OK - %s" % values, results)
        # NOTE(review): unlike the other returns, this one has no third
        # (perfdata) element -- confirm Check_MK accepts a 2-tuple here.
        return (3, "UNKNOWN - %s %s not found in agent output" % (plugin, item))
    return check_zope2
# declare the check to Check_MK: one 'zope2.<plugin>' sub-check per
# metric family, each with its factory-built check/inventory pair.
for plugin in [
    'zopeinstance',
    'zopecache',
    'zodbactivity',
    'zopethreads',
    'zopememory',]:
    # tuple layout: (check_function, service description template,
    # has_perfdata flag, inventory_function)
    check_info['zope2.%s' % plugin] = \
        (check_zope2_factory(plugin), "zope2_%s %%s" % plugin, 1, inventory_zope2_factory(plugin))
    ## Use next to check against main.mk parameter settings
    check_default_levels['zope2.%s' % plugin] = "zope2%s_default_levels" % plugin
|
UTF-8
|
Python
| false | false | 2,012 |
4,982,162,098,274 |
673c6cece93b4e07b789018f8d82cadf555953cf
|
dec825e96fc81a9cf3ad8d97396d6cf0391ce027
|
/gistpage/models.py
|
b47425d3537a2a0b85d762ac05d5d14bdd156e28
|
[
"Apache-2.0"
] |
permissive
|
texastribune/django-gistpage
|
https://github.com/texastribune/django-gistpage
|
ba1b53cbcb56101c304bde68f1a522f0c61b8ffd
|
b96137b71009d8e8b8642097bde657b747a7dd68
|
refs/heads/master
| 2021-01-01T20:41:43.364624 | 2014-02-14T06:48:09 | 2014-02-14T06:48:09 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# from django.conf import settings
from django.contrib.sites.models import Site
from django.db import models
class GistPage(models.Model):
    """Similar to a Flatpage, but backed by a Github.com gist."""
    #
    # Source gist backing this page.
    gist_url = models.URLField()
    # Overrides for how the gist is rendered -- presumably raw HTML
    # template, CSS and JS snippets (TODO confirm against the views).
    template = models.TextField(null=True, blank=True)
    style = models.TextField(null=True, blank=True)
    script = models.TextField(null=True, blank=True)

    # integration fields
    site_url = models.CharField(max_length=200, null=True, blank=True,
        help_text="The url for this page on the site.  e.g.: /test/page/")
    sites = models.ManyToManyField(Site,
        # default=settings.SITE_ID
    )

    # lead_art
    # headline
    # summary
|
UTF-8
|
Python
| false | false | 2,014 |
13,400,297,988,944 |
183267320a56619e1ba3d0f3228a544ba65a7915
|
251d7289b2144d783fc5c7ff3405bd53d2af1622
|
/TAIOa.py
|
bdd197ce88f31e42f5ecc0e8252027ed65b40a13
|
[] |
no_license
|
ariel19/AutomataLetterRecognition
|
https://github.com/ariel19/AutomataLetterRecognition
|
f57d0fca70a99d956f61ad7d46ebe8bd974f81a6
|
0b16ee0147e4d0691ba748640432f707b5e6fea8
|
refs/heads/master
| 2021-01-23T03:08:07.306545 | 2014-12-18T14:44:44 | 2014-12-18T14:44:44 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import sys
import os
# File the classifier writes its test-set error counts to.
error_fn = "output_err_test.dat"
# Maps each required CLI flag -> whether it was seen on the command line.
args_to_script = dict()
# specific for bullet
error = 0.0       # error rate computed from error_fn after the run
bullet_val = 0    # x-axis value for bullet a): total instance count
def clean_args_info():
    # Reset the presence flag of every recognised command-line switch,
    # so a later pass over sys.argv can mark which ones were supplied.
    global args_to_script
    for switch in ("-etap", "-wejscieTyp", "-PSOiter", "-PSOs",
                   "-procRozmTest", "-iloscKlas", "-iloscCech",
                   "-iloscPowtorzenWKlasie", "-minLos", "-maxLos",
                   "-zaburzenie", "-dyskretyzacja"):
        args_to_script[switch] = False
clean_args_info()
# specific variables
# These two switches determine the total number of instances ("bullet value").
val1 = "-iloscKlas"
val2 = "-iloscPowtorzenWKlasie"
bul1 = 0
bul2 = 0
cmd_line = "python TAIO2014.py "
# check for valuable args: mark each recognised switch and forward
# everything (except our own script name) to the wrapped command line.
for i in range(len(sys.argv)):
    arg = sys.argv[i]
    # print arg
    if arg in args_to_script:
        args_to_script[arg] = True
    if arg != "TAIOa.py":
        cmd_line += (arg + ' ')
    # for a) bullet calculate instance number
    if arg == val1:
        bul1 = int(sys.argv[i + 1])
        print sys.argv[i + 1]
    elif arg == val2:
        bul2 = int(sys.argv[i + 1])
        print sys.argv[i + 1]
# calculate bullet value: classes * repetitions-per-class
bullet_val = bul1 * bul2
# check if all params are present; abort with 127 if any switch is missing
for k, v in args_to_script.items():
    if v != True:
        print "missing: ", k
        sys.exit(127)
# execute Mateusz script
os.system(cmd_line)
# The wrapped run writes "<errors> <total>"; convert to an error rate.
with open(error_fn, "r") as f:
    nums = f.read().split(' ')
    error = float(nums[0]) / int(nums[1])
os.system("rm input.dat output_class.dat output_err_test.dat output_err_train.dat 2>/dev/null")
# Append "<instance-count> <error-rate>" to the experiment log.
bullet_x = "bullet_a"
with open(bullet_x, "a") as f:
    line = str(bullet_val) + ' ' + str(error) + "\n"
    f.write(line)
|
UTF-8
|
Python
| false | false | 2,014 |
6,631,429,516,337 |
3cdc1de1ed207a1c59944dab96ed557dee363cc7
|
2b54ac381bb52abe5f1a00c1393cbae542ade442
|
/Script/analyze_results.py
|
9e9b165f409048a8c61507b84c87bbf267a81a7b
|
[] |
no_license
|
omidi/classifier-with-dependency
|
https://github.com/omidi/classifier-with-dependency
|
df35f9ce866fd39443b00e79330014629b678446
|
7e876b1cd6610d893298f0d3b1645fcc1443c70a
|
refs/heads/master
| 2021-01-01T16:19:59.279096 | 2014-08-27T14:20:29 | 2014-08-27T14:20:29 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def specificity_sensitivity(results):
    """Print sensitivity/specificity/precision/NPV for both classifiers.

    *results* holds consecutive pairs of records; index [2] is the
    dependency-model posterior and [3] the naive-Bayes posterior, and a
    record is counted correct when its first two entries match.
    Prints a comparison table and returns True.
    """
    dep_FP, dep_TP = 0, 0
    dep_FN, dep_TN = 0, 0
    # NOTE(review): only the naive counters are smoothed (0.0001) to dodge
    # division by zero, while the dependency ratios use try/except below —
    # asymmetric, but preserved as written.
    naive_FP, naive_TP = 0.0001, 0.0001
    naive_FN, naive_TN = 0, 0
    dep_misclassification_probability, naive_misclassification_probability= 0., 0.
    # Pick whichever of the two paired records has the higher posterior.
    dependency_decision = lambda v,w: v if v[2]>w[2] else w
    naive_bayes_decision = lambda v,w: v if v[3]>w[3] else w
    for i in xrange(0, len(results), 2):
        dependency = dependency_decision(results[i], results[i+1])
        naive_bayes = naive_bayes_decision(results[i], results[i+1])
        if dependency[0] == dependency[1]:
            if dependency[0] == 1:
                dep_TP += 1
            else:
                dep_TN += 1
            dep_misclassification_probability += (1.0 - dependency[2])
        else:
            dep_misclassification_probability += dependency[2]
            if dependency[0] == 1:
                dep_FN += 1
            else:
                dep_FP += 1
        if naive_bayes[0] == naive_bayes[1]:
            if naive_bayes[0] == 1:
                naive_TP += 1
            else:
                naive_TN += 1
            naive_misclassification_probability += (1.0 - naive_bayes[3])
        else:
            naive_misclassification_probability += naive_bayes[3]
            if naive_bayes[0] == 1:
                naive_FN += 1
            else:
                naive_FP += 1
    try:
        dep_sensitivity = float(dep_TP) / (dep_TP + dep_FN)
    except ZeroDivisionError:
        dep_sensitivity = 0
    try:
        dep_specificity = float(dep_TN) / (dep_FP + dep_TN)
    except ZeroDivisionError:
        dep_specificity = 0
    try:
        dep_precision = float(dep_TP) / (dep_TP + dep_FP)
    except ZeroDivisionError:
        dep_precision = 0
    try:
        dep_NPV = float(dep_TN) / (dep_TN + dep_FN)
    except ZeroDivisionError:
        dep_NPV = 0
    naive_sensitivity = float(naive_TP) / (naive_TP + naive_FN)
    naive_specificity = float(naive_TN) / (naive_FP + naive_TN)
    naive_precision = float(naive_TP) / (naive_TP + naive_FP)
    naive_NPV = float(naive_TN) / (naive_TN + naive_FN)
    # NOTE(review): dead branch — body is just the expression `None`;
    # presumably a disabled debug dump.
    if dep_sensitivity==1. or dep_specificity==1.:
        # print results
        None
    print 'Sensitivity: ', dep_sensitivity, '\t', naive_sensitivity
    print 'Specificity: ', dep_specificity, '\t', naive_specificity
    print 'Precision: ', dep_precision, '\t', naive_precision
    print 'Negative predictive value: ', dep_NPV, '\t', naive_NPV
    print 'Misclassification probability', dep_misclassification_probability / (len(results) / 2), '\t', \
        naive_misclassification_probability / (len(results) / 2)
    print
    return True
def loss_function(results):
dep_score = 0.
naive_score = 0.
dependency_decision = lambda v,w: v if v[2]>w[2] else w
naive_bayes_decision = lambda v,w: v if v[3]>w[3] else w
for i in xrange(0, len(results), 2):
dependency = dependency_decision(results[i], results[i+1])
naive_bayes = naive_bayes_decision(results[i], results[i+1])
if dependency[0] == dependency[1]:
dep_score += dependency[2]
else:
dep_score -= dependency[2]
if naive_bayes[0] == naive_bayes[1]:
naive_score += naive_bayes[2]
else:
naive_score -= naive_bayes[2]
print dep_score, '\t', naive_score
return True
|
UTF-8
|
Python
| false | false | 2,014 |
17,179,870,507 |
c91fd7d89de0f344e1d6f573d72ccfa22244bd87
|
2627792ae6f034a600cec80023cfc2c0e9514efc
|
/computertemp/scripts/computertemp.in
|
e04be47d312a3143297b55828ec72e59fe5bccb9
|
[
"GPL-1.0-or-later"
] |
non_permissive
|
infinicode/computertemp
|
https://github.com/infinicode/computertemp
|
bc3e693996ff5c94e30a02a1fdfac27f80e29deb
|
79ef91b2260a91a745c7b6868afc658af51f70ec
|
refs/heads/master
| 2021-01-10T09:47:39.652941 | 2010-06-09T19:45:35 | 2010-06-09T19:45:35 | 50,017,596 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (C) 2005-2008 Adolfo González Blázquez <[email protected]>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
If you find any bugs or have any suggestions email: [email protected]
"""
import sys
import getopt
PYTHON_DIR = "@PYTHONDIR@"
if PYTHON_DIR not in sys.path:
sys.path.append(PYTHON_DIR)
from computertemp import computertemp_applet
# Prints help info on screen (Python 2 print statement; the usage text
# is emitted exactly as written in the triple-quoted literal).
def print_help():
    print """Usage: computertemp [OPTIONS]
OPTIONS:
-h, --help Print this help notice.
-d, --debug Enable debug information on tooltip.
"""
# Parses options passed by command line and acts
def parse_commandline_options():
    """Parse sys.argv flags.

    Returns True when -d/--debug was given, False otherwise (including
    when the options are malformed, matching the original lenient
    behaviour). Prints usage and exits on -h/--help.
    """
    debug = False
    try:
        opts, args = getopt.getopt(sys.argv[1:],"dh", ["debug","help"])
    except getopt.GetoptError:
        # Fixed: catch only the parser's own error instead of a bare
        # `except:`, which also swallowed SystemExit/KeyboardInterrupt.
        return False
    for o, a in opts:
        if o in ("-h", "--help"):
            print_help()
            sys.exit()
        elif o in ("-d", "--debug"):
            debug = True
    return debug
if __name__ == "__main__":
    # Entry point: parse flags, then launch the applet; debug enables
    # extra tooltip information.
    debug = parse_commandline_options()
    computertemp_applet.main(debug)
|
UTF-8
|
Python
| false | false | 2,010 |
962,072,690,061 |
509f561c6514875f87824abc179d1442caf820cb
|
531326de0de31dec3a442a4a6d38dbd8c86ccfa6
|
/SConstruct
|
58aea5a3f01da4c668eabdc110d963723478019f
|
[
"BSD-3-Clause"
] |
permissive
|
pixiv/neoagent
|
https://github.com/pixiv/neoagent
|
43c6decc1032d2a7a33bc8315483ee58c8dbcf9c
|
aafe8674ede6b2d9c0ff6c29e9a27fb8844a4bd5
|
refs/heads/master
| 2023-03-30T08:28:36.435988 | 2013-10-02T06:03:59 | 2013-10-02T06:03:59 | 4,193,993 | 0 | 1 |
NOASSERTION
| true | 2023-03-27T03:02:30 | 2012-05-01T16:22:12 | 2019-06-27T06:30:20 | 2023-03-25T12:10:38 | 941 | 2 | 1 | 0 |
C
| false | false |
# -*- coding: utf-8 -*-
import build.util
# Print build/environment information before configuring anything.
build.util.info_print()
# Sub-projects; each directory carries its own SConscript.
progs = [
    'neoagent',
]
[ SConscript( prog + "/SConscript") for prog in progs ]
# Optional targets: `scons debian` builds packaging, `scons doc` the docs.
if 'debian' in COMMAND_LINE_TARGETS:
    SConscript("debian/SConscript")
elif 'doc' in COMMAND_LINE_TARGETS:
    SConscript("doc/SConscript")
|
UTF-8
|
Python
| false | false | 2,013 |
5,299,989,673,506 |
2d1eb75a7863be9b28642b24ecc779732abddf2b
|
1e263d605d4eaf0fd20f90dd2aa4174574e3ebce
|
/components/ally-utilities/ally/general_error.py
|
d834495bd8176c268d901c35d27cc32e9c89bc5a
|
[] |
no_license
|
galiminus/my_liveblog
|
https://github.com/galiminus/my_liveblog
|
698f67174753ff30f8c9590935d6562a79ad2cbf
|
550aa1d0a58fc30aa9faccbfd24c79a0ceb83352
|
refs/heads/master
| 2021-05-26T20:03:13.506295 | 2013-04-23T09:57:53 | 2013-04-23T09:57:53 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
'''
Created on May 31, 2011
@package: ally utilities
@copyright: 2012 Sourcefabric o.p.s.
@license: http://www.gnu.org/licenses/gpl-3.0.txt
@author: Gabriel Nistor
Provides general errors.
'''
# --------------------------------------------------------------------
class DevelopmentError(Exception):
    """Error raised for incorrect development-time usage of an API."""

    def __init__(self, message):
        # Validate eagerly so the mistake surfaces at construction time.
        assert isinstance(message, str), 'Invalid string message %s' % message
        self.message = message
        super(DevelopmentError, self).__init__(message)
|
UTF-8
|
Python
| false | false | 2,013 |
3,393,024,202,237 |
d5af8212c91b32558e43d3ca7fc95016739cc780
|
fc133065c96c529b1771c30b44d4bd4627bcd3b1
|
/sandbox/dead_code/resolver-dot-generator/util2.py
|
1cc54af8943b4651804acb76bbb818d3ff0514af
|
[
"BSD-3-Clause",
"GPL-2.0-only"
] |
non_permissive
|
vapier/pkgcore
|
https://github.com/vapier/pkgcore
|
d3067ddc8bf0d643e9f773cb1d114ea20c380b52
|
35a7e4f4f0fc61dd9c4dc72d35a57e2e9d5b832f
|
refs/heads/master
| 2020-12-14T00:21:50.645537 | 2014-10-26T08:32:22 | 2014-10-26T08:32:22 | 26,106,180 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Copyright: 2006 Brian Harring <[email protected]>
# License: GPL2/BSD
def mangle_name(arg):
    # Render *arg* as a double-quoted dot identifier, escaping any
    # embedded double quotes.
    escaped = str(arg).replace('"', '\\"')
    return '"' + escaped + '"'

def dump_edge(parent, child, text):
    # Labelled dot edge from parent to child.
    names = tuple(mangle_name(part) for part in (parent, child, text))
    return "%s->%s [label=%s];" % names

def dump_package_dep(parent, atom):
    # Red edge marking a package -> atom dependency.
    return "%s->%s [color=red];" % (mangle_name(parent), mangle_name(atom))

def dump_atom_match(atom, match):
    # Blue edge marking an atom -> matched-package resolution.
    return "%s->%s [color=blue];" % (mangle_name(atom), mangle_name(match))
def dump_dot_file_from_graph(graph, filepath, graph_name="dumped_graph"):
    """Write *graph* as a Graphviz digraph to *filepath*.

    *filepath* may be a path string or an open file-like object.
    A file opened here is closed on exit (fixes the original handle
    leak); caller-supplied objects are left open for the caller.
    """
    opened_here = False
    if isinstance(filepath, basestring):
        fd = open(filepath, "w")
        opened_here = True
    else:
        fd = filepath
    if not hasattr(fd, "write"):
        raise TypeError("filepath must be either a file instance or a string filepath: got %s" % filepath)
    try:
        fd.write("digraph %s {\n" % graph_name)
        # write atoms: red filled circles
        fd.write("\tnode [shape=circle,color=red, style=filled,fixedsize=False]\n;")
        for a in graph.atoms.iterkeys():
            fd.write("\t%s\n" % mangle_name(a))
        # write pkgs: blue boxes
        fd.write("\tnode [shape=box,fill=true,color=blue,fixedsize=False];\n")
        for x in graph.pkgs.keys():
            fd.write("\t%s\n" % mangle_name(x))
        l = list(graph.unresolved_atoms())
        if l:
            fd.write("\tUNRESOLVED_ATOM\n")
        # data[0]: parents depending on the atom; data[1]: matching packages.
        for a, data in graph.atoms.iteritems():
            for parent in data[0]:
                fd.write("\t%s\n" % dump_package_dep(parent, a))
            for match in data[1]:
                fd.write("\t%s\n" % dump_atom_match(a, match))
        fd.write("}\n")
    finally:
        if opened_here:
            fd.close()
|
UTF-8
|
Python
| false | false | 2,014 |
10,325,101,415,961 |
58c16681ec66e28e5fbe8adc56d84ae8a8485956
|
589bcb232986a67b7fefe61555bd77687bb8bec7
|
/lab4/4lab_2_1.py
|
e1e269bea5d0bf304e74fc538687949f59f412ca
|
[] |
no_license
|
spetz911/CM
|
https://github.com/spetz911/CM
|
7758bc82b211bdb2d6c205ccd254cb4e463f128a
|
e12c5201755b51d8d2c6d3895f474047bb687bb3
|
refs/heads/master
| 2021-01-02T08:56:29.618860 | 2012-05-15T09:05:19 | 2012-05-15T09:05:19 | 2,492,641 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from math import e
from lib import *
from my_matrix_2 import *
#xy"+2y'-xy=0
#y'(1)=0
#1.5y(2)+y'(2)=e**2
#y'=z
#z'=y-2/x*z
#z(1) = 0
#z(2)-1.5y(2) = e**2
# Interval endpoints as a pair (immediately overwritten below).
x0 = [1., 2.]
#~ koefs = [[0., 1., 0.], [1.5, 1, exp(2)]]
# NOTE(review): x0 is rebound from the list above to the scalar start
# point 1.0 — the list value is dead code.
x0 = 1.
a = 1.
b = 2.
# Shooting-method tolerance and integration step.
eps = .01
h = 0.1
# Two initial guesses eta for y(1), packed as [y, y'] start vectors.
eta0 = 2.
y01 = [eta0,0.]
eta1 = 3.
y02 = [eta1,0.]
def solve(x):
    # Analytic solution y(x) = e**x / x of the boundary-value problem,
    # used to measure the numerical error of both methods.
    exact_numerator = e ** x
    return exact_numerator / x
def F(x,(y,z)):
    # First-order system for x*y'' + 2y' - x*y = 0:  y' = z,  z' = y - (2/x)z.
    # (Python 2 tuple-parameter syntax.)
    return [z,y-2/x*z]
def G(slv):
    # Residual of the right boundary condition 1.5*y(2) + y'(2) = e**2,
    # evaluated at the last solution point slv[-1] = (x, [y, z]).
    return slv[-1][1][1]+1.5*slv[-1][1][0]-e**2
def p(x):
    # Coefficient of y' in the normalised form y'' + p(x)y' + q(x)y = 0.
    return 2/x
def q(x):
    # Coefficient of y in the normalised form.
    return -1
def shooting(h):
    # Shooting method: integrate with Runge-Kutta from two guesses for
    # y(1), then refine the guess by secant iteration on the boundary
    # residual G until |G| <= eps.
    global eta0,eta1
    s = ODY(F,h,x0,y01,a,b)
    s1 = ODY(F,h,x0,y02,a,b)
    slv = s.Rynge_Kytte()
    slv1 = s1.Rynge_Kytte()
    # NOTE(review): if the initial guess already satisfies |G| <= eps the
    # loop never runs and slv2 is unbound — presumably never happens for
    # these inputs; confirm before reuse.
    while abs(G(slv1)) > eps:
        eta2 = eta1 - G(slv1)*(eta1-eta0)/(G(slv1)-G(slv))
        s2 = ODY(F,h,x0,[eta2,0.],a,b)
        slv2 = s2.Rynge_Kytte()
        eta0 = eta1
        eta1 = eta2
        slv = slv1
        slv1 = slv2
    return slv2
def KRM(h):
    # Finite-difference method: emit the tridiagonal system (coefficients
    # scaled by 100) to file "in" and solve it with Tridiagonal_Matrix.
    x = frange(1.,2.+h,h)
    f = open("in","w")
    f.write("-100 100 0\n")
    for k in xrange(1,len(x)-1):
        f.write("%f %f %f %f\n" %((1-p(x[k])*h/2)*100, (-2+h**2*q(x[k]))*100,(1+h*p(x[k])/2)*100,0))
    f.write("-100 %f %f\n" %((1.5*h+1)*100,h*e**2*100))
    f.close()
    m = Tridiagonal_Matrix("in")
    return m.solve()
def main():
    # Compare both methods against the analytic solution; columns are
    # (x, y), absolute error, and a step-halving error estimate.
    global h
    print "Метод стрельбы."
    slv = shooting(h)
    slv2 = shooting(h/2.)
    for i in xrange(len(slv)):
        print "(%f, %f),%g %g" %(slv[i][0],slv[i][1][0],abs(slv[i][1][0]-solve(slv[i][0])),abs(slv[i][1][0]-slv2[2*i][1][0]))
    print "Конечно-разностный метод."
    y = KRM(0.1)
    y2 = KRM(0.1/3)
    for i in xrange(len(y)):
        print "(%f, %f),%g %g" % (1+i*h,y[i],abs(y[i]-solve(1+i*h)),abs(y[i]-y2[2*i]))
    return 0
main()
|
UTF-8
|
Python
| false | false | 2,012 |
7,301,444,447,580 |
196ef558b5ef248befa9a6ca05b661716e1d3d08
|
dff24dbb2b534f161684dacb602fce866308bfa1
|
/src/helloword/lib/modul_12_3.py
|
9ddcf099b05b1db9b79c515212affc1bbb01603e
|
[] |
no_license
|
czelsa/HelloWord
|
https://github.com/czelsa/HelloWord
|
8472fef77ee4c6688e87b609faaa2ce419977b60
|
1103d79a0e5c6b0a83cc1d3f1cfd970aa8ac6fca
|
refs/heads/master
| 2021-05-27T15:32:57.657085 | 2013-08-22T07:55:39 | 2013-08-22T07:55:39 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
import sys
from xml.dom.minidom import parseString
import urllib , urllib2
import xml_tree as xtre
import getopt
# python modul-12_3.py canon1100d a8839b1180ea00fa1cf7c6b74ca01bb5
# python modul_12_3.py canon1100d a8839b1180ea00fa1cf7c6b74ca01bb5
# python modul_12_3.py -o canon1100d -n a8839b1180ea00fa1cf7c6b74ca01bb5
# python modul_12_3.py macbook a8839b1180ea00fa1cf7c6b74ca01bb5
def sortedDictValues3(adict):
    # Return the smallest key of *adict* (here: the lowest price).
    # NOTE(review): despite the name, this sorts and returns *keys*;
    # the print below looks like a leftover debug aid.
    keys = adict.keys()
    keys.sort()
    print keys
    return keys[0]
def drukujSlownik(adict):
for klucz, wartosc in slownik.items():
print klucz, wartosc
def downlNokaut(przedmiot, key_number):
    """Query the Nokaut API for *przedmiot* and return the cheapest offer.

    Returns (price, [url, image_url]) for the lowest price found; also
    prints it. Requires network access and a valid API key.

    Example (requires network):
    >>> downlNokaut('canon1100d', 'a8839b1180ea00fa1cf7c6b74ca01bb5')
    35.0 http://www.nokaut.pl/ochrona-wyswietlacza-aparatu/oslona-na-wyswietlacz-canon-1100d.html
    """
    a_url = 'http://api.nokaut.pl/?format=xml&key=' + key_number + \
        '&method=nokaut.product.getByKeyword&keyword='+przedmiot+'&filters[price_min]'
    #http://api.nokaut.pl/?format=xml&key=a8839b1180ea00fa1cf7c6b74ca01bb5&method=nokaut.product.getByKeyword&keyword=canon1100d&filters[price_min]
    #root = etree.Element("root")
    #print root
    file = urllib.urlopen(a_url)
    data = file.read()
    file.close()
    dom = parseString(data)
    slownik=dict()
    price_min=float()
    cNodes = dom.childNodes
    price_min = dom.getElementsByTagName("price_min")[0].childNodes[0].toxml()
    # Build {price: [name, url, image]} for every offer item in the XML.
    for i in cNodes[0].getElementsByTagName("item"):
        li=list()
        li.extend([i.getElementsByTagName("name")[0].childNodes[0].toxml(), i.getElementsByTagName("url")[0].childNodes[0].toxml(), i.getElementsByTagName("image_mini")[0].childNodes[0].toxml()])
        price_min = i.getElementsByTagName("price_min")[0].childNodes[0].toxml()
        # Prices arrive with a decimal comma; normalise before float().
        price = price_min.replace(',','.')
        price_min = float(price)
        slownik.update({price_min:li})
    k=sortedDictValues3(slownik)
    #drukujSlownik(slownik)
    # Emit and return the entry with the lowest price (name dropped).
    for klucz, wartosc in slownik.items():
        if klucz==k:
            print klucz, wartosc[1:]
            return klucz, wartosc[1:]
def usage():
    # Help text shown for -h/--help or malformed arguments.
    usage = """
-h --help Prints this help
-o --objects Print objects
-n --argument -API key Print argument
"""
    print usage
def end():
    # Abort with exit status 2 (conventional "bad usage" code).
    sys.exit(2)
def main():
    """Entry point: accept positional or -o/-n style arguments.

    Supported invocations (see the examples at the top of the module):
      modul_12_3.py <product> <api-key>
      modul_12_3.py -o <product> -n <api-key>
    Dispatches to downlNokaut(product, api_key).
    """
    if len(sys.argv)==3 or len(sys.argv)==5:
        #if len(sys.argv)==4 or len(sys.argv)==6:
        #print sys.argv[3].join(sys.argv[4])
        # i counts how many of -o/-n were seen; 2 means flag style,
        # 0 means plain positional style.
        i = 0
        try:
            opts, argsy = getopt.getopt(sys.argv[1:], 'ho:n:', ["help"])
            #print len(opts)
        except getopt.GetoptError:
            usage()
            sys.exit(2)
        for opt, args in opts:
            if opt in ("--help", "-h"):
                return usage()
            if opt in ("-o"):
                nazwa = args
                #print nazwa
                i = i + 1
            if opt in ("-n"):
                num = args
                #print num
                i = i + 1
        if i == 2:
            print nazwa
            print num
            return downlNokaut(nazwa, num)
        if i == 0:
            nazwa = sys.argv[1]
            num = sys.argv[2]
            print nazwa
            print num
            return downlNokaut(nazwa, num)
    else:
        # Wrong argument count: just report it.
        print len(sys.argv)
if __name__ == '__main__':
    main()
    #raise SystemExit, "Wrong number of arguments"
    #nazwa = str(sys.argv[1])
    #num = str(sys.argv[2])
    #print downl(nazwa , num)
|
UTF-8
|
Python
| false | false | 2,013 |
12,704,513,273,071 |
75124312fc2043e1b36e593906b0a2ccfc793c12
|
6cc9f3f6683a723c326f653f9f470f65cb09f563
|
/tm/w_db/tosi_db/30.cgi
|
2f11830a035308d149eb0425333096d3dea91d09
|
[] |
no_license
|
xmglw/kunitori-game
|
https://github.com/xmglw/kunitori-game
|
5923b2649306757e6b2645c026234094ec4fc926
|
19ce9dc79c7285af791ac0bb5aa9730aab9251a5
|
refs/heads/master
| 2021-05-27T16:43:39.703565 | 2012-02-28T09:51:43 | 2012-02-28T09:51:43 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
30,建寧,0,0,,,,,,,,,,,,,,,,,100000,0,0,500,,,5000,5000,,0,30,30,11,0,0,,0,0,1,,300,,5000,999,3000,999,5000,999,0,0,100,0,2,,,,,,,,,,,,,,,,,,,,,,,,,0,0,0,0,,,,,,,0,0,0,0,0,,,,,,0,0,0,0,0,,,,,,0,0,0,0,0,,,,,,0,0,0,0,0,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,0,,,,,,3,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,54,64,65,67,75,,,,,,,,,,,,,,,,,,,,,,,,,,1,9,,,,,,,,,,,,,,0,
|
UTF-8
|
Python
| false | false | 2,012 |
1,159,641,216,527 |
62ff25cc10e204cfbe5e74832ddb97a0cc143e64
|
cd58183b2261d7d8160d45f91dfdaed3e331ba01
|
/ar.py
|
a68e69b2a8f079a5ab47c38ca37a5c7df1f1f0fb
|
[] |
no_license
|
nowaits/algorithm_demo
|
https://github.com/nowaits/algorithm_demo
|
2b2736a7e616efc4bc852820139e49530a86b9c5
|
a228dcf92407c2638339653da1d6e710c107bc6b
|
refs/heads/master
| 2016-09-10T00:31:10.096334 | 2014-06-29T15:36:26 | 2014-06-29T15:36:26 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import random
import string
def array(a, b):
    """Return the integers of the half-open range [a, b) as a list.

    NOTE: the name shadows the builtin-module name ``array``; kept for
    backward compatibility with existing callers.
    """
    # list(range(...)) replaces the redundant identity comprehension.
    return list(range(a, b))
def rarray(a, b):
    # Same contents as array(a, b), but shuffled into random order.
    shuffled = array(a, b)
    random.shuffle(shuffled)
    return shuffled
def carray(index_a, index_b):
    """Return the letters 'a'..'z','A'..'Z' sliced at [index_a:index_b).

    Uses string.ascii_letters instead of the Python-2-only,
    locale-dependent string.letters; the default contents are identical.
    """
    return list(string.ascii_letters[index_a:index_b])
def genMatrix(M, N):
    # Build an M-row, N-column matrix whose (m, n) entry is its
    # row-major index m*N + n.
    rows = []
    for m in range(M):
        rows.append([m * N + n for n in range(N)])
    return rows
def printMatrix(A, M, N):
    # Print matrix A row by row, each value right-aligned in 10 columns.
    # Python 2 print statements: the trailing comma keeps a row's values
    # on one line; the bare `print` ends the row.
    for i in range(M):
        for j in range(N):
            print("%10d"% A[i][j]),
        print;
|
UTF-8
|
Python
| false | false | 2,014 |
13,829,794,711,270 |
9e681e696c8d3dfe63141447201b6c0698ddc8e0
|
81b4f6cf648778e56219d106c78e07a337f0702c
|
/nova/virt/smartosapi/kvm_image.py
|
9f5176b8a1d0badc7b80ba27206065c8d34d8072
|
[
"Apache-2.0"
] |
permissive
|
jbijoux/openstack-smartos-nova-grizzly
|
https://github.com/jbijoux/openstack-smartos-nova-grizzly
|
4a7e27f41633220aaab2dd24ed0fb33d348e52a6
|
5f3d04dc608f7b47f010bf999be091b80ef1d2ec
|
refs/heads/master
| 2021-01-18T16:52:54.248637 | 2013-04-12T12:52:46 | 2013-04-12T12:52:46 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# coding=utf-8
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 Hendrik Volkmer, Thijs Metsch
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from nova import utils
from nova.openstack.common import log as logging
from nova.virt.smartosapi import image
LOG = logging.getLogger(__name__)
class KVMImage(image.Image):
    """Registers a downloaded KVM image as a ZFS zvol plus imgadm
    manifest on a SmartOS compute node."""
    def register_image(self):
        # Copy the image file into a freshly created zvol and write the
        # imgadm manifest describing it.
        LOG.debug("Doing the -KVM- thing")
        # Get the actual file size from file (the image might have been
        # converted from qcow2 to raw)
        # and thus become bigger
        self.image_size = os.path.getsize(self.image_temp_target)
        # Round up to whole megabytes so the zvol can hold the image.
        image_size_in_mb = (int(self.image_size) / 1024 / 1024) + 1
        utils.execute("zfs", "create", "-V", "%sM" % image_size_in_mb,
                      "zones/%s" % self.image_uuid)
        utils.execute("dd", "if=%s" % self.image_temp_target,
                      "of=/dev/zvol/rdsk/zones/%s" % self.image_uuid)
        utils.execute("zfs", "snapshot", "zones/%s@dataset" % self.image_uuid)
        manifest_file = "/var/db/imgadm/%s.json" % self.image_uuid
        self.write_manifest_file(manifest_file)
        LOG.debug("KVM image registered at %s" % manifest_file)
        # TODO: Using imgadm does not work because of
        # https://github.com/joyent/smartos-live/issues/110
        # utils.execute("imgadm","install","-m", manifest_file, "-f",
        # image_temp_target)
    def create_manifest(self):
        # Build the imgadm manifest dict for this image.
        # NOTE(review): name/version/url/sha1 and the creator/vendor
        # UUIDs are hard-coded placeholders — presumably meant to come
        # from the Glance image metadata; confirm before production use.
        return {
            "name": "cirros",
            "version": "1.6.3",
            "type": "zvol",
            "description": "Base template to build other templates on",
            "published_at": "2012-05-02T15:15:24.139Z",
            "os": "linux",
            "image_size": self.image_size,
            "files": [
                {
                    "path": "cirros",
                    "sha1": "bdc60b8f3746d786003fe10031a8231abcbf21de",
                    "size": self.image_size,
                    "url": "http://192.168.83.123:9292/v1/images/1415980f-9f1b-4ef6-b02b-05569bbefc17"
                }
            ],
            "requirements": {
                "networks": [
                    {
                        "name": "net0",
                        "description": "public"
                    }
                ],
                "ssh_key": True
            },
            "disk_driver": "virtio",
            "nic_driver": "virtio",
            "uuid": self.image_uuid,
            "creator_uuid": "352971aa-31ba-496c-9ade-a379feaecd52",
            "vendor_uuid": "352971aa-31ba-496c-9ade-a379feaecd52",
            "creator_name": "sdc",
            "platform_type": "smartos",
            "cloud_name": "sdc",
            "urn": "sdc:sdc:cirros:1.6.3",
            # Dynamic timestamps
            "created_at": "2012-05-02T15:15:24.139Z",
            "updated_at": "2012-05-02T15:15:24.139Z"
        }
|
UTF-8
|
Python
| false | false | 2,013 |
10,548,439,695,158 |
b3336ec0cbf59281bbf32953788cee5672020f44
|
0508ad4b330443ea0e72124326476dd6159d4ac0
|
/degree_largest.py
|
b7be3df36858fae6e17484b1fffb4dc6a3d1ef05
|
[
"MIT"
] |
permissive
|
ryaninhust/sampling
|
https://github.com/ryaninhust/sampling
|
7d12e32168f380a87dc1f606c580bd6a191c24f9
|
cf9c8c4460161b1b1f577d8c37d1acb3382351aa
|
refs/heads/master
| 2021-01-25T07:34:06.412930 | 2014-12-10T08:06:46 | 2014-12-10T08:06:46 | 27,218,576 | 1 | 0 | null | false | 2014-12-09T09:16:22 | 2014-11-27T09:40:48 | 2014-12-09T09:00:31 | 2014-12-09T09:16:21 | 40,144 | 0 | 2 | 1 |
Python
| null | null |
from random import sample,random,choice
from core import Algorithm
from egraphs import RemoteGraph
class DegreeLargest(Algorithm):
    """Graph-sampling strategy that always queries the node whose full
    degree exceeds its currently-sampled degree by the largest amount."""
    def update_graph(self, start_node, result):
        # Merge the neighbours returned for *start_node* into the sampled
        # graph: add unseen vertices, then add any missing edges.
        g = self.sampled_graph
        start_id = g.vs['name'].index(start_node)
        for node in result:
            if node['name'] not in g.vs['name']:
                g.add_vertex(**node)
                index = g.vs['name'].index(node['name'])
                g.add_edge(start_id,index)
            else:
                index = g.vs['name'].index(node['name'])
                # Avoid duplicate edges: -1 means no such edge yet.
                if g.get_eid(start_id, index, directed=False, error=False) == -1:
                    g.add_edge(start_id,index)
    def degree_largest(self):
        # Index of the vertex with the biggest gap between its known full
        # degree (stored as the 'degree' attribute) and its sampled degree.
        full_degree = self.sampled_graph.vs['degree']
        sample_degree = self.sampled_graph.degree()
        difference = [x1 - x2 for (x1, x2) in zip(full_degree, sample_degree)]
        return difference.index(max(difference))
    def run(self,k):
        # Perform k queries, always expanding the most under-sampled node.
        n_attribute = len(self.sampled_graph.vertex_attributes())-2
        i = 0
        while i < k:
            query_node = self.sampled_graph.vs['name'][self.degree_largest()]
            query_result = self.egraph.query_node(query_node,n_attribute)
            self.update_graph(query_node,query_result)
            i += 1
if __name__ == "__main__":
fbego_graph = RemoteGraph('data/public.txt')
fuck_dl = DegreeLargest(fbego_graph)
print fuck_dl.validate()
|
UTF-8
|
Python
| false | false | 2,014 |
5,059,471,480,775 |
909779951c09a9edb0fc48f8781722766550ad92
|
ec5a47ae27c7e805b71bece53b6586b3a70b812e
|
/modules/dftba.py
|
7c4316244aee7d697508de723e914ac8a858e221
|
[] |
no_license
|
embolalia/jenni
|
https://github.com/embolalia/jenni
|
1428a94c7b15f783484c8e4f4188cf9552512b29
|
39bab101a59dfe5fa6af1f52c179f98fe716f8e6
|
refs/heads/master
| 2020-04-10T04:29:05.251400 | 2012-08-27T17:57:33 | 2012-08-27T17:57:33 | 3,035,586 | 1 | 6 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
dftba.py - jenni DFT.BA Module
Author: Edward Powell, embolalia.net
About: http://inamidst.com/phenny
This module allows for retrieving stats, shortening and lengthening dft.ba urls.
"""
import urllib

try:
    import json
except ImportError:
    # Python < 2.6: fall back to the external simplejson package.
    # Fixed: the original had a second `except ImportError` on the same
    # try block, which was unreachable — a failing simplejson import
    # inside the first handler propagated instead of printing the hint.
    try:
        import simplejson as json
    except ImportError:
        print("Either update to python 2.6+ or install simplejson")
def shorten(jenni, input):
    """Shorten a URL with DFT.BA"""
    # Groups from the .shorten rule: (url, optional custom short code).
    args = input.groups()
    url = args[0]
    code = None
    if args[1]: code = args[1].lstrip(' ')
    # SOURCE_URL carries the requested custom code, TARGET_URL the long URL.
    if code: params = urllib.urlencode({'TARGET_URL': url, 'SOURCE_URL': code})
    else: params = urllib.urlencode({'TARGET_URL': url})
    r = urllib.urlopen('http://dft.ba/api/shorten.json', params)
    response = json.loads(r.read())['api_response']
    url = response['response']['short_url']
    # Empty short_url means the API rejected the request (e.g. code taken).
    if not url:
        msg = 'Uh oh. Something went wrong with your request.'
        if code: msg = msg + ' I think the code you want is already in use.'
    else:
        msg = 'http://dft.ba/' + url
    jenni.say(msg)
# jenni/phenny command metadata: trigger regex, priority and help example.
shorten.rule = '\.shorten (\S+)( ?\S+)?'
shorten.priority = 'high'
shorten.example = '.shorten http://example.com example'
def expand(jenni, input):
    # Resolve a dft.ba short code mentioned anywhere in a message and
    # announce the long URL it redirects to.
    url = input.group(1)
    params = urllib.urlencode({'SHORT_URL': url})
    r = urllib.urlopen('http://dft.ba/api/expand.json', params)
    response = json.loads(r.read())
    if response['api_response']['response']['status'] == 'error':
        jenni.say('Uh oh. Something went wrong with your request.')
    else:
        longurl = response['api_response']['response']['long_url']
        jenni.say('http://dft.ba/' + url + ' redirects to ' + longurl)
# Fires on any message containing a dft.ba link.
expand.rule = '.*http://dft.ba/(\S+).*'
|
UTF-8
|
Python
| false | false | 2,012 |
7,103,875,956,853 |
ff372b084cec9d9188d86af6fad1f5bc8f96284f
|
804e6b67553274d5b9f88d3f80330981bc568875
|
/server/model/Train.py
|
edefb7a7b2934a64c98f5a3e7a4082bdb513d72e
|
[] |
no_license
|
jiekebo/DSB-POC
|
https://github.com/jiekebo/DSB-POC
|
39325e44a46b7aa97c7c1d1709c7efc043bc35f8
|
d85ae4fab0851431a3c35e174e26c5b2e806b5fd
|
refs/heads/master
| 2016-09-05T16:58:00.716389 | 2014-04-22T20:15:18 | 2014-04-22T20:15:18 | 31,680,050 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
import datetime
import re
class Train:
    """Record describing one train's schedule entry at a station."""
    def __init__(self, id, train_type, train_number, station_uic, direction,
                 track, line, destination_uic, destination_name, destination_country_code,
                 generated, scheduled_arrival, arrival_delay, scheduled_departure,
                 minutes_to_departure, departure_delay, cancelled):
        # Train identification
        self.id = id
        self.train_type = train_type
        self.train_number = train_number
        # Train location
        self.station_uic = station_uic
        self.direction = direction
        self.track = track
        self.line = line
        # Train destination
        self.destination_uic = destination_uic
        self.destination_name = destination_name
        self.destination_country_code = destination_country_code
        # Train schedule
        self.generated = generated
        self.scheduled_arrival = scheduled_arrival
        self.arrival_delay = arrival_delay
        self.scheduled_departure = scheduled_departure
        self.minutes_to_departure = minutes_to_departure
        self.departure_delay = departure_delay
        self.cancelled = cancelled
    @staticmethod
    def _convert_timestamp(str):
        # Convert a string embedding a millisecond epoch timestamp
        # (presumably the ".NET JSON" form "/Date(1398...000)/" — confirm
        # against the feed) to "YYYY-MM-DD HH:MM:SS" in local time.
        # NOTE(review): the parameter name shadows the builtin `str`.
        if str is None:
            return None
        timestamp = re.findall('\d+', str)
        return datetime.datetime.fromtimestamp(int(timestamp[0]) / 1000).strftime("%Y-%m-%d %H:%M:%S")
    def __repr__(self):
        # Compact debug form: number, station, parsed arrival, destination.
        return u"{{train_number:{}, station:{}, arrival:{}, end:{}}}".format(
            self.train_number,
            self.station_uic,
            self._convert_timestamp(self.scheduled_arrival),
            self.destination_uic)
|
UTF-8
|
Python
| false | false | 2,014 |
13,889,924,245,725 |
a78f5d4a5b968fcd8da547c0bd48f5a1a7266de6
|
0454a2937e582ee938713f3b88dd72083b313cfd
|
/build.py
|
43ae9c5562f9dae3bba3a8c3ef8e89fea8399a9e
|
[
"MIT"
] |
permissive
|
jeremejevs/battletag
|
https://github.com/jeremejevs/battletag
|
3f1026af74579857602e3886fff2304b96ab073f
|
4470121ba3640588e56a6c8b1c78d25486bfdd1b
|
refs/heads/master
| 2021-05-03T07:56:08.403625 | 2014-04-12T11:42:52 | 2014-04-12T11:42:52 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import json
import os
import shutil
import zipfile
# Start from a clean build tree (ignore errors if bin/ does not exist).
shutil.rmtree('bin', True)
# Staging layout: bin/tmp/{chrome, firefox/{data,lib}}.
bt = 'bin/tmp'
btc = bt + '/chrome'
btf = bt + '/firefox'
btfd = btf + '/data'
btfl = btf + '/lib'
os.makedirs(btc)
os.makedirs(btfd)
os.makedirs(btfl)
# Shared extension metadata (name/description/version) for both browsers.
with open('common/common.json', 'r') as tmpIn:
    common = json.load(tmpIn)
# Inject the shared metadata into the Chrome manifest.
with open('chrome/manifest.json', 'r') as tmpIn:
    tmpJson = json.load(tmpIn)
tmpJson['name'] = common['name']
tmpJson['short_name'] = common['short_name']
tmpJson['description'] = common['description']
tmpJson['version'] = common['version']
with open(btc + '/manifest.json', 'w') as tmpOut:
    json.dump(tmpJson, tmpOut, ensure_ascii=False)
# Inject the shared metadata into the Firefox (Jetpack) package.json.
with open('firefox/package.json', 'r') as tmpIn:
    tmpJson = json.load(tmpIn)
tmpJson['fullName'] = common['name']
tmpJson['name'] = common['short_name'].replace(' ', '-').lower()
tmpJson['description'] = common['description']
tmpJson['version'] = common['version']
with open(btf + '/package.json', 'w') as tmpOut:
    json.dump(tmpJson, tmpOut, ensure_ascii=False)
# Specialise the shared content script: each browser resolves its own
# CSS/JS resource URLs, and Firefox adds an origin check.
with open('common/inject.js', 'r') as tmpIn:
    inject = tmpIn.read()
with open(btc + '/inject.js', 'w') as tmpOut:
    tmpOut.write(inject
        .replace('@@CSS@@', 'chrome.extension.getURL("battletag.css")')
        .replace('@@JS@@', 'chrome.extension.getURL("battletag.js")')
        .replace('@@CHECK@@', '')
    )
with open(btfd + '/inject.js', 'w') as tmpOut:
    tmpOut.write(inject
        .replace('@@CSS@@', 'self.options.css')
        .replace('@@JS@@', 'self.options.js')
        .replace(
            '@@CHECK@@',
            '\n' +
            '    if (\n' +
            '        \'resource://battletag-at-jeremejevs-dot-com/\' !=\n' +
            '        jshref.substring(0, 43)\n' +
            '    ) {\n' +
            '        return;\n' +
            '    }\n'
        )
    )
# Copy the static assets into each staging directory.
shutil.copy('chrome/background.js', btc)
shutil.copy('common/battletag.js', btc)
shutil.copy('common/battletag.css', btc)
shutil.copy('images/icon-16.png', btc)
shutil.copy('images/icon-48.png', btc)
shutil.copy('images/icon-128.png', btc)
shutil.copy('firefox/main.js', btfl)
shutil.copy('common/battletag.js', btfd)
shutil.copy('common/battletag.css', btfd)
shutil.copy('images/icon-48.png', btfd)
shutil.copy('images/icon-64.png', btfd)
# Package Chrome as a plain zip of the staging directory.
os.chdir(btc)
with zipfile.ZipFile('../../chrome.zip', 'w') as tmpZip:
    for root, dirs, files in os.walk('.'):
        for tmpFile in files:
            tmpZip.write(os.path.join(root, tmpFile))
os.chdir('..')
# Package Firefox via the Add-on SDK's cfx tool (Windows-style env vars).
os.system('CALL "%PYTHON2%" "%CFX%" xpi --pkgdir=firefox')
shutil.move('battletag.xpi', '../firefox.xpi')
|
UTF-8
|
Python
| false | false | 2,014 |
9,577,777,090,483 |
ed9f3ef938ea2cdca408ecb3b8b9e62416ad0242
|
d2bafb2cca9f925c5e1e6d0363f75fddefb05d30
|
/merengueproj/merengue/collab/models.py
|
e8ea4ef9885c6175ef579bcd15161078aeca8491
|
[
"LGPL-3.0-only",
"GPL-3.0-only",
"LGPL-2.1-or-later"
] |
non_permissive
|
FullStackable/merengueproj
|
https://github.com/FullStackable/merengueproj
|
0f26bb4fd5ff6853bff4f78ecea60976778451a3
|
b13f7e9767b6afe0914875f745272c90fafa53d4
|
refs/heads/master
| 2021-05-28T04:22:21.509005 | 2014-04-25T10:56:57 | 2014-04-25T10:57:37 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Copyright (c) 2010 by Yaco Sistemas
#
# This file is part of Merengue.
#
# Merengue is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Merengue is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Merengue. If not, see <http://www.gnu.org/licenses/>.
from django.contrib.auth.models import User
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.translation import ugettext_lazy as _
from transmeta import TransMeta
# Choices for CollabCommentRevisorStatusType.result: the action applied to a
# revised comment in the public view.
REVISOR_RESULTS = (
    ('just', _('Just set status')),
    ('replace', _('Display an alternative comment in public view')),
    ('hide', _('Hide comment from public view')),
)
class CollabCommentType(models.Model):
    """Abstract base for comment-type lookup tables.

    Pairs a short internal ``name`` with a translatable human-readable
    ``label`` (transmeta generates one label column per language).
    """
    __metaclass__ = TransMeta

    # Short internal identifier of the type.
    name = models.CharField(
        _('name'),
        max_length=20,
    )
    # Translated caption shown in the UI (declared translatable in Meta).
    label = models.CharField(
        _('label'),
        max_length=100,
    )

    class Meta:
        abstract = True
        translate = ('label', )

    def __unicode__(self):
        # Comment types display as their (translated) label.
        return self.label
class CollabCommentUserType(CollabCommentType):
    # Concrete lookup table for the type a commenting user assigns to a
    # comment; inherits all fields and behaviour from CollabCommentType.
    pass
class CollabCommentRevisorStatusType(CollabCommentType):
    """Status a revisor can assign to a comment, plus the action it triggers."""

    # Optional badge image displayed for comments carrying this status.
    decorator = models.ImageField(
        _('decorator'),
        upload_to='revisor_status_types',
        blank=True,
        null=True,
    )
    # Resulting action in the public view; see REVISOR_RESULTS above.
    result = models.CharField(
        _('result'),
        max_length=30,
        choices=REVISOR_RESULTS,
        default="just",
        help_text=_('Select the resulting action this status trigger'),
    )
    # Translated explanation (e.g. the replacement text's rationale).
    reason = models.TextField(
        _('reason'),
        blank=True,
        null=True,
    )

    class Meta:
        translate = ('reason', )
class CollabComment(models.Model):
    """A collaborative comment attached to an arbitrary content object.

    The commented object is referenced through a generic foreign key.
    The author is either an authenticated ``user`` or the free-form
    ``user_name``/``user_email``/``user_url`` fields.
    """
    # Generic Foreign Key
    content_type = models.ForeignKey(
        ContentType,
        verbose_name=_('content type'),
        related_name="content_type_set_for_%(class)s",
    )
    object_pk = models.PositiveIntegerField(
        _('object ID'),
    )
    content_object = generic.GenericForeignKey(
        ct_field="content_type",
        fk_field="object_pk",
    )
    # User commenting (Authenticated)
    user = models.ForeignKey(
        User,
        verbose_name=_('user'),
        blank=True,
        null=True,
        related_name="%(class)s_comments",
    )
    # User commenting (Non Authenticated)
    user_name = models.CharField(
        _("user's name"),
        max_length=50,
        blank=True,
    )
    user_email = models.EmailField(
        _("user's email address"),
        blank=True,
    )
    user_url = models.URLField(
        _("user's URL"),
        blank=True,
    )
    # Classification selected by the commenter.
    comment_user_type = models.ForeignKey(
        CollabCommentUserType,
        verbose_name=_('comment user type'),
    )
    comment = models.TextField(
        _('comment'),
    )
    # NOTE(review): auto_now_add overrides default=None on save, so the
    # explicit default appears redundant -- confirm against the Django
    # version in use before removing it.
    submit_date = models.DateTimeField(
        _('date/time submitted'),
        default=None,
        auto_now_add=True,
    )
    ip_address = models.IPAddressField(
        _('IP address'),
        blank=True,
        null=True,
    )

    class Meta:
        ordering = ('submit_date', )
        permissions = [("can_revise", "Can revise comments")]
        verbose_name = _('collaborative comment')
        verbose_name_plural = _('collaborative comments')

    def get_user_name(self):
        """Display name: the authenticated user's full name or username,
        falling back to the free-form ``user_name`` field."""
        return (self.user and (self.user.get_full_name() or self.user.username)) or self.user_name

    def get_user_email(self):
        """Email of the authenticated user (possibly ""), else the
        free-form ``user_email`` field."""
        if self.user:
            return self.user.email or ""
        else:
            return self.user_email

    def get_user_url(self):
        """URL supplied by the commenter; never taken from the User record."""
        return self.user_url

    def get_last_revision_status(self):
        """Return the most recent revision status for this comment, or None.

        NOTE(review): count() followed by [0] issues two queries; a single
        sliced fetch would suffice -- verify against the Django version.
        """
        status_history = self.collabcommentrevisorstatus_set.order_by('-revision_date')
        if status_history.count():
            return status_history[0]
        else:
            return None
class CollabCommentRevisorStatus(models.Model):
    """One revision event: a revisor assigning a status type to a comment."""

    comment = models.ForeignKey(
        CollabComment,
        verbose_name=_('revised comment'),
    )
    # User that revises the comment
    revisor = models.ForeignKey(
        User,
        verbose_name=_('user'),
        blank=True,
        null=True,
        related_name="revised_%(class)s_comments",
    )
    type = models.ForeignKey(
        CollabCommentRevisorStatusType,
        verbose_name=_('revised comment status'),
    )
    # NOTE(review): as with CollabComment.submit_date, auto_now_add makes
    # the explicit default=None redundant.
    revision_date = models.DateTimeField(
        _('date/time revised'),
        default=None,
        auto_now_add=True,
    )
    short_comment = models.CharField(
        _('short comment'),
        max_length=100,
        blank=True,
        null=True,
    )

    def __unicode__(self):
        # A revision displays as its status type's label.
        return u'%s' % self.type
|
UTF-8
|
Python
| false | false | 2,014 |
14,233,521,646,266 |
4af0139b3f57d131a0536796935b65f6cb29ff19
|
597bd1d28ca6cd24cad380d9cec7faa00cc6532f
|
/ternary.py
|
96d6c0df9ed1f8532061d8be73fba532caa9e67d
|
[
"MIT"
] |
permissive
|
lorenzhs/balanced_ternary
|
https://github.com/lorenzhs/balanced_ternary
|
51d70102cb9914611832f0f37d83496bda4249cb
|
14427d07df7198021659b616c935be3b2bb5adcd
|
refs/heads/master
| 2020-05-30T13:29:42.649357 | 2014-08-17T20:19:38 | 2014-08-17T20:19:38 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python3
# Author: Lorenz Hübschle-Schneider
# Lincense: MIT
# encoding: utf-8
# Some fun with Balanced Ternary numbers
class BT(object):
    """Balanced-ternary arithmetic.

    A number is a list of trits, most significant first, each one of
    '+' (1), '0' (0) or '-' (-1); e.g. 5 == ['+', '-', '-'] (9 - 3 - 1).
    The methods take no ``self``: the class is used as a plain namespace,
    e.g. ``BT.int2bt(5)``.
    """

    # Get the balanced ternary representation for an integer
    def int2bt(value):
        """Return the balanced-ternary trit list for integer *value*."""
        if value == 0:
            return ['0']
        # Check for negative numbers: work on the magnitude, negate at the end.
        negate = (value < 0)
        if negate:
            value *= -1
        ret, remainder = [], 0
        while value >= 0 and not (value == 0 and remainder == 1):
            remainder, value = value % 3, value // 3
            if remainder == 0:
                ret.append('0')
            elif remainder == 1:
                ret.append('+')
            else:
                # remainder 2 == 3 - 1: emit '-' and carry one to the next trit.
                ret.append('-')
                value += 1
        ret.reverse()
        if negate:
            ret = BT.negate(ret)
        return ret

    # Convert a balanced ternary number back into an integer
    def bt2int(value):
        """Convert a trit list (or a string like '--+0') back to an int."""
        if type(value) == str:  # parse strings like '--+0'
            return BT.bt2int(list(value))
        result, exponent = 0, len(value) - 1
        for index, element in enumerate(value):
            delta = 3 ** (exponent - index)
            if element == '+':
                result += delta
            elif element == '-':
                result -= delta
        return result

    def pretty(value):
        """Render *value* as its trit string plus its integer value."""
        return '{val} ({int})'.format(val=''.join(value), int=BT.bt2int(value))

    # negate a BT value (multiply by -1)
    def negate(value):
        """Return *value* * -1 by swapping '+' and '-' trits."""
        result = value[:]  # make a copy
        for index, element in enumerate(result):
            if element == '+':
                result[index] = '-'
            elif element == '-':
                result[index] = '+'
        return result

    def is_negative(value):
        """True iff the most significant non-zero trit is '-'."""
        return len(value) > 0 and BT.trunc(value)[0] == '-'

    def is_zero(value):
        """True iff every trit is '0'."""
        return all(map(lambda x: x == '0', value))

    def _align_to_length(value, length):
        """Left-pad *value* with '0' trits up to *length*."""
        assert len(value) <= length
        return ['0'] * (length - len(value)) + value

    def trunc(value):
        """Strip leading '0' trits, keeping a single '0' for zero."""
        for index, elem in enumerate(value):
            if elem != '0':
                return value[index:]
        return ['0']

    def align(valueA, valueB):
        """Pad both values with leading zeros to a common length."""
        length = max(len(valueA), len(valueB))
        return (BT._align_to_length(valueA, length), BT._align_to_length(valueB, length))

    # add two trits (and a carry), returning (sum, carry)
    def trit_add(trit1, trit2, carry='0'):
        """Add two trits plus a carry trit; return (sum_trit, carry_trit)."""
        # this is the lame version but of course you can also do this conditionally
        table = {'+': 1, '0': 0, '-': -1}
        value = sum(map(lambda x: table[x], [trit1, trit2, carry]))
        if value == -3:
            return ('0', '-')
        elif value == -2:
            return ('+', '-')
        elif value == -1:
            return ('-', '0')
        elif value == 0:
            return ('0', '0')
        elif value == 1:
            return ('+', '0')
        elif value == 2:
            return ('-', '+')
        elif value == 3:
            return ('0', '+')

    # add two balanced ternary values
    def add(valueA, valueB):
        """Trit-wise addition with carry propagation."""
        fst, snd = BT.align(valueA, valueB)
        result, carry = [], '0'
        for x, y in reversed(list(zip(fst, snd))):
            digit, carry = BT.trit_add(x, y, carry)
            result.append(digit)
        if carry != '0':
            result.append(carry)
        return list(reversed(result))

    # subtract valueB from valueA
    def sub(valueA, valueB):
        """valueA - valueB, implemented as valueA + (-valueB)."""
        return BT.add(valueA, BT.negate(valueB))

    def mul(valueA, valueB):
        """Shift-and-add multiplication; iterates over the shorter operand."""
        if len(valueB) < len(valueA):
            # BUG FIX: the recursive call must be qualified with the class
            # name -- a bare ``mul`` is not in scope inside the function body
            # and raised NameError whenever the operands needed swapping.
            return BT.mul(valueB, valueA)
        result = ['0']
        if BT.is_zero(valueA) or BT.is_zero(valueB):
            return result  # no need to multiply by zero
        for index, elem in enumerate(reversed(valueA)):
            if elem == '0':
                continue  # nothing to do in this iteration
            temp = valueB if elem == '+' else BT.negate(valueB)
            temp += ['0'] * index  # pad to get alignment right
            result = BT.add(result, temp)
        return result

    def div(valueA, valueB):
        """Integer division; returns (quotient, remainder)."""
        if BT.is_zero(valueB):
            raise ZeroDivisionError("Division of {list} ({int}) by zero!"
                                    .format(list=valueA, int=BT.bt2int(valueA)))
        valA, valB = BT.trunc(valueA), BT.trunc(valueB)
        # 0 / foo = 0
        if BT.is_zero(valA):
            return ['0']
        # foo / 1 = foo
        if valB == ['+']:
            return valA
        # foo / -1 = -foo
        if valB == ['-']:
            return BT.negate(valA)
        # foo / -x = -(foo / x) (remainder unchanged)
        if BT.is_negative(valB):
            # BUG FIX: ``negate`` must be qualified as BT.negate here and in
            # the branch below -- the bare name raised NameError for any
            # division involving a negative operand.
            (quotient, remainder) = BT.div(valA, BT.negate(valB))
            return (BT.negate(quotient), remainder)
        # -foo / x = -(foo / x) (remainder negated)
        if BT.is_negative(valA):
            (quotient, remainder) = BT.div(BT.negate(valA), valB)
            return (BT.negate(quotient), BT.negate(remainder))
        # Repeated subtraction (both operands positive from here on).
        quotient, remainder = ['0'], valA[:]
        while True:  # I kinda need a ">=" here...
            new_remainder = BT.sub(remainder, valB)
            if BT.is_negative(new_remainder):
                break
            remainder = new_remainder
            quotient = BT.add(quotient, ['+'])
        return (quotient, BT.trunc(remainder))
# Execute an operation on two arguments and print the result in a pretty manner
def pretty(intA, intB, op, op_name):
    """Convert two ints to balanced ternary, apply *op*, and print the result."""
    first = BT.int2bt(intA)
    second = BT.int2bt(intB)
    outcome = op(first, second)
    if type(outcome) == tuple:  # for operations with multiple results like div
        rendered = ", ".join(BT.pretty(part) for part in outcome)
    else:
        rendered = BT.pretty(outcome)
    print('{A} {op} {B} = {R}'
          .format(A=BT.pretty(first), op=op_name, B=BT.pretty(second), R=rendered))
# Demo: exercise each of the four operations when run as a script.
if __name__ == '__main__':
    pretty(5, 6, BT.add, 'add')
    pretty(8, -13, BT.sub, 'sub')
    pretty(-4, 5, BT.mul, 'mul')
    pretty(1337, 42, BT.div, 'div')
|
UTF-8
|
Python
| false | false | 2,014 |
10,290,741,689,497 |
6f633395b4df06cbe5c4396e528b98443dbc4006
|
12ed579a77398a18f88cc69fe0d46a0296b42f23
|
/measure/collision.py
|
3bc1c5da2a1dc0f711344dd19fb942a2b58c4c2a
|
[
"LicenseRef-scancode-free-unknown",
"MIT"
] |
non_permissive
|
rwest/MEASURE
|
https://github.com/rwest/MEASURE
|
19c0aa7629092d25b09f6232bf1283394b99c850
|
dd002c61891958d4d2a4facedab6c38b76ddb00d
|
refs/heads/master
| 2021-01-15T22:33:17.539950 | 2010-07-17T21:32:21 | 2010-07-17T21:39:14 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
################################################################################
#
# MEASURE - Master Equation Automatic Solver for Unimolecular REactions
#
# Copyright (c) 2010 by Joshua W. Allen ([email protected])
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
"""
Contains classes that represent the collision models available in MEASURE.
Each collision model provides a collisional energy transfer probability function
that returns the value of :math:`P(E, E^\prime)` for that model.
"""
import math
import numpy
import chempy.constants as constants
################################################################################
class CollisionError(Exception):
    """
    Raised when working with collision models causes exceptional behavior
    for any reason; construct it with a string describing the cause.
    """
################################################################################
def calculateCollisionFrequency(species, T, P, bathGas):
    """
    Lennard-Jones collision frequency of `species` with `bathGas` at
    temperature `T` in K and pressure `P` in Pa.  Generally a slight
    underestimate, but reasonable enough.
    """
    # Ideal-gas number density of the bath gas.
    concentration = P / constants.kB / T
    # Reduced mass; the 6.022e23 factor presumably converts molar weights to
    # per-molecule mass -- TODO confirm molecularWeight units against chempy.
    reducedMass = 1 / (1 / species.molecularWeight + 1 / bathGas.molecularWeight) / 6.022e23
    # Lennard-Jones combining rules: arithmetic-mean sigma, geometric-mean epsilon.
    ljSigma = 0.5 * (species.lennardJones.sigma + bathGas.lennardJones.sigma)
    ljEpsilon = math.sqrt(species.lennardJones.epsilon * bathGas.lennardJones.epsilon)
    # Evaluate configuration integral (empirical fit in reduced temperature).
    reducedT = constants.kB * T / ljEpsilon
    omega22 = (1.16145 * reducedT ** (-0.14874)
               + 0.52487 * math.exp(-0.77320 * reducedT)
               + 2.16178 * math.exp(-2.43787 * reducedT))
    # Evaluate collision frequency
    return (omega22 * math.sqrt(8 * constants.kB * T / math.pi / reducedMass)
            * math.pi * ljSigma ** 2 * concentration)
################################################################################
def calculateCollisionEfficiency(species, T, Elist, densStates, collisionModel, E0, Ereac):
    """
    Calculate an efficiency factor for collisions, particularly useful for the
    modified strong collision method. The collisions involve the given
    `species` with density of states `densStates` in mol/J corresponding to
    energies `Elist` in J/mol, ground-state energy `E0` in J/mol, and first
    reactive energy `Ereac` in J/mol. The collisions occur at temperature `T`
    in K and are described by the collision model `collisionModel`. The
    algorithm here is implemented as described by Chang, Bozzelli, and Dean.

    Raises CollisionError if `collisionModel` is not a
    SingleExponentialDownModel.
    """
    if not isinstance(collisionModel, SingleExponentialDownModel):
        raise CollisionError('Modified strong collision method requires the single exponential down collision model.')
    alpha = collisionModel.alpha

    # Ensure that the barrier height is sufficiently above the ground state
    # Otherwise invalid efficiencies are observed
    if Ereac - E0 < 100000:
        Ereac = E0 + 100000

    Ngrains = len(Elist)
    dE = Elist[1] - Elist[0]

    # First pass: Fe is the ratio of the Boltzmann-weighted density of states
    # above the reactive threshold to its value at the threshold grain.
    FeNum = 0; FeDen = 0
    Delta1 = 0; Delta2 = 0; DeltaN = 0; Delta = 1
    for r in range(Ngrains):
        value = densStates[r] * math.exp(-Elist[r] / constants.R / T)
        if Elist[r] > Ereac:
            FeNum += value * dE
            if FeDen == 0:
                # Captured only once: the first grain above the threshold.
                FeDen = value * constants.R * T
    if FeDen == 0: return 1.0
    Fe = FeNum / FeDen

    # Chang, Bozzelli, and Dean recommend "freezing out" Fe at values greater
    # than 1e6 to avoid issues of roundoff error
    # They claim that the collision efficiency isn't too temperature-dependent
    # in this regime, so it's an okay approximation to use
    if Fe > 1e6: Fe = 1e6

    # Second pass: the Delta correction terms over the sub-threshold grains.
    for r in range(Ngrains):
        value = densStates[r] * math.exp(-Elist[r] / constants.R / T)
        # Delta
        if Elist[r] < Ereac:
            Delta1 += value * dE
            Delta2 += value * dE * math.exp(-(Ereac - Elist[r]) / (Fe * constants.R * T))
        DeltaN += value * dE
    Delta1 /= DeltaN
    Delta2 /= DeltaN
    Delta = Delta1 - (Fe * constants.R * T) / (alpha + Fe * constants.R * T) * Delta2

    beta = (alpha / (alpha + Fe * constants.R * T))**2 / Delta
    if beta > 1:
        # BUG FIX: this used the logging module without importing it, so the
        # warning path itself raised NameError; `import logging` is now at
        # the top of the module.
        logging.warning('Collision efficiency %s calculated at %s K is greater than unity, so it will be set to unity..' % (beta, T))
    if beta < 0:
        raise CollisionError('Invalid collision efficiency %s calculated at %s K.' % (beta, T))
    return beta
################################################################################
class CollisionModel:
    """
    A base class for collision models.  Concrete subclasses (e.g.
    SingleExponentialDownModel) provide generateCollisionMatrix().
    """
    pass
################################################################################
class SingleExponentialDownModel(CollisionModel):
    r"""
    A single exponential down collision model, based around the collisional
    energy transfer probability function

    .. math:: P(E, E^\prime) = C(E^\prime) \exp \left( - \frac{E^\prime - E}{\alpha} \right) \hspace{40pt} E < E^\prime

    where the parameter :math:`\alpha = \left< \Delta E_\mathrm{d} \right>`
    represents the average energy transferred in a deactivating collision.

    =============== =============== ============================================
    Attribute       Type            Description
    =============== =============== ============================================
    `alpha`         ``double``      The average energy transferred in a deactivating collision in J/mol
    =============== =============== ============================================
    """
    # NOTE: docstrings are raw strings because the LaTeX markup contains
    # backslash sequences that are invalid Python string escapes.

    def __init__(self, alpha=0.0):
        self.alpha = alpha

    def generateCollisionMatrix(self, Elist, T, densStates):
        r"""
        Generate and return the collisional transfer probability matrix
        :math:`P(E, E^\prime)` for this model for a given
        set of energies `Elist` in J/mol, temperature `T` in K, and isomer
        density of states `densStates`.

        Raises CollisionError if the normalization produces a negative
        coefficient.
        """
        Ngrains = len(Elist)
        P = numpy.zeros((Ngrains,Ngrains), numpy.float64)

        # First grain with a nonzero density of states; columns below this
        # are left at zero.  (NOTE(review): if densStates is all zeros,
        # start stays -1 and the loops below run over range(-1, Ngrains) --
        # confirm callers never pass an empty density of states.)
        start = -1
        for i in range(Ngrains):
            if densStates[i] > 0 and start == -1:
                start = i
                break

        # Determine unnormalized entries in collisional transfer probability matrix
        for r in range(start, Ngrains):
            P[0:r+1,r] = numpy.exp(-(Elist[r] - Elist[0:r+1]) / self.alpha)
            P[r+1:,r] = numpy.exp(-(Elist[r+1:] - Elist[r]) / self.alpha) * densStates[r+1:] / densStates[r] * numpy.exp(-(Elist[r+1:] - Elist[r]) / (constants.R * T))

        # Normalize using detailed balance
        # This method is much more robust, and corresponds to:
        # [ 1 1 1 1 ...]
        # [ 1 2 2 2 ...]
        # [ 1 2 3 3 ...]
        # [ 1 2 3 4 ...]
        for r in range(start, Ngrains):
            C = (1 - numpy.sum(P[start:r,r])) / numpy.sum(P[r:Ngrains,r])
            # Check for normalization consistency (i.e. all numbers are positive)
            # BUG FIX: previously raised ChemPyError, which is undefined in
            # this module; CollisionError is this module's exception type.
            if C < 0: raise CollisionError('Encountered negative normalization coefficient while normalizing collisional transfer probabilities matrix.')
            P[r,r+1:Ngrains] *= C
            P[r:Ngrains,r] *= C
            P[r,r] -= 1

        # This method is described by Pilling and Holbrook, and corresponds to:
        # [ ... 4 3 2 1 ]
        # [ ... 3 3 2 1 ]
        # [ ... 2 2 2 1 ]
        # [ ... 1 1 1 1 ]
        #for r in range(Ngrains, start, -1):
            #C = (1 - numpy.sum(M[r:Ngrains,r])) / numpy.sum(M[0:r,r])
            ## Check for normalization consistency (i.e. all numbers are positive)
            #if C < 0: raise CollisionError('Encountered negative normalization coefficient while normalizing collisional transfer probabilities matrix.')
            #P[r,0:r-1] *= C
            #P[0:r,r] *= C
            #P[r,r] -= 1

        return P
################################################################################
|
UTF-8
|
Python
| false | false | 2,010 |
13,778,255,109,173 |
db33f03ec6165e4322897803b80b3a7c722feca6
|
48fa0e1de1cb1d33f2839b1cc08167f32b0a7a2f
|
/code/test/test-p2-anne-funcsWithListDict.py
|
c345b3c6f5ab9748855081714ffe6ddea1ffe385
|
[
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later",
"GPL-3.0-only"
] |
non_permissive
|
asayler/CU-CS5525-PythonCompiler
|
https://github.com/asayler/CU-CS5525-PythonCompiler
|
680752665d16a06336ea429a92931deee43766c1
|
d30b119d8ff2e06ed5edf0a0742c0f1c43914aae
|
refs/heads/master
| 2016-09-05T15:12:11.764036 | 2012-12-15T18:15:09 | 2012-12-15T18:15:09 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
# CU CS5525
# Fall 2012
# Python Compiler
#
# test-p2-anne-funcsWithListDict.py
# Test Case
# Subset: p2
# Type: Student
# Tesing: Funcs with Lists and Dicts
#
# Repository:
# https://github.com/asayler/CU-CS5525-PythonCompiler
#
# By :
# Anne Gatchell
# http://annegatchell.com/
# Andy Sayler
# http://www.andysayler.com
# Michael (Mike) Vitousek
# http://csel.cs.colorado.edu/~mivi2269/
# Compiler test fixture (subset P2): exercises nested function definitions,
# list/dict subscripting and assignment, and shadowing of a parameter by a
# nested def.  The exact shape of this code is the point of the test case --
# it is meant to be compiled, not "cleaned up".
def f(a,b):
    # Mutate the caller's container through parameter `a`.
    a[1] = b
    # Rebind `b` locally to a dict with mixed-type keys (3 and True).
    b = {3:1,True:5}
    # Nested function `a` shadows the outer parameter `a` from here on.
    def a(x, b):
        # NOTE(review): b[3] is 1 and {0:False, 4:5} has no key 1, so under
        # CPython this lookup would raise KeyError -- presumably the compiler
        # under test defines its own behaviour here; confirm against the
        # test harness's expected output.
        print x[b[3]]
        return 0
    print a({0:False, 4:5}, b)
    return b
|
UTF-8
|
Python
| false | false | 2,012 |
9,070,970,960,965 |
d333ba147b0739711c4883164dc4052f2ade10c8
|
efdaeaf80c6f9633121082e1f0b985bb3e48267c
|
/haskell_type.py
|
94935e0f7b197ee2f3f2c43807c0ac660f218f15
|
[
"MIT"
] |
permissive
|
rodlogic/SublimeHaskell
|
https://github.com/rodlogic/SublimeHaskell
|
4d66dfb5f8d59d30fadb98b07e2de50838bfe5e2
|
c0e24290ec253637db106357391bfa892ec13ca3
|
refs/heads/master
| 2021-01-18T12:24:31.789874 | 2013-02-23T10:54:56 | 2013-02-23T10:54:56 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import sublime
import sublime_plugin
import re
from sublime_haskell_common import call_ghcmod_and_wait, is_enabled_haskell_command
# Used to find out the module name.
MODULE_RE_STR = r'module\s+([^\s\(]*)' # "module" followed by everything that is neither " " nor "("
MODULE_RE = re.compile(MODULE_RE_STR)
# Parses the output of `ghc-mod type`: start/end row+col plus the quoted type.
# Example: 39 1 40 17 "[Char]"
GHCMOD_TYPE_LINE_RE = re.compile(r'(?P<startrow>\d+) (?P<startcol>\d+) (?P<endrow>\d+) (?P<endcol>\d+) "(?P<type>.*)"')
# Name of the sublime panel in which type information is shown.
TYPE_PANEL_NAME = 'haskell_type_panel'
def parse_ghc_mod_type_line(l):
    """
    Match *l* against GHCMOD_TYPE_LINE_RE and return its `groupdict()`,
    or `None` if the line does not match.
    """
    found = GHCMOD_TYPE_LINE_RE.match(l)
    if found:
        return found.groupdict()
    return found
# TODO rename to SublimeHaskellShowTypeCommand
class SublimeHaskellShowType(sublime_plugin.TextCommand):
    """Show the ghc-mod-reported type of the expression under the cursor
    in an output panel."""

    def ghcmod_get_type_of_cursor(self):
        """Return the type string for the cursor position, or None when the
        module name cannot be determined or ghc-mod returns nothing."""
        view = self.view
        filename = str(view.file_name())
        # Sublime rows/cols are 0-based.
        row, col = view.rowcol(view.sel()[0].a)
        row1, col1 = row + 1, col + 1 # ghc-mod uses rows/cols starting with 1
        module_region = view.find(MODULE_RE_STR, 0)
        if module_region is None:
            sublime.status_message("SublimeHaskell: Could not determine module name!")
            return None
        # RE must match; there is only one group in the RE.
        module = MODULE_RE.match(view.substr(module_region)).group(1)
        ghcmod_args = ['type', filename, module, str(row1), str(col1)]
        out = call_ghcmod_and_wait(ghcmod_args, filename)
        if not out:
            sublime.status_message("ghc-mod %s returned nothing" % ' '.join(ghcmod_args))
            return None
        # ghc-mod type returns the type of the expression at at the given row/col.
        # It can return multiple lines, extending the expression scope by one level each.
        # The last line belongs to the toplevel expression.
        # NOTE(review): subscripting the result of map() only works on
        # Python 2 (Sublime Text 2's runtime); on Python 3 this would need
        # list(map(...)).
        types = map(parse_ghc_mod_type_line, out.strip().splitlines())
        result_type = types[0]['type'] # innermost expression's type
        if not result_type:
            sublime.error_message("ghc-mod type returned unexpected output")
            return None
        return result_type

    def run(self, edit):
        # Entry point invoked by Sublime for the "show type" command.
        result_type = self.ghcmod_get_type_of_cursor()
        if result_type:
            self.write_output(self.view, result_type)

    def write_output(self, view, text):
        "Write text to Sublime's output panel."
        output_view = view.window().get_output_panel(TYPE_PANEL_NAME)
        output_view.set_read_only(False)
        # Write to the output buffer:
        edit = output_view.begin_edit()
        output_view.insert(edit, 0, text)
        output_view.end_edit(edit)
        # Set the selection to the beginning of the view so that "next result" works:
        output_view.set_read_only(True)
        # Show the results panel:
        view.window().run_command('show_panel', {'panel': 'output.' + TYPE_PANEL_NAME})

    def is_enabled(self):
        # Enabled only for Haskell buffers (second arg: no cabal project needed).
        return is_enabled_haskell_command(self.view, False)
# Works only with the cursor being in the name of a toplevel function so far.
class SublimeHaskellInsertType(SublimeHaskellShowType):
    """Insert a `name :: type` signature line above the function whose name
    is under the cursor, reusing ghcmod_get_type_of_cursor() from the base."""

    def run(self, edit):
        view = self.view
        result_type = self.ghcmod_get_type_of_cursor()
        if result_type:
            # TODO get this from ghc-mod as well, e.g. from the range of the type
            word_region = view.word(view.sel()[0])
            line_region = view.line(view.sel()[0])
            # The text between line start and word start is the indentation,
            # which the inserted signature line reproduces.
            indent_region = sublime.Region(line_region.begin(), word_region.begin())
            indent = view.substr(indent_region)
            fn_name = view.substr(word_region)
            signature = "{0}{1} :: {2}\n".format(indent, fn_name, result_type)
            view.insert(edit, line_region.begin(), signature)
|
UTF-8
|
Python
| false | false | 2,013 |
9,397,388,459,462 |
bad0d8c03690e69f06073064eed8515170023e42
|
dbe3a43c1208cf867166bfe101fc797208464b3b
|
/scripts/extract.py
|
67deb8644a1bba022694e2b4e62a5a862d7d3466
|
[] |
no_license
|
UMN-RXInformatics/protk
|
https://github.com/UMN-RXInformatics/protk
|
e616f60233ff0e0cc37c107b6482dbb85ec34994
|
4cee8ceeaea8e5efbeb45e1e62710947f92e84cc
|
refs/heads/master
| 2021-03-12T19:56:48.532918 | 2012-10-29T22:53:29 | 2012-10-29T22:53:29 | 6,448,931 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
"""
ingest.py : Data ingest script for ProTK 2
"""
import os,sys
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
from protk2.db.core import *
from protk2.db.types import *
from protk2.loaders import *
from protk2.parsers import *
from protk2.fs import *
from protk2.praat import *
from protk2.util import *
from protk2.config import *
# Parse --key=value command-line options (returned object supports has_key).
opts = parse_args()

# With no arguments, or with --help, print usage and exit.
if len(sys.argv) == 1 or opts.has_key("help"):
    print("""``ingest.py`` is used to ingest all relevant data for a fileset (textgrids and audio files). It will parse the textgrids and then generate and run Praat analysis scripts for the audio files. It will also run feature extraction for the desired units of analysis (UOAs) (e.g. phonemes, words, speech segments, etc.) extracted from the text grids.
Options
-------
General options:
**WARNING:** directory paths **must** be absolute, otherwise Praat will not properly find files.
* ``--audio=<directory>``: **required** -- directory containing audio files
* ``--execpraat``: run praat analysis
* ``--scriptdir=<directory>``: directory to output generated Praat scripts to
* ``--outputdir=<directory>``: directory to output Praat analysis results to
Feature extraction options:
**WARNING:** feature extraction will only work if you have already imported or generated speech events using ``ingest.py``.
* ``--pitches``: load pitch features for all prosodic events
* ``--intensities``: load intensity features for all prosodic events
* ``--formants``: load formant features for all prosodic events
* ``--shimmer``: load shimmer features for all prosodic events
* ``--jitter``: load jitter features for all prosodic events
* ``--framesize=<float>``: generate frames of specified size (in seconds) as
prosodic events for analysis. You must specify the size when using this option.
* ``--windowsize=<float>``: overlap the frames by the specified size (in seconds). You must specify the size when using this option.""")
    exit(1)

# Optionally execute an external config file to override module defaults.
if opts.has_key("config"):
    if os.path.exists(opts["config"]):
        execfile(opts["config"])

# Set up the database schema and open a session.
db = DatabaseManager(DATABASE)
create_tables(db.engine)
db_session = db.get_session()

# Resolve the audio directory: --audio flag wins over configured AUDIO_PATH.
if not opts.has_key("audio"):
    audio_dir = AUDIO_PATH
else:
    audio_dir = normalize_dir_path(opts["audio"])
load_audio(db_session, audio_dir)
print audio_dir

# Script/output directories default to subdirectories of the audio directory
# and are created up front; the flags below may then override the paths.
script_dir = audio_dir+"script/"
output_dir = audio_dir+"output/"
make_dirs(script_dir)
make_dirs(output_dir)
if opts.has_key("scriptdir"):
    script_dir = normalize_dir_path(opts["scriptdir"])
if opts.has_key("outputdir"):
    output_dir = normalize_dir_path(opts["outputdir"])
normalize = opts.has_key("normalize")

# Generate and run the Praat analysis scripts if requested.
if opts.has_key("execpraat"):
    psr = PraatScriptRunner(audio_dir, script_dir, output_dir)
    psr.generate_scripts()
    psr.run_scripts()

# Load each requested feature family from Praat's output directory.
if opts.has_key("formants"):
    load_formant_sl(db_session, output_dir, normalize=normalize)
if opts.has_key("shimmer"):
    load_shimmers(db_session, output_dir)
if opts.has_key("jitter"):
    load_jitters(db_session, output_dir)
if opts.has_key("intensities"):
    load_intensities(db_session, output_dir,normalize=normalize)
if opts.has_key("pitches"):
    load_pitches(db_session, output_dir,normalize=normalize)
|
UTF-8
|
Python
| false | false | 2,012 |
15,496,242,021,752 |
25287ef5feec7db09afbef4bf30ba17b1ae1f34d
|
dc0afa06c8219d05514cf37a1acc846ffb30a56c
|
/views.py
|
5647ca95689627e0eea8ad1cffa1b5235ad0e520
|
[] |
no_license
|
nerdfiles/ploud-prod
|
https://github.com/nerdfiles/ploud-prod
|
79207a40c126b60beaaf4c1ec7b85c60b8b87fbf
|
008da9313293b0de7cca329077c45e76047ecd9a
|
refs/heads/master
| 2021-01-23T10:04:10.582673 | 2014-02-11T02:57:42 | 2014-02-11T02:57:42 | 2,707,143 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
""" views """
import os.path
from webob import Response
from ptah import view
from pyramid.httpexceptions import HTTPFound
from pyramid.security import authenticated_userid
from root import IPloudApplicationRoot
view.static('ploud', 'ploud.frontend:assets')
class FaviconView(view.View):
    """Serve the bundled favicon for /favicon.ico."""
    view.pview(route='frontend-favicon', layout=None)

    icon_path = os.path.join(
        os.path.dirname(__file__), 'assets', '_img-ui', 'favicon.ico')

    def render(self):
        response = self.request.response
        response.content_type='image/x-icon'
        # BUG FIX: the icon is binary data, so open it in 'rb' (text mode
        # would corrupt it on Windows), and use a context manager so the
        # file handle is not leaked on every request.
        with open(self.icon_path, 'rb') as icon_file:
            return icon_file.read()
class RobotsView(view.View):
    """Serve the bundled robots.txt for /robots.txt."""
    view.pview(route='frontend-robots', layout=None)

    robots_path = os.path.join(
        os.path.dirname(__file__), 'assets', 'robots.txt')

    def render(self):
        response = self.request.response
        response.content_type = 'text/plain'
        # BUG FIX: close the file deterministically instead of leaking one
        # handle per request.
        with open(self.robots_path) as robots_file:
            return robots_file.read()
@view.pview('', IPloudApplicationRoot)
def default(request):
    """Redirect bare application-root requests to the homepage."""
    destination = '/index.html'
    raise HTTPFound(location=destination)
@view.pview(route='frontend-home', layout='page',
            template = view.template('newui/homepage.pt'))
def homepage(request):
    """Render the homepage template.

    NOTE(review): despite its name, ``isanon`` is 1 when a user IS
    authenticated and 0 otherwise -- confirm the template's expectation.
    """
    authenticated = authenticated_userid(request)
    return {'isanon': 1 if authenticated else 0}
@view.pview(route='frontend-themes')
def themes(request):
    """Redirect /themes to its trailing-slash form."""
    target = '/themes/'
    raise HTTPFound(location = target)
# URL routes for the public front-end pages.
view.register_route('frontend-home', '/index.html')
view.register_route('frontend-favicon', '/favicon.ico')
view.register_route('frontend-robots', '/robots.txt')
view.register_route('frontend-policy', '/privacy-policy.html')
view.register_route('frontend-toc', '/terms-of-service.html')
view.register_route('frontend-disabled', '/disabled.html')
view.register_route('frontend-404', '/404.html')
view.register_route('frontend-themes', '/themes')

# Template-only views for the static pages registered above.
view.register_view(
    route='frontend-policy', layout='page',
    template = view.template('newui/privacy-policy.pt'))
view.register_view(
    route='frontend-toc', layout='page',
    template = view.template('newui/terms-of-service.pt'))
view.register_view(
    route='frontend-disabled', layout='page',
    template = view.template('newui/disabled_site.pt'))
view.register_view(
    route='frontend-404', layout='page',
    template = view.template('newui/404.pt'))
|
UTF-8
|
Python
| false | false | 2,014 |
5,128,190,955,896 |
a15504dae97e7eae1ce712533bf90825b32905ae
|
f576751f4f56b5720b97eb91b9ae2dc170dd511d
|
/Knowledge/Area.py
|
544f79a9838ba9957497317ed367181b0ce1d3e0
|
[] |
no_license
|
lobenmai/mudmaker
|
https://github.com/lobenmai/mudmaker
|
f2c74e182bb4e2c764614d1fc5afc2f0cfd9f7fe
|
88a1f06c9d510fa99263cb7c76542c0a4360e756
|
refs/heads/master
| 2016-07-26T16:17:53.530974 | 2013-09-30T17:59:56 | 2013-09-30T17:59:56 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Area.py - Areas of knowledge
from MUD import *
# Areas
#
# Module-level registry of all known areas, filled by initAreas():
# _AREA_LIST preserves load order, _AREAS maps area id -> Area.
_AREA_LIST = []
_AREAS = {}
def initAreas(areas):
    """Populate the module-level area registries from a list of area dicts."""
    # FIX: renamed the loop variable from ``dict``, which shadowed the builtin.
    for areaDict in areas:
        area = Area()
        area.load(areaDict)
        _AREA_LIST.append(area)
        _AREAS[area._id] = area
def getArea(areaID):
    """Return the Area registered under *areaID*, or None if unknown."""
    # dict.get replaces the Python-2-only has_key() plus a second lookup.
    return _AREAS.get(areaID)
#
# Info about a specific area
#
class Area:
    """A learnable area of knowledge with its prerequisites and limits."""

    def __init__(self):
        self._id = None
        self._name = None # name of the area
        self._requires = [] # ids of required areas
        self._maxLearnable = 50 # 0 ~ 150 how much can be learned with teach/study
        self._minLearn = 4 # min. to learn each time
        self._maxLearn = 10 # max. to learn each time

    def load(self, dict):
        """Fill this area from a mapping; missing keys keep their defaults.

        NOTE: the parameter shadows the builtin ``dict``; the name is kept
        for backward compatibility with existing callers.
        """
        # FIX: ``key in mapping`` replaces the Python-2-only has_key().
        if "id" in dict:
            self._id = dict["id"]
        if "name" in dict:
            self._name = dict["name"]
        if "requires" in dict:
            self._requires = dict["requires"]
        if "maxLearnable" in dict:
            self._maxLearnable = dict["maxLearnable"]
        if "minLearn" in dict:
            self._minLearn = dict["minLearn"]
        if "maxLearn" in dict:
            self._maxLearn = dict["maxLearn"]

    def canBeLearned(self, knowledgeL):
        """
        Checks whether this area can be learned by whoever
        has the knowledge in knowledgeL.  Returns the MUD constants
        YES/NO.
        """
        # min proficiency to learn a new area that depends on this one
        MIN_PROFICIENCY = 40
        for r in self._requires:
            ok = 0
            for k in knowledgeL:
                if r in k._areas:
                    area = k._areas[r]
                    if area._proficiency >= MIN_PROFICIENCY:
                        ok = 1
                        break
            if not ok:
                return NO
        return YES
|
UTF-8
|
Python
| false | false | 2,013 |
14,121,852,496,069 |
d4a84e8789372e25a391c7f42f8002ca8ac1f406
|
6db4f09acd23cfbd08748037aaaecc8d4963e43f
|
/pyclient/config.py
|
2dbd3626cc6b7f7f730eff6280a9a0c2a749cde1
|
[] |
no_license
|
EasyMultiPlayer/GameClient
|
https://github.com/EasyMultiPlayer/GameClient
|
65c10c2c24cb41ca6bbb95470659ac5938aaeca7
|
2ba4fffe20525e2cb1e78140a51a3918ec19ee57
|
refs/heads/master
| 2016-09-06T11:48:37.669281 | 2014-02-10T06:35:00 | 2014-02-10T06:35:00 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Network endpoints of the game server.
HOST = "127.0.0.1"
PORT_SUB = "6001"
PORT_PUSH = ["6002", "6003", "6004", "6005", "6006"]
# To be set TBS
# SECURITY NOTE(review): hard-coded credentials committed to source control;
# consider loading them from the environment or an untracked local config.
API_KEY = "a191e1d7a51d4ece08bb38e0b4e0ffa8252c2343"
SHARED_KEY="87a03209ac9a198c71e32d7a70b4b442"
#API_KEY = "e89ebdc6bf313f740a2761dca98f9c3a26a5b8e4"
#SHARED_KEY="85c6cfb69646ed31ba97be3876327546"
SERVER_SHARED_KEY = "b8b15ab61f3fe23b968bf72762ba3d77"
# this is set later in the game
SESSION_KEY = "TBS"
# Users shouldn't change these: keep-alive interval (seconds).
ALIVE_PULSE=60
|
UTF-8
|
Python
| false | false | 2,014 |
11,699,490,925,783 |
5cc29a746b8bc14f771ea44707572d1211de316c
|
87fd48356c541dbc91414d3553831a4f427e39ef
|
/src/loader.py
|
bba1a41a72d14f01b9102f61b8a40d607d14a260
|
[
"MIT"
] |
permissive
|
g6123/cumulus
|
https://github.com/g6123/cumulus
|
c4fff24d3784015a8e760bef4a58d16e823a3fe8
|
bf5aed6c59e04cf31ab71c9585ded12348220dfb
|
refs/heads/master
| 2016-08-03T16:23:03.815377 | 2013-03-02T12:53:20 | 2013-03-02T12:53:20 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*- #
class Application:
    """Per-request application driver for the cumulus framework.

    __init__ normalizes the raw request dict (cookie/session decryption,
    method wrapper, CSRF check, DB handle, auto-escaping, client info);
    load() routes the request to a handler module and returns a Respond.
    """
    def __init__(self, req, conf):
        self.preload = None   # handler module path, or an int HTTP status set early
        self.app = None
        # --- session cookie: accept a raw "k=v; ..." string or a parsed dict
        if type(req['cookie']) == type(''):
            from re import match
            RawCookie = req['cookie'].split(';')
            for e in RawCookie:
                if match('CumulSess=(.*)', e):
                    RawCookie = e.strip().split('=')[1]
                    break
            # no CumulSess cookie found: RawCookie is still a list
            if type(RawCookie) != type(''):
                RawCookie = ''
        elif type(req['cookie']) == type({}):
            RawCookie = req['cookie'].get('CumulSess', '')
        else: raise TypeError('ReqParseError : Cookie must be str or dict.')
        del(req['cookie'])
        KeyPhrase = conf['server']['KeyPhrase']
        from cumulus.utils import decrypt
        # '^' is the cookie-safe stand-in for '='; any decrypt/eval failure
        # yields a fresh empty session.
        # NOTE(review): eval() on decrypted cookie data trusts the KeyPhrase
        # entirely — confirm this is acceptable for the threat model.
        try: session = eval(decrypt(RawCookie.replace('^', '='), KeyPhrase))
        except: session = {}
        req['session'] = session
        # Small wrapper so handlers can compare methods case-insensitively.
        class Method:
            def __init__(self, method):
                self.method = method.upper()
            __str__ = lambda self: self.method
            isMethod = lambda self, target: self.method == target.upper()
        req['method'] = Method(req['method'])
        # --- CSRF: POSTs must carry a valid token, otherwise preload 400
        if conf['server']['CsrfProtect'] and req['method'].isMethod('POST'):
            from cumulus.utils import ChkToken
            if not ChkToken(
                req['post'].get('csrf_token', ''),
                conf['server']['TokenExpire'], KeyPhrase
            ): self.preload = 400
            try: del(req['post']['csrf_token'])
            except KeyError: pass
        # --- optional database handle
        if conf['db']:
            from cumulus.db import cdo
            self.db = cdo(conf)
        else:
            self.db = None
        # --- auto-escaping of GET/POST values per conf['server']['AutoEscape']:
        # a dict/OrderedDict means literal replacement rules; a list/tuple may
        # name 'sql' and/or 'html' built-in escapers.
        EscRule = conf['server']['AutoEscape']
        BlankEscaper = lambda value: value
        if EscRule:
            TypeChk = lambda target, sample: type(target) == type(sample)
            EscapeError = TypeError('Cannot auto escape a query with such type of escaper.')
            from collections import OrderedDict as oDict
            if TypeChk(EscRule, {}) or TypeChk(EscRule, oDict()):
                from cumulus.utils import replace
                escaper = lambda value: replace(value, EscRule)
            elif TypeChk(EscRule, []) or TypeChk(EscRule, ()):
                from cumulus.utils import EscapeHtml
                def escaper(value):
                    if 'sql' in EscRule and self.db:
                        value = self.db.escape(value)
                    if 'html' in EscRule:
                        value = EscapeHtml(value)
                    return value
            else:
                raise EscapeError
            # Dict values are treated as file uploads: only their 'filename'
            # entry is escaped; plain strings are escaped directly.
            def EscapeReq(value):
                if TypeChk(value, {}):
                    value['filename'] = EscapeReq(value['filename'])
                elif TypeChk(value, ''):
                    value = escaper(value)
                return value
            from cumulus.utils import DictMap
            FinalEscaper = lambda d: dict(DictMap(EscapeReq, d))
            req['get'] = FinalEscaper(req['get'])
            req['post'] = FinalEscaper(req['post'])
        # --- client info: address, user agent, mobile flag, geo lookup
        from re import search
        req['client'] = Dict2Obj({
            'addr': req['client'],
            'agent': req['agent'],
            'mobile': False,
            'country': None,
            'region': None
        })
        if search('|'.join(conf['mobile']), req['client'].agent):
            req['client'].mobile = True
        del(req['agent'])
        geoip = conf['server']['GeoIP']
        if geoip:
            from pygeoip import GeoIP
            try:
                # NOTE(review): req['client'] was just replaced by a Dict2Obj,
                # so req['client']['addr'] raises TypeError, which is swallowed
                # below — the GeoIP lookup appears to never take effect; it
                # likely should be req['client'].addr. Confirm.
                geoip = GeoIP(geoip).region_by_addr(req['client']['addr'])
                req['client'].country = geoip['country_code']
                req['client'].region = geoip['region_name']
            except TypeError:
                pass
        self.req = Dict2Obj(req)
        self.conf = conf
    def load(self):
        """Route the request, run the handler, and return a finalized Respond."""
        # Match conf['urls'] (regex, module) pairs against the path; named
        # regex groups are merged into the GET parameters.
        if not self.preload:
            from re import match
            for e in self.conf['urls']:
                MatchResult = match(e[0], self.req.path)
                if MatchResult:
                    module = e[1]
                    break
            try:
                self.preload = module
                self.req.get.update({
                    key: value
                    for key, value in MatchResult.groupdict().items()
                    if value != None
                })
            except NameError:
                # no url pattern matched: `module` was never bound
                self.preload = 404
        from cumulus.utils import LoadModule
        # pre-reviser may replace this Application wholesale
        self = LoadModule(self.conf['reviser']['pre'], self)
        self.log('init')
        if type(self.preload) == type('') or type(self.preload) == type(lambda: None):
            self.log('preload', str(self.preload))
            try:
                from cumulus.utils import LoadModule
                self.app = LoadModule(self.preload)
                self.resp = self.app(self)
            except:
                # Handler raised: build a 500 response, with a traceback dict
                # only when debugging/logging is enabled.
                if self.conf['error']['debug'] or self.conf['path']['log']:
                    from sys import exc_info
                    exc_info = exc_info()
                    from traceback import extract_tb
                    traceback = extract_tb(exc_info[2])[-1]
                    from os.path import abspath
                    filepath = abspath(traceback[0])
                    line = traceback[1]
                    ErrDetail = exc_info[1]
                    ErrType = ErrDetail.__class__.__name__
                    detail = ErrType+' : '+str(ErrDetail)
                    traceback = {
                        'filepath': filepath, 'line': line,
                        'detail': detail
                    }
                    self.log('error', filepath+' ('+str(line)+') : '+detail)
                else:
                    traceback = False
                self.resp = {
                    'header': { 'status': 500 },
                    'traceback': traceback
                }
        else:
            # preload is an int status (404 unmatched url, 400 CSRF failure)
            self.resp = { 'header': { 'status': self.preload } }
            if self.preload == 404:
                self.log('preload', '404')
                self.log(
                    'error', 'PreloadError : No module was assigned for the url.'
                )
            elif self.preload == 400:
                ErrMsg = 'CSRF protection token is invalid'
                if self.conf['error']['debug']:
                    self.resp['traceback'] = {
                        'filepath': 'Cannot specify file path for CsrfError.', 'line': 0,
                        'detail': ErrMsg
                    }
                self.log('preload', '400')
                self.log('error', 'PreloadError : '+ErrMsg)
        if self.db:
            self.db.close()
        self.log(end=True)
        return Respond(self.resp, self.conf, req=self.req)
    def log(self, context='custom', content='', end=False):
        """Buffer request-scoped log lines; flush to the log file on end=True.

        contexts: 'init' starts the buffer, 'preload' fills the '(%s)'
        placeholder with the handler name/status, 'error'/'custom' append.
        """
        if not self.conf['path']['log']: return
        if end:
            LogFile = open(self.conf['path']['log'], 'a')
            LogFile.write('\n'+self.LogBuf.strip()+'\n')
        elif context == 'init':
            from time import localtime; now = localtime();
            date = '-'.join([str(now.tm_year), str(now.tm_mon), str(now.tm_mday)])
            time = ':'.join([str(now.tm_hour), str(now.tm_min), str(now.tm_sec)])
            self.LogBuf = str(self.req.method)+' '+self.req.path+' (%s)'
            self.LogBuf += ' <- '+self.req.client.addr+' ('+date+' '+time+')\n'
            self.LogBuf += ' '+str(self.req.get)+'\n'
            self.LogBuf += ' '+str(self.req.post)+'\n'
            self.LogBuf += ' '+str(self.req.session)+'\n'
        elif context == 'preload':
            self.LogBuf = self.LogBuf % content
        elif context in ['error', 'custom']:
            self.LogBuf += ' '+content+'\n'
class Respond:
    """Normalizes a handler's return value into a complete response dict.

    tidy() coerces the many accepted return shapes (dict/str/bytes/int) into
    {'header': {...}, 'content': bytes, 'traceback': dict|False}; LoadError()
    delegates 4xx/5xx responses to the configured error handler.
    """
    def __init__(self, resp, conf, req=None):
        self.resp = resp       # raw handler return value
        self.conf = conf
        self.req = req         # needed for session merge; may be None
    def tidy(self):
        """Validate/default every response field; on a malformed response,
        replace it with a 500 and (in debug) a synthetic traceback."""
        resp = self.resp
        DefaultHeader = lambda status: {
            'status': status, 'location': '',
            'type': 'text/html', 'session': False
        }
        ErrMsg = None
        # Accept dict as-is, str/bytes as content, int as status.
        if type(resp) == type({}): pass
        elif type(resp) == type('') or type(resp) == type(b''):
            resp = { 'content': resp }
        elif type(resp) == type(0):
            resp = { 'header': { 'status': resp } }
        else:
            ErrMsg = 'Type of return must be dict, str, bytes or int.'
        # Each field below: KeyError -> fill default, ValueError -> record error.
        if not ErrMsg:
            try:
                if type(resp['header']) != type({}): raise ValueError
            except KeyError: resp['header'] = DefaultHeader(200)
            except ValueError:
                ErrMsg = "Type of 'header' must be dict."
            try:
                if type(resp['header']['status']) != type(0): raise ValueError
            except KeyError:
                # a location with no explicit status implies a 307 redirect
                if resp['header'].get('location', False):
                    resp['header']['status'] = 307
                else: resp['header']['status'] = 200
            except ValueError:
                ErrMsg = "Type of 'status' must be int."
            try:
                location = resp['header']['location']
                if type(location) != type(''): raise ValueError
            except KeyError: resp['header']['location'] = ''
            except ValueError:
                ErrMsg = "Type of 'location' must be str or bytes."
            try:
                # content type must look like "major/minor"
                if not (
                    type(resp['header']['type']) == type('') and
                    len(resp['header']['type'].split('/')) == 2
                ): raise ValueError
            except KeyError: resp['header']['type'] = 'text/html'
            except ValueError:
                ErrMsg = "Value or type of 'type' is invalid."
            try:
                # session: dict merges into the request session, None clears
                # it, False/absent leaves the cookie untouched
                session = self.req.session
                if session == False: raise KeyError
                NewItem = resp['header']['session']
                if type(NewItem) == type({}):
                    session.update(NewItem)
                    resp['header']['session'] = session
                elif NewItem == None:
                    resp['header']['session'] = {}
                elif NewItem == False: raise KeyError
                else: raise ValueError
            except (AttributeError, KeyError):
                resp['header']['session'] = False
            except ValueError:
                ErrMsg = "Type of 'session' must be dict or NoneType."
            try:
                # content always ends up as bytes
                if type(resp['content']) == type(b''): pass
                elif type(resp['content']) == type(''):
                    resp['content'] = resp['content'].encode('utf-8')
                else: raise ValueError
            except KeyError: resp['content'] = b''
            except ValueError:
                ErrMsg = "Type of 'content' must be str or bytes."
            try:
                if not (
                    type(resp['traceback']) == type({})
                    or resp['traceback'] == False
                ): raise ValueError
            except KeyError: resp['traceback'] = False
            except ValueError:
                ErrMsg = "Type of 'traceback' must be dict."
        if ErrMsg:
            resp = { 'header': DefaultHeader(500), 'traceback': False }
            if self.conf['error']['debug']:
                resp['traceback'] = {
                    'filepath': 'Cannot specify file path for RespFinalizeError.', 'line': 0,
                    'detail': ErrMsg
                }
            if self.conf['path']['log']:
                LogFile = open(self.conf['path']['log'], 'a')
                LogFile.write(' RespFinalizeError : '+ErrMsg+'\n')
        # Expand the numeric status into (code, "code reason", long message)
        # using conf['status'], falling back to its 'default' entry.
        StatusCode = resp['header']['status']
        StatusConf = self.conf['status']
        StatusMsg = StatusConf.get(StatusCode, StatusConf['default'])
        if not StatusMsg[0]:
            StatusMsg[0] = StatusConf['default'][0]
        if not StatusMsg[1]:
            StatusMsg[1] = StatusConf['default'][1]
        resp['header']['status'] = (StatusCode, str(StatusCode)+' '+StatusMsg[0], StatusMsg[1])
        from cumulus.utils import LoadModule
        resp = LoadModule(self.conf['reviser']['post'], resp)
        self.resp = resp
        return self.resp
    def LoadError(self):
        """If the tidied status is an error (>= 400), run the configured error
        handler and tidy its output in turn."""
        if self.resp['header']['status'][0] >= 400:
            from cumulus.utils import LoadModule
            self.resp = Respond(LoadModule(
                self.conf['error']['handler'], Dict2Obj({
                    'req': self.req,
                    'res': self.resp,
                    'conf': self.conf
                })
            ), self.conf).tidy()
        return self.resp
    def finalize(self):
        """Convenience: tidy() then LoadError() in one call."""
        self.tidy()
        return self.LoadError()
class Dict2Obj:
    """Lightweight wrapper exposing a mapping's keys as object attributes."""
    def __init__(self, data):
        for key, value in data.items():
            setattr(self, key, value)
    # updating with another mapping is the same operation as construction
    update = __init__
|
UTF-8
|
Python
| false | false | 2,013 |
11,390,253,285,339 |
afa960b142cdf2a788edbece1073e1c3f30460ce
|
3191537d2db11d1dfc29cb6f66461a0e9b2be13e
|
/TEST.py
|
d48a4e7ba3a3be256b8c431f93963ae3b28f507d
|
[] |
no_license
|
optedoblivion/Bluedar
|
https://github.com/optedoblivion/Bluedar
|
cf23d474979e2efb513c83fe4a01ca830c7bc302
|
876e953167cabfd683118396818634648ee76a88
|
refs/heads/master
| 2020-04-24T15:19:40.812023 | 2010-04-12T03:00:38 | 2010-04-12T03:00:38 | 605,905 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Probe for the pyrssi extension module: emit "1" when it imports, "0" otherwise.
try:
    import pyrssi
    print("1")
except:
    print("0")
|
UTF-8
|
Python
| false | false | 2,010 |
18,734,647,364,924 |
da9e2154de66a9ca28b5335c0ff348f3911fbde1
|
be435471d73fb9aa6b15494020c2f39f9fbe52af
|
/204_2/jeff_and_rounding_v2.py
|
211905a2dedca85b718d593c85c1b590eeac7a7b
|
[] |
no_license
|
jakab922/codeforces
|
https://github.com/jakab922/codeforces
|
8c88d6df9c9f27b0485b9d40771991e2a8e1d8b9
|
20af41a751f57ac721b35c612bd774bfc197f256
|
refs/heads/master
| 2016-09-05T10:00:39.741890 | 2013-12-20T09:57:43 | 2013-12-20T09:57:43 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from math import floor
n = int(raw_input().strip())
ais = map(float, raw_input().strip().split(' '))
small_parts = sorted(map(lambda x: x - floor(x), ais))
bottom = []
top = []
free = 0
while free < 2 * n and small_parts[free] == 0.0:
free += 1
bottom = small_parts[free:]
cl = len(bottom)
for i in xrange(1, cl):
bottom[i] += bottom[i - 1]
top = small_parts[free:]
top = map(lambda x: 1.0 - x, top)
for i in xrange(1, cl):
top[cl - 1 - i] += top[cl - i]
half = cl / 2
mi = float(cl)
for i in xrange(cl):
if
|
UTF-8
|
Python
| false | false | 2,013 |
9,028,021,282,125 |
4aedf852fca9d805d36acf3c1e0cba6871c6c277
|
79bc253757b94097e1ef2e5646685d3457a6a7a5
|
/w10.py
|
9dcd455abf0e67618197a0d3f85e90e159cca759
|
[] |
no_license
|
smpss91341/CP_w10
|
https://github.com/smpss91341/CP_w10
|
0412d1a0d13e57109b9be048060c72e631cee9b7
|
18363f9d4fd4c09423db61520c8f4513aac4fd96
|
refs/heads/master
| 2016-09-05T09:06:59.418618 | 2014-11-18T09:25:57 | 2014-11-18T09:25:57 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Draw a character-triangle pattern of height h: type 0 grows then shrinks,
# type 1 shrinks then grows.
x = "a"
try:
    h = int(input("Please input a number:"))
    print("The number you want is ", h)
    print("Which type you want? Incremental(0) or Less(1) ")
    t = int(input("Please input the 0 or 1:"))
    if t == 0:
        # rows of length 1..h, then h-1..1
        for i in range(1, h + 1, 1):
            print(x * i)
        for i in range(h - 1, 0, -1):
            print(x * i)
    if t == 1:
        # rows of length h..1, then 2..h
        for i in range(h, 0, -1):
            print(x * i)
        for i in range(2, h + 1, 1):
            print(x * i)
    if t != 0 and t != 1:
        print("You insert a wrong number")
except (ValueError, EOFError):
    # only the int() conversions (and end-of-input) are expected to fail here;
    # the original bare except also swallowed KeyboardInterrupt/SystemExit
    print("You insert a wrong number")
    print("Please run again")
# This is test
|
UTF-8
|
Python
| false | false | 2,014 |
6,640,019,481,264 |
9e7ea7c7b9b7c0b82e7bdd0f8e0c298ebe0af8c3
|
1adba89e0253ad67f49060e61a8c97de6fe98e86
|
/modules/insult/insult.py
|
1e1cd43a225bccaef54bb5a9a87149295c9cadb5
|
[
"MIT"
] |
permissive
|
tcoppi/scrappy
|
https://github.com/tcoppi/scrappy
|
8a4566262f983fb7fdb319e74f4a72b11129d2f0
|
a83c132839e5e70342171e57bd19fed2500dae3f
|
refs/heads/master
| 2020-05-17T04:07:31.368708 | 2014-07-16T15:56:24 | 2014-07-16T15:56:24 | 254,654 | 2 | 1 | null | false | 2013-01-30T18:18:10 | 2009-07-18T23:53:13 | 2013-01-30T18:18:09 | 2013-01-30T18:18:09 | 6,716 | null | 1 | 0 |
Python
| null | null |
#insult mod via http://i.imgur.com/dXCGBE0.png
#see file insultdb.py for creating the initial insult database
from random import randint
import shelve
import threading
from ..module import Module
class insult(Module):
    """Random insult generator command backed by a shelve word database.

    Insults are assembled as "<adjective> <bodypart> <profession>" from three
    word lists persisted in modules/insult/insultdb.
    """
    def __init__(self, scrap):
        super(insult, self).__init__(scrap)
        # RLock instead of Lock: add_insult()/del_insult() call
        # save_insultdb() while already holding the lock, and a plain
        # (non-reentrant) Lock deadlocks on that nested acquire.
        self.lock = threading.RLock()
        self.insults = {}
        self.open_insultdb()
        scrap.register_event("insult", "msg", self.distribute)
        self.register_cmd("insult", self.insult_me)
    #opens the insultdb file using shelve
    def open_insultdb(self, dbfile = 'insultdb'):
        """Load the insult word lists from the shelve file into self.insults."""
        with self.lock:
            try:
                #open the file and store the insultdb to self.insults as dict
                insultdb = shelve.open("modules/insult/%s" % dbfile)
                self.insults.update(insultdb)
                insultdb.close()
            except Exception as err:
                # the original concatenated the Exception *class* into the
                # message, which itself raised TypeError; log the instance
                self.logging.debug("Error reading insultdb file: %s" % err)
                return
    def add_insult(self, part, word):
        """Append *word* to the list for *part* and persist the database."""
        with self.lock:
            self.insults[part].append(word)
            self.save_insultdb()
    def del_insult(self, part, word):
        """Remove *word* from the list for *part* (if present) and persist."""
        with self.lock:
            try:
                self.insults[part].remove(word)
            except:
                #didn't have that insult, nothing to do
                return
            self.save_insultdb()
    def save_insultdb(self, dbfile = 'insultdb'):
        """Write self.insults back to the shelve file."""
        with self.lock:
            try:
                #open the file and store self.insults
                insultdb = shelve.open("modules/insult/%s" % dbfile)
                insultdb.update(self.insults)
                insultdb.close()
            except Exception as err:
                # this is the write path (original message said "reading")
                self.logging.debug("Error writing insultdb file: %s" % err)
                return
    def insult_me(self, server, event, bot):
        '''Usage: "insult [-p | -add (adjective|bodypart|profession) word | -del (adjective|bodypart|profession) word] [foo]" where foo is someone/thing to insult. Leave blank to make the bot insult yourself.'''
        #save the lengths of each insult component list
        a = len(self.insults['adjectives'])
        b = len(self.insults['bodyparts'])
        p = len(self.insults['professions'])
        #select a random word from each insult component
        adjective = self.insults['adjectives'][randint(0, a-1)]
        bodypart = self.insults['bodyparts'][randint(0, b-1)]
        profession = self.insults['professions'][randint(0, p-1)]
        #insert a/an if adjective starts with a consonant/vowel
        if adjective[0] in 'aeiou':
            adjective = "an %s" % adjective
        else:
            adjective = "a %s" % adjective
        #check for '-p' or 'word(s)' passed to insult
        if len(event.tokens) >= 2:
            arg = event.tokens[1]
            msg = event.tokens[2:]
            #return number of possible insult combinations if -p flag is given
            if arg == "-p":
                possible = a*b*p
                insult = "I know %d adjectives, %d bodyparts, and %d professions for a total of %d possible insult combinations!" % (a, b, p, possible)
            #add a new word to the db
            elif arg == "-add":
                if len(msg) < 2:
                    insult = "Usage: insult -add (adjective|bodypart|profession) word"
                else:
                    part = msg[0]+'s'
                    word = ' '.join(msg[1:])
                    if part in self.insults.keys():
                        self.add_insult(part, word)
                        insult = "Successfully added \"%s\" to %s." % (word, part)
                    else:
                        insult = "Error: \"%s\" is not a valid insult part. Try one of the following: %s." % (part, ', '.join(self.insults.keys()))
            #delete a word from the db
            elif arg == "-del":
                if len(msg) < 2:
                    insult = "Usage: insult -del (adjective|bodypart|profession) word"
                else:
                    part = msg[0]+'s'
                    word = ' '.join(msg[1:])
                    if part in self.insults.keys():
                        self.del_insult(part, word)
                        insult = "Successfully deleted \"%s\" from %s." % (word, part)
                    else:
                        insult = "Error: \"%s\" is not a valid insult part. Try one of the following: %s." % (part, ', '.join(self.insults.keys()))
            #no flag given, but a word or words was given, so insult word(s), stripping extra spaces
            else:
                insult = "%s is %s %s %s!" % (' '.join(event.tokens[1:]).strip(), adjective, bodypart, profession)
        #no arguments given so insult the user
        else:
            insult = "%s, you're %s %s %s!" % (event.source.nick, adjective, bodypart, profession)
        server.privmsg(event.target, insult)
UTF-8
|
Python
| false | false | 2,014 |
15,616,501,108,542 |
d04c6ba42829c8b9e5a5013b0e250ab5033003eb
|
e32bb97b6b18dfd48760ed28553a564055878d48
|
/source_py3/python_toolbox/nifty_collections/frozen_counter.py
|
fd7ef390df0b1a0045e8ed9d52e4159678f1f422
|
[
"MIT"
] |
permissive
|
rfdiazpr/python_toolbox
|
https://github.com/rfdiazpr/python_toolbox
|
26cb37dd42342c478931699b00d9061aedcd924a
|
430dd842ed48bccdb3a3166e91f76bd2aae75a88
|
refs/heads/master
| 2020-12-31T04:15:53.977935 | 2014-04-30T23:54:58 | 2014-04-30T23:54:58 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Copyright 2009-2014 Ram Rachum.,
# This program is distributed under the MIT license.
import operator
import heapq
import itertools
import collections
from .frozen_dict import FrozenDict
try: # Load C helper function if available
from _collections import _count_elements
except ImportError:
def _count_elements(mapping, iterable):
'''Tally elements from the iterable.'''
mapping_get = mapping.get
for element in iterable:
mapping[element] = mapping_get(element, 0) + 1
class FrozenCounter(FrozenDict):
    '''
    An immutable counter.

    A counter that can't be changed. The advantage of this over
    `collections.Counter` is mainly that it's hashable, and thus can be used as
    a key in dicts and sets.

    In other words, `FrozenCounter` is to `Counter` what `frozenset` is to
    `set`.
    '''
    def __init__(self, iterable=None, **kwargs):
        '''Count elements from an iterable/mapping and/or keyword counts.'''
        super().__init__()
        if iterable is not None:
            # `collections.abc.Mapping`: the bare `collections.Mapping`
            # alias is deprecated and removed in Python 3.10.
            if isinstance(iterable, collections.abc.Mapping):
                self._dict.update(iterable)
            else:
                _count_elements(self._dict, iterable)
        if kwargs:
            self._dict.update(kwargs)
        # Drop zero counts. Collect the keys first: deleting entries while
        # iterating the live view raises RuntimeError.
        for key in [key for key, value in self._dict.items() if value == 0]:
            del self._dict[key]

    # missing elements count as zero, like `collections.Counter`
    __getitem__ = lambda self, key: self._dict.get(key, 0)

    def most_common(self, n=None):
        '''
        List the `n` most common elements and their counts, sorted.

        Results are sorted from the most common to the least. If `n is None`,
        then list all element counts.

            >>> FrozenCounter('abcdeabcdabcaba').most_common(3)
            [('a', 5), ('b', 4), ('c', 3)]

        '''
        # Emulate Bag.sortedByCount from Smalltalk
        if n is None:
            return sorted(self.items(), key=operator.itemgetter(1),
                          reverse=True)
        return heapq.nlargest(n, self.items(),
                              key=operator.itemgetter(1))

    def elements(self):
        '''
        Iterate over elements repeating each as many times as its count.

            >>> c = FrozenCounter('ABCABC')
            >>> sorted(c.elements())
            ['A', 'A', 'B', 'B', 'C', 'C']

        Note, if an element's count has been set to zero or is a negative
        number, `.elements()` will ignore it.
        '''
        # Emulate Bag.do from Smalltalk and Multiset.begin from C++.
        return itertools.chain.from_iterable(
            itertools.starmap(itertools.repeat, self.items())
        )

    @classmethod
    def fromkeys(cls, iterable, v=None):
        # There is no equivalent method for counters because setting v=1
        # means that no element can have a count greater than one.
        raise NotImplementedError(
            'FrozenCounter.fromkeys() is undefined. Use '
            'FrozenCounter(iterable) instead.'
        )

    def __repr__(self):
        if not self:
            return '%s()' % self.__class__.__name__
        try:
            items = ', '.join(map('%r: %r'.__mod__, self.most_common()))
            return '%s({%s})' % (self.__class__.__name__, items)
        except TypeError:
            # handle case where values are not orderable
            return '{0}({1!r})'.format(self.__class__.__name__, dict(self))

    __pos__ = lambda self: self
    __neg__ = lambda self: type(self)({key: -value for key, value
                                       in self.items()})

    # Multiset-style mathematical operations discussed in:
    #       Knuth TAOCP Volume II section 4.6.3 exercise 19
    #       and at http://en.wikipedia.org/wiki/Multiset
    #
    # Outputs guaranteed to only include positive counts.
    #
    # To strip negative and zero counts, add-in an empty counter:
    #       c += FrozenCounter()

    def __add__(self, other):
        '''
        Add counts from two counters.

            >>> FrozenCounter('abbb') + FrozenCounter('bcc')
            FrozenCounter({'b': 4, 'c': 2, 'a': 1})

        '''
        if not isinstance(other, FrozenCounter):
            return NotImplemented
        result = collections.Counter()
        for element, count in self.items():
            new_count = count + other[element]
            if new_count > 0:
                result[element] = new_count
        for element, count in other.items():
            if element not in self and count > 0:
                result[element] = count
        return FrozenCounter(result)

    def __sub__(self, other):
        '''
        Subtract count, but keep only results with positive counts.

            >>> FrozenCounter('abbbc') - FrozenCounter('bccd')
            FrozenCounter({'b': 2, 'a': 1})

        '''
        if not isinstance(other, FrozenCounter):
            return NotImplemented
        result = collections.Counter()
        for element, count in self.items():
            new_count = count - other[element]
            if new_count > 0:
                result[element] = new_count
        for element, count in other.items():
            if element not in self and count < 0:
                result[element] = 0 - count
        return FrozenCounter(result)

    def __or__(self, other):
        '''
        Get the maximum of value in either of the input counters.

            >>> FrozenCounter('abbb') | FrozenCounter('bcc')
            FrozenCounter({'b': 3, 'c': 2, 'a': 1})

        '''
        if not isinstance(other, FrozenCounter):
            return NotImplemented
        result = collections.Counter()
        for element, count in self.items():
            other_count = other[element]
            new_count = other_count if count < other_count else count
            if new_count > 0:
                result[element] = new_count
        for element, count in other.items():
            if element not in self and count > 0:
                result[element] = count
        return FrozenCounter(result)

    def __and__(self, other):
        '''
        Get the minimum of corresponding counts.

            >>> FrozenCounter('abbb') & FrozenCounter('bcc')
            FrozenCounter({'b': 1})

        '''
        if not isinstance(other, FrozenCounter):
            return NotImplemented
        result = collections.Counter()
        for element, count in self.items():
            other_count = other[element]
            new_count = count if count < other_count else other_count
            if new_count > 0:
                result[element] = new_count
        return FrozenCounter(result)
|
UTF-8
|
Python
| false | false | 2,014 |
395,137,033,551 |
f046887b12e49332ee1b2beb053fbeda5ad24453
|
60f8e3d6e5855bae1e33a4de85be6667302fe5ff
|
/app/tools/API_extract/yelp.py
|
eb9514b51e3a3699eaeef26dcc58afc5cfb4c4d2
|
[] |
no_license
|
cristianfr/BargainMeal
|
https://github.com/cristianfr/BargainMeal
|
5a25646c4d9e9835f13ee6e03cbc404f2bf84929
|
a93e8480fe0b855cb31d6833df574fdc58ece74b
|
refs/heads/master
| 2020-04-13T01:28:43.797830 | 2013-09-27T23:44:26 | 2013-09-27T23:44:26 | 12,580,496 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from ..db import common as db
import requests
'''
Yelp related functions.
'''
HOST = 'http://api.yelp.com/phone_search'
KEY='vKdquZAPRDlSVXve3vZTBA'
auth = { 'ywsid': KEY }
def getRating(phone):
    """Return the average Yelp rating of the business matching *phone*."""
    query = {'phone': phone}
    query.update(auth)
    response = requests.get(HOST, params=query)
    businesses = response.json()['businesses']
    return businesses[0]['avg_rating']
def addRatings():
#Add the ratings to the database.
con = db.connect_db()
cur = con.cursor()
cur.execute("SELECT id, phone FROM Additional WHERE yelp_r=0 ")
coupons = cur.fetchall()
for coupon in coupons:
(the_id, phone) = coupon
rating = getRating(phone)
print 'Found that id '+the_id+ ' has rating '+ str(rating)
cur.execute("UPDATE Additional SET yelp_r = %s WHERE phone = %s"% (rating, phone))
con.commit()
cur.close()
con.close()
def main():
    """Entry point: backfill Yelp ratings for all unrated rows."""
    addRatings()

if __name__=='__main__':
    main()
|
UTF-8
|
Python
| false | false | 2,013 |
16,423,954,967,502 |
b0f61b0b1eb921739956a88e7b041f5ebdc14e9b
|
ce564f0a9b6f261e5303779ab95f8c1629487ac7
|
/django_mysql_fix/backends/mysql/validation.py
|
022138fc6d0a4d1389aa84bebbe5e3c2b4367847
|
[
"MIT"
] |
permissive
|
frol/django-mysql-fix
|
https://github.com/frol/django-mysql-fix
|
192e334cb94c0fdf14516383022d6c5d4486c1d8
|
96d1e960b49ab686ea6d8d766bb4d86edb806e47
|
refs/heads/master
| 2021-01-19T14:09:38.956874 | 2014-05-03T16:07:11 | 2014-05-03T16:07:11 | 18,802,306 | 6 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.db.backends.mysql.validation import *
|
UTF-8
|
Python
| false | false | 2,014 |
8,126,078,146,440 |
66d79dbccbe8b37008fe4a981681e618f23aa069
|
3587e4c248005c6df500caea7ee18675d8676022
|
/Web/DataDiscovery/oldapi/DBSUtil.py
|
7ad530eed46fb31c0e65ac5b434c2b65687910d6
|
[] |
no_license
|
bbockelm/DBS
|
https://github.com/bbockelm/DBS
|
1a480e146010e3d6b234ba5ee471f7c87a4877e6
|
4e47d578610485e0503fc1270c7d828064643120
|
refs/heads/master
| 2021-01-16T18:18:29.989833 | 2012-08-21T15:01:05 | 2012-08-21T15:01:05 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
#-*- coding: ISO-8859-1 -*-
#
# Copyright 2006 Cornell University, Ithaca, NY 14853. All rights reserved.
#
# Author: Valentin Kuznetsov, 2006
"""
Common utilities module used by DBS data discovery.
"""
# import system modules
import os, string, sys, time, types, logging, traceback, random, difflib
# Debug log: everything at DEBUG and above goes to /tmp/DataDiscovery_debug.log,
# truncated (filemode='w') on each start.
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                    filename='/tmp/DataDiscovery_debug.log',
                    filemode='w')
# SQLAlchemy loggers reset to NOTSET (i.e. they defer to the root level)
logging.getLogger('sqlalchemy.engine').setLevel(logging.NOTSET)
logging.getLogger('sqlalchemy.orm.unitofwork').setLevel(logging.NOTSET)
logging.getLogger('sqlalchemy.pool').setLevel(logging.NOTSET)
# import DBS modules
import DBSOptions
SENDMAIL = "/usr/sbin/sendmail" # sendmail location
RES_PER_PAGE=5 # number of results per page shown
GLOBAL_STEP =5 # number of iterators shown in Results bar
# file created by crontab job, see getDLSsites.sh
DLS_INFO='dls.all'
# Tips
# Rotating usage hints for users; tip() below picks one of these at random.
TIPS= [
"to save your history, open history menu and authenticate yourself",
"to find all data on particular site, use expert site search",
"DBS only information can be accessed from expert page",
"don't use 'back' button, use history menu instead",
"to send found data to your buddy, use 'bare URL' link at bottom of the page"
]
def tip():
    """Return one usage hint chosen at random from TIPS."""
    last = len(TIPS) - 1
    return TIPS[random.randint(0, last)]
# (symbol, placeholder) pairs used by encode()/decode() below to swap
# special characters in dataset names for safe placeholders
SYMBOLS_LIST=[('+','__pl__'),('-','__mi__'),('/','__sl__'),('#','__po__')]
def encode(dataset):
    """Replace each special symbol in *dataset* with its SYMBOLS_LIST placeholder.

    Uses str.replace instead of the deprecated string.replace (removed in
    Python 3); behavior is identical.
    """
    for symbol, placeholder in SYMBOLS_LIST:
        dataset = dataset.replace(symbol, placeholder)
    return dataset
def decode(dataset):
    """Inverse of encode(): restore each SYMBOLS_LIST placeholder to its symbol.

    Uses str.replace instead of the deprecated string.replace (removed in
    Python 3); behavior is identical.
    """
    for symbol, placeholder in SYMBOLS_LIST:
        dataset = dataset.replace(placeholder, symbol)
    return dataset
def nPages(tot, max):
    """Return the number of pages needed for *tot* results at *max* per page.

    Plain ceiling division; `//` keeps the original Python 2 integer-division
    behavior correct under Python 3 as well.  (Note: the parameter name `max`
    shadows the builtin, kept for interface compatibility.)
    """
    return (tot + max - 1) // max
def findRssFiles(dir):
    """Walk *dir* and return paths of files in directories whose only file is rss.xml."""
    found = []
    for root, subdirs, files in os.walk(dir):
        if files == ['rss.xml']:
            found.append('%s/%s' % (root, files[0]))
    return found
def uniqueList(alist):
    """Return the unique (hashable) elements of *alist*.

    The original `map(set.__setitem__, ...)` trick relies on `map` being
    eager; under Python 3 the lazy map is never consumed and the function
    silently returns an empty list.  dict.fromkeys is the portable
    equivalent (and preserves first-appearance order on Python 3.7+).
    """
    return list(dict.fromkeys(alist))
def convertListToString(iList):
    """Format *iList* as a bracketed string of quoted items, e.g. "['a','b']".

    Fixes the empty-list case: the original sliced off the trailing comma
    with s[:-1], which for [] removed the opening bracket and returned "]".
    """
    return "[" + ",".join("'%s'" % item for item in iList) + "]"
def getListOfSites(dbsInst='all'):
    """
    Generate the sorted list of distinct DLS site names for the given DBS
    instance, parsed from the DLS_INFO (dls.all) file; 'all' matches every
    instance.
    """
    fName = DLS_INFO
    f=open(fName,'r')
    sList=[]
    for item in f.readlines():
        # each line is expected to be "<dbs-instance> <site>";
        # malformed lines are skipped
        try:
            dbs,site=string.split(item)
        except:
            pass
            continue
        if string.lower(dbsInst)=='all':
            if not sList.count(site):
                sList.append(site)
        else:
            if dbs==dbsInst:
                if not sList.count(site):
                    sList.append(site)
    f.close()
    sList.sort()
    return sList
def colorSizeHTMLFormat(i):
    """Return sizeFormat(i) with its unit wrapped in a colored HTML span.

    PB=red, TB=blue, MB=green, KB=lavender; GB and plain byte counts are
    returned unstyled.  Rewritten with str methods: the string-module
    functions (string.find/string.replace) used originally are deprecated
    and removed in Python 3.
    """
    n = sizeFormat(i)
    # PB are in red
    if 'PB' in n:
        return n.replace('PB', '<span class="box_red">PB</span>')
    # TB are in blue
    elif 'TB' in n:
        return n.replace('TB', '<span class="box_blue">TB</span>')
    # GB are in black (no styling)
    # MB are in green
    elif 'MB' in n:
        return n.replace('MB', '<span class="box_green">MB</span>')
    # KB are in lavender
    elif 'KB' in n:
        return n.replace('KB', '<span class="box_lavender">KB</span>')
    else:
        return n
def sizeFormat(i):
    """
    Format file size utility, it converts file size into KB, MB, GB, TB, PB units
    """
    # int() auto-promotes to arbitrary precision; `long` does not exist in
    # Python 3
    num = int(i)
    for unit in ['', 'KB', 'MB', 'GB', 'TB']:
        if num < 1024.:
            return "%3.1f%s" % (num, unit)
        num /= 1024.
    # the original fell through and returned None for values >= 1024 PB;
    # report everything that large in PB
    return "%3.1fPB" % num
def splitString(s,size,separator=' '):
    """Recursively break *s* into chunks of at most *size* characters joined
    by *separator*, trying not to split inside an HTML entity: if the current
    chunk contains '&', the cut point is pushed just past the next ';'
    (searched at most 5 characters beyond *size*)."""
    _size=size
    # take care of HTML symbols, like & by looking around for & and ;
    if len(s)>size:
        if string.find(s[:size],"&")!=-1:
            n=len(s)
            if n>size+5: n = size+5
            pos = string.find(s[0:n],";")
            if pos!=-1: size=pos+1
        return s[0:size]+separator+splitString(s[size:],_size,separator)
    else:
        return s
def splitString_orig(s,size,separator=' '):
    """Earlier chunking splitter, apparently kept for reference.

    NOTE(review): the recursive call goes to splitString (the HTML-aware
    version above), not to this function, and drops the *separator*
    argument — presumably a leftover; confirm before reuse.
    """
    if len(s)>size:
        return s[0:size]+separator+splitString(s[size:],size)
    else:
        return s
def printDictForJS(dict,space=""):
    """
    Render a nested dict-of-dicts as JavaScript object-literal text of the
    form ``{ menuList: [...], nextObj : {...} }``, recursing until the values
    are no longer dicts (those levels get ``nextObj:null``).  Used to feed
    cascading menus (DBS instance -> application -> primary dataset -> tier).

    For instance, ::

        {'dbs1': {'a10': {'p10': {'t11': []}}}}

    becomes ::

        { menuList: ["dbs1"],
          nextObj : {"dbs1": { menuList: ["a10"], ... } } }

    @type dict: dictionary
    @param dict: input dictionary which we want to printout
    @type space: string
    @param space: space separator (indentation prefix)
    @rtype : string
    @return : JavaScript object-literal text
    """
    ads = len("{ menuList: ")
    s = "\n"+space
    s+= "{ menuList: ["
    keyList=dict.keys()
    keyList.sort()
    # NOTE(review): keys are reversed only when the first value is a dict —
    # the intent is unclear from here; confirm against the JS consumer.
    if type(dict[keyList[0]]) is types.DictType:
        keyList.reverse()
    deadEnd=0
    for key in keyList:
        s+='\"%s\"'%key
        if key!=keyList[-1]: s+=","
        # a non-dict value anywhere makes this level a leaf (nextObj:null)
        if type(dict[key]) is not types.DictType: deadEnd=1
    s+="],\n"
    if deadEnd:
        s+=space+" nextObj:null"
    else:
        s+=space+" nextObj : {"
        for key in keyList:
            s+='\"%s\": '%key
            if type(dict[key]) is types.DictType:
                # s+=printDictForJS(dict[key],space+" "*ads)
                s+=printDictForJS(dict[key])
            else:
                s+="\"%s\":null"%key
            if key!=keyList[-1]: s+=","
        # NOTE(review): this trailing guard duplicates the one inside the
        # loop and is a no-op after it (key == keyList[-1]); looks leftover.
        if key!=keyList[-1]: s+=","
        s+=space+"}\n" # end of nextObj
    s+=space+"}\n" # end of menuList
    return s
def printMsg(msg):
    """
    Print *msg* to stdout followed by an underline of dashes of equal length.

    @type msg: string
    @param msg: input message
    """
    print(msg)
    print("-" * len(msg))
def getExceptionInHTML():
    """
    Return the exception currently being handled, rendered as an HTML table
    with its type and value.
    """
    exc_type, exc_value = sys.exc_info()[:2]
    msg = """
    <table>
    <tr>
    <td align="right"><b>Exception type:</b></td>
    <td><em>%s</em></td>
    </tr>
    <tr>
    <td align="right"><b>Exception value:</b></td>
    <td><em>%s</em></td>
    </tr>
    </table>
    """ % (exc_type, exc_value)
    return msg
def getExcept():
    """
    Return a plain-text report of the current exception: its type, value
    and formatted traceback frames.
    """
    exc_type, exc_value, exc_tb = sys.exc_info()
    msg = "Exception type: \n%s\n\n" % exc_type
    msg += "Exception value: \n%s\n\n" % exc_value
    msg += "Traceback: \n"
    msg += "".join(traceback.format_tb(exc_tb))
    msg += "\n\n"
    return msg
def constructExpression(s, listName):
    """
    For given string 's' and list name construct the expression statement.
    For instance,
       word1 or (word2 and word3)
    converted to
       [s for s in listName if s.find('word1')!=-1] or (...)
    Such expression statement is further used by eval in search method of
    DBSHelper.

    Uses str methods instead of the deprecated ``string`` module
    functions (``string.lower``/``string.split``), which were removed in
    Python 3.
    """
    specialSymbols = ["(", ")", "and", "or", "not"]
    oList = []
    ss = s.lower()
    # Pad every operator/bracket with spaces so split() tokenizes them.
    # NOTE(review): this is a plain substring replace, so search words that
    # themselves contain "and"/"or"/"not" (e.g. "word") get mangled --
    # presumably inputs are expected not to contain these; confirm.
    for elem in specialSymbols:
        ss = ss.replace(elem, " %s " % elem)
    for elem in ss.split():
        if specialSymbols.count(elem):
            oList.append(elem)
        else:
            # Turn a bare word into a substring-match filter over listName.
            oList.append("[s for s in %s if s.find('%s')!=-1]" % (listName, elem))
    return ' '.join(oList)
def validator(s):
    """
    Evaluate given string 's' and validate that its "(" and ")" are
    balanced and properly nested.  For instance
       (word1 or (test1 and test2) and (w1 or w2) )
    is valid.

    Returns True when brackets match, False otherwise.
    """
    if s.count("(") != s.count(")"):
        return False
    depth = 0
    for char in s:
        if char == "(":
            depth += 1
        elif char == ")":
            depth -= 1
            # BUG FIX: the original only checked the final counter, so an
            # input like ")(" (equal counts, wrong order) validated as
            # True.  A closing bracket may never precede its opener.
            if depth < 0:
                return False
    return depth == 0
def demanglePattern(pattern):
    """
    Demangle given pattern into elements. Return a list of elements,
    e.g. /path1/path2/path3 will produce a list ['path1','path2','path3']
    @type pattern: string
    @param pattern: input pattern ("*" or empty yield ['','',''])
    @rtype : list
    @return: list of components, with any '*' component replaced by ''
    @raise DbsPatternError: when a non-trivial pattern does not start
           with '/'
    """
    if pattern == "*" or not pattern:
        return ['', '', '']
    # str.split replaces the deprecated/removed string.split function.
    components = os.path.normpath(pattern).split('/')
    if pattern[0] != '/':
        msg = "path pattern '%s' doesn't start with /" % pattern
        raise DbsPatternError(args=msg)
    # Skip the leading empty component from the '/' prefix and neutralise
    # wildcard components, since later such where clauses are skipped.
    return ['' if c == "*" else c for c in components[1:]]
def printListElements(iList,msg=""):
    """
    Loop over elements in a list and print one in a time on stdout
    @type iList: list
    @param iList: input list
    @type msg: string
    @param msg: optional header, printed as "### msg:" before the items
    @rtype : none
    @return: none
    """
    if msg:
        print
        print "### %s:"%msg
    for item in iList:
        print item
def formattingDictPrint(iDict):
    """
    print dictionary in formated way, e.g.
    ::
      {
        'key1': []
        'key2': []
      }
    @type iDict: dictionary
    @param iDict: input dictionary (values are lists, rendered via
           formattingListPrint)
    @rtype : string
    @return: return formatted representation of dictionary
    """
    # Materialize the keys once: the original called iDict.keys() inside
    # the loop and indexed it with [-1], which only works on Python 2
    # (keys() returned a list) and rebuilt the list on every iteration.
    keys = list(iDict)
    s = "{\n"
    for key in keys:
        s += "\"%s\": " % key
        s += formattingListPrint(iDict[key])
        if keys[-1] != key:
            s += ",\n"
    s += "\n}\n"
    return s
def toLower(iList):
    """
    Return a new list with the string form of every non-None element of
    iList, lower-cased.  None entries are silently skipped.
    """
    oList = []
    for item in iList:
        # Equivalent to the original `type(i) is not types.NoneType` check.
        if item is None:
            continue
        try:
            # str.lower replaces the deprecated string.lower function.
            oList.append(str(item).lower())
        except Exception:
            # BUG FIX: the original `raise "fail at lowering ..."` raised a
            # plain string, which is itself a TypeError on Python >= 2.6;
            # raise a real exception carrying the offending value instead.
            raise ValueError("fail at lowering '%s' in %s" % (item, iList))
    return oList
def tupleToList(x):
    """fully copies trees of tuples to a tree of lists.
    deep_list( (1,2,(3,4)) ) returns [1,2,[3,4]]

    Non-tuple nodes (including tuple subclasses, as in the original
    exact-type check) are returned unchanged.
    """
    if type(x) is not tuple:
        return x
    # BUG FIX (portability): the original returned map(tupleToList, x),
    # which on Python 3 is a lazy map object rather than the list the
    # docstring promises; a comprehension is identical on Python 2 and
    # correct on Python 3.
    return [tupleToList(item) for item in x]
def formattingListPrint(iList, n=3):
    """
    print list in formated way, e.g.
    ::
      [
       item1,item2,item3,
       item4,item5,item6
      ]
    n provides number of items per line
    @type iList: list
    @param iList: input list (rendered in descending sorted order)
    @type n: int
    @param n: number of items per line
    @rtype : string
    @return: return a printed list
    """
    # BUG FIX (side effect): the original called iList.sort()/reverse(),
    # mutating the caller's list in place; render a sorted copy instead.
    items = sorted(iList, reverse=True)
    s = "[\n"
    count = 0
    last = len(items) - 1
    for idx, item in enumerate(items):
        if idx != last:
            s += " \"%s\"," % item
        else:
            s += " \"%s\"" % item + "\n"
        count += 1
        # Break the line after every n items.
        if count == n:
            s += "\n"
            count = 0
    s += "]"
    return s
def sortedDictValues(adict):
    """
    Return the values of adict ordered by sorted key.
    @type adict: dictionary
    @param adict: input dictionary
    @rtype : list
    @return: list of values, in ascending key order
    """
    # The original sorted adict.keys() in place, which only works on
    # Python 2 where keys() returns a list; sorted() over the dict is
    # behaviorally identical and works on both versions.
    return [adict[key] for key in sorted(adict)]
def addToDict(iDict, key, value):
    """
    Append value to the list stored under key in iDict, creating the
    list on first use.
    @type key: key type
    @param key: key
    @type value: value type
    @param value: value
    @rtype : none
    @return: none (iDict is modified in place)
    """
    if key not in iDict:
        iDict[key] = []
    iDict[key].append(value)
def monthId(month):
    """Map an English month name (full or abbreviated, any case) to its
    1-based month number, keyed on the first three letters.

    @raise KeyError: when the first three letters are not a month prefix.
    """
    months = {'jan': 1, 'feb': 2, 'mar': 3, 'apr': 4, 'may': 5, 'jun': 6,
              'jul': 7, 'aug': 8, 'sep': 9, 'oct': 10, 'nov': 11, 'dec': 12}
    # str.lower replaces the deprecated (removed in Python 3)
    # string.lower module function.
    return months[month.lower()[:3]]
def sendEmail(msg):
    """
    Send an Email with given message

    Pipes a hard-coded error report to the local sendmail binary
    (module-level SENDMAIL constant, defined elsewhere in this module)
    addressed to the DBS maintainer.  Prints the exit status on failure.
    """
    p = os.popen("%s -t" % SENDMAIL, "w")
    p.write("To: [email protected]\n")
    p.write("Subject: DBS DD error\n")
    p.write("\n") # blank line separating headers from body
    p.write("\n"+msg+"\n\n\n")
    sts = p.close()
    # popen.close() returns None on success, the exit status otherwise.
    if sts != 0:
        print "mail exit status", sts
class DDLogger:
    """
    DDLogger class

    Thin wrapper around the stdlib ``logging`` module that also raises
    the verbosity of the SQLAlchemy engine/ORM/pool loggers to the same
    level.
    """
    def __init__(self,name="Logger",verbose=0):
        """
        Logger constructor.
        @type name: string
        @param name: name of the logger, default "Logger"
        @type verbose: boolean or int
        @param : level of verbosity (0=NOTSET, 1=INFO, 2=DEBUG)
        @rtype : none
        @return: none
        """
        # Map the numeric verbosity onto a logging level.
        if verbose==1:
            self.logLevel = logging.INFO
        elif verbose==2:
            self.logLevel = logging.DEBUG
        else:
            self.logLevel = logging.NOTSET
        # NOTE(review): ``verbose`` itself is never stored, but writeLog()
        # below reads self.verbose -- calling writeLog() therefore raises
        # AttributeError.  Presumably ``self.verbose = verbose`` is missing
        # here; confirm with callers.
        self.name = name
        self.setLogger()
    def setLevel(self,level):
        # Updates the stored level only; setLogger() must be called again
        # for the new level to reach the underlying loggers/handlers.
        self.logLevel=level
    def writeLog(self,msg):
        """
        Write given message to the logger
        @type msg: string
        @param msg: message
        @rtype : none
        @return: none

        Logs at INFO for verbose==1, DEBUG for verbose>=2, else drops the
        message.  See the NOTE in __init__ about self.verbose.
        """
        if self.verbose==1:
            self.logger.info(msg)
        elif self.verbose>=2:
            self.logger.debug(msg)
        else:
            pass
    def setLogger(self):
        """
        Set logger settings, style, format, verbosity.
        @type self: class object
        @param self: none
        @rtype : none
        @return: none
        """
        # set up logging to file
        print "\n\nlog level",self.logLevel,self.name
        # Propagate the chosen level to the SQLAlchemy loggers as well.
        logging.getLogger('sqlalchemy.engine').setLevel(self.logLevel)
        logging.getLogger('sqlalchemy.orm.unitofwork').setLevel(self.logLevel)
        logging.getLogger('sqlalchemy.pool').setLevel(self.logLevel)
        self.logger = logging.getLogger(self.name)
        self.logger.setLevel(self.logLevel)
        # Console handler: note it is fixed at INFO regardless of logLevel,
        # and a new handler is added on every setLogger() call.
        ch = logging.StreamHandler()
        ch.setLevel(logging.INFO)
        formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
        ch.setFormatter(formatter)
        self.logger.addHandler(ch)
def removeEmptyLines(s):
    """Drop whitespace-only lines from *s*, keeping the original line
    endings of the surviving lines."""
    kept = [line for line in s.splitlines(True) if not line.isspace()]
    return ''.join(kept)
def textDiff(a, b, th_a="", th_b="", title=""):
    """Takes in strings a and b and returns a human-readable HTML diff.

    Empty lines are stripped first; the result is an HTML table with one
    row per opcode (replace / delete / insert / equal), using th_a/th_b
    as column headers when a title is given.
    """
    out = []
    # keepends=1 so line endings survive and can be turned into <br> below.
    a = removeEmptyLines(a).splitlines(1)
    # (the original passed the stray value 2 here; any truthy keepends
    # behaves identically)
    b = removeEmptyLines(b).splitlines(1)
    s = difflib.SequenceMatcher(None, a, b)
    out.append('<p><b>%s</b>\n'%title)
    out.append('<table class="table_diff">\n')
    if title:
        out.append('<tr><th><b>%s</b></th><th></th><th><b>%s</b></th></tr>\n'%(th_a,th_b))
    for e in s.get_opcodes():
        if e[0] == "replace":
            old=''.join(a[e[1]:e[2]])
            new=''.join(b[e[3]:e[4]])
            sep='&hArr;'
            tdOld="from"
            tdNew="to"
        elif e[0] == "delete":
            old=''.join(a[e[1]:e[2]])
            new=''
            # BUG FIX: was '&8212;', which renders literally; the em-dash
            # HTML entity is '&#8212;'.
            sep='&#8212;'
            tdOld="delete"
            tdNew=""
        elif e[0] == "insert":
            old=''
            new=''.join(b[e[3]:e[4]])
            sep='+'
            tdOld=""
            tdNew="insert"
        elif e[0] == "equal":
            old=new=''.join(b[e[3]:e[4]])
            sep=tdOld=tdNew=''
        else:
            out="""<div class="box_red">Unable to diff a file '%s' in '%s' '%s'"""%(title,th_a,th_b)
            return out
        # str.replace replaces the removed-in-Python-3 string.replace.
        old=old.replace('\n','<br>')
        new=new.replace('\n','<br>')
        s="""<tr><td class="%s">%s</td><td>%s</td><td class="%s">%s</td></tr>\n"""%(tdOld,old,sep,tdNew,new)
        out.append(s)
    out.append('</table></p>\n')
    return ''.join(out)
#
# main
#
# Ad-hoc self-test: most checks are left commented out; only tip()
# (defined elsewhere in this module) is exercised.
if __name__ == "__main__":
#    print formattingListPrint([1,2,3,4,5,6,7,8,9,10])
#    print formattingDictPrint({'test':[1,2,3,4,5,6,7,8,9,10],'vk':[21,22,23,24]})
#    print getDictOfSites()
#    print convertListToString([1,2,3,4,5,6,7,8,9,10])
    print tip()
|
UTF-8
|
Python
| false | false | 2,012 |
19,129,784,355,023 |
df4d2f63c4dfa5e7b37dc81592ab13bcac57e492
|
941b168e1267c966b8e3a5eeb81ba34a0f3bc063
|
/test_hdf_heap_corruption_bug.py
|
76d79d55fa4a835cd4fea2fe1e031504aec173f9
|
[] |
no_license
|
ulrikpedersen/ADScripts
|
https://github.com/ulrikpedersen/ADScripts
|
6f319a1ff55e70e0dcfececc70aaf31729bd3296
|
9fbf1fef22d85baa2b7fa51990651858a0045c15
|
refs/heads/master
| 2021-01-23T06:35:33.145776 | 2014-10-07T13:48:07 | 2014-10-07T13:48:07 | 21,942,244 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/bin/env dls-python
# A small test script to reproduce the heap corruption bug as found
# and described by Arhtur here:
# https://github.com/areaDetector/ADCore/pull/24
from pkg_resources import require
require("numpy")
require("cothread")
import cothread
from cothread import dbr
from cothread.catools import caget, caput
def load_settings( settings ):
    '''Load a whole bunch of PV values in one go
    settings is a list of tuples: (pv, value, datatype)
    Each caput completes (wait=True) before the next is issued.
    '''
    for pv_name, pv_value, pv_datatype in settings:
        caput( pv_name, pv_value, datatype=pv_datatype, wait=True )
def setup_hdf_writer_plugin():
    """Configure the HDF1 file-writer plugin for single-frame saving.

    Path/filename PVs are written as DBR_CHAR_STR (long strings); all
    other values use the records' native types (datatype None).
    """
    settings = [
                ("13SIM1:HDF1:FilePath","H:/tmp/hdfbug", dbr.DBR_CHAR_STR),
                ("13SIM1:HDF1:FileName", "testbug", dbr.DBR_CHAR_STR),
                ("13SIM1:HDF1:AutoIncrement", "Yes", None),
                ("13SIM1:HDF1:FileTemplate", "%s%s%d.h5", dbr.DBR_CHAR_STR),
                ("13SIM1:HDF1:AutoSave", "Yes", None),
                ("13SIM1:HDF1:FileWriteMode", "Single", None),
                ("13SIM1:HDF1:NumCapture", 1, None),
                ("13SIM1:HDF1:DeleteDriverFile", "No", None),
                ("13SIM1:HDF1:NumRowChunks", 0, None),
                ("13SIM1:HDF1:NumColChunks", 0, None),
                ("13SIM1:HDF1:NumFramesChunks", 0, None),
                ("13SIM1:HDF1:BoundaryAlign", 0, None),
                ("13SIM1:HDF1:BoundaryThreshold", 65536, None),
                ("13SIM1:HDF1:NumFramesFlush", 0, None),
                ("13SIM1:HDF1:Compression", "None", None),
                ("13SIM1:HDF1:NumExtraDims", 0, None),
                ("13SIM1:HDF1:ExtraDimSizeN", 1, None),
                ("13SIM1:HDF1:ExtraDimSizeX", 1, None),
                ("13SIM1:HDF1:ExtraDimSizeY", 1, None),
                ]
    load_settings( settings )
def stop_ioc():
    """Give autosave time to persist the PVs, then block until the
    operator has manually restarted the IOC (so autosave restores them)."""
    print "Waiting 30sec for autosave to do its thing"
    cothread.Sleep(30.0)
    print "Please SHUT DOWN the IOC - and restart it!"
    raw_input("Hit enter when IOC is running again and autosave has restored PVs... ")
def capture_one_image_single():
    """Acquire and store one frame with the HDF plugin in 'Single' mode.

    Blocks on the acquisition (timeout derived from the acquire period),
    then reports the file the plugin wrote.
    """
    settings = [
                ("13SIM1:cam1:ImageMode", "Single", None),
                ("13SIM1:cam1:ArrayCallbacks", "Enable" , None),
                ("13SIM1:cam1:ArrayCounter", 0, None),
                ("13SIM1:HDF1:EnableCallbacks", "Enable", None),
                ("13SIM1:HDF1:ArrayCounter", 0, None),
                ]
    load_settings( settings )
    # Allow 1.5x the acquire period plus a second of slack for the caput.
    timeout = caget( "13SIM1:cam1:AcquirePeriod_RBV" ) * 1.5 + 1.0
    print "Acquiring and storing a single image in \'Single\' mode"
    caput( "13SIM1:cam1:Acquire", 1, wait=True, timeout=timeout )
    # Wait for a brief moment to allow the file saving to complete
    cothread.Sleep( 1.0 )
    fname = caget( "13SIM1:HDF1:FullFileName_RBV", datatype=dbr.DBR_CHAR_STR )
    print "Captured into image file: ", fname
def capture_one_image_capture():
    """Switch the HDF plugin to 'Capture' mode, start capture, then fire
    a single acquisition (both caputs are fire-and-forget)."""
    settings = [
                ("13SIM1:HDF1:FileWriteMode", "Capture", None),
                ]
    load_settings( settings )
    print "Start capture mode"
    caput( "13SIM1:HDF1:Capture", 1, wait=False )
    # Wait for a brief moment to allow the file saving plugin to create the file
    cothread.Sleep(1.0)
    print "Acquire a single frame"
    caput( "13SIM1:cam1:Acquire", 1, wait=False )
def capture_one_image_stream():
    """Same as capture_one_image_capture(), but with the plugin in
    'Stream' file-write mode."""
    settings = [
                ("13SIM1:HDF1:FileWriteMode", "Stream", None),
                ]
    load_settings( settings )
    print "Start capture mode"
    caput( "13SIM1:HDF1:Capture", 1, wait=False )
    # Wait for a brief moment to allow the file saving plugin to create the file
    cothread.Sleep(1.0)
    print "Acquire a single frame"
    caput( "13SIM1:cam1:Acquire", 1, wait=False )
def enable_asyn_trace():
    """Point cam1's asyn record at the HDF plugin port, stop the memory
    poll scan, and raise the asyn trace/trace-info masks."""
    hdf_port_name = caget("13SIM1:HDF1:PortName_RBV")
    trace_settings = [
        ("13SIM1:cam1:AsynIO.PORT", hdf_port_name, None),
        ("13SIM1:HDF1:PoolUsedMem.SCAN", "Passive", None),
        ("13SIM1:cam1:AsynIO.TMSK", 0x31, None),
        ("13SIM1:cam1:AsynIO.TINM", 0x4, None),
    ]
    load_settings(trace_settings)
def main():
    """Reproduce the heap-corruption bug: configure the writer, have the
    operator restart the IOC (autosave restore), then write one file in
    'Single' mode and one in 'Capture' mode with asyn tracing on."""
    setup_hdf_writer_plugin()
    stop_ioc()
    #enable_asyn_trace()
    capture_one_image_single()
    enable_asyn_trace()
    capture_one_image_capture()
    #capture_one_image_stream()
# Script entry point.
if __name__=="__main__":
    main()
|
UTF-8
|
Python
| false | false | 2,014 |
15,272,903,712,244 |
c1ca0fac3d99f3fce6fa9bee1251fa8135e1261c
|
6d71ecb320c202e364dc99ea8c9adbc85abca53a
|
/iPyle/Previous Versions/iPyle-2.0.py
|
7cc6c2c3020c2ce8a15454d05a369475d7d05f0c
|
[] |
no_license
|
csr1471/CSC544Projects
|
https://github.com/csr1471/CSC544Projects
|
b37e5ef069812d1575cd775b179e1335ce0c4892
|
c9e9515bd2bff370f0bb32b99d9316ec08478a1b
|
refs/heads/master
| 2020-05-17T16:44:45.953834 | 2014-10-30T20:01:05 | 2014-10-30T20:01:05 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import sys, socket, re
import cPickle as pickle
from os import listdir
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
port = 1060
def main(args):
    """Dispatch to server or client mode based on the command line.

    args: sys.argv; the optional third element is the host to
    bind/connect to (defaults to 127.0.0.1).
    """
    hostname = args.pop() if len(args) == 3 else '127.0.0.1'
    if args[1:] == ['server']:
        iPyleServer(hostname)
    elif args[1:] == ['client']:
        iPyleClient(hostname)
    else:
        print 'usage: iPyle.py server|client [host]'
def iPyleServer(hostname):
    """Accept one client connection, exchange directory listings, and
    swap the files each side is missing, working under ./Server."""
    s.bind((hostname, port))
    s.listen(1)
    socket_conn, socket_addr = s.accept()
    server_dir = './Server'
    # Non-hidden files currently present on the server side.
    server_before = [f for f in listdir(server_dir) if f[0]!='.']
    # NOTE(review): a single recv() may deliver only part of the pickled
    # listing for large payloads -- confirm the framing assumption.
    client_before = pickle.loads(socket_conn.recv(8192))
    # Files missing on each side.
    to_add_client = [f for f in server_before if not f in client_before]
    to_add_server = [f for f in client_before if not f in server_before]
    sendFiles(socket_conn, to_add_client, server_dir)
    socket_conn.sendall(pickle.dumps(to_add_server))
    # Client replies with (has_files_for_server, count).
    server_addition, num_files = pickle.loads(socket_conn.recv(16))
    if server_addition:
        recvFiles(socket_conn, num_files, server_dir)
    s.close()
    print 'Connection closed.'
def iPyleClient(hostname):
s.connect((hostname, port))
client_dir = './Client'
client_files = [f for f in listdir(client_dir) if f[0]!='.']
s.sendall(pickle.dumps(client_files))
client_addition, num_files = pickle.loads(s.recv(16))
if client_addition:
recvFiles(s, num_files, directory)
to_add_server = pickle.loads(s.recv(2097152))
sendFiles(s, to_add_server, client_dir)
s.close()
print 'Connection closed.'
def formatFile(filename):
    """Read *filename* and return a pickled dict with keys 'filename'
    (the path's final component) and 'content' (the file's text).

    Returns None when *filename* contains no '/' separator (the original
    left this case's behavior undefined and also computed an unused
    file_path local).
    """
    with open(filename, 'r') as f:
        match = re.search('(.+)/(.+)', f.name)
        if not match:
            return None
        file_name = match.group(2)
        file_content = f.read()
    return pickle.dumps({
        'filename': file_name,
        'content': file_content,
    })
def sendFiles(sock, file_list, directory):
    """Announce (flag, count) over *sock*, then stream each file in
    *file_list* (pickled by formatFile) from *directory*."""
    if not file_list:
        sock.sendall(pickle.dumps([False, 0]))
        return
    sock.sendall(pickle.dumps([True, len(file_list)]))
    for name in file_list:
        sock.sendall(formatFile('{}/{}'.format(directory, name)))
def recvFiles(sock, n, directory):
    """Receive *n* pickled file dicts from *sock* and write each one's
    content into *directory* under its transmitted filename."""
    for _ in xrange(n):
        payload = pickle.loads(sock.recv(2097152))
        target = '{}/{}'.format(directory, payload['filename'])
        with open(target, 'w') as out:
            out.write(payload['content'])
# Script entry point: `python iPyle.py server|client [host]`.
if __name__ == '__main__':
    main(sys.argv)
|
UTF-8
|
Python
| false | false | 2,014 |
19,327,352,858,971 |
0a0cd858e561d708a5cb332b4199072d53de1b88
|
a07c50240888730c0dca7575ee87dc5c243e3c41
|
/10_Processes_and_Threads/10.3.a_threading管理并发操作.py
|
110b1541848bcd7a9d659900af109d1904006b10
|
[] |
no_license
|
CoryVegan/PythonSL
|
https://github.com/CoryVegan/PythonSL
|
190449bc783bbba2c4b62102f145fac2057e05f8
|
f4d281d2a0d2526364b62f16c3e6b48aa7b718f2
|
refs/heads/master
| 2020-03-08T12:13:17.588156 | 2014-07-06T02:38:45 | 2014-07-06T02:38:45 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
# 10.3 threading: managing concurrent operations
# 10.3.1 Thread objects
print '10.3.1Tread对象\n'
# The simplest way to use a Thread is to instantiate it with a target
# function and call start() to let it begin working.
import threading
def worker():
    """thread worker function"""
    print 'Worker'
    return
threads = []
for i in range(5):
    t = threading.Thread(target=worker)
    threads.append(t)
    t.start()
print
# It is useful to be able to create a thread and pass it arguments that
# tell it what work to do.  Any type of object can be passed as a thread
# argument; this example passes a number, which the thread prints.
def worker(num):
    """thread worker function"""
    print 'Worker: %s' % num
    return
threads = []
for i in range(5):
    t = threading.Thread(target=worker, args=(i,))
    threads.append(t)
    t.start()
# 10.3.2 Identifying the current thread
print '\n10.3.2确定当前线程'
# Using arguments to identify or name threads is cumbersome and
# unnecessary: every Thread instance has a name with a default value
# that can be changed when the thread is created.  Naming threads is
# useful in server processes made up of multiple service threads that
# handle different operations.
import time
def worker():
    print threading.currentThread().getName(), 'Starting'
    time.sleep(2)
    print threading.currentThread().getName(), 'Exiting'
def my_service():
    print threading.currentThread().getName(), 'Starting'
    time.sleep(3)
    print threading.currentThread().getName(), 'Exiting'
t = threading.Thread(name='my_service', target=my_service)
w = threading.Thread(name='worker', target=worker)
w2 = threading.Thread(target=worker) # use default name
w.start()
w2.start()
t.start()
# Each line of the debug output includes the current thread's name; the
# lines with a name like Thread-N correspond to the unnamed thread w2.
time.sleep(5)
print
# Most programs do not use print for debugging.  The logging module
# supports embedding the thread name in every log message via the
# %(threadName)s format code, which makes it possible to trace where
# messages come from.
import logging
logging.basicConfig(
    level = logging.DEBUG,
    format='[%(levelname)s], (%(threadName)-10s), %(message)s',
    )
def worker():
    logging.debug('Starting')
    time.sleep(2)
    logging.debug('Exiting')
def my_service():
    logging.debug('Starting')
    time.sleep(3)
    logging.debug('Exiting')
t = threading.Thread(name='my_service', target=my_service)
w = threading.Thread(name='worker', target=worker)
w2 = threading.Thread(target=worker) # use default name
w.start()
w2.start()
t.start()
# logging is also thread-safe, so messages from different threads are
# kept distinct in the output.
|
UTF-8
|
Python
| false | false | 2,014 |
15,633,680,957,467 |
aebf122bdf13e094ca560c3132ec559979d1f037
|
49e99f37172b7a4cccf826e0f65ab3c01fd19af8
|
/lib/sqlalchemy/orm/scoping.py
|
40bbb3299d9dfc2a3cc2fb624bf10e78c015c4c2
|
[
"MIT"
] |
permissive
|
obeattie/sqlalchemy
|
https://github.com/obeattie/sqlalchemy
|
401533ebfa4b7aba4b98332a84f7278b1befb284
|
376007fed7746d494dcb0166b22e512bfece02cd
|
refs/heads/master
| 2016-09-06T12:07:22.090887 | 2010-04-20T18:12:15 | 2010-04-20T18:12:15 | 164,502 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# scoping.py
# Copyright (C) the SQLAlchemy authors and contributors
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import sqlalchemy.exceptions as sa_exc
from sqlalchemy.util import ScopedRegistry, ThreadLocalRegistry, \
to_list, get_cls_kwargs, deprecated
from sqlalchemy.orm import (
EXT_CONTINUE, MapperExtension, class_mapper, object_session
)
from sqlalchemy.orm import exc as orm_exc
from sqlalchemy.orm.session import Session
__all__ = ['ScopedSession']
class ScopedSession(object):
    """Provides thread-local management of Sessions.
    Usage::
      Session = scoped_session(sessionmaker(autoflush=True))
      ... use session normally.
    """
    def __init__(self, session_factory, scopefunc=None):
        # session_factory: callable (typically a sessionmaker()) that
        # produces new Session objects.  scopefunc, when given, returns
        # the key identifying the current scope; otherwise sessions are
        # scoped per thread.
        self.session_factory = session_factory
        if scopefunc:
            self.registry = ScopedRegistry(session_factory, scopefunc)
        else:
            self.registry = ThreadLocalRegistry(session_factory)
        self.extension = _ScopedExt(self)
    def __call__(self, **kwargs):
        # Return the contextual Session.  With keyword arguments, a new
        # Session is built from the factory; unless the caller passes
        # scope=None it also becomes the contextual session, and an
        # error is raised if one is already present.
        # NOTE(review): the sentinel default is False, so only an
        # explicit scope=None reaches the bare-factory branch.
        if kwargs:
            scope = kwargs.pop('scope', False)
            if scope is not None:
                if self.registry.has():
                    raise sa_exc.InvalidRequestError("Scoped session is already present; no new arguments may be specified.")
                else:
                    sess = self.session_factory(**kwargs)
                    self.registry.set(sess)
                    return sess
            else:
                # scope=None: construct a Session without registering it.
                return self.session_factory(**kwargs)
        else:
            return self.registry()
    def remove(self):
        """Dispose of the current contextual session."""
        # Close the session (if one exists) before dropping it from the
        # registry so the next access creates a fresh one.
        if self.registry.has():
            self.registry().close()
        self.registry.clear()
    @deprecated("Session.mapper is deprecated.  "
        "Please see http://www.sqlalchemy.org/trac/wiki/UsageRecipes/SessionAwareMapper "
        "for information on how to replicate its behavior.")
    def mapper(self, *args, **kwargs):
        """return a mapper() function which associates this ScopedSession with the Mapper.
        DEPRECATED.
        """
        from sqlalchemy.orm import mapper
        # Split off keyword arguments destined for the _ScopedExt
        # extension (e.g. validate, save_on_init) from mapper() kwargs.
        extension_args = dict((arg, kwargs.pop(arg))
                              for arg in get_cls_kwargs(_ScopedExt)
                              if arg in kwargs)
        kwargs['extension'] = extension = to_list(kwargs.get('extension', []))
        if extension_args:
            extension.append(self.extension.configure(**extension_args))
        else:
            extension.append(self.extension)
        return mapper(*args, **kwargs)
    def configure(self, **kwargs):
        """reconfigure the sessionmaker used by this ScopedSession."""
        self.session_factory.configure(**kwargs)
    def query_property(self, query_cls=None):
        """return a class property which produces a `Query` object against the
        class when called.
        e.g.::
            Session = scoped_session(sessionmaker())
            class MyClass(object):
                query = Session.query_property()
            # after mappers are defined
            result = MyClass.query.filter(MyClass.name=='foo').all()
        Produces instances of the session's configured query class by
        default.  To override and use a custom implementation, provide
        a ``query_cls`` callable.  The callable will be invoked with
        the class's mapper as a positional argument and a session
        keyword argument.
        There is no limit to the number of query properties placed on
        a class.
        """
        class query(object):
            # Descriptor: resolves the owning class's mapper on each
            # access and binds a Query to the contextual session.
            def __get__(s, instance, owner):
                try:
                    mapper = class_mapper(owner)
                    if mapper:
                        if query_cls:
                            # custom query class
                            return query_cls(mapper, session=self.registry())
                        else:
                            # session's configured query class
                            return self.registry().query(mapper)
                except orm_exc.UnmappedClassError:
                    # Unmapped classes simply yield no query.
                    return None
        return query()
def instrument(name):
    # Build a proxy method that forwards the named Session method to the
    # session belonging to the current scope.
    def proxy(self, *args, **kwargs):
        contextual_session = self.registry()
        return getattr(contextual_session, name)(*args, **kwargs)
    return proxy
for meth in Session.public_methods:
    setattr(ScopedSession, meth, instrument(meth))
def makeprop(name):
    # Expose a Session attribute as a read/write property that always
    # targets the contextual session of this ScopedSession.
    def getter(self):
        return getattr(self.registry(), name)
    def setter(self, value):
        setattr(self.registry(), name, value)
    return property(getter, setter)
for prop in ('bind', 'dirty', 'deleted', 'new', 'identity_map', 'is_active', 'autoflush'):
    setattr(ScopedSession, prop, makeprop(prop))
def clslevel(name):
    # Forward a class-level accessor straight to the Session class itself.
    def delegate(cls, *args, **kwargs):
        target = getattr(Session, name)
        return target(*args, **kwargs)
    return classmethod(delegate)
for prop in ('close_all', 'object_session', 'identity_key'):
    setattr(ScopedSession, prop, clslevel(prop))
class _ScopedExt(MapperExtension):
    """MapperExtension that ties mapped classes to a ScopedSession,
    supporting the deprecated ``Session.mapper`` behavior: a ``query``
    class attribute, an auto-generated ``__init__``, and save-on-init.
    """
    def __init__(self, context, validate=False, save_on_init=True):
        # context: the owning ScopedSession.
        self.context = context
        self.validate = validate
        self.save_on_init = save_on_init
        self.set_kwargs_on_init = True
    def validating(self):
        # Variant of this extension with attribute validation enabled.
        return _ScopedExt(self.context, validate=True)
    def configure(self, **kwargs):
        # Fresh extension bound to the same ScopedSession with new options.
        return _ScopedExt(self.context, **kwargs)
    def instrument_class(self, mapper, class_):
        # Attach a `query` descriptor that proxies to a Query against the
        # contextual session, both as attribute access and as a call.
        class query(object):
            def __getattr__(s, key):
                return getattr(self.context.registry().query(class_), key)
            def __call__(s):
                return self.context.registry().query(class_)
            def __get__(self, instance, cls):
                return self
        if not 'query' in class_.__dict__:
            class_.query = query()
        # Only replace a truly default __init__; user-defined ones win.
        if self.set_kwargs_on_init and class_.__init__ is object.__init__:
            class_.__init__ = self._default__init__(mapper)
    def _default__init__(ext, mapper):
        # Generated __init__ that assigns every keyword argument as an
        # attribute, optionally validating names against mapped properties.
        def __init__(self, **kwargs):
            for key, value in kwargs.iteritems():
                if ext.validate:
                    if not mapper.get_property(key, resolve_synonyms=False,
                                                    raiseerr=False):
                        raise sa_exc.ArgumentError(
                            "Invalid __init__ argument: '%s'" % key)
                setattr(self, key, value)
        return __init__
    def init_instance(self, mapper, class_, oldinit, instance, args, kwargs):
        # Save the new instance into the contextual session (or a session
        # passed explicitly via the _sa_session keyword).
        if self.save_on_init:
            session = kwargs.pop('_sa_session', None)
            if session is None:
                session = self.context.registry()
            session._save_without_cascade(instance)
        return EXT_CONTINUE
    def init_failed(self, mapper, class_, oldinit, instance, args, kwargs):
        # If construction raised, undo the pending save.
        sess = object_session(instance)
        if sess:
            sess.expunge(instance)
        return EXT_CONTINUE
    def dispose_class(self, mapper, class_):
        # Remove the query descriptor installed by instrument_class().
        if hasattr(class_, 'query'):
            delattr(class_, 'query')
|
UTF-8
|
Python
| false | false | 2,010 |
12,936,441,511,682 |
9780f107a56421aff2f616a27787f8c9b77b45a9
|
dda14157de49ccad72320723416f1f15127f362b
|
/src/WatersOfShiloah.py
|
79584e0fe26fd94ffe1efb7b2ff1ccb15cac72ee
|
[
"LGPL-2.1-only"
] |
non_permissive
|
epage/Waters-of-Shiloah
|
https://github.com/epage/Waters-of-Shiloah
|
b9240501fae07d260aa170a0bb1093e3d48c066e
|
f7a4eb919b0826221e80371f8d9c53e74bfff56d
|
refs/heads/master
| 2016-09-08T02:07:20.682876 | 2011-03-16T03:32:58 | 2011-03-16T03:32:58 | 1,367,582 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Launcher stub: extend the import path to the installed application
# libraries, then start the GTK frontend.
import sys
sys.path.append('/opt/WatersOfShiloah/lib')
import watersofshiloah_gtk
if __name__ == "__main__":
    watersofshiloah_gtk.run()
|
UTF-8
|
Python
| false | false | 2,011 |
1,125,281,435,243 |
3451f283e3158d38c3a1f43cbd8dfbf7b624470e
|
50e39231d8bea2a01a9d5db69aeb5c1a8054642b
|
/wafer/modules/attribute.py
|
06cf74ae39e9ad33e86f530aa0508403842cb303
|
[] |
no_license
|
leecrest/wafer
|
https://github.com/leecrest/wafer
|
eb09e96d79e149cfee4d6fc40270996618bdea6c
|
58b148d03dc18dcfdf6bac1c5ed410f1fe112ad3
|
refs/heads/master
| 2020-05-18T18:16:41.566961 | 2014-07-15T13:37:31 | 2014-07-15T13:37:31 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# coding=utf-8
"""
@author : leecrest
@time : 2014/7/4 9:55
@brief : 属性
"""
import wafer
class CAttrBase:
    """
    Attribute base class: binds one row of data stored in a Redis hash.

    Subclasses are expected to set m_TableName (the Redis hash name) and
    m_AttrConfig (attribute schema: an "Attr" section with per-attribute
    Name/Default/Min/Max, and an "ID2Name" mapping).
    """
    m_TableName = ""       # Redis hash key; must be provided by subclasses
    m_AttrConfig = None    # attribute schema; None disables schema handling
    def __init__(self, iIndex):
        # iIndex: the row id, used as the field name inside the hash.
        self.m_ID = iIndex
        self.m_Redis = wafer.GetModule("redis")
        self.m_Inited = False
        self.m_Update = False
        self.m_New = True
        # NOTE(review): self.m_Data is never initialized before InitAttr()
        # and Load() use it (InitAttr writes self.m_Data[...], Load calls
        # self.m_Data.update(...)); it is only created in New().  Looks
        # like `self.m_Data = {}` is missing here -- confirm against
        # subclasses/callers.
        self.InitAttr()
        self.Load()
    def Load(self):
        # Pull this row from Redis; missing rows leave the object in the
        # "new/uninitialized" state.
        data = self.m_Redis.hget(self.m_TableName, self.m_ID)
        if data is None:
            self.m_Inited = False
            self.m_New = True
        else:
            # NOTE(review): assumes hget returns a mapping suitable for
            # dict.update -- verify the redis wrapper's return type.
            self.m_Data.update(data)
            self.m_Inited = True
            self.m_New = False
        self.m_Update = False
    def InitAttr(self):
        # Seed m_Data with each configured attribute's default value.
        if not self.m_AttrConfig:
            return
        dAttrCfg = self.m_AttrConfig.get("Attr", {})
        # NOTE(review): iteritems() yields (key, value) tuples, so
        # dCfg["Name"] would raise TypeError; presumably this should be
        # `for sName, dCfg in dAttrCfg.iteritems():` -- confirm.
        for dCfg in dAttrCfg.iteritems():
            self.m_Data[dCfg["Name"]] = dCfg["Default"]
    def New(self):
        # Create a fresh row with default attributes and mark it dirty.
        if not self.m_New:
            return
        self.m_New = False
        self.m_Inited = True
        self.m_Data = {}
        self.InitAttr()
        self.Update()
    def Delete(self):
        # Remove the row from Redis and reset this object's state.
        if self.m_New:
            return
        self.m_Redis.hdel(self.m_TableName, self.m_ID)
        self.m_New = True
        self.m_Data = None
        self.m_Inited = False
        self.m_Update = False
    def SetAttr(self, sKey, sValue):
        # Set (or, with sValue=None, delete) one attribute, clamping the
        # value to the configured Min/Max bounds when a schema exists.
        if not self.m_Inited:
            return
        dAttrCfg = self.m_AttrConfig.get("Attr", {})[sKey]
        if dAttrCfg and not sValue is None:
            iMin = dAttrCfg.get("Min", None)
            if not iMin is None and sValue < iMin:
                sValue = iMin
            iMax = dAttrCfg.get("Max", None)
            if not iMax is None and sValue > iMax:
                sValue = iMax
        if sKey in self.m_Data:
            # No-op when the value is unchanged (avoids dirtying the row).
            if self.m_Data[sKey] == sValue:
                return
            if sValue is None:
                del self.m_Data[sKey]
            else:
                self.m_Data[sKey] = sValue
        else:
            if sValue is None:
                return
            self.m_Data[sKey] = sValue
        self.Update()
    def GetAttrByName(self, sKey):
        # Fetch an attribute by its schema name; None when absent.
        if not self.m_Inited:
            return
        return self.m_Data.get(sKey, None)
    def GetAttrByID(self, iAttrID):
        # Fetch an attribute by its numeric id via the ID2Name mapping.
        if not self.m_Inited:
            return
        sName = self.m_AttrConfig["ID2Name"][str(iAttrID)]
        return self.m_Data.get(sName, None)
    def DelAttr(self, sKey):
        # Remove one attribute and mark the row dirty.
        if not self.m_Inited or not sKey in self.m_Data:
            return
        del self.m_Data[sKey]
        self.Update()
    def Clear(self):
        # Drop all attributes and mark the row dirty.
        if not self.m_Inited:
            return
        self.m_Data = {}
        self.Update()
    def Update(self):
        # Mark the row dirty; actual persistence happens in Save().
        if not self.m_Inited:
            return
        self.m_Update = True
    def Save(self):
        # Persist the row to Redis when it has pending changes.
        if not self.m_Inited or not self.m_Update:
            return
        self.m_Redis.hset(self.m_TableName, self.m_ID, self.m_Data)
        self.m_Update = False
__all__ = ["CAttrBase"]
|
UTF-8
|
Python
| false | false | 2,014 |
14,276,471,297,202 |
161795990f7e1024ab84405b8f14fd9341386984
|
94b9973b987d55f281575a9c48cb1593bdfa7adf
|
/problem_31.py
|
b8b4af4a7382051402d6e61071263d2b668c7d50
|
[] |
no_license
|
maxwolffe/ProjectEuler
|
https://github.com/maxwolffe/ProjectEuler
|
225a1d7ef264e90e9cf8ac5781646a1c02cfa21d
|
caee1c150e585f30a4ce395a690b899d0c457ee8
|
refs/heads/master
| 2021-01-13T01:29:51.539708 | 2014-06-08T06:58:53 | 2014-06-08T06:58:53 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def coin_sum(list_of_coins, sum1):
    """Count the number of ways to form the amount *sum1* from unlimited
    copies of the denominations in *list_of_coins* (Project Euler 31).

    Amounts within 0.001 of zero count as an exact match, absorbing
    floating-point error from repeatedly subtracting decimal coin values.
    """
    if abs(sum1) <= 0.001:
        return 1
    if sum1 <= 0 or not list_of_coins:
        return 0
    # Either spend one more of the first denomination, or retire it.
    return (coin_sum(list_of_coins, sum1 - list_of_coins[0])
            + coin_sum(list_of_coins[1:], sum1))
# Denominations in pounds: the full UK coin set (Project Euler 31) and a
# shorter list for quick manual checks.
uk_list = [2, 1, .5, .2, .1, .05, .02, .01]
test_list = [2, 1, .5, .2]
|
UTF-8
|
Python
| false | false | 2,014 |
15,109,694,984,434 |
a42ed2d2a7b89d5a7006104b882987a86e222c45
|
c9cf1c1e34907952aea74a5f2c548de653c325ab
|
/substanced/principal/tests/test_principal.py
|
0f14c05e85abb9a5f5e26afe86948fcf3c2faf56
|
[
"BSD-3-Clause-Modification"
] |
permissive
|
dextermilo/substanced
|
https://github.com/dextermilo/substanced
|
9c02f35aa3265b08699a8f63e33f057fdee8570e
|
dde61c10b6c89cc0a9e217414be89b3ebafea49f
|
refs/heads/master
| 2021-01-20T15:50:59.354739 | 2012-09-13T23:05:41 | 2012-09-13T23:05:41 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import unittest
from pyramid import testing
import colander
from zope.interface import implementer
class TestPrincipals(unittest.TestCase):
    """Unit tests for the Principals service folder (users/groups/resets)."""
    def _makeOne(self):
        # System under test: a bare Principals folder.
        from .. import Principals
        return Principals()
    def test___sd_addable__True(self):
        intr = {'content_type':'Users'}
        inst = self._makeOne()
        self.assertTrue(inst.__sd_addable__(intr))
    def test___sd_addable__False(self):
        intr = {'content_type':'Wrong'}
        inst = self._makeOne()
        self.assertFalse(inst.__sd_addable__(intr))
    def test_after_create(self):
        # after_create should populate the users/groups/resets subfolders
        # via the content registry.
        inst = self._makeOne()
        ob = testing.DummyResource()
        content = DummyContentRegistry(ob)
        registry = testing.DummyResource()
        registry.content = content
        inst.after_create(None, registry)
        self.assertEqual(inst['users'], ob)
        self.assertEqual(inst['groups'], ob)
        self.assertEqual(inst['resets'], ob)
    def test_add_user(self):
        inst = self._makeOne()
        users = inst['users'] = testing.DummyResource()
        user = inst.add_user('login', 'password')
        self.assertTrue('login' in users)
        self.assertEqual(user.__name__, 'login')
    def test_add_group(self):
        inst = self._makeOne()
        groups = inst['groups'] = testing.DummyResource()
        group = inst.add_group('groupname')
        self.assertTrue('groupname' in groups)
        self.assertEqual(group.__name__, 'groupname')
    def test_add_reset(self):
        # add_reset should connect user->reset in the objectmap, grant an
        # ACL on the reset, and store it in the resets folder.
        from .. import UserToPasswordReset
        resets = testing.DummyResource()
        inst = self._makeOne()
        objectmap = DummyObjectMap()
        inst.__objectmap__ = objectmap
        inst.add('resets', resets)
        user = testing.DummyResource()
        reset = inst.add_reset(user)
        self.assertEqual(
            objectmap.connections,
            [(user, reset, UserToPasswordReset)])
        self.assertTrue(reset.__acl__)
        self.assertEqual(len(inst), 1)
class TestUsers(unittest.TestCase):
    """Unit tests for the Users folder's addable-content policy."""
    def _makeOne(self):
        from .. import Users
        return Users()
    def test___sd_addable__True(self):
        intr = {'content_type':'User'}
        inst = self._makeOne()
        self.assertTrue(inst.__sd_addable__(intr))
    def test___sd_addable__False(self):
        intr = {'content_type':'Wrong'}
        inst = self._makeOne()
        self.assertFalse(inst.__sd_addable__(intr))
class TestPasswordResets(unittest.TestCase):
    """Unit tests for the PasswordResets folder's addable-content policy."""
    def _makeOne(self):
        from .. import PasswordResets
        return PasswordResets()
    def test___sd_addable__True(self):
        intr = {'content_type':'Password Reset'}
        inst = self._makeOne()
        self.assertTrue(inst.__sd_addable__(intr))
    def test___sd_addable__False(self):
        intr = {'content_type':'Wrong'}
        inst = self._makeOne()
        self.assertFalse(inst.__sd_addable__(intr))
class TestGroups(unittest.TestCase):
    """Unit tests for the Groups folder's addable-content policy."""
    def _makeOne(self):
        from .. import Groups
        return Groups()
    def test___sd_addable__True(self):
        intr = {'content_type':'Group'}
        inst = self._makeOne()
        self.assertTrue(inst.__sd_addable__(intr))
    def test___sd_addable__False(self):
        intr = {'content_type':'Wrong'}
        inst = self._makeOne()
        self.assertFalse(inst.__sd_addable__(intr))
class Test_groupname_validator(unittest.TestCase):
    """Unit tests for the colander groupname_validator deferred validator."""
    def _makeOne(self, node, kw):
        from .. import groupname_validator
        return groupname_validator(node, kw)
    def _makeKw(self):
        # Build the minimal site graph the validator walks:
        # context/__services__/principals/{groups,users}, wired onto a
        # dummy request.
        request = testing.DummyRequest()
        context = DummyFolder()
        services = DummyFolder()
        principals = DummyFolder()
        groups = DummyFolder()
        users = DummyFolder()
        context['__services__'] = services
        context['__services__']['principals'] = principals
        context['__services__']['principals']['groups'] = groups
        context['__services__']['principals']['users'] = users
        request.services = services
        request.context = context
        return dict(request=request)
    def test_it_not_adding_with_exception(self):
        # Renaming to a name the groups folder rejects must raise Invalid.
        kw = self._makeKw()
        request = kw['request']
        request.registry.content = DummyContentRegistry(True)
        kw['request'].context['abc'] = testing.DummyResource()
        def check_name(*arg, **kw):
            raise Exception('fred')
        kw['request'].services['principals']['groups'].check_name = check_name
        node = object()
        v = self._makeOne(node, kw)
        self.assertRaises(colander.Invalid, v, node, 'abc')
    def test_it_adding_with_exception(self):
        # Adding a group whose name already exists in the context must fail.
        kw = self._makeKw()
        request = kw['request']
        request.registry.content = DummyContentRegistry(False)
        request.context['abc'] = testing.DummyResource()
        node = object()
        v = self._makeOne(node, kw)
        self.assertRaises(colander.Invalid, v, node, 'abc')
    def test_it_adding_with_exception_exists_in_users(self):
        # A name already taken by a user must also be rejected.
        kw = self._makeKw()
        request = kw['request']
        request.registry.content = DummyContentRegistry(False)
        services = kw['request'].services
        services['principals']['users']['abc'] = testing.DummyResource()
        node = object()
        v = self._makeOne(node, kw)
        self.assertRaises(colander.Invalid, v, node, 'abc')
class Test_members_widget(unittest.TestCase):
    """Tests for the members_widget deferred widget factory."""

    def _makeOne(self, node, kw):
        from .. import members_widget
        return members_widget(node, kw)

    def test_it(self):
        """Widget values enumerate (objectid, name) pairs of site users."""
        from ...testing import make_site
        site = make_site()
        user = testing.DummyResource()
        user.__objectid__ = 1
        site['__services__']['principals']['users']['user'] = user
        request = testing.DummyRequest()
        request.context = site
        widget = self._makeOne(None, dict(request=request))
        self.assertEqual(widget.values, [('1', 'user')])
class TestGroupPropertysheet(unittest.TestCase):
    """Tests for GroupPropertySheet (get/set of group properties)."""
    def _makeOne(self, context, request):
        from .. import GroupPropertySheet
        return GroupPropertySheet(context, request)
    def _makeParent(self):
        # Parent folder carrying the object map the sheet uses to resolve
        # member object ids.
        parent = DummyFolder()
        parent['__services__'] = DummyFolder()
        objectmap = DummyObjectMap()
        parent.__objectmap__ = objectmap
        return parent
    def test_get(self):
        # get() must expose name/description and stringified member ids.
        context = testing.DummyResource()
        context.__name__ = 'name'
        context.memberids = [1]
        context.description = 'desc'
        request = testing.DummyRequest()
        inst = self._makeOne(context, request)
        props = inst.get()
        self.assertEqual(props['description'], 'desc')
        self.assertEqual(props['members'], ['1'])
        self.assertEqual(props['name'], 'name')
    def test_set_newname_different_than_oldname(self):
        # Setting a different name must rename the group in its parent and
        # rewrite the membership relation (clear then connect).
        context = testing.DummyResource()
        request = testing.DummyRequest()
        parent = self._makeParent()
        parent['oldname'] = context
        def rename(old, new):
            self.assertEqual(old, 'oldname')
            self.assertEqual(new, 'name')
            context.renamed = True
        parent.rename = rename
        def clear():
            context.cleared = True
        def connect(members):
            self.assertEqual(members, (1,))
            context.connected = True
        context.memberids = testing.DummyResource()
        context.memberids.clear = clear
        context.memberids.connect = connect
        inst = self._makeOne(context, request)
        inst.set({'description':'desc', 'name':'name', 'members':(1,)})
        self.assertEqual(context.description, 'desc')
        self.assertTrue(context.renamed)
        self.assertTrue(context.cleared)
        self.assertTrue(context.connected)
    def test_set_newname_same_as_oldname(self):
        # With an unchanged name, members are still rewritten but no
        # rename should be attempted.
        context = testing.DummyResource()
        request = testing.DummyRequest()
        parent = self._makeParent()
        parent['oldname'] = context
        def clear():
            context.cleared = True
        def connect(members):
            self.assertEqual(members, (1,))
            context.connected = True
        context.memberids = testing.DummyResource()
        context.memberids.clear = clear
        context.memberids.connect = connect
        inst = self._makeOne(context, request)
        inst.set({'description':'desc', 'name':'name', 'members':(1,)})
        self.assertEqual(context.description, 'desc')
        self.assertTrue(context.cleared)
        self.assertTrue(context.connected)
class TestGroup(unittest.TestCase):
    """Tests for the Group content type constructor."""

    def _makeOne(self, description=''):
        from .. import Group
        return Group(description)

    def test_ctor(self):
        """The constructor stores the supplied description."""
        group = self._makeOne('abc')
        self.assertEqual(group.description, 'abc')
class Test_login_validator(unittest.TestCase):
    """Tests for the login_validator colander deferred validator.

    A login must pass the users folder's check_name when changed, and must
    not collide with an existing group name.
    """
    def _makeOne(self, node, kw):
        from .. import login_validator
        return login_validator(node, kw)
    def test_adding_check_name_fails(self):
        # Adding a user: check_name on the user raises, validator must
        # translate that into colander.Invalid.
        from ...testing import make_site
        site = make_site()
        user = testing.DummyResource()
        user.__objectid__ = 1
        def check_name(v): raise ValueError(v)
        user.check_name = check_name
        site['__services__']['principals']['users']['user'] = user
        request = testing.DummyRequest()
        request.context = user
        request.registry.content = DummyContentRegistry(False)
        kw = dict(request=request)
        inst = self._makeOne(None, kw)
        self.assertRaises(colander.Invalid, inst, None, 'name')
    def test_not_adding_check_name_fails(self):
        # Renaming an existing user: the *users folder's* check_name raises.
        from ...testing import make_site
        site = make_site()
        user = testing.DummyResource()
        user.__objectid__ = 1
        def check_name(*arg):
            raise ValueError('a')
        users = site['__services__']['principals']['users']
        users['user'] = user
        users.check_name = check_name
        request = testing.DummyRequest()
        request.context = user
        request.registry.content = DummyContentRegistry(True)
        kw = dict(request=request)
        inst = self._makeOne(None, kw)
        self.assertRaises(colander.Invalid, inst, None, 'newname')
    def test_not_adding_newname_same_as_old(self):
        # Keeping the same login must validate cleanly (returns None) even
        # though check_name would raise for a genuinely new name.
        from ...testing import make_site
        site = make_site()
        user = testing.DummyResource()
        user.__objectid__ = 1
        def check_name(v): raise ValueError(v)
        user.check_name = check_name
        site['__services__']['principals']['users']['user'] = user
        request = testing.DummyRequest()
        request.context = user
        request.registry.content = DummyContentRegistry(True)
        kw = dict(request=request)
        inst = self._makeOne(None, kw)
        self.assertEqual(inst(None, 'user'), None)
    def test_groupname_exists(self):
        # A login equal to an existing group name is rejected.
        from ...testing import make_site
        site = make_site()
        user = testing.DummyResource()
        user.__objectid__ = 1
        def check_name(v): raise ValueError(v)
        user.check_name = check_name
        site['__services__']['principals']['users']['user'] = user
        site['__services__']['principals']['groups']['group'] = user
        request = testing.DummyRequest()
        request.context = user
        request.registry.content = DummyContentRegistry(True)
        kw = dict(request=request)
        inst = self._makeOne(None, kw)
        self.assertRaises(colander.Invalid, inst, None, 'group')
class Test_groups_widget(unittest.TestCase):
    """Tests for the groups_widget deferred widget factory."""

    def _makeOne(self, node, kw):
        from .. import groups_widget
        return groups_widget(node, kw)

    def test_it(self):
        """Widget values enumerate (objectid, name) pairs of site groups."""
        from ...testing import make_site
        site = make_site()
        group = testing.DummyResource()
        group.__objectid__ = 1
        site['__services__']['principals']['groups']['group'] = group
        request = testing.DummyRequest()
        request.context = site
        widget = self._makeOne(None, dict(request=request))
        self.assertEqual(widget.values, [('1', 'group')])
class TestUserPropertySheet(unittest.TestCase):
    """Tests for UserPropertySheet (get/set of user properties)."""
    def _makeOne(self, context, request):
        from .. import UserPropertySheet
        return UserPropertySheet(context, request)
    def test_get(self):
        # get() exposes email, login (the __name__) and stringified
        # group ids.
        context = testing.DummyResource()
        context.__name__ = 'fred'
        context.email = 'email'
        context.groupids = [1,2]
        request = testing.DummyRequest()
        inst = self._makeOne(context, request)
        self.assertEqual(inst.get(),
                         {'email':'email', 'login':'fred', 'groups':['1', '2']})
    def test_set_newname_different_than_oldname(self):
        # A changed login must rename the user in its parent and rewrite
        # the group relation (clear then connect).
        context = testing.DummyResource()
        request = testing.DummyRequest()
        parent = testing.DummyResource()
        parent['oldname'] = context
        def rename(old, new):
            self.assertEqual(old, 'oldname')
            self.assertEqual(new, 'name')
            context.renamed = True
        parent.rename = rename
        def clear():
            context.cleared = True
        def connect(members):
            self.assertEqual(members, (1,))
            context.connected = True
        context.groupids = testing.DummyResource()
        context.groupids.clear = clear
        context.groupids.connect = connect
        inst = self._makeOne(context, request)
        inst.set({'email':'email', 'login':'name', 'groups':(1,)})
        self.assertEqual(context.email, 'email')
        self.assertTrue(context.renamed)
        self.assertTrue(context.cleared)
        self.assertTrue(context.connected)
    def test_set_newname_same_as_oldname(self):
        # With an unchanged login, groups are still rewritten but no
        # rename should be attempted.
        context = testing.DummyResource()
        request = testing.DummyRequest()
        parent = testing.DummyResource()
        parent['name'] = context
        def clear():
            context.cleared = True
        def connect(members):
            self.assertEqual(members, (1,))
            context.connected = True
        context.groupids = testing.DummyResource()
        context.groupids.clear = clear
        context.groupids.connect = connect
        inst = self._makeOne(context, request)
        inst.set({'email':'email', 'login':'name', 'groups':(1,)})
        self.assertEqual(context.email, 'email')
        self.assertTrue(context.cleared)
        self.assertTrue(context.connected)
class TestUser(unittest.TestCase):
    """Tests for the User content type (password hashing and reset mail)."""
    def setUp(self):
        self.config = testing.setUp()
    def tearDown(self):
        testing.tearDown()
    def _makeOne(self, password, email=''):
        from .. import User
        return User(password, email)
    def test_check_password(self):
        # check_password compares against the hash stored at construction.
        inst = self._makeOne('abc')
        self.assertTrue(inst.check_password('abc'))
        self.assertFalse(inst.check_password('abcdef'))
    def test_set_password(self):
        # set_password re-hashes; verify through the user's own manager.
        inst = self._makeOne('abc')
        inst.set_password('abcdef')
        self.assertTrue(inst.pwd_manager.check(inst.password, 'abcdef'))
    def test_email_password_reset(self):
        # email_password_reset must register a reset for this user and
        # deliver mail (captured by pyramid_mailer's testing outbox).
        from ...testing import make_site
        from pyramid_mailer import get_mailer
        site = make_site()
        principals = site['__services__']['principals']
        resets = principals['resets'] = testing.DummyResource()
        def add_reset(user):
            self.assertEqual(user, inst)
        resets.add_reset = add_reset
        request = testing.DummyRequest()
        request.mgmt_path = lambda *arg: '/mgmt'
        request.root = site
        self.config.include('pyramid_mailer.testing')
        inst = self._makeOne('password')
        principals['users']['user'] = inst
        inst.email_password_reset(request)
        self.assertTrue(get_mailer(request).outbox)
class Test_groupfinder(unittest.TestCase):
    """Tests for the groupfinder authentication-policy callback."""
    def _callFUT(self, userid, request):
        from .. import groupfinder
        return groupfinder(userid, request)
    def test_with_no_objectmap(self):
        # No __objectmap__ reachable from the context: groups cannot be
        # resolved, callback returns None.
        from ...interfaces import IFolder
        request = testing.DummyRequest()
        context = testing.DummyResource(__provides__=IFolder)
        services = testing.DummyResource()
        context['__services__'] = services
        request.context = context
        result = self._callFUT(1, request)
        self.assertEqual(result, None)
    def test_with_objectmap_no_user(self):
        # Object map exists but does not resolve the userid: None.
        from ...interfaces import IFolder
        request = testing.DummyRequest()
        context = testing.DummyResource(__provides__=IFolder)
        omap = testing.DummyResource()
        omap.object_for = lambda *arg: None
        context.__objectmap__ = omap
        request.context = context
        result = self._callFUT(1, request)
        self.assertEqual(result, None)
    def test_garden_path(self):
        # Userid resolves to a user: callback returns the user's groupids.
        from ...interfaces import IFolder
        request = testing.DummyRequest()
        context = testing.DummyResource(__provides__=IFolder)
        omap = testing.DummyResource()
        user = testing.DummyResource()
        user.groupids = (1,2)
        omap.object_for = lambda *arg: user
        context.__objectmap__ = omap
        request.context = context
        result = self._callFUT(1, request)
        self.assertEqual(result, (1,2))
class TestPasswordReset(unittest.TestCase):
    """Tests for the PasswordReset content object."""

    def _makeOne(self):
        from .. import PasswordReset
        return PasswordReset()

    def test_reset_password(self):
        """Resetting stores the new password on the mapped user and
        removes the reset object from its parent."""
        from ...interfaces import IFolder
        container = testing.DummyResource(__provides__=IFolder)
        user = testing.DummyResource()
        def set_password(password):
            user.password = password
        user.set_password = set_password
        container.__objectmap__ = DummyObjectMap((user,))
        reset = self._makeOne()
        container['reset'] = reset
        reset.reset_password('password')
        self.assertEqual(user.password, 'password')
        self.assertFalse('reset' in container)
from ...interfaces import IFolder

# Minimal IFolder-providing container used as a stand-in for real folders.
@implementer(IFolder)
class DummyFolder(testing.DummyResource):
    def check_name(self, value):
        # Mirror real folder behavior: a duplicate child name is an error.
        if value in self:
            raise KeyError(value)
    def rename(self, oldname, newname):
        # Re-key an existing child under a new name.
        old = self[oldname]
        del self[oldname]
        self[newname] = old
class DummyObjectMap(object):
    """Object-map test double: canned sources() plus connect() recording."""

    def __init__(self, result=()):
        # Value handed back by sources(); connect() calls are accumulated
        # for later inspection by tests.
        self.result = result
        self.connections = []

    def sources(self, object, reftype):
        """Return the preconfigured result regardless of arguments."""
        return self.result

    def connect(self, source, target, reftype):
        """Record the (source, target, reftype) triple."""
        self.connections.append((source, target, reftype))
class DummyContentRegistry(object):
    """Content-registry test double whose checks and factory always yield
    one fixed value."""

    def __init__(self, result):
        self.result = result

    def istype(self, context, type):
        """Pretend type check; always the canned result."""
        return self.result

    def create(self, name, *arg, **kw):
        """Pretend factory; always the canned result."""
        return self.result
|
UTF-8
|
Python
| false | false | 2,012 |
17,660,905,558,338 |
edef4ff9a7c71acb12301fd2469cae2726d31f8f
|
663de8810049c93e1c233fc1f6eb6d1adbd720b1
|
/web-service/gae/05.JSON/utils.py
|
0bf62196681aa57712a462f4320c84ec2e79dd51
|
[] |
no_license
|
raindrop-aqua/learn-python
|
https://github.com/raindrop-aqua/learn-python
|
781947407261237649cdb874f9b43e6d22397c68
|
ebc0c07e740d731de7923720db29d4bfaa93ecc9
|
refs/heads/master
| 2020-04-09T11:10:50.995887 | 2014-11-16T11:02:34 | 2014-11-16T11:02:34 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
from google.appengine.ext import db
import time
import datetime
SIMPLE_TYPES = (int, long, float, bool, dict, basestring, list)
def obj_to_dict(obj):
    '''
    Convert a db.Model instance into a JSON-serializable dictionary.

    Simple values pass through unchanged, dates/datetimes become integer
    milliseconds since the epoch (JavaScript ``new Date()`` convention),
    GeoPt becomes a {'lat', 'lon'} dict and referenced models are
    converted recursively.  Raises ValueError for unsupported properties.
    '''
    output = {}
    for key, prop in obj.properties().iteritems():
        value = getattr(obj, key)
        if value is None or isinstance(value, SIMPLE_TYPES):
            output[key] = value
        elif isinstance(value, datetime.date):
            # Convert date/datetime to ms-since-epoch ("new Date()").
            # mktime() yields *seconds*, so scale to milliseconds before
            # adding the sub-second part (original code omitted the scale).
            ms = time.mktime(value.utctimetuple()) * 1000
            # datetime exposes 'microsecond' (singular); the original
            # getattr(value, 'microseconds', 0) always returned 0.
            # Plain dates have neither, hence the getattr default.
            ms += getattr(value, 'microsecond', 0) / 1000
            output[key] = int(ms)
        elif isinstance(value, db.GeoPt):
            output[key] = {'lat': value.lat, 'lon': value.lon}
        elif isinstance(value, db.Model):
            # Recurse through the module-level function; db.Model has no
            # to_dict() method (obj.to_dict(value) raised AttributeError).
            output[key] = obj_to_dict(value)
        else:
            raise ValueError('cannot encode ' + repr(prop))
    return output
|
UTF-8
|
Python
| false | false | 2,014 |
5,480,378,296,605 |
736579b0beac34b9bc29d98add806d04a62f3c03
|
5c12e15f4b0d8be3fdade7a7da029aea8477eb9c
|
/abjad/trunk/abjad/tools/measuretools/apply_full_measure_tuplets_to_contents_of_measures_in_expr.py
|
b82943ae76d2ab24afdf90fdfca7d8bf1e0ad7b0
|
[
"GPL-3.0-only"
] |
non_permissive
|
Alwnikrotikz/abjad
|
https://github.com/Alwnikrotikz/abjad
|
0c1821d4728b5472b40da0522b8590f0069d8e7b
|
883dcb4464439514a0a2541133031e385f505652
|
refs/heads/master
| 2016-09-01T14:31:49.719604 | 2013-10-29T05:13:24 | 2013-10-29T05:13:24 | 44,893,314 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- encoding: utf-8 -*-
import copy
from abjad.tools import mutationtools
def apply_full_measure_tuplets_to_contents_of_measures_in_expr(
    expr, supplement=None):
    r'''Applies full-measure tuplets to contents of measures in `expr`:

    ::

        >>> staff = Staff([
        ...     Measure((2, 8), "c'8 d'8"),
        ...     Measure((3, 8), "e'8 f'8 g'8")])
        >>> show(staff) # doctest: +SKIP

    .. doctest::

        >>> f(staff)
        \new Staff {
            {
                \time 2/8
                c'8
                d'8
            }
            {
                \time 3/8
                e'8
                f'8
                g'8
            }
        }

    ::

        >>> measuretools.apply_full_measure_tuplets_to_contents_of_measures_in_expr(staff)
        >>> show(staff) # doctest: +SKIP

    .. doctest::

        >>> f(staff)
        \new Staff {
            {
                \time 2/8
                {
                    c'8
                    d'8
                }
            }
            {
                \time 3/8
                {
                    e'8
                    f'8
                    g'8
                }
            }
        }

    Returns none.
    '''
    from abjad.tools import iterationtools
    from abjad.tools import selectiontools
    from abjad.tools import tuplettools
    # Normalize the optional supplement into a selection (empty selection
    # when no supplement was given, which is falsy below).
    supplement = selectiontools.ContiguousSelection(supplement)
    assert isinstance(supplement, selectiontools.ContiguousSelection)
    for measure in iterationtools.iterate_measures_in_expr(expr):
        # The tuplet spans the measure's entire pre-prolation duration.
        target_duration = measure._preprolated_duration
        # NOTE(review): the tuplet is never explicitly re-inserted into
        # the measure; this appears to rely on the FixedDurationTuplet
        # constructor reparenting measure[:] into itself -- confirm
        # against abjad container semantics for this version.
        tuplet = tuplettools.FixedDurationTuplet(target_duration, measure[:])
        if supplement:
            # Each tuplet gets its own independent copy of the supplement.
            new_supplement = mutationtools.mutate(supplement).copy()
            tuplet.extend(new_supplement)
|
UTF-8
|
Python
| false | false | 2,013 |
14,577,119,040,911 |
3eb59e836e087601539360d202f4c4856705f95e
|
b9bc9c203e68900cd6601e082a12fc8899461df0
|
/tests/repository_test/repository_test_skeleton.py
|
a3dfea9996be47f563a7417381c3d50365c2b96e
|
[] |
no_license
|
nbarendt/PgsqlBackup
|
https://github.com/nbarendt/PgsqlBackup
|
6be6566c69b16680e993f8d402b5a2ac78f9bb48
|
1758acbaed7e2fc96d0f4d990b43a10ac296470c
|
refs/heads/master
| 2020-04-14T14:50:25.373236 | 2011-09-14T18:57:16 | 2011-09-14T18:57:16 | 1,459,431 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from unittest import TestCase
from testfixtures import TempDirectory
from bbpgsql.repository import BBRepository
from bbpgsql.repository import DuplicateTagError
from datetime import datetime
class Skeleton_Repository_Operations_With_SpecificCommitStorage(TestCase):
    """Backend-agnostic test suite for BBRepository.

    Subclasses supply a concrete commit store: define setUp()/tearDown()
    that create ``self.store`` and call :meth:`setup_tempdir`,
    :meth:`setup_repository` and :meth:`teardown_tempdir`, and set
    ``__test__ = True`` so the runner collects them.
    """

    __test__ = False  # to prevent nose from running this skeleton

    def setUp(self):
        raise Exception('This is a skeleton for test - you need to provide'
            ' your own setUp() and tearDown()')

    def setup_tempdir(self):
        """Create a temp dir holding two fixture files; call from setUp."""
        self.tempdir = TempDirectory()
        self.file1_contents = 'some contents'
        self.file2_contents = 'some other contents'
        self.filename1 = self.tempdir.write('file1', self.file1_contents)
        self.filename2 = self.tempdir.write('file2', self.file2_contents)

    def teardown_tempdir(self):
        """Remove the temp dir; call from tearDown."""
        self.tempdir.cleanup()

    def setup_repository(self):
        """Wrap self.store in a BBRepository; call after creating the store."""
        self.repo = BBRepository(self.store)

    def commit_filename1(self, tag, message=None):
        self.repo.create_commit_from_filename(tag, self.filename1, message)

    def commit_filename2(self, tag, message=None):
        self.repo.create_commit_from_filename(tag, self.filename2, message)

    def _read_file(self, filename):
        # Read raw contents, closing the handle deterministically; the
        # original inline open(...).read() calls leaked file objects.
        with open(filename, 'rb') as f:
            return f.read()

    def _commit_four_alternating(self):
        """Commit files 1,2,1,2 under tags a-d; return the contents list."""
        self.commit_filename1('a', 'A')
        self.commit_filename2('b', 'B')
        self.commit_filename1('c', 'C')
        self.commit_filename2('d', 'D')
        return [self.file1_contents, self.file2_contents,
                self.file1_contents, self.file2_contents]

    def test_can_commit_filenames_to_repository(self):
        self.commit_filename1('some-tag')

    def test_commit_tag_characters_are_limited(self):
        def will_raise_Exception():
            self.commit_filename1('illegal tag with spaces')
        self.assertRaises(Exception, will_raise_Exception)

    def test_commit_tag_must_be_non_empty(self):
        def will_raise_Exception():
            self.commit_filename1('')
        self.assertRaises(Exception, will_raise_Exception)

    def test_repo_is_empty_to_start(self):
        self.assertEqual([], [c for c in self.repo])

    def test_can_commit_files_and_list_commits(self):
        self.commit_filename1('some-tag')
        self.assertEqual(['some-tag'], [c.tag for c in self.repo])

    def test_can_commit_and_retrieve_contents(self):
        self.commit_filename1('some-tag')
        commit = self.repo['some-tag']
        restore_file = self.tempdir.getpath('file3')
        commit.get_contents_to_filename(restore_file)
        self.assertEqual(self.file1_contents, self._read_file(restore_file))

    def test_tags_are_unique(self):
        self.commit_filename1('some-tag')
        def will_raise_DuplicateTagError():
            self.repo.create_commit_from_filename('some-tag', self.filename2)
        self.assertRaises(DuplicateTagError, will_raise_DuplicateTagError)

    def test_duplicate_tag_with_identical_contents_okay(self):
        # Re-committing identical contents under the same tag is a no-op,
        # not an error.
        self.commit_filename1('some-tag')
        self.commit_filename1('some-tag')
        commit = self.repo['some-tag']
        restore_file = self.tempdir.getpath('file3')
        commit.get_contents_to_filename(restore_file)
        self.assertEqual(self.file1_contents, self._read_file(restore_file))

    def test_can_get_commit_before_a_given_commit(self):
        self.commit_filename1('a')
        self.commit_filename1('b')
        commit_b = self.repo['b']
        self.assertEqual('a', self.repo.get_commit_before(commit_b).tag)

    def test_commit_before_first_raises_ValueError(self):
        self.commit_filename1('a')
        def will_raise_ValueError():
            self.repo.get_commit_before(self.repo['a'])
        self.assertRaises(ValueError, will_raise_ValueError)

    def test_commits_are_sorted(self):
        self.commit_filename1('c')
        self.commit_filename1('a')
        self.commit_filename1('b')
        self.assertEqual(['a', 'b', 'c'], [c.tag for c in self.repo])

    def test_can_delete_commits_before_a_specified_commit(self):
        self.commit_filename1('a')
        self.commit_filename1('b')
        self.commit_filename1('c')
        self.repo.delete_commits_before(self.repo['c'])
        self.assertEqual(['c'], [c.tag for c in self.repo])

    def test_can_store_and_retrieve_message_with_commit(self):
        message = 'some-extra-data'
        self.commit_filename1('a', message)
        commit = self.repo['a']
        self.assertEqual(message, commit.message)

    def test_message_characters_limited_to_alphanumeric_and_underscore(self):
        def will_raise_Exception():
            self.commit_filename1('a', 'some illegal message')
        self.assertRaises(Exception, will_raise_Exception)

    def test_UTC_iso_datetime_is_a_valid_tag(self):
        self.commit_filename1(datetime.utcnow().isoformat())

    def test_UTC_iso_datetime_is_a_valid_message(self):
        self.commit_filename1('a', datetime.utcnow().isoformat())
        self.commit_filename1(datetime.utcnow().isoformat())

    def test_empty_repo_has_zero_size(self):
        self.assertEqual(0, self.repo.get_repository_size())

    def get_expected_size_from_contents(self, file_contents):
        """Sum of the lengths of each item (also accepts a single string,
        in which case every character contributes length 1)."""
        return sum(len(item) for item in file_contents)

    def test_can_get_repo_size_one_commit(self):
        self.commit_filename1('a', 'A')
        self.assertEqual(
            self.get_expected_size_from_contents(self.file1_contents),
            self.repo.get_repository_size())

    def test_can_get_repo_size_many_different_commits(self):
        file_contents = self._commit_four_alternating()
        self.assertEqual(
            self.get_expected_size_from_contents(file_contents),
            self.repo.get_repository_size())

    def test_can_get_repo_size_after_delete(self):
        file_contents = self._commit_four_alternating()
        self.repo.delete_commits_before(self.repo['d'])
        # Only commit 'd' survives the deletion.
        file_contents = file_contents[3:]
        self.assertEqual(
            self.get_expected_size_from_contents(file_contents),
            self.repo.get_repository_size())

    def test_empty_repo_has_zero_items(self):
        self.assertEqual(0, self.repo.get_number_of_items())

    def test_can_get_number_items_one_commit(self):
        self.commit_filename1('a', 'A')
        self.assertEqual(1, self.repo.get_number_of_items())

    def test_can_get_number_of_items_many_different_commits(self):
        file_contents = self._commit_four_alternating()
        self.assertEqual(len(file_contents),
                         self.repo.get_number_of_items())

    def test_can_get_number_of_items_after_delete(self):
        file_contents = self._commit_four_alternating()
        self.repo.delete_commits_before(self.repo['d'])
        file_contents = file_contents[3:]
        self.assertEqual(len(file_contents),
                         self.repo.get_number_of_items())
|
UTF-8
|
Python
| false | false | 2,011 |
1,159,641,192,761 |
6f37dfcbd8c392490107aeea837e61795df2e6c3
|
7b10aa76ee7221ceaf485da7995137f58323d322
|
/setup.py
|
49a4212a74e847ec79919c474dd8cdd7b4062c83
|
[] |
no_license
|
vilos/django-cms-columns
|
https://github.com/vilos/django-cms-columns
|
38b9099e1084966b7aaee23c6311be49621006ef
|
282cdc336c7a3d4dfc085b89c7e5746e248ec637
|
refs/heads/master
| 2021-01-15T18:09:57.527742 | 2012-11-30T16:32:24 | 2012-11-30T16:32:24 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# encoding=utf8
from setuptools import setup, find_packages
# Distribution metadata for the django-cms-columns package.
setup(
    name = 'django-cms-columns',
    version = '0.1a1',
    license = 'BSD',
    description = 'Variable column support for django-cms plugins',
    author = u'Samuel Lüscher',
    author_email = '[email protected]',
    url = 'http://github.com/philomat/django-cms-columns',
    packages = find_packages(),
    # Non-Python files shipped inside the cms_columns package: plugin
    # templates and compiled translations.
    package_data = {
        'cms_columns': [
            'templates/cms_columns/*.html',
            'templates/cms_columns/*.txt',
            'locale/*/LC_MESSAGES/*',
        ],
    },
    # Templates/locales are read from the filesystem, so the package
    # cannot run from a zipped egg.
    zip_safe=False,
)
|
UTF-8
|
Python
| false | false | 2,012 |
2,628,519,996,535 |
b032bdc504094fe4aaae1fd04a1d6ebcd58e2253
|
05a53ee28fc7d0cad8d5c83b22990999f0b9e9be
|
/test/test_all.py
|
6ee74c4101a03b216b707410582c0c83f5cad3cd
|
[] |
no_license
|
visionsystems/BLIP
|
https://github.com/visionsystems/BLIP
|
eac70984e070fd3096a217c69484edbb92254e8d
|
6e41773aa4ea9ac244fcbac11ffb2455b90d1c0d
|
refs/heads/master
| 2020-06-04T04:48:42.776426 | 2012-06-21T09:32:47 | 2012-06-21T09:32:47 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
import sys
def print_seperator():
    """Print a horizontal rule of 50 '#' characters."""
    # Parenthesized form prints identically under Python 2 and 3.
    # (Name keeps the historical 'seperator' spelling; callers use it.)
    print('#' * 50)
if __name__ == '__main__':
    # This script's own name, so it is excluded from the discovered tests.
    name = sys.argv[0]
    # A test file is any sibling named test_*.py (except this runner).
    def is_testfile(x): return x.endswith('.py') and x.startswith('test_')
    test_files = [x for x in os.listdir('.') if is_testfile(x) and x != name]
    # Use the same interpreter that is running this script.
    python_bin = sys.executable
    print_seperator()
    print 'found tests: ', '\n'.join('  ' + x for x in test_files)
    print
    # test execution
    print 'executing test files'
    failures = 0
    for x in test_files:
        # Each test file is run in its own process; a nonzero exit status
        # counts as a failure.
        cmd = '%s %s --batch'%(python_bin, x)
        print_seperator()
        print 'test file ', x
        print cmd
        ret = os.system(cmd)
        if not ret == 0:
            print ' test failed'
            failures += 1
        print
        print
    # Summary: how many files passed out of the total discovered.
    nr_tests = len(test_files)
    print_seperator()
    print_seperator()
    print '%i of %i test files finished without error'%(nr_tests-failures, nr_tests)
    print
    print
|
UTF-8
|
Python
| false | false | 2,012 |
5,446,018,542,177 |
67cf2b15e1860170c0550e061818c4ca0f6ed7da
|
ce40e78d0b4e33c161e42a3adcc38f6e000beba4
|
/Tile.py
|
682614dbfebe1b865de057829e39c0d4f3c21f96
|
[] |
no_license
|
HommeBarbu/MapEditor
|
https://github.com/HommeBarbu/MapEditor
|
3c7789021616bade0bb03cf32582d15355a2f4c9
|
02c6409989d59a90ab02f382c7f76f23b03fbac6
|
refs/heads/master
| 2020-05-20T10:03:36.217297 | 2013-11-13T00:09:52 | 2013-11-13T00:09:52 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
class Tile:
    """A single map cell: a layer type, terrain name and walkability flag."""

    def __init__(self):
        # New tiles default to plain, walkable grass on the map layer.
        self.type, self.name, self.solid = 'map', 'grass', False
|
UTF-8
|
Python
| false | false | 2,013 |
17,051,020,181,049 |
6b363b1db774f2b4a26f1f0ee1131e5fd9e80ad5
|
1a8774dc21fe26c67ab06260f0178d6a693dbc2f
|
/lib/groupthink/__init__.py
|
2173e02de7ac474af566078185c26d0d2334b5b3
|
[] |
no_license
|
52North/glaps
|
https://github.com/52North/glaps
|
e109f156059a55160fc063cb5d214b02cd3f5596
|
2f06e1d53dc59a7af75a7b2d8f66fcd38c38edf1
|
refs/heads/master
| 2021-01-16T19:20:46.442986 | 2013-04-10T09:20:17 | 2013-04-10T09:20:17 | 9,342,449 | 5 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""Groupthink is a library of self-sharing data structures written in python
and shared over dbus. Together with the D-Bus Tubes provided by the Telepathy
framework, this enables data structures to be shared over a network.
Groupthink's tagline is "Collaboration should be easy.".
Concepts
========
The goal of Groupthink is to make it tremendously easy for programmers to write
distributed networked applications. To achieve this goal, Groupthink
introduces the concept of a Distributed Object, or DObject. A DObject is an
abstract object whose state is regarded as existing independently of any
individual computer. Each instance of a shared application contains local
objects that may be regarded as views into a shared DObject. Edits to the local
object propagate into the shared state, and vice versa.
The DObject provides an abstraction barrier that permits a programmer to
manipulate the data structure without worrying about any network-related issues.
For example, if an application stores its local state in a python dict(), it may
be possible to substitute a Groupthink L{CausalDict} and immediately achieve
coherent sharing over the network.
The key guarantee provided by DObjects is decentralized coherence. Coherence,
in Groupthink, means that any group of connected computers maintaining instances
of a single DObject will observe that object to be in the same state at
quiescence, which will occur in a bounded, short amount of time. Decentralized
coherence means that this property is achieved without any single point of
failure. Anyone with a view into a DObject can leave the group unexpectedly
without destroying the DObject, or even interrupting communication.
Providing these guarantees, and also maintaining useful behaviors,
is not always easy, and sometimes constrains the
behavior or efficiency of a DObject. Many DObjects employ techniques related
to Operational Transformation in order to ensure coherence without negotiation.
Dependencies
============
Mandatory dependencies:
- Python 2.5 or later
- dbus-python
Optional dependencies:
- pygtk
- Sugar (U{http://wiki.sugarlabs.org/})
Groupthink is written in Python. It requires Python 2.5 or later, and depends
heavily on dbus-python for all communication between
instances of an application. In principle, dbus-python is the only dependency.
However, Groupthink's principal use is in shared, networked applications. To
forward D-Bus messages over IP, Groupthink supports the Telepathy library via
telepathy-python.
For users writing GTK applications, Groupthink provides the L{gtk_tools}
submodule, which contains GTK-related objects like self-sharing widgets.
For users writing Sugar activities, Groupthink provides the L{sugar_tools}
submodule, which contains additional Sugar-related convenience objects,
including L{GroupActivity}, a subclass of sugar.activity.activity.Activity
that handles all
Telepathy interaction and L{Group} setup automatically.
Status
======
Groupthink is under active development, and is very much a work in progress.
No official releases have yet been made, and further API changes are virtually
certain. However, Groupthink is already sufficiently useful that developers may
wish to take a snapshot of the repository and import it into their projects.
"""
__license__ = """
Copyright 2008 Benjamin M. Schwartz
Groupthink is LGPLv2+
Groupthink is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
Groupthink is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with Groupthink. If not, see <http://www.gnu.org/licenses/>.
"""
from groupthink_base import *
|
UTF-8
|
Python
| false | false | 2,013 |
11,562,051,967,410 |
28c1945d6d6353f6c3e4652260e6d5db67bd0e03
|
43ed3680c535894e2e9f300482e1a0f471aa0147
|
/server/line-of-effect.py
|
4aabe6936eed7d3098b02a7d829c4325b2290df6
|
[] |
no_license
|
MMORPG-developers/spiffy-mmorpg
|
https://github.com/MMORPG-developers/spiffy-mmorpg
|
1b0b9da1d08c5151f826969ee42c1887943fdb1e
|
3fddfc9fbdb6ba66b17de2884ae5e08f47767ea2
|
refs/heads/master
| 2021-01-19T16:50:18.544388 | 2013-09-15T03:18:45 | 2013-09-15T03:18:45 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
Human madable inconvenient.
Way too sucks.
"""
def get_line(start, end):
    """
    Return the list of grid cells a straight line from start to end passes
    through, in order.

    start, end -- 2-tuples (x, y) in world coordinates.  The caller must
    guarantee x2 >= x1, y2 > y1 and (y2 - y1) <= (x2 - x1), i.e. the line
    runs right-and-down with a shallow slope.
    """
    # Work in half-cell units so every boundary crossing is an integer.
    dx2 = 2 * (end[0] - start[0])
    dy2 = 2 * (end[1] - start[1])

    # Vertical progress of the line (scaled by dx2) at each crossing of a
    # vertical cell boundary; 0 stands for the starting cell itself.
    crossings = [0]
    crossings.extend(odd * dy2 for odd in range(1, dx2, 2))

    cells = []
    prev_progress = 0
    boundary = dx2  # next horizontal cell boundary, in the same scale
    x, y = start
    for progress in crossings:
        if progress >= boundary:
            y += 1
            # The line crossed the horizontal boundary strictly between two
            # vertical crossings, so the previous column touches this row too.
            if prev_progress < boundary < progress:
                cells.append((x - 1, y))
            boundary += 2 * dx2
        cells.append((x, y))
        prev_progress = progress
        x += 1
    return cells
### # def floor_div(numer, denom):
### # return numer // denom
### #
### # def ceil_div(numer, denom):
### # q, r = divmod(numer, denom)
### #
### # if r == 0:
### # return q
### # else:
### # return q + 1
### #
### # return floor_div(numer + floor_div(denom, 2), denom)
###
### def div_round_up(numer, denom):
### """
### Calculate numer / denom, rounded. In case numer / denom is exactly halfway
### between two integers, round up.
### """
###
### # return ceil_div(numer + ceil_div(denom, 2), denom)
### return (numer + (denom // 2)) // denom
###
### def div_round_down(numer, denom):
### """
### Calculate numer / denom, rounded. In case numer / denom is exactly halfway
### between two integers, round down.
### """
###
### return - div_round_up(- numer, denom)
###
### print div_round_up(8, 3)
### print div_round_down(8, 3)
###
###
### def get_line2(start, end):
### """
### Return a list containing all points between start and end.
### Start and end should be 2-tuples (x, y), given in world coordinates.
### We're assuming x2 >= x1, y2 > y1, and (y2 - y1) <= (x2 - x1)
### """
###
### h_dist_halves = 2 * (end[0] - start[0])
### v_dist_halves = 2 * (end[1] - start[1])
###
### # Calculate the places where we cross the horizontal gridlines.
### h_gridline_crossings_scaled = []
###
### for v_coord_halves in range(1, v_dist_halves, 2):
### h_gridline_crossings_scaled.append(v_coord_halves * h_dist_halves)
###
### print h_gridline_crossings_scaled
###
### path = []
###
### # TODO: Handle first row
###
### for i in range(len(h_gridline_crossings_scaled) - 1):
### left_crossing = h_gridline_crossings_scaled[i]
### right_crossing = h_gridline_crossings_scaled[i+1]
###
### row_start = div_round_fuck(left_crossing, v_dist_halves * 2)
### # row_start = (2 * i + h_dist_halves) / (4 * v_dist_halves)
###
### # TODO: Handle last row
###
### # We want to divide each of the scaled crossings by v_dist_halves to
### # convert to the actual number of half-cells, then divide by 2 to convert
### # to the actual number of whole cells. The result should be rounded to
### # calculate the correct cell. To save on division, we do this instead.
### # Whatever this is.
### # Screw it, I'm tired. Comment better later.
### h_gridline_crossings = [(2 * x + h_dist_halves) / (4 * v_dist_halves)
### for x in h_gridline_crossings_scaled]
###
### print h_gridline_crossings
###
### raise NotImplementedError
###
### # Convert to the actual points on the line.
### path = []
### old_v_progress = 0
### next_v_crossing = h_dist_halves
###
### print h_crossings
###
### (x, y) = start
###
### for curr_v_progress in h_crossings:
### # Check if we crossed a horizontal gridline
### if curr_v_progress >= next_v_crossing:
### y += 1
### if old_v_progress < next_v_crossing < curr_v_progress:
### path.append((x-1, y))
### next_v_crossing += 2*h_dist_halves
###
### path.append((x, y))
###
### old_v_progress = curr_v_progress
### x += 1
###
### return path
###
###
###
###
###
###
###
###
###
### # def get_line(start, end):
### # """
### # Return a list containing all points between start and end.
### # Start and end should be 2-tuples (x, y), given in world coordinates.
### # We're assuming x2 >= x1, y2 > y1, and (y2 - y1) <= (x2 - x1)
### # """
### #
### # # Get the horizontal and vertical distances (in half-cells).
### # h_dist_halves = 2 * (end[0] - start[0])
### # v_dist_halves = 2 * (end[1] - start[1])
### #
### # # Find the vertical components (in half-cells) of all crossings of
### # # vertical gridlines, scaled by the horizontal distance (in half-cells).
### # v_crossing_components = []
### # v_crossing_component = h_dist_halves
### #
### # for h_crossing_component in range(1, h_dist_halves, 2):
### # v_crossing_components.append(
def draw_path(path):
    """
    Render *path* (a non-empty list of (x, y) cells) as ASCII art.

    'A' marks the first cell, 'B' the last, '0' every other cell on the
    path and ' ' empty cells.  Returns the picture as a newline-joined
    string, one row per y value.
    """
    xs = [cell[0] for cell in path]
    ys = [cell[1] for cell in path]
    x_min, x_max = min(xs), max(xs)
    y_min, y_max = min(ys), max(ys)
    start, end = path[0], path[-1]
    # Set membership is O(1); the original scanned the list once per grid
    # cell, which is quadratic in the path length.
    cells = set(path)
    pic = []
    for y in range(y_min, y_max + 1):
        # Collect characters and join once instead of building the row
        # with repeated string concatenation.
        row = []
        for x in range(x_min, x_max + 1):
            if (x, y) == start:
                row.append("A")
            elif (x, y) == end:
                row.append("B")
            elif (x, y) in cells:
                row.append("0")
            else:
                row.append(" ")
        pic.append("".join(row))
    return "\n".join(pic)
if __name__ == "__main__":
    # Demo: rasterize a shallow line and show it as ASCII art.
    path = get_line((0, 0), (17, 13))
    # Parenthesized print works under both Python 2 and Python 3.
    print(draw_path(path))
|
UTF-8
|
Python
| false | false | 2,013 |
3,624,952,412,729 |
e832728a32bf7aa51b2cd7bb7f19929ea1fcf091
|
47a9a75db1fab96afe0ca6246e157ebf3d8d690b
|
/django/web_annotations_server/datastore/models.py
|
e4022799110ff3ec360d4000118f854cc83ebbeb
|
[] |
no_license
|
junkal/cv-web-annotation-toolkit
|
https://github.com/junkal/cv-web-annotation-toolkit
|
c8cf4f14eec5c89a504dbac2c4de4121933eddac
|
0945efe318821aaa90ba17ff84f46bae552a489f
|
refs/heads/master
| 2016-08-04T18:08:26.475450 | 2011-06-13T05:24:43 | 2011-06-13T05:24:43 | 38,425,931 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.db import models
from django.contrib.auth.models import User
from xml.dom import minidom
from xmlmisc import *
# Create your models here.
class Dataset(models.Model):
    """A named collection of DataItem rows (e.g. one image corpus)."""
    name = models.SlugField()

    # The original defined an __init__ that only forwarded to
    # models.Model.__init__; it added nothing and was removed.

    def __str__(self):
        return self.name

    def get_stat_counts(self):
        """Return summary counts for this dataset, cached on the instance.

        Counts: total data items, items carrying each box-annotation type,
        and active flag annotations per colour.
        """
        # Per-instance memoization: only compute the expensive aggregate
        # queries once per object lifetime.
        if "stat_counts" in self.__dict__:
            return self.stat_counts
        voc2008_box_AT = AnnotationType.objects.get(name="voc2008_boxes")
        voc_bbox_AT = AnnotationType.objects.get(name="voc_bbox")
        LabelMe_boxes_AT = AnnotationType.objects.get(name="LabelMe_boxes")
        flag_AT = AnnotationType.objects.get(name="flags")
        # (The original also had a bare `flag_AT` expression statement
        # here -- dead code, removed.)
        num_flags = {}
        for flag_name in ["red", "white", "blue"]:
            num_flags[flag_name] = DataItem.objects.filter(
                annotation__annotation_type__id=flag_AT.id,
                ds__id=self.id,
                annotation__data=flag_name,
                annotation__is_active=True).count()
        self.stat_counts = {
            'data_items': self.dataitem_set.count(),
            'voc2008_boxes': DataItem.objects.filter(annotation__annotation_type__id=voc2008_box_AT.id, ds__id=self.id).count(),
            'LabelMe_boxes': DataItem.objects.filter(annotation__annotation_type__id=LabelMe_boxes_AT.id, ds__id=self.id).count(),
            'voc_bbox': DataItem.objects.filter(annotation__annotation_type__id=voc_bbox_AT.id, ds__id=self.id).count(),
            'num_flags': num_flags
        }
        return self.stat_counts
class DataItem(models.Model):
    """A single item (identified by its URL) belonging to a Dataset."""
    url = models.TextField()
    type = models.SlugField()
    ds = models.ForeignKey(Dataset)

    def __str__(self):
        return "%s(%d,%s)" % (self.ds.name, self.id, self.url)

    def get_public_id(self):
        """The URL doubles as the externally visible identifier."""
        return self.url

    def get_name_parts(self):
        """Split self.url into (full name, path, file-stem) components.

        The first three URL segments (scheme + host) and the trailing
        '.jpg' extension are dropped.  NOTE(review): assumes a
        'http://host/...' style URL; verify against stored data.
        """
        segments = self.url.split("/")
        str_img_file = segments[-1].replace('.jpg', '')
        path_components = segments[3:len(segments) - 1]
        if path_components:
            # str.join replaces the original reduce(lambda a,b: a+"/"+b, ...)
            # -- identical output for non-empty lists, clearer and faster.
            str_img_path = "/".join(path_components)
            str_img = str_img_path + "/" + str_img_file
        else:
            str_img_path = ""
            str_img = str_img_file
        return (str_img, str_img_path, str_img_file)
class AnnotationType(models.Model):
    """A kind of annotation (boxes, flags, ...) plus its metadata spec."""
    category = models.SlugField()
    name = models.SlugField()
    # Query-string style spec: "key1=value1&key2=value2".
    annotation_metadata = models.TextField(blank=True)
    explanation = models.URLField(blank=True)

    def __str__(self):
        return self.name

    def get_annotation_metadata(self):
        """Parse annotation_metadata ('k1=v1&k2=v2') into {key: value}.

        NOTE(review): like the original, this raises ValueError when the
        field is empty or a segment lacks '=' -- callers apparently only
        use it on well-formed metadata.
        """
        meta_dict = {}
        for pair in self.annotation_metadata.split("&"):
            key, value = pair.split("=")
            meta_dict[key] = value
        return meta_dict

    def get_annotation_metadata2(self):
        """Like get_annotation_metadata, but each value is split on ','
        into a list.  Reuses the base parser instead of duplicating it."""
        meta_dict = {}
        for key, value in self.get_annotation_metadata().items():
            meta_dict[key] = value.split(",")
        return meta_dict
class Annotation(models.Model):
    """One annotation (boxes, flag, similarity judgement, ...) attached to a DataItem."""
    ref_data=models.ForeignKey(DataItem);
    annotation_type=models.ForeignKey(AnnotationType);
    author=models.ForeignKey(User);
    created = models.DateTimeField(auto_now_add=True);
    # Soft-delete / edit-lock flags.
    is_active=models.BooleanField(default=True);
    is_locked=models.BooleanField(default=False);
    # Payload; format depends on annotation_type (XML for similarity data).
    data=models.TextField();
    canonic_url=models.URLField(blank=True);
    rel_reference = models.ManyToManyField('self', symmetrical=False,blank=True)
    # Per-instance cache for visual_similarity_get_attribute_values();
    # the class-level None means "not computed yet".
    vs_attr_values=None;
    def visual_similarity_get_attribute_values(self):
        """Parse self.data as similarity XML and return a list of
        [attribute, value, explanation] triples, cached per instance.

        NOTE(review): relies on the xget/xget_v2 helpers from xmlmisc;
        the XML schema (<similarity> elements with attribute/value/
        explanation children) is inferred from the names used here.
        """
        if self.vs_attr_values:
            return self.vs_attr_values
        self.vs_attr_values=[];
        x_doc = minidom.parseString(self.data);
        for x_sim in xget(x_doc,"similarity"):
            (attr,val,explain)=xget_v2(x_sim,["attribute","value","explanation"]);
            self.vs_attr_values.append([attr,val,explain])
        #attributes=self.annotation_type.get_annotation_metadata2()['attributes'];
        #return map(lambda s:[s,"3","name"],attributes);
        return self.vs_attr_values
class AnnotationRevisions(models.Model):
    """Join table recording that one Annotation is a revision of another."""
    # The annotation being revised.
    target_annotation = models.ForeignKey(Annotation,related_name="revisions_set");
    revision = models.ForeignKey(Annotation,related_name="revisedannotation_set"); #In most cases this should be one-to-one, but in rare cases it could be many-to-many
    author=models.ForeignKey(User);
    created = models.DateTimeField(auto_now_add=True);
class PredictionsSet(models.Model):
    """A named, user-owned bundle of (machine-generated) annotations."""
    title = models.TextField();
    description = models.TextField();
    # The annotations that make up this prediction set.
    annotations = models.ManyToManyField(Annotation);
    author=models.ForeignKey(User);
    created = models.DateTimeField(auto_now_add=True);
|
UTF-8
|
Python
| false | false | 2,011 |
14,585,708,949,830 |
a98f39506885fcfcc60dfe24e3c79acd52ad4f59
|
03b708a590853add93bdce5474d092a5837ed201
|
/mro_contact/tables.py
|
c13f0df42da9ad24eb699ff4cd4f23fe40c00e4d
|
[
"GPL-3.0-or-later"
] |
non_permissive
|
yaacov/django-mro
|
https://github.com/yaacov/django-mro
|
22d17023124528870f89c793a3a559efdb4b130a
|
b9b73d458dfb70611ec7b5f599b65e402f0b12c1
|
refs/heads/master
| 2021-01-10T21:25:12.446537 | 2014-04-02T08:14:49 | 2014-04-02T08:14:49 | 8,181,196 | 2 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# Copyright (C) 2013 Yaacov Zamir <[email protected]>
# Author: Yaacov Zamir (2013) <[email protected]>
from django.utils.translation import ugettext_lazy as _
import django_tables2 as tables
from mro_contact.models import Employee, Business
class EmployeeTable(tables.Table):
    """django-tables2 listing of Employee contacts.

    Name columns link to the employee detail page; phone-style columns
    render as tel: links so they are tappable on mobile devices.
    """
    first_name = tables.TemplateColumn(
        '<a href="{{ record.pk }}/" >{{ record.first_name }}</a>')
    last_name = tables.TemplateColumn(
        '<a href="{{ record.pk }}/" >{{ record.last_name }}</a>')
    # Computed, non-sortable column provided by the model.
    department_list = tables.Column(accessor='department_list', orderable=False)
    department_list.verbose_name = _('Departments')

    phone = tables.TemplateColumn(
        '<a href="tel:{{ value }}/" >{{ value }}</a>')
    phone.verbose_name = _('Phone')

    fax = tables.TemplateColumn(
        '<a href="tel:{{ value }}/" >{{ value }}</a>')
    fax.verbose_name = _('Fax')

    cell_phone = tables.TemplateColumn(
        '<a href="tel:{{ value }}/" >{{ value }}</a>')
    cell_phone.verbose_name = _('Cell phone')

    class Meta:
        model = Employee
        # Shared table template with Bootstrap striping.
        template = 'mro/table.html'
        attrs = {'class': 'table table-striped'}
        fields = (
            'last_name', 'first_name',
            'department_list',
            'phone',
            'cell_phone',
            'fax',
            'address',
            'email')
class BusinessTable(tables.Table):
    """django-tables2 listing of Business contacts.

    The name column links to the business detail page; the phone column
    renders as a tel: link.
    """
    name = tables.TemplateColumn(
        '<a href="{{ record.pk }}/" >{{ record.name }}</a>')
    # Computed, non-sortable column provided by the model.
    department_list = tables.Column(accessor='department_list', orderable=False)
    department_list.verbose_name = _('Departments')

    phone = tables.TemplateColumn(
        '<a href="tel:{{ value }}/" >{{ value }}</a>')
    phone.verbose_name = _('Phone')

    class Meta:
        # BUG FIX: was `model = Employee`, a copy-paste from EmployeeTable.
        # This table lists Business rows and references Business-only
        # fields ('name', 'contact_person'); Business was imported but
        # never used.
        model = Business
        template = 'mro/table.html'
        attrs = {'class': 'table table-striped'}
        fields = (
            'name', 'contact_person',
            'department_list',
            'phone',
            'fax',
            'address',
            'email')
|
UTF-8
|
Python
| false | false | 2,014 |
18,786,186,952,935 |
ea50072fc35ca573b345c89ea14d2ecb7b846969
|
e94b4283aaff494e155140364ea874955e259b90
|
/linetest.py
|
f3643eac90dee02231bad95dc53d072168d7d562
|
[] |
no_license
|
phatboyle/programs
|
https://github.com/phatboyle/programs
|
420a0d3a10c787b0bde42d5269bda8a5a02d321d
|
256b26cb6ae86cd3cbd64c32403b25de9b6a2b09
|
refs/heads/master
| 2020-06-05T15:33:31.597460 | 2013-06-01T18:29:21 | 2013-06-01T18:29:21 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import pygame
screenHeight = 400
screenWidth = 400

# Basic palette (R, G, B).
black = (0, 0, 0)
blue = (0, 0, 255)
red = (255, 0, 0)
white = (255, 255, 255)
pink = (255, 51, 255)

screen = pygame.display.set_mode((screenWidth, screenHeight))
pygame.draw.line(screen, red, (0, 0), (screenWidth, screenHeight))

# Bouncing-square demo: move one pixel diagonally per frame and reverse
# direction at the edges.
inc = 1
r = pygame.Rect(0, 0, 50, 50)
while True:
    screen.fill(pink)
    r = r.move(inc, inc)
    pygame.draw.rect(screen, blue, r)
    pygame.display.flip()  # draw it
    # BUG FIX: the original tested a stale local `x` that was never
    # updated after initialisation, so the square never bounced; test the
    # rect's actual position instead.
    if r.x > 350 or r.x < 0:
        inc = inc * -1
    event = pygame.event.poll()
    if event.type == pygame.QUIT:
        break
    elif event.type == pygame.KEYDOWN:
        if event.key == pygame.K_ESCAPE:
            break
UTF-8
|
Python
| false | false | 2,013 |
3,762,391,356,663 |
2df2b12e8ce037d38e844351aff5dba1c443f2ba
|
a1c0fcb43183a37b7590d7e5a54f95b557410826
|
/lesson-04/users/views.py
|
e00df627903eb53f099a486cf7adaf12f1d5fb3f
|
[] |
no_license
|
ngocluu263/web-01
|
https://github.com/ngocluu263/web-01
|
2c6d34019b09161d3bcbc78a845031d4bab7cdd2
|
26504b5d482415bbf04c0072e0c22c0aaa081090
|
refs/heads/master
| 2021-04-30T22:12:14.619933 | 2013-02-21T10:02:27 | 2013-02-21T10:02:27 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.contrib import auth
from django.core.exceptions import ValidationError
from django.shortcuts import redirect
from annoying.decorators import render_to
from users.forms import LoginForm, RegisterForm
@render_to('users/login.html')
def login(request):
    """
    Render the login form and authenticate the user on POST.

    Already-authenticated users are bounced straight to the index page;
    validation failures are surfaced as non-field form errors.
    """
    if request.user.is_authenticated():
        return redirect('index')

    form = LoginForm(request.POST or None)
    if not form.is_valid():
        return {'form': form}
    try:
        form.login(request)
    except ValidationError as exc:
        form._errors['__all__'] = exc.messages
        return {'form': form}
    return redirect('index')
def logout(request):
    """
    Logout page.

    Ends the current session, if any, then always redirects to the index.
    """
    if request.user.is_authenticated():
        auth.logout(request)
    return redirect('index')
@render_to('users/register.html')
def register(request):
    """
    Render the registration form and create the account on POST.

    Already-authenticated users are bounced straight to the index page;
    registration failures are surfaced as non-field form errors.
    """
    if request.user.is_authenticated():
        return redirect('index')

    form = RegisterForm(request.POST or None)
    if not form.is_valid():
        return {'form': form}
    try:
        form.register(request)
    except ValidationError as exc:
        form._errors['__all__'] = exc.messages
        return {'form': form}
    return redirect('index')
|
UTF-8
|
Python
| false | false | 2,013 |
7,352,984,048,001 |
ff47ffe5b3fc1388c793e257a9903322556f30bc
|
95b4d797bdea14d70416e01cd69354fb4d8654fd
|
/Tools/LogAnalyzer/tests/TestVCC.py
|
21c015b2631852aa1b820e2f8d876ca7bc7dcaee
|
[] |
no_license
|
chapman/common
|
https://github.com/chapman/common
|
085d778a40b384b44279d5401b53d0493ffedc72
|
b7bfa07510c092dfd12e878eb30c22fe0e03c327
|
refs/heads/master
| 2016-09-01T23:22:30.717328 | 2014-06-15T21:40:09 | 2014-06-15T21:40:09 | 16,061,519 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from LogAnalyzer import Test,TestResult
import DataflashLog
import collections
class TestVCC(Test):
    '''test for VCC within recommendations, or abrupt end to log in flight'''

    def __init__(self):
        self.name = "VCC"

    def run(self, logdata, verbose):
        """Flag unstable or too-low board VCC using the CURR channel.

        Sets self.result to UNKNOWN (no data), WARN (min/max spread too
        wide), FAIL (below minimum) or GOOD.
        """
        self.result = TestResult()
        self.result.status = TestResult.StatusType.GOOD

        if "CURR" not in logdata.channels:
            self.result.status = TestResult.StatusType.UNKNOWN
            self.result.statusMessage = "No CURR log data"
            return

        # Naive min/max thresholds, in millivolts.
        vcc_min = logdata.channels["CURR"]["Vcc"].min()
        vcc_max = logdata.channels["CURR"]["Vcc"].max()
        vcc_diff = vcc_max - vcc_min
        min_threshold = 4.6 * 1000
        max_diff = 0.3 * 1000

        if vcc_diff > max_diff:
            self.result.status = TestResult.StatusType.WARN
            self.result.statusMessage = "VCC min/max diff %sv, should be <%sv" % (vcc_diff / 1000.0, max_diff / 1000.0)
        elif vcc_min < min_threshold:
            self.result.status = TestResult.StatusType.FAIL
            # repr() reproduces exactly what the original backtick
            # expressions printed.
            self.result.statusMessage = "VCC below minimum of %sv (%sv)" % (repr(min_threshold / 1000.0), repr(vcc_min / 1000.0))
|
UTF-8
|
Python
| false | false | 2,014 |
8,254,927,161,109 |
8ea2cf1e0d142cd4699a650e01a2971591a4ad46
|
b46198873a4bff09f984ed1c121434c480b88054
|
/todo/views.py
|
6c17984a841f21f4b96f89b04cacd582ba631d94
|
[] |
no_license
|
iconpin/easy-todo
|
https://github.com/iconpin/easy-todo
|
223b5979fedc5c64824b82cc2b2d8ad748377cf1
|
55f8ba865784ae14f1569479dabedc8d0d75058f
|
refs/heads/master
| 2020-12-14T15:14:55.241157 | 2014-01-21T19:25:23 | 2014-01-21T19:29:41 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
import random
import sqlite3
import string
from flask import Flask
from flask import render_template
from flask import request
from flask import redirect
from flask import url_for
from flask import make_response
from . import app
from . import DATABASE
from . import utils
from . import security
connection = utils.Connection(DATABASE)
# Just return the static page index.html
@app.route('/')
def index():
    """Landing page; purely static content."""
    return render_template('index.html')
# Creates a new ToDo list. If password is sent, it sets the password for the
# list. Same for the title.
@app.route('/create', methods=['POST'])
def create():
    """Create a new todo list and redirect to its page.

    The 5-character list id doubles as an unguessable capability URL, so
    it is drawn from the OS CSPRNG (random.SystemRandom) instead of the
    predictable default Mersenne Twister.
    NOTE(review): a 5-char id is short and collisions are not handled;
    consider lengthening it and retrying on UNIQUE violations.
    """
    title = request.form['title']
    hashed_password = security.get_hash(request.form['password'])
    rng = random.SystemRandom()
    alphabet = string.ascii_uppercase + string.ascii_lowercase + string.digits
    list_id = ''.join(rng.choice(alphabet) for _ in range(5))
    with connection as c:
        c.execute('INSERT INTO todos (list_id, title, password) VALUES (?, ?, ?)', (list_id, title, hashed_password,))
    return redirect(url_for('todo', list_id=list_id))
# Returns the view for the list with list_id. TODO: If a password is set for that
# list, it should not show the items until logged in.
@app.route('/<list_id>')
def todo(list_id):
    """Render the page for list *list_id*, or a not-found page.

    NOTE(review): despite 'has_password', items are currently shown even
    when the viewer is not logged in -- see the TODO above.
    """
    with connection as c:
        c.execute('SELECT title, password FROM todos WHERE list_id=?', (list_id,))
        list_data = c.fetchone()
        if not list_data:
            return render_template('notfound.html', list_id=list_id)
        c.execute('SELECT todo, done, item_id FROM items WHERE list_id=?', (list_id,))
        todo = c.fetchall()
        # Template context: 'logged' only checks that *some* password
        # cookie exists; the template compares it against 'password'.
        data = {
            'title': list_data[0],
            'logged': 'password' in request.cookies,
            'has_password': len(list_data[1]) > 0,
            'password': list_data[1],
            'todo': [{'todo':t[0], 'done':t[1], 'item_id':t[2]} for t in todo],
            'list_id': list_id
        }
        return render_template('todo.html', **data)
# Adds the item into the list list_id.
@app.route('/add/<list_id>', methods=['POST'])
def add_item(list_id):
    """Insert a new, unchecked item into list *list_id* and reload it."""
    new_text = request.form['todo']
    insert_sql = 'INSERT INTO items (list_id, todo, done) VALUES (?, ?, 0)'
    with connection as c:
        c.execute(insert_sql, (list_id, new_text,))
    return redirect(url_for('todo', list_id=list_id))
# Removes the item from the list list_id.
@app.route('/remove/<item_id>', methods=['GET'])
def remove(item_id):
    """Delete one item and return to the list it belonged to.

    BUG FIX: the original indexed fetchone()[0] unconditionally, so an
    unknown item_id raised TypeError (HTTP 500); it now redirects to the
    landing page instead.
    """
    with connection as c:
        c.execute('SELECT list_id FROM items WHERE item_id=?', (item_id,))
        row = c.fetchone()
        if row is None:
            return redirect(url_for('index'))
        list_id = row[0]
        c.execute('DELETE FROM items WHERE item_id=?', (item_id,))
    return redirect(url_for('todo', list_id=list_id))
# Removes all marked items from the list list_id.
@app.route('/remove_marked/<list_id>', methods=['GET'])
def remove_marked(list_id):
    """Delete every completed (done=1) item from list *list_id*."""
    purge_sql = 'DELETE FROM items WHERE list_id=? AND done=1'
    with connection as c:
        c.execute(purge_sql, (list_id,))
    return redirect(url_for('todo', list_id=list_id))
# Marks the item item_id.
@app.route('/mark/<item_id>', methods=['POST'])
def mark(item_id):
    """Flag item *item_id* as done; returns a plain-text acknowledgement."""
    update_sql = 'UPDATE items SET done=1 WHERE item_id=?'
    with connection as c:
        c.execute(update_sql, (item_id,))
    return 'marked'
# Unmarks the item item_id.
@app.route('/unmark/<item_id>', methods=['POST'])
def unmark(item_id):
    """Flag item *item_id* as not done; returns a plain-text acknowledgement."""
    update_sql = 'UPDATE items SET done=0 WHERE item_id=?'
    with connection as c:
        c.execute(update_sql, (item_id,))
    return 'unmarked'
# Sets the title to the list list_id.
@app.route('/settitle/<list_id>', methods=['POST'])
def set_title(list_id):
    """Rename list *list_id* to the submitted title and reload it."""
    new_title = request.form['title']
    with connection as c:
        c.execute('UPDATE todos SET title=? WHERE list_id=?', (new_title, list_id,))
    return redirect(url_for('todo', list_id=list_id))
# Sets the password to the list list_id.
@app.route('/setpassword/<list_id>', methods=['POST'])
def set_password(list_id):
    """Hash the submitted password, store it on list *list_id*, reload."""
    new_hash = security.get_hash(request.form['password'])
    with connection as c:
        c.execute('UPDATE todos SET password=? WHERE list_id=?', (new_hash, list_id,))
    return redirect(url_for('todo', list_id=list_id))
# TODO: Logs in the user so he can edit a todo list with password.
@app.route('/login/<list_id>', methods=['POST'])
def login(list_id):
    """Check the submitted password for list *list_id* and set the cookie.

    BUG FIX: the original indexed fetchone()[0] unconditionally, so an
    unknown list_id raised TypeError (HTTP 500); it now just redirects
    back without a cookie, like a failed login.

    NOTE(review): on success the stored password *hash* is placed in a
    client cookie and compared verbatim later -- effectively a long-lived
    bearer token.  A random session id would be safer.
    """
    raw_password = request.form['password']
    with connection as c:
        c.execute('SELECT password FROM todos WHERE list_id=?', (list_id,))
        row = c.fetchone()
    response = make_response(redirect(url_for('todo', list_id=list_id)))
    if row is not None and security.verify_password(raw_password, row[0]):
        response.set_cookie('password', row[0])
    return response
# Logs out the current session
@app.route('/logout/<list_id>', methods=['POST'])
def logout(list_id):
    """Expire the auth cookie and return to the list page."""
    back_to_list = redirect(url_for('todo', list_id=list_id))
    response = make_response(back_to_list)
    response.set_cookie('password', '', expires=0)
    return response
|
UTF-8
|
Python
| false | false | 2,014 |
16,140,487,103,360 |
62edf7495b74167fa48ab4c7d92e8633dbd8f0a4
|
f1340565fcfc5657cabfab3f26671d888c1f0ffb
|
/cleanData.py
|
616e491be12b332d5c9b454c968aa9d52a655009
|
[] |
no_license
|
HaohanWang/nGramPoem
|
https://github.com/HaohanWang/nGramPoem
|
6940f56e3587d73997a313788fe6f12f436dc3d1
|
9cc53a6226332b53abaa012e607bcc4ae9d7372e
|
refs/heads/master
| 2021-01-23T07:02:42.461888 | 2013-04-06T04:10:23 | 2013-04-06T04:10:23 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import nltk
# Load the raw scraped poem text, one stripped line at a time.
text = [line.strip() for line in open("data/rawdata.txt")]

# Punctuation to strip from token tails / drop when a token is pure punctuation.
punct = '!,.?";\''

# Site-navigation boilerplate lines to discard entirely.
filt = []
filt.append("Contact Us /")
filt.append("Bookmark Site /")
filt.append("Link To Us /")
filt.append("Terms Of Use /")
filt.append("Privacy Policy /")
filt.append("References")

for line in text:
    # `out` decides whether this line survives the filters below.
    out = True
    if line == "":
        out = False
    for s in filt:
        if line==s:
            out = False
    # Drop decorative separator lines like "---- title ----".
    if line!="" and line[0]=='-' and line[-1]=='-':
        out = False
    # Trim trailing ellipses before tokenizing.
    if line.endswith('...'):
        line=line[:-3]
    if out:
        words = nltk.word_tokenize(line)
        # Rebuild the line with trailing punctuation stripped from each
        # token and punctuation-only tokens removed, then lowercase it.
        # NOTE(review): repeated `s +=` is quadratic; fine for short lines.
        s=""
        for word in words:
            if word[-1] in punct:
                s+=word[:-1]+" "
            elif word not in punct:
                s+=word+" "
        print s.lower()
|
UTF-8
|
Python
| false | false | 2,013 |
6,554,120,099,047 |
2739f9c50ed2f320de9e4ce4152cb6b7144486df
|
2e6b78eab2773f23027bc40c71b8090b772b3e1b
|
/server/apps/mob/admin.py
|
b2df3134dc9571a7cc9ddbfcf470dd02ad6b9c91
|
[
"LicenseRef-scancode-proprietary-license"
] |
non_permissive
|
jingchan/jh_rogue
|
https://github.com/jingchan/jh_rogue
|
fd74fafe684d2d2a5f7026c0164602631baa16fe
|
d1564aafc5076eae6188635e236560706eb19069
|
refs/heads/master
| 2020-04-10T14:49:17.926039 | 2011-04-26T23:59:14 | 2011-04-26T23:59:14 | 1,650,997 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import models
from django.contrib import admin
# Expose the mob models in the Django admin interface.
for mob_model in (models.MobType, models.Mob):
    admin.site.register(mob_model)
|
UTF-8
|
Python
| false | false | 2,011 |
19,275,813,243,626 |
bdac87963544679c30d8140f4f03a22c6118a39d
|
d1f14c36a4b8bcfb4907ad9128bed88b3fd90098
|
/setup.py
|
e3993765c98db9f27250bda265579e08675ddf5a
|
[
"LGPL-2.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-other-copyleft",
"LGPL-2.1-or-later",
"GPL-1.0-or-later",
"GPL-3.0-only",
"GPL-3.0-or-later"
] |
non_permissive
|
fogo/ss
|
https://github.com/fogo/ss
|
82473b28d2e0de87d3231f2c2aff8e05b7093fb9
|
997803323d7863d0397a6a84cc075841b711e6d9
|
refs/heads/master
| 2021-01-18T06:12:41.432222 | 2013-02-28T13:41:09 | 2013-02-28T13:41:09 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import distribute_setup
distribute_setup.use_setuptools()
from setuptools import setup, find_packages
# One-line summary shown on PyPI.
# BUG FIX: the original assignment ended with a stray trailing comma,
# which silently turned `description` into a 1-tuple instead of a string.
description = "This is a command line script that automatically searches for video subtitles using OpenSubtitles.org APIs (http://www.opensubtitles.org)."

setup(
    name="ss",
    version="0.2",
    packages=[],
    scripts=['ss.py'],
    entry_points={'console_scripts': ['ss = ss:Main']},

    # metadata for upload to PyPI
    author="[email protected]",
    author_email="[email protected]",
    description=description,
    license="GPL",
    # NOTE(review): placeholder keywords left over from a template;
    # should describe subtitle searching instead.
    keywords="hello world example examples",
    url="http://nicoddemus.github.com/ss/",
    use_2to3=True,
)
|
UTF-8
|
Python
| false | false | 2,013 |
9,869,834,856,979 |
30f87e94ef32181319344148678f82eb9658492f
|
b9d245ef9c42fee4e705ee4c8b312e2132d5c063
|
/src/tests/test_datasets.py
|
7f3c9a8700e7156f80d2d8ca2e26e2b398f1610f
|
[] |
no_license
|
nunberty/chars-movement-graph
|
https://github.com/nunberty/chars-movement-graph
|
68a908716475cd62cef46661b50e826ed59382ce
|
2778e1b5b23867b24481204a4844de30cc7b37fd
|
refs/heads/master
| 2016-09-05T13:08:57.964839 | 2014-12-22T14:37:04 | 2014-12-22T14:37:04 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import pytest
from persona import datasets
def test_can_fetch_existing_fb2_dataset():
    """Dataset '1' exists and yields a non-trivial number of sentences."""
    sentences = datasets.fetch_dataset('1')
    assert len(sentences) > 42
def test_raises_exception_for_non_existing_dataset():
    """Fetching a dataset that does not exist must raise."""
    bogus_name = "Plan 9 From Outer Space"
    with pytest.raises(Exception):
        datasets.fetch_dataset(bogus_name)
|
UTF-8
|
Python
| false | false | 2,014 |
14,465,449,897,271 |
806d0625ec52b72aaeb3786c18e1b894383791d0
|
6c82b7f81cba8151435f5a7bfc5869b036b5941a
|
/code/dataset.py
|
027dabc61dca03cc6e3f37d7899104a1c9e81f77
|
[
"ISC"
] |
permissive
|
chrinide/FrequentPatternMining
|
https://github.com/chrinide/FrequentPatternMining
|
2292bb9dc5c4178aa7f9dba4adc06f5c2f938fdb
|
d04d02ba87fd5550c70753134e45798c65bc6f8f
|
refs/heads/master
| 2020-12-24T13:44:37.871004 | 2012-12-12T18:25:36 | 2012-12-12T18:25:36 | 42,161,422 | 1 | 0 | null | true | 2015-09-09T06:53:00 | 2015-09-09T06:53:00 | 2015-09-09T06:20:17 | 2012-12-12T19:12:39 | 3,756 | 0 | 0 | 0 | null | null | null |
#!/usr/bin/env python2.6
######################################################################
# dataset.py
######################################################################
# In which we define a Dataset object which stores rows of itemsets
# and can parse information from a file which separates itemsets by
# newlines and separates items within an itemset by a single space.
######################################################################
# For license information, see LICENSE file
# For copyright information, see COPYRIGHT file
######################################################################
from collections import defaultdict
######################################################################
# Dataset
######################################################################
# The most basic dataset which stores itemsets as rows.
######################################################################
class Dataset(object):
    """Horizontal dataset: a list of transactions, each a list of items."""

    def __init__(self):
        self.rows = []

    def __len__(self):
        return len(self.rows)

    def __iter__(self):
        return iter(self.rows)

    def readFromFile(self, f):
        """Append one itemset per line of *f*: lowercased, split on spaces."""
        for raw_line in f:
            self.rows.append(raw_line.strip().lower().split(" "))

    def readFromDataset(self, ds):
        """Replace our rows with a copy of *ds*, converting from the
        vertical (item -> transaction ids) layout when necessary."""
        if not hasattr(ds, '__IS_VERTICAL__'):
            self.rows = [list(row) for row in ds.rows]
            return
        # Invert the tidset index back into per-transaction item lists.
        transactions = defaultdict(list)
        for item in ds.tidsets.keys():
            for tid in ds.tidsets[item]:
                transactions[tid].append(item)
        self.rows = transactions.values()
######################################################################
# NumericalDataset
######################################################################
# This dataset also stores itemsets as rows, but converts the items to
# integers.
######################################################################
class NumericalDataset(Dataset):
    """Dataset whose items are converted to ints after loading."""

    def _convertToNumerical(self):
        # Convert every item to int, mutating the inner lists in place so
        # outside references stay valid.  enumerate replaces the original
        # double range(len(...)) index loop.
        for row in self.rows:
            for col, value in enumerate(row):
                row[col] = int(value)

    def readFromFile(self, f):
        """Load itemsets from *f*, then convert items to ints."""
        Dataset.readFromFile(self, f)
        self._convertToNumerical()

    def readFromDataset(self, ds):
        """Copy rows from *ds*, then convert items to ints."""
        Dataset.readFromDataset(self, ds)
        self._convertToNumerical()
######################################################################
# VerticalDataset
######################################################################
# This dataset stores item values in a list of values and for each
# item value there is a list of itemsets in which it appears, these
# lists are stored in rows.
######################################################################
class VerticalDataset(Dataset):
    """Vertical layout: maps each item value to the set of transaction
    ids (row indices) in which it occurs."""

    def __init__(self):
        # BUG FIX: also run Dataset.__init__ so the inherited `rows`
        # attribute exists; the original skipped it, making len()/iter()
        # raise AttributeError on a fresh VerticalDataset.
        Dataset.__init__(self)
        self.tidsets = defaultdict(set)
        self.__IS_VERTICAL__ = True

    def _convertToVertical(self):
        # Build the item -> {transaction id} index from self.rows.
        tidsets = defaultdict(set)
        for tid, row in enumerate(self.rows):
            for value in row:
                tidsets[value].add(tid)
        self.tidsets = tidsets

    def readFromFile(self, f):
        """Load itemsets from *f* and index them vertically."""
        Dataset.readFromFile(self, f)
        self._convertToVertical()

    def readFromDataset(self, ds):
        """Copy *ds*; vertical sources are copied directly, horizontal
        ones are re-indexed."""
        if hasattr(ds, '__IS_VERTICAL__'):
            self.rows = ds.rows[:]
            # BUG FIX: the original copied a nonexistent `ds.values`
            # attribute (AttributeError); copy the tidset index instead.
            self.tidsets = defaultdict(set)
            for value in ds.tidsets.keys():
                self.tidsets[value] = set(ds.tidsets[value])
            return
        Dataset.readFromDataset(self, ds)
        self._convertToVertical()
######################################################################
# Basic Tests
######################################################################
if __name__ == '__main__':
    # Smoke test: load a transaction file, round-trip it through the
    # vertical representation, and print each stage.
    import sys
    if len(sys.argv) < 2:
        print "usage: {0} [file]".format(sys.argv[0])
        sys.exit(-1)
    filename = sys.argv[1]
    ds = Dataset()
    with open(filename,'rU') as f:
        ds.readFromFile(f)
    print "Read {0} lines in {1}".format(len(ds),filename)
    for row in ds.rows:
        print row
    # Horizontal -> vertical conversion.
    vds = VerticalDataset()
    vds.readFromDataset(ds)
    for key in vds.tidsets.keys():
        print "{0}:{1}".format(key,vds.tidsets[key])
    # ...and back again; rows should contain the same transactions.
    ds.readFromDataset(vds)
    for row in ds.rows:
        print row
|
UTF-8
|
Python
| false | false | 2,012 |
7,988,639,185,072 |
e27b69c3ead42faeb3c0e4dab0cad5b11858f578
|
ae8d215f35b9a65627e22d69fb2e402832f1677c
|
/src/mapreduce/elespectador.py
|
40b810f0b69415d15e9eb1f27181cb5ee4c1b43c
|
[] |
no_license
|
dsarmientos/bigdata_proyecto
|
https://github.com/dsarmientos/bigdata_proyecto
|
1aed5b1a4bf69077d5f8653d6f430460f9ebf2f3
|
101354d15ba2eb651e6db38f6506e493eb4eeb25
|
refs/heads/master
| 2016-09-10T00:07:38.474191 | 2014-03-15T15:57:38 | 2014-03-15T15:57:38 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import codecs
import collections
import hashlib
import heapq
import string
import dumbo
import nltk
import redis
import noticias.parser as noticias_parser
import noticias.noticia_pb2 as noticia_pb2
import scripts.crear_automata
import datastructures.interval_tree
import utils
# Module-level Redis client (localhost, db 0) shared by the map/reduce stages.
r = redis.StrictRedis(host='localhost', port=6379, db=0)
def html_mapper(key, value):
    """Map stage: read the UTF-8 HTML file named by *value*.

    Yields a single (path, html) pair; *key* is ignored.
    """
    with codecs.open(value, 'r', 'utf-8') as handle:
        contents = handle.read()
    yield value, contents
def html_reducer(key, value):
    """Reduce stage: keep only the first HTML payload seen for each key.

    *value* is an iterator of HTML strings; yields one (key, html) pair.
    """
    # next(value) replaces the Python-2-only value.next() method call;
    # the builtin works on both Python 2.6+ and Python 3.
    html = next(value)
    yield key, html
class NoticiaMapper(object):
    """Dumbo mapper: parse an article's HTML and find congress-member mentions.

    Emits (noticia_id, match) pairs where noticia_id is the SHA-1 of the
    article body (also used as its Redis key) and match is an automaton hit.
    """
    def __call__(self, key, value):
        try:
            # *value* is raw HTML; parse() returns a serialized protobuf.
            noticia_str = self.parse(value)
        except Exception:
            raise
        else:
            noticia = noticia_pb2.Article()
            noticia.ParseFromString(noticia_str)
            match_list = self.find_congresistas(noticia)
            if match_list:
                # Only persist articles that actually mention someone.
                noticia_id = self.save_noticia(noticia)
                for match in match_list:
                    yield noticia_id, match
    def parse(self, html):
        """Return the article as a serialized protobuf via the site-specific parser."""
        parser = noticias_parser.ElEspectadorParser(html)
        noticia = parser.as_protobuf_string()
        return noticia
    def find_congresistas(self, noticia):
        """Query the name-matching automaton against the article body.

        Presumably returns [(range, person_id), ...] -- see NoticiaReducer.build_heap.
        """
        automata = scripts.crear_automata.get_automata()
        content = noticia.content
        return automata.query(content)
    def save_noticia(self, noticia):
        """Store the article in Redis keyed by content SHA-1; return the id.

        setnx makes the write idempotent across duplicate articles.
        """
        id_ = hashlib.sha1(noticia.content).hexdigest()
        r.setnx('noticia:' + id_, noticia.SerializeToString())
        return id_
class NoticiaReducer(object):
    """Dumbo reducer: group automaton matches for one article by sentence.

    Merges overlapping name matches, maps each surviving match to the sentence
    containing it, and emits one record per sentence with the ids of every
    person mentioned in it.

    Fixes: `no_overlaps_iter` ended with `raise StopIteration`, which becomes
    a RuntimeError inside generators under PEP 479 (Python 3.7+); replaced by
    a plain return. `iteritems()` replaced by `items()` for Python 3
    compatibility (behaviorally equivalent here).
    """
    def __call__(self, noticia_id, match_list):
        noticia = self.get_noticia(noticia_id)
        sentence_tree = self.build_sentence_tree(noticia.content)
        matches_heap = self.build_heap(match_list)
        sentence_match = {}
        for match in self.no_overlaps_iter(matches_heap):
            person_id, match_range = match[2], match[:2]
            # person_id is None for ambiguous overlapping matches; skip those.
            if person_id is not None:
                sentences = sentence_tree.findRange(match_range)
                assert len(sentences) == 1
                sentence = sentences[0]
                sentence_id = hashlib.sha1(sentence).hexdigest()[:10]
                if sentence_id not in sentence_match:
                    sentence_match[sentence_id] = {'sentence': sentence,
                        'people_ids': set((person_id,))}
                else:
                    sentence_match[sentence_id]['people_ids'].add(person_id)
        for sentence_id, match in sentence_match.items():
            yield sentence_id, (match['sentence'],
                                    list(match['people_ids']))
    def build_heap(self, match_list):
        """Build a heap of (start, end, person_id) tuples ordered by start offset."""
        heap = []
        for match in match_list:
            heapq.heappush(heap,
                           (match[0][0], match[0][1], match[1]))
        return heap
    def get_noticia(self, noticia_id):
        """Fetch and deserialize the article stored under 'noticia:<id>' in Redis."""
        noticia = noticia_pb2.Article()
        serialized_noticia = r.get('noticia:' + noticia_id)
        noticia.ParseFromString(serialized_noticia)
        return noticia
    def build_sentence_tree(self, content):
        """Return an interval tree mapping character ranges to sentence text.

        Sentence boundaries come from NLTK's Spanish Punkt tokenizer.
        """
        sent_detector = nltk.data.load('tokenizers/punkt/spanish.pickle')
        span_list = sent_detector.span_tokenize(content)
        sent_intervals = [(span[0], span[1],
                           content[span[0]:span[1]]) for span in span_list]
        tree = datastructures.interval_tree.IntervalTree(sent_intervals,
                                                         0, 1, 0, len(content))
        return tree
    def no_overlaps_iter(self, match_list):
        """Yield (start, end, person_id) matches from the heap, merging overlaps.

        Overlapping matches for *different* people cannot be disambiguated
        here, so the merged range is emitted with person_id None and skipped
        by the caller.
        """
        match = heapq.heappop(match_list)
        while True:
            assert(match[0] < match[1])
            if not len(match_list):
                yield match
                break
            next_match = heapq.heappop(match_list)
            if next_match[0] < match[1]:
                if match[2] != next_match[2]:
                    # TODO: Improve NER. This type of errors are currently caused by
                    # duplicate entity matches.
                    # For now, mark as None and ignore.
                    entity_id = None
                else:
                    entity_id = match[2]
                match = (match[0], max(next_match[1], match[1]), entity_id)
            else:
                if next_match[0] > match[1]:
                    yield match
                match = next_match
        # Plain return: `raise StopIteration` here would be converted to
        # RuntimeError by PEP 479 in Python 3.7+.
        return
class OracionMapper(object):
    """Dumbo mapper: build a per-person term index from matched sentences.

    Input: (sentence_id, (sentence, [person_id, ...])). Term frequencies of
    the sentence (minus stop words and the person's own name tokens) are
    accumulated into Redis sorted sets, one per congress member.

    NOTE(review): uses Python-2-only iteritems() and .decode() on str --
    will not run under Python 3 unmodified.
    """
    def __call__(self, sentence_id, sentence_people_list):
        # Normalize once; trailing period stripped so sentence hashes are stable.
        self.sentence = sentence_people_list[0].rstrip('.').lower()
        people_list = self.get_people_list(
            sentence_people_list[1])
        self.pipe = r.pipeline()
        self.index_sentence(people_list)
        self.pipe.sadd('indexados:oraciones', sentence_id)
        self.pipe.execute()
        yield self.sentence, people_list
    def get_people_list(self, id_list):
        """Resolve person ids to ASCII names in one pipelined Redis round trip."""
        pipe = r.pipeline()
        for id_ in id_list:
            pipe.hget('congresista:' + str(id_),
                'nombre_ascii')
        names = pipe.execute()
        return zip(id_list, names)
    def index_sentence(self, people_list):
        """Index this sentence's term frequencies under every mentioned person."""
        stop_words = self.load_stop_words()
        terms_tf = self.get_terms_tf(stop_words)
        for person_id, person_name in people_list:
            self.add_terms_to(terms_tf, person_id, person_name)
            self.pipe.sadd('indexados:congresistas', person_id)
    def load_stop_words(self):
        """Return NLTK's Spanish stop words, accent-stripped to match the terms."""
        stop_words = [
            utils.remove_accents(w.decode('utf-8'))
            for w in nltk.corpus.stopwords.words(
                'spanish')]
        return stop_words
    def add_terms_to(self, terms_tf, person_id, person_name):
        """Accumulate term frequencies into the person's Redis sorted set.

        The person's own name tokens are skipped so they don't dominate the index.
        """
        congresista_id = str(person_id)
        palabras_nombre = [p.lower() for p in person_name.split(' ')]
        for term, tf in terms_tf.iteritems():
            if term not in palabras_nombre:
                self.pipe.zincrby('indice:congresista:' + str(congresista_id),
                    term, tf)
    def get_terms_tf(self, stopwords):
        """Tokenize the sentence on whitespace, strip punctuation, count terms.

        Single-character tokens and stop words are dropped.
        """
        tokenizer = nltk.RegexpTokenizer('\s+', gaps=True)
        trans_table = dict(
            (ord(symbol), u'') for symbol in string.punctuation)
        terms_tf = collections.defaultdict(float)
        for token in tokenizer.tokenize(self.sentence):
            token = token.translate(trans_table).strip()
            if token not in stopwords and len(token) > 1:
                terms_tf[token] += 1
        return terms_tf
if __name__ == "__main__":
job = dumbo.Job()
job.additer(html_mapper, html_reducer)
job.additer(NoticiaMapper, NoticiaReducer)
job.additer(OracionMapper, dumbo.lib.identityreducer)
job.run()
|
UTF-8
|
Python
| false | false | 2,014 |
3,899,830,354,330 |
2812b2ed976fe3a7c9aa8a73123858ddcfa3116e
|
e52bbfae48b63753e9b82df6d4bccd2ba3d67aa0
|
/webserver/api/serializers.py
|
1c5ff26be9d74fad40390f05aace53773ae020d1
|
[] |
no_license
|
andrspc/mit-aiti
|
https://github.com/andrspc/mit-aiti
|
b52314cd81a45adc9fd8101ba610fdf34583f73c
|
8cd07cb98d1e112c4c7bcea9ece1a0f66d6b41be
|
refs/heads/master
| 2020-02-07T22:07:18.276667 | 2013-07-19T00:12:35 | 2013-07-19T00:12:35 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from recharge.models import Recharge
from rest_framework import serializers
class RechSerializer(serializers.HyperlinkedModelSerializer):
    """DRF serializer exposing a Recharge's creation time, card id and value."""
    class Meta:
        # Only these three model fields are serialized; everything else is hidden.
        model = Recharge
        fields = ('created', 'card_id', 'value')
|
UTF-8
|
Python
| false | false | 2,013 |
15,006,615,762,049 |
e8d95c3dcbe80929997b1639c71fb0285234101e
|
eb7606c3190ad949326fd6a3488d9ee3f805470f
|
/puzzler/bin/iamonds/polyiamonds-12345-peanut.py
|
ff86848f7ddb3f7181ae112b3af7d7e86e63f9d8
|
[
"GPL-2.0-only",
"LicenseRef-scancode-philippe-de-muyter",
"GPL-1.0-or-later"
] |
non_permissive
|
dc25/puzzler
|
https://github.com/dc25/puzzler
|
9101695ea1a1864910fc49097d852753f607ac1c
|
8e353713b758b866da25f71d33da33b8625bb6d8
|
refs/heads/master
| 2020-05-17T18:33:20.039119 | 2014-09-14T13:19:44 | 2014-11-11T01:05:24 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# $Id$
"""362,047 solutions"""
import puzzler
from puzzler.puzzles.polyiamonds12345 import Polyiamonds12345Peanut
# Run the solver for the peanut-shaped polyiamond (orders 1-5) puzzle.
puzzler.run(Polyiamonds12345Peanut)
|
UTF-8
|
Python
| false | false | 2,014 |
8,392,366,143,058 |
36cd72851e0db403c1fa3db90fcaf6e503abbb55
|
efc507de3530e5a3a69aa412cd65108a081e1ecf
|
/propRandom.py
|
a0bb6a9b93d54a20e665bb14b290b2b80b1005ec
|
[] |
no_license
|
pbaylis/unclaimed-prop
|
https://github.com/pbaylis/unclaimed-prop
|
fa6f88b29c279a6146499054cabaf684a2bc64b8
|
f82c62e9c54b5b30fbb241e938688ed48238e72d
|
refs/heads/master
| 2016-09-09T14:43:45.305834 | 2014-05-02T06:50:45 | 2014-05-02T06:50:51 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
### gets table from https://ucpi.sco.ca.gov/ucp/...
### outputs csv
### There are two types (that I know of so far): Property and Notice
### - Property: https://ucpi.sco.ca.gov/ucp/PropertyDetails.aspx?propertyRecID=8162422
### - Notice: https://ucpi.sco.ca.gov/ucp/NoticeDetails.aspx?propertyRecID=2033380
# takes required arguments N/P, min, max. ex:
# - python scraping.py N 1 1000
# - python scraping.py P 500 2000
import sys
import csv
import time
import urllib2
import datetime
from bs4 import BeautifulSoup
from geopy import geocoders
from random import randint
def main():
    """Entry point: parse CLI args, then scrape random unclaimed-property pages.

    Usage: propRandom.py [maxCalls [lowerLim upperLim]]
    """
    # optional parameter 1: maximum # of a call
    maxCalls = 2001
    if len(sys.argv) > 1:
        maxCalls = int(sys.argv[1])
    # optional parameters 2 and 3: lower and upper limits for random call
    lowerLim = 1
    upperLim = 200000 #
    if len(sys.argv) > 3:
        lowerLim = int(sys.argv[2])
        upperLim = int(sys.argv[3])
    print("Starting. maxCalls = " + str(maxCalls) + ", looking within [" + str(lowerLim) + ", " + str(upperLim) + "].")
    print(type(maxCalls))
    sleep = 0 # how much to wait in between calls
    getProperties(sleep, maxCalls, lowerLim, upperLim)
    print("Done!")
def getProperties(sleep = 0, maxCalls = 2001, lowerLim = 1, upperLim = 200000):
    """Scrape up to *maxCalls* random property pages, flushing a CSV every 1000 rows.

    Random 9-digit property ids are drawn from [lowerLim, upperLim]; missing
    ids produce an empty row. Stops early after 10000 consecutive failures.
    """
    print("Getting Properties...")
    header = [["propID", "ownerName", "ownerAdd", "propType", "cashRep", "sharesRep", "nameSec", "repBy", "newAdd", "lat", "lng", "dateRetrieved", "URL"]]
    propertyList = list(header)
    errorCount = 0
    i = 1
    while (errorCount <= 10000 and i <= maxCalls):
        randID = randint(lowerLim, upperLim) # right now it doesn't seem like it goes past 10m. maybe spottier after 1m?
        # https://ucpi.sco.ca.gov/ucp/PropertyDetails.aspx?propertyID=001331061
        # 9 digit numbers, needs to be padded with 0s
        dt = datetime.datetime.now().strftime("%Y-%m-%d_%H_%M_%S")
        url = "https://ucpi.sco.ca.gov/ucp/PropertyDetails.aspx?propertyID=" + str(randID).zfill(9)
        print(str(i) + ": " + url)
        try:
            property = processProperty(url)
            errorCount = 0
        except Exception:
            print("Property ID " + str(randID) + " not found.")
            property = [randID, "", "", "", "", "", "", "", "", "", ""]
            errorCount = errorCount + 1
            pass
        propertyList.append(property + [dt] + [url])
        time.sleep(sleep)
        if i % 1000 == 0:
            print("Writing CSV.")
            outputCSV(propertyList,"properties_" + str(dt))
            propertyList = list(header) # reset propertyList - remember, lists are mutable.
        i = i + 1
def processProperty(requestURL):
    """Fetch one PropertyDetails page and return its fields as a flat list.

    Returns [propID, ownerName, ownerAdd, propType, cashRep, sharesRep,
    nameSec, repBy, newAdd, lat, lng] -- see the header in getProperties.
    Raises on network errors or if the page lacks the expected tables.
    """
    response = urllib2.urlopen(requestURL)
    responseHTML = response.read()
    soup = BeautifulSoup(responseHTML)
    # get property ID number
    propID = soup.find('table', id="tbl_HeaderInformation").findAll('span')[2].contents[0].encode('ascii', 'ignore').strip()
    PropertyDetailsTable = soup.find('table', id="PropertyDetailsTable")
    PropertyDetailsList = getListFromTable(PropertyDetailsTable)
    # They add two fields (sharesRep and nameSec) if shares were reported
    if PropertyDetailsTable.find('tr', id="ctl00_ContentPlaceHolder1_trNumberOfShares") is None:
        PropertyDetailsList.insert(4, "")
        PropertyDetailsList.insert(4, "")
    # get latlon and better address from address
    geogList = getGeog(PropertyDetailsList[1])
    propertyRow = [propID] + PropertyDetailsList + geogList
    return(propertyRow)
def getListFromTable(table):
    """Return the second <td> of every row in a BeautifulSoup table as strings."""
    # processes a table where we want the second column of every row, returns list
    rows = table.findAll('tr')
    # get the second column of each row - that's our data
    outputList = []
    for row in rows:
        col = row.findAll('td')[1].contents
        if isinstance(col, list): # if it's a list, fix to string
            col = fixList(col)
        else:
            col = col[0].encode('ascii', 'ignore') # otherwise, just get string
        outputList.append(col.strip())
    return(outputList)
def fixList(tagstrList):
    """Join the text nodes of a mixed tag/string list, one per line, tags dropped.

    NOTE(review): relies on the Python-2-only `unicode` builtin.
    """
    # take in a list that includes strings and tags, return property formatted string and ignore tags
    fixed = ""
    for part in tagstrList:
        if isinstance(part, unicode):
            fixed = fixed + part.strip().encode('ascii', 'ignore') + "\n"
    return(fixed.strip())
def getGeog(address):
    """Geocode *address* via geocoder.us; return [place, lat, lng].

    Falls back to ["", 0, 0] when the geocoder returns nothing usable.
    """
    # take in an address, return the address, (lat, lon) in a list
    us = geocoders.GeocoderDotUS()
    try:
        place, (lat, lng) = us.geocode(address)
    except TypeError:
        print "Couldn't geocode address."
        place, (lat, lng) = "", (0,0)
    return list((place, lat, lng))
def outputCSV(mylist, name):
    """Write *mylist* (a list of rows) to data/random/<name>.csv.

    Fix: the original used the `file()` builtin (removed in Python 3) and
    never closed the handle; a with-block now closes and flushes it
    deterministically. Assumes the data/random directory exists.
    """
    with open("data/random/" + name + '.csv', 'wb') as outfile:
        writer = csv.writer(outfile, dialect='excel')
        writer.writerows(mylist)
    print("Wrote " + name + '.csv')
if __name__ == "__main__":
main()
|
UTF-8
|
Python
| false | false | 2,014 |
5,978,594,495,763 |
a6eb8fbe3cf9eb829886925bd78dcc97b57d049f
|
e67375a8275922ef4191786abe7a2dbf5e22461d
|
/sdk/external/BlueTune-SRC-1-4-1-44/Bento4/Source/Python/bento4/errors.py
|
251e5a03c4e92749c530019af12450be99cf8744
|
[
"GPL-1.0-or-later",
"GPL-2.0-only"
] |
non_permissive
|
prpplague/VTech-InnoTab
|
https://github.com/prpplague/VTech-InnoTab
|
c66e3118b8f808922eda65a5a8b2a894b412a326
|
53453477ffa0693782659e39db8481bf4f2832bd
|
refs/heads/master
| 2019-02-27T12:26:56.678457 | 2012-07-03T15:41:00 | 2012-07-03T15:41:00 | 4,873,975 | 3 | 3 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Bento4 native result codes. SUCCESS is zero; every failure mode is a
# distinct negative integer returned by the underlying C library.
SUCCESS = 0
FAILURE = -1
ERROR_OUT_OF_MEMORY = -2
ERROR_INVALID_PARAMETERS = -3
ERROR_NO_SUCH_FILE = -4
ERROR_PERMISSION_DENIED = -5
ERROR_CANNOT_OPEN_FILE = -6
ERROR_EOS = -7
ERROR_WRITE_FAILED = -8
ERROR_READ_FAILED = -9
ERROR_INVALID_FORMAT = -10
ERROR_NO_SUCH_ITEM = -11
ERROR_OUT_OF_RANGE = -12
ERROR_INTERNAL = -13
ERROR_INVALID_STATE = -14
ERROR_LIST_EMPTY = -15
ERROR_LIST_OPERATION_ABORTED = -16
ERROR_INVALID_RTP_CONSTRUCTOR_TYPE = -17
ERROR_NOT_SUPPORTED = -18
ERROR_INVALID_TRACK_TYPE = -19
ERROR_INVALID_RTP_PACKET_EXTRA_DATA = -20
ERROR_BUFFER_TOO_SMALL = -21
ERROR_NOT_ENOUGH_DATA = -22

# Maps each native result code to (exception class, message prefix). Most
# prefixes end with a space so a caller-supplied message appends cleanly.
RESULT_EXCEPTION_MAP = {
    FAILURE: (Exception, ''),
    ERROR_OUT_OF_MEMORY: (MemoryError, ''),
    ERROR_INVALID_PARAMETERS: (ValueError, 'Invalid parameter '),
    ERROR_NO_SUCH_FILE: (IOError, 'No such file '),
    ERROR_PERMISSION_DENIED: (IOError, 'Permission denied '),
    ERROR_CANNOT_OPEN_FILE: (IOError, 'Cannot open file '),
    ERROR_EOS: (EOFError, ''),
    ERROR_WRITE_FAILED: (IOError, 'Write failed '),
    ERROR_READ_FAILED: (IOError, 'Read failed '),
    ERROR_INVALID_FORMAT: (ValueError, 'Invalid format '),
    ERROR_NO_SUCH_ITEM: (LookupError, ''),
    ERROR_OUT_OF_RANGE: (IndexError, ''),
    ERROR_INTERNAL: (RuntimeError, 'Bento4 internal error '),
    ERROR_INVALID_STATE: (RuntimeError, 'Bento4 invalid state'),
    ERROR_LIST_EMPTY: (IndexError, 'List empty '),
    ERROR_LIST_OPERATION_ABORTED: (RuntimeError, 'List operation aborted '),
    ERROR_INVALID_RTP_CONSTRUCTOR_TYPE: (ValueError, 'Invalid RTP constructor type '),
    ERROR_NOT_SUPPORTED: (NotImplementedError, ''),
    ERROR_INVALID_TRACK_TYPE: (ValueError, 'Invalid track type '),
    ERROR_INVALID_RTP_PACKET_EXTRA_DATA: (ValueError, 'Invalid Rtp packet extra data '),
    ERROR_BUFFER_TOO_SMALL: (MemoryError, 'Buffer too small '),
    ERROR_NOT_ENOUGH_DATA: (IOError, 'Not enough data ')
}

def check_result(result, msg=''):
    """Raise the Python exception mapped to *result*; no-op on SUCCESS.

    *msg* is appended to the mapped message prefix. Codes with no mapping
    raise RuntimeError.
    """
    if result == SUCCESS:
        return
    mapping = RESULT_EXCEPTION_MAP.get(result)
    if mapping is None:
        raise RuntimeError("Bento4 unknown error: code %d" % result)
    exception_class, msg_prefix = mapping
    raise exception_class(msg_prefix + msg)
|
UTF-8
|
Python
| false | false | 2,012 |
5,858,335,423,069 |
abda37178a08c47da7ff745400796be752daffb6
|
b93e9038563ce017add89f50601e2de328a62c26
|
/gp2gff.py
|
e0987f0efe03d96a86d2bfa52c11a5deaf2c5354
|
[
"MIT"
] |
permissive
|
xysheep/play_sequence
|
https://github.com/xysheep/play_sequence
|
733c1b4bd900ac9a6f4d3a90eacb429c6fd036f6
|
e1326938f4a2a047b43eae9a9ab318e1f7dd0151
|
refs/heads/master
| 2016-08-04T05:51:59.701330 | 2014-05-24T07:53:22 | 2014-05-24T07:53:22 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python3
"""Convert gene predictions (argv[1]) to minimal tab-separated GFF lines.

Each input row looks like: <name> <start> <end> <strand>, where <name> is an
underscore-separated string whose second field is the chromosome and fourth
field is the gene id (e.g. pred_chr1_gid_42).
"""
import sys
import os


def convert_line(l):
    """Return one GFF line for a single input record."""
    p = l.rstrip().split()
    # Chromosome and gene id are embedded in the name column.
    ch = p[0].split('_')[1]
    gid = p[0].split('_')[3]
    return '\t'.join([ch, 'prediction', 'Gene', p[1], p[2], '.', p[3], '0', 'gid=%s' % (gid)])


if __name__ == '__main__':
    # Fixes: the with-block closes the input handle (the original leaked the
    # object returned by open()), and the __main__ guard keeps the conversion
    # from running on import.
    with open(sys.argv[1]) as infile:
        for l in infile:
            print(convert_line(l))
|
UTF-8
|
Python
| false | false | 2,014 |
18,940,805,803,329 |
1b09541c0f5bdfa082eb70b5b5347939afc8f157
|
f4472303685ce51e95bcff2406c2a494a87b730a
|
/src/main/python/les/decomposers/max_clique_decomposer.py
|
f781425df7d5182cd36df5e914c74a4bb1b7a499
|
[
"Apache-2.0"
] |
permissive
|
robionica/les
|
https://github.com/robionica/les
|
4219f12116b5ba10b793f0b09b7c3a084fc48f79
|
125d7dd9562bc2aa1885633b1b453b8ca7ca1d51
|
refs/heads/master
| 2021-01-19T18:36:14.316579 | 2014-04-23T19:56:43 | 2014-04-23T19:56:43 | 4,104,185 | 0 | 1 | null | false | 2014-04-23T20:04:17 | 2012-04-22T13:39:40 | 2014-04-23T20:04:17 | 2014-04-23T20:04:17 | 3,156 | 3 | 4 | 0 |
Python
| null | null |
# Copyright (c) 2012-2013 Oleksandr Sviridenko
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import networkx
from les.graphs import interaction_graph
from les.decomposers import decomposer_base
from les.mp_model import MPModel
from les.mp_model import MPModelBuilder
from les.graphs import decomposition_tree
from les.utils import logging
class MaxCliqueDecomposer(decomposer_base.DecomposerBase):
  """Decomposes an MP model into submodels, one per maximal clique of its
  interaction graph, linked into a decomposition tree by shared variables.
  """

  def __init__(self, model):
    decomposer_base.DecomposerBase.__init__(self, model)
    # Constraint-coefficient matrix; columns correspond to model variables.
    self._A = model.rows_coefficients

  def _build_submodel(self, clique):
    """Slice the model to the clique's columns and the rows common to all of them.

    rows_scope is the intersection of the nonzero-row sets of every column in
    the clique.
    """
    rows_scope = set()
    cols_scope = list()
    for label in clique:
      i = self._model.columns_names.index(label)
      cols_scope.append(i)
      rows_indices = set(self._A.getcol(i).nonzero()[0])
      if not len(rows_scope):
        # First column: seed the intersection.
        rows_scope.update(rows_indices)
      else:
        rows_scope = rows_scope.intersection(rows_indices)
    return self._model.slice(list(rows_scope), cols_scope)

  def decompose(self):
    """Build the decomposition tree: one node per maximal clique, edges between
    cliques sharing variables; the last clique becomes the root.
    """
    self._decomposition_tree = decomposition_tree.DecompositionTree(self._model)
    igraph = interaction_graph.InteractionGraph(self.get_model())
    cliques = list(map(set, networkx.find_cliques(igraph)))
    logging.debug("%d clique(s) were found." % len(cliques))
    submodels_cache = []
    for i, clique in enumerate(cliques):
      submodel = self._build_submodel(clique)
      self._decomposition_tree.add_node(submodel)
      submodels_cache.append(submodel)
      # Link against every earlier clique that shares at least one variable.
      for j, other_clique in enumerate(cliques[:i]):
        other_submodel = submodels_cache[j]
        shared_cols_scope = clique & other_clique
        if shared_cols_scope:
          self._decomposition_tree.add_edge(submodel, other_submodel,
                                            shared_cols_scope)
    self._decomposition_tree.set_root(submodels_cache[-1])
|
UTF-8
|
Python
| false | false | 2,014 |
2,362,232,038,310 |
2865a2f96aa96bc580c2a97db947e2f64f875b9b
|
08f3068f4d5fa811c064d9cf60605b5a1678156c
|
/chapter_7/exercise_7_4_eval.py
|
83b9f207777d4bd40559a8d1d68bfb2b901e9512
|
[] |
no_license
|
rishavh/UW_python_class_demo.code
|
https://github.com/rishavh/UW_python_class_demo.code
|
fea84af39b12e71f26a6dbd15c212004ac295eb6
|
a50c3fe36bd1b55b4ecb8336dd68df677806cf6f
|
refs/heads/master
| 2021-01-18T08:10:33.179065 | 2013-03-24T04:33:56 | 2013-03-24T04:33:56 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#! /usr/bin/python
#
# http://greenteapress.com/thinkpython/html/thinkpython008.html
# Chapter 7 exercise 4
#
def eval_loop () :
    """Repeatedly read an expression from stdin and print eval()'s result.

    Type 'done' to exit. SECURITY NOTE: eval() on raw user input executes
    arbitrary code -- acceptable only for this textbook exercise.
    """
    while True :
        e = raw_input(" Enter an expression to evaluate (be gentle) or 'done' if done " )
        if e == "done" :
            break
        r = eval( e )
        print r
eval_loop()
|
UTF-8
|
Python
| false | false | 2,013 |
6,820,408,105,990 |
8afcefc12cebc3cfe70b70c35dcbdaa60b6c00a9
|
ab2502b9db5190e9067986e52666ff0d1f653611
|
/settings/data.py
|
03e6733a48559bd859909a103b8395e19037130c
|
[] |
no_license
|
jakosz/wiki-tools
|
https://github.com/jakosz/wiki-tools
|
65604e0d8b9aedf5c08ec83b5201c3fe944a201b
|
94f4307268b0ded9fde290de442612c5de5e2da4
|
refs/heads/master
| 2021-01-22T02:28:58.862747 | 2014-08-09T09:36:18 | 2014-08-09T09:36:18 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Data-directory layout and per-language category metadata for the wiki tools.
root = '/home/y/proj/wiki/data'
dumps = '%s/dumps' % root  # presumably raw wiki dumps -- confirm against consumers
trans = '%s/trans' % root  # presumably transformed/derived data -- confirm
# 'mainc': top-level category ids per language; 'namec': their names (same order).
# NOTE(review): the 'fr' entries are still empty.
meta = {
    'mainc': {'de': [236366, 224908, 691624, 8132391, 235991, 235635, 242544, 242681],
        'fr': []},
    'namec': {'de': ['Geographie', 'Geschichte', 'Gesellschaft', 'Kunst_und_Kultur', 'Religion', 'Sport', 'Technik', 'Wissenschaft'],
        'fr': []}
}
|
UTF-8
|
Python
| false | false | 2,014 |
18,811,956,783,181 |
677e1b53ab2f3819ea88a6006674b76951ee4973
|
fc76ebfb40f66228814cb57cb331b4534dafb044
|
/plugin.video.hdrepo/default.py
|
e608247ec321911136cb5dcfe7019ccfeee92967
|
[
"GPL-2.0-only"
] |
non_permissive
|
hoangngt/xbmc-plugins
|
https://github.com/hoangngt/xbmc-plugins
|
20bc5b857263dbc59f9f2193d210618df8711637
|
a5d6792e1f8d73b76b89555ee5973969a0402171
|
refs/heads/master
| 2020-12-25T22:19:07.053877 | 2014-10-03T16:25:54 | 2014-10-03T16:25:54 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import CommonFunctions as common
import urllib
import urllib2
import os
import xbmcplugin
import xbmcgui
import xbmcaddon
import urlfetch
import Cookie
from BeautifulSoup import BeautifulSoup
try:
import json
except:
import simplejson as json
# Add-on settings handle plus cached configuration read at import time.
__settings__ = xbmcaddon.Addon(id='plugin.video.hdrepo')
__language__ = __settings__.getLocalizedString
home = __settings__.getAddonInfo('path')
# NOTE(review): `xbmc` is used here but only xbmcplugin/xbmcgui/xbmcaddon are
# imported above -- presumably injected by the XBMC runtime; verify.
icon = xbmc.translatePath( os.path.join( home, 'icon.png' ) )
# Search-history persistence is hard-disabled; the StorageServer branch below is dead.
saveSearch = 'false'
freeAccount = __settings__.getSetting('freeAccount')
email = __settings__.getSetting('email')
if saveSearch=='true':
    # NOTE(review): StorageServer is never imported -- this branch would raise
    # NameError if saveSearch were ever set to 'true'.
    cache = StorageServer.StorageServer("fshare2")
# Desktop-browser headers sent with every fshare.vn request.
HTTP_DESKTOP_UA = {
    'Host':'www.fshare.vn',
    'Accept-Encoding':'gzip, deflate',
    'Referer':'https://www.fshare.vn/login.php',
    'Connection':'keep-alive',
    'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'User-Agent':'Mozilla/5.0 (Windows NT 6.2; WOW64; rv:18.0) Gecko/20100101 Firefox/18.0'
}
searchList=[]
# Alias: mutations of headers['Cookie'] below deliberately affect HTTP_DESKTOP_UA.
headers = HTTP_DESKTOP_UA
def _makeCookieHeader(cookie):
    """Serialize a SimpleCookie into a 'key=value; ' Cookie header string."""
    pieces = ["%s=%s; " % (morsel.key, morsel.value) for morsel in cookie.values()]
    return "".join(pieces)
def doLogin():
    """POST credentials from the add-on settings to fshare.vn.

    On success, stores the session cookies into the shared `headers` dict and
    returns the Cookie header string; on failure notifies the user and
    returns False.
    """
    cookie = Cookie.SimpleCookie()
    form_fields = {
        "login_useremail": __settings__.getSetting('username'),
        "login_password": __settings__.getSetting('password'),
        "url_refe": "https://www.fshare.vn/index.php"
    }
    form_data = urllib.urlencode(form_fields)
    response = urlfetch.fetch(
        url = 'https://www.fshare.vn/login.php',
        method='POST',
        headers = headers,
        data=form_data,
        follow_redirects = False
    )
    cookie.load(response.headers.get('set-cookie', ''))
    headers['Cookie'] = _makeCookieHeader(cookie)
    # A '-1' in the session cookie marks a rejected login -- presumably an
    # fshare convention; confirm.
    if headers['Cookie'].find('-1')>0:
        xbmc.executebuiltin((u'XBMC.Notification("%s", "%s", %s)' % ('Login', 'Login failed. You must input correct FShare username/pass in Add-on settings', '15')).encode("utf-8"))
        return False
    else:
        return headers['Cookie']
def make_request(url):
    """GET *url* with desktop-browser headers; return the body, or None on error.

    Errors are only printed, so callers must cope with a None return.
    """
    headers = {
        'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'User-Agent':'Mozilla/5.0 (Windows NT 6.2; WOW64; rv:18.0) Gecko/20100101 Firefox/18.0'
    }
    try:
        req = urllib2.Request(url,headers=headers)
        f = urllib2.urlopen(req)
        body=f.read()
        return body
    except urllib2.URLError, e:
        print 'We failed to open "%s".' % url
        if hasattr(e, 'reason'):
            print 'We failed to reach a server.'
            print 'Reason: ', e.reason
        if hasattr(e, 'code'):
            print 'We failed with error code - %s.' % e.code
def get_categories():
    """Build the plugin's root menu: search, Apple trailers, HDRepo feed, settings.

    NOTE(review): relies on the module-level globals `url`, `query` and `type`
    that are assigned near the bottom of this script before dispatch.
    """
    if saveSearch=='true':
        add_dir('Search', '', 1, icon, query, type, 0)
    else:
        add_dir('Search', url, 2, icon, '', 'folder', 0)
    add_dir('Apple iTunes', 'apple_root', 14, icon, '', 'folder', 0)
    hdrepo('root','')
    add_dir('Add-on settings', '', 10, icon, query, type, 0)
def hdrepo(provider, param, start=0):
    """Query the HDRepo feed and render its items as directories/links.

    *provider* selects the feed view ('root', 'search4', 'fshare_folder', ...);
    'search' prompts the user for a query first.
    """
    if provider=='search':
        param = common.getUserInput('Search', '')
        param = param.replace(' ', '%20')
    data = {'provider': provider, 'param': param, 'start': start}
    data = urllib.urlencode(data)
    result = json.load(urllib.urlopen('http://feed.hdrepo.com/v1/feed.php', data))
    for item in result:
        if item['type'] == 'fshare_folder':
            #add_dir(item['title'], item['param'], 5, item['thumb'])
            add_dir(item['title'], item['provider'], 12, item['thumb'], item['param'])
        else:
            if item['type'] == 'fshare_file' and item['title'] is not None:
                add_link(item['date'], item['title'], item['duration'], item['param'], item['thumb'], item['desc'])
            else:
                if item['type'] == 'folder':
                    try:
                        # 'start' is only present on paginated folders.
                        if 'start' in item:
                            add_dir(item['title'], item['provider'], 12, item['thumb'], item['param'], '', item['start'])
                        else:
                            add_dir(item['title'], item['provider'], 12, item['thumb'], item['param'])
                    except:
                        pass
def apple(provider, param, start=0):
    """Render the Apple iTunes trailers menus.

    'apple_root' lists hard-coded genre feeds; 'apple' fetches the JSON feed
    at *param* and adds one directory per movie.
    """
    if provider=='apple_root':
        add_dir('Genre', 'apple', 14, icon, 'http://trailers.apple.com/itunes/us/json/genres.json', 'folder', 0)
        add_dir('Action and Adventure', 'apple', 14, icon, 'http://trailers.apple.com/itunes/us/json/action_and_adventure.json', 'folder', 0)
        add_dir('Comedy', 'apple', 14, icon, 'http://trailers.apple.com/itunes/us/json/comedy.json', 'folder', 0)
        add_dir('Family', 'apple', 14, icon, 'http://trailers.apple.com/itunes/us/json/family.json', 'folder', 0)
        add_dir('Fantasy', 'apple', 14, icon, 'http://trailers.apple.com/itunes/us/json/fantasy.json', 'folder', 0)
        add_dir('Foreign', 'apple', 14, icon, 'http://trailers.apple.com/itunes/us/json/foreign.json', 'folder', 0)
        add_dir('Horror', 'apple', 14, icon, 'http://trailers.apple.com/itunes/us/json/horror.json', 'folder', 0)
        add_dir('Musical', 'apple', 14, icon, 'http://trailers.apple.com/itunes/us/json/musical.json', 'folder', 0)
        add_dir('Romance', 'apple', 14, icon, 'http://trailers.apple.com/itunes/us/json/romance.json', 'folder', 0)
        add_dir('Science Fiction', 'apple', 14, icon, 'http://trailers.apple.com/itunes/us/json/science_fiction.json', 'folder', 0)
        add_dir('Thriller', 'apple', 14, icon, 'http://trailers.apple.com/itunes/us/json/thriller.json', 'folder', 0)
    if provider=='apple':
        result = json.load(urllib.urlopen(param))
        # Some feeds wrap the movie list in a 'data' key, some don't.
        if not 'data' in result:
            movies = result;
        else:
            movies = result['data'];
        for item in movies:
            # 'location' points at the movie's page and gives us a fanart image.
            if item.get('location') is None:
                add_dir(item['title'], 'search4', 12, 'http://trailers.apple.com/' + item['poster'], item['title'])
            else:
                add_dir(item['title'], 'search4', 12, 'http://trailers.apple.com/' + item['poster'], item['title'], thumbnailImage = 'http://trailers.apple.com' + item.get('location') + 'images/background.jpg')
                #print 'http://trailers.apple.com/' + item['location'] + 'images/background.jpg';
def sendLink(url):
    """Ask the HDRepo server to e-mail a download link for *url* to the user.

    Uses the `email` address from the add-on settings; shows the server's
    reply (or a rate-limit message) as an XBMC notification.
    """
    data = {'email': email, 'url': url}
    data = urllib.urlencode(data)
    try:
        response = urllib.urlopen('http://feed.hdrepo.com/sendlink.php', data)
        result = json.load(response)
        xbmc.executebuiltin((u'XBMC.Notification("%s", "%s", %s)' % ('Download link', result['message'], '5000')).encode("utf-8"))
    except:
        xbmc.executebuiltin((u'XBMC.Notification("%s", "%s", %s)' % ('Download link', 'Server only accepts 1 request/minute', '5000')).encode("utf-8"))
def clearstring(str):
    """Keep only alphanumerics and '.-_ ()' -- makes a filesystem-safe name."""
    allowed = '.-_ ()'
    kept = [ch for ch in str if ch.isalnum() or ch in allowed]
    return ''.join(kept)
def addlib(url, name):
    """Add an fshare file or folder to the user's XBMC library as .strm stubs.

    Each .strm file contains a plugin:// URL that resolves back through this
    add-on (mode=4). Requires the library folder to be set in settings.
    """
    print 'URL' + url
    id = url
    library_folder = __settings__.getSetting('library_folder')
    if library_folder == "":
        xbmc.executebuiltin((u'XBMC.Notification("%s", "%s", %s)' % ('Add to your library', 'You need to setup library folder in Add-on Setting', '5000')).encode("utf-8"))
        return
    if not os.path.exists(library_folder):
        os.makedirs(library_folder)
    if '/file/' in url:
        # Single file: derive the stub name by cutting at the last '.' before
        # the trailing '(size)' suffix.
        filename = name
        k = filename.rfind("(")
        k = filename.rfind(".", 0, k)
        filename = filename[:k] + '.strm'
        target = open (library_folder + '/' + clearstring(filename), 'w')
        target.write('plugin://plugin.video.hdrepo/?mode=4&url=' + id)
        target.close()
        return
    if '/folder/' in url:
        # Folder: fetch its listing from the feed and write one stub per entry.
        data = {'provider': 'fshare_folder', 'param': url, 'start': 0}
        data = urllib.urlencode(data)
        result = json.load(urllib.urlopen('http://feed.hdrepo.com/v1/feed.php', data))
        library_folder = library_folder + '/' + clearstring(name)
        os.makedirs(library_folder)
        for item in result:
            url = item['title']
            id = item['param']
            k = url.rfind("/")
            filename = url[k+1:]
            k = filename.rfind("(")
            k = filename.rfind(".", 0, k)
            filename = filename[:k] + '.strm'
            target = open (library_folder + '/' + clearstring(filename), 'w')
            target.write('plugin://plugin.video.hdrepo/?mode=4&url=' + id)
            target.close()
    return
def find_similar(url, query):
    """Search the feed for releases similar to the title in *query*.

    Strips punctuation, lowercases, and truncates at the first release tag
    (resolution, source, codec, ...) so only the bare title is searched.
    """
    query = ''.join(e for e in query if e.isalnum() or e in '. ')
    query = query.lower()
    # Common release-name tokens that mark the end of the actual title.
    specials = ["480","720","1080","mhd","bluray","x264","dvdrip","vie","hdtv", "extended"]
    k = len(query)
    for e in specials:
        if e in query:
            i = query.index(e)
            if i>0 and k > i:
                k = i
    query = query[0:k]
    hdrepo('search_file', query)
def searchMenu(url, query = '', type='folder', page=0):
    """Render the search submenu: new/clear entries plus saved search history.

    NOTE(review): `cache` only exists when saveSearch=='true' (currently
    never) -- this function would raise NameError if reached as-is.
    """
    add_dir('New Search', url, 2, icon, query, type, 0)
    add_dir('Clear Search', url, 3, icon, query, type, 0)
    searchList=cache.get('searchList').split("\n")
    for item in searchList:
        add_dir(item, url, 2, icon, item, type, 0)
def clearSearch():
    """Wipe the saved search history (requires the StorageServer cache)."""
    cache.set('searchList','')
def clearCache():
    """Drop cached HTTP responses from the StorageServer cache."""
    cache.delete('http%')
def search(url, query = '', type='folder', page=0):
    """Prompt for (or reuse) a query, optionally save it, and run a feed search."""
    if query is None or query=='':
        query = common.getUserInput('Search', '')
        if query is None:
            # User cancelled the dialog.
            return
    if saveSearch=='true':
        # Persist new queries into the search history (dead while saveSearch='false').
        searchList = cache.get('searchList').split("\n")
        if not query in searchList:
            searchList.append(query)
            cache.set('searchList','\n'.join(searchList))
    hdrepo('search4', query)
def resolve_url(url):
    """Resolve an fshare page URL into a direct media URL for playback.

    Free-account mode borrows a session cookie from the HDRepo server;
    otherwise logs in with the user's credentials. A 302 gives the direct
    link; a 200 means a download form must be scraped instead.
    """
    if freeAccount == 'true':
        response = urlfetch.fetch("http://feed.hdrepo.com/fshare.php")
        if response.status == 200:
            headers['Cookie'] = response.content
        else:
            xbmc.executebuiltin((u'XBMC.Notification("%s", "%s", %s)' % ('Login', 'Server only accepts 1 request/minute', '5000')).encode("utf-8"))
            return
    else:
        headers['Cookie'] = doLogin()
    response = urlfetch.get(url,headers=headers, follow_redirects=False)
    if response.status==302 and response.headers['location'].find('logout.php')<0:
        # Redirect target is the direct download URL.
        url=response.headers['location']
        # logout
        if freeAccount == 'true':
            cookie = Cookie.SimpleCookie()
            cookie.load(response.headers.get('set-cookie', ''))
            headers['Cookie'] = _makeCookieHeader(cookie)
            urlfetch.get("https://www.fshare.vn/logout.php",headers=headers, follow_redirects=False)
    else:
        if response.status==200:
            # Fall back to scraping the download form's action URL.
            soup = BeautifulSoup(str(response.content), convertEntities=BeautifulSoup.HTML_ENTITIES)
            item = soup.find('form', {'name' : 'frm_download'})
            if item:
                url = item['action']
            else:
                xbmc.executebuiltin((u'XBMC.Notification("%s", "%s", %s)' % ('Login', 'Login failed. You must input correct FShare username/pass in Add-on settings', '5000')).encode("utf-8"))
                return
    item = xbmcgui.ListItem(path=url)
    xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, item)
def add_link(date, name, duration, href, thumb, desc):
    """Add one playable video item (resolved lazily via mode=4) to the listing.

    Context-menu entries: send-download-link (only with e-mail + free account),
    add-to-library, and find-similar.
    """
    description = date+'\n\n'+desc
    u=sys.argv[0]+"?url="+urllib.quote_plus(href)+"&mode=4"
    liz=xbmcgui.ListItem(name, iconImage="DefaultVideo.png", thumbnailImage=thumb)
    liz.setInfo(type="Video", infoLabels={ "Title": name, "Plot": description, "Duration": duration})
    liz.setProperty('IsPlayable', 'true')
    if email != '' and freeAccount == 'true':
        liz.addContextMenuItems([('Send download link',"XBMC.RunPlugin(%s?mode=%s&url=%s) "%(sys.argv[0],13,href))])
    liz.addContextMenuItems([('Add to your library',"XBMC.RunPlugin(%s?mode=%s&url=%s&query=%s) "%(sys.argv[0],15,href,name)),('Find similar movies',"XBMC.Container.Update(%s?mode=%s&url=%s&query=%s) "%(sys.argv[0],16,href,name))])
    ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz)
def add_dir(name,url,mode,iconimage,query='',type='folder',page=0, thumbnailImage=''):
    """Add one folder item whose plugin URL encodes the dispatch mode and args.

    fshare folders additionally get an add-to-library context-menu entry.
    """
    u=sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&query="+str(query)+"&type="+str(type)+"&page="+str(page)#+"&name="+urllib.quote_plus(name)
    ok=True
    liz=xbmcgui.ListItem(name, iconImage=iconimage, thumbnailImage=iconimage)
    liz.setInfo( type="Video", infoLabels={ "Title": name} )
    liz.setProperty('Fanart_Image', thumbnailImage)
    if url == 'fshare_folder':
        liz.addContextMenuItems([('Add to your library',"XBMC.RunPlugin(%s?mode=%s&url=%s&query=%s) "%(sys.argv[0], 15, query, name))])
    ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=True)
    return ok
def get_params():
    """Parse the plugin query string in ``sys.argv[2]`` ('?k=v&...') into a dict.

    Historical quirk preserved: when there is no usable query string the
    function returns an empty *list*, not an empty dict.
    """
    raw = sys.argv[2]
    parsed = []
    if len(raw) >= 2:
        cleaned = raw.replace('?', '')
        parsed = {}
        for pair in cleaned.split('&'):
            pieces = pair.split('=')
            if len(pieces) == 2:
                parsed[pieces[0]] = pieces[1]
    return parsed
# ---- Plugin entry point: parse routing parameters and dispatch by mode ----
params=get_params()
url=''
name=None
mode=None
query=None
type='folder'
page=0
# Each parameter is optional; a missing key simply keeps its default.
try:
    type=urllib.unquote_plus(params["type"])
except:
    pass
try:
    page=int(urllib.unquote_plus(params["page"]))
except:
    pass
try:
    query=urllib.unquote_plus(params["query"])
except:
    pass
try:
    url=urllib.unquote_plus(params["url"])
except:
    pass
try:
    name=urllib.unquote_plus(params["name"])
except:
    pass
try:
    mode=int(params["mode"])
except:
    pass
# Mode 10 opens settings and then falls through to the main menu.
if mode==10:
    __settings__.openSettings()
    mode=None
xbmcplugin.setContent(int(sys.argv[1]), 'movies')
# Dispatch table: each mode maps to one handler defined earlier in this file.
if mode==None:
    get_categories()
elif mode==1:
    searchMenu(url, '', type, page)
elif mode==2:
    search(url, query, type, page)
elif mode==3:
    clearSearch()
elif mode==4:
    resolve_url(url)
elif mode==9:
    searchMenu(url, '', 'file', page)
elif mode==10:
    # NOTE(review): unreachable — mode 10 was already reset to None above.
    __settings__.openSettings()
elif mode==12:
    hdrepo(url, str(query), str(page))
elif mode==13:
    sendLink(url)
elif mode==14:
    apple(url, str(query), str(page))
elif mode==15:
    addlib(url, query)
elif mode==16:
    find_similar(url, query)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
|
UTF-8
|
Python
| false | false | 2,014 |
13,228,499,296,175 |
7146e2ee9a8464102e06c564525b73b232730d46
|
ac991b24fe5e1ff6c80abc951d2e02441ac1e340
|
/Ejercicio 7 adicional/Secuencia.py
|
d9a6d893758fdfdfc8f589dc42a73220064b8531
|
[] |
no_license
|
Diego300591/PRACTICA-1
|
https://github.com/Diego300591/PRACTICA-1
|
c9ab65071c7f982dedbdf487ddb8638fc8a66ae4
|
4a01a88c2097aeb9901eb6a0fd9f2fb355cf9fef
|
refs/heads/master
| 2021-01-01T17:47:45.039800 | 2014-09-03T20:59:02 | 2014-09-03T20:59:02 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Print a digit pyramid for input n (Python 2 script):
# lines "1", "22", ..., up to n repeated n times, then back down to "1".
num=raw_input()
num=int(num)
# Ascending half: line k (1..num) prints the digit k repeated k times.
for i in range(0,num):
    cad=""
    aux=i+1
    for j in range(0,aux):
        dig=str(aux)
        cad=cad+dig
    print cad
# Descending half: num-1 down to 1.
val=num-1
while val>=1:
    cad=""
    aux1=val
    for k in range(0,aux1):
        dig=str(aux1)
        cad=cad+dig
    print cad
    val=val-1
|
UTF-8
|
Python
| false | false | 2,014 |
9,603,546,898,004 |
54fefc39caa9813147c3297aa47ea6950f569080
|
275ec039ae1ad3ebcd35f8dcb97ebcd630457946
|
/wordpress/views.py
|
c42dfcf001e446c7e1959cf38cf30fcf3a2f108f
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
non_permissive
|
kennethreitz-archive/django-wordpress
|
https://github.com/kennethreitz-archive/django-wordpress
|
f588da126e3cee988944dc576089fd6e18c11c97
|
0f8df8272daa49077779d41a89af7e783eb69864
|
refs/heads/master
| 2023-08-27T07:32:00.922023 | 2010-11-04T03:04:38 | 2010-11-04T03:04:38 | 1,050,170 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.conf import settings
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.views.generic import list_detail, date_based
from wordpress.models import Post
# Posts per page for list views; override with WP_PER_PAGE in Django settings.
PER_PAGE = getattr(settings, 'WP_PER_PAGE', 10)
# Map URL taxonomy slugs to the WordPress taxonomy names stored in the DB.
TAXONOMIES = {
    'term': 'post_tag',
    'category': 'category',
    'link_category': 'link_category',
}
def object_detail(request, year, month, day, slug):
    """Render a single published post located by its dated permalink.

    Thin wrapper around Django's date_based.object_detail generic view;
    allow_future=True lets scheduled posts be viewed by direct URL.
    """
    return date_based.object_detail(request, queryset=Post.objects.published(),
        date_field='post_date', year=year, month=month, month_format="%m",
        day=day, slug=slug, template_object_name='post', allow_future=True)
def archive_day(request, year, month, day):
    """List published posts for one calendar day (date_based generic view)."""
    return date_based.archive_day(request, queryset=Post.objects.published(),
        date_field='post_date', year=year, month=month, month_format="%m",
        day=day, template_object_name='post')
def archive_month(request, year, month):
    """List published posts for one calendar month (date_based generic view)."""
    return date_based.archive_month(request, queryset=Post.objects.published(),
        date_field='post_date', year=year, month=month, month_format="%m",
        template_object_name='post')
def archive_year(request, year):
    """List published posts for one calendar year (date_based generic view)."""
    return date_based.archive_year(request, queryset=Post.objects.published(),
        date_field='post_date', year=year)
def archive_index(request):
    """Paginated list of all published posts.

    Honors WordPress-style ``?p=<id>`` permalinks by redirecting to the
    post's canonical URL.  NOTE(review): an unknown ``p`` raises
    Post.DoesNotExist (HTTP 500) — confirm whether a 404 is preferable.

    Fix: paginate with the module-level PER_PAGE constant (backed by the
    WP_PER_PAGE setting) instead of a hard-coded 10 that silently ignored it.
    """
    p = request.GET.get('p', None)
    if p:
        post = Post.objects.get(pk=p)
        return HttpResponseRedirect(post.get_absolute_url())
    posts = Post.objects.published().select_related()
    return list_detail.object_list(request, queryset=posts,
        paginate_by=PER_PAGE, template_name='wordpress/post_archive.html',
        template_object_name='post', allow_empty=True)
def taxonomy(request, taxonomy, term):
    """Paginated list of published posts tagged with *term* in *taxonomy*.

    *taxonomy* is a URL slug translated through the module-level TAXONOMIES
    map.  NOTE(review): an unknown taxonomy falls through and returns None,
    which Django renders as a 500 — confirm whether raising Http404 is the
    intended behavior before changing it.

    Fix: paginate with the module-level PER_PAGE constant (backed by the
    WP_PER_PAGE setting) instead of a hard-coded 10 that silently ignored it.
    """
    taxonomy = TAXONOMIES.get(taxonomy, None)
    if taxonomy:
        posts = Post.objects.term(term, taxonomy=taxonomy).select_related()
        return list_detail.object_list(request, queryset=posts,
            paginate_by=PER_PAGE, template_name='wordpress/post_term.html',
            template_object_name='post', allow_empty=True,
            extra_context={'term': term})
|
UTF-8
|
Python
| false | false | 2,010 |
13,993,003,470,973 |
81edf6fed8842baee7df99383ad09bec5fba4207
|
af38a8e2b8afde2bbee87937ff24c04ea71a246f
|
/socio_demografico/models.py
|
c008ff657dced51dfb77a8615e9e3e79b15e44e9
|
[] |
no_license
|
byroncorrales/sissan
|
https://github.com/byroncorrales/sissan
|
2b94dc0b9af99cb5b935ffb5327f5860365aa6fd
|
c81af04db8220b7e8b0c78c150013c31d3e393cd
|
refs/heads/master
| 2021-01-15T21:09:53.556202 | 2009-11-24T14:57:30 | 2009-11-24T14:57:30 | 238,022 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: UTF-8 -*-
from django.db import models
import datetime
# Choice tuples for the demographic models below.
# Five-year periods (quinquennia) from 1950-1955 through 2050-2055.
CHOICESQUINQUENIO = (
    ('1950-1955', '1950-1955'),('1955-1960','1955-1960'),('1960-1965','1960-1965'),('1965-1970', '1965-1970'),('1970-1975', '1970-1975'),('1975-1980', '1975-1980'),('1980-1985', '1980-1985'),('1985-1990', '1985-1990'),('1990-1995', '1990-1995'),('1995-2000', '1995-2000'),( '2000-2005', '2000-2005'),('2005-2010', '2005-2010'),('2010-2015', '2010-2015'),('2015-2020', '2015-2020'),('2020-2025', '2020-2025'),('2025-2030', '2025-2030'),('2030-2035', '2030-2035'),('2035-2040', '2035-2040'),('2040-2045', '2040-2045'),('2045-2050', '2045-2050'),('2050-2055', '2050-2055')
)
# Single years from the current year down to 1960, most recent first.
CHOICESANO=[]
for i in range (datetime.date.today().year,1959,-1):
    CHOICESANO.append((i,str(i)))
class Crecimiento(models.Model):
    """Population growth rate (%) per five-year period."""
    # Quinquennium label, e.g. '1950-1955'; one row per period.
    ano = models.CharField("Quinquenio", choices=CHOICESQUINQUENIO, max_length=12, help_text='Introduzca el quinquenio', unique=True)
    crecimiento = models.DecimalField("Tasa de crecimiento poblacional (%)",max_digits=10,decimal_places=2)
    class Meta:
        ordering = ['ano']
        verbose_name_plural = "Tasa de crecimiento"
class Esperanza(models.Model):
    """Life expectancy per five-year period, by sex."""
    ano = models.CharField("Quinquenio", choices=CHOICESQUINQUENIO, max_length=12, help_text='Introduzca el quinquenio', unique=True)
    # Derived field recomputed in save(); not editable in the admin.
    ambos_sexos = models.DecimalField("Total ambos sexos",max_digits=10,decimal_places=2, editable=False)
    mujer = models.DecimalField("Mujer",max_digits=10,decimal_places=2)
    hombre = models.DecimalField("Hombre",max_digits=10,decimal_places=2)
    class Meta:
        ordering = ['ano']
        verbose_name_plural = "Esperanza de vida"
    def save(self, force_insert=False, force_update=False):
        # NOTE(review): stores the SUM of the two values; for a combined life
        # expectancy an average may have been intended — confirm before changing.
        self.ambos_sexos = self.hombre + self.mujer
        super(Esperanza,self).save(force_insert, force_update)
class Fecundidad(models.Model):
    """Fertility (children per woman) and crude birth rate per quinquennium."""
    ano = models.CharField("Quinquenio", choices=CHOICESQUINQUENIO, max_length=12, help_text='Introduzca el quinquenio', unique=True)
    fecundidad = models.DecimalField("Fecundidad, No hijos por mujer",max_digits=10,decimal_places=2)
    natalidad = models.DecimalField("Tasa bruta natalidad",max_digits=10,decimal_places=2)
    class Meta:
        ordering = ['ano']
        verbose_name_plural = "Fecundidad"
class Mortalidad_materna(models.Model):
    """Maternal mortality rate per single year."""
    ano = models.IntegerField("Año",max_length=5, choices=CHOICESANO, help_text='Introduzca el año', unique=True)
    mortalidad = models.DecimalField("Tasa mortalidad materna", max_digits=10, decimal_places=2)
    class Meta:
        ordering = ['ano']
        verbose_name_plural = "Mortalidad Materna"
class Mortalidad_infantil(models.Model):
    """Infant mortality rates per single year (overall and under-5)."""
    ano = models.IntegerField("Año",max_length=5, choices=CHOICESANO, help_text='Introduzca el año', unique=True)
    # Fix: admin label typo "minfantil" -> "infantil".
    mortalidad = models.DecimalField("Tasa mortalidad infantil", max_digits=10, decimal_places=2)
    mortalidad_menor = models.DecimalField("Tasa mortalidad infantil < 5 años", max_digits=10, decimal_places=2)
    class Meta:
        ordering = ['ano']
        verbose_name_plural = "Mortalidad Infantil"
|
UTF-8
|
Python
| false | false | 2,009 |
9,079,560,895,007 |
4d0379c2561e4d055272587f6915664942b8d80d
|
c303e3b13eb088477a34385b5af282e30c7a307b
|
/controller/ajax.py
|
73b69e705d9cbf6f8ff30a738a3f4b698cdc2640
|
[
"MIT"
] |
permissive
|
oldcai/huixiang
|
https://github.com/oldcai/huixiang
|
055539512ca7457ee6c41650263cf660f5a5ad16
|
38a0af9bbf045854e84b5053333e5579bc5b3010
|
refs/heads/master
| 2021-01-17T21:42:02.069735 | 2013-06-15T02:22:34 | 2013-06-15T02:22:34 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#encoding=utf-8
import web
import requests
from datetime import datetime
from config import setting
from model import user
import urllib
from util import login
from util import oauth
import json
# App-level singletons shared across handlers (from config.setting).
config = setting.config
render = setting.render
db = setting.db
def common_check(post=(), get=()):
    """Decorator factory for webpy handler methods.

    Requires a logged-in user and the listed POST/GET parameter names before
    calling the wrapped handler, which receives a ctx dict with keys
    "post", "get" and "user".

    Error signalling convention of this module: validation failures raise
    Exception carrying a JSON body, which the wrapper catches and returns
    as the HTTP response text.

    Fixes: mutable list defaults replaced with tuples (shared-mutable-default
    pitfall), and the Python-2-only ``except Exception, e`` replaced with
    ``except Exception as e`` (valid in both Python 2.6+ and 3).
    """
    def check(post, get):
        """Validate login state and required parameters; return the ctx dict."""
        post_data = web.input(_method="post")
        get_data = web.input(_method="get")
        user = login.logged()
        if not user:
            raise Exception(json.dumps({"code":403,"msg":"access deny"}))
        for k in post:
            if not k in post_data:
                raise Exception(json.dumps({"code":500,"msg":str(k)+" is required"}))
        for k in get:
            if not k in get_data:
                raise Exception(json.dumps({"code":500,"msg":str(k)+" is required"}))
        return {"post":post_data,"get":get_data,"user":user}
    def checkwrap(fn):
        def inner(self):
            try:
                ctx = check(post,get)
                return fn(self,ctx)
            except Exception as e:
                # The exception's str() is the JSON error body.
                return e
        return inner
    return checkwrap
def ok(msg="ok"):
    """Serialize a success envelope (code 200) as a JSON string."""
    envelope = {"code": 200, "msg": msg}
    return json.dumps(envelope)
def fail(msg="fail"):
    """Serialize a failure envelope (code 500) as a JSON string.

    Fix: ``json.dumbs`` was a typo for ``json.dumps`` — every call raised
    AttributeError instead of returning the error body.
    """
    return json.dumps({"code":500,"msg":msg})
def unfavpiece(pieceid,userid):
    """Delete the (pieceid, userid) favourite row.

    Raises Exception with a JSON body (code 300) when the favourite does not
    exist — per this module's exception-as-response convention.
    """
    where={"pieceid":pieceid,"userid":userid}
    row = db.select("fav",where="pieceid=$pieceid and userid=$userid",vars=where)
    if not row:
        raise Exception(json.dumps({"code":300,"msg":"you've not faved this piece"}))
    db.delete("fav",where="pieceid=$pieceid and userid=$userid",vars=where)
def favpiece(pieceid,userid):
    """Insert a (pieceid, userid) favourite row.

    If the favourite already exists, raises Exception with a JSON *success*
    body (code 200) carrying the existing row id — the caller's wrapper
    returns it as the response, making the operation idempotent.
    """
    row = db.select("fav",where="pieceid=$pieceid and userid=$userid",vars={"pieceid":pieceid,"userid":userid})
    if row:
        raise Exception(json.dumps({"code":200,"msg":{"id":row[0]["id"]}}))
    db.insert("fav",pieceid=pieceid,userid=userid,addtime=datetime.now())
class add:
    """POST /add: create (or reuse) a piece, cross-post it, and fav it."""
    @common_check(post=["content"])
    def POST(self,ctx):
        """Create the piece for the logged-in user and return its id as JSON."""
        content = ctx["post"]["content"]
        userid = ctx["user"]["id"]
        if "link" in ctx["post"]:
            link = ctx["post"]["link"]
        else:
            link = None
        pieces = db.select("piece",where="content=$content",vars={"content":content})
        # Reuse an existing piece with identical content instead of inserting a duplicate.
        if not pieces:
            pieceid = db.insert("piece",content=content,user=userid,addtime=datetime.now(),link=link)
        else:
            pieceid = pieces[0]["id"]
        # Cross-post to each OAuth service named in the comma-separated "share" field.
        share = ctx["post"]["share"].split(",")
        for key in share:
            if not key:
                continue
            client = oauth.createClientWithName(key,ctx["user"])
            post_content = u"「" + content + u"」" + " http://" + web.ctx.host + "/piece/" + str(pieceid)
            client.post(post_content)
        favpiece(pieceid,userid)
        return ok({"id":pieceid})
class fav:
    """POST /fav: mark a piece as a favourite of the logged-in user."""
    @common_check(post=["pieceid"])
    def POST(self,ctx):
        """Fav the given piece; returns its id as JSON."""
        pieceid=ctx["post"]["pieceid"]
        favpiece(pieceid,ctx["user"]["id"])
        return ok({"id":pieceid})
class userinfo:
    """GET /userinfo: return the logged-in user's name, id and avatar as JSON."""
    @common_check()
    def GET(self,ctx):
        user = ctx["user"]
        return json.dumps({"name":user["name"],"id":user["id"],"avatar":user["avatar"]})
class unfav:
    """POST /unfav: remove a piece from the logged-in user's favourites."""
    @common_check(post=["pieceid"])
    def POST(self,ctx):
        """Unfav the given piece; returns the plain success envelope."""
        unfavpiece(ctx["post"]["pieceid"],ctx["user"]["id"])
        return ok()
# class delete:
# def POST(self):
# try:
# ctx = common_check(post=["pieceid"])
# pieceid = ctx["post"]["pieceid"]
# userid = ctx["user"]["id"]
# unfavpiece(pieceid,userid)
# row = db.select("piece",where="id=$id and user=$user",vars={"id":pieceid,"user":userid})
# if not row:
# raise Exception(json.dumps({"code":401,"msg":"permission denied"}))
# db.delete("piece",where="id=$id and user=$user",vars={"id":pieceid,"user":userid})
# except Exception, e:
# return e
# return ok()
class pieces:
    """GET /pieces: return 10 random pieces as a JSON list of {id, content} rows."""
    def GET(self):
        rows = db.query('select id,content from piece order by rand() limit 10')
        return json.dumps([row for row in rows])
|
UTF-8
|
Python
| false | false | 2,013 |
9,466,107,924,244 |
563f9daa17b98dc67380f171132203be2f55998c
|
e925794068398c4d668a7aed411c7846cf764594
|
/clearbreach.py
|
a2ab75c9ea53f24ed68d2a714a33cc571b859a5b
|
[] |
no_license
|
jacksyen/webpy-mock
|
https://github.com/jacksyen/webpy-mock
|
717057224e23fdce084ed775a2318385b7fc4efc
|
55e07ba6c40c0adbd7a4a08658915a8f563eac33
|
refs/heads/master
| 2021-01-23T07:30:01.556309 | 2014-09-30T06:00:42 | 2014-09-30T06:00:42 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding:utf-8 -*-
# clearbreach.py ---
#
# Filename: clearbreach.py
# Description:
# Author:
# Maintainer:
# Created: 周四 八月 7 17:51:18 2014 (+0800)
# Version:
# Package-Requires: ()
# Last-Updated:
# By:
# Update #: 20
# URL:
# Doc URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
#
#
#
# Change Log:
#
#
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Emacs. If not, see <http://www.gnu.org/licenses/>.
#
#
import web
import json
from log import logger
from util import DateUtil
from webglobal import Global
from dbase import SQLite
# Code:
class ClearBreach:
    """webpy handler (Python 2): POST zeroes a user's late fees ("breach")
    for one channel and returns the recalculated totals as JSON."""
    def __init__(self):
        # One SQLite connection + cursor per handler instance.
        self.conn = SQLite.conn()
        self.db = self.conn.cursor()
    def __del__(self):
        # Best-effort cleanup; __del__ timing is interpreter-dependent.
        if self.conn:
            SQLite.close(self.conn)
    def POST(self):
        args = web.input()
        logger.info(u'入参:%s' %args)
        userCode = args.get('usercode')
        channelCode = args.get('channelcode')
        result = {}
        try:
            # Zero the breach (late fee) on this user's arrears rows for the channel.
            self.db.execute('UPDATE %s SET breach = ?, updatetime = ? WHERE channelcode = ? AND usercode = ?' %Global.GLOBAL_TABLE_USER_ARREARS, ('0.0', DateUtil.getDate(format='%Y-%m-%d %H:%M:%S'), channelCode, userCode))
            self.conn.commit()
            # Re-query the user's total arrears and total breach across all channels.
            self.db.execute('SELECT sum(itemmoney) paymentmoney, sum(breach) breach FROM %s WHERE usercode = ?' %Global.GLOBAL_TABLE_USER_ARREARS, (userCode, ))
            newInfo = self.db.fetchone()
            # Build the success response with the fresh totals.
            result['status'] = 'SUCCESS'
            result['breach'] = '0.0'
            result['totalmoney'] = float(format(newInfo['paymentmoney'], '.2f'))
            result['totalbreach'] = float(format(newInfo['breach'], '.2f'))
            result['msg'] = '修改成功'
        except Exception, e:
            # NOTE(review): 'status' is never set on failure — clients checking
            # result['status'] see a missing key; confirm whether intended.
            logger.error(u'清空滞纳金失败')
            result['msg'] = u'修改失败'
        r = json.dumps(result)
        logger.info(u'清空滞纳金返回:%s' %r)
        return r
#
# clearbreach.py ends here
|
UTF-8
|
Python
| false | false | 2,014 |
15,659,450,782,257 |
069b019eb6950a2f3c10db029aeecf058828e712
|
4e3931bd059c51e35e31aab9013212c6999ef6ac
|
/test/test_model.py
|
bd1fd957029fdbfa073d60a2dded92a9ce7ea9c0
|
[] |
no_license
|
paurullan/simulation_project
|
https://github.com/paurullan/simulation_project
|
8931c85e96e0f9d1893735df11f4471756c163c6
|
d792cd9801f6ff4e0923061e339224011202d3b4
|
refs/heads/master
| 2020-05-17T10:57:38.066203 | 2012-07-13T12:57:14 | 2012-07-13T12:57:14 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
#-*- coding: utf-8 -*-
from __future__ import print_function, division
__version__ = "0.0.1"
import unittest
import model
class TestServer(unittest.TestCase):
    """Tests for model.Server: FIFO task queue with a service-time clock.

    NOTE: the expected service times depend on the Server's internal RNG
    call order — presumably seeded deterministically in model (TODO confirm).
    """
    def setUp(self):
        self.server = model.Server()
    def test_add_queue(self):
        # A single queued task leaves the "queue" empty (only the head is busy).
        self.server.add(1)
        self.assertFalse(self.server.empty)
        self.assertTrue(self.server.empty_queue)
        self.server.process()
        self.assertTrue(self.server.empty)
        self.assertTrue(self.server.empty_queue)
    def test_empty_queue(self):
        self.server.add(1)
        self.assertTrue(self.server.empty_queue)
        self.server.add(2)
        self.assertFalse(self.server.empty_queue)
        self.server.process()
        self.assertTrue(self.server.empty_queue)
        self.server.process()
        self.assertTrue(self.server.empty_queue)
    def test_process(self):
        # Tasks are served strictly in arrival (FIFO) order.
        self.server.add(1)
        self.server.add(2)
        self.server.add(3)
        task, time = self.server.process()
        self.assertEqual(task, 1)
        task, time = self.server.process()
        self.assertEqual(task, 2)
        task, time = self.server.process()
        self.assertEqual(task, 3)
    def test_process_time(self):
        # The last processed task reports no service time (None).
        self.server.add(1)
        self.server.add(2)
        self.server.add(3)
        task, time = self.server.process()
        self.assertEqual(time, self.server.service())
        task, time = self.server.process()
        self.assertEqual(time, self.server.service())
        task, time = self.server.process()
        self.assertEqual(time, None)
    def test_service_example(self):
        first = 1
        self.server.add(first)
        item, service = self.server.process()
        self.assertEqual(first, item)
        self.assertEqual(service, None)
class TestUser(unittest.TestCase):
    """Tests for model.User service times.

    The literals pin the exact values of the User's seeded RNG stream;
    reordering service() calls would change every subsequent expectation.
    """
    def setUp(self):
        self.user = model.User()
    def test_service(self):
        time = self.user.service()
        result = 5117.05858038
        self.assertAlmostEqual(time, result)
    def test_service_2(self):
        # First five draws of the seeded stream, in order.
        num_list = [self.user.service() for i in range(5)]
        results = [
            5117.05858038,
            9508.44572792,
            5684.49491893,
            4829.02431238,
            7054.95497321,
        ]
        for num, result in zip(num_list, results):
            self.assertAlmostEqual(num, result)
class TestCPU(unittest.TestCase):
    """Tests for model.CPU: constant service time and routing probability."""
    def setUp(self):
        self.cpu = model.CPU()
    def test_service(self):
        """El temps de la CPU és constant a 0.4"""
        # CPU service time is a constant 0.4.
        const = 0.4
        time = self.cpu.service()
        self.assertAlmostEqual(time, const)
    def test_choose(self):
        """Escollim un 0.833 de vegades el disk"""
        # ~0.833 of choices should route to 'disk'; exact value pins the seeded RNG.
        steps = 10*1000
        choices = [self.cpu.choose() for i in range(steps)]
        n_disk = filter(lambda x: x == 'disk', choices)
        appereances = len(n_disk)/steps
        self.assertLess(appereances, 0.85)
        self.assertGreater(appereances, 0.8)
        self.assertAlmostEqual(appereances, 0.8318)
class TestDisk(unittest.TestCase):
    """Tests for model.Disk service times from the seeded RNG stream."""
    def setUp(self):
        self.disk = model.Disk()
    def test_service(self):
        time = self.disk.service()
        self.assertAlmostEqual(time, 7.694141795723532)
    def test_service_seed_one(self):
        # A different seed yields a different (but still deterministic) stream.
        self.disk = model.Disk(seed=1)
        time = self.disk.service()
        self.assertAlmostEqual(time, 5.248957469228879)
    def test_four_services(self):
        time = sum([self.disk.service() for i in range(4)])
        self.assertAlmostEqual(time, 30.92443576341943)
    def test_five_services(self):
        time = sum([self.disk.service() for i in range(5)])
        self.assertAlmostEqual(time, 39.459474646190515)
if __name__ == '__main__':
    # Run the test suite when executed directly.
    unittest.main()
|
UTF-8
|
Python
| false | false | 2,012 |
15,058,155,372,182 |
58077302ee4aa07375b6a9f6d3d77b3504427ca7
|
d0960d64efc42587f465397740ad26bb280c4488
|
/python/sorting/mergesort.py
|
1ac8e27841e2f4d27be386b4f812e016b02d8b41
|
[] |
no_license
|
noudald/programming-etudes
|
https://github.com/noudald/programming-etudes
|
77fc84dcc7cdffaa306b8bc5b0ce25976aff0b29
|
211f7ab92b773af2123bf5c3523dcdec84eb3820
|
refs/heads/master
| 2021-05-27T13:52:19.232294 | 2013-12-28T12:20:17 | 2013-12-28T12:20:17 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
def merge_sort(l):
    """Return a new list with the elements of *l* in ascending order.

    Classic top-down merge sort: split at the middle, sort each half
    recursively, then merge the two sorted halves.

    Raises:
        TypeError: if *l* is not a list.

    Fixes: the Python-2-only ``raise TypeError, msg`` syntax and the
    float-producing ``len(l)/2`` midpoint (now floor division).  Both
    replacements are also valid Python 2, so the file stays compatible.
    """
    if type(l) != list:
        raise TypeError('Expected list.')
    if len(l) <= 1:
        return l
    middle = len(l) // 2
    left = merge_sort(l[:middle])
    right = merge_sort(l[middle:])
    merged = []
    i, j = 0, 0
    # Merge: repeatedly take the smaller head of the two sorted halves.
    while i < len(left) and j < len(right):
        if left[i] < right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    # One of these slices is empty; the other holds the sorted remainder.
    merged += left[i:] + right[j:]
    return merged
|
UTF-8
|
Python
| false | false | 2,013 |
8,315,056,698,496 |
01412f7b2d1a0b8e1bb1f02697554c9badc35249
|
079d8088119f76b9d0f7eeb21e41e3d96002aa38
|
/binding.gyp
|
caa582396322f2a90b4e601b9e7bbb8e44a339d7
|
[
"MIT"
] |
permissive
|
ironman9967/NodeSixense
|
https://github.com/ironman9967/NodeSixense
|
d89a7ec3f7a2c9bec308d127b51dffe5c5b179a3
|
6d834cbfc156d5f6b8678da8179f24348d38a3cf
|
refs/heads/master
| 2020-05-18T14:04:40.182623 | 2014-02-27T00:09:53 | 2014-02-27T00:09:53 | 14,609,287 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
{
"targets": [
{
"target_name": "sixense",
"sources": [ "sixense.cc" ]
}
]
}
|
UTF-8
|
Python
| false | false | 2,014 |
1,726,576,873,427 |
8e44d890c2819f343098d19049363236dd2d1773
|
0bccf0da5aeb4f6adac653da398e41e19960f9e2
|
/runCozy.py
|
23fd270f4e31083c0b7e18357b55136e9c8d945e
|
[] |
no_license
|
g-ortuno/CoZy
|
https://github.com/g-ortuno/CoZy
|
3bb9d795478bec27261da63607b204cd329b3c2d
|
edbe8a439bf366960539b76d1683b40032e64b0a
|
refs/heads/master
| 2017-10-07T02:43:10.335684 | 2014-05-13T02:58:46 | 2014-05-13T02:58:46 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import fileinput
import sys
import re
import getopt
import cozyLex, cozyYacc, codeGenerator
class CoZyTester:
    """Front end for the CoZy toolchain (Python 2): parse source with the
    yacc-based parser, generate Python, then exec it or save it to disk."""
    def __init__(self):
        # Build the parser
        self.parser = cozyYacc.CoZyParser()
    def run_code(self, code_str, outputfile):
        """Compile *code_str*; exec the generated Python when *outputfile*
        is '', otherwise write it to <outputfile>.py instead of running it."""
        result = self.parser.parse(code_str)
        code = codeGenerator.codeGenerator(result).ret
        if outputfile == '':
            # NOTE(review): exec of generated code — only safe for trusted input.
            exec code in locals()
        else:
            ## Makes the output file
            f = open(outputfile+'.py', 'w')
            f.write(code)
            f.close()
        # print code
        # exec code in locals()
def main(argv):
    """Parse -i <inputfile> / -o <outputfile> options, read the CoZy source
    file and hand it to CoZyTester.  (Python 2 script.)"""
    inputfile = ''
    outputfile = ''
    try:
        opts, args = getopt.getopt(argv,"hi:o:",["ifile=", "ofile="])
    except getopt.GetoptError:
        print 'test.py -i <inputfile> -o <outputfile>'
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print 'test.py -i <inputfile>'
            sys.exit()
        elif opt in ("-i", "--ifile"):
            inputfile = arg
        elif opt in ("-o", "--ofile"):
            outputfile = arg
    try:
        cfile = open(inputfile)
        prog = cfile.read()
        myTester = CoZyTester()
        myTester.run_code(prog, outputfile)
    except IOError as e:
        # NOTE(review): cfile is never closed and the format string skips
        # positional arg 0 ("{1}") — prints only the OS error text.
        print "{1}".format(e.strerror)
if __name__ == "__main__":
    # Require at least one CLI argument before delegating to main().
    if len(sys.argv) == 1:
        print 'test.py -i <inputfile>'
    else:
        main(sys.argv[1:])
|
UTF-8
|
Python
| false | false | 2,014 |
19,026,705,146,237 |
45872d6b180929ffd22ae02c9b9738df4fc4e83f
|
996cdea86be07dd6b63360b0532d3aee65bcdc95
|
/Euler/13/Hundred.py
|
f676c0db96a51cd61d697adba3a3c365ec20eefa
|
[] |
no_license
|
DarkMatter/Project-Euler-Code-Challenges
|
https://github.com/DarkMatter/Project-Euler-Code-Challenges
|
28b50b9d1aadb1878db8d6689916c8a29ff94424
|
fe5f8e843be17a58dea28812c7fb951daf4c02d5
|
refs/heads/master
| 2020-04-27T10:45:40.771169 | 2011-10-24T15:04:43 | 2011-10-24T15:04:43 | 2,636,876 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import sys
def sumhundred(lines=None):
    """Sum 50-digit numbers given one per line; print and return the total.

    Args:
        lines: optional iterable of strings, each starting with 50 digits.
            When omitted (the original behavior), lines are read from
            "hundred.txt" in the current directory.

    Returns:
        The integer sum of all the numbers.

    Fixes: digit *i* of a 50-digit number (i = 0..49) has place value
    10**(49 - i); the original used 10**(50 - i), making every total ten
    times too large.  Also closes the input file (the original leaked the
    handle) and returns the total for callers/tests.
    """
    if lines is None:
        with open("hundred.txt") as f:
            lines = f.readlines()
    total = 0
    for line in lines:
        # Accumulate each of the 50 leading digits at its decimal place value.
        for i in range(0, 50):
            total += int(line[i]) * pow(10, 49 - i)
    print(total)
    return total
# Run the summation once when the script is executed.
sumhundred()
#print range(50,0,-1)
|
UTF-8
|
Python
| false | false | 2,011 |
1,211,180,802,098 |
207637496909de2bc37d96194fb5d1cfba2b46fb
|
9c9f128c5fdb7acfe14a4fe62536aac83839ca86
|
/hydraclient/contrib/django/hydraclient/views.py
|
7c2d53ae8c2a633a1eb27e1041dfd6d75cdaab04
|
[
"BSD-3-Clause"
] |
permissive
|
ericmoritz/hydra-python-client
|
https://github.com/ericmoritz/hydra-python-client
|
e6f05cceb6912e63442a3da389a94316e8a00549
|
a4f5564600e074ff0e835fe34ce6cb16fb31193d
|
refs/heads/develop
| 2021-01-22T00:59:38.898935 | 2014-04-03T19:17:57 | 2014-04-03T19:17:57 | 17,547,168 | 1 | 0 | null | false | 2014-03-21T15:39:37 | 2014-03-08T17:38:21 | 2014-03-21T15:39:37 | 2014-03-21T15:39:37 | 308 | 0 | 0 | 0 |
Python
| null | null |
from time import time
from django.template import RequestContext
from hydraclient.contrib.django.hydraclient import client
from hydraclient.core import mounts
from hydraclient.core.http import guess_serializer
from .render import render_response
import os
import mimetypes
from functools import wraps
from django.http import Http404
def resource(request, cfg_url=None):
    """Django view: proxy the request through a hydra service mount.

    Fetches the mount-configuration graph from *cfg_url*, resolves the
    current request URL to a backing service URL, fetches that service's
    response, and content-negotiates the rendering against the client's
    Accept header.  Raises Http404 when no mount matches.
    """
    client_url = request.build_absolute_uri("/") # TODO: use django's equiv to SCRIPT_NAME
    request_url = request.build_absolute_uri()
    # Load the configuration graph describing the available service mounts.
    cfg_resp = client.get(cfg_url, client_url)
    service_url = mounts.resolve(cfg_resp.graph, request_url)
    if service_url is None:
        raise Http404("no <http://vocabs-ld.org/vocabs/web-framework#ServiceMount> in {} for {}".format(cfg_url, request_url))
    # Fetch the backing service's representation of this resource.
    service_resp = client.get(service_url, request_url)
    user_agent_accept = request.META.get("HTTP_ACCEPT", "")
    response = render_response(
        service_resp,
        request_url,
        user_agent_accept,
        context_instance=RequestContext(request)
    )
    return response
def tc(callback):
    """Time a zero-argument callable; return (elapsed_seconds, result)."""
    started = time()
    result = callback()
    elapsed = time() - started
    return (elapsed, result)
def time_header_value(seconds):
    """Format a duration in seconds as a milliseconds header value, e.g. '12.5ms'.

    Fix: seconds convert to milliseconds by multiplying by 1000; the original
    multiplied by 10000, reporting times ten times too large.
    """
    return u"{t}ms".format(t=seconds * 1000)
def _ext(path_info):
bits = path_info.split(".")
ext = bits[-1]
base = ".".join(bits[:-1])
return base, ext
|
UTF-8
|
Python
| false | false | 2,014 |
17,051,020,197,318 |
01afe78879edb60c5fee33f5dd5698923cc70201
|
bb8db8040b98a6c3ec5dd07b03809dcffbe1f5c3
|
/updatetermine.py
|
25ac5484deab1ef50eaac0fe4d7dc710862b43a1
|
[] |
no_license
|
koebi/psycalendar
|
https://github.com/koebi/psycalendar
|
444b8281c038339db5ab310cad6a45bd8a7bf477
|
46a1bb84addb175faeeb564543a6bb8991720c0d
|
refs/heads/master
| 2016-09-05T17:36:44.496570 | 2014-09-07T08:51:14 | 2014-09-07T08:51:14 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python3
import io
import datetime
# NOTE(review): 'global' at module level is a no-op; kept for fidelity.
global TRENNER
global calTxtPath
# Absolute path of the plain-text calendar this script rewrites in place.
calTxtPath = "/home/koebi/scratch/termine.txt"
# Separator line printed under each day header.
TRENNER = "================="
def updateTermine():
    """Rewrite the calendar file: drop day sections before today and append
    an equal number of fresh empty day sections after the latest date.

    File format (one section per day):
        '<Wd> <dd>/<mm>/<yyyy>'  day header (German weekday abbreviation)
        '================='      separator line (TRENNER)
        free-form entry lines

    Fix: the final ``f.close`` was missing its call parentheses, so the
    output file handle was never closed; writing now uses ``with``.
    """
    with open(calTxtPath, 'rb') as f:
        data = f.read().decode('utf-8')
    t = data.split('\n')
    newdays = []
    # Normalize: any line starting with '=' becomes exactly TRENNER.
    for i in range(len(t)):
        if t[i] != "":
            if t[i][0] == "=":
                if t[i] != TRENNER:
                    t[i] = TRENNER
    # Collect the indices of day-header lines (the line above each separator).
    for i in range(len(t)):
        if t[i] == TRENNER:
            newdays += [i-1]
    today = datetime.date.today()
    latest = t[newdays[-1]]
    # Find the header whose date is today; everything before it is "old".
    for i in newdays:
        first = t[i]
        firstwd, firstmm, firstyy = first.split("/")
        firstwd, firstdd = firstwd.split()
        firstdd, firstmm, firstyy = int(firstdd), int(firstmm), int(firstyy)
        first = datetime.date(firstyy, firstmm, firstdd)
        if first == today:
            first = i
            break
    old = t[:first]
    t = t[first:]
    # Count the removed day sections so the file keeps a constant span of days.
    deldays = 0
    for i in old:
        if i != "":
            if i[0] == "=":
                deldays += 1
    latestwd, latestmm, latestyy = latest.split("/")
    latestwd, latestdd = latestwd.split()
    latestdd, latestmm, latestyy = int(latestdd), int(latestmm), int(latestyy)
    latest = datetime.date(latestyy, latestmm, latestdd)
    # Append one fresh empty section per removed day, dated after 'latest'.
    for i in range(deldays):
        newday = latest + datetime.timedelta(i+1)
        wd = dayToString(newday.weekday())
        dd = str(newday.day) if len(str(newday.day)) == 2 else '0' + str(newday.day)
        mm = str(newday.month) if len(str(newday.month)) == 2 else '0' + str(newday.month)
        yy = str(newday.year)
        newline1 = wd + " " + dd + "/" + mm + "/" + yy
        newline2 = TRENNER
        newline3 = ""
        t += [newline1, newline2, newline3]
    newdata = ""
    for i in t:
        newdata = newdata + i + "\n"
    # Trim the trailing two characters (the extra newlines added above).
    newdata = newdata[:-2]
    with open(calTxtPath, "wt") as f:
        f.write(newdata)
def dayToString(wd):
    """Map a weekday index (0=Monday .. 6=Sunday) to its German two-letter
    abbreviation."""
    return ("Mo", "Di", "Mi", "Do", "Fr", "Sa", "So")[wd]
if __name__ == "__main__":
    # Rewrite the calendar file when executed directly.
    updateTermine()
|
UTF-8
|
Python
| false | false | 2,014 |
13,606,456,431,775 |
74fce78df368444d44b7f52445c0d60436adb56c
|
0e46d1c443fe9f0a945e29dfd56d0012a3876098
|
/urls.py
|
686921201faff1049c0395ea25c3617f603d7f8c
|
[] |
no_license
|
tualatrix/Beauty-Django
|
https://github.com/tualatrix/Beauty-Django
|
f82a2d1972c87e965059de3e0c2375fc667c52b8
|
58af41c34d6767b40464809de63b5fe404ce7011
|
refs/heads/master
| 2016-09-06T19:41:36.838648 | 2011-12-12T20:19:25 | 2011-12-12T20:19:25 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.conf.urls.defaults import patterns, include, url
from usbeauty.views import home,upload,vote,about,top,rate
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
# Register ModelAdmin classes from all installed apps.
admin.autodiscover()
# URL routes for the usbeauty app (Django 1.x patterns() style).
urlpatterns = patterns('',
    # Examples:
    url(r'^$', home),
    url(r'^upload/$', upload),
    # NOTE(review): /vote/, /votea/ and /voteb/ all route to the same view.
    url(r'^vote/$', vote),
    url(r'^votea/$', vote),
    url(r'^voteb/$', vote),
    url(r'^about/$', about),
    url(r'^top/$', top),
    url(r'^rate/$', rate),
    #url(r'^votea/$', vote),
    #url(r'^voteb/$', vote),
    # url(r'^usbeauty/', include('usbeauty.foo.urls')),
    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    url(r'^admin/', include(admin.site.urls)),
)
|
UTF-8
|
Python
| false | false | 2,011 |
13,374,528,203,159 |
ba26c4b5fd52544353914c85fc9800a755ffff9f
|
ad10052619b7bc79d311940bd2419772c4bc8a53
|
/topcoder-master-2/project_euler/004.py
|
e61e692db6b5dd6710f86df67d8a77520e8833e6
|
[] |
no_license
|
bluepine/topcoder
|
https://github.com/bluepine/topcoder
|
3af066a5b1ac6c448c50942f98deb2aa382ba040
|
d300c8a349a8346dba4a5fe3b4f43b17207627a1
|
refs/heads/master
| 2021-01-19T08:15:06.539102 | 2014-04-02T21:10:58 | 2014-04-02T21:10:58 | 18,381,690 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import math
def is_palindrome(s):
    """Return True if the string *s* reads the same forwards and backwards.

    Compares the first half against the mirrored second half; the middle
    character of an odd-length string needs no check.

    Fixes: Python-2-only ``xrange`` replaced by ``range`` and the redundant
    ``int(len(s)/2)`` by floor division — behavior is unchanged and both
    forms remain valid Python 2.
    """
    for i in range(len(s) // 2):
        if s[i] != s[-i - 1]:
            return False
    return True
# Project Euler 4: largest palindrome that is a product of two 3-digit numbers.
ret = 0
for i in range(100, 1000):
    # Start j at i so each unordered pair is checked only once.
    for j in range(i, 1000):
        if is_palindrome(str(i * j)):
            ret = max(ret, i * j)
print ret
|
UTF-8
|
Python
| false | false | 2,014 |
2,379,411,891,218 |
6a942a7718abd57a488163c7dbef480f017c3fc9
|
1dc73d1dd6ea5bd66920402219a5f2f775d4a7b1
|
/chain_bitcoin/urls.py
|
43f723e035a814eb3e8631069c56188a0f6f6998
|
[
"MIT"
] |
permissive
|
cardforcoin/chain-bitcoin-python
|
https://github.com/cardforcoin/chain-bitcoin-python
|
f012bf058ac23f5cade5c4429b398dcca9286d7a
|
a242a538c6905467d2342183e8d20558c492dcd2
|
refs/heads/master
| 2020-05-18T15:30:03.681448 | 2014-10-06T00:12:38 | 2014-10-06T00:12:38 | 23,376,671 | 2 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from __future__ import absolute_import
__all__ = ('make_url', 'make_query_string')
try:
from urllib.parse import urlencode # python 3
except ImportError:
from urllib import urlencode # python 2
import re
# Valid URL path components: alphanumerics, underscore, hyphen, comma (case-insensitive).
path_component_regex = re.compile(r'^[0-9a-z_\-,]+$', re.I)
def make_url(base, path_components, query_params=None):
    """Join *base* with validated path components and an encoded query string.

    Raises ValueError when a path component contains a character outside the
    set permitted by ``path_component_regex``.
    """
    for component in path_components:
        if not path_component_regex.match(component):
            raise ValueError('Invalid character in "{0}"'.format(component))
    joined_path = '/'.join([base] + path_components)
    parts = [joined_path, make_query_string(query_params)]
    # Drop empty pieces so a missing query string adds no '?'.
    return '?'.join([part for part in parts if part])
def make_query_string(query_params):
    """Encode (key, value) pairs as 'k=v&...', dropping pairs whose value is None."""
    kept = [pair for pair in (query_params or []) if pair[1] is not None]
    return '&'.join(url_encode_tuple(pair) for pair in kept)
def url_encode_tuple(tup):
    """Percent-encode a single (key, value) pair as 'key=value'."""
    key, value = tup
    return urlencode({key: value})
|
UTF-8
|
Python
| false | false | 2,014 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.