__id__
int64 17.2B
19,722B
| blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 4
133
| content_id
stringlengths 40
40
| detected_licenses
list | license_type
stringclasses 3
values | repo_name
stringlengths 7
73
| repo_url
stringlengths 26
92
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 12
values | visit_date
timestamp[ns] | revision_date
timestamp[ns] | committer_date
timestamp[ns] | github_id
int64 61.3k
283M
⌀ | star_events_count
int64 0
47
| fork_events_count
int64 0
15
| gha_license_id
stringclasses 5
values | gha_fork
bool 2
classes | gha_event_created_at
timestamp[ns] | gha_created_at
timestamp[ns] | gha_updated_at
timestamp[ns] | gha_pushed_at
timestamp[ns] | gha_size
int64 0
5.76M
⌀ | gha_stargazers_count
int32 0
82
⌀ | gha_forks_count
int32 0
25
⌀ | gha_open_issues_count
int32 0
80
⌀ | gha_language
stringclasses 5
values | gha_archived
bool 1
class | gha_disabled
bool 1
class | content
stringlengths 19
187k
| src_encoding
stringclasses 4
values | language
stringclasses 1
value | is_vendor
bool 1
class | is_generated
bool 1
class | year
int64 2k
2.01k
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
12,592,844,153,160 |
a8278487accb6943c384c8cbafbbf5d836ae25b0
|
d5d2cb4dc56781056f0c74de7b0ce94aaead25c4
|
/scripts/myWork.py
|
22a06cbcbbb1140f167067795f6167e78a0f49af
|
[] |
no_license
|
mickle00/geektool
|
https://github.com/mickle00/geektool
|
6840458baca8c62fb639b91c1451137ec4821322
|
6b1faa1786139679781fedd7629cf3b1b84be7ec
|
refs/heads/master
| 2021-01-01T19:15:34.416282 | 2012-12-04T19:00:26 | 2012-12-04T19:00:26 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python2.7
from enterprise import SforceEnterpriseClient
from ConfigParser import SafeConfigParser
import sys
from subprocess import call
import urllib2
import urllib
import json
import keyring
import os
def getCredentials():
    """Return (username, password, security_token) for the Salesforce login.

    The password is read from the OS keychain via keyring; the security
    token slot is returned empty (not needed from this host).
    """
    user = '[email protected]'
    secret = keyring.get_password('GSO_Production', user)
    return user, secret, ''
def doLogin():
    """Log in to Salesforce through the Enterprise WSDL and return the session id."""
    creds = getCredentials()
    # The WSDL lives in ../config relative to this script's directory.
    base_dir = os.path.split(os.path.abspath(__file__))[0]
    wsdl_path = os.path.join(base_dir, '..', 'config', 'wsdl.jsp.xml')
    client = SforceEnterpriseClient(wsdl_path)
    login_result = client.login(*creds)
    return login_result['sessionId']
def doUrllib(queryString):
    """Run a SOQL query against the Salesforce REST API and return the raw response body.

    Only spaces are escaped here; callers may pass queries that are
    already percent-encoded, so full quoting is deliberately avoided.
    """
    encoded = queryString.replace(' ', '%20')
    session = doLogin()
    request = urllib2.Request(
        "https://na8.salesforce.com/services/data/v24.0/query/?q=" + encoded)
    request.add_header('Authorization', 'OAuth ' + session)
    request.add_header('X-PrettyPrint', 1)
    return urllib2.urlopen(request).read()
def parseCaseJSON(jsonResponse):
    """Render the "My Open Cases" report for GeekTool.

    jsonResponse: raw JSON string from doUrllib(); expects 'totalSize'
    and 'records' keys, each record carrying Status/CaseNumber/Subject.
    Writes a fixed-width text report to the GeekTool tmp directory.
    """
    data = json.loads(jsonResponse)
    # Fix: 'with' guarantees the file is closed even if a record is
    # malformed and the loop raises (original leaked the handle on error).
    with open('/Users/mistewart/geektool/tmp/cases.txt', 'w') as output:
        output.write('\n My Open Cases\n')
        output.write('----------------\n')
        if data['totalSize'] > 0:
            for record in data['records']:
                output.write('{0: <25}{1: <10}{2}'.format(
                    record['Status'], record['CaseNumber'], record['Subject']))
                output.write('\n')
def parseProjectJSON(jsonResponse):
    """Render the "My Open Projects" report for GeekTool.

    NOTE(review): writes to log.txt — the same file parseRequirementJSON
    writes — so last writer wins; confirm this is intended (the project
    report is currently disabled at the call site).
    """
    data = json.loads(jsonResponse)
    # Fix: 'with' guarantees the handle is closed even on a mid-loop error.
    with open('/Users/mistewart/geektool/tmp/log.txt', 'w') as output:
        output.write('\n My Open Projects\n')
        output.write('----------------\n')
        if data['totalSize'] > 0:
            for record in data['records']:
                output.write('{0: <20}{1}'.format(
                    record['Status__c'], record['Project_Name__c']))
                output.write('\n')
def parseRequirementJSON(jsonResponse):
    """Render the "My Open Requirements" report for GeekTool.

    Expects records with Status__c and Description__c custom fields.
    """
    data = json.loads(jsonResponse)
    # Fix: 'with' guarantees the handle is closed even on a mid-loop error.
    with open('/Users/mistewart/geektool/tmp/log.txt', 'w') as output:
        output.write('\n My Open Requirements\n')
        output.write('----------------\n')
        if data['totalSize'] > 0:
            for record in data['records']:
                output.write('{0: <20}{1}'.format(
                    record['Status__c'], record['Description__c']))
                output.write('\n')
def parseCaseImage(jsonResponse):
    """Build a Google Charts pie of open cases per owner and save it as a PNG.

    Expects records with 'Name' (owner) and 'total' (case count) keys.
    Does nothing when the result set is empty.
    """
    data = json.loads(jsonResponse)
    if data['totalSize'] <= 0:
        return
    chartVals = unicode('')
    chartNames = unicode('')
    for record in data['records']:
        chartVals += str(record['total']) + ','
        chartNames += str(record['Name']) + ' (' + str(record['total']) + ')|'
    # Drop the trailing separator each loop iteration appended.
    chartVals = chartVals[:-1]
    chartNames = chartNames[:-1]
    chartUrl = ("https://chart.googleapis.com/chart?cht=p3&chd=t:" + chartVals
                + "&chco=0000FF&chs=750x300&chl=" + chartNames + "&chf=bg,s,000000")
    chartUrl = chartUrl.replace(' ', '%20')
    chartFile = urllib2.urlopen(chartUrl)
    try:
        # Fix: close both the HTTP response and the output file even on
        # error (the original left the response open unconditionally).
        with open('/Users/mistewart/geektool/tmp/image.png', 'wb') as output:
            output.write(chartFile.read())
    finally:
        chartFile.close()
def getCaseImage():
    """Fetch per-owner open-case counts and render them as a pie-chart PNG.

    Chart URL parameters (see parseCaseImage): chd=t: carries values,
    chl the labels, chs the size, chf the background fill.
    """
    #TODO: INCLUDE Closed-Reply
    ownerCountQuery = ("SELECT%20count(id)%20total%2C%20owner.name%20from%20Case"
                       "%20WHERE%20RecordTypeId%20%3D%20'012C00000004XXi'"
                       " AND isClosed=FALSE%20GROUP%20BY%20Owner.Name")
    parseCaseImage(doUrllib(ownerCountQuery))
# --- Script entry: pull each report and write its output for GeekTool. ---
# Open cases owned by user 005C0000003oJCT, including "Closed - Reply" ones.
caseQuery = "SELECT Id, CaseNumber, Status, Subject FROM Case WHERE OwnerId = '005C0000003oJCT' AND (isClosed = FALSE OR Status ='Closed - Reply')"
parseCaseJSON(doUrllib(caseQuery))
# Project report is currently disabled.
#projectQuery = "SELECT Id, Name, Project_Name__c, Status__c FROM PM_Project__c WHERE (OwnerId = '005C0000003oJCT' OR Developer__c = '005C0000003oJCT') AND Status__c NOT IN ('Completed', 'Cancelled') ORDER BY Status__c"
#parseProjectJSON(doUrllib(projectQuery))
# Requirements assigned to the viewing user that are still open.
requirementQuery = "SELECT Description__c, Release__r.Deployment_Date__c, Status__c FROM PM_Requirement__c WHERE Current_Assigned_To_Viewing__c = 1 AND Status__c NOT IN ('Completed','Cancelled','Duplicate', 'Not a Requirement') ORDER BY Status__c"
parseRequirementJSON(doUrllib(requirementQuery))
getCaseImage()
|
UTF-8
|
Python
| false | false | 2,012 |
17,918,603,590,421 |
a2a3760b3cba5db82695441eb0381c380a773437
|
1d1ae8830f16b1f39534b2e362fef81dc2f511d0
|
/jobtest_Ruslan/mydata/admin.py
|
b1c22f1119c83473d49389ea7b310a7cdcc8e3a5
|
[] |
no_license
|
Desperado/Ruslan-test-project
|
https://github.com/Desperado/Ruslan-test-project
|
d04be3fef964033e075738cc0321d18b72dc8abd
|
7566b00fac1d744d6e2626e1e4d0aa55c38d397c
|
refs/heads/master
| 2020-05-28T09:14:51.478535 | 2010-04-06T22:02:19 | 2010-04-06T22:02:19 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.contrib import admin
from models import Mybio
class MybioAdmin(admin.ModelAdmin):
    """Admin configuration for the Mybio model."""
    # Columns shown on the admin change-list page.
    list_display = ("first_name", "last_name", "bio", "contacts", "date_of_birth", )
admin.site.register(Mybio, MybioAdmin)
|
UTF-8
|
Python
| false | false | 2,010 |
15,341,623,202,483 |
e6f9fa0ab64eb7ef51f8fc82de27a002525f9c77
|
67b2be20c931cf53df782dbfa7783542e60fa484
|
/dgreed/tools/fnt2bft.py
|
e551940ca5275ee102c8c0fb484a7d4f357fef2c
|
[] |
no_license
|
carlossless/quibble
|
https://github.com/carlossless/quibble
|
78d3c19b552c447b2181c24d3bbd9462f9e20cc0
|
f816d7fc2aa8566d3e410ed9ed6579364c9b9913
|
refs/heads/master
| 2021-01-10T13:27:55.083085 | 2014-10-13T11:29:03 | 2014-10-13T11:29:03 | 45,132,977 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python2
from __future__ import with_statement
import sys
# Force latin-1 as the process-wide default codec so single-byte glyph/file
# names survive str/unicode mixing (Python 2 only; reload() re-exposes the
# otherwise-deleted sys.setdefaultencoding).
reload(sys)
sys.setdefaultencoding('latin-1')
import struct, xml.parsers.expat
# Accumulated binary output; begins with the 4-byte "FONT" magic.
output = "FONT"
def put_byte(val):
    """Append *val* to the global output buffer.

    NOTE(review): despite the name, this packs a little-endian 16-bit
    signed short ('<h', two bytes), not a single byte — the .bft format
    apparently uses 16-bit fields throughout; confirm before renaming.
    """
    global output
    output += struct.pack('<h', val)
def start(name, attrs):
    """Expat start-element handler: serialize the interesting .fnt elements
    into the global binary buffer via put_byte."""
    global output
    if name == 'common':
        put_byte(int(attrs['lineHeight']))
    elif name == 'page':
        # Texture page: length-prefixed file name.
        put_byte(len(attrs['file']))
        output += attrs['file']
    elif name == 'chars':
        put_byte(int(attrs['count']))
    elif name == 'char':
        # Per-glyph record: fixed field order expected by the reader.
        for field in ('id', 'x', 'y', 'width', 'height',
                      'xoffset', 'yoffset', 'xadvance'):
            put_byte(int(attrs[field]))
def end(name):
    """Expat end-element handler; nothing to emit on closing tags."""
    pass
# Command-line entry point: fnt2bft.py <input.fnt> <output.bft>
if __name__ == "__main__":
    if len(sys.argv) != 3:
        print "Provide file to convert and output file"
    else:
        # Stream-parse the XML .fnt; the handlers above append to the
        # global 'output' buffer as elements are encountered.
        parser = xml.parsers.expat.ParserCreate()
        parser.StartElementHandler = start
        parser.EndElementHandler = end
        with open(sys.argv[1]) as infile:
            parser.ParseFile(infile)
        # Binary blob, hence 'wb'.
        with open(sys.argv[2], 'wb') as outfile:
            outfile.write(output)
|
UTF-8
|
Python
| false | false | 2,014 |
7,679,401,539,981 |
bc2f23e1b3dd19864c292deaab84731e0bf6de64
|
73ec0708d8af80296330b482e17f537ff75a87d0
|
/sem.py
|
c64128cf3c630df5aa4e4a25a71b557b9c172157
|
[] |
no_license
|
alansalomonr/SEM
|
https://github.com/alansalomonr/SEM
|
501b80e297930d78d533917240654746d494b065
|
13f2f1c6abf438271aa1869f9ec479e205cd87cc
|
refs/heads/master
| 2020-12-31T01:47:43.250475 | 2012-05-25T21:13:41 | 2012-05-25T21:13:41 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
#
# SEM (Security Enhanced Messaging) is a PoC for implementing Covert Channels over ICMP protocol.
# by renateitor
#
# Last release available in:
# https://github.com/renateitor/SEM
#
# Debe ejecutarse como root ya que es la unica forma de crear paquete de red a medida
# Dependencias: tcpdump, python-scapy
#
# Importamos librerias
import os
import subprocess
import time
import sys
import getopt
import logging
import random
import math
# Only report scapy errors (suppresses its noisy runtime warnings).
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from scapy.all import *
# Runs in simple (quiet) mode by default.
verbose = False
# Parse the command-line options.
opts, extra = getopt.getopt(sys.argv[1:], 't:e:hvb', ['target=','encode=','help','verbose','background'])
# Names intended to be shared across functions.
# NOTE(review): 'global' at module level is a no-op — module-level names
# are already global — so these four statements have no effect.
global targetfromparam
global target
global encodefromparam
global encodetype
# The functions below do all of the work.
def backgroundmode():
    """Run SEM as a background logger.

    Prompts for settings, spawns the receiver process detached so it logs
    every incoming chat message and file, prints how to follow and stop
    it, then exits the foreground program.
    """
    print '\n'
    encodetype = raw_input('Encode Type [0]: ')
    passwd = raw_input('Key for the communication(8-char) [20121357]: ')
    interface = raw_input('Interface for the communication (listening) [eth0]: ')
    logtime = str(int(time.time()))
    logsfile = raw_input('Log File [./semlogfile_'+logtime+']: ')
    # Apply defaults for any prompt left blank.
    if encodetype == '':
        encodetype = '0'
    if passwd == '':
        passwd = '20121357'
    if interface == '':
        interface = 'eth0'
    if logsfile == '':
        logsfile = './semlogfile_'+logtime
    # Truncate values that exceed their maximum length.
    passwd = passwd[0:8]
    encodetype = encodetype[0:1]
    # Pad values that are shorter than the minimum length.
    while passwd.__len__() < 8:
        passwd = passwd+'_'
    os.system('echo \'\' > '+logsfile)
    # Launch the receiver script as a detached child process.
    rec_p = subprocess.Popen(['python', 'recive.py','--interface='+interface,'--file='+logsfile,'--password='+passwd,'--encode='+encodetype,'--background','&'])
    rec_pid = rec_p.pid
    print '\nStarting SEM in background mode...'
    time.sleep(1)
    print '\nTo watch the log file while it grows run: tail -f '+logsfile
    time.sleep(1)
    print '\nPID: '+str(rec_pid)
    print '\nTo stop recording run: kill -9 '+str(rec_pid)+'\n\n'
    exit()
def encoder(source, dest):
    """Base64-encode file *source* into file *dest* using the base64(1) tool.

    Fix: the original built a shell command by string concatenation
    (os.system('base64 src > dest')), which breaks on spaces and allows
    shell injection through the user-supplied path. An argument list with
    a redirected stdout has identical output with neither problem.
    """
    with open(dest, 'w') as out:
        subprocess.call(['base64', source], stdout=out)
# Decodes base64 back into the original file.
def decoder(source, dest):
    """Base64-decode file *source* into file *dest* using the base64(1) tool.

    Fix: same shell-injection / unquoted-path problem as encoder();
    replaced the os.system string with an argument list plus redirected
    stdout, which produces identical output.
    """
    with open(dest, 'w') as out:
        subprocess.call(['base64', '-d', source], stdout=out)
# Sends the given text over the ICMP covert channel.
# If (tipo) is 'f' we are sending a file, 't' a chat string, 's' an md5sum.
def sendtxt(txt,tipo):
    """Split *txt* into chunks and send one ICMP packet per chunk.

    Each payload is: cypher(passwd) + cypher(name) + state-flag byte +
    encode-type byte + cypher(chunk). The flag tells the receiver whether
    the chunk is first/middle/last/only, and whether it belongs to a chat
    message, a file transfer, or a checksum.
    Relies on module globals: target, passwd, name, encodetype, verbose.
    """
    # From here on we build and send the packets.
    # Build layer 3 of the packet (IP).
    l3 = IP()
    l3.dst = target
    # Build layer 4 of the packet (ICMP).
    l4 = ICMP()
    # Remaining bookkeeping.
    msgsize = 12 # the message is sent in chunks; this is each chunk's size
    # (first) (last) (count) drive the chunking/sending loop below.
    first = 0
    last = (msgsize)
    count = (len(txt)/msgsize)+1
    # Send one packet for each chunk of the data.
    if verbose:
        print " [ %s : " %(count),
    for a in range(0, count):
        if verbose:
            print "%s " %(a + 1),
        # Chat (text) transfer.
        if tipo == 't':
            # First chunk and NOT the only one -> flag '0'.
            if (a == 0) and (a+1 != count):
                payload = cypher(passwd,encodetype) + cypher(name,encodetype) +'0' + encodetype + cypher(txt[first:last],encodetype)
            # First chunk and the only one -> flag '5'.
            elif (a == 0) and (a+1 == count):
                payload = cypher(passwd,encodetype) + cypher(name,encodetype) +'5' + encodetype + cypher(txt[first:last],encodetype)
            # Last chunk -> flag '9'.
            elif a+1 == count:
                payload = cypher(passwd,encodetype) + cypher(name,encodetype) +'9' + encodetype + cypher(txt[first:last],encodetype)
            # Neither first nor last -> flag '1'.
            else:
                payload = cypher(passwd,encodetype) + cypher(name,encodetype) +'1' + encodetype + cypher(txt[first:last],encodetype)
        # File transfer.
        elif tipo == 'f':
            # First packet of the file -> flag '4'.
            if a == 0:
                payload = cypher(passwd,encodetype) + cypher(name,encodetype) +'4' + encodetype + cypher(txt[first:last],encodetype)
            # Neither the first nor the last packet -> flag '2'.
            elif (a+1 != count) and (a != 0):
                payload = cypher(passwd,encodetype) + cypher(name,encodetype) +'2' + encodetype + cypher(txt[first:last],encodetype)
            # Last chunk of the file -> flag '3'.
            else:
                payload = cypher(passwd,encodetype) + cypher(name,encodetype) +'3' + encodetype + cypher(txt[first:last],encodetype)
        # md5sum transfer.
        elif tipo == 's':
            # Not the last chunk -> flag '7'.
            if a+1 != count:
                payload = cypher(passwd,encodetype) + cypher(name,encodetype) +'7' + encodetype + cypher(txt[first:last],encodetype)
            # Last chunk -> flag '8'.
            else:
                payload = cypher(passwd,encodetype) + cypher(name,encodetype) +'8' + encodetype + cypher(txt[first:last],encodetype)
        # Assemble the packet (scapy fills in the layers we did not set).
        pkt = l3/l4/payload
        # Send the packet.
        a = sr(pkt, verbose = 0, retry = 0, timeout = 1)
        first += msgsize
        last += msgsize
    if verbose:
        print ']'
# Prints the in-app command reference.
def showhelp():
    print '''
    :h!        Show this help
    :q!        Exit Program
    :c!        Clear Screen
    :v!        Start Verbose Mode
    :s!        Start Simple Mode (Stop Verbose)
    :send!     Send a File
    :save!     Save a Recived File
    '''
def getmd5(file_sum):
    """Return the MD5 of *file_sum* in ``md5sum`` output format: "<hex>  <path>\\n".

    Fix: the original shelled out to md5sum through an unquoted os.system
    string (shell-injection via the path), wrote a temp ``<path>.sum``
    file and removed it with ``rm -f``. hashlib computes the same digest
    in-process; the return string keeps md5sum's "hash, two spaces, file
    name, newline" shape that callers split on.
    """
    import hashlib
    digest = hashlib.md5()
    with open(file_sum, 'rb') as fsum:
        # Stream in chunks so large transfers do not load fully into RAM.
        for chunk in iter(lambda: fsum.read(65536), b''):
            digest.update(chunk)
    return digest.hexdigest() + '  ' + file_sum + '\n'
def cypher(txt, tipocifrado):
    """Apply the selected character-shift obfuscation to *txt*.

    Modes: '0' no transformation; '1' shift printable-range codes up by 5;
    '2' shift even positions up by 7 and odd positions down by 3;
    '3' remap the conspicuous ranges (digits, A-Z, a-z) onto unprintable
    code points. Characters outside each mode's safe range pass through.
    Unknown modes fall through (returning None), matching the original.
    """
    if tipocifrado == '0':  # no encoding applied
        return txt
    if tipocifrado == '1':  # every ASCII decimal value (ord) raised by 5
        return ''.join(
            chr(ord(ch) + 5) if 20 <= ord(ch) <= 245 else ch
            for ch in txt)
    if tipocifrado == '2':  # even positions +7, odd positions -3
        pieces = []
        for pos, ch in enumerate(txt):
            code = ord(ch)
            if pos % 2 != 0:
                pieces.append(chr(code - 3) if 28 <= code <= 253 else ch)
            else:
                pieces.append(chr(code + 7) if 18 <= code <= 243 else ch)
        return ''.join(pieces)
    if tipocifrado == '3':  # hide the critical ranges (a-z, A-Z, 0-9)
        pieces = []
        for ch in txt:
            code = ord(ch)
            if 48 <= code <= 57:        # digits: subtract 30
                ch = chr(code - 30)
            elif 65 <= code <= 90:      # uppercase: add 101
                ch = chr(code + 101)
            elif 97 <= code <= 122:     # lowercase: add 116
                ch = chr(code + 116)
            pieces.append(ch)
        return ''.join(pieces)
# Flags recording whether target/encode were supplied on the command line
# (in which case the interactive prompts for them are skipped).
encodefromparam = False
targetfromparam = False
for code,param in opts:
    if code in ['-h','--help']:
        # Print usage and exit immediately.
        print '''
SEM (Security Enhanced Messaging) is a PoC for implementing Covert Channels over ICMP protocol.
Last release available in: https://github.com/renateitor/SEM
*** Must run as root ***
Deps: tcpdump, python-scapy
EXTERNAL PARAMS:
    -h --help
        Show this message
    -v --verbose
        Show inside app information about the number of ICMP packages sent
    -b --background
        Run in background mode (logs all incoming chat and files)
    -t --target
        Define the target PC when calling the script, instead of defining it from inside the program
    -e --encode
        Define the encode type when calling the script, instead of defining it from inside the program
INSIDE APP PARAMS:
    :q!
        Exit Program
    :c!
        Clear Screen
    :h!
        Show Help
    :v!
        Start Verbose Mode
    :s!
        Start Simple Mode (Stop Verbose)
    :send!
        Send a File
    :save!
        Save a Recived a File
'''
        exit()
    elif code in ['-b','--background']:
        # Background mode never returns (it exits after spawning the logger).
        backgroundmode()
    else:
        if code in ['-v','--verbose']:
            verbose = True
        if code in ['-t','--target']:
            targetfromparam = True
            target = param
        if code in ['-e','--encode']:
            encodefromparam = True
            encodetype = param
# Interactive user interface starts here.
# Create/empty the file where this session's incoming messages are logged.
os.system('echo \'\' > message.txt')
# Prompt for the parameters needed for the communication.
name = raw_input('Name(4-char) [random]: ')
if targetfromparam == False:
    target = raw_input('Target device [192.168.1.1]: ')
if encodefromparam == False:
    encodetype = raw_input('Encode Type [0]: ')
passwd = raw_input('Key for the communication(8-char) [20121357]: ')
interface = raw_input('Interface for the communication (listening) [eth0]: ')
# Apply defaults for any prompt the user left blank.
if name == '':
    # Generate a random name when none was given.
    name = ''.join([random.choice('abcdefghijklmnopqrstuvwxyz0123456789') for x in xrange(4)])
if target == '':
    target = '192.168.1.1'
if encodetype == '':
    encodetype = '0'
if passwd == '':
    passwd = '20121357'
if interface == '':
    interface = 'eth0'
# Truncate values that exceed their maximum length.
name = name[0:4]
passwd = passwd[0:8]
encodetype = encodetype[0:1]
# Pad values that are shorter than the minimum length.
while name.__len__() < 4:
    name = name+'_'
while passwd.__len__() < 8:
    passwd = passwd+'_'
# Start the receiver in the background to print/log incoming messages.
rec_p = subprocess.Popen(['python', 'recive.py','--name='+name,'--interface='+interface,'--password='+passwd,'--encode='+encodetype,'&'])
rec_pid = rec_p.pid
# Chat loop.
print '\nTo exit write: \':q!\''
print 'To have help write: \':h!\'\n\n'
while True:
    # Read the user's input.
    txt=raw_input('')
    # In-app commands.
    if txt.strip() ==':c!': # Clear Screen
        os.system('clear')
        continue
    elif txt.strip() ==':send!': # Send File
        source = raw_input('File Path (no spaces): ')
        dest = '/tmp/semSharedFile'
        encoder(source,dest)
        fdest = open(dest, "r")
        txt = fdest.read()
        fdest.close()
        sendtxt(txt,'f') # Send the file contents (base64)
        md5orig = str(getmd5(source)) # md5sum of the original file being sent
        sendtxt(md5orig,'s') # Send the md5sum (computed before base64 conversion)
        # Remove the temp file holding the base64 of the outgoing file.
        os.system('rm -f /tmp/semSharedFile')
        continue
    elif txt.strip() ==':save!': # Save a recived File
        transid = raw_input('Transfer ID: ')
        # Temp location where the base64 of the received file was stored.
        source = '/tmp/'+transid
        dest = raw_input('Save in (full path): ')
        decoder(source,dest)
        print '\n\n\n ***[ File Successfully Saved! ]***'
        print ' - path: '+dest+' -\n'
        # Show the md5sum of the file we just saved so it can be compared.
        print ' Local File md5sum: '+str(getmd5(dest)).split(' ')[0]+'\n\n\n'
        continue
    elif txt.strip() ==':h!': # Show Help
        showhelp()
        continue
    elif txt.strip()==':q!': # Exit
        break
    elif txt.strip() ==':v!': # Verbose Mode
        verbose = True
        continue
    elif txt.strip() ==':s!': # Simple Mode
        verbose = False
        continue
    else: # Send User Text
        txt=txt+'\n'
        sendtxt(txt,'t')
# Kill the background receiver that logs/prints incoming packets.
os.system('kill -9 '+str(rec_pid))
# Move the session log to a timestamped file.
logfilename = str(int(time.time()))
os.system('mv message.txt chatlog_'+logfilename[-6:-1]+'.txt')
print '\n\n\n*** [ Session Log File: chatlog_'+logfilename[-6:-1]+'.txt ] ***'
print '\n\nGood Bye!\n\n'
|
UTF-8
|
Python
| false | false | 2,012 |
18,751,827,216,436 |
1971bbb5a8c0e1f423c38445cef85cbef1042722
|
f35aa241b67961fb9820e6a99bb275e7093b0e24
|
/spotseeker_server/test/images/spot_info.py
|
b2eeebfc9918ea705cbb76336bfe9efa279e253f
|
[
"Apache-2.0"
] |
permissive
|
vegitron/spotseeker-server
|
https://github.com/vegitron/spotseeker-server
|
73dcce999763343e6fcd5d778dff0a391dd4bac5
|
23b8416025e478a1740200a9a9a4302fd09d7937
|
refs/heads/master
| 2021-01-16T19:20:46.218968 | 2012-08-08T17:15:22 | 2012-08-08T17:15:22 | 3,641,470 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.utils import unittest
from django.test.client import Client
from django.conf import settings
from os.path import abspath, dirname
from spotseeker_server.models import Spot, SpotImage
from django.core.files import File
from PIL import Image
import simplejson as json
TEST_ROOT = abspath(dirname(__file__))
class SpotResourceImageTest(unittest.TestCase):
    """Checks the "images" section of the spot resource JSON."""

    def setUp(self):
        settings.SPOTSEEKER_AUTH_MODULE = 'spotseeker_server.auth.all_ok'
        spot = Spot.objects.create(name="This is to test images in the spot resource")
        self.spot = spot
        # One SpotImage per supported format, all attached to the same spot
        # (the original repeated this open/create/close sequence three times).
        self.gif = self._create_image("This is the GIF test", "test_gif.gif")
        self.jpeg = self._create_image("This is the JPEG test", "test_jpeg.jpg")
        self.png = self._create_image("This is the PNG test", "test_png.png")

    def _create_image(self, description, filename):
        """Create a SpotImage for self.spot from a file in the test resources."""
        f = open("%s/../resources/%s" % (TEST_ROOT, filename))
        try:
            return SpotImage.objects.create(description=description, spot=self.spot, image=File(f))
        finally:
            # Close the source handle even if image creation raises.
            f.close()

    def test_empty_image_data(self):
        """A spot without images serializes with an empty images array."""
        spot = Spot.objects.create(name="A spot with no images")
        c = Client()
        response = c.get('/api/v1/spot/{0}'.format(spot.pk))
        spot_dict = json.loads(response.content)
        self.assertEquals(len(spot_dict["images"]), 0, "Has an empty array for a spot w/ no images")

    def _assert_image_data(self, image, spot_image, content_type, filename, label):
        """Validate one serialized image dict against its SpotImage and source file."""
        self.assertEquals(image["url"], "/api/v1/spot/{0}/image/{1}".format(self.spot.pk, spot_image.pk))
        self.assertEquals(image["thumbnail_root"], "/api/v1/spot/{0}/image/{1}/thumb".format(self.spot.pk, spot_image.pk))
        self.assertEquals(image["content-type"], content_type)
        img = Image.open("%s/../resources/%s" % (TEST_ROOT, filename))
        self.assertEquals(image["width"], img.size[0], "Includes the %s width" % label)
        self.assertEquals(image["height"], img.size[1], "Includes the %s height" % label)
        self.assertEquals(image["creation_date"], image["modification_date"], "Has the same modification and creation date")
        self.assertEquals(image["upload_user"], "", "Lists an empty upload user")
        self.assertEquals(image["upload_application"], "", "Lists an empty upload application")

    def test_image_data(self):
        """All three images appear in the JSON with correct metadata."""
        c = Client()
        response = c.get('/api/v1/spot/{0}'.format(self.spot.pk))
        spot_dict = json.loads(response.content)
        self.assertEquals(len(spot_dict["images"]), 3, "Has 3 images")
        has_gif = False
        has_png = False
        has_jpg = False
        for image in spot_dict["images"]:
            if image["id"] == self.gif.pk:
                has_gif = True
                self._assert_image_data(image, self.gif, "image/gif", "test_gif.gif", "gif")
            if image["id"] == self.png.pk:
                has_png = True
                self._assert_image_data(image, self.png, "image/png", "test_png.png", "png")
            if image["id"] == self.jpeg.pk:
                has_jpg = True
                self._assert_image_data(image, self.jpeg, "image/jpeg", "test_jpeg.jpg", "jpeg")
        self.assertEquals(has_gif, True, "Found the gif")
        self.assertEquals(has_jpg, True, "Found the jpg")
        self.assertEquals(has_png, True, "Found the png")
|
UTF-8
|
Python
| false | false | 2,012 |
3,066,606,666,113 |
8d2bbe784fa7ea8c6c2466a37f98d619760859bf
|
891b7158b8d0e382fe9ebf01b606e158ce000fca
|
/copperhead/tests/recursive_equal.py
|
e141009daf6e4b9870a2994a534f4621a1147c08
|
[
"Apache-2.0"
] |
permissive
|
configithub/numpy-gpu
|
https://github.com/configithub/numpy-gpu
|
73247544d9c5a4c3295a09fd112bcdd4eef85f46
|
8320fba8f81a6dd0d6952f01e152f0146d70b9ab
|
refs/heads/master
| 2021-01-19T08:07:50.204577 | 2014-11-09T23:54:36 | 2014-11-09T23:54:36 | 26,409,230 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#
# Copyright 2012 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import collections
import itertools
def recursive_equal(a, b):
    """Deep equality for arbitrarily nested iterables.

    Strings/bytes are compared atomically (recursing into their characters
    would recurse forever on Python 3, where a 1-char string iterates to
    itself); other iterables are compared element-wise and must also agree
    on length; everything else falls back to ``==``.

    Fixes vs. the original: (1) the element comparison (imap/zip-style)
    stopped at the shorter input, and because it consumed generators
    before the length check, the leftover counts could still match —
    both sides are materialized once up front instead; (2) an iterable
    compared against a non-iterable no longer raises.
    """
    try:
        from collections.abc import Iterable  # Python 3
    except ImportError:
        from collections import Iterable      # Python 2
    if isinstance(a, (str, bytes)) or isinstance(b, (str, bytes)):
        return a == b
    if isinstance(a, Iterable) and isinstance(b, Iterable):
        xs = list(a)
        ys = list(b)
        return len(xs) == len(ys) and all(map(recursive_equal, xs, ys))
    return a == b
|
UTF-8
|
Python
| false | false | 2,014 |
3,006,477,140,100 |
7727c2fb4775fd248f2d78ba8c5df2b58b3828f6
|
0e7fcd500240259e9e68d3b67c0c77c7b7c75e67
|
/views/web/dustWeb/viz/VizFields.py
|
7dc0a1137ebaea6176f70950b30909176884e5bc
|
[] |
no_license
|
dustcloud/dustlink
|
https://github.com/dustcloud/dustlink
|
2f3c881b0416ec94c8c7174b904136367e3b80d6
|
87be4f67a583a5403a38b642f35a37b93e88284d
|
refs/heads/master
| 2016-09-06T17:21:55.406434 | 2014-11-21T18:13:02 | 2014-11-21T18:13:02 | 12,114,286 | 4 | 6 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import logging
class NullHandler(logging.Handler):
    """Logging handler that silently discards every record."""
    def emit(self, record):
        pass
# Module logger: errors only, routed to a no-op handler so the library
# stays silent unless the embedding application configures logging itself.
log = logging.getLogger('VizFields')
log.setLevel(logging.ERROR)
log.addHandler(NullHandler())
import VizjQuery
class VizFields(VizjQuery.VizjQuery):
    """Fields visualization: supplies the HTML/JS template snippets that
    instantiate the client-side VizFields widget.

    The {VIZID}/{RESOURCE}/{RELOAD_PERIOD}/{AUTOREFRESH} placeholders are
    presumably substituted by the VizjQuery base class when the page is
    rendered — confirm against VizjQuery.
    """
    #======================== header ==========================================
    # Injected into the page <head>; placeholder for widget-specific CSS.
    templateHeader = '''
<style type="text/css">
</style>
'''
    #======================== body ============================================
    # Page-body snippet that creates the JavaScript VizFields object.
    templateBody = '''
<script type='text/javascript'>
autorefresh_{VIZID} = {AUTOREFRESH};
var fields = new VizFields('{VIZID}', '{RESOURCE}', {RELOAD_PERIOD}, {AUTOREFRESH});
</script>
'''
|
UTF-8
|
Python
| false | false | 2,014 |
1,013,612,311,827 |
b9c19e830f45797c9f20667889027d27a19df6dc
|
5a7a45d8546822f7a2b412aae804ba0e632c059d
|
/origins/backends/redcap_mysql.py
|
a1604052dd185004ace34213e110a47cef4b67f3
|
[
"BSD-2-Clause"
] |
permissive
|
mikepluta/origins
|
https://github.com/mikepluta/origins
|
07e0167ae7bba5d395475aab249c6a039e51627e
|
b5755d76bd047c255fe919a009c2dedad8891e73
|
refs/heads/master
| 2021-01-24T04:44:16.711847 | 2014-06-21T11:43:28 | 2014-06-21T11:43:28 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from __future__ import division, unicode_literals, absolute_import
from . import mysql, _redcap
class Client(mysql.Client):
    """Origins backend client scoped to one REDCap project.

    Introspects project structure (forms, sections, fields) by querying
    the REDCap MySQL schema (redcap_projects / redcap_metadata) directly
    rather than going through the REDCap API.
    """

    def __init__(self, project, database='redcap', **kwargs):
        # Name of the project this client serves; used to filter every
        # query below. Connection handling is inherited from mysql.Client.
        self.project_name = project
        super(Client, self).__init__(database, **kwargs)

    def projects(self):
        """Return all projects as dicts with 'name' and 'label' keys."""
        query = '''
            SELECT
                project_name,
                app_title
            FROM redcap_projects
        '''
        keys = ('name', 'label')
        projects = []
        for row in self.fetchall(query):
            attrs = dict(zip(keys, row))
            # Fall back to the internal name when no display title is set.
            if not attrs['label']:
                attrs['label'] = attrs['name']
            projects.append(attrs)
        return projects

    def project(self):
        """Return this client's project as a {'name', 'label'} dict."""
        query = '''
            SELECT
                project_name,
                app_title
            FROM redcap_projects
            WHERE project_name = %s
        '''
        keys = ('name', 'label')
        values = self.fetchone(query, [self.project_name])
        return dict(zip(keys, values))

    def forms(self):
        """Return this project's forms as dicts with name/label/order keys.

        Only rows with a menu description are form headers; ordering
        follows REDCap's field_order.
        """
        query = '''
            SELECT DISTINCT
                form_name,
                form_menu_description
            FROM redcap_metadata JOIN redcap_projects
                ON (redcap_metadata.project_id = redcap_projects.project_id)
            WHERE redcap_projects.project_name = %s
                AND form_menu_description IS NOT NULL
            ORDER BY field_order
        '''
        keys = ('name', 'label')
        forms = []
        for i, row in enumerate(self.fetchall(query, [self.project_name])):
            attrs = dict(zip(keys, row))
            # Fall back to the internal name when no menu label is set.
            if not attrs['label']:
                attrs['label'] = attrs['name']
            attrs['order'] = i
            forms.append(attrs)
        return forms

    def sections(self, form_name):
        """Return the sections of *form_name* as dicts with name/order keys.

        An implicit first section named after the form itself (order 0)
        precedes the explicit section headers found in the metadata.
        """
        query = '''
            SELECT DISTINCT
                element_preceding_header
            FROM redcap_metadata JOIN redcap_projects
                ON (redcap_metadata.project_id = redcap_projects.project_id)
            WHERE redcap_projects.project_name = %s
                AND form_name = %s
                AND element_preceding_header IS NOT NULL
            ORDER BY field_order
        '''
        keys = ('name',)
        # Fields before the first explicit header belong to this implicit
        # section named after the form.
        sections = [{
            'name': form_name,
            'order': 0,
        }]
        rows = self.fetchall(query, [self.project_name, form_name])
        for i, row in enumerate(rows):
            attrs = dict(zip(keys, row))
            attrs['order'] = i + 1
            sections.append(attrs)
        return sections

    def fields(self, form_name, section_name):
        """Return the fields of *form_name* that fall in *section_name*.

        All fields of the form are fetched in field_order; the section a
        field belongs to is tracked by carrying the most recent
        element_preceding_header forward, and only fields whose current
        section matches *section_name* are kept.
        """
        query = '''
            SELECT
                field_name,
                element_label,
                element_type,
                element_note,
                element_enum,
                branching_logic,
                element_validation_type,
                element_validation_min,
                element_validation_max,
                field_phi,
                field_req,
                element_preceding_header,
                custom_alignment,
                question_num,
                grid_name,
                field_order
            FROM redcap_metadata JOIN redcap_projects
                ON (redcap_metadata.project_id = redcap_projects.project_id)
            WHERE redcap_projects.project_name = %s
                AND form_name = %s
            ORDER BY field_order
        '''
        keys = ('name', 'label', 'type', 'note', 'choices', 'display_logic',
                'validation_type', 'validation_min', 'validation_max',
                'identifier', 'required', 'header', 'alignment', 'survey_num',
                'matrix', 'order')
        fields = []
        # Fields before any header belong to the form-named section.
        current_section = form_name
        for row in self.fetchall(query, [self.project_name, form_name]):
            attrs = dict(zip(keys, row))
            # Normalize the MySQL 0/1 flags to booleans.
            attrs['required'] = bool(attrs['required'])
            attrs['identifier'] = bool(attrs['identifier'])
            # Filter by section_name
            current_section = attrs['header'] or current_section
            if current_section != section_name:
                continue
            # Remove header attribute since it is redundant with respect to
            # the parent node.
            attrs.pop('header')
            fields.append(attrs)
        return fields
# Export class for API
# Re-exported alias so consumers can reference the package-level Origin type
# without importing the private _redcap module directly.
Origin = _redcap.Project
|
UTF-8
|
Python
| false | false | 2,014 |
9,552,007,314,645 |
6ef33e181da94612e5edf917ef3630595cd09d53
|
dc7b8f46561aa120ab3be54285be6491746f0512
|
/private/blogilainen/blogilainen.py
|
ff4b2ed56511c95ca80b8c673fe8fa3cca1f1e5f
|
[
"MIT"
] |
permissive
|
konker/blogilainen
|
https://github.com/konker/blogilainen
|
9495feae1de5f5020d3a114049fff2bc2ba98406
|
2bae2bdadf8ba60786f672bb15cd3cc8b1a03560
|
refs/heads/master
| 2016-09-05T10:28:07.822962 | 2011-12-28T23:37:30 | 2011-12-28T23:37:30 | 2,984,434 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
import logging
from lxml import etree
from source import Source
from target import Target
#import plugins.resource.enabled.get_meta_tags
# Root package under which plugin modules are resolved at import time.
BASE_PACKAGE = 'blogilainen.plugins'

class PluginException(Exception):
    """Raised when a plugin module cannot be imported or lacks a Plugin class."""
    pass
class Blogilainen(object):
    """Static-site generator.

    Walks ``source_dir`` for XML sources, applies the single XSLT
    stylesheet ``xslt_file`` once per (source, target-format) pair, and
    lets 'resource' plugins build an aggregate metadata XML file.
    """

    def __init__(self, source_dir, xslt_file, out_dir, resources_meta_file):
        # Where the XML source documents live.
        self.source_dir = source_dir
        # Master XSLT stylesheet applied to every source document.
        self.xslt_file = xslt_file
        # Root directory for generated output files.
        self.out_dir = out_dir
        # Path of the aggregate resources metadata XML written by generate().
        self.resources_meta_file = resources_meta_file
        self.sources = []
        self._load_sources()
        # Maps fully-qualified module name -> plugin instance.
        self.resource_plugins = {}
        self._load_resource_plugins()
        logging.info("source_dir: %s" % self.source_dir)
        logging.info("xslt_file: %s" % self.xslt_file)
        logging.info("out_dir: %s" % self.out_dir)
        logging.info("resources_meta_file: %s" % self.resources_meta_file)

    def generate(self):
        """Run the full build: metadata file first, then one output per target."""
        # generate the aggregate resources meta data and write it to file
        # XXX: param to govern overwriting of this file?
        self.generate_resources_meta()
        # read in master xslt file
        transform = etree.XSLT(etree.parse(self.xslt_file))
        # go through each file and generate output
        for s in self.sources:
            xml = etree.parse(s.source)
            if len(s.targets) == 0:
                logging.info("WARNING: %s has no target formats" % s.source)
            # generate for each target
            for ext,t in s.targets.iteritems():
                # make sure that physical_path exists
                if not os.path.exists(t.physical_path):
                    os.makedirs(t.physical_path)
                # execute XSLT transform and write output file
                with open(t.out, 'w') as fh:
                    # The target extension is passed to the stylesheet as the
                    # 'format' parameter so one XSLT can emit several formats.
                    content = etree.tostring(transform(xml, format=etree.XSLT.strparam(t.ext)))
                    if content:
                        fh.write(content)
                        logging.info("OK: %s -> %s" % (s.source, t.out))
                    else:
                        logging.info("FAILED: %s -> %s" % (s.source, t.out))

    def generate_resources_meta(self):
        """Build the <resources> tree via the resource plugins and write it.

        Each source gets one <resource> element that every plugin may
        annotate in turn.  Returns the root element.
        """
        resources_meta = etree.Element('resources')
        for s in self.sources:
            resource = etree.Element('resource')
            for k, plugin in self.resource_plugins.iteritems():
                plugin.run(s, resource)
            resources_meta.append(resource)
        with open(self.resources_meta_file, 'w') as fh:
            fh.write(etree.tostring(resources_meta, pretty_print=True))
        return resources_meta

    def _load_sources(self):
        """Wrap every file under source_dir in a Source with its Targets.

        Target formats are read from the document's dcterms.Format meta
        tags; currently only the first declared format is used.
        """
        for source_file_physical_path, dirnames, filenames in os.walk(self.source_dir):
            # get relative path starting from self.source_dir
            source_file_relative_path = os.path.relpath(source_file_physical_path, self.source_dir)
            get_formats = etree.XPath("//meta[@name='dcterms.Format']/@content")
            for f in filenames:
                # append a Source object to sources files list
                s = Source(os.path.abspath(source_file_physical_path), source_file_relative_path, f)
                xml = etree.parse(s.source)
                formats = get_formats(xml)
                for ext in formats:
                    physical_path = os.path.abspath(os.path.join(self.out_dir, s.relative_path))
                    target = Target(physical_path, s.relative_path, s.basename, ext)
                    s.add_target(target)
                    # XXX: only use one format for now
                    break
                self.sources.append(s)
        logging.debug(self.sources)

    def _load_resource_plugins(self):
        """Discover and instantiate all enabled 'resource' plugins."""
        self._load_plugins('resource', self.resource_plugins)

    def _load_plugins(self, plugin_type, store):
        """Import every module in plugins/<plugin_type>/enabled/.

        An instance of each module's ``Plugin`` class is stored in *store*,
        keyed by the fully-qualified module name.  Raises PluginException
        when a module cannot be imported or has no Plugin class.
        """
        # read in and import available resource plugins
        module = None
        for py in os.listdir(os.path.join(os.path.dirname(__file__), 'plugins', plugin_type, 'enabled')):
            basename,ext = os.path.splitext(py)
            if ext != '.py' or py == '__init__.py':
                continue
            module = "%s.%s.enabled.%s" % (BASE_PACKAGE, plugin_type, basename)
            logging.info("Found %s plugin: %s" % (plugin_type, module))
            cls = 'Plugin'
            try:
                __import__(module, locals(), globals())
            except:
                # NOTE(review): bare except hides the underlying import error;
                # consider logging the exception before re-raising.
                logging.error('Could not import module %s' % module)
                raise PluginException('Could not import module %s' % module)
            if sys.modules.has_key(module):
                if hasattr(sys.modules[module], cls):
                    # this is where the magic happens
                    store[module] = getattr(sys.modules[module], cls)()
                else:
                    logging.error('Module has no class %s' % cls)
                    raise PluginException('Module has no class %s' % cls)
            else:
                logging.error('Could not import module %s' % module)
                raise PluginException('Could not import module %s' % module)
        del module
|
UTF-8
|
Python
| false | false | 2,011 |
5,377,299,062,341 |
972e4f55c1ad1be8248daffb705433d3199c1825
|
8ab3136ed7ab299229849e5462bde570f018939a
|
/msnlive/addfriend/loadaccount.py
|
bbc2365d9a6cee727de5ba7274236ac1eef2be3b
|
[] |
no_license
|
waitingzeng/ttwait
|
https://github.com/waitingzeng/ttwait
|
099e05456bcd99099c433ed18107ca53b0a31880
|
02d7b734478899830149d97ab4dd376d1136598e
|
refs/heads/master
| 2021-01-22T02:39:30.390703 | 2013-03-25T16:30:28 | 2013-03-25T16:30:28 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/bin/env python
#coding=utf-8
import os
import sys
from pycomm.log import log, open_log, open_debug
from pycomm.libs.rpc import MagicClient
from pycomm.utils.dict4ini import DictIni
from optparse import OptionParser
def load():
    """Fetch add-friend accounts from the account server and dump them to a file.

    Command line: -i/--conf <ini file>, -t/--target <output path>,
    -n/--num <max account count, default 100000>.  Refuses to overwrite
    an existing target file and exits(-1) when required options are missing.
    """
    open_log("load account")
    open_debug()
    parser = OptionParser()
    parser.add_option("-i", "--conf", dest="conf", action="store", type="string")
    parser.add_option("-n", "--num", dest="num", action="store", type="int")
    parser.add_option("-t", "--target", dest="target", action="store", type="string")
    options, args = parser.parse_args(sys.argv[1:])
    # Both the config file and the output path are mandatory.
    if not options.conf or not options.target:
        parser.print_help()
        sys.exit(-1)
    conf = DictIni(options.conf)
    num = options.num or 100000
    if os.path.exists(options.target):
        # Never clobber a previous export.
        log.error("%s already exists", options.target)
        return
    client = MagicClient(conf.account_server[0], int(conf.account_server[1]))
    total = 0
    data = []  # keep defined even when the loop body never runs
    f = open(options.target, 'w')
    log.trace("begin load data")
    try:
        while total < num:
            data = client.get_add_friends()
            if data:
                total += len(data)
                # NOTE(review): batches are written without a separator, so the
                # last line of one batch runs into the first line of the next;
                # preserved as-is, but confirm the expected file format.
                f.write('\n'.join([x.strip() for x in data]))
                log.trace("load account success %s", total)
            elif not data and not total:
                # Very first fetch returned nothing: give up.
                # (An empty fetch after some progress retries forever --
                # NOTE(review): confirm that is the intended behaviour.)
                log.trace("load account %s fail", options.target)
                break
    finally:
        # Close the handle even if the server call raises.
        f.close()
    log.trace("load accounts %s success %s", len(data), total)
if __name__ == "__main__":
load()
|
UTF-8
|
Python
| false | false | 2,013 |
2,138,893,715,447 |
2af85ee9f2fc2d6d87dfc05763ad01e3e34fcb7a
|
bab40385ada9c9fa8363c82d7c48a88f3af63a9b
|
/extractor/src/main/py/start.py
|
18c83bd0a5e29e72d441180670a12babd0c863b8
|
[
"MIT"
] |
permissive
|
alex-dow/psiopic
|
https://github.com/alex-dow/psiopic
|
48ce152dc9e67cdf473678a702c9ea50607b9993
|
5037a2d57eb7ca4956e9b98ffa0928bab8fbfd7d
|
refs/heads/master
| 2021-01-13T02:32:12.162517 | 2012-12-06T07:19:11 | 2012-12-06T07:19:11 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import optparse
from optparse import OptionGroup
from service import main
import cherrypy
import sys
import pickle
import os
from classifier import Classifier
from classifier import ClassifierException
if os.path.isdir("utils/"):
sys.path.append("utils/")
optparser = optparse.OptionParser()
optHttpGroup = OptionGroup(optparser, "HTTP Server")
optHttpGroup.add_option("--listen-ip", action="store", dest="listen_ip",
help="IP Address to listen for connections, defaults to 0.0.0.0",
default="0.0.0.0"
)
optHttpGroup.add_option("--port", action="store", dest="http_port",
help="HTTP Port number, defaults to 21212",
default=21212, type="int"
)
optClassifierGroup = OptionGroup(optparser, "Classifier")
optClassifierGroup.add_option("--on-demand", action="store_true", dest="classifier_on_demand",
help="If enabled, the classifier will be loaded on demand, instead of in memory when the service is started."
)
optClassifierGroup.add_option("--pki-file", action="store", dest="pki_file",
help="Location of the classifier PKI file"
)
optparser.add_option_group(optHttpGroup)
optparser.add_option_group(optClassifierGroup)
def verify_required_opts(options):
    """Exit with status 1 unless the mandatory --pki-file option was given.

    *options* is the optparse values object; only its ``pki_file``
    attribute is inspected.
    """
    # `is None` distinguishes "not supplied" from other falsy values.
    if options.pki_file is None:
        # Parenthesized single-argument print is valid on Python 2 and 3.
        print("You must supply the location of the PKI file")
        sys.exit(1)
if __name__ == '__main__':
    (options, optargs) = optparser.parse_args()
    verify_required_opts(options)
    # CherryPy configuration: bind address/port plus HTTP-method dispatch
    # for everything under /.
    conf = {
        'global': {
            'server.socket_host': options.listen_ip,
            'server.socket_port': options.http_port
        },
        '/': {
            'request.dispatch': cherrypy.dispatch.MethodDispatcher()
        }
    }
    # NOTE(review): `kl` is computed but never used -- presumably it was
    # meant to gate on-demand loading of the classifier; confirm before use.
    kl = not options.classifier_on_demand
    print "Loading classifier (you should probably go get that cup of coffee now)..."
    classifier = Classifier()
    classifier.loadClassifier(options.pki_file)
    # Minimal CherryPy object tree: /extract is the only endpoint.
    class Root():
        pass
    root = Root()
    root.extract = main.Extract(classifier)
    cherrypy.quickstart(root, '/', conf)
|
UTF-8
|
Python
| false | false | 2,012 |
13,211,319,434,937 |
41db732809b856f6132d06dfa9e7bbcb5d31654b
|
3a85089c2498ff04d1b9bce17a4b8bf6cf2380c9
|
/FastSimulation/L1CaloTriggerProducer/python/__init__.py
|
ebea17129bd7729d4c08f46b8b629fbd0efdfeef
|
[] |
no_license
|
sextonkennedy/cmssw-ib
|
https://github.com/sextonkennedy/cmssw-ib
|
c2e85b5ffa1269505597025e55db4ffee896a6c3
|
e04f4c26752e0775bd3cffd3a936b288ee7b0268
|
HEAD
| 2016-09-01T20:09:33.163593 | 2013-04-26T12:05:17 | 2013-04-29T16:40:30 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#Automatically created by SCRAM
import os
# Extend this package's search path so the cfipython-generated configuration
# modules for FastSimulation/L1CaloTriggerProducer resolve alongside the
# hand-written ones.
__path__.append(os.path.dirname(os.path.abspath(__file__).rsplit('/FastSimulation/L1CaloTriggerProducer/',1)[0])+'/cfipython/slc6_amd64_gcc480/FastSimulation/L1CaloTriggerProducer')
|
UTF-8
|
Python
| false | false | 2,013 |
15,625,091,049,024 |
4ea07983b77ecbc4e00c6f4f8c8d9fc93e3b457c
|
dca91c476ec3a4dfc4e3c0c68b99a7e9a70724ee
|
/Version 0.5/mainWindow.py
|
bb5c6cda09324e411860bcd37ad3a52df92f7741
|
[] |
no_license
|
rriem/debitse
|
https://github.com/rriem/debitse
|
6d1038fc14e0875f21ce8abfcc9a75cc0a455e39
|
e69f4a51f7a41de13307cf95260aab0c2403b7fb
|
refs/heads/master
| 2021-01-13T14:29:37.371021 | 2009-03-08T21:24:59 | 2009-03-08T21:24:59 | 32,127,578 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/Users/rriem/Desktop/Version 0.5/mainWindow.ui'
#
# Created: Sat Feb 14 17:31:36 2009
# by: PyQt4 UI code generator 4.4.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
class Ui_DebitSE(object):
    """pyuic4-generated UI for the DebitSE main window.

    Edit the .ui file and regenerate instead of changing this class by
    hand -- the pyuic4 header warns that manual changes will be lost.
    """

    def setupUi(self, DebitSE):
        """Build the widget tree: window, menus, status bar, tool bar, actions."""
        DebitSE.setObjectName("DebitSE")
        DebitSE.resize(550, 400)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap("icons/debit.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
        DebitSE.setWindowIcon(icon)
        DebitSE.setAutoFillBackground(False)
        DebitSE.setUnifiedTitleAndToolBarOnMac(False)
        # Central widget and menu bar.
        self.centralwidget = QtGui.QWidget(DebitSE)
        self.centralwidget.setGeometry(QtCore.QRect(0, 66, 550, 312))
        self.centralwidget.setObjectName("centralwidget")
        DebitSE.setCentralWidget(self.centralwidget)
        self.menubar = QtGui.QMenuBar(DebitSE)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 550, 22))
        self.menubar.setObjectName("menubar")
        self.menuFile = QtGui.QMenu(self.menubar)
        self.menuFile.setObjectName("menuFile")
        self.menuEdit = QtGui.QMenu(self.menubar)
        self.menuEdit.setObjectName("menuEdit")
        self.menuCalc = QtGui.QMenu(self.menubar)
        self.menuCalc.setTearOffEnabled(False)
        self.menuCalc.setObjectName("menuCalc")
        self.menuAide = QtGui.QMenu(self.menubar)
        self.menuAide.setObjectName("menuAide")
        DebitSE.setMenuBar(self.menubar)
        self.statusbar = QtGui.QStatusBar(DebitSE)
        self.statusbar.setGeometry(QtCore.QRect(0, 378, 550, 22))
        self.statusbar.setObjectName("statusbar")
        DebitSE.setStatusBar(self.statusbar)
        self.toolBar = QtGui.QToolBar(DebitSE)
        self.toolBar.setGeometry(QtCore.QRect(0, 22, 550, 44))
        self.toolBar.setObjectName("toolBar")
        DebitSE.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar)
        # Actions (one QAction per menu/toolbar command, each with its icon).
        self.actionAbout_Debit_SE = QtGui.QAction(DebitSE)
        self.actionAbout_Debit_SE.setShortcutContext(QtCore.Qt.WidgetShortcut)
        self.actionAbout_Debit_SE.setVisible(True)
        self.actionAbout_Debit_SE.setMenuRole(QtGui.QAction.AboutRole)
        self.actionAbout_Debit_SE.setIconVisibleInMenu(False)
        self.actionAbout_Debit_SE.setObjectName("actionAbout_Debit_SE")
        self.actionPreferences = QtGui.QAction(DebitSE)
        self.actionPreferences.setEnabled(False)
        self.actionPreferences.setMenuRole(QtGui.QAction.PreferencesRole)
        self.actionPreferences.setIconVisibleInMenu(False)
        self.actionPreferences.setObjectName("actionPreferences")
        self.actionQuit = QtGui.QAction(DebitSE)
        icon1 = QtGui.QIcon()
        icon1.addPixmap(QtGui.QPixmap("icons/quit.PNG"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionQuit.setIcon(icon1)
        self.actionQuit.setShortcutContext(QtCore.Qt.WindowShortcut)
        self.actionQuit.setMenuRole(QtGui.QAction.TextHeuristicRole)
        self.actionQuit.setObjectName("actionQuit")
        self.actionPrint = QtGui.QAction(DebitSE)
        icon2 = QtGui.QIcon()
        icon2.addPixmap(QtGui.QPixmap("icons/print.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionPrint.setIcon(icon2)
        self.actionPrint.setObjectName("actionPrint")
        self.actionUndo = QtGui.QAction(DebitSE)
        self.actionUndo.setEnabled(False)
        icon3 = QtGui.QIcon()
        icon3.addPixmap(QtGui.QPixmap("icons/undo.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionUndo.setIcon(icon3)
        self.actionUndo.setObjectName("actionUndo")
        self.actionCut = QtGui.QAction(DebitSE)
        icon4 = QtGui.QIcon()
        icon4.addPixmap(QtGui.QPixmap("icons/cut.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionCut.setIcon(icon4)
        self.actionCut.setObjectName("actionCut")
        self.actionCopy = QtGui.QAction(DebitSE)
        icon5 = QtGui.QIcon()
        icon5.addPixmap(QtGui.QPixmap("icons/copy.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionCopy.setIcon(icon5)
        self.actionCopy.setObjectName("actionCopy")
        self.actionPaste = QtGui.QAction(DebitSE)
        icon6 = QtGui.QIcon()
        icon6.addPixmap(QtGui.QPixmap("icons/paste.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionPaste.setIcon(icon6)
        self.actionPaste.setObjectName("actionPaste")
        self.actionPatient_Data = QtGui.QAction(DebitSE)
        icon7 = QtGui.QIcon()
        icon7.addPixmap(QtGui.QPixmap("icons/patient.PNG"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionPatient_Data.setIcon(icon7)
        self.actionPatient_Data.setObjectName("actionPatient_Data")
        self.actionDrug_Data = QtGui.QAction(DebitSE)
        icon8 = QtGui.QIcon()
        icon8.addPixmap(QtGui.QPixmap("icons/drug.PNG"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionDrug_Data.setIcon(icon8)
        self.actionDrug_Data.setObjectName("actionDrug_Data")
        self.actionCalc = QtGui.QAction(DebitSE)
        icon9 = QtGui.QIcon()
        icon9.addPixmap(QtGui.QPixmap("icons/calculator.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionCalc.setIcon(icon9)
        self.actionCalc.setObjectName("actionCalc")
        self.actionExpress_Calc = QtGui.QAction(DebitSE)
        icon10 = QtGui.QIcon()
        icon10.addPixmap(QtGui.QPixmap("icons/calculator2.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionExpress_Calc.setIcon(icon10)
        self.actionExpress_Calc.setObjectName("actionExpress_Calc")
        self.actionTable = QtGui.QAction(DebitSE)
        icon11 = QtGui.QIcon()
        icon11.addPixmap(QtGui.QPixmap("icons/table.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionTable.setIcon(icon11)
        self.actionTable.setObjectName("actionTable")
        # Menu population.
        self.menuFile.addAction(self.actionAbout_Debit_SE)
        self.menuFile.addSeparator()
        self.menuFile.addAction(self.actionPreferences)
        self.menuFile.addSeparator()
        self.menuFile.addAction(self.actionPrint)
        self.menuFile.addSeparator()
        self.menuFile.addAction(self.actionQuit)
        self.menuEdit.addAction(self.actionUndo)
        self.menuEdit.addSeparator()
        self.menuEdit.addAction(self.actionCut)
        self.menuEdit.addAction(self.actionCopy)
        self.menuEdit.addAction(self.actionPaste)
        self.menuCalc.addAction(self.actionPatient_Data)
        self.menuCalc.addAction(self.actionDrug_Data)
        self.menuCalc.addSeparator()
        self.menuCalc.addAction(self.actionCalc)
        self.menuCalc.addSeparator()
        self.menuCalc.addAction(self.actionExpress_Calc)
        self.menuCalc.addAction(self.actionTable)
        self.menubar.addAction(self.menuFile.menuAction())
        self.menubar.addAction(self.menuEdit.menuAction())
        self.menubar.addAction(self.menuCalc.menuAction())
        self.menubar.addAction(self.menuAide.menuAction())
        # Tool bar population mirrors the menus.
        self.toolBar.addAction(self.actionQuit)
        self.toolBar.addSeparator()
        self.toolBar.addAction(self.actionPrint)
        self.toolBar.addSeparator()
        self.toolBar.addAction(self.actionCut)
        self.toolBar.addAction(self.actionCopy)
        self.toolBar.addAction(self.actionPaste)
        self.toolBar.addSeparator()
        self.toolBar.addAction(self.actionPatient_Data)
        self.toolBar.addAction(self.actionDrug_Data)
        self.toolBar.addAction(self.actionCalc)
        self.toolBar.addSeparator()
        self.toolBar.addAction(self.actionExpress_Calc)
        self.toolBar.addAction(self.actionTable)
        self.retranslateUi(DebitSE)
        QtCore.QMetaObject.connectSlotsByName(DebitSE)

    def retranslateUi(self, DebitSE):
        """Install all user-visible (French) strings via Qt's translate()."""
        DebitSE.setWindowTitle(QtGui.QApplication.translate("DebitSE", "Débit SE", None, QtGui.QApplication.UnicodeUTF8))
        DebitSE.setToolTip(QtGui.QApplication.translate("DebitSE", "Débit SE", None, QtGui.QApplication.UnicodeUTF8))
        DebitSE.setStatusTip(QtGui.QApplication.translate("DebitSE", "Débit SE", None, QtGui.QApplication.UnicodeUTF8))
        DebitSE.setWhatsThis(QtGui.QApplication.translate("DebitSE", "This is the main window of the application", None, QtGui.QApplication.UnicodeUTF8))
        DebitSE.setAccessibleName(QtGui.QApplication.translate("DebitSE", "Débit SE", None, QtGui.QApplication.UnicodeUTF8))
        DebitSE.setAccessibleDescription(QtGui.QApplication.translate("DebitSE", "Débit SE", None, QtGui.QApplication.UnicodeUTF8))
        self.menubar.setStatusTip(QtGui.QApplication.translate("DebitSE", "The Menu bar", None, QtGui.QApplication.UnicodeUTF8))
        self.menuFile.setToolTip(QtGui.QApplication.translate("DebitSE", "File menu: print or quit", None, QtGui.QApplication.UnicodeUTF8))
        self.menuFile.setStatusTip(QtGui.QApplication.translate("DebitSE", "File menu", None, QtGui.QApplication.UnicodeUTF8))
        self.menuFile.setTitle(QtGui.QApplication.translate("DebitSE", "Fichier", None, QtGui.QApplication.UnicodeUTF8))
        self.menuEdit.setToolTip(QtGui.QApplication.translate("DebitSE", "Edit menu", None, QtGui.QApplication.UnicodeUTF8))
        self.menuEdit.setStatusTip(QtGui.QApplication.translate("DebitSE", "Edit menu", None, QtGui.QApplication.UnicodeUTF8))
        self.menuEdit.setTitle(QtGui.QApplication.translate("DebitSE", "Édition", None, QtGui.QApplication.UnicodeUTF8))
        self.menuCalc.setStatusTip(QtGui.QApplication.translate("DebitSE", "Calculation menu", None, QtGui.QApplication.UnicodeUTF8))
        self.menuCalc.setTitle(QtGui.QApplication.translate("DebitSE", "Calculs", None, QtGui.QApplication.UnicodeUTF8))
        self.menuAide.setTitle(QtGui.QApplication.translate("DebitSE", "Aide", None, QtGui.QApplication.UnicodeUTF8))
        self.statusbar.setToolTip(QtGui.QApplication.translate("DebitSE", "This is the <i>Status bar</i> of the application.", None, QtGui.QApplication.UnicodeUTF8))
        self.statusbar.setWhatsThis(QtGui.QApplication.translate("DebitSE", "This is the <i>Status bar</i> of the application", None, QtGui.QApplication.UnicodeUTF8))
        self.toolBar.setWindowTitle(QtGui.QApplication.translate("DebitSE", "toolBar", None, QtGui.QApplication.UnicodeUTF8))
        self.actionAbout_Debit_SE.setText(QtGui.QApplication.translate("DebitSE", "À propos de Débit SE", None, QtGui.QApplication.UnicodeUTF8))
        self.actionAbout_Debit_SE.setStatusTip(QtGui.QApplication.translate("DebitSE", "À propos de Débit SE", None, QtGui.QApplication.UnicodeUTF8))
        self.actionPreferences.setText(QtGui.QApplication.translate("DebitSE", "Préférences", None, QtGui.QApplication.UnicodeUTF8))
        self.actionQuit.setText(QtGui.QApplication.translate("DebitSE", "Quitter", None, QtGui.QApplication.UnicodeUTF8))
        self.actionQuit.setStatusTip(QtGui.QApplication.translate("DebitSE", "Quitter Débit SE", None, QtGui.QApplication.UnicodeUTF8))
        self.actionQuit.setShortcut(QtGui.QApplication.translate("DebitSE", "Ctrl+Q", None, QtGui.QApplication.UnicodeUTF8))
        self.actionPrint.setText(QtGui.QApplication.translate("DebitSE", "Imprimer...", None, QtGui.QApplication.UnicodeUTF8))
        self.actionPrint.setToolTip(QtGui.QApplication.translate("DebitSE", "Open the <i>Print diaglog...</i>", None, QtGui.QApplication.UnicodeUTF8))
        self.actionPrint.setStatusTip(QtGui.QApplication.translate("DebitSE", "Print dialog", None, QtGui.QApplication.UnicodeUTF8))
        self.actionPrint.setShortcut(QtGui.QApplication.translate("DebitSE", "Ctrl+P", None, QtGui.QApplication.UnicodeUTF8))
        self.actionUndo.setText(QtGui.QApplication.translate("DebitSE", "Annuler", None, QtGui.QApplication.UnicodeUTF8))
        self.actionUndo.setStatusTip(QtGui.QApplication.translate("DebitSE", "Annule la dernière commande.", None, QtGui.QApplication.UnicodeUTF8))
        self.actionUndo.setShortcut(QtGui.QApplication.translate("DebitSE", "Ctrl+Z", None, QtGui.QApplication.UnicodeUTF8))
        self.actionCut.setText(QtGui.QApplication.translate("DebitSE", "Couper", None, QtGui.QApplication.UnicodeUTF8))
        self.actionCut.setStatusTip(QtGui.QApplication.translate("DebitSE", "Coupe la sélection dans le presse papier.", None, QtGui.QApplication.UnicodeUTF8))
        self.actionCut.setShortcut(QtGui.QApplication.translate("DebitSE", "Ctrl+X", None, QtGui.QApplication.UnicodeUTF8))
        self.actionCopy.setText(QtGui.QApplication.translate("DebitSE", "Copier", None, QtGui.QApplication.UnicodeUTF8))
        self.actionCopy.setStatusTip(QtGui.QApplication.translate("DebitSE", "Copie la sélection dans le presse papier.", None, QtGui.QApplication.UnicodeUTF8))
        self.actionCopy.setShortcut(QtGui.QApplication.translate("DebitSE", "Ctrl+C", None, QtGui.QApplication.UnicodeUTF8))
        self.actionPaste.setText(QtGui.QApplication.translate("DebitSE", "Coller", None, QtGui.QApplication.UnicodeUTF8))
        self.actionPaste.setStatusTip(QtGui.QApplication.translate("DebitSE", "Colle le presse papier", None, QtGui.QApplication.UnicodeUTF8))
        self.actionPaste.setShortcut(QtGui.QApplication.translate("DebitSE", "Ctrl+V", None, QtGui.QApplication.UnicodeUTF8))
        self.actionPatient_Data.setText(QtGui.QApplication.translate("DebitSE", "Données patients...", None, QtGui.QApplication.UnicodeUTF8))
        self.actionPatient_Data.setToolTip(QtGui.QApplication.translate("DebitSE", "Open the <i>Patient data</i> entry form", None, QtGui.QApplication.UnicodeUTF8))
        self.actionPatient_Data.setStatusTip(QtGui.QApplication.translate("DebitSE", "Patient data entry form", None, QtGui.QApplication.UnicodeUTF8))
        self.actionPatient_Data.setWhatsThis(QtGui.QApplication.translate("DebitSE", "Open the <i>Patient data</i> entry form", None, QtGui.QApplication.UnicodeUTF8))
        self.actionDrug_Data.setText(QtGui.QApplication.translate("DebitSE", "Données médicament...", None, QtGui.QApplication.UnicodeUTF8))
        self.actionDrug_Data.setToolTip(QtGui.QApplication.translate("DebitSE", "Open the <i>Drug data</i> entry form", None, QtGui.QApplication.UnicodeUTF8))
        self.actionDrug_Data.setStatusTip(QtGui.QApplication.translate("DebitSE", "Drug data entry form", None, QtGui.QApplication.UnicodeUTF8))
        self.actionDrug_Data.setWhatsThis(QtGui.QApplication.translate("DebitSE", "Open the <i>Drug data</i> entry form", None, QtGui.QApplication.UnicodeUTF8))
        self.actionCalc.setText(QtGui.QApplication.translate("DebitSE", "Calcul", None, QtGui.QApplication.UnicodeUTF8))
        self.actionExpress_Calc.setText(QtGui.QApplication.translate("DebitSE", "Calculs express", None, QtGui.QApplication.UnicodeUTF8))
        self.actionTable.setText(QtGui.QApplication.translate("DebitSE", "Tableau", None, QtGui.QApplication.UnicodeUTF8))
        self.actionTable.setStatusTip(QtGui.QApplication.translate("DebitSE", "Table tool", None, QtGui.QApplication.UnicodeUTF8))
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
DebitSE = QtGui.QMainWindow()
ui = Ui_DebitSE()
ui.setupUi(DebitSE)
DebitSE.show()
sys.exit(app.exec_())
|
UTF-8
|
Python
| false | false | 2,009 |
3,298,534,907,487 |
30ee330a42f7312f5fddeb2b09e4eea3ed66e56a
|
3ce34310fb002241fd612e43de5fdb02f44a3387
|
/gameobject.py
|
14725eb5a49bc63f62285caf6f0024a9f6022d05
|
[] |
no_license
|
pxf/pxf-gamejam
|
https://github.com/pxf/pxf-gamejam
|
39c30252db18c711a9d034ed78ede9762136c0ac
|
465becb7383ef73e1adc6b82b60fe6ec6eb44c53
|
refs/heads/master
| 2020-04-03T08:36:25.191765 | 2010-02-03T14:47:41 | 2010-02-03T14:47:41 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
import pygame
import util
import pymunk as pm
import random
import animation
def create_ball(self, pos, mass=1.0, radius=8.0):
    # Build a dynamic circular pymunk body/shape pair at *pos*; nothing is
    # added to any space -- the caller registers the pair itself.
    # NOTE(review): *self* is unused; callers pass their object in even
    # though this is a module-level function.
    inertia = pm.moment_for_circle(mass, radius, 0.0, pm.Vec2d(0, 0))
    body = pm.Body(mass, inertia)
    body.position = pm.Vec2d(pos)
    shape = pm.Circle(body, radius, pm.Vec2d(0, 0))
    shape.friction = 1.5
    return body, shape
def create_poly(space, points, mass = -5.0, pos = (0,0)):
    # Build a polygonal pymunk body/shape pair; the caller decides whether
    # to add it to *space* (this function never does).
    # NOTE(review): default mass of -5.0 looks suspicious -- confirm all
    # callers override it.
    inertia = pm.moment_for_poly(mass, points, pm.Vec2d(0, 0))
    poly_body = pm.Body(mass, inertia)
    poly_body.position = pm.Vec2d(pos)
    poly_shape = pm.Poly(poly_body, points, pm.Vec2d(0, 0))
    poly_shape.friction = 0.5
    return poly_body, poly_shape
def create_box(space, pos, size = 10, mass = 5.0):
    # Axis-aligned square of half-extent *size*, delegated to create_poly.
    corners = [pm.Vec2d(corner) for corner in
               [(-size, -size), (-size, size), (size, size), (size, -size)]]
    return create_poly(space, corners, mass = mass, pos = pos)
# Object/collision type identifiers, 0..12.  These double as pymunk
# collision_type values, so the order here is load-bearing.
(OBJECT_TYPE_FAIL,
OBJECT_TYPE_PLAYER,
OBJECT_TYPE_GOAL,
OBJECT_TYPE_INFO,
OBJECT_TYPE_KEY_RED,
OBJECT_TYPE_KEY_GREEN,
OBJECT_TYPE_KEY_BLUE,
OBJECT_TYPE_RED,
OBJECT_TYPE_GREEN,
OBJECT_TYPE_BLUE,
OBJECT_TYPE_SPLOSION,
OBJECT_TYPE_BW,
OBJECT_TYPE_ALL) = range(13)
class GameObject:
    # Base class for level entities backed by a pymunk body.

    def __init__(self, pos, sprite, space, obj_type, mass = 5.0):
        # Screen-space position; refreshed each frame from the physics body.
        self.draw_pos = util.vec2(0, 0)
        self.sprite = sprite
        self.object_type = obj_type

    def move(self, x, y):
        # No-op hook kept for subclasses.
        pass

    def update(self, camera_pos):
        # Lock rotation and project the body into camera space.
        self.body.angle = 0
        offset_x = self.body.position.x - camera_pos.x
        offset_y = self.body.position.y - camera_pos.y
        self.draw_pos = util.vec2(offset_x, offset_y)

    def draw(self, canvas):
        # BW objects are drawn plainly; everything else uses additive blend.
        target = self.draw_pos.get()
        if self.object_type == OBJECT_TYPE_BW:
            canvas.blit(self.sprite.image, target, None)
        else:
            canvas.blit(self.sprite.image, target, None, pygame.BLEND_MAX)
class StaticBlock(GameObject):
    # Immovable 16x16 level tile registered as static collision geometry.

    def __init__(self, pos, sprite, space, obj_type):
        GameObject.__init__(self, pos, sprite, space, obj_type, pm.inf)
        self.body, self.shape = create_box(space, (pos.x, pos.y), 8, pm.inf)
        self.shape.collision_type = obj_type
        space.add_static(self.shape)

    def update(self, camera_pos):
        # Nothing beyond the base-class camera projection.
        GameObject.update(self, camera_pos)
class MovableBlock(GameObject):
    # Pushable 16x16 block simulated with a finite mass.

    def __init__(self, pos, sprite, space, obj_type):
        self.is_movable = True
        GameObject.__init__(self, pos, sprite, space, obj_type, pm.inf)
        self.body, self.shape = create_box(space, (pos.x, pos.y), 8, 12.0)
        self.shape.collision_type = obj_type
        space.add(self.body, self.shape)

    def update(self, camera_pos):
        GameObject.update(self, camera_pos)

    def draw(self, canvas):
        # Identical to GameObject.draw, kept as an explicit override.
        target = self.draw_pos.get()
        if self.object_type == OBJECT_TYPE_BW:
            canvas.blit(self.sprite.image, target, None)
        else:
            canvas.blit(self.sprite.image, target, None, pygame.BLEND_MAX)
class InfoBlock(GameObject):
    # Interactive sign: shows an animated speech bubble while activated,
    # auto-hiding after a 2-second cool-down.
    def __init__(self, pos, image, space,anim_name = "",num_frames = 1,sequence = [0,1],frequency = 8):
        # NOTE(review): mutable default `sequence=[0,1]` is shared across all
        # calls -- harmless only if the animation code never mutates it.
        self.is_movable = True
        GameObject.__init__(self, pos, util.to_sprite(util.load_image("data/info_sign0.png")), space, OBJECT_TYPE_INFO, pm.inf)
        # NOTE(review): `frequency` is passed where create_box expects the
        # box half-size -- looks unintended; confirm against callers.
        self.body, self.shape = create_box(space, (pos.x, pos.y), frequency, 12.0)
        self.shape.collision_type = OBJECT_TYPE_INFO
        # Image rendered above the sign while the bubble is visible.
        self.info_bubble = util.load_image(image)
        space.add_static(self.shape)
        self._show_info = False
        # Seconds remaining until the bubble hides again.
        self.cool_down = 0.0
        if not anim_name == "":
            self.animation = animation.new_animation(anim_name,"png",num_frames,frequency,sequence)
            self.animation.play()
    def update(self, camera_pos,dt):
        # dt arrives in milliseconds; the cool-down counts in seconds.
        if (self.cool_down > 0.0):
            self.cool_down -= dt / 1000.0
            if (self.cool_down <= 0.0):
                self.deactivate()
        GameObject.update(self, camera_pos)
        self.animation.update(dt)
    def draw(self,canvas):
        canvas.blit(self.sprite.image, self.draw_pos.get(), None,pygame.BLEND_MAX)
        if self._show_info:
            if not self.animation.playing:
                self.animation.play()
            # Anchor the bubble above and to the left of the sign.
            pos = (self.draw_pos.x - self.info_bubble.get_rect().width + 16,
            self.draw_pos.y - self.info_bubble.get_rect().height)
            self.animation.draw(canvas,pos,True)
        else:
            if self.animation.playing:
                self.animation.stop()
    def activate(self):
        #called when player lala
        # Show the bubble and (re)start the 2-second cool-down.
        self.cool_down = 2.0
        self._show_info = True
    def deactivate(self):
        self._show_info = False
# Shared explosion sprite sheets, loaded once at import time so every
# SplosionBlock instance can reuse them.
splosion_red = util.to_sprite(util.load_image("data/red_explosion.png"))
splosion_green = util.to_sprite(util.load_image("data/green_explosion.png"))
splosion_blue = util.to_sprite(util.load_image("data/blue_explosion.png"))
class SplosionBlock(GameObject):
    # Short-lived explosion particle launched with a random impulse.

    def __init__(self, pos, space, color_type):
        # Pick the particle sheet matching the exploding object's colour.
        if color_type == OBJECT_TYPE_RED:
            t_sprite = splosion_red
        elif color_type == OBJECT_TYPE_GREEN:
            t_sprite = splosion_green
        else:
            t_sprite = splosion_blue
        GameObject.__init__(self, pos, t_sprite, space, OBJECT_TYPE_SPLOSION, pm.inf)
        self.body, self.shape = create_ball(self, (pos.x, pos.y), mass=0.6, radius=0.4)
        self.shape.collision_type = OBJECT_TYPE_SPLOSION
        space.add(self.body, self.shape)
        # Pick one of the 8 frames laid out as a 4x2 grid of 4px cells.
        self.frame_id = random.randint(0, 7)
        tx = self.frame_id % 4
        ty = int(self.frame_id / 4)
        # BUG FIX: the source rect previously used tx for both axes
        # ((tx*4, tx*4, 4, 4)), so the second row of frames was never shown.
        self.area = (tx * 4, ty * 4, 4, 4)
        # Random sideways kick, always launched upwards.
        self.body.apply_impulse((random.randint(-100, 100), random.randint(-200, 0)))

    def update(self, camera_pos):
        GameObject.update(self, camera_pos)

    def draw(self, canvas):
        # Draw slightly below the physics position, clipped to the frame rect.
        canvas.blit(self.sprite.image, (self.draw_pos.x, self.draw_pos.y + 4), self.area, pygame.BLEND_MAX)
|
UTF-8
|
Python
| false | false | 2,010 |
6,004,364,320,953 |
7626378e0b7ac7e6e88b0652647f9a3021acd14c
|
e7a5467d22d305d28944c1f95f82db85794f3bec
|
/bin/muttquery.py
|
8195d2aa85585da10e8a7e85fc32894e70e404ec
|
[] |
no_license
|
dedmajor/home-env
|
https://github.com/dedmajor/home-env
|
7282b2905a0f8ce84aa66230100930d648a794a5
|
4987c63f475415782d67e4e55fb6574bd3feae13
|
refs/heads/master
| 2021-01-23T07:33:49.766203 | 2011-12-01T23:24:40 | 2011-12-01T23:26:43 | 486,739 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# Original version is here: http://shove-it.de/open/jcm/muttquery.py
# See http://wiki.mutt.org/?QueryCommand
###############################################
# answer queries for mutt from kabc file
###############################################
import sys
import os
import re
# Path to the current user's KDE (kabc) address book holding all vCards.
KDE_ADDRESSBOOK=os.environ['HOME'] + '/.kde/share/apps/kabc/std.vcf'
# String identifying mail-address entries in vcards
MAIL_INIT_STRING = r'EMAIL:'
# String identifying name entries in vcards
NAME_INIT_STRING = r'N:'
class vcard:
    """Simple value holder for one address-book entry (email + display name)."""
    def __init__(self, email, name):
        # Coerced to str so later '%s' formatting is uniform.
        self.email = str(email)
        self.name = str(name)
def parseFile(file_name):
    """Read *file_name* and return the list of raw vCard text blocks in it."""
    # Bail out early when the file is missing or unreadable.
    if not os.access(file_name, os.F_OK|os.R_OK):
        print 'Cannot open file ' , file_name
        sys.exit(1)
    # NOTE(review): if open() itself raises, 'cf' is unbound and the finally
    # clause raises NameError; wrapping only read() would be safer.
    try:
        cf = open(file_name)
        cards = cf.read()
    finally:
        cf.close()
    # Non-greedy DOTALL match captures each BEGIN...END block separately.
    re_vcard = re.compile(r'BEGIN:VCARD.*?END:VCARD', re.DOTALL)
    vcards = re_vcard.findall(cards)
    return vcards
def getMatches(vcards, search_string):
    """Return vcard objects for every card matching *search_string*.

    A card qualifies when the (case-insensitive) pattern occurs anywhere in
    its raw text and the card carries at least one EMAIL line.  The display
    name comes from the N: line when present (';' separators replaced by
    spaces), otherwise it is left empty.
    """
    lines = []
    search_re = re.compile(search_string, re.I)
    mail_re = re.compile(r'^' + MAIL_INIT_STRING + r'(.*)$', re.MULTILINE)
    name_re = re.compile(r'^' + NAME_INIT_STRING + r'(.*)$', re.MULTILINE)
    for loop_vcard in vcards:
        if not search_re.search(loop_vcard):
            continue
        # Run each regex once per card (the original searched twice per field).
        mail_match = mail_re.search(loop_vcard)
        if mail_match is None:
            continue
        tmp_mail = mail_match.group(1).strip()
        name_match = name_re.search(loop_vcard)
        if name_match is not None:
            tmp_name = name_match.group(1).replace(';', ' ').strip()
        else:
            tmp_name = ''
        lines.append(vcard(tmp_mail, tmp_name))
    return lines
# main program starts here
vcards = parseFile(KDE_ADDRESSBOOK)
# The search pattern is the single required command-line argument.
try:
    search_string = sys.argv[1]
except IndexError:
    print 'Use only with an argument'
    sys.exit(1)
lines = getMatches(vcards, search_string)
# First output line is informational only; mutt's query parser skips it.
print 'Searched ' + str(vcards.__len__()) + ' vcards, found ' + str(
    lines.__len__())+ ' matches.'
for line in lines:
    #tmp_fill = (40 - line.email.__len__() ) * ' '
    print '%s\t%s' % (line.email, line.name)
    #print '%s' % line.email,
# Exit status per mutt's query_command contract: 0 when matches were found.
if lines.__len__() > 0:
    sys.exit(0)
else:
    sys.exit(1)
|
UTF-8
|
Python
| false | false | 2,011 |
14,439,680,057,770 |
14f259f72058a5d65b7e6f4fc86a40e206340ca7
|
a64fbc148d94f33486205b3886305067c84eafac
|
/faqna_helpers.py
|
6917eef4b66b2ed0dc9718b861024e458f8a9356
|
[] |
no_license
|
jurrchen/faqna
|
https://github.com/jurrchen/faqna
|
3fb4552709fcc60411103bc823e02f5b9479d2c0
|
11d18008d269544e11d8863298ac09d72b6d9305
|
refs/heads/master
| 2023-01-28T01:10:07.638740 | 2011-03-25T23:36:19 | 2011-03-25T23:36:19 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
import datetime
from google.appengine.ext import webapp
from google.appengine.ext import db
from google.appengine.ext.webapp import template
from faqna_models import *
#ehhhhh
def ShowTemplate(webapp, template_file, template_values):
    """Render *template_file* with *template_values* and write it to the response.

    The signed-in user's name (from the ``user_name`` cookie, '' when absent)
    is always merged into the template context under the key ``user_name``.
    If *template_values* is not a dict it is replaced by that one-entry
    context.
    """
    user = webapp.request.cookies.get('user_name', '')
    add_template = {'user_name': user}
    # isinstance (not `type(...) is dict`) also accepts dict subclasses.
    if isinstance(template_values, dict):
        # NOTE: mutates the caller's dict, preserving the original behavior.
        template_values.update(add_template)
    else:
        template_values = add_template
    path = os.path.join(os.path.dirname(__file__), template_file)
    webapp.response.out.write(template.render(path, template_values))
def GetUser(webapp):
    """Return the User entity for the ``user_name`` cookie, or None.

    None is returned both when no cookie is set (empty string) and when the
    cookie names an unknown user.
    """
    user_name = webapp.request.cookies.get('user_name', '')
    # Empty cookie -> nobody is signed in; skip the datastore lookup entirely.
    if not user_name:
        return None
    user = User.get_by_key_name(user_name)
    return user if user else None
|
UTF-8
|
Python
| false | false | 2,011 |
7,060,926,268,946 |
ed0295dfc104483f6465ffbe927806a48afe3aa9
|
4fb1dd9f386c9690d4ded79d838f9a1eacbf36ed
|
/flicks/base/views.py
|
72a0220bcbf59fa7f194772eaaa42017e9f75b4f
|
[
"BSD-3-Clause"
] |
permissive
|
BryanQuigley/firefox-flicks
|
https://github.com/BryanQuigley/firefox-flicks
|
45ae94f093103d427748ad014c96e0bc3849460f
|
2fc046e5fa11f1d4cdd018ccbbdae0ddfb56ba40
|
refs/heads/master
| 2016-09-29T07:34:58.180089 | 2013-01-28T21:36:01 | 2013-01-28T21:36:01 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.shortcuts import render
# Localized Wikipedia "public domain" articles, keyed by locale code.
# Consumers fall back to 'en-US' for locales missing from this map.
LINK_PDWIKI = {
    'en-US': 'https://en.wikipedia.org/wiki/Public_domain',
    'de': 'https://de.wikipedia.org/wiki/Gemeinfreiheit',
    'fr': 'https://fr.wikipedia.org/wiki/Domaine_public_%28propri%C3%A9t%C3%A9_intellectuelle%29',
    'es': 'https://es.wikipedia.org/wiki/Dominio_p%C3%BAblico',
    'nl': 'https://nl.wikipedia.org/wiki/Publiek_domein',
    'pl': 'https://pl.wikipedia.org/wiki/Domena_publiczna',
    'pt-BR': 'https://pt.wikipedia.org/wiki/Dom%C3%ADnio_p%C3%BAblico',
    'sl': 'https://sl.wikipedia.org/wiki/Javna_last',
    'sq': 'https://sq.wikipedia.org/wiki/Domen_publik',
    # Bug fix: the zh-CN / zh-TW URLs carried a trailing space, which ends up
    # percent-encoded in the rendered link and breaks it.
    'zh-CN': 'https://zh.wikipedia.org/wiki/%E5%85%AC%E6%9C%89%E9%A2%86%E5%9F%9F',
    'zh-TW': 'https://zh.wikipedia.org/wiki/%E5%85%AC%E6%9C%89%E9%A2%86%E5%9F%9F',
}
# Localized "creative brief" PDFs; non-English entries stay commented out
# until the translated documents are published.
LINK_BRIEF = {
    'en-US': 'http://static.mozilla.com/firefoxflicks/pdf/Filmmakers_Creative_Brief_en-US.pdf',
    #'de': 'http://static.mozilla.com/firefoxflicks/pdf/Filmmakers_Creative_Brief_de.pdf',
    #'es': 'http://static.mozilla.com/firefoxflicks/pdf/Filmmakers_Creative_Brief_es-ES.pdf',
    #'nl': 'http://static.mozilla.com/firefoxflicks/pdf/Filmmakers_Creative_Brief_nl.pdf',
    #'pl': 'http://static.mozilla.com/firefoxflicks/pdf/Filmmakers_Creative_Brief_pl.pdf',
    #'sl': 'http://static.mozilla.com/firefoxflicks/pdf/Filmmakers_Creative_Brief_sl.pdf',
}
def home(request):
    """Home page."""
    # Fall back to the en-US brief when the locale has no localized PDF.
    return render(request, 'home.html', {
        'link_brief': LINK_BRIEF.get(request.locale, LINK_BRIEF['en-US'])
    })
def faq(request):
    """FAQ page.

    Both the wiki and the creative-brief link fall back to en-US when the
    request locale has no localized entry.
    """
    return render(request, 'faq.html', {
        'link_pdwiki': LINK_PDWIKI.get(request.locale, LINK_PDWIKI['en-US']),
        'link_brief': LINK_BRIEF.get(request.locale, LINK_BRIEF['en-US'])
    })
def strings(request):
    """Strings L10N page."""
    # No extra context; the template itself carries the localizable strings.
    return render(request, 'strings.html')
|
UTF-8
|
Python
| false | false | 2,013 |
12,086,038,002,532 |
b6d0a89dfe8ddfa7e1f0982aec1bdb4e4db41805
|
3bf4b699fdca0d5d5bcb43f64e9e2e8a085b8ff7
|
/src/prct05.py~
|
e534ca319181f43de5288297a54aa2eaee4c3f12
|
[] |
no_license
|
MisaelPeraza/prct05
|
https://github.com/MisaelPeraza/prct05
|
db17130b51f3875314956dd87eb1ea479fb4da5d
|
4d912a23e31a1f7cc4578a7a8a33bee8a41655a8
|
refs/heads/master
| 2021-01-02T23:13:25.552313 | 2014-03-06T12:29:05 | 2014-03-06T12:29:05 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
#!encoding: UTF-8
import sys
def calc_xi(n, i):
    """Midpoint of the i-th of n equal subintervals of [0, 1]: (i - 1/2) / n."""
    half_before = i - 0.5
    return half_before / n
# Number of subintervals comes from argv when given, otherwise from a prompt.
argumentos = sys.argv[1:]
print argumentos
if (len(argumentos) == 1):
    n = int (argumentos[0])
else:
    n=int(raw_input('introduzca el número de intervalos: '))
# Reference value of pi to 35 decimal places, for comparison in the output.
pi_35 = 3.1415926535897931159979634685441852
if (n>0):
    # Midpoint-rule approximation of pi = integral of 4/(1+x^2) over [0, 1].
    sumatorio = 0.0
    inicial = 0
    intervalos = 1.0 / float(n)
    for i in range(n):
        x_i = calc_xi (n, i+1)
        fx_i = 4.0 / (1.0 ++ x_i * x_i)  # NOTE(review): '++' parses as +(+...), so the value is right, but it is likely a typo for '+'
        print "Subintervalo: [", inicial, ",", inicial+intervalos, "]" "x_i:",x_i, "fx_i:", fx_i
        inicial += intervalos
        sumatorio += fx_i
    # Mean of the samples times the interval length (1/n) gives the integral.
    aprox_pi = sumatorio / n
    print "El valor de la aproximación es: ", aprox_pi
    print "El valor de pi con 35 decimales es: %10.35f" % pi_35
else:
    print 'El número de intervalos debe ser positivo'
|
UTF-8
|
Python
| false | false | 2,014 |
8,126,078,145,424 |
cf7f3309265d66ee99aa8585d34923dc0f3c1344
|
d5214b1331c9dae59d95ba5b3aa3e9f449ad6695
|
/qSiloGroup/tags/0.3.0/SiloSiteMap.py
|
296a790f2903b416916bc3a0f83d0878bccc1caa
|
[] |
no_license
|
kroman0/products
|
https://github.com/kroman0/products
|
1661ee25a224c4b5f172f98110944f56136c77cf
|
f359bb64db22f468db5d1e411638790e94d535a2
|
refs/heads/master
| 2021-01-10T07:58:04.579234 | 2014-06-11T12:05:56 | 2014-06-11T12:05:56 | 52,677,831 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from AccessControl import ClassSecurityInfo
from Products.Archetypes.public import Schema
from Products.qSiloGroup.config import PROJECTNAME
from Products.ATContentTypes.content.base import registerATCT
from Products.ATContentTypes.content.base import ATCTContent
from Products.ATContentTypes.content.schemata import ATContentTypeSchema
from Products.ATContentTypes.content.schemata import finalizeATCTSchema
from Products.ATContentTypes.lib.historyaware import HistoryAwareMixin
# Start from the stock ATContentType schema and pin sitemap-specific defaults.
SiloSiteMapSchema = ATContentTypeSchema.copy()
SiloSiteMapSchema['id'].default = 'sitemap.htm'
SiloSiteMapSchema['id'].default_method = 'getDefaultId'
SiloSiteMapSchema['title'].default_method = 'getDefaultTitle'
# Move secondary fields onto the 'metadata' schemata (edit-form tab).
SiloSiteMapSchema['allowDiscussion'].schemata = 'metadata'
SiloSiteMapSchema['relatedItems'].schemata = 'metadata'
SiloSiteMapSchema['description'].schemata = 'metadata'
class SiloSiteMap(ATCTContent, HistoryAwareMixin):
    """ Silo Site Map """
    schema = SiloSiteMapSchema
    content_icon = 'document_icon.gif'
    meta_type = 'SiloSiteMap'
    portal_type = 'SiloSiteMap'
    archetype_name = 'Silo Sitemap'
    # Both immediate and default views use the dedicated sitemap template.
    default_view = 'silositemap_view'
    immediate_view = 'silositemap_view'
    suppl_views = ()
    typeDescription= 'Silo Sitemap'
    typeDescMsgId = 'description_edit_document'
    security = ClassSecurityInfo()
    def getDefaultTitle(self):
        """Build the default title from the parent container's title."""
        return self.aq_parent.Title() + ' Sitemap'
    def getDefaultId(self):
        """Return the fixed default id 'sitemap.htm'."""
        return 'sitemap.htm'
registerATCT(SiloSiteMap, PROJECTNAME)
|
UTF-8
|
Python
| false | false | 2,014 |
14,740,327,802,212 |
c599296d058b2dce7f4d5c8822724e1ab7e40758
|
774a4e8a701029f839f630db96e4b4ae2fca241d
|
/psykokwak3/attendance.py
|
1075939ce79530d75a30b53fd76f4947f24711a0
|
[] |
no_license
|
davidrubino/WebDevProject
|
https://github.com/davidrubino/WebDevProject
|
e8285c5747157af333c270cbf4305457b59bb9ad
|
e9a96c24433b57bb8346c94f2e1218d68f01dd5c
|
refs/heads/master
| 2016-08-07T16:25:09.674972 | 2014-05-20T07:40:22 | 2014-05-20T07:40:22 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
import hmac
from google.appengine.ext import db
import jinja2
import webapp2
import urllib2
from xml.dom import minidom
import datetime
from pytz.gae import pytz
from login import User
from google.appengine.api import memcache
# Templates live next to this module in ./templates; autoescaping is on.
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir),
                               autoescape=True)
def hash_str(s):
    """HMAC digest of *s* under a hard-coded key (Python 2 hmac defaults to MD5).

    NOTE(review): the secret lives in source; move it to config for production.
    """
    return hmac.new('i_m_so_secret', s).hexdigest()
def make_secure_val(s):
    """Return "value|signature", a tamper-evident cookie payload."""
    return "%s|%s" % (s, hash_str(s))
def check_secure_val(h):
    """Return the value part of "value|sig" if the signature matches.

    Falls off the end (implicitly returns None) on a tampered cookie.
    """
    val = h.split('|')[0]
    if h == make_secure_val(val):
        return val
class Handler(webapp2.RequestHandler):
    """Base request handler: Jinja2 rendering plus signed-cookie helpers."""
    def write(self, *a, **kw):
        # Raw passthrough to the response stream.
        self.response.out.write(*a, **kw)
    def render_str(self, template, **params):
        # Render a template file to a string.
        t = jinja_env.get_template(template)
        return t.render(params)
    def render(self, template, **kw):
        # Render a template and write it to the response.
        self.write(self.render_str(template, **kw))
    def read_secure_cookie(self, name):
        # Returns the cookie's value only if its HMAC signature checks out.
        cookie_val = self.request.cookies.get(name)
        return cookie_val and check_secure_val(cookie_val)
def valid_nameTeacher(name):
    """Return True if *name* is a known teacher (memcache first, then datastore).

    NOTE(review): the placement of the 'else: return True' below looks wrong —
    it does not attach to 'if t is None', so a cache miss can report True for
    names the loop never matched while a cache hit may fall through.  Confirm
    the intended control flow before relying on this check.
    """
    t = memcache.get(name)
    if t is None:
        teacher = db.GqlQuery("select * from Teacher where nameTeacher=:1", name)
        teacher = list(teacher)
        for t in teacher:
            if t.nameTeacher == name:
                memcache.set(name, teacher)
                return True
        else:
            return True
    return False
def date_memcach():
    """Return a coarse time-derived string used to salt memcache keys.

    NOTE(review): the components are summed arithmetically
    (day+month+hour+minute+second), so many distinct instants collide on the
    same value — the key is NOT unique per minute or per day.  Confirm whether
    this coarse/colliding cache key is intentional.
    """
    tz = pytz.timezone('Europe/Paris')
    now_utc = pytz.utc.localize(datetime.datetime.utcnow())
    res = now_utc.astimezone(tz)
    a = res.day+res.month+res.hour+res.minute+res.second
    return str(a)
class AttendanceHandler(Handler):
    """Attendance page: looks up a teacher's current course in the ADE
    timetable web service and records per-student absences."""
    def aVirer(self):
        """Dev helper ("a virer" = to remove): deletes every Student entity.

        Only referenced from commented-out code in get().
        """
        att = db.GqlQuery("select * from Student")
        for a in att:
            a.delete()
    def get(self):
        """Show the attendance form, resolving who is signed in from cookies."""
        isAdminConnected = False
        isStudentConnected = False
        isTeacherConnected = False
        uid = self.read_secure_cookie('user_id')
        aid = self.read_secure_cookie('admin_id')
        tid = self.read_secure_cookie('teacher_id')
        user = None
        if aid:
            user = aid and User.by_id(int(aid))
            isAdminConnected=True
        if uid:
            #user = uid and User.by_id(int(uid))
            isStudentConnected=True
        elif tid:
            user = tid and User.by_id(int(tid))
            isTeacherConnected=True
        if user:
            nameCo = user.username
        else:
            nameCo=""
            # NOTE(review): redirect() followed by render_front() below means
            # both a redirect and a body are written — confirm intent.
            self.redirect("/")
        #self.aVirer()
        self.render_front("", "", isStudentConnected, isAdminConnected, isTeacherConnected, nameCo)
    def post(self):
        """Handle both forms: teacher lookup (form1) and absence entry (form2)."""
        isAdminConnected = False
        isStudentConnected = False
        isTeacherConnected = False
        uid = self.read_secure_cookie('user_id')
        aid = self.read_secure_cookie('admin_id')
        tid = self.read_secure_cookie('teacher_id')
        user = None
        if aid:
            user = aid and User.by_id(int(aid))
            isAdminConnected=True
        if uid:
            #user = uid and User.by_id(int(uid))
            isStudentConnected=True
        elif tid:
            user = tid and User.by_id(int(tid))
            isTeacherConnected=True
        if user:
            nameCo = user.username
        else:
            nameCo=""
            self.redirect("/")
        if self.request.get("action") == "form1":
            # Teacher names are stored/compared upper-cased.
            teachername = self.request.get("teacherName").upper()
            t = valid_nameTeacher(teachername)
            if not t:
                self.render_front(teachername, "Teacher not found", isStudentConnected, isAdminConnected, isTeacherConnected, nameCo)
            else:
                self.research(teachername, isStudentConnected, isAdminConnected, isTeacherConnected, nameCo)
        elif self.request.get("action") == "form2":
            group = self.request.get("group")
            course = self.request.get("course")
            students = self.searchStudent(group)
            students = sorted(students, key=lambda student: student.nameStudent)
            for s in students:
                # Each student's radio/select field is keyed by their name.
                select = self.request.get(s.nameStudent)
                if select == "Absent":
                    # Bump the unjustified-absence counter and log the event.
                    s.absenceInjustifiee += 1
                    s.put()
                    a = Absence(nameStudent=s.nameStudent, nameGroup=s.nameGroup, nameCourse=course)
                    a.put()
            self.redirect("/attendance")
        else:
            self.redirect("/")
    def render_front(self, nom_prof="", error="", isStudentCo="", isAdminCo="", isTeacherCo="", username=""):
        """Render the teacher-lookup form, optionally with an error message."""
        self.render("attendance.html", name_teacher=nom_prof, error=error, isStudentConnected=isStudentCo,
                    isAdminConnected=isAdminCo, isTeacherConnected=isTeacherCo, username=username)
    def research(self, teacher="", isStudentCo="", isAdminCo="", isTeacherCo="", username=""):
        """Find the course *teacher* is giving right now via the ADE web API
        and render the per-student attendance form for its group."""
        # NOTE(review): service credentials are hard-coded in the URL below.
        contents = urllib2.urlopen(
            "https://adeweb.univ-lorraine.fr/jsp/webapi?function=connect&login=ade_uhp&password=edt_uhp").read()
        d = minidom.parseString(contents)
        root = d.documentElement
        atr = root.getAttributeNode('id')
        sessionId = atr.nodeValue
        activate_url = urllib2.urlopen(
            "https://adeweb.univ-lorraine.fr/jsp/webapi?sessionId=" + sessionId + "&function=setProject&projectId=9").read()
        d_project = minidom.parseString(activate_url)
        root_project = d_project.documentElement
        atr_project = root_project.getAttributeNode('projectId')
        projectId = atr_project.nodeValue
        #TODO: the date, the converted date, and the local time rather than UTC
        tz = pytz.timezone('Europe/Paris')
        now_utc = pytz.utc.localize(datetime.datetime.utcnow())
        res = now_utc.astimezone(tz)
        # API query date is month/day/year; dateVoulue below is the
        # zero-padded day/month/year format used in event attributes.
        date = "%s/%s/%s" % (res.month, res.day, res.year)
        if res.day < 10 and res.month < 10 :
            dateVoulue = "0%s/0%s/%s" % (res.day, res.month, res.year)
        elif res.day < 10 :
            dateVoulue = "0%s/%s/%s" % (res.day, res.month, res.year)
        elif res.month < 10:
            dateVoulue = "%s/0%s/%s" % (res.day, res.month, res.year)
        else:
            dateVoulue = "%s/%s/%s" % (res.day, res.month, res.year)
        # Zero-pad the clock time so string comparison with event hours works.
        if res.hour < 10 and res.minute < 10:
            hour = "0%s:0%s" %(res.hour, res.minute)
        elif res.hour < 10:
            hour = "0%s:%s" %(res.hour, res.minute)
        elif res.minute < 10:
            hour = "%s:0%s" %(res.hour, res.minute)
        else:
            hour = "%s:%s" %(res.hour, res.minute)
        #date ="04/22/2014"
        #dateVoulue = "22/04/2014"
        #hour = "14:20"
        global_url = urllib2.urlopen(
            "https://adeweb.univ-lorraine.fr/jsp/webapi?sessionId=" + sessionId + "&function=getEvents&date=" + date + "&detail=8").read()
        d_event = minidom.parseString(global_url)
        event = d_event.getElementsByTagName("event")
        name_room = "room"
        nameGroup = "gpe"
        name_event = "course"
        trouve = False
        for node in event:
            endHour = node.getAttribute("endHour")
            startHour = node.getAttribute("startHour")
            dateEvent = node.getAttribute("date")
            # Keep only the first event covering the current time.
            if dateEvent == dateVoulue and hour < endHour and hour >= startHour and not trouve:
                resources = node.getElementsByTagName("resources")
                for node2 in resources:
                    resource = node2.getElementsByTagName("resource")
                    for r in resource:
                        if r.getAttribute("category") == "trainee":
                            nameGroup = r.getAttribute("name")
                        if r.getAttribute("category") == "classroom":
                            name_room = r.getAttribute("name")
                        #if r.getAttribute("category") == "category6":
                        #    nameCourse = r.getAttribute("name")
                        if r.getAttribute("category") == "instructor" and r.getAttribute("name") == teacher:
                            name_event = node.getAttribute("name")
                            trouve = True
        if trouve:
            students = self.searchStudent(nameGroup)
            if len(students) > 0:
                students = sorted(students, key=lambda student: student.nameStudent)
                self.render("attendanceStudent.html", course=name_event, group=nameGroup, students=students, error="",
                            isStudentConnected=isStudentCo, isAdminConnected=isAdminCo, isTeacherConnected=isTeacherCo, username=username)
        else:
            self.render_front(teacher, "Teacher has no course in this hour", isStudentCo, isAdminCo, isTeacherCo, username)
    def searchStudent(self, group=""):
        """Return Students whose nameGroup starts with *group* (memcached).

        The >= group / < group + U+FFFD pair implements a prefix range query;
        the cache key is salted with date_memcach()'s time-derived string.
        """
        students = memcache.get(group+"-"+date_memcach())
        if students is None:
            students = db.GqlQuery("select * from Student where nameGroup >= :1 and nameGroup < :2", group,group+ u"\ufffd")
            students = list(students)
            memcache.set(group+"-"+date_memcach(), students)
        #students = db.GqlQuery("select * from Student where nameGroup >= :1 and nameGroup < :2", "2A IL",u"2A IL"+ u"\ufffd")
        return students
class Teacher(db.Model):
    """Datastore entity: one teacher, looked up by exact (upper-cased) name."""
    nameTeacher = db.StringProperty(required=True)
class Student(db.Model):
    """Datastore entity: one student with running absence counters."""
    # nameStudent doubles as the POST field key in the form2 handler.
    nameStudent = db.StringProperty(required=True)
    nameGroup = db.StringProperty(required=True)
    # Running totals: justified / unjustified absences.
    absenceJustifiee = db.IntegerProperty(required=True)
    absenceInjustifiee = db.IntegerProperty(required=True)
class Absence(db.Model):
    """Datastore entity: one recorded absence event."""
    nameStudent = db.StringProperty(required=True)
    nameGroup = db.StringProperty(required=True)
    nameCourse = db.StringProperty(required=True)
    # Optional justification text; date is stamped automatically on creation.
    motif = db.TextProperty()
    date = db.DateProperty(auto_now_add=True)
|
UTF-8
|
Python
| false | false | 2,014 |
2,035,814,536,564 |
bfc3fbf60b4a6771f47e0f930c7ebe04d05bda1c
|
16d8ed403af2184c9db4026cc84b9fd040beeeea
|
/TwitterKov/util/twitter.py
|
044730b0b1d033000f5696a4bca52be007e8ad21
|
[] |
no_license
|
wpovell/twitter-kov
|
https://github.com/wpovell/twitter-kov
|
a51267e026aae983b0579d2f193d8c2e98f2f61b
|
371a7894783d33e68d64d67feca9379ba166017a
|
refs/heads/master
| 2016-09-06T15:13:55.346790 | 2014-10-11T04:04:57 | 2014-10-11T04:04:57 | 11,493,473 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python3
import string, base64, re, time, random, hmac, binascii, urllib.request
from hashlib import sha1
def percentEncode(toEncode):
    """Percent-encode *toEncode* per RFC 3986.

    The string is first UTF-8 encoded; unreserved bytes (letters, digits,
    '-._~') pass through unchanged, every other byte becomes %XX.
    """
    data = toEncode.encode("UTF-8")
    unreserved = (string.ascii_letters + string.digits + "-._~").encode("UTF-8")
    pieces = []
    for byte in data:
        if byte in unreserved:
            pieces.append(chr(byte))
        else:
            # Upper-case two-digit hex, as OAuth requires.
            pieces.append("%{:02X}".format(byte))
    return "".join(pieces)
def createNonce():
    """Return a pseudo-random alphanumeric OAuth nonce of at most 42 chars.

    32 pseudo-random byte values are urlsafe-base64 encoded, stripped of any
    non-alphanumeric characters, and truncated.
    """
    raw = "".join(chr(int(random.random() * 256)) for _ in range(32))
    encoded = base64.urlsafe_b64encode(bytes(raw, "UTF-8"))
    encoded = re.sub(b"[^0-9a-zA-Z]", b"", encoded)
    return encoded[:42].decode("UTF-8")
def createSignature(http_method, base_url, parameters, consumer_secret, token_secret):
    """Compute the base64 HMAC-SHA1 OAuth 1.0 signature for a request.

    *parameters* must contain both the request parameters and the oauth_*
    ones; they are sorted and percent-encoded into the parameter string.
    """
    encoded_pairs = [
        percentEncode(key) + "=" + percentEncode(value)
        for key, value in sorted(parameters.items())
    ]
    parameter_string = "&".join(encoded_pairs)
    # Base string: METHOD&enc(url)&enc(param_string).
    base_string = "&".join([
        http_method.upper(),
        percentEncode(base_url),
        percentEncode(parameter_string),
    ])
    signing_key = percentEncode(consumer_secret) + "&" + percentEncode(token_secret)
    digest = hmac.new(bytes(signing_key, "UTF-8"), bytes(base_string, "UTF-8"), sha1)
    # b2a_base64 appends a newline; drop it.
    signature = binascii.b2a_base64(digest.digest())[:-1]
    return signature.decode("UTF-8")
def createAuthString(parameters):
    """Render *parameters* as an OAuth Authorization header value.

    Keys are sorted; both keys and values are percent-encoded and the value
    is wrapped in double quotes, per the OAuth 1.0 header format.
    """
    parts = [
        percentEncode(key) + '="' + percentEncode(value) + '"'
        for key, value in sorted(parameters.items())
    ]
    return "OAuth " + ", ".join(parts)
def genAuthFromParams(http_method, base_url, consumer_secret, consumer_key, token_secret, access_token, info_parameters=None):
    """Assemble a signed OAuth 1.0 Authorization header value for one request.

    *info_parameters* holds extra request parameters that must be covered by
    the signature (the caller's dict is never mutated).
    """
    if info_parameters is None:
        info_parameters = {}
    oauth_parameters = {
        "oauth_consumer_key": consumer_key,
        "oauth_nonce": createNonce(),
        "oauth_signature_method": "HMAC-SHA1",
        "oauth_timestamp": str(int(time.time())),
        "oauth_token": access_token,
        "oauth_version": "1.0",
    }
    # The signature covers the request parameters plus the oauth_* ones.
    signed_parameters = dict(info_parameters)
    signed_parameters.update(oauth_parameters)
    oauth_parameters["oauth_signature"] = createSignature(
        http_method, base_url, signed_parameters, consumer_secret, token_secret)
    return createAuthString(oauth_parameters)
def getSample(consumer_secret, consumer_key, token_secret, access_token, debug=False):
    """Open Twitter's streaming sample endpoint and return the live response.

    NOTE(review): the *debug* flag is accepted but never used.
    """
    http_method = "GET"
    base_url = "https://stream.twitter.com/1.1/statuses/sample.json"
    auth_string = genAuthFromParams(http_method, base_url, consumer_secret, consumer_key, token_secret, access_token)
    req = urllib.request.Request("https://stream.twitter.com/1.1/statuses/sample.json", headers={"Authorization":auth_string})
    data = urllib.request.urlopen(req)
    return data
def postStatus(status, consumer_secret, consumer_key, token_secret, access_token):
    """POST a tweet via the statuses/update endpoint.

    The OAuth signature must cover both the status text and
    include_entities, so they are passed to the signer as request parameters.
    """
    http_method = "POST"
    base_url = "https://api.twitter.com/1.1/statuses/update.json"
    info_parameters = {
    "status" : status,
    "include_entities" : "true"}
    auth_string = genAuthFromParams(http_method, base_url, consumer_secret, consumer_key, token_secret, access_token, info_parameters)
    # Form-encoded body; the status must be percent-encoded exactly as signed.
    post_data = bytes("status=" + percentEncode(info_parameters["status"]), "UTF-8")
    req = urllib.request.Request("https://api.twitter.com/1.1/statuses/update.json?include_entities=true", data=post_data, headers={"Authorization":auth_string})
    urllib.request.urlopen(req)
|
UTF-8
|
Python
| false | false | 2,014 |
506,806,176,170 |
edfe34b21a5424d190539f1cbcf41ad3641f47b7
|
ab42b2c786a18f62d9e14f31c2633b34308d2371
|
/setup.py
|
f397147dd1b8e53de40c77e2e0cb7fc4342c187a
|
[] |
no_license
|
ped4747Organization/myTracking
|
https://github.com/ped4747Organization/myTracking
|
3cf9f7549de3d06a2e0a3ab3619a476dc58566f8
|
b64849ee99351a8789cb561f166d01164f49f02f
|
refs/heads/master
| 2016-09-10T00:23:36.021615 | 2014-12-08T14:32:47 | 2014-12-08T14:32:47 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#-*- coding: utf-8 -*-
# Copyright (C) 2014 ---------------
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# 3. All advertising materials mentioning features or use of this
# software must display the following acknowledgment:
# "This product includes software developed by
# --------------- <--------------- AT --------------->"
#
# 4. Redistributions of any form whatsoever must retain the following
# acknowledgment:
# "This product includes software developed by
# --------------- <--------------- AT --------------->."
#
# THIS SOFTWARE IS PROVIDED BY --------------- ``AS IS'' AND ANY
# EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Roman V. Kiseliov OR
# ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
import sys,os
from distutils.core import setup
from setuptools import find_packages
# Subversion (build) number comes from version.txt when present.
# NOTE(review): the fallback is the int 1 while the file path yields a str —
# confirm the intended type; '%s' formatting below masks the difference.
if os.path.exists("version.txt") :
    with open("version.txt", "r") as f : lines = f.readlines()
    subversion = lines[0].strip("\r\n ")
else :
    subversion = 1
project_var_name = "tracking"
sversion = "0.0"
versionPython = "%s.%s" % (sys.version_info.major, sys.version_info.minor)
path = "Lib/site-packages/" + project_var_name
readme = 'README.rst'
KEYWORDS = \
    project_var_name + ', first name, last name'
DESCRIPTION = \
    """This a project template including a setup and the generation of sphinx generation."""
CLASSIFIERS = \
    [
    'Programming Language :: Python :: 3',
    'Intended Audience :: Developers',
    'Topic :: Scientific/Engineering',
    'Topic :: Education',
    'License :: OSI Approved :: BSD License',
    ]
# Extension modules are skipped for Windows installers.
if "bdist_wininst" not in sys.argv :
    EXT_MODULES = [
        #Extension(project_var_name + '.subproject.sample_module',
        #            ['src/' + project_var_name + '/subproject/sample_module.cpp'],
        #            include_dirs = ['src/' + project_var_name + '/subproject']),
        ]
else :
    EXT_MODULES = [ ]
# Package layout: sources live under src/, mapped back to package names.
packages = find_packages('src', exclude='src')
package_dir = { k: "src/" + k.replace(".","/") for k in packages }
package_data = { project_var_name + ".subproject": ["*.tohelp"] }
with open(readme) as f : long_description = f.read()
if "--verbose" in sys.argv :
    print ("---------------------------------")
    print ("package_dir =",package_dir)
    print ("packages =",packages)
    print ("package_data=",package_data)
    print ("current =", os.path.abspath(os.getcwd()))
    print ("---------------------------------")
setup(
    name = project_var_name,
    # Plain x.y for register/bdist_msi, otherwise a python-tagged version.
    version = '%s.%s' %(sversion, subversion) if "register" in sys.argv or "bdist_msi" in sys.argv else 'py%s-%s.%s' % (versionPython, sversion, subversion),
    author = 'author',
    author_email = 'author AT something.any',
    url = "http://...",
    download_url = "https://github.com/.../",
    description = DESCRIPTION,
    long_description = long_description,
    keywords = KEYWORDS,
    classifiers = CLASSIFIERS,
    packages = packages,
    package_dir = package_dir,
    package_data = package_data,
    #data_files = data_files,
    #install_requires = [ "numpy (>= 1.7.1)", ],
    ext_modules = EXT_MODULES,
    #include_package_data = True,
    )
|
UTF-8
|
Python
| false | false | 2,014 |
5,145,370,838,498 |
8ab2478c34ab885770b69c6ae4dbfa85f54ee661
|
6a74e9969c10f7c7557e9a8b08baf44ee4f60ccb
|
/src/gpsvideo/__init__.py
|
d97c0f3b3acc5a7d3c8cfc71cd17179346c78490
|
[] |
no_license
|
quangtrungtruong/mov2nmea
|
https://github.com/quangtrungtruong/mov2nmea
|
3ec1945f7aa9dd82880f3e1a9f2e362bda0fd3ba
|
5cd432a98212bdffbf81b00a50a10b1268546f70
|
refs/heads/master
| 2018-01-10T20:54:34.448926 | 2011-11-30T02:32:19 | 2011-11-30T02:32:19 | 54,942,828 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import quicktime as qt
import nmea
|
UTF-8
|
Python
| false | false | 2,011 |
4,741,643,897,438 |
839bcd6321f622b066496336ec63666b7e5d9a67
|
fb0124e22720ba4f8ed193d15f47217f9d99ff4b
|
/setup.py
|
357050b63d86a9d62034e648eeadc0b04a308333
|
[
"BSD-2-Clause"
] |
permissive
|
akheron/python-manhole
|
https://github.com/akheron/python-manhole
|
d60a8af8cece3970790b12726f20e202379a1fc8
|
0d1877fa3522f33f76d6b8ed4bde8ca88c18698b
|
refs/heads/master
| 2023-07-06T21:25:01.591277 | 2014-02-16T12:49:14 | 2014-02-16T12:49:14 | 17,430,182 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- encoding: utf8 -*-
from setuptools import setup, find_packages
import os
# Package metadata for the manhole debugging tool.
setup(
    name = "manhole",
    version = "0.6.0",
    url = 'https://github.com/ionelmc/python-manhole',
    download_url = '',
    license = 'BSD',
    # Bug fix: user-visible PyPI summary had a typo ("Inpection").
    description = "Inspection manhole for python applications. Connection is done via unix domain sockets.",
    long_description = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read(),
    author = 'Ionel Cristian Mărieș',
    author_email = '[email protected]',
    package_dir = {'':'src'},
    py_modules = ['manhole'],
    include_package_data = True,
    zip_safe = False,
    classifiers = [
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: Unix',
        'Operating System :: POSIX',
        'Programming Language :: Python',
        'Topic :: Software Development :: Debuggers',
        'Topic :: Utilities',
        'Topic :: System :: Monitoring',
        'Topic :: System :: Networking',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
    ],
    install_requires=[
    ]
)
|
UTF-8
|
Python
| false | false | 2,014 |
7,069,516,203,367 |
271aaa27f783a1b3130c1f123af7d1343b15732a
|
725883fc2d96737dfd8409d60adb422af17ada91
|
/examples/show.py
|
481536bad9f6caf5f736de64d3a32c392c639954
|
[
"MIT"
] |
permissive
|
jheinen/mogli
|
https://github.com/jheinen/mogli
|
74e81adef6f9dd1f37bc75bbb9e1f5fdab750369
|
483b8738fa85aaff4f7b695dd8edbe9d20781155
|
refs/heads/master
| 2021-01-18T04:27:08.911863 | 2014-10-25T10:37:28 | 2014-10-25T10:37:28 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import mogli
# Load every molecule from the example XYZ file and display each one,
# recomputing bonds with the given bond-length parameter.
molecules = mogli.read('examples/dna.xyz')
for molecule in molecules:
    mogli.show(molecule, bonds_param=1.15)
|
UTF-8
|
Python
| false | false | 2,014 |
17,025,250,364,348 |
5648cd4e6cfa52b61e12cbaa0cb0beb7cdddfce3
|
368a09f3fe215abcb186e7b68d6dad40dfed848d
|
/sandbox/T2G.py
|
53b24b8793dc9bf50818590eea804674a6e03f40
|
[] |
no_license
|
CurtLH/Titan
|
https://github.com/CurtLH/Titan
|
b324efaea22f1fb9471755131901fe6a0a35322d
|
95583749c8cbf45db67aa82652d894f600608b4f
|
refs/heads/master
| 2016-03-27T21:05:28.987795 | 2014-11-30T17:57:15 | 2014-11-30T17:57:15 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
#T2G: Twitter-to-Gephi converter, Python edition v 0.l (05/14/13)
#This script converts user mentions in tweets into a format capable of being imported into Gephi (https://gephi.org/), a social network visualization platform. It was written and tested under Python 2.7.3, so YMMV under different installations.
#To use it, begin by creating an input CSV file consisting of two columns: the first (leftmost) containing the usernames of the tweet authors, and the second containing their tweets. Each author username must have a corresponding tweet next to it. Move this file into the working directory of your choice (if using the interpreter this is usually the file where your Python binary lives). After executing T2G, type the name of your input file exactly (including the extension) and it should do its job. The output file should import into Gephi as a directed graph in which ties extend from authors to mentioned users.
#Please report all bugs and unexpected behavior to [email protected].
import csv
import re
# NOTE(review): 'file' shadows the Python 2 builtin of the same name.
file = raw_input('Please enter the name of a properly-formatted CSV file: ')
t_list = []
with open(file, 'rb') as f: #opens the CSV file, which must be properly formatted, and inserts all content into t_list
    reader = csv.reader(f)
    for row in reader:
        t_list.append(row)
g_src = [t[0].lower() for t in t_list] #fills in the list g_src with the names of tweeting users
g_tmp = [' ' + t[1] + ' ' for t in t_list] #adds 1 space to beginning and end of each tweet
g_tmp = [t.split('@') for t in g_tmp] #splits each tweet along @s
# NOTE(review): an empty chunk (e.g. produced by '@@' in a tweet) makes
# re.search return None, and .start() then raises AttributeError — confirm
# inputs can never contain consecutive '@' characters.
g_trg = [[t[:re.search('[^A-Za-z0-9_]',t).start()].lower().strip() for t in chunk] for chunk in g_tmp] #strips out everything after the @ sign and trailing colons, leaving (hopefully) a list of lists of usernames
for line in g_trg:
    if len(line) > 1 and line[0] == '': #removes blank entries from lines mentioning at least one name
        del line[0]
final = []
i = 0
# NOTE(review): the loop variable 'list' shadows the builtin.
for list in g_trg: #creates final output list
    for name in list:
        final.append(g_src[i] + ',' + name + "\n")
    i+=1
outfile = file + '_gephiready_python.csv'
with open(outfile,'wb') as out: #writes the final output to CSV
    for row in final:
        out.write(row)
print 'Conversion complete. Your export file is "' + outfile + '".'
|
UTF-8
|
Python
| false | false | 2,014 |
16,406,775,086,721 |
5f0dbecd92cf62921e1de290f8cbe60efb19a874
|
0a5523b94be65e0b37962f24a36c57ca3816cedb
|
/mobiletrans/mtimport/importers/importer_ctarailline.py
|
420efd72e6826b17260412d56a327f6f43566df4
|
[
"MIT"
] |
permissive
|
JoeJasinski/WindyTransit
|
https://github.com/JoeJasinski/WindyTransit
|
4aa3b2765227693d0444a5067726d6dd681fa8e2
|
b883c7eebe618923ecc7b1914a696543d8864215
|
refs/heads/master
| 2020-04-15T22:49:04.873737 | 2014-09-28T14:14:08 | 2014-09-28T14:14:08 | 3,424,466 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.contrib.gis.geos import Point, fromstr, fromfile, GEOSGeometry, MultiPoint, MultiPolygon, Polygon
from django.contrib.gis.gdal.error import OGRIndexError
from autoslug.settings import slugify
from mobiletrans.mtimport.importer import ShapeFileImportBase
from mobiletrans.mtlocation import models as loc_models
from mobiletrans.mtimport import models
from mobiletrans.mtimport.exceptions import *
class CTARailLines(ShapeFileImportBase):
    """Shapefile importer for CTA rail line segments.

    Copies the attribute columns of each shapefile row onto a
    ``loc_models.CTARailLines`` instance and reprojects the row geometry
    into the importer's target coordinate system.
    """

    # (shapefile column, model attribute) pairs copied verbatim for every row.
    # The original repeated an identical try/except block once per field.
    FIELD_MAP = (
        ("SEGMENT_ID", "segment_id"),
        ("ASSET_ID", "asset_id"),
        ("LINES", "transit_lines"),
        ("DESCRIPTIO", "description"),
        ("TYPE", "type"),
        ("LEGEND", "legend"),
        ("ALT_LEGEND", "alt_legend"),
        ("BRANCH", "branch"),
        ("SHAPE_LEN", "shape_len"),
    )

    @classmethod
    def get_model_class(cls):
        """Return the Django model populated by this importer."""
        return loc_models.CTARailLines

    def get_geom_field(self):
        """Name of the geometry field on the model."""
        return "line"

    def parse_row(self, row):
        """Build (or update) a CTARailLines instance from one shapefile row.

        Looks up an existing record by OBJECTID, copies the mapped attribute
        columns, transforms and stores the geometry as WKT, and updates the
        new/existing import statistics.

        Raises ImportException when the primary key, a mapped column or the
        geometry is unavailable, or when OBJECTID matches multiple records.
        """
        existing = False
        try:
            primary_key = row.get("OBJECTID")
        except OGRIndexError as error:
            raise ImportException("primary key 'OBJECTID' not available", error)
        try:
            ctarailline = self.get_model_class().objects.get(objectid=primary_key)
            existing = True
        except ObjectDoesNotExist:
            ctarailline = self.get_model_class()(objectid=primary_key)
            existing = False
        except MultipleObjectsReturned:
            raise ImportException("multiple objects returned with OBJECTID %s " % primary_key)
        # Plain attribute columns: same error message per field as before.
        for column, attribute in self.FIELD_MAP:
            try:
                setattr(ctarailline, attribute, row.get(column))
            except OGRIndexError as error:
                raise ImportException("field '%s' not available" % column, error)
        try:
            geom = row.geom
            geom.transform(self.coord_transform)
            ctarailline.line = geom.wkt
        except Exception as error:
            raise ImportException("attribute 'geom' not available", error)
        if existing:
            self.stats['existing'] += 1
        else:
            self.stats['new'] += 1
        return ctarailline
|
UTF-8
|
Python
| false | false | 2,014 |
1,864,015,818,409 |
fa568f46923dcf70721ebe020618d57a1d49134a
|
3cb09c27125307e052554c630b2cfd006e82afcf
|
/lossless2m4a.py
|
b50880c1261a5a6d42e39cecd3f5a1ffb7a25d87
|
[] |
no_license
|
camelcc/ape_flac_2_m4a
|
https://github.com/camelcc/ape_flac_2_m4a
|
307140462a322c7364e8c0cddce07c50f2a1cf6b
|
81794e65e7bd74ea764c31fc387621573b85507f
|
refs/heads/master
| 2021-01-10T20:00:07.796278 | 2012-08-19T14:15:50 | 2012-08-19T14:15:50 | 5,471,366 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
import sys
import shutil
import re
def remove_readonly(fn, path, excinfo):
    """shutil.rmtree ``onerror`` callback: clear the read-only bit and retry.

    fn      -- the function that failed (os.rmdir or os.remove)
    path    -- the path it failed on
    excinfo -- sys.exc_info() triple from the failure (unused)
    """
    # 'stat' was referenced here but never imported at module level, so the
    # original raised NameError the first time the callback actually fired.
    import stat
    if fn is os.rmdir:
        os.chmod(path, stat.S_IWRITE)
        os.rmdir(path)
    elif fn is os.remove:
        os.chmod(path, stat.S_IWRITE)
        os.remove(path)
def get_filename_from_cue(cue_filepath):
    """Return the media filename from the first ``FILE "..."`` line of a cue sheet.

    Scans the cue sheet line by line and returns the quoted filename of the
    first line starting with FILE, or "" when no FILE line is present.
    """
    regex_pattern = re.compile(r"FILE\s*\"(?P<filename>.*)\"")
    # Use a context manager so the handle is closed (the original leaked it),
    # and iterate lazily instead of materializing every line with readlines().
    with open(cue_filepath) as cue_file:
        for line in cue_file:
            matchs = regex_pattern.match(line)
            if matchs:
                return matchs.group('filename')
    return ""
def convert_lossless_in_dir(src, des):
    """Convert the lossless audio found directly in *src* into ALAC under *des*.

    Cue-sheet conversion (track splitting) takes priority: if any .cue file is
    present only the cue sheets are converted and the function returns;
    otherwise every .ape/.flac/.wav file is converted individually. *des* is
    deleted and recreated before converting. Shells out to the XLD
    command-line tool (macOS). Python 2 only (print statements).
    """
    # check if cue or ape or flac file exists.
    has_cue = False
    has_lossess = False
    cue_files = []
    lossess_files = []
    for file in os.listdir(src):
        filepath = os.path.join(src, file)
        #print filepath
        if os.path.isfile(filepath):
            extension = os.path.splitext(filepath)[1]
            #print "extension = " + extension
            if extension == '.cue':
                if not has_cue:
                    has_cue = True
                cue_files.append(filepath)
            elif extension == '.ape' or extension == '.flac' or extension == '.wav':
                if not has_lossess:
                    has_lossess = True
                lossess_files.append(filepath)
            else:
                pass
        else:
            pass
    # create output dir
    if has_cue or has_lossess:
        # delete if exist
        if os.path.exists(des):
            shutil.rmtree(des, onerror=remove_readonly)
        # create des
        os.makedirs(des)
    # convert
    #/Applications/XLD.app/Contents/MacOS/XLD --cmdline -c <test.cue> -f alac -o <output.dir> <filename.ape>
    if has_cue:
        # open cue with xld to des
        for file in cue_files:
            # The cue sheet names the media file it splits; skip the sheet if
            # it declares none.
            lossess_filename = get_filename_from_cue(file)
            if lossess_filename:
                lossess_filepath = os.path.join(src, lossess_filename)
                convert_cue_cmd = "/Applications/XLD.app/Contents/MacOS/XLD --cmdline -c \"" + file + "\" -f alac -o \"" + des + "\" \"" + lossess_filepath + "\""
                print convert_cue_cmd
                os.system(convert_cue_cmd)
            else:
                pass
        # cue conversion done; individual lossless files are intentionally skipped
        return
    #/Applications/XLD.app/Contents/MacOS/XLD --cmdline -f alac -o <output.dir> <filename.ape>
    if has_lossess:
        # open lossess with xld to des
        for file in lossess_files:
            convert_lossless_cmd = "/Applications/XLD.app/Contents/MacOS/XLD --cmdline -f alac -o \"" + des + "\" \"" + file + "\""
            print convert_lossless_cmd
            os.system(convert_lossless_cmd)
        return
def convert_dir(src, des):
    """Recursively mirror *src* under *des*, converting lossless audio in every directory."""
    if not os.path.exists(src):
        print src + "doesn't exist, terminate convert process."
        return
    # create destination dir if not exist
    if not os.path.exists(des):
        os.makedirs(des)
    # convert top level dir
    convert_lossless_in_dir(src, des)
    # walk through
    for root, dirs, files in os.walk(src):
        #print root
        for dir in dirs:
            source_dir = os.path.join(root, dir)
            # NOTE(review): str.replace swaps every occurrence of src in the
            # path, not just the leading prefix -- could mis-map if the src
            # path text repeats deeper in the tree. TODO confirm.
            des_dir = source_dir.replace(src, des)
            #print source_dir
            #print des_dir
            convert_lossless_in_dir(source_dir, des_dir)
# Command-line entry point: lossless2m4a.py <source_dir> <dest_dir>
if __name__ == '__main__':
    src = sys.argv[1]
    des = sys.argv[2]
    #print "input folder = " + src
    #print "output folder =" + des
    convert_dir(src, des)
|
UTF-8
|
Python
| false | false | 2,012 |
10,943,576,684,549 |
869e939bd81434a2b0baf2f92b9ea86dd0f8a369
|
9d979962ea8394c42b4156c70fb9b9c705f510b9
|
/blog/models.py
|
eb7e3b029c93385c5b20a5c8a98a2046b3947463
|
[] |
no_license
|
ilvar/windysun
|
https://github.com/ilvar/windysun
|
bca6843373197c3e0a391bc61b2a2f7ce376159d
|
dfe9acf23d9a67bd8d2225862d6aa8ba6e1e46b3
|
refs/heads/master
| 2021-01-13T01:36:31.300761 | 2014-12-15T03:15:23 | 2014-12-15T03:15:23 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
from django.contrib.comments.moderation import CommentModerator, moderator
from django.db import models
from django.db.models import permalink
from django.contrib.auth.models import User
from django.conf import settings
from django.utils.safestring import mark_safe
from django.contrib.sites.models import Site
from tagging.fields import TagField
from blog.managers import *
from photologue.models import *
from tinymce import models as tinymce_models
import tagging
from meta.models import MetaModel
class Category(models.Model):
    """A blog category; used to group posts and addressable by slug."""
    title = models.CharField(max_length=100)
    slug = models.SlugField(unique=True)  # identifies the category in its URL

    class Meta:
        verbose_name_plural = 'categories'
        db_table = 'blog_categories'
        ordering = ('title',)

    class Admin:
        # old-style (pre-Django-1.0) inner Admin declaration; kept as-is
        pass

    def __unicode__(self):
        return '%s' % self.title

    @permalink
    def get_absolute_url(self):
        # resolved through the named URL pattern 'blog_category_detail'
        return ('blog_category_detail', None, { 'slug':self.slug })
class Post(MetaModel):
    """A blog post: workflow status, tags/categories, gallery and share helpers."""
    # Editorial workflow states; default is Public (2).
    STATUS_CHOICES = (
        (1, 'Draft'),
        (2, 'Public'),
        (3, 'Closed'),
    )
    abstr = models.ImageField(upload_to='abstract', help_text='Image MUST be 191x127px resolution', blank=True, null=True)
    title = models.CharField(max_length=200)
    slug = models.SlugField(unique=True)
    author = models.ForeignKey(User, blank=True, null=True)
    body = tinymce_models.HTMLField()
    tease = models.TextField(blank=True)
    status = models.IntegerField(choices=STATUS_CHOICES, default=2)
    publish = models.DateTimeField()
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)
    categories = models.ManyToManyField(Category, blank=True)
    tags = TagField()
    # custom manager -- presumably restricts to published posts; verify in blog.managers
    objects = ManagerWithPublished()
    gallery = models.ForeignKey(Gallery, blank=True, null=True)

    class Meta:
        db_table = 'blog_posts'
        ordering = ('-publish',)
        get_latest_by = 'publish'

    class Admin:
        # old-style inner Admin declaration; kept as-is
        list_display = ('title', 'publish', 'status')
        list_filter = ('publish', 'categories', 'status')
        search_fields = ('title', 'body')

    def __unicode__(self):
        return self.title

    @permalink
    def get_absolute_url(self):
        """Canonical date-based URL (named pattern 'blog_detail')."""
        return ('blog_detail', None, {
            'year' : self.publish.year,
            'month' : self.publish.month,
            'day' : self.publish.day,
            'slug' : self.slug
        })

    @permalink
    def get_short_url(self):
        """Short pk-based URL (named pattern 'blog_detail_short'), used for sharing."""
        return ('blog_detail_short', None, {
            'id' : self.pk,
        })

    def get_short_title(self):
        """Title truncated so it fits a 140-char tweet next to the short URL."""
        domain = Site.objects.get_current().domain
        # 140 chars minus the domain and a 30-char budget -- presumably for the
        # URL path, separators and the hashtags appended in get_share_links
        twit_len = 140 - len(domain) - 30
        if len(self.title) < twit_len:
            return self.title
        return self.title[0:twit_len] + '...'

    def get_share_links(self):
        """Render Twitter/Facebook/VKontakte share links for this post as safe HTML."""
        site = Site.objects.get_current().domain
        data = {
            'media': settings.MEDIA_URL,
            'url': 'http://' + site + self.get_short_url(),
            'twit': self.get_short_title(),
            'title': self.title,
        }
        result = u""
        result += u"""<a title="Добавить в Twitter" href="http://twitter.com/home?status=%(twit)s:+%(url)s+%%23surfing+%%23bali" target="_blank" rel="nofollow"><img src="%(media)simg/icons/twitter.png"/></a> """ % data
        result += u"""<a title="Добавить в Facebook" href="http://www.facebook.com/sharer.php?u=%(url)s&t=%(title)s" target="blank" rel="nofollow"><img src="%(media)simg/icons/facebook.png"/></a> """ % data
        result += u"""<a title="Добавить в ВКонтакте" href="http://vkontakte.ru/share.php?url=%(url)s&title=%(title)s" target="blank" rel="nofollow"><img src="%(media)simg/icons/vkontakte.png"/></a> """ % data
        return mark_safe(u'<span class="share">%s</span>' % result)
class PostModerator(CommentModerator):
    """Comment policy for Post: e-mail on new comments, auto-close old threads."""
    email_notification = True
    auto_close_field = 'publish'
    # Close the comments 70 days after the post's publish date.
    close_after = 70
#moderator.register(Post, PostModerator)
# Teach South's migration inspector about custom fields it cannot introspect.
from south.modelsinspector import add_introspection_rules
add_introspection_rules([], ["^tinymce\.models\.HTMLField"])
add_introspection_rules([], ["^tagging\.fields\.TagField"])
from django.contrib.comments.moderation import CommentModerator, moderator
from django.contrib.sites.models import Site
from django.conf import settings
class AkismetModerator(CommentModerator):
    """Comment moderator that asks the Akismet web service whether a comment is spam."""

    def check_spam(self, request, comment, key, blog_url=None, base_url=None):
        """Return True if Akismet classifies *comment* as spam, False otherwise.

        Uses settings.AKISMET_API_KEY for the service call; *key* is accepted
        for API compatibility but unused. Falls back to False (ham) when the
        akismet library is unavailable or the API key fails verification.
        """
        try:
            from akismet import Akismet
        except ImportError:
            # akismet library not installed: treat everything as ham.
            # (The original bare 'except:' also swallowed unrelated errors.)
            return False
        if blog_url is None:
            blog_url = 'http://%s/' % Site.objects.get_current().domain
        ak = Akismet(
            key=settings.AKISMET_API_KEY,
            blog_url=blog_url
        )
        if base_url is not None:
            ak.baseurl = base_url
        if ak.verify_key():
            data = {
                'user_ip': request.META.get('REMOTE_ADDR', '127.0.0.1'),
                'user_agent': request.META.get('HTTP_USER_AGENT', ''),
                'referrer': request.META.get('HTTP_REFERER', ''),
                'comment_type': 'comment',
                'comment_author': comment.user_name.encode('utf-8'),
            }
            if ak.comment_check(comment.comment.encode('utf-8'), data=data, build_data=True):
                return True
        return False

    def allow(self, comment, content_object, request):
        """Deny the comment when Akismet reports spam; otherwise defer to the base policy."""
        allow = super(AkismetModerator, self).allow(comment, content_object, request)
        # change this depending on which spam provider you want to use
        spam = self.check_spam(request, comment,
            key=settings.AKISMET_API_KEY,
        )
        return not spam and allow
#try:
# moderator.unregister(Post)
#except:
# pass
#moderator.register(Post, AkismetModerator)
|
UTF-8
|
Python
| false | false | 2,014 |
10,591,389,352,529 |
570d13b2a0d15b8f421463d35233d625f0d69dbc
|
18c0d76bc93fab90a6c3d07d415141ae29d493cd
|
/Gypsy/Environment.py
|
521acb1a7953fedffe5b780f34ddb8d5ad4ced78
|
[] |
no_license
|
hansandersson/gennsing
|
https://github.com/hansandersson/gennsing
|
7450e173f91cf8429351ea2a168c080154d98380
|
d70200e151c2ccf158d583c3e74f7443aabb553f
|
refs/heads/master
| 2021-01-02T22:31:55.284447 | 2012-10-28T18:37:16 | 2012-10-28T18:37:16 | 6,430,603 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#Copyright (c) Hans Andersson 2012
#All rights reserved.
import os, random, sys, math
# Extend the import path so the shared Decision module (one directory up) can
# be imported.
cwd = os.getcwd()
sys.path.append("../")
import Decision
# NOTE(review): nothing above changes the working directory, so this chdir is
# a no-op unless importing Decision chdirs as a side effect -- verify.
os.chdir(cwd)
###################################
### IMPLEMENTATION OF <<GYSPY>> ###
###################################
def getPlayersCountRange(): return (4, 4)
class Game:
    '''
    One Gypsy game among a shuffled group of agents.

    NOTE(review): getDictForPlayer, getCompletion, finalize, getRanking and
    __str__ reference attributes that are never initialised here
    (corporations, tilesPlaced, board, boardCols, boardRows, stuck, console)
    plus helpers like getCorporationsActive() and Tile -- they appear to be
    carried over from a tile-placement game and will raise AttributeError /
    NameError if called. TODO confirm before relying on them.
    '''
    def __init__(self, agents):
        #agents : Agent list or set of 3 or 4 players
        # NOTE(review): accepts 3 players even though getPlayersCountRange()
        # advertises exactly 4 -- confirm which is intended.
        assert type(agents) in (type(list()), type(set())) and \
        (len(agents) == 3 or len(agents) == 4)
        agents = list(agents)
        random.shuffle(agents)
        self.players = [Player(agent) for agent in agents]

    #part of Game "interface"
    #PRIMARY, ABSTRACT METHOD FOR CALLING AT RUNTIME
    def doRound(self): #None
        #lead
        #follow
        # Not implemented yet. 'pass' added because the original comment-only
        # body was a SyntaxError that prevented this module from importing.
        pass

    #represents the game as a dictionary, for use in the neural network, relative to a player's perspective
    def getDictForPlayer(self, player): #float dictionary
        #player : Player
        return {"game":{"safe":min(float(len([corp for corp in self.corporations if corp.isSafe()])) / float(len(self.corporations)), 1.0), "size":min(float(max([len(corp.tiles) for corp in self.corporations])) / 41.0, 1.0), "full":float(len(self.tilesPlaced))/float(self.boardCols*self.boardRows), "oths":1.0 - (1.0/(float(len(self.players))-1.0))}, "agnt":player.getDict()}

    #part of Game "interface"
    #how far gone is the game (used in GeneticArena.autorun() to break the loop when the game is done)
    def getCompletion(self): #boolean
        percentSafe = 100.0 * float(len([corp for corp in self.corporations if corp.isSafe()])) / float(len(self.getCorporationsActive())) if len(self.getCorporationsActive()) > 0 else 0.0
        largestSizeOver41 = 100.0 * float(max([len(corp.tiles) for corp in self.corporations])) / 41.0
        #self.stuck starts each round as None
        #whenever some agent places a tile, self.stuck = False
        #if, at end of player turns, self.stuck still None, then reshuffle tiles;
        #if anybody now has a tile they can place next round, then self.stuck = False
        #finally, at very end of round, if self.stuck still None, then set it True
        #have to use three values because checks getCompletion in middle of round...
        # we want to declare game stuck only at end of round
        return min(max(percentSafe, largestSizeOver41, 100.0 if self.stuck == True else 0.0), 100.0)

    def getPerformance(self): return sum([player.wallet for player in self.players])

    #part of Game "interface"
    #completes any endgame cleanup and returns the list of players in descending order of score
    def finalize(self): #Agent list
        # NOTE(review): despite the comment above, nothing is returned here.
        for corp in self.getCorporationsActive(): corp.payout()
        for player in self.players: player.sellout()

    #part of Game "interface"
    #returns a dictionary that says how well everybody did
    def getRanking(self):
        return [(player.agent, player.wallet) for player in sorted(self.players, key = lambda player: player.wallet, reverse = True)]

    #enables console play (returns a console representation of the game)
    def __str__(self): #string
        console = " " + "".join([str(c).center(3) for c in range(self.boardCols)])
        for r in range(self.boardRows):
            console += "\n" + Tile.nameForRow(r) + " "
            for c in range(self.boardCols):
                console += (self.board[r][c].corporation.consoleColor if (self.board[r][c].corporation != None and self.board[r][c].corporation.consoleColor != None) else "") + " " + ((self.board[r][c].corporation.name[0]) if self.board[r][c].corporation != None else ("*" if self.board[r][c] in self.tilesPlaced else " ")) + " \x1b[0m"
        return self.console + console
class Suit:
    """A playing-card suit, ordered strongest-first: S > H > D > C."""

    @classmethod
    def symbols(cls):
        """One-letter suit symbols, strongest suit first."""
        return ("S", "H", "D", "C")

    @classmethod
    def names(cls):
        """Full suit names, parallel to symbols()."""
        return ("Spades", "Hearts", "Diamonds", "Clubs")

    def __init__(self, symbol):
        assert symbol in self.__class__.symbols()
        self.symbol = symbol
        self.cards = set()

    def __str__(self):
        return self.symbol

    def _position(self):
        # A lower index in symbols() means a stronger suit.
        return self.__class__.symbols().index(self.symbol)

    def __gt__(self, otherSuit):
        assert self.__class__ == otherSuit.__class__
        return self._position() < otherSuit._position()

    def __lt__(self, otherSuit):
        assert self.__class__ == otherSuit.__class__
        return self._position() > otherSuit._position()

    def __eq__(self, otherSuit):
        assert self.__class__ == otherSuit.__class__
        return not self > otherSuit and not self < otherSuit

    def __ne__(self, otherSuit):
        return not self == otherSuit
class Card:
    """A playing card. The 8 of Clubs ("GY") is the Gypsy and outranks everything."""

    # Display names indexed by rank-1: A, 2..10, J, Q, K.
    # The original built this with chained list.append (which returns None),
    # had unbalanced parentheses (a SyntaxError) and omitted ranks 9 and 10.
    RANK_NAMES = ["A"] + [str(n) for n in range(2, 11)] + ["J", "Q", "K"]

    def __init__(self, suit, rank):
        assert not rank < 1 and not rank > 13
        assert suit.__class__ == Suit
        self.rank = rank
        self.suit = suit
        # Register this card with its suit. The original 'suit.cards |= self'
        # raised TypeError because |= requires a set on the right-hand side.
        suit.cards.add(self)

    def isGypsy(self):
        """True for the 8 of Clubs, the wild 'Gypsy' card."""
        return self.suit.symbol == "C" and self.rank == 8

    def __str__(self):
        if self.isGypsy(): return "GY"
        return str(self.suit) + self.RANK_NAMES[self.rank - 1]

    def __gt__(self, otherCard):
        # Gypsy beats everything; otherwise compare rank within a suit,
        # and suit strength across suits.
        if self.isGypsy(): return True
        if self.suit == otherCard.suit: return self.rank > otherCard.rank
        return self.suit > otherCard.suit

    def __eq__(self, otherCard):
        return self.suit == otherCard.suit and self.rank == otherCard.rank

    def __lt__(self, otherCard):
        return not self > otherCard and not self == otherCard

    def __ne__(self, otherCard):
        return self > otherCard or self < otherCard

    # Defining __eq__ without __hash__ makes the class unhashable on Python 3,
    # which would break the suit.cards set above; keep identity hashing
    # (the Python 2 default behavior this code relied on).
    __hash__ = object.__hash__
class Player:
    """One seat at the table: an agent plus its hand of tiles, stock and cash."""

    def __init__(self, agent, wallet=6000):
        """Create a player for *agent* starting with *wallet* dollars (default 6000)."""
        self.agent = agent
        self.tiles = set()
        self.stock = set()
        self.wallet = wallet

    def buy(self, certificate):
        """Buy an unowned *certificate* at its corporation's current stock price."""
        assert certificate.owner is None
        assert self.wallet >= certificate.corporation.getStockPrice()
        self.stock.add(certificate)
        certificate.owner = self
        self.wallet -= certificate.corporation.getStockPrice()

    def sell(self, certificate):
        """Sell a held *certificate* back at the current stock price."""
        assert certificate in self.stock
        assert certificate.owner is self
        self.stock.remove(certificate)
        certificate.owner = None
        self.wallet += certificate.corporation.getStockPrice()

    def draw(self, tiles):
        """Move one random tile from *tiles* into this player's hand and return it."""
        assert tiles
        picked = random.choice(list(tiles))
        tiles.remove(picked)
        self.tiles.add(picked)
        assert len(self.tiles) <= 7  # hand limit
        return picked

    def getDict(self):
        """Neural-network feature dict: normalized liquidity in [0, 1)."""
        assert self.wallet >= 0
        liquidity = self.wallet / (6000.0 + self.wallet)
        return {"lqdt": liquidity}

    def sellout(self):
        """End-of-game cleanup: liquidate every certificate still held."""
        for held in list(self.stock):
            self.sell(held)
|
UTF-8
|
Python
| false | false | 2,012 |
7,945,689,510,211 |
06b9c3cadf238481452503113e312fc77d703b07
|
ce555e78e8040812d8c4678805b488ce564a0fae
|
/source/paineldabolsa/urls.py
|
21063f1e2a4622b800c68c32bb40154197c63a59
|
[] |
no_license
|
fernandoe/paineldabolsa-server
|
https://github.com/fernandoe/paineldabolsa-server
|
2c8686d09fa179673cec740af69876c775d03859
|
6f54d918d9696396f913f8713444b19663c2ccc3
|
refs/heads/master
| 2020-04-24T18:55:05.728702 | 2014-11-04T10:09:23 | 2014-11-04T10:09:23 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding:utf-8 -*-
from django.conf.urls import patterns, url
# URL routes for the paineldabolsa app; view names are resolved as strings
# inside paineldabolsa.views (old-style patterns() prefix form).
urlpatterns = patterns('paineldabolsa.views',
    url(r'^$', 'paineldabolsa', name="paineldabolsa"),
    # papel_codigo: a stock ticker of up to 10 arbitrary characters
    url(r'^grafico/(?P<papel_codigo>.{0,10})/$', 'grafico', name="grafico"),
    url(r'^ibovespa/$', 'paineldabolsa_ibovespa', name="paineldabolsa_ibovespa"),
)
|
UTF-8
|
Python
| false | false | 2,014 |
17,051,020,172,076 |
2a96111f9a81c210f4afb340891733e862467ffb
|
6e51ae2bf2cc33bedd79e8892a1fb9283edd4d9b
|
/python/python-cookbook-master/src/1/removing_duplicates_from_a_sequence_while_maintaining_order/example2
|
1abc6827a7aa45309f0ffede11cb9901061a4b4b
|
[] |
no_license
|
yandongxiao/scripts
|
https://github.com/yandongxiao/scripts
|
ae9ac9371dccbe6511caf3fdf4091bbd1fba2db5
|
a27973b623b8e22402de62bfa3691311f303f499
|
refs/heads/master
| 2015-08-13T03:39:53.989382 | 2014-11-12T15:31:40 | 2014-11-12T15:31:40 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#! /usr/bin/python
# Demo: de-duplicate a sequence while preserving first-seen order.
#a = [1, 5, 2, 1, 9, 1, 5, 10]
a = [
    {'x': 2, 'y': 3},
    {'x': 1, 'y': 4},
    {'x': 2, 'y': 3},
    {'x': 2, 'y': 3},
    {'x': 10, 'y': 15}
]
b = list()
# Naive O(n^2) de-dup (each 'in' test scans b) that keeps first-seen order;
# works on unhashable items like these dicts.
for x in a:
    if x not in b:
        b.append(x)
print(a)
print(b)
def unique(iterable, key=None):
    """Yield elements of *iterable*, skipping duplicates, preserving order.

    key: optional function mapping an element to the (hashable) value used
    for duplicate detection -- e.g. a tuple of fields for unhashable dicts.
    Defaults to None, meaning the element itself is used and must be hashable.
    The first occurrence of each value is the one yielded.
    """
    seen = set()
    for elmt in iterable:
        val = elmt if key is None else key(elmt)
        if val not in seen:
            yield elmt
            seen.add(val)
print( list(unique(a, key=lambda a: (a['x'], a['y'])) ) )
|
UTF-8
|
Python
| false | false | 2,014 |
11,149,735,132,236 |
2d0c9953796565d3194f85550c86b4b92aa21fa4
|
84d784ec11023ef0973f973e4f0074654d2a2279
|
/yamcat/yamcatConfig.py
|
6e0120daf8bf1f2cc4353c611151a7519dd13268
|
[
"GPL-3.0-only"
] |
non_permissive
|
thedudeguy/yaMCat
|
https://github.com/thedudeguy/yaMCat
|
3b1b2dbf7cd925532fcdb2dc0436dc30487551f4
|
a2cf22815bfb2891f6d77163efc6659f0dae84d6
|
refs/heads/master
| 2016-08-03T09:32:05.555888 | 2011-08-10T14:38:05 | 2011-08-10T14:38:05 | 2,116,893 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Copyright (C) 2011 Chris Churchwell
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import ConfigParser
import subprocess
from threading import Timer
class YamcatConfig(object):
    """Loads, defaults and persists yaMCat's config.cfg and owns the periodic
    rdiff-backup timer.

    Python 2 code (octal literals such as 0755, print statements, unicode()).
    """
    def __init__(self):
        # When frozen (bundled executable) the config lives next to the
        # executable; otherwise next to this source file.
        if hasattr(sys, "frozen"):
            cwd = os.path.dirname(os.path.abspath(unicode(sys.executable, sys.getfilesystemencoding( ))))
        else:
            cwd = os.path.dirname(os.path.abspath(__file__))
        self.root_dir = cwd
        self.config = ConfigParser.SafeConfigParser()
        self.config.read(os.path.join(self.root_dir, "config.cfg"))
        #url for downloading
        self.minecraft_url = "http://www.minecraft.net/download/minecraft_server.jar"
        self.bukkit_url = "http://ci.bukkit.org/job/dev-CraftBukkit/promotion/latest/Recommended/artifact/target/craftbukkit-0.0.1-SNAPSHOT.jar"
        #version
        self.yamcat_version = "0.1"
        #folder paths
        self.yamcat_dir = self.root_dir
        self.ui_dir = os.path.join(self.yamcat_dir, "web")
        self.assets_dir = os.path.join(self.yamcat_dir, "assets")
        self.templates_dir = os.path.join(self.yamcat_dir, "templates")
        self.backup_dir = os.path.join(self.yamcat_dir, "backups")
        self.rdiff_backup_dir = os.path.join(self.yamcat_dir, "rdiff_backups")
        self.minecraft_dir = os.path.join(self.yamcat_dir, "minecraft")
        self.plugins_dir = os.path.join(self.minecraft_dir, "plugins")
        self.world_dir = os.path.join(self.minecraft_dir, "world")
        self.nether_world_dir = os.path.join(self.minecraft_dir, "world_nether")
        #set defaults
        self.setDefaults()
        #update the config file
        self.writeConfig()
        #check dirs
        #first thing to do, is to check that all the directories are present
        if not os.path.exists(self.backup_dir):
            os.makedirs(self.backup_dir, 0755)
        if not os.path.exists(self.rdiff_backup_dir):
            os.makedirs(self.rdiff_backup_dir, 0755)
        if not os.path.exists(self.minecraft_dir):
            os.makedirs(self.minecraft_dir, 0755)
        if not os.path.exists(self.plugins_dir):
            os.makedirs(self.plugins_dir, 0755)
        if not os.path.exists(self.world_dir):
            os.makedirs(self.world_dir, 0755)
        if not os.path.exists(self.nether_world_dir):
            os.makedirs(self.nether_world_dir, 0755)
        # locate rdiff-backup automatically on first run
        if not self.is_rdiff_set():
            self.auto_config_rdiff()
    def str2bool(self, string):
        """Map common string spellings to a bool; returns None for anything else."""
        trues = ["1", "true", "True"]
        falses =["0", "false","False"]
        if string in trues:
            return True
        if string in falses:
            return False
    def set(self, section, option, value):
        """Set an option, stringifying the value for ConfigParser."""
        self.config.set(section, option, str(value))
    def get(self, section, option):
        """Raw string value of an option."""
        return self.config.get(section, option)
    def getint(self, section, option):
        """Option value coerced to int."""
        return self.config.getint(section, option)
    def getfloat(self, section, option):
        """Option value coerced to float."""
        return self.config.getfloat(section, option)
    def getboolean(self, section, option):
        """Option value coerced to bool via str2bool (None if unrecognized)."""
        #return self.str2bool(str(self.config.get(section, option)))
        return self.str2bool(self.config.get(section, option))
    """
    def getstr(self, section, option):
        return str(self.config.get(section, option))
    """
    def setDefaults(self):
        """Add any missing sections/options with default values (existing values are kept)."""
        if self.config.has_section("Yamcat")==False:
            self.config.add_section("Yamcat")
        if self.config.has_option("Yamcat", "theme")==False:
            self.set("Yamcat", "theme", "boreme")
        if self.config.has_option("Yamcat", "ip")==False:
            self.set("Yamcat", "ip", "0.0.0.0")
        if self.config.has_option("Yamcat", "port")==False:
            self.set("Yamcat", "port", 8080)
        if self.config.has_option("Yamcat", "username")==False:
            self.set("Yamcat", "username", "")
        if self.config.has_option("Yamcat", "password")==False:
            self.set("Yamcat", "password", "")
        # NOTE(review): '==True' overwrites dev-mode only when it already
        # exists -- every other option uses '==False'; probably a typo.
        if self.config.has_option("Yamcat", "dev-mode")==True:
            self.set("Yamcat", "dev-mode", True)
        if self.config.has_section("Console")==False:
            self.config.add_section("Console")
        if self.config.has_option("Console", "read-time")==False:
            self.set("Console", "read-time", 0.5)
        if self.config.has_option("Console", "read-time-burst")==False:
            self.set("Console", "read-time-burst", 0.1)
        if self.config.has_section("Backups")==False:
            self.config.add_section("Backups")
        if self.config.has_option("Backups", "rdiff-location")==False:
            self.set("Backups", "rdiff-location", "")
        if self.config.has_option("Backups", "auto-backups")==False:
            self.set("Backups", "auto-backups", False)
        if self.config.has_option("Backups", "auto-backup-hours")==False:
            self.set("Backups", "auto-backup-hours", 1)
        if self.config.has_section("Minecraft")==False:
            self.config.add_section("Minecraft")
        if self.config.has_option("Minecraft", "auto-start")==False:
            self.set("Minecraft", "auto-start", True)
        if self.config.has_option("Minecraft", "enable-bukkit")==False:
            self.set("Minecraft", "enable-bukkit", False)
        if self.config.has_option("Minecraft", "spawn-protection")==False:
            self.set("Minecraft", "spawn-protection", 16)
        if self.config.has_option("Minecraft", "memory_usage")==False:
            self.set("Minecraft", "memory_usage", 1024)
        if self.config.has_option("Minecraft", "allow-nether")==False:
            self.set("Minecraft", "allow-nether", True)
        if self.config.has_option("Minecraft", "view-distance")==False:
            self.set("Minecraft", "view-distance", 10)
        if self.config.has_option("Minecraft", "spawn-monsters")==False:
            self.set("Minecraft", "spawn-monsters", True)
        if self.config.has_option("Minecraft", "online-mode")==False:
            self.set("Minecraft", "online-mode", True)
        if self.config.has_option("Minecraft", "spawn-animals")==False:
            self.set("Minecraft", "spawn-animals", True)
        if self.config.has_option("Minecraft", "max-players")==False:
            self.set("Minecraft", "max-players", 20)
        if self.config.has_option("Minecraft", "pvp")==False:
            self.set("Minecraft", "pvp", True)
        if self.config.has_option("Minecraft", "allow-flight")==False:
            self.set("Minecraft", "allow-flight", False)
        if self.config.has_option("Minecraft", "level-name")==False:
            self.set("Minecraft", "level-name", "world")
        if self.config.has_option("Minecraft", "level-seed")==False:
            self.set("Minecraft", "level-seed", "")
        if self.config.has_option("Minecraft", "server-name")==False:
            self.set("Minecraft", "server-name", "Minecraft Server")
        if self.config.has_option("Minecraft", "server-ip")==False:
            self.set("Minecraft", "server-ip", "")
        if self.config.has_option("Minecraft", "server-port")==False:
            self.set("Minecraft", "server-port", "25565")
        if self.config.has_option("Minecraft", "white-list")==False:
            self.set("Minecraft", "white-list", False)
    def writeConfig(self):
        """Persist the in-memory configuration back to config.cfg."""
        with open(os.path.join(self.yamcat_dir, 'config.cfg'), 'wb') as configfile:
            self.config.write(configfile)
    def which(self, program):
        """Search PATH for *program*; return its full path or None if not found."""
        def is_exe(fpath):
            return os.path.exists(fpath) and os.access(fpath, os.X_OK)
        for path in os.environ["PATH"].split(os.pathsep):
            exe_file = os.path.join(path, program)
            if is_exe(exe_file):
                return exe_file
        return None
    def is_rdiff_set(self):
        """True when a Backups/rdiff-location has been configured (non-empty)."""
        if (self.get("Backups", "rdiff-location") == ""):
            return False
        return True
    def auto_config_rdiff(self):
        """Locate rdiff-backup on PATH, store and persist it; return True on success."""
        #check the os path to see if it can find rdiff
        #an exe for windows
        rdiff = self.which("rdiff-backup.exe")
        if not rdiff == None:
            self.set("Backups", "rdiff-location", rdiff)
            self.writeConfig()
            return True
        #check for unix
        rdiff = self.which("rdiff-backup")
        if not rdiff == None:
            self.set("Backups", "rdiff-location", rdiff)
            self.writeConfig()
            return True
        return False
    def start_timer(self):
        """Run a backup now and reschedule this method every auto-backup-hours hours.

        No-op unless auto-backups is enabled and rdiff-backup is configured;
        an interval of 0 hours disables rescheduling.
        """
        if self.getboolean("Backups", "auto-backups") == True:
            if self.is_rdiff_set() == True:
                print "starting timer..."
                time = self.getint("Backups", "auto-backup-hours")
                if time == 0:
                    return
                self.rdiff_backup()
                #get seconds from hours
                seconds = time * 60 * 60
                # the timer re-invokes start_timer, forming a repeating schedule
                #self.backup_timer = Timer(seconds, self.test)
                self.backup_timer = Timer(seconds, self.start_timer)
                self.backup_timer.start()
    def stop_timer(self):
        """Cancel the pending backup timer, tolerating it not being started."""
        print "stopping timer..."
        try:
            self.backup_timer.cancel()
            self.backup_timer.join()
        except Exception as e:
            print "Timer not running"
    def reset_timer(self):
        """Restart the backup schedule (stop, then start)."""
        print "resetting timer..."
        self.stop_timer()
        self.start_timer()
    def rdiff_backup(self):
        """Run rdiff-backup from the minecraft dir into the rdiff backup dir.

        Best-effort: failures are deliberately swallowed (see the commented
        log lines); no-op unless auto-backups and rdiff-location are set.
        """
        if self.getboolean("Backups", "auto-backups") == True:
            if self.is_rdiff_set() == True:
                print "doing backup..."
                rdiff_location = self.get("Backups", "rdiff-location")
                mc_dir = self.minecraft_dir
                bk_dir = self.rdiff_backup_dir
                try:
                    subprocess.check_call([
                        rdiff_location,
                        mc_dir,
                        bk_dir
                    ])
                    #self.log += "Server backed up with rdiff\n"
                except Exception as e:
                    pass
                    #self.log += "Server backup with rdiff failed\n"
|
UTF-8
|
Python
| false | false | 2,011 |
19,722,489,824,324 |
d97060538d8de435636e619d02c352823a92f6bb
|
c7b2206085481c7525066aa9da534dc92f02dd1b
|
/master_sync_fkie/src/master_sync_fkie/master_sync.py
|
165f140043a1fab208f979a92cdff5bb9418c3f5
|
[] |
no_license
|
mjschuster/multimaster_fkie
|
https://github.com/mjschuster/multimaster_fkie
|
d46839a1367c2cfe314f8285b33d394203c1a6a7
|
c33f8dfe73cace7f5211f884d45888010118a64a
|
refs/heads/master
| 2020-04-08T16:39:15.460188 | 2013-11-12T17:03:39 | 2013-11-12T17:03:39 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Fraunhofer FKIE/US, Alexander Tiderko
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Fraunhofer nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import threading
import sys
import time
import xmlrpclib
import roslib; roslib.load_manifest('master_sync_fkie')
import rospy
from sync_thread import SyncThread
from master_discovery_fkie.msg import *
from master_discovery_fkie.srv import *
import master_discovery_fkie.interface_finder as interface_finder
class Main(object):
    '''
    Coordinates synchronization of the local ROS master with remote ROS
    masters announced by master_discovery nodes.  Keeps one L{SyncThread}
    per remote master in C{self.masters} and refreshes the list both from
    MasterState messages and from a periodic 'list_masters' service poll.
    '''

    def __init__(self):
        '''
        Creates a new instance. Find the topic of the master_discovery node using
        L{master_discovery_fkie.interface_finder.get_changes_topic()}. Also the
        parameter C{~ignore_hosts} will be analyzed to exclude hosts from sync.
        '''
        # mapping: master name -> SyncThread handling that remote master
        self.masters = {}
        # the connection to the local service master
        self.materuri = self.getMasteruri()
        '''@ivar: the ROS master URI of the C{local} ROS master. '''
        # guards self.masters against concurrent access from the subscriber
        # callback, the timer-driven poll and the service callback
        self.__lock = threading.RLock()
        self.ignore = []
        '''@ivar: the list with host names, which are not sync.'''
        if rospy.has_param('~ignore_hosts'):
            # extend (not replace) the ignore list with the configured hosts
            self.ignore[len(self.ignore):] = rospy.get_param('~ignore_hosts')
        topic_names = interface_finder.get_changes_topic(self.getMasteruri())
        self.sub_changes = dict()
        '''@ivar: {dict} with topics C{(name: L{rospy.Subscriber})} publishes the changes of the discovered ROS masters.'''
        for topic_name in topic_names:
            rospy.loginfo("listen for updates on %s", topic_name)
            self.sub_changes[topic_name] = rospy.Subscriber(topic_name, MasterState, self.handlerMasterStateMsg)
        rospy.on_shutdown(self.finish)
        # initialize the ROS services
        rospy.Service('~get_sync_info', GetSyncInfo, self.rosservice_get_sync_info)
        self.update_timer = None
        # initial poll; also schedules the periodic re-poll timer
        self.retrieveMasters()

    def handlerMasterStateMsg(self, data):
        '''
        The method to handle the received MasterState messages. Based on this message
        new threads to synchronize with remote ROS master will be created, updated or
        removed.
        @param data: the received message
        @type data: L{master_discovery_fkie.MasterState}
        '''
        try:
            self.__lock.acquire()
            if data.state in [MasterState.STATE_REMOVED]:
                self.removeMaster(data.master.name)
            elif data.state in [MasterState.STATE_NEW, MasterState.STATE_CHANGED]:
                m = data.master
                self.updateMaster(m.name, m.uri, m.timestamp, m.discoverer_name, m.monitoruri)
        finally:
            self.__lock.release()

    def getMasteruri(self):
        '''
        Requests the ROS master URI from the ROS master through the RPC interface and
        returns it. The 'materuri' attribute will be set to the requested value.
        @return: ROS master URI
        @rtype: C{str} or C{None}
        '''
        # lazily resolve and cache the URI; __init__ calls this before the
        # attribute exists, hence the hasattr() guard
        if not hasattr(self, 'materuri') or self.materuri is None:
            masteruri = self._masteruri_from_ros()
            master = xmlrpclib.ServerProxy(masteruri)
            code, message, self.materuri = master.getUri(rospy.get_name())
        return self.materuri

    def _masteruri_from_ros(self):
        '''
        Returns the master URI depending on ROS distribution API.
        @return: ROS master URI
        @rtype: C{str}
        @see: L{rosgraph.rosenv.get_master_uri()} (fuerte)
        @see: L{roslib.rosenv.get_master_uri()} (prior)
        '''
        try:
            import rospkg.distro
            distro = rospkg.distro.current_distro_codename()
            if distro in ['electric', 'diamondback', 'cturtle']:
                # pre-fuerte distributions expose the URI through roslib
                return roslib.rosenv.get_master_uri()
            else:
                import rosgraph
                return rosgraph.rosenv.get_master_uri()
        except:
            # any failure (e.g. rospkg missing): fall back to the old API
            return roslib.rosenv.get_master_uri()

    def retrieveMasters(self):
        '''
        This method use the service 'list_masters' of the master_discoverer to get
        the list of discovered ROS master. Based on this list the L{SyncThread} for
        synchronization will be created.
        @see: L{master_discovery_fkie.interface_finder.get_listmaster_service()}
        '''
        service_names = interface_finder.get_listmaster_service(self.getMasteruri(), False)
        for service_name in service_names:
            rospy.loginfo("service 'list_masters' found on %s", service_name)
            self.__lock.acquire()
            try:
                # rospy.wait_for_service(service_name)
                try:
                    discoverMasters = rospy.ServiceProxy(service_name, DiscoverMasters)
                    resp = discoverMasters()
                    masters = []
                    for m in resp.masters:
                        if not m.name in self.ignore: # do not sync to the master, if it is in ignore list
                            masters.append(m.name)
                            self.updateMaster(m.name, m.uri, m.timestamp, m.discoverer_name, m.monitoruri)
                    # drop sync threads for masters no longer reported
                    for key in set(self.masters.keys()) - set(masters):
                        self.removeMaster(self.masters[key].name)
                except rospy.ServiceException, e:
                    rospy.logwarn("ERROR Service call 'list_masters' failed: %s", str(e))
            except:
                import traceback
                rospy.logwarn("ERROR while initial list masters: %s", traceback.format_exc())
            finally:
                self.__lock.release()
        # re-poll every 15 seconds; the timer is cancelled in finish()
        self.update_timer = threading.Timer(15.0, self.retrieveMasters)
        self.update_timer.start()

    def updateMaster(self, mastername, masteruri, timestamp, discoverer_name, monitoruri):
        '''
        Updates the timestamp of the given ROS master, or creates a new SyncThread to
        synchronize the local master with given ROS master.
        @param mastername: the name of the remote ROS master to update or synchronize.
        @type mastername: C{str}
        @param masteruri: the URI of the remote ROS master.
        @type masteruri: C{str}
        @param timestamp: the timestamp of the remote ROS master.
        @type timestamp: L{float64}
        @param discoverer_name: the name of the remote master_discoverer node
        @type discoverer_name: C{str}
        @param monitoruri: the URI of the RPC interface of the remote master_discoverer node.
        @type monitoruri: C{str}
        '''
        self.__lock.acquire()
        try:
            # never create a sync thread for the local master itself
            if (masteruri != self.materuri) and not mastername in self.ignore: # do not sync to the master, if it is in ignore list
                # print "--update:", ros_master.uri, mastername
                if (mastername in self.masters):
                    self.masters[mastername].update(mastername, masteruri, discoverer_name, monitoruri, timestamp)
                else:
                    # print "add a sync thread to:", mastername, ros_master.uri
                    # new thread starts with timestamp 0.0 to force a first sync
                    self.masters[mastername] = SyncThread(mastername, masteruri, discoverer_name, monitoruri, 0.0)
        except:
            import traceback
            rospy.logwarn("ERROR while update master[%s]: %s", str(mastername), traceback.format_exc())
        finally:
            self.__lock.release()

    def removeMaster(self, ros_master_name):
        '''
        Removes the master with given name from the synchronization list.
        @param ros_master_name: the name of the ROS master to remove.
        @type ros_master_name: C{str}
        '''
        self.__lock.acquire()
        try:
            if (ros_master_name in self.masters):
                m = self.masters.pop(ros_master_name)
                m.stop()
                del m
        except Exception:
            import traceback
            rospy.logwarn("ERROR while removing master[%s]: %s", ros_master_name, traceback.format_exc())
        finally:
            self.__lock.release()

    def finish(self):
        '''
        Removes all remote masters and unregister their topics and services.
        '''
        rospy.logdebug("Stop synchronization")
        self.__lock.acquire()
        if not self.update_timer is None:
            self.update_timer.cancel()
        for key in self.masters.keys():
            m = self.masters[key]
            m.stop()
            del m
        if hasattr(self, "sub_changes"):
            # unregister the MasterState subscribers created in __init__
            for key, item in self.sub_changes.items():
                item.unregister()
        self.__lock.release()

    def rosservice_get_sync_info(self, req):
        '''
        Callback for the ROS service to get the info to synchronized nodes.
        '''
        masters = list()
        self.__lock.acquire()
        try:
            for (mastername, s) in self.masters.iteritems():
                (nodes, service) = s.getSyncInfo()
                masters.append(SyncMasterInfo(s.masterInfo.uri, nodes, service))
        except:
            import traceback
            traceback.print_exc()
        finally:
            self.__lock.release()
        return GetSyncInfoResponse(masters)
|
UTF-8
|
Python
| false | false | 2,013 |
10,660,108,856,852 |
8a35cbe6573979d53977a0dfe63199054121b9bf
|
e6a08e84fb02fc1632253f95f3acf4335dcdbd1d
|
/robocup_stacks/iri/estirabot_msgs/src/estirabot_msgs/srv/_RepresentationToString.py
|
b49c1d376bed7881d4dc35be1cfe2a2d260c8247
|
[] |
no_license
|
pxlong/robocup-code
|
https://github.com/pxlong/robocup-code
|
9c83d10560ca05b7c877115a14e0520e88c42adb
|
d0a3913298a62c4dcce347cdba2ee5c9771c664f
|
refs/heads/master
| 2017-04-29T01:19:40.864602 | 2013-12-11T09:25:59 | 2013-12-11T09:25:59 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""autogenerated by genpy from estirabot_msgs/RepresentationToStringRequest.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import estirabot_msgs.msg
import iri_perception_msgs.msg
# NOTE: autogenerated genpy serialization code ("Do not edit") -- the exact
# field order and struct formats define the ROS wire format; regenerate from
# the .srv definition instead of hand-editing.
class RepresentationToStringRequest(genpy.Message):
  _md5sum = "2d8b4c9001b9bfee1329ea66851228ae"
  _type = "estirabot_msgs/RepresentationToStringRequest"
  _has_header = False #flag to mark the presence of a Header object
  _full_text = """estirabot_msgs/DirtyArea[] dirty_areas
estirabot_msgs/PointsDistanceMsg[] distances
estirabot_msgs/TraversedEllipses[] traversed_ellipses
================================================================================
MSG: estirabot_msgs/DirtyArea
int32 id
estirabot_msgs/Ellipse ellipse
bool sparse
uint8 area
uint8 shape
================================================================================
MSG: estirabot_msgs/Ellipse
iri_perception_msgs/ImagePoint center
iri_perception_msgs/ImageSize size
float64 angle
================================================================================
MSG: iri_perception_msgs/ImagePoint
uint32 x
uint32 y
================================================================================
MSG: iri_perception_msgs/ImageSize
uint32 width
uint32 height
================================================================================
MSG: estirabot_msgs/PointsDistanceMsg
uint32 origIdx
uint32 dstIdx
float64 distance
================================================================================
MSG: estirabot_msgs/TraversedEllipses
int32 idx1
int32 idx2
int32[] traversedIdxs
"""
  __slots__ = ['dirty_areas','distances','traversed_ellipses']
  _slot_types = ['estirabot_msgs/DirtyArea[]','estirabot_msgs/PointsDistanceMsg[]','estirabot_msgs/TraversedEllipses[]']

  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommend
    use is keyword arguments as this is more robust to future message
    changes.  You cannot mix in-order arguments and keyword arguments.

    The available fields are:
       dirty_areas,distances,traversed_ellipses

    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      super(RepresentationToStringRequest, self).__init__(*args, **kwds)
      #message fields cannot be None, assign default values for those that are
      if self.dirty_areas is None:
        self.dirty_areas = []
      if self.distances is None:
        self.distances = []
      if self.traversed_ellipses is None:
        self.traversed_ellipses = []
    else:
      self.dirty_areas = []
      self.distances = []
      self.traversed_ellipses = []

  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types

  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    try:
      # dirty_areas: uint32 count, then each element field by field
      length = len(self.dirty_areas)
      buff.write(_struct_I.pack(length))
      for val1 in self.dirty_areas:
        buff.write(_struct_i.pack(val1.id))
        _v1 = val1.ellipse
        _v2 = _v1.center
        _x = _v2
        buff.write(_struct_2I.pack(_x.x, _x.y))
        _v3 = _v1.size
        _x = _v3
        buff.write(_struct_2I.pack(_x.width, _x.height))
        buff.write(_struct_d.pack(_v1.angle))
        _x = val1
        buff.write(_struct_3B.pack(_x.sparse, _x.area, _x.shape))
      # distances: fixed-size records, uint32 count prefix
      length = len(self.distances)
      buff.write(_struct_I.pack(length))
      for val1 in self.distances:
        _x = val1
        buff.write(_struct_2Id.pack(_x.origIdx, _x.dstIdx, _x.distance))
      # traversed_ellipses: each carries its own variable-length int32 array
      length = len(self.traversed_ellipses)
      buff.write(_struct_I.pack(length))
      for val1 in self.traversed_ellipses:
        _x = val1
        buff.write(_struct_2i.pack(_x.idx1, _x.idx2))
        length = len(val1.traversedIdxs)
        buff.write(_struct_I.pack(length))
        pattern = '<%si'%length
        buff.write(struct.pack(pattern, *val1.traversedIdxs))
    except struct.error as se: self._check_types(se)
    except TypeError as te: self._check_types(te)

  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    try:
      if self.dirty_areas is None:
        self.dirty_areas = None
      if self.distances is None:
        self.distances = None
      if self.traversed_ellipses is None:
        self.traversed_ellipses = None
      end = 0
      # dirty_areas
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      self.dirty_areas = []
      for i in range(0, length):
        val1 = estirabot_msgs.msg.DirtyArea()
        start = end
        end += 4
        (val1.id,) = _struct_i.unpack(str[start:end])
        _v4 = val1.ellipse
        _v5 = _v4.center
        _x = _v5
        start = end
        end += 8
        (_x.x, _x.y,) = _struct_2I.unpack(str[start:end])
        _v6 = _v4.size
        _x = _v6
        start = end
        end += 8
        (_x.width, _x.height,) = _struct_2I.unpack(str[start:end])
        start = end
        end += 8
        (_v4.angle,) = _struct_d.unpack(str[start:end])
        _x = val1
        start = end
        end += 3
        (_x.sparse, _x.area, _x.shape,) = _struct_3B.unpack(str[start:end])
        val1.sparse = bool(val1.sparse)
        self.dirty_areas.append(val1)
      # distances
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      self.distances = []
      for i in range(0, length):
        val1 = estirabot_msgs.msg.PointsDistanceMsg()
        _x = val1
        start = end
        end += 16
        (_x.origIdx, _x.dstIdx, _x.distance,) = _struct_2Id.unpack(str[start:end])
        self.distances.append(val1)
      # traversed_ellipses
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      self.traversed_ellipses = []
      for i in range(0, length):
        val1 = estirabot_msgs.msg.TraversedEllipses()
        _x = val1
        start = end
        end += 8
        (_x.idx1, _x.idx2,) = _struct_2i.unpack(str[start:end])
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        pattern = '<%si'%length
        start = end
        end += struct.calcsize(pattern)
        val1.traversedIdxs = struct.unpack(pattern, str[start:end])
        self.traversed_ellipses.append(val1)
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill

  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    try:
      length = len(self.dirty_areas)
      buff.write(_struct_I.pack(length))
      for val1 in self.dirty_areas:
        buff.write(_struct_i.pack(val1.id))
        _v7 = val1.ellipse
        _v8 = _v7.center
        _x = _v8
        buff.write(_struct_2I.pack(_x.x, _x.y))
        _v9 = _v7.size
        _x = _v9
        buff.write(_struct_2I.pack(_x.width, _x.height))
        buff.write(_struct_d.pack(_v7.angle))
        _x = val1
        buff.write(_struct_3B.pack(_x.sparse, _x.area, _x.shape))
      length = len(self.distances)
      buff.write(_struct_I.pack(length))
      for val1 in self.distances:
        _x = val1
        buff.write(_struct_2Id.pack(_x.origIdx, _x.dstIdx, _x.distance))
      length = len(self.traversed_ellipses)
      buff.write(_struct_I.pack(length))
      for val1 in self.traversed_ellipses:
        _x = val1
        buff.write(_struct_2i.pack(_x.idx1, _x.idx2))
        length = len(val1.traversedIdxs)
        buff.write(_struct_I.pack(length))
        pattern = '<%si'%length
        # numpy arrays serialize their raw buffer directly
        buff.write(val1.traversedIdxs.tostring())
    except struct.error as se: self._check_types(se)
    except TypeError as te: self._check_types(te)

  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    try:
      if self.dirty_areas is None:
        self.dirty_areas = None
      if self.distances is None:
        self.distances = None
      if self.traversed_ellipses is None:
        self.traversed_ellipses = None
      end = 0
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      self.dirty_areas = []
      for i in range(0, length):
        val1 = estirabot_msgs.msg.DirtyArea()
        start = end
        end += 4
        (val1.id,) = _struct_i.unpack(str[start:end])
        _v10 = val1.ellipse
        _v11 = _v10.center
        _x = _v11
        start = end
        end += 8
        (_x.x, _x.y,) = _struct_2I.unpack(str[start:end])
        _v12 = _v10.size
        _x = _v12
        start = end
        end += 8
        (_x.width, _x.height,) = _struct_2I.unpack(str[start:end])
        start = end
        end += 8
        (_v10.angle,) = _struct_d.unpack(str[start:end])
        _x = val1
        start = end
        end += 3
        (_x.sparse, _x.area, _x.shape,) = _struct_3B.unpack(str[start:end])
        val1.sparse = bool(val1.sparse)
        self.dirty_areas.append(val1)
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      self.distances = []
      for i in range(0, length):
        val1 = estirabot_msgs.msg.PointsDistanceMsg()
        _x = val1
        start = end
        end += 16
        (_x.origIdx, _x.dstIdx, _x.distance,) = _struct_2Id.unpack(str[start:end])
        self.distances.append(val1)
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      self.traversed_ellipses = []
      for i in range(0, length):
        val1 = estirabot_msgs.msg.TraversedEllipses()
        _x = val1
        start = end
        end += 8
        (_x.idx1, _x.idx2,) = _struct_2i.unpack(str[start:end])
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        pattern = '<%si'%length
        start = end
        end += struct.calcsize(pattern)
        val1.traversedIdxs = numpy.frombuffer(str[start:end], dtype=numpy.int32, count=length)
        self.traversed_ellipses.append(val1)
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
# Pre-compiled struct packers shared by the (de)serialization methods above;
# all formats are little-endian per the ROS wire format.
_struct_I = genpy.struct_I
_struct_d = struct.Struct("<d")     # float64 (ellipse angle, distance)
_struct_i = struct.Struct("<i")     # int32 (DirtyArea.id)
_struct_2I = struct.Struct("<2I")   # two uint32 (point x/y, size w/h)
_struct_3B = struct.Struct("<3B")   # three uint8 (sparse, area, shape)
_struct_2Id = struct.Struct("<2Id") # two uint32 + float64 (PointsDistanceMsg)
_struct_2i = struct.Struct("<2i")   # two int32 (TraversedEllipses idx1/idx2)
"""autogenerated by genpy from estirabot_msgs/RepresentationToStringResponse.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
# NOTE: autogenerated genpy serialization code ("Do not edit") -- regenerate
# from the .srv definition instead of hand-editing.
class RepresentationToStringResponse(genpy.Message):
  _md5sum = "5927826b25b95e12353eee87a92ed4ac"
  _type = "estirabot_msgs/RepresentationToStringResponse"
  _has_header = False #flag to mark the presence of a Header object
  _full_text = """string state_string
"""
  __slots__ = ['state_string']
  _slot_types = ['string']

  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommend
    use is keyword arguments as this is more robust to future message
    changes.  You cannot mix in-order arguments and keyword arguments.

    The available fields are:
       state_string

    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      super(RepresentationToStringResponse, self).__init__(*args, **kwds)
      #message fields cannot be None, assign default values for those that are
      if self.state_string is None:
        self.state_string = ''
    else:
      self.state_string = ''

  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types

  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    try:
      _x = self.state_string
      length = len(_x)
      # strings are UTF-8 encoded and length-prefixed on the wire
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
    except struct.error as se: self._check_types(se)
    except TypeError as te: self._check_types(te)

  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    try:
      end = 0
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.state_string = str[start:end].decode('utf-8')
      else:
        self.state_string = str[start:end]
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill

  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    try:
      _x = self.state_string
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
    except struct.error as se: self._check_types(se)
    except TypeError as te: self._check_types(te)

  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    try:
      end = 0
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.state_string = str[start:end].decode('utf-8')
      else:
        self.state_string = str[start:end]
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill

# uint32 length prefix packer used by the response (de)serializers
_struct_I = genpy.struct_I
class RepresentationToString(object):
  # Service descriptor tying the request/response classes together;
  # rospy reads these attributes when registering/calling the service.
  _type          = 'estirabot_msgs/RepresentationToString'
  _md5sum = 'a1b22891042192e0316f71d2c67c0d87'
  _request_class  = RepresentationToStringRequest
  _response_class = RepresentationToStringResponse
|
UTF-8
|
Python
| false | false | 2,013 |
3,075,196,599,446 |
165da0d1547fa825c651a1543f97e44cb7107c84
|
44bd660541efbf00ffbbef8d3c5af648e74d47dc
|
/src/ggrc/converters/import_helper.py
|
c21e86347b74c88644ee87fb710de94429c3e2bb
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
non_permissive
|
sriharshakappala/ggrc-core
|
https://github.com/sriharshakappala/ggrc-core
|
c4d74e2a6f0659ac0a125ac3bbdbeb3ebbf4142f
|
7561ce27cd987d73468a44df5b6e2b7425f050ef
|
refs/heads/master
| 2021-01-15T21:38:52.544644 | 2013-10-01T07:41:43 | 2013-10-01T07:41:43 | 13,235,587 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import csv
import chardet
import os
from ggrc.models import Directive, Section
from StringIO import StringIO
from flask import current_app
from ggrc import db
from .common import ImportException
from ggrc.converters.sections import SectionsConverter
from ggrc.converters.controls import ControlsConverter
def handle_csv_import(converter_class, filepath, **options):
  """Read a CSV file and run it through the given converter.

  :param converter_class: converter with ``from_rows(rows, **options)``
  :param filepath: path string, or an already-open file-like object
  :param options: passed through to the converter; ``directive_id`` is
      resolved to a ``directive`` model instance, ``dry_run`` (default
      True) controls whether the import is committed
  :return: result of ``converter.do_import(...)``
  :raises ImportException: when the file contains undecodable characters
  """
  rows = []
  csv_file = filepath
  if isinstance(filepath, basestring):
    csv_file = open(filepath, 'rbU')
  if options.get('directive_id') and not options.get('directive'):
    options['directive'] = Directive.query.filter_by(id=int(options['directive_id'])).first()
  # BUG FIX: the file was previously closed only on the success path and
  # leaked when decoding failed; close it in all cases.
  try:
    try:
      rows = [row for row in csv_reader(csv_file.read().splitlines(True))]
    except UnicodeDecodeError:  # Decode error occurs when a special character symbol is inserted in excel.
      raise ImportException("Could not import: invalid character encountered, verify the file is correctly formatted.")
  finally:
    csv_file.close()
  converter = converter_class.from_rows(rows, **options)
  return converter.do_import(options.get('dry_run', True))
def csv_reader(csv_data, dialect=csv.excel, **kwargs):
  """Yield CSV rows from *csv_data* as lists of unicode cells.

  The raw lines are first normalized to UTF-8 bytes (see
  ``utf_8_encoder``) so the stdlib csv module can parse them, and each
  resulting cell is decoded back to unicode.
  """
  encoded_lines = utf_8_encoder(csv_data)
  for raw_row in csv.reader(encoded_lines, dialect=dialect, **kwargs):
    yield [unicode(cell, 'utf-8') for cell in raw_row]
def utf_8_encoder(csv_data):
  """Generator that yields every line of *csv_data* as UTF-8 bytes.

  Most input is expected to be plain ASCII, which encodes directly. A
  line that fails to encode is assumed to carry some other encoding;
  chardet guesses it and the line is transcoded to UTF-8. The guess is
  performed lazily, per failing line, since valid ASCII may be followed
  by arbitrarily encoded lines later in the stream.
  """
  for raw_line in csv_data:
    try:
      yield raw_line.encode('utf-8')
    except UnicodeDecodeError:
      # Fallback: detect the real encoding of this line and transcode.
      guessed_encoding = chardet.detect(raw_line)['encoding']
      yield raw_line.decode(guessed_encoding).encode('utf-8')
def handle_converter_csv_export(filename, objects, converter_class, **options):
  """Export *objects* through *converter_class* as a CSV download response.

  Builds an in-memory CSV (metadata rows first, then the exporter's own
  rows) and wraps it in a Flask response with attachment headers so the
  browser offers it as *filename*.
  """
  response_headers = [
      ('Content-Type', 'text/csv'),
      ('Content-Disposition','attachment; filename="{}"'.format(filename)),
  ]
  exporter = converter_class(objects, **options)
  output_buffer = StringIO()
  csv_writer = csv.writer(output_buffer)
  # Metadata rows are unicode; encode each cell before handing to csv.
  for metadata_row in exporter.do_export_metadata():
    encoded_row = [cell.encode("utf-8") for cell in metadata_row]
    csv_writer.writerow(encoded_row)
  exporter.do_export(csv_writer)
  body = output_buffer.getvalue()
  output_buffer.close()
  return current_app.make_response((body, 200, response_headers))
|
UTF-8
|
Python
| false | false | 2,013 |
10,857,677,327,954 |
2911d30f6530c12cea44db78e5157b0c1c0a4ce7
|
b5d2c5d8373bc8f0266ca00798d020520837f382
|
/setup.py
|
cce4d59667c09d6889b37645fbcff86ebf9cc0ef
|
[] |
no_license
|
toutpt/toutpt.photomanager
|
https://github.com/toutpt/toutpt.photomanager
|
d1fc88f21ce6b29b07d46975875631b9e753396c
|
19160c772df7743746100b6d7b75ae043f13f28e
|
refs/heads/master
| 2021-01-02T22:58:23.416118 | 2012-04-02T08:54:03 | 2012-04-02T08:54:03 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from setuptools import setup, find_packages
import os
version = '1.0'
setup(name='toutpt.photomanager',
version=version,
description="Tools to manage photos",
long_description=open("README.txt").read() + "\n" +
open(os.path.join("docs", "HISTORY.txt")).read(),
# Get more strings from
# http://pypi.python.org/pypi?:action=list_classifiers
classifiers=[
"Programming Language :: Python",
],
keywords='',
author='JeanMichel FRANCOIS',
author_email='[email protected]',
url='https://github.com/toutpt/toutpt.photomanager',
license='GPL',
packages=find_packages(exclude=['ez_setup']),
namespace_packages=['toutpt'],
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
'zope.interface',
'zope.component',
'zope.schema',
'gdata',
'flickrapi',
# -*- Extra requirements: -*-
],
entry_points="""
# -*- Entry points: -*-
[console_scripts]
picasaweb2flickr = toutpt.photomanager.cmd.picasaweb2flickr:main
""",
)
|
UTF-8
|
Python
| false | false | 2,012 |
2,422,361,567,586 |
8be465f4f8524aa3d6f54c61dd3f30c2f63b3232
|
ccc0c15518d3a700f619b884a58445e892af4f36
|
/final/hierarchy_tree.py
|
e35e1fb56908e3d3ef58fde126ba6f5caad0575b
|
[] |
no_license
|
byoshi/EECS-498-IR-Project
|
https://github.com/byoshi/EECS-498-IR-Project
|
960ee1eeb5efa0f59a3b53f2ae577af915e223b9
|
77aa52ebc29249b813987150de017a47230156a8
|
refs/heads/master
| 2020-12-14T08:57:22.535177 | 2014-04-24T21:17:58 | 2014-04-24T21:17:58 | 18,300,906 | 0 | 1 | null | false | 2020-07-24T03:56:59 | 2014-03-31T17:01:43 | 2016-03-23T16:30:48 | 2014-04-24T21:17:58 | 2,048 | 0 | 1 | 1 |
Python
| false | false |
#!/usr/bin/python
from tree import *
import collections
def find_lowest_degrees(tree, to_links, parent_roots, rankings_by_index):
    """Pick one parent node per distinct root among the linked-to nodes.

    :param tree: hierarchy built so far (project Tree type)
    :param to_links: indices of already-inserted nodes the new article links to
    :param parent_roots: roots reachable from ``to_links`` (used only for its
        length, i.e. how many distinct roots must be covered)
    :param rankings_by_index: list of ``(rank, index)`` tuples addressable by
        article index
    :return: list of node indices to attach the new node under
    """
    # Collect the (rank, index) tuples of the candidate parents.
    ranking_subset = list()
    for link in to_links:
        ranking_subset.append(rankings_by_index[link])
    # NOTE(review): sorts by tup[1] (the article index), not tup[0] (the
    # rank) -- looks inconsistent with the function name; confirm intent.
    ranking_subset.sort(key=lambda tup: tup[1])
    roots_inserted_into = list()
    new_parents = list()
    for rank in ranking_subset:
        roots_to_check = tree.get_roots_of_nodes(rank[1])
        for root in roots_to_check:
            if root not in roots_inserted_into:
                # First candidate seen for this root becomes a parent.
                new_parents.append(rank[1])
                roots_inserted_into.append(root)
        # Stop once every distinct parent root has been covered.
        if len(roots_inserted_into) == len(parent_roots):
            break
    return new_parents
def build_tree(rankings, articles, links, rankings_by_index):
    """Build a hierarchy tree by inserting articles in rank order.

    Articles are processed from highest to lowest rank (``rankings`` is
    pre-sorted descending by the caller). An article whose outgoing links
    point at nodes already in the tree is attached under one parent per
    distinct root (see ``find_lowest_degrees``); otherwise it starts a
    new root.

    :param rankings: descending-sorted list of ``(rank, index)`` tuples
        for the articles belonging to this cluster
    :param articles: article names indexed by article index
    :param links: dict mapping article index -> set of linked-to indices
    :param rankings_by_index: ``(rank, index)`` tuples addressable by index
    :return: the populated Tree
    """
    tree = Tree()
    for index in range(0, len(rankings)):
        #find a root node - high ranking with no articles ranked higher
        highest_rank_index = rankings[index]
        if highest_rank_index[1] in links:
            to_links = links[highest_rank_index[1]]
        else:
            to_links = set()
        print "Classify: ", articles[highest_rank_index[1]]
        # Keep only the link targets that are already placed in the tree.
        nodes_to_find_roots = list()
        for to_link_index in to_links:
            if to_link_index in tree.nodes.keys():
                nodes_to_find_roots.append(to_link_index)
        parent_roots = list()
        if len(nodes_to_find_roots) > 0:
            parent_roots = tree.get_roots_of_nodes(nodes_to_find_roots)
        #find lowest degree with different roots
        if len(parent_roots) > 0:
            new_parents = find_lowest_degrees(tree, nodes_to_find_roots, parent_roots, rankings_by_index)
            for p in new_parents:
                print '========== Add node with parent %s %s ========' % (p, tree.get_node(p).get_value())
                # The same node is added once per parent, linking it under
                # every selected parent.
                tree.add_node(highest_rank_index[1], articles[highest_rank_index[1]], p)
        else:
            print '==== LINK NOT FOUND - Add New Root ====='
            # No placed link targets: this article becomes a new root.
            tree.add_node(highest_rank_index[1], articles[highest_rank_index[1]])
    return tree
def make_tree(clusterid, save=False):
    """Load rankings/clusters/links from disk and build one cluster's tree.

    Reads ``article_names.txt``, ``pagerank``, ``clusters`` and ``links``
    from the working directory, restricts the ranking to articles in
    *clusterid*, builds the hierarchy and prints it.

    :param clusterid: cluster whose articles should form the tree
    :param save: when True, also dump the tree as JSON to ``tree.json``
    :return: the root node keys of the built tree
    """
    articles_path = 'article_names.txt'
    articles_file = open(articles_path, 'r')
    articles = articles_file.read().strip().split("\n")
    rankings_path = 'pagerank'
    rankings_file = open(rankings_path, 'r')
    rankings_list = rankings_file.read().strip().split("\n")
    clusters_path = 'clusters'
    clusters_file = open(clusters_path, 'r')
    clusters = [int(i) for i in clusters_file.read().strip().split("\n")]
    rankings_by_index = list()
    rankings = []
    for index in xrange(len(rankings_list)):
        # Tuple layout is (rank, article_index).
        temp = [int(rankings_list[index]), index]
        next = tuple(temp)
        rankings_by_index.append(next)
        if clusters[index] == clusterid:
            rankings.append(next)
    # Highest rank first, as build_tree expects.
    rankings.sort(key=lambda tup: tup[0])
    rankings.reverse()
    # Each line of the links file is "source_index target_index".
    links_path = 'links'
    links_file = open(links_path, 'r')
    links_list = links_file.read().strip().split('\n')
    links = dict()
    for link in links_list:
        link_parts = link.split()
        link_source = int(link_parts[0])
        link_target = int(link_parts[1])
        if link_source not in links:
            links[link_source] = set()
        links[link_source].add(link_target)
    tree = build_tree(rankings, articles, links, rankings_by_index)
    '''print '========== Roots ==========='
    roots = tree.get_roots().values()
    for root in roots:
        #print 'root key: ', root
        #ode = tree.get_node(root)
        print root.get_value()
        print 'num_children: ', len(root.get_children())
    '''
    print '========== Tree ==========='
    tree.show_recursive()
    if save:
        jsonfile = open("tree.json", "w")
        jsonfile.write(tree.json() + "\n")
        jsonfile.close()
    return tree.get_roots().keys()
if __name__ == '__main__':
    # Build a tree for each of the k clusters and record every tree's
    # root indices, one line per cluster, in the "roots" file.
    k = 10;
    root_f = open("roots", "w")
    for i in xrange(k):
        save = False
        # Only cluster 6's tree is additionally dumped to tree.json.
        if i == 6:
            save = True
        roots = make_tree(i, save)
        for r in roots:
            root_f.write(str(r) + " ")
        root_f.write("\n")
|
UTF-8
|
Python
| false | false | 2,014 |
8,572,754,732,394 |
c5cc8e5b1fd6cf416bc8f04d79875e2712069634
|
9af1d767bf2ef7086c8c1ac0bb996c4637051a36
|
/getPrices.py
|
bd3a4f66e6614948a10c6770ff3c11da223895fb
|
[
"Apache-2.0"
] |
permissive
|
divir94/News-Analytics
|
https://github.com/divir94/News-Analytics
|
1aaf0011c0af1c69dc859f3e5c27bb8f8013eca3
|
1fcf2b11e38f9b0c182160dfded7be44d5a7c8bb
|
refs/heads/master
| 2021-01-18T01:57:00.609579 | 2014-08-14T04:42:51 | 2014-08-14T04:42:51 | 20,848,409 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
'''
Created on Jun 2, 2014
@author: vidurjoshi
'''
from pandas.io import parsers
from datetime import datetime
def make_url(ticker_symbol, start_date, end_date):
    """Build the Yahoo Finance historical-prices CSV URL for a ticker.

    :param ticker_symbol: stock symbol, e.g. "GOOG"
    :param start_date: datetime for the first day of the range
    :param end_date: datetime for the last day of the range
    :return: full ichart.finance.yahoo.com CSV URL (daily interval)
    """
    # BUG FIX: removed stray debug `print ticker_symbol` that polluted stdout.
    base_url = "http://ichart.finance.yahoo.com/table.csv?s="
    a = start_date
    b = end_date
    # Yahoo's API numbers months zero-based (parameters a and d), hence month - 1.
    dt_url = '%s&a=%d&b=%d&c=%d&d=%d&e=%d&f=%d&g=d&ignore=.csv' % (
        ticker_symbol, a.month - 1, a.day, a.year, b.month - 1, b.day, b.year)
    return base_url + dt_url
def getPrices(symb, openD, closeD):
    """Download and print the daily price table for *symb* between the
    two dates (network I/O via pandas; result is only printed, not
    returned)."""
    url = make_url(symb, openD, closeD)
    print url
    e = parsers.read_csv(url)
    print e

# Module-level demo call: fetch GOOG prices for 2000-2012 on import/run.
getPrices("GOOG", datetime(2000,1,1), datetime(2012,1,1))
|
UTF-8
|
Python
| false | false | 2,014 |
936,302,872,973 |
781c1be8d94c3da4be5dc91235337d798aa608ad
|
731f6e38d471bf2b941dae4b34419e9212d18063
|
/models/__init__.py
|
f76a752e41d320d8e055a67c08bc8274204e7896
|
[] |
no_license
|
jarrodtoh/whatfone-api
|
https://github.com/jarrodtoh/whatfone-api
|
5a18254f97f4fd46f6328b5f325b7107a622bad7
|
c1ee9e946653367e6f8fdb92ebb385bf0b77563a
|
refs/heads/master
| 2020-11-26T15:24:30.122940 | 2013-11-01T07:41:38 | 2013-11-01T07:41:38 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Package marker module; only records the author.
__author__ = 'Jarrod'
|
UTF-8
|
Python
| false | false | 2,013 |
15,272,903,708,076 |
48e870638cfd6992a66ba61ec150d69e701b49a4
|
a5b0ddbec1617bea859f215f756e471d7a69f478
|
/python/projects/projects/algorithms/primefac.py
|
3006efa81b3396b2b22f0349cf54daffe65b98a8
|
[
"MIT"
] |
permissive
|
wfaithfull/Projects
|
https://github.com/wfaithfull/Projects
|
6c6a4e0252809962a01d037d28bbc32c2d8c430a
|
7fffbeb9b20e66bc35cb97e5530aad14dd1d4870
|
refs/heads/master
| 2020-12-28T19:56:54.722343 | 2014-12-28T00:09:33 | 2014-12-28T00:09:33 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from fractions import gcd
def g(x, n):
    """Pollard's-rho step polynomial: f(x) = x**2 + 1 reduced modulo n."""
    return (x ** 2 + 1) % n


def pollardrho(n):
    """Return a non-trivial divisor of n found by Brent-style Pollard rho.

    Keeps a fixed point (`anchor`) and repeatedly advances a walker with g();
    the number of steps allowed per round doubles each time, and a divisor
    is detected via gcd(walker - anchor, n).
    """
    anchor = 2          # fixed comparison point for the current round
    walker = 2          # advanced by g() every step
    round_limit = 2     # steps allowed this round; doubles each round
    divisor = 1
    while divisor == 1:
        steps = 1
        while steps < round_limit and divisor == 1:
            walker = g(walker, n)
            steps += 1
            divisor = gcd(walker - anchor, n)
        round_limit *= 2
        anchor = walker
    return divisor
def main(args):
    """CLI entry point: args[1] is the integer to factor; prints one factor.

    Fix: the original used a Python-2-only `print` statement; the
    parenthesized form prints the same single value on Python 2 and 3.
    """
    print(pollardrho(int(args[1])))
if __name__ == '__main__':
    # Entry point: factor the integer given as the first CLI argument.
    # sys is imported lazily so importing this module pulls in nothing extra.
    import sys
    main(sys.argv)
|
UTF-8
|
Python
| false | false | 2,014 |
2,302,102,509,253 |
407ccb7e49384e8446af918ebfce60a3b4904664
|
5c49f5af724c3a95074d4c061db9f00d0f63515f
|
/sparky_landing/views.py
|
8c52f53f944b942b372def9281707fc7844c5fc9
|
[] |
no_license
|
andydepp6/sparky
|
https://github.com/andydepp6/sparky
|
3788b3ef14c69d5dc6a36cadaf629303bd925eca
|
66d954685ccb995cd67cc803784a02e23467e250
|
refs/heads/master
| 2021-01-10T21:35:37.245753 | 2014-11-24T22:04:45 | 2014-11-24T22:04:45 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.shortcuts import render
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from sparky_landing.models import *
from sparky_landing.forms import *
# Create your views here.
def index(request):
    """Render the public landing page."""
    template = 'index.html'
    return render(request, template)
@csrf_exempt
def save(request):
    """AJAX-only registration endpoint.

    Responds with a single status character:
      '0' — registration saved
      '1' — saving the Register row raised an error
      '2' — form failed validation
      '3' — email already registered
    Non-AJAX or non-POST requests get "Acceso no autorizado".

    NOTE(review): both `except:` clauses are bare — the inner one is
    presumably meant to catch Register.DoesNotExist and the outer one DB
    save errors, but as written they swallow every exception; confirm
    before narrowing.
    """
    if request.is_ajax():
        if request.method == 'POST':
            form = Register_t(request.POST)
            if form.is_valid():
                try:
                    try:
                        # Duplicate-email probe: success means it already exists.
                        Register.objects.get(email=request.POST['email'])
                        message = '3'
                    except:
                        Register(name=request.POST['name'],email=request.POST['email']).save()
                        message = '0'
                except:
                    message = '1'
                return HttpResponse(message)
            else:
                message = '2'
                return HttpResponse(message)
        else:
            message = "Acceso no autorizado"
            return HttpResponse(message)
    else:
        message = "Acceso no autorizado"
        return HttpResponse(message)
def handler404(request):
    # Custom "page not found" view (wired via Django's handler404 hook).
    return render(request,'404.html')
def handler500(request):
    # Custom "server error" view (wired via Django's handler500 hook).
    return render(request,'500.html')
|
UTF-8
|
Python
| false | false | 2,014 |
7,816,840,517,901 |
04061bc432e45337cb403736c11404884b6c0893
|
3ac170b17904226d63379f37fc0fc20abf9d1a92
|
/testInternals.py
|
c933cd448bd2f66bd2cbc5259734dac3d80be61b
|
[] |
no_license
|
puhw-dev/Sensors
|
https://github.com/puhw-dev/Sensors
|
6cf840d8f8c26bd3f1078d4d1006e81dd5df7977
|
fdaae343604c4707f7a424d8ce12900db0f6cd12
|
refs/heads/master
| 2016-09-11T10:23:33.042139 | 2014-06-14T14:51:28 | 2014-06-14T14:51:28 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from internal.SensorSystemInfo import *
from internal.SensorSystemLoad import *
from internal.SensorNetworkInfo import *
from internal.Options import *
import time
import os
# Console dashboard: refreshes once a second with static system info,
# current load, and network counters from the internal sensor classes.
clear = lambda: os.system('clear')
options = Options()
options.frequency = 1.0  # sensor sampling frequency in Hz
networkInfo = SensorNetworkInfo(options)
systemInfo = SensorSystemInfo(options)
systemLoad = SensorSystemLoad(options)
try:
    while True:
        clear()
        print("========= SystemInfo =========")
        print("System name:\t\t%s" % systemInfo.name())
        print("Architecture:\t\t%s" % systemInfo.architecture())
        print("CPU name:\t\t%s" % systemInfo.CPU())
        print("Total RAM:\t\t%f GB" % (systemInfo.totalRAM() / 1024.0 / 1024.0 / 1024.0) )
        print("Total disk space:\t%f GB" % (systemInfo.totalDiskSpace() / 1024.0 / 1024.0 / 1024.0) )
        #print("Host IP:\t\t%s" % systemInfo.IP())
        print("")
        print("========= SystemLoad =========")
        print("Free memory:\t\t%f MB" % (systemLoad.freeMemory() / 1024.0 / 1024.0) )
        print("CPU utilization:\t%0.2f%%" % systemLoad.cpuUtilization())
        print("")
        print("======== NetworkInfo =========")
        print("Bytes sent:\t\t%d" % networkInfo.bytesSent() )
        print("Bytes recv:\t\t%d" % networkInfo.bytesReceived() )
        print("Packets sent:\t\t%d" % networkInfo.packetsSent() )
        print("Packets recv:\t\t%d" % networkInfo.packetsReceived() )
        print("Network in: \t\t%d kbit/s" % networkInfo.kBitsPerSecondIn() )
        print("Network out: \t\t%d kbit/s" % networkInfo.kBitsPerSecondOut() )
        time.sleep(1)
except:
    # Bare except doubles as shutdown: on any error (including Ctrl-C,
    # presumably the intended exit) tell the network sensor thread to stop.
    networkInfo.isRunning = False
|
UTF-8
|
Python
| false | false | 2,014 |
455,266,568,008 |
64d4582e08dd10cec741cf2c4e680b15fe8e279c
|
faeb0af23da9e8506e1b2654ba77202c4defb4ec
|
/client.py
|
27167a8a96abdc495165a819fb55dadf2776e3a2
|
[] |
no_license
|
Speculative/rhythm-gate
|
https://github.com/Speculative/rhythm-gate
|
35f4b5f39a9a6c8013cb50d78904a1cbc78ba61a
|
6fca3904d0609fa58c6597f31536b56e5eb62e21
|
refs/heads/master
| 2021-01-23T15:50:59.907422 | 2014-04-14T21:47:57 | 2014-04-14T21:47:57 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import socket
# Fixes two defects in the original stub:
#  1. `if __name__ == main:` referenced an undefined name `main`
#     (NameError at runtime) — the intended check is the string "__main__".
#  2. `socket s = socket.socket()` was a C-style declaration and a Python
#     syntax error — plain assignment is correct.
if __name__ == "__main__":
    # connect with server: create a TCP socket (connection logic still TODO)
    s = socket.socket()
|
UTF-8
|
Python
| false | false | 2,014 |
17,093,969,871,298 |
78957f6686d82660835ef48780eb11c1bbc8c10c
|
e90db7cf2e90cbd08b6ae29be3061020af82ad1a
|
/kaggle/happy.py
|
11f0c5de7f663042ac4132a716976a4739111f37
|
[] |
no_license
|
tomaximum/MITx15.071-Spring-2014
|
https://github.com/tomaximum/MITx15.071-Spring-2014
|
3d14d5a80a500eac6c2d49396e9d719bd3eb9e7f
|
5aa268fc01648155b66e856f495e309511b4e3c6
|
refs/heads/master
| 2020-04-05T18:33:54.938972 | 2014-05-27T20:16:54 | 2014-05-27T20:16:54 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# MITx 15.074x Spring 2014 Kaggle Competition
# David Wihl
import csv
import pickle
import numpy as np
from sklearn import preprocessing
from sklearn import svm
from sklearn.cross_validation import cross_val_score
from sklearn.cross_validation import train_test_split
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.ensemble import ExtraTreesClassifier, ExtraTreesRegressor
from sklearn import metrics
global labelEncoders
class Dataset(object):
    """Label-encodes the Kaggle "Show of Hands" survey data.

    Keeps one sklearn LabelEncoder per feature column; encoders can be
    pickled to/from "encoders.p" so the test set is transformed with the
    same mapping learned from the training set.  Python 2 code (`xrange`,
    `print` statements).
    """
    def __init__(self):
        self.labelEncoders = []  # one LabelEncoder per feature column
    def load(self):
        # Restore previously fitted encoders (file handle left to GC).
        self.labelEncoders = pickle.load( open("encoders.p", "rb") )
    def save(self):
        # Persist fitted encoders for later test-set processing.
        pickle.dump(self.labelEncoders, open("encoders.p", "wb") )
    def trainProcess(self,headers, array):
        """Fit encoders on the training rows and return (cleanArray, target).

        Row layout assumed: [UserID, YOB, Gender, Income, HouseholdStatus,
        EducationLevel, Party, Happy, <101 questions...>, votes] — TODO
        confirm against train.csv headers.
        """
        newRow = [0] * len(headers)  # NOTE(review): leftover — reassigned below, never used as-is
        YOB = []
        factors = []
        for i in xrange(len(headers)):
            factors.append(set([]))
            self.labelEncoders.append(preprocessing.LabelEncoder())
        target = []
        # Pass 1: collect the distinct values of every categorical column.
        for row in array:
            # YOB - to be normalized
            if row[1] != 'NA':
                YOB.append(row[1])
            # Gender, Income, HouseholdStatus, EducationLevel, Party
            factors[0].add(row[2])
            factors[1].add(row[3])
            factors[2].add(row[4])
            factors[3].add(row[5])
            factors[4].add(row[6])
            # Target (Happy = {0,1})
            target.append(int(row[7]))
            colnum = 5
            for col in row[8:-1]: # omit last column (votes)
                factors[colnum].add(col)
                colnum += 1
        # Encode Labels
        colnum = 0
        for f in factors:
            self.labelEncoders[colnum].fit(list(f))
            colnum += 1
        # ok, let's add a new cleaned up row
        # Pass 2: re-walk the rows and emit integer-encoded feature vectors.
        rownum = 0
        cleanArray = []
        for row in array:
            newRow = []
            colnum = 0
            if rownum % 1000 == 0 and rownum != 0:
                print rownum
            #demographics
            for col in row[2:7]:
                newRow.append(self.labelEncoders[colnum].transform([col])[0])
                colnum += 1
            # 101 questions
            for col in row[8:-1]:
                newRow.append(self.labelEncoders[colnum].transform([col])[0])
                colnum += 1
            cleanArray.append(newRow)
            rownum += 1
        return cleanArray, target
    def testProcess(self,headers, array):
        """Transform test rows with the already-fitted encoders.

        Returns (userIds, cleanArray).  Test rows have no Happy column, so
        all feature columns are taken as row[2:-1].
        """
        userIds = []
        YOB = []
        # ok, let's add a new cleaned up row
        rownum = 0
        cleanArray = []
        for row in array:
            userIds.append(row[0])
            newRow = []
            colnum = 0
            if rownum % 1000 == 0 and rownum != 0:
                print rownum
            for col in row[2:-1]:
                newRow.append(self.labelEncoders[colnum].transform([col])[0])
                colnum += 1
            cleanArray.append(newRow)
            rownum += 1
        return userIds, cleanArray
def main():
    """Train a random forest on the survey data and (when enabled) write a
    Kaggle submission CSV.

    NOTE(review): the bare `return` after printing the cross-validation
    score makes everything from `clf.fit` onward UNREACHABLE — apparently
    a deliberate short-circuit while tuning; remove it to regenerate
    subextra.csv.
    """
    nrow = 0
    ds = Dataset()
    print "reading and processing..."
    try:
        # Fast path: reuse cached arrays and encoders from a previous run.
        X = np.load("X.npy")
        y = np.load("y.npy")
        ds.load()
    except:
        # Cache miss (or any other failure): rebuild from train.csv.
        print "exception thrown"
        with open('train.csv', 'rU') as csvfile:
            reader = csv.reader(csvfile, lineterminator='\r')
            headers = reader.next()
            trainData = []
            for row in reader:
                trainData.append(row)
        X_array, target = ds.trainProcess(headers,trainData)
        X = np.array(X_array)
        y = np.array(target)
        np.save("X.npy",X)
        np.save("y.npy",y)
        ds.save()
    print "Starting train..."
    print "X shape", X.shape
    clf = RandomForestRegressor(n_estimators=2000)
    scores = cross_val_score(clf, X, y, cv = 5)
    print scores.mean()
    return
    # ---- unreachable below this point (see docstring) ----
    clf.fit(X,y)
    print "Starting predictions..."
    nrow = 0
    with open('test.csv', 'rU') as csvfile:
        reader = csv.reader(csvfile, lineterminator='\r')
        headers = reader.next()
        testData = []
        for row in reader:
            testData.append(row)
    userIds, testArray = ds.testProcess(headers, testData)
    X_test = np.array(testArray)
    print "X test shape", X_test.shape
    pred = clf.predict(X_test)
    with open('subextra.csv','wb') as f:
        writer = csv.writer(f)
        writer.writerow(['UserID','Probability1'])
        for u, p in zip(userIds, pred):
            # Clamp probabilities away from 0/1 (log-loss safety margin).
            if p > 0.99: p = 0.99
            if p < 0.001: p = 0.001
            writer.writerow([u,p])
    print "Submission CSV written."
if __name__ == "__main__":
    main()
# todo:
# 1. add normalization code for YOB and questions answered, then normalize the values
# 2. Try a two or four cluster then run RF against it.
# 3. Clean out bad training data
|
UTF-8
|
Python
| false | false | 2,014 |
12,773,232,770,372 |
164efb294f65894a8a3760e9dd6fc61e93b04628
|
a47f2e427eedd0ca3e28ebce72fd06fecf310fb1
|
/pytest/GEO/word2geo/test_get_field_words.py
|
221c01f84107682c9f3578f8a42afd6dcee1d04b
|
[] |
no_license
|
phonybone/trends
|
https://github.com/phonybone/trends
|
68984152cc5aff64fcb0bcbdcc49d08ff28057c0
|
e89c422ba794716d39f049a06913e107e856e54a
|
refs/heads/master
| 2020-12-24T14:55:50.435134 | 2012-10-11T02:02:50 | 2012-10-11T02:02:50 | 3,132,031 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import unittest, sys, os
sys.path.append(os.path.join(os.environ['TRENDS_HOME'], 'pylib'))
from GEO.Factory import Factory
from GEO.word2geo import Word2Geo
from warn import *
class TestGetFieldWords(unittest.TestCase):
    """Checks Word2Geo.get_field_words word counts for record GSE10072.

    Requires the project's GEO data backend to be reachable (Factory
    builds the record by id).
    """
    def setUp(self):
        pass
    def test_get_field_words(self):
        geo_id='GSE10072'
        geo=Factory().newGEO(geo_id)
        words=Word2Geo.get_field_words(geo)
        # Expected counts were captured from a known-good run.
        self.assertEqual(len(words['title']), 42)
        self.assertEqual(len(words['description']), 0)
        self.assertEqual(len(words['summary']), 738) # not quite sure why this isn't 741
# Runs the suite at import time (script-style usage, not pytest discovery).
suite = unittest.TestLoader().loadTestsFromTestCase(TestGetFieldWords)
unittest.TextTestRunner(verbosity=2).run(suite)
|
UTF-8
|
Python
| false | false | 2,012 |
3,736,621,551,433 |
7c70074a7a90e9c785b83e404d6bc609e22e61f1
|
a58a44bae3e1fa7c3c6087d7188b31a15f4d7221
|
/src/welcometo42cc/urls.py
|
35a4d055ffe57ab5aff4aaeee36137b0a2136ba2
|
[] |
no_license
|
pioneer/42cc-test
|
https://github.com/pioneer/42cc-test
|
63c691f0d2b3fe6ccdfb152cca2005146d68b1f3
|
047f079bc80d0848561cf8f120189eacea4b0cb5
|
refs/heads/master
| 2021-01-01T17:00:06.380360 | 2010-03-17T08:19:22 | 2010-03-17T08:19:22 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os.path as op
from django.conf import settings
from django.conf.urls.defaults import *
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
# URL routes: home page, the user form (plain + AJAX), auth views, admin.
urlpatterns = patterns('',
    url(r'^$', 'common.views.homepage'),
    url(r'^form/$', 'common.views.userform', name="userform"),
    url(r'^form/ajax/$', 'common.views.userform_ajax', name="userform_ajax"),
    url(r'^login/$', 'django.contrib.auth.views.login', \
        {'template_name': 'login.html'}, name="login"),
    url(r'^logout/$', 'django.contrib.auth.views.logout', \
        {'next_page': settings.LOGIN_URL}, name="logout"),
    # Uncomment the admin/doc line below and add 'django.contrib.admindocs'
    # to INSTALLED_APPS to enable admin documentation:
    # (r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    (r'^admin/', include(admin.site.urls)),
)
# static urls will be disabled in production mode
# (Django's static.serve is development-only.)
urlpatterns += patterns(
    '',
    url(r'^static/(.*)$', 'django.views.static.serve', \
        {'document_root': settings.STATIC_ROOT}),
    url(r'^admin-media/(.*)$', 'django.views.static.serve', \
        {'document_root': op.join(op.dirname(admin.__file__), 'media')}),
)
|
UTF-8
|
Python
| false | false | 2,010 |
18,004,502,929,392 |
5a1d1a09e3017474d45557c65a0ad25298b7d477
|
45e067a4cb6ad96004a8427887dba9a2af87029d
|
/yogacal/yogacal.py
|
c7ad9dcf3ee0d3d2b1e83992f0bc698a0871730a
|
[
"GPL-3.0-only"
] |
non_permissive
|
spudtrooper/yogaical
|
https://github.com/spudtrooper/yogaical
|
e5eb6cda9758fb6276485c3abbf8db52ed0682d3
|
56271529c9ab1c6de4bd0db126e5e0fab743f3f2
|
refs/heads/master
| 2021-01-23T09:52:20.094965 | 2012-09-30T20:09:29 | 2012-09-30T20:09:29 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import urllib
import urllib2
import logging
import json
import re
class JsonObj:
    """Base class for JSON-backed records; coerces the id field to int."""

    def __init__(self, id):
        # Wire format may deliver the id as a string — normalize it once.
        self.id = int(id)
class Instructor(JsonObj):
    """A yoga instructor: numeric id plus display name."""

    def __init__(self, id, name):
        JsonObj.__init__(self, id)
        self.name = name

    @staticmethod
    def fromJson(obj):
        """Build an Instructor from its wire dict (keys 'id' and 'n')."""
        ident, display_name = obj['id'], obj['n']
        return Instructor(ident, display_name)
class Class(JsonObj):
    """A class offering: name plus free-text description."""

    def __init__(self, id, name, description):
        JsonObj.__init__(self, id)
        self.name = name
        self.description = description

    @staticmethod
    def fromJson(obj):
        """Build a Class from its wire dict (keys 'id', 'n', 'd')."""
        ident, display_name, blurb = obj['id'], obj['n'], obj['d']
        return Class(ident, display_name, blurb)
class Location(JsonObj):
    """A studio location with geo coordinates and contact details."""

    def __init__(self, id, name, lat, lng, city, address, state, phone):
        JsonObj.__init__(self, id)
        # Plain string fields first.
        self.name = name
        self.city = city
        self.address = address
        self.state = state
        self.phone = phone
        # Coordinates arrive as strings on the wire — coerce to float.
        self.lat = float(lat)
        self.lng = float(lng)

    @staticmethod
    def fromJson(obj):
        """Build a Location from its wire dict.

        Wire keys: 'n' name, 't' lat, 'g' lng, 'ct' city, 'ad' address,
        'state' state, 'ph' phone.
        """
        return Location(obj['id'], obj['n'], obj['t'], obj['g'], obj['ct'],
                        obj['ad'], obj['state'], obj['ph'])
class YogaCalItem:
    """One schedule entry: who teaches which class, where, when, how long."""

    def __init__(self, instructor, klass, location, date, mins):
        # Bind all five fields in one multiple assignment.
        (self.instructor, self.klass, self.location,
         self.date, self.mins) = instructor, klass, location, date, mins
class YogaCalJsonAdapter:
    """Flattens the schedule JSON into a list of YogaCalItem in self.items.

    Expects obj to have 'instructors', 'locations', 'classes' (entity
    lists) and 'data' (occurrences referencing them by id: 's' instructor,
    'd' class, 'l' location, 'od' date string, 'r' duration in minutes).
    """
    def __init__(self,obj):
        self.items = []
        # Build id -> entity lookup tables for each entity list.
        ids2instructors = {}
        for jsonObj in obj['instructors']:
            o = Instructor.fromJson(jsonObj)
            ids2instructors[o.id] = o
        ids2locations = {}
        for jsonObj in obj['locations']:
            o = Location.fromJson(jsonObj)
            ids2locations[o.id] = o
        ids2classes = {}
        for jsonObj in obj['classes']:
            o = Class.fromJson(jsonObj)
            ids2classes[o.id] = o
        # Resolve every occurrence row into a fully-linked item.
        for jsonObj in obj['data']:
            instructor = ids2instructors[int(jsonObj['s'])]
            klass = ids2classes[int(jsonObj['d'])]
            location = ids2locations[int(jsonObj['l'])]
            date = jsonObj['od']
            mins = int(jsonObj['r'])
            item = YogaCalItem(instructor, klass, location, date, mins)
            self.items.append(item)
class YogaCal:
    """Fetches the YogaWorks schedule and renders it as an iCalendar feed.

    Python 2 code: uses urllib2/urllib and the old json-py `json.read`
    API, and `dtend` relies on Python 2 integer division (see note there).
    Dates are 'YYYYMMDDhhmm' strings throughout.
    """
    def __init__(self):
        self.log = logging.getLogger('YogaCal')
    def header(self,locations=None):
        """Return the VCALENDAR preamble; calendar name lists *locations*."""
        res = ''
        res += "BEGIN:VCALENDAR\r\n"
        res += "VERSION:2.0\r\n"
        res += "PRODID:-//jeffpalm/yogacal//NONSGML v1.0//EN\r\n"
        name = 'Yoga'
        if locations and len(locations) > 0:
            name += "--"
            name += "/".join(locations)
        res += "X-WR-CALNAME:%s\r\n" % (name)
        return res
    def footer(self):
        """Return the VCALENDAR closing line."""
        res = ''
        res += "END:VCALENDAR\r\n"
        return res
    @staticmethod
    def pad(n):
        """Zero-pad an hour/minute value to two digits."""
        if (n < 10):
            return '0%d' % (n)
        return '%d' % (n)
    @staticmethod
    def dtstart(dateStr,offset=0):
        """
        @param offset number number of hours to pad the date
        NOTE: this won't handle wrapping offsets
        """
        # 201209160830 -> 20120916T083000
        date = dateStr[0:8] # 20120916
        time = dateStr[8:] # 0830
        hours = int(time[0:2]) # 08
        hours += offset
        mins = int(time[2:]) # 30
        newTime = '%s%s00' % (YogaCal.pad(hours),
                              YogaCal.pad(mins))
        return '%sT%s' % (date,newTime)
    @staticmethod
    def dtend(dateStr,dur,offset=0):
        """
        @param offset number number of hours to pad the date
        NOTE: this won't handle wrapping offsets
        """
        # 201209160830, 75 -> 20120916T094500
        date = dateStr[0:8] # 20120916
        time = dateStr[8:] # 0830
        hours = int(time[0:2]) # 08
        hours += offset
        mins = int(time[2:]) # 30
        # NOTE(review): `/` here is Python 2 integer division; under
        # Python 3 this would produce a float — keep // if ever ported.
        carryHours = (mins + dur) / 60 # 1 = (30 + 75) / 60
                                       # = 105 / 60
        newHours = hours + carryHours # 9 = 8 + 1
        newMins = (mins + dur) % 60 # 15 = (30 + 75) % 60
                                    # = 105 % 60
        newTime = '%s%s00' % (YogaCal.pad(newHours),
                              YogaCal.pad(newMins))
        return '%sT%s' % (date,newTime)
    @staticmethod
    def removeHTML(str):
        """Strip 4-digit HTML numeric entities and collapse whitespace."""
        res = str
        res = re.sub('&#\d{4};','',res)
        res = re.sub('\s+',' ',res)
        return res
    def toEvent(self, item, offset):
        """
        @param item YogaCalItem
        @param offset number hours to pad dates
        """
        res = ''
        res += "BEGIN:VEVENT\r\n"
        uid = '%d-%d-%d' % (item.klass.id,
                            item.instructor.id,
                            item.location.id)
        #res += "UID:%s\r\n" % (uid)
        start = YogaCal.dtstart(item.date, offset)
        end = YogaCal.dtend(item.date, item.mins, offset)
        res += "DTSTART:%s\r\n" % (start)
        res += "DTEND:%s\r\n" % (end)
        summary = '%s: %s @ %s (%d mins)' % (item.instructor.name,
                                             item.klass.name,
                                             item.location.name,
                                             item.mins)
        summary = YogaCal.removeHTML(summary)
        res += "SUMMARY:%s\r\n" % (summary)
        res += "LOCATION:%s: %s, %s, %s\r\n" % (item.location.name,
                                                item.location.address,
                                                item.location.city,
                                                item.location.state)
        res += "END:VEVENT\r\n"
        return res
    def requestItems(self, locations=None, levels=None, instructors=None,
                     offset=None):
        """
        @param locations None or empty means we use all the cities
        @param levels array of class levels -- e.g. "1", "1/2"
        @param instructors array of names of instructors to search for
        @param offset number offset to pad hours, because google
        calendar doesn't seem to respect no time zones
        @return iCal version of calendar at: http://schedule.yogaworks.com
        """
        if not locations:
            locations = []
        if not levels:
            levels = []
        if not instructors:
            instructors = []
        # Build one alternation regex matching any "(level)" in a class name.
        levelsStr = '|'.join(['.*\(%s\).*' % (str(level)) for level in levels])
        levelRe = re.compile(levelsStr)
        res = self.request()
        # NOTE(review): json.read is the pre-stdlib json-py API (Python 2
        # era); the stdlib equivalent is json.loads.
        adp = YogaCalJsonAdapter(json.read(res))
        filteredItems = adp.items
        # Filter by locations
        filteredItems = filter(lambda it: len(locations) == 0 or
                               (it.location.name in locations),
                               filteredItems)
        # Filter by levels
        filteredItems = filter(lambda it: len(levels) == 0 or
                               re.match(levelRe, it.klass.name),
                               filteredItems)
        # Filter by instructors
        filteredItems = filter(lambda it: len(instructors) == 0 or
                               (it.instructor.name in instructors),
                               filteredItems)
        return filteredItems
    def ics(self, **kwargs):
        """Assemble the full .ics text: header + one VEVENT per item + footer."""
        items = self.requestItems(**kwargs)
        locations = kwargs.get('locations') or []
        offset = kwargs.get('offset') or 0
        if offset:
            offset = int(offset)
        res = self.header(locations)
        for it in items:
            res += self.toEvent(it, offset)
        res += self.footer()
        return res
    def request(self):
        """POST the fixed schedule query to yogaworks and return the raw body."""
        data = {
            'hsrc':'rtc',
            'name':'gtSchd',
            'action':'pr',
            'owner':'yogaworks',
            'dy':'2',
            'offset':'0',
            'regionVl':'-140'
        }
        url = 'http://schedule.yogaworks.com/rq/'
        req = urllib2.Request(url, urllib.urlencode(data))
        f = urllib2.urlopen(req)
        response = f.read()
        f.close()
        return response
|
UTF-8
|
Python
| false | false | 2,012 |
17,617,955,886,154 |
d5434a6e2c54e0fb1fa43a900780c20aed472bfd
|
e84e699767444315ac2096b3ece1659ba2873ae3
|
/urls.py
|
8ccdd92250b5982319c31f6226d99d951279bdb0
|
[
"BSD-3-Clause"
] |
permissive
|
ftrain/django-ftrain
|
https://github.com/ftrain/django-ftrain
|
1e6ac41211dba5e69eabf1a4a85c2aec0c048959
|
af535fda8e113e9dcdac31216852e35a01d3b950
|
refs/heads/master
| 2021-01-21T01:46:53.957091 | 2009-12-28T15:31:26 | 2009-12-28T15:31:26 | 259,071 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.conf.urls.defaults import *
#-------------------------------------------------------------------------------
# Feeds
#-------------------------------------------------------------------------------
from ftrain.kcal.feeds import KcalEventsFeed
from ftrain.kcal.feeds import KcalDaysFeed
# from ftrain.radio.feeds import MaryislaughingPostsFeed
# from ftrain.reviews.feeds import LeastreviewReviewsFeed
# from ftrain.dotcom.feeds import FtrainCompleteFeed
urlpatterns = patterns('',)
#-------------------------------------------------------------------------------
# Admin
#-------------------------------------------------------------------------------
from django.contrib import admin
admin.autodiscover()
urlpatterns = urlpatterns + patterns(
    '',
    # NOTE(review): both the include() style and the older admin.site.root
    # catch-all are registered for ^admin/ — presumably a migration
    # leftover; confirm which one this Django version actually uses.
    (r'^admin/', include(admin.site.urls)),
    (r'^admin/(.*)', admin.site.root),
    (r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Development-only media serving.
    (r'^m/(?P<path>.*)$', 'django.views.static.serve', {'document_root':'/Users/ford/sites/ftrain.com/htdocs/m/'}),
    (r'^feeds/(?P<url>.*)/$', 'django.contrib.syndication.views.feed', {'feed_dict': {'ohlih-events': KcalEventsFeed, 'ohlih-days': KcalDaysFeed}}),
)
#-------------------------------------------------------------------------------
# Search
#-------------------------------------------------------------------------------
from haystack.forms import FacetedSearchForm
from haystack.query import SearchQuerySet
from haystack.views import FacetedSearchView
#from ftrain.haystack.views import FtrainFacetedSearchView
#sqs = SearchQuerySet().facet('type').facet('month').facet('site')
sqs = SearchQuerySet().facet('title')
urlpatterns = urlpatterns + patterns(
    '',
    # (r'^search/', include('haystack.urls')),
    url(r'^search/', FacetedSearchView(form_class=FacetedSearchForm, searchqueryset=sqs), name='haystack_search'),
    # reload_all=False
)
#-------------------------------------------------------------------------------
# Sites
#-------------------------------------------------------------------------------
urlpatterns = urlpatterns + patterns('',
    # Ftrain
    (r'^dotcom/', include('ftrain.dotcom.urls')),
    # One Huge Lesson in Humility
    (r'^ohlih/', include('ftrain.kcal.urls')),
    # Mary is Laughing
    (r'^mary-is-laughing/', include('ftrain.radio.urls')),
    # Least Review
    (r'^least-review/', include('ftrain.reviews.urls')),
    # "Advertising" server
    (r'^ads/', include('ftrain.ads.urls')),
    # Featurism
    (r'^featurism/', include('ftrain.features.urls')),
)
|
UTF-8
|
Python
| false | false | 2,009 |
1,142,461,340,377 |
8ebbbbb436bcb560717612efe92804996397f5d9
|
44e1cbd2554419851258b982c468c7ba55c7acda
|
/convener/schedule/views.py
|
124639effca67ce2dd584e25d27dfda899d90e70
|
[
"MIT"
] |
permissive
|
keningle/convener
|
https://github.com/keningle/convener
|
b88ab2e419a4c4a1aa44f0a7e4b6d3210289338e
|
00c1e3a09e903b275dd0f09f6014ad16bb2f0c9a
|
refs/heads/master
| 2016-09-10T09:55:07.019827 | 2013-07-27T02:20:15 | 2013-07-27T02:20:15 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from rest_framework import viewsets, permissions
from schedule.models import Presenter, Location, Track, Session
from schedule.serializers import (PresenterSerializer, LocationSerializer,
TrackSerializer, SessionSerializer)
class PresenterViewSet(viewsets.ModelViewSet):
    '''
    CRUD API endpoint for Presenter records.
    Reads are public; writes require an authenticated user.
    '''
    queryset = Presenter.objects.all()
    serializer_class = PresenterSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
class LocationViewSet(viewsets.ModelViewSet):
    '''
    CRUD API endpoint for Location records.
    Reads are public; writes require an authenticated user.
    '''
    queryset = Location.objects.all()
    serializer_class = LocationSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
class TrackViewSet(viewsets.ModelViewSet):
    '''
    CRUD API endpoint for Track records.
    Reads are public; writes require an authenticated user.
    '''
    queryset = Track.objects.all()
    serializer_class = TrackSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
class SessionViewSet(viewsets.ModelViewSet):
    '''
    CRUD API endpoint for Session records.
    Reads are public; writes require an authenticated user.
    '''
    queryset = Session.objects.all()
    serializer_class = SessionSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
|
UTF-8
|
Python
| false | false | 2,013 |
15,573,551,425,450 |
82c8c7ddaae8af0a268663c84b95c5f088a6d70c
|
2077219d2c6266279f5c11a642964166ca251e42
|
/python/test.py
|
7dc8d4547e4a6d8a50a158c1a042279bf17401d4
|
[] |
no_license
|
robputt796/py-rail
|
https://github.com/robputt796/py-rail
|
1c68e469c7f16006b8aa2168b50772a5c2e576c2
|
70cae458ddd7be5e40bd8a21327fe9069c06f987
|
refs/heads/master
| 2021-05-29T07:50:10.538441 | 2014-12-23T23:31:28 | 2014-12-23T23:31:28 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#hornby_lower_interface.py - this modules encapsulates the functions of the Hornby Elite DCC controller.
'''
for this module to work they would have to create a text file called 10-elite.rules
and copy the following (all on one line)
ATTR{idVendor}=="04d8", ATTR{idProduct}=="000a", RUN+="/sbin/modprobe -q ftdi_sio vendor=0x04d8 product=0x000a"
then run the they would need to move the file into etc/udev/rules.d
they have to be in the same directory has the file >> sudo mv 10-elite.rules /etc/udev/rules.d/
'''
import hornby # Use hornby_lower_interface.py as the interface for controlling trains and accessories
import time #Use python standard module time for simple timming functions
# example functions for South West Digital's Class 108 (55488) decoder
# F0 Lights on
def lights_on(t):
t.function(0,hornby.ON)
# F0 Lights off
def lights_off(t):
t.function(0,hornby.OFF)
# F1 Sound on
def sound_on(t):
t.function(1,hornby.ON)
# F1 Sound off
def sound_off(t):
t.function(1,hornby.OFF)
# F2 Horn 1
def horn1(t):
t.function(2,hornby.ON)
time.sleep(.1)
t.function(2,hornby.OFF)
# F3 Horn 2
def horn2(t):
t.function(3,hornby.ON)
time.sleep(.1)
t.function(3,hornby.OFF)
# F4 Brake
def brake(t):
t.function(4,hornby.ON)
time.sleep(.1)
t.function(4,hornby.OFF)
# F5 Buzzer x 2
def buzzer2(t):
t.function(5,hornby.ON)
time.sleep(.1)
t.function(5,hornby.OFF)
# F6 Buzzer x 1
def buzzer1(t):
t.function(6,hornby.ON)
time.sleep(.1)
t.function(6,hornby.OFF)
# F7 Aux 1 on
def aux1_on(t):
t.function(7,hornby.ON)
# F7 Aux 1 off
def aux1_off(t):
t.function(7,hornby.OFF)
# F8 Aux 2 on
def aux2_on(t):
t.function(8,hornby.ON)
# F8 Aux 2 off
def aux2_off(t):
t.function(8,hornby.OFF)
# F9 Directional Gear Change
def gear_change(t):
t.function(9,hornby.ON)
time.sleep(.1)
t.function(9,hornby.OFF)
# F10 Guards Whistle
def guards_whistle(t):
t.function(10,hornby.ON)
time.sleep(.1)
t.function(10,hornby.OFF)
# Accessory - station signal Go
def station_signal_go(a) :
a.activate()
# Accessory - station signal Stop
def station_signal_stop(a) :
a.deactivate()
# helper function - wait a given number of seconds
def wait(secs):
    """Announce the wait on stdout, then sleep for `secs` seconds.

    Fix: the original used a Python-2-only `print` statement; the
    parenthesized call prints the same single string on Python 2 and 3.
    """
    print("Wait {0:d} seconds".format(secs))
    time.sleep(secs)
# Open the controller serial link, probing ttyACM0..2 in turn.
try:
    hornby.connection_open('/dev/ttyACM0',115200) #<<<<<< must be changed to the right device + baud rate changed for the elink
except RuntimeError as e:
    try:
        hornby.connection_open('/dev/ttyACM1',115200) #<<<<<< must be changed to the right device + baud rate changed for the elink
    except RuntimeError as e:
        hornby.connection_open('/dev/ttyACM2',115200) #<<<<<< must be changed to the right device + baud rate changed for the elink
# set_debug(True) to show the data transmitted and received from the controller
# Try not to worry if it goes wierd
hornby.set_debug(False)
# Check hardware and perform initialisation
hornby.setup()
# create a train object to represent each train to be controlled
# parameter 1 = DCC addres
# The somerset Belle (small black 6-wheeler steam train) has an ID of 3
try:
    print '''
\t\t\t********************************
\t\t\t*    Railway Test Program Demo     *
\t\t\t********************************
press Ctrl + C ONCE to exit
'''
    # Exercise DCC addresses 3 and 4: sounds, aux, whistle, short run.
    for i in range(3,5):
        print "Testing train "+ str(i)
        t1 = hornby.Train(i)
        # Make sure it's stopped
        t1.throttle(0,hornby.FORWARD)
        try:
            sound_on(t1)
            horn1(t1)
            aux1_on(t1)
            aux1_off(t1)
            guards_whistle(t1)
            t1.throttle(55, hornby.FORWARD)
            time.sleep(2)
            t1.throttle(0, hornby.FORWARD)
        except ValueError:
            print
    # close the connection with a Hornby Elite DCC controller
    hornby.connection_close()
    print '''
\t\t\t********************************
\t\t\t           *****ENDING*****
\t\t\t********************************
'''
except KeyboardInterrupt:
    # Ctrl-C: stop the last-addressed train and release the serial link.
    print "Stopping the program"
    t1.throttle(0,hornby.FORWARD) # stopping the train
    hornby.connection_close()
    print '''
\t\t\t********************************
\t\t\t           *****ENDING*****
\t\t\t********************************
'''
|
UTF-8
|
Python
| false | false | 2,014 |
472,446,416,647 |
63f4ab309892043edfc0c31cd453bd717c945dfc
|
7420c6fd7f774dee035b3ff22c910c639c7ef8b8
|
/pythonInSysAdm/flag_poster/grabber.py
|
231439f42bbe24139e53626f2dfdefcdbd1f4b03
|
[] |
no_license
|
andresPanyakin/test
|
https://github.com/andresPanyakin/test
|
c09c7f9abdfbc1d8343b146f9e5f042fca37f9f8
|
a9944f4f2ea89c50da843d1a8597b7149670c4cb
|
refs/heads/master
| 2020-03-31T09:41:49.105484 | 2013-03-10T19:38:20 | 2013-03-10T19:38:20 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf8 -*-
## \file grabber.py
## \brief Реализация граббера
import logging
import time
import config as conf
from checker import Checker
__author__ = 'sima, rockosov'
conf.configuration_logging()
class Grabber( object ):
    """
    \brief Grabber - the grabber class.
    Creates a Checker for each IP address in the configured list and
    starts them.  (Docstring translated from Russian.)
    """
    def __init__( self ):
        logging.debug( 'Initialize' )
        # One Checker per configured "evil" IP address; start() launches it.
        for addr in conf.evil_ip_list:
            checker = Checker( addr )
            checker.start()
|
UTF-8
|
Python
| false | false | 2,013 |
5,239,860,138,900 |
049b5a0bc3c7ffb02ed422bc1de60167744af6c0
|
032cfb0d625a6fa9d3e7f6dba41a4619364de688
|
/view_08_09/02_find_nonpaired.py
|
50b7a8c2412da5aae2d4df4ab591be82762d7880
|
[] |
no_license
|
explesy/python
|
https://github.com/explesy/python
|
435878bd88c44ef7854763c56d8b8283b3a97f46
|
76214e6d071f7fd558142c371ea5c22a101b80a2
|
refs/heads/master
| 2016-09-05T08:47:47.339755 | 2014-09-24T23:25:02 | 2014-09-24T23:25:02 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python3
def find_nonpaired(digits):
    """Return the elements of *digits* that occur exactly once, preserving
    their original order.

    Fixes over the original: the O(n^2) double loop is replaced by one
    Counter pass (O(n)), and the leftover debug print of matching pairs
    inside this otherwise-pure function was removed.
    """
    from collections import Counter  # local import keeps the snippet self-contained
    counts = Counter(digits)
    return [d for d in digits if counts[d] == 1]
def main():
    """Smoke-test find_nonpaired against one known input/expected pair."""
    sample = [2, 56, 23, 5, 7, 5, 7, 2]
    expected = [56, 23]
    matched = find_nonpaired(sample) == expected
    if matched:
        print("==> OK <==")
        print("{} nonpaired = {}".format(sample, expected))
    else:
        print("Something wrong")
        print("{} nonpaired != {}".format(sample, expected))
main()
|
UTF-8
|
Python
| false | false | 2,014 |
4,569,845,233,145 |
dff29c1f2d0ebd6f5f04856a2f1b72739b2ebfa6
|
4ce915b56ef78c7b6a0fb8f0b00c0a1a5c9491eb
|
/lib/blast/QueryToBLAST.py
|
10f14cda227e0fc8a7494eb54095666b75cdc235
|
[] |
no_license
|
antitak/au-summer-2013
|
https://github.com/antitak/au-summer-2013
|
2d3b77a91221397f48a239a1721f49db55ff6737
|
77debf45cbc5e8bb5ed538002820ce29ae2a38df
|
refs/heads/master
| 2021-01-16T22:55:16.675198 | 2013-07-08T14:00:16 | 2013-07-08T14:00:16 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from Bio import Blast
from Bio.Blast.Applications import NcbiblastxCommandline
from Bio import SeqIO
from Bio import Seq
# Run a blastx search of the first protein record against the "nr" database,
# writing XML (outfmt=5) results to results.xml.
nr_db_name = "nr"
in_protein_name = "VH_pro_trust.fasta"
# Take only the first record from the FASTA file.
in_protein = list(SeqIO.parse(open(in_protein_name), format="fasta"))[0]
print(in_protein)
xml_results_name = "results.xml"
# NOTE(review): query= is given a SeqRecord here, but NCBI command-line
# wrappers conventionally expect a query *filename* — confirm this runs.
blastx_cline = NcbiblastxCommandline(query=in_protein,
                                     db=nr_db_name,
                                     evalue=0.001,
                                     outfmt=5,
                                     out=xml_results_name)
# Calling the wrapper executes the external blastx binary.
blastx_cline()
|
UTF-8
|
Python
| false | false | 2,013 |
11,845,519,810,607 |
261c688546de6063902d3148ad3e043918c01445
|
cee3e7282e1dff882a34a7b438949239b2db3f1e
|
/2wayserver.py
|
535d884831c5b733be4155ef237cadee9e88cb74
|
[] |
no_license
|
scottdcrawford/pythonClientServer
|
https://github.com/scottdcrawford/pythonClientServer
|
59e1896740548fafb47eebefa3fd99a3410923fb
|
9c8c66e95d67922bb326e73bf5acaf683bf5bdad
|
refs/heads/master
| 2017-12-22T10:08:24.970027 | 2014-08-13T08:43:49 | 2014-08-13T08:43:49 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
'''
Name: Scott Crawford
Description: 2-connection client-server network application for transferring files, server side.
Resources: Program runs successfully on os-class.engr.oregonstate.edu, student collaboration
Sources: http://pymotw.com/2/socket/tcp.html, http://www.youtube.com/watch?v=qELZAi4yra8
Running code: /usr/bin/python ftserver.py
'''
import socket
import sys
# Listen on all interfaces; values were once meant to come from argv.
HOST = '' #sys.argv[1]
PORT = 30021 #sys.argv[2]
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Create a TCP/IP socket
server_address = (HOST, PORT)
print >>sys.stderr, 'starting up on %s port %s' % server_address
# Allow quick restarts without "address already in use" errors.
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(server_address) # Bind the socket to the port
sock.listen(3) # Listen for 1 incoming connection
print 'waiting for a connection' # Wait for a connection
while True: #Loop for conversation to take place until it is closed.
    connection, client_address = sock.accept()
    print 'Connected by', client_address #prints statement with clients address for user
    # NOTE(review): the two assignments below have no effect — nothing reads
    # them before the connection is closed, and "POST" looks like a typo for
    # "PORT" (probably leftover scaffolding for a data connection on 30020).
    HOST = ''
    POST = 30020
    connection.close() #closes connection
|
UTF-8
|
Python
| false | false | 2,014 |
8,899,172,247,328 |
f8aa2ac0ac1c92ccc36a14088c0c474bb888c062
|
972c19a4860f7e65f8d6ef7e6944a171c8d34e39
|
/is_core/main.py
|
b29cf47e704ab30ae71639e5bc391915ed70cf05
|
[] |
no_license
|
FilipSivak/django-is-core
|
https://github.com/FilipSivak/django-is-core
|
86ab33a19d642d363834c48a16b4ac0d4aca11ae
|
e982966359b80f45d3e3a188117ac257ac7d4da7
|
refs/heads/master
| 2020-12-25T06:12:22.859612 | 2014-02-03T15:18:59 | 2014-02-03T15:18:59 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.conf.urls import patterns, url
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404
from django.utils.translation import ugettext_lazy as _
from is_core.form import RestModelForm
from is_core.actions import WebAction, RestAction
from is_core.generic_views.form_views import AddModelFormView, EditModelFormView
from is_core.generic_views.table_views import TableView
class ISCore(object):
    """Base controller for one site section.

    Subclasses declare where the section appears in the menu and which
    views it exposes; this base wires those views into django URL
    patterns and resolves menu URLs.
    """
    menu_group = None
    menu_subgroup = None
    menu_url_name = None
    menu_verbose_name = None
    show_in_menu = True

    def __init__(self, site_name):
        self.site_name = site_name
        # Resolve the view table once, at construction time.
        self.views = self.get_views()

    def get_urlpatterns(self, views):
        """Turn a ``{key: (pattern, view, ...)}`` table into urlpatterns."""
        named_urls = [
            url(entry[0], entry[1],
                name='%s-%s-%s' % (key, self.menu_group, self.menu_subgroup))
            for key, entry in views.items()
        ]
        return patterns('', *named_urls)

    def get_urls(self):
        """URL patterns for the views registered at construction time."""
        return self.get_urlpatterns(self.views)

    def get_show_in_menu(self, request):
        """Whether this section should be listed in the menu."""
        return self.show_in_menu

    def get_views(self):
        """Mapping of view key -> (url pattern, view, ...); empty by default."""
        return {}

    def menu_url(self, account, environment):
        """Reverse the menu entry URL for the given account/environment."""
        pattern_name = ('%(site_name)s:' + self.menu_url_name) % {'site_name': self.site_name}
        return reverse(pattern_name, args=(account, environment))
class ModelISCore(ISCore):
    """ISCore flavor backed by a django model (``self.model`` set by subclass)."""
    exclude = []

    def save_model(self, request, obj, change):
        """Persist ``obj``; hook point for subclasses."""
        obj.save()

    def delete_model(self, request, obj):
        """Remove ``obj``; hook point for subclasses."""
        obj.delete()

    @property
    def menu_verbose_name(self):
        """Human-readable plural name taken from the model meta."""
        return self.model._meta.verbose_name_plural

    @property
    def menu_group(self):
        """Menu group derived from the model's application label."""
        return str(self.model._meta.app_label)

    @property
    def menu_subgroup(self):
        """Menu subgroup derived from the model's module name."""
        return str(self.model._meta.module_name)

    def get_obj(self, pk):
        """Fetch the model instance with primary key ``pk`` or raise 404."""
        return get_object_or_404(self.model, pk=pk)
class UIISCore(ModelISCore):
    # UI-enabled model core: wires list/add/edit views for the model and
    # exposes menu, breadcrumb and row-action metadata.
    # NOTE(review): relies on `self.allowed_views`, `self.model` and
    # `self.has_read_permission` being provided by a subclass — confirm.
    list_display = ()
    inline_form_views = ()
    add_view = AddModelFormView
    edit_view = EditModelFormView
    table_view = TableView
    show_in_menu = True
    fieldsets = ()
    default_list_filter = {}
    api_url_name = None
    list_actions = ()
    form_class = RestModelForm
    def get_show_in_menu(self, request):
        # Only show the menu entry when the list view is actually enabled.
        return 'list' in self.allowed_views and self.show_in_menu;
    def get_rest_list_fields(self):
        # Fields exposed through the REST list endpoint.
        return list(self.list_display)
    def get_inline_form_views(self, request, obj=None):
        return self.inline_form_views
    def get_default_list_filter(self, request):
        # Copy so callers may mutate without touching the class default.
        return self.default_list_filter.copy()
    def menu_url_name(self):
        # Menu links always target the list view of this group/subgroup.
        info = self.menu_group, self.menu_subgroup
        return 'list-%s-%s' % info
    menu_url_name = property(menu_url_name)
    def get_fieldsets(self, form):
        return list(self.fieldsets)
    def bread_crumbs_url_names(self, context):
        # Build (label, url_name-or-None) pairs for the breadcrumb bar; the
        # list entry is only a link when the user may read the list view.
        request = context.get('request')
        view_type = context.get('view_type')
        bread_crumbs_url_names = [
            (_('List %s') % self.model._meta.verbose_name,
             'list' in self.allowed_views and \
             self.has_read_permission(request.user, request.account_pk) and \
             '%s:list-%s-%s' % (self.site_name, self.menu_group, self.menu_subgroup) or None)
        ]
        if view_type == 'add':
            bread_crumbs_url_names.append((_('Add %s') % self.model._meta.verbose_name, None))
        elif view_type == 'edit':
            bread_crumbs_url_names.append((_('Edit %s') % self.model._meta.verbose_name, None))
        return bread_crumbs_url_names
    def get_views(self):
        # Register list/add/edit url patterns for every enabled view kind.
        views = super(UIISCore, self).get_views()
        if 'list' in self.allowed_views:
            views['list-%s-%s' % (self.menu_group, self.menu_subgroup)] = \
                (r'^/?$', self.table_view.as_view(persoo_view=self))
        if 'add' in self.allowed_views:
            views['add-%s-%s' % (self.menu_group, self.menu_subgroup)] = \
                (r'^/add/$', self.add_view.as_view(persoo_view=self))
        if 'edit' in self.allowed_views:
            views['edit-%s-%s' % (self.menu_group, self.menu_subgroup)] = \
                (r'^/(?P<pk>\d+)/$', self.edit_view.as_view(persoo_view=self))
        return views
    def default_list_actions(self, user, account_pk):
        # Edit (web) and Delete (REST) are always offered on list rows.
        self._default_list_actions = []
        self._default_list_actions.append(WebAction('edit-%s-%s' % (self.menu_group, self.menu_subgroup),
                                                    _('Edit'), 'edit'))
        self._default_list_actions.append(RestAction('delete', _('Delete')))
        return self._default_list_actions
    def get_list_actions(self, user, account_pk):
        # Subclass-declared actions come first, defaults after.
        list_actions = list(self.list_actions) + list(self.default_list_actions(user, account_pk))
        return list_actions
    def gel_api_url_name(self):
        # NOTE(review): "gel_" looks like a typo for "get_"; kept as-is since
        # external callers may already use this name.
        return self.api_url_name
|
UTF-8
|
Python
| false | false | 2,014 |
4,922,032,551,125 |
bf6d2b07429ed379937a705d255524e1064e7099
|
04e794e6bdb8b3de778a338dff08fad061bd314b
|
/Analyzer/src/logparse/commons.py
|
9d70dc12c2d1f90cc8c9acdc48d52419e691ec5f
|
[] |
no_license
|
tectronics/socialgossip
|
https://github.com/tectronics/socialgossip
|
b5404e56a7ea1d785cca90be82b371edb67a8a5c
|
d9575a1f097e02746e849a34557633061dfa3f61
|
refs/heads/master
| 2018-01-11T15:03:00.938360 | 2014-06-14T16:13:46 | 2014-06-14T16:13:46 | 46,855,976 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
'''
Created on Oct 18, 2011
@author: giuliano
'''
import re
import subprocess
from subprocess import Popen
import sys
class ParameterIterator(object):
    """Iterate over ``NAME_value`` parameter tokens embedded in a string.

    A token is an upper-case identifier, an underscore, and then either
    another upper-case identifier or an (optionally fractional) number,
    e.g. ``ALPHA_10``, ``BETA_2.5`` or ``GAMMA_DELTA``.
    """
    IDENT = "[A-Z]+"
    INT_OR_FLOAT = "[0-9]+(?:\\.[0-9]+)?"
    INT_OR_FLOAT_OR_IDENT = "(?:" + IDENT + "|(?:" + INT_OR_FLOAT + "))"
    SPLIT_CHAR = "_"
    PARAMETER = IDENT + SPLIT_CHAR + INT_OR_FLOAT_OR_IDENT

    def __init__(self, string):
        self.__string__ = string

    def __iter__(self):
        """Yield each matched parameter token, in order of appearance."""
        return (hit.group(0) for hit in re.finditer(self.PARAMETER, self.__string__))
class AnalyzerLauncher:
    """Builds and runs an external ``analyzer-j`` command line.

    Options are collected with item assignment (``launcher[key] = value``)
    and serialized as ``key=value`` pairs behind ``-p``.
    """
    def __init__(self, processor, inp=None, oup=None, zipped=False,
                 separator=","):
        """
        :param processor: name of the processor passed as the final argument.
        :param inp: optional input file (``-i``).
        :param oup: optional output file (``-o``).
        :param zipped: when True adds ``--zipped``.
        :param separator: field separator handed to ``-s``.
        """
        self.__properties__ = {}
        self.__processor__ = processor
        self.__output__ = oup
        self.__input__ = inp
        self.__zipped__ = zipped
        # Bug fix: 'separator' used to be accepted but silently ignored —
        # run() always hardcoded ",".  Store it so run() honors the argument
        # (default unchanged, so existing callers behave identically).
        self.__separator__ = separator

    def __setitem__(self, key, value):
        """Record a ``-p`` option as ``key=value``."""
        self.__properties__[key] = value

    def run(self, output):
        """Launch analyzer-j, streaming its stdout to ``output``.

        Blocks until the child exits; the child's stderr is echoed to our
        own stderr.
        """
        command = ["analyzer-j", "-s", self.__separator__]
        if not self.__output__ is None:
            command.append("-o")
            command.append(self.__output__)
        if not self.__input__ is None:
            command.append("-i")
            command.append(self.__input__)
        if self.__zipped__:
            command.append("--zipped")
        command.append("-p")
        command.append(self.__option_string__())
        command.append(self.__processor__)
        # NOTE(review): shell=True with a joined string means option values
        # are subject to shell interpretation; safe only for trusted input.
        command = " ".join(command)
        process = Popen(command, shell=True, stdout=output, stderr=subprocess.PIPE)
        while process.returncode is None:
            (stdout, stderr) = process.communicate()
            # Portable replacement for the Python-2-only "print >>" syntax.
            sys.stderr.write(str(stderr) + "\n")

    def __option_string__(self):
        """Serialize recorded options as a comma-joined ``key=value`` list."""
        opts = []
        for key, value in self.__properties__.items():
            opt = key + "=" + value
            opts.append(opt)
        return ",".join(opts)
|
UTF-8
|
Python
| false | false | 2,014 |
10,273,561,785,208 |
a77e39b3ffebb0cdf9d63d1e3a40b2aee53cae1c
|
c39c09b6fe1937610f465c71eb5f14d6acb9fdc4
|
/sandbox/mtgoxExample3.py
|
a089feb41ac8ff2db1e7cf748e4f80b2177e3a81
|
[] |
no_license
|
spuder/alfred-bitcoin-workflow
|
https://github.com/spuder/alfred-bitcoin-workflow
|
ee9a65dd02df0a8cad42b00fa7328b1f65179e78
|
1bd7683c4ab1c3c051256bc2523226f474c59558
|
refs/heads/master
| 2021-01-22T22:57:01.327415 | 2013-04-27T23:51:56 | 2013-04-27T23:51:56 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#http://www.pythonforbeginners.com/python-on-the-web/parsingjson/
# Example: parse a JSON file and navigate the resulting dict (Python 2).
import json
import pprint
# NOTE(review): a `with open(...)` block would guarantee the handle closes
# even if json.load raises.
json_data = open("json_file")
# the value returned from json.load is a Python dictionary.
data = json.load(json_data)
# use pprint to make the output more readable
pprint.pprint(data)
json_data.close()
# Get the data you want, by navigating the structure using standard python.
print data["maps"][0]["id"]
print data["masks"]["id"]
print data["om_points"]
|
UTF-8
|
Python
| false | false | 2,013 |
15,633,681,000,352 |
fde91a2f5dea1fba99d9ebe6755d5c574e03b894
|
375b3fc3d75d0aa9a8405d797076d3bbfa71b6b6
|
/ThirdParty/Ert/devel/python/test/region_test.py
|
4e99395f35de0895ac1706993f3da92d717ff550
|
[
"GPL-3.0-or-later",
"GPL-1.0-or-later",
"GPL-3.0-only"
] |
non_permissive
|
renjianshuiguo/ResInsight
|
https://github.com/renjianshuiguo/ResInsight
|
0d6640a549e05613afee835583813cd25a7058f8
|
24bd90f55dde82f838e169531429a68492809bb6
|
refs/heads/master
| 2021-01-17T05:26:32.929998 | 2013-08-23T11:59:11 | 2013-08-23T11:59:11 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# Copyright (C) 2012 Statoil ASA, Norway.
#
# The file 'region_test.py' is part of ERT - Ensemble based Reservoir Tool.
#
# ERT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ERT is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
# for more details.
import datetime
import unittest
import ert
import ert.ecl.ecl as ecl
from test_util import approx_equal, approx_equalv
case = "test-data/Statoil/ECLIPSE/Gurbat/ECLIPSE"
class RegionTest( unittest.TestCase ):
    # Exercises EclRegion masking of keyword arithmetic (mul/div/add/sub)
    # and i/j/k slice selection.  Requires the Statoil Gurbat test data on
    # disk (see `case` above) and the ert.ecl bindings.
    def setUp(self):
        # Load grid plus restart/init files for the shared test deck.
        self.grid = ecl.EclGrid( case )
        self.rst_file = ecl.EclFile( "%s.UNRST" % case )
        self.init_file = ecl.EclFile( "%s.INIT" % case )
    def test_kw_imul(self):
        # Multiplying by -1 twice under the same mask must round-trip.
        P = self.rst_file["PRESSURE"][5]
        fipnum = self.init_file["FIPNUM"][0]
        fipnum_copy = fipnum.deep_copy()
        reg = ecl.EclRegion( self.grid , False )
        reg.select_more( P , 260 )
        fipnum.mul( -1 , mask = reg )
        self.assertFalse( fipnum.equal( fipnum_copy ) )
        fipnum.mul( -1 , mask = reg )
        self.assertTrue( fipnum.equal( fipnum_copy ) )
    def test_kw_idiv(self):
        # Dividing by -1 twice under the same mask must round-trip.
        P = self.rst_file["PRESSURE"][5]
        fipnum = self.init_file["FIPNUM"][0]
        fipnum_copy = fipnum.deep_copy()
        reg = ecl.EclRegion( self.grid , False )
        reg.select_more( P , 260 )
        fipnum.div( -1 , mask = reg )
        self.assertFalse( fipnum.equal( fipnum_copy ) )
        fipnum.div( -1 , mask = reg )
        self.assertTrue( fipnum.equal( fipnum_copy ) )
    def test_kw_iadd(self):
        # Adding 1 inside the mask, then inside the inverted mask, equals
        # adding 1 everywhere; subtracting 1 restores the original.
        P = self.rst_file["PRESSURE"][5]
        fipnum = self.init_file["FIPNUM"][0]
        fipnum_copy = fipnum.deep_copy()
        reg = ecl.EclRegion( self.grid , False )
        reg.select_more( P , 260 )
        fipnum.add( 1 , mask = reg )
        self.assertFalse( fipnum.equal( fipnum_copy ) )
        reg.invert( )
        fipnum.add( 1 , mask = reg )
        fipnum.sub(1)
        self.assertTrue( fipnum.equal( fipnum_copy ) )
    def test_kw_isub(self):
        # sub then add under the same mask must round-trip.
        P = self.rst_file["PRESSURE"][5]
        fipnum = self.init_file["FIPNUM"][0]
        fipnum_copy = fipnum.deep_copy()
        reg = ecl.EclRegion( self.grid , False )
        reg.select_more( P , 260 )
        fipnum.sub( 1 , mask = reg )
        self.assertFalse( fipnum.equal( fipnum_copy ) )
        fipnum.add( 1 , mask = reg)
        self.assertTrue( fipnum.equal( fipnum_copy ) )
    def test_slice(self):
        # Select i in [0,5]: every selected cell obeys the bound and the
        # count is ny*nz*6.
        reg = ecl.EclRegion( self.grid , False )
        reg.select_islice( 0 , 5 )
        OK = True
        for gi in reg.global_list:
            (i,j,k) = self.grid.get_ijk( global_index = gi )
            if i > 5:
                OK = False
        self.assertTrue( OK )
        self.assertTrue( self.grid.ny * self.grid.nz *6 == len(reg.global_list))
        # Intersect with j in [7,8].
        reg.select_jslice( 7 , 8 , intersect = True)
        OK = True
        for gi in reg.global_list:
            (i,j,k) = self.grid.get_ijk( global_index = gi )
            if i > 5:
                OK = False
            if j < 7 or j > 8:
                OK = False
        self.assertTrue( OK )
        self.assertTrue( 2 * self.grid.nz * 6 == len(reg.global_list))
        # In-place intersection (&=) with k in [3,5].
        reg2 = ecl.EclRegion( self.grid , False )
        reg2.select_kslice( 3 , 5 )
        reg &= reg2
        OK = True
        for gi in reg.global_list:
            (i,j,k) = self.grid.get_ijk( global_index = gi )
            if i > 5:
                OK = False
            if j < 7 or j > 8:
                OK = False
            if k < 3 or k > 5:
                OK = False
        self.assertTrue( OK )
        self.assertTrue( 2 * 3 *6 == len(reg.global_list))
def fast_suite():
    """Assemble the quick-running subset of RegionTest cases, in fixed order."""
    quick_tests = ('test_kw_imul',
                   'test_kw_iadd',
                   'test_kw_idiv',
                   'test_kw_isub',
                   'test_slice')
    suite = unittest.TestSuite()
    for test_name in quick_tests:
        suite.addTest(RegionTest(test_name))
    return suite
# Run only the fast subset of tests when invoked as a script.
if __name__ == "__main__":
    unittest.TextTestRunner().run( fast_suite() )
|
UTF-8
|
Python
| false | false | 2,013 |
14,010,183,357,862 |
9683aaa714cb4bd9f88665038cee3082abebfacf
|
27e147fa304ad20cad5d1618611c4bbd0b24c3e1
|
/lib/i3status/i3status.py
|
9064c9379dcfd1d6e0ceebdcd5e9e5dbae36a30a
|
[] |
no_license
|
mtjandra/CliTools
|
https://github.com/mtjandra/CliTools
|
f8e3f0e3a88d65ceb0456221783cc851cbc2dae2
|
7e3cfaa3bb7f9351c3ab81009be7516ce3e50780
|
refs/heads/master
| 2018-01-07T12:46:48.298665 | 2014-09-19T04:00:12 | 2014-09-19T04:01:52 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import subprocess
from i3pystatus import Status
status = Status(standalone=True)
# Display clock
status.register("clock",
format="%a %-d %b %X",)
# The battery monitor has many formatting options, see README for details
# This would look like this, when discharging (or charging)
# ↓14.22W 56.15% [77.81%] 2h:41m
# And like this if full:
# =14.22W 100.0% [91.21%]
#
# This would also display a desktop notification (via dbus) if the percentage
# goes below 5 percent while discharging. The block will also color RED.
status.register("battery",
format="{status}/{consumption:.2f}W \[{percentage:.2f}%\] {remaining:%E%hh:%Mm}",
alert=True,
alert_percentage=5,
status={
"DIS": "↓",
"CHR": "↑",
"FULL": "=",
},)
# Displays whether a DHCP client is running
status.register("runwatch",
name="DHCP",
path="/var/run/dhclient*.pid",)
# Displays if connected to a vpn
status.register("runwatch",
name="VPN",
path="/var/run/vpnc/pid",)
# Has all the options of the normal network and adds some wireless specific things
# like quality and network names.
#
# Note: requires both netifaces and basiciw
"""
status.register("wireless",
interface="wlp0s20u1",
format_up="{essid} {quality:03.0f}%",)
"""
# Display MPD
status.register("mpd",
format="{artist} {status} {title}",
status={
"pause": "▷",
"play": "▶",
"stop": "◾",
},)
# Show the status
status.run()
|
UTF-8
|
Python
| false | false | 2,014 |
5,085,241,317,122 |
f265fc6d12513a7dd762daa7e10502f7b1d47b77
|
ad86f150df83390cc527eed5339fcc67f411ea58
|
/experiment01/01_energy/energyFeatureSelection.py
|
cf77588c9d405f9099f3f7195913df97a91ee2df
|
[
"MIT"
] |
permissive
|
stefanseibert/DataMining
|
https://github.com/stefanseibert/DataMining
|
3faa4292600166eb30d08b70e430696f2539626f
|
c80dee127605a80070ebfaccdf9f0199540141f7
|
refs/heads/master
| 2021-01-01T18:34:09.483361 | 2014-07-10T13:02:17 | 2014-07-10T13:02:17 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import pandas
from sklearn import feature_selection
from numpy import *
import matplotlib.pyplot as plt
NUM_CLUSTER = 4
ENERGYFORMS = ['Oil', 'Gas', 'Coal', 'Nuclear', 'Hydro']
TARGET = ['CO2Emm']
NUM_ENERGYFORMS = len(ENERGYFORMS)
energyInfo = pandas.read_csv('../resources/EnergyMixGeo.csv')
reducedEnergyInfo = energyInfo[ENERGYFORMS]
targetInfo = energyInfo[TARGET]
featureSelector = feature_selection.SelectKBest(score_func = feature_selection.f_regression, k = NUM_ENERGYFORMS)
featureSelector.fit_transform(X = reducedEnergyInfo, y=targetInfo )
scoresArray = pandas.Series(featureSelector.scores_, reducedEnergyInfo.columns.values)
print scoresArray
|
UTF-8
|
Python
| false | false | 2,014 |
16,432,544,875,180 |
5e884554bf15f50e24d74dabdc9b62af29f86c96
|
3928abf1d6877340cd46f09049f07af4e4c676ee
|
/load_data.py
|
078ccf3721880bf71341204b50cf03e39458083f
|
[
"BSD-3-Clause"
] |
permissive
|
ilsgateway/ilsgateway
|
https://github.com/ilsgateway/ilsgateway
|
8283a0d0a3ff446a498c324b69f76e42beece258
|
6730060cc897069e5c05804d39902f0f7b3871b3
|
refs/heads/master
| 2016-09-06T02:21:19.380063 | 2011-08-09T18:23:39 | 2011-08-09T18:23:39 | 1,384,785 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
def LoadRegions(in_file):
    # Import Region rows from a CSV (pk, name[, longitude[, latitude]]),
    # parenting each under the MOHSW ServiceDeliveryPoint. Existing regions
    # (matched by name) are skipped.
    print "Loading Regions from %s" % (in_file)
    # NOTE(review): the file handle is never closed; `entries` is unused.
    f = open(in_file, 'rU' )
    reader = csv.reader( f )
    header_row = []
    entries = []
    # NOTE(review): filter(...)[0] raises IndexError when no record exists,
    # so the `if not top_level_sdp` guard below can never trigger.
    top_level_sdp = ServiceDeliveryPoint.objects.filter(name="MOHSW")[0]
    if not top_level_sdp:
        print "Missing initial SDP record - rerun loaddata"
        sys.exit(1)
    count = 0
    skipped = 0
    for row in reader:
        # First row is the header; remember it and skip.
        if not header_row:
            header_row = row
            continue
        pk = row[0]
        region_name = row[1]
        # NOTE(review): comparing a string to int 0 is always False in
        # Python 2; this guard probably meant to skip empty names.
        if region_name == 0:
            continue
        existing_sdp = Region.objects.filter(name=region_name)
        if existing_sdp:
            skipped = skipped + 1
            continue
        latitude = 0
        longitude = 0
        if len(row) > 2:
            longitude = row[2]
        if len(row) > 3:
            latitude = row[3]
        # Only create a Point when both coordinates are present and nonzero.
        p = None
        if longitude and longitude != '0' and latitude and latitude != '0':
            p = Point(latitude=latitude, longitude=longitude)
            p.save()
        sdp = Region(pk=pk, point=p, name=row[1], parent_type=ContentType.objects.get_for_model(MinistryOfHealth), parent_id = top_level_sdp.id, service_delivery_point_type_id=2)
        sdp.save()
        print sdp
        count = count + 1
    print "Loaded %d new Region(s), skipped %d" % (count, skipped)
def LoadDistricts(in_file):
    # Import District rows from a CSV (pk, region name, district name, ...,
    # longitude, latitude), parenting each under its named Region. Existing
    # districts (matched by name) are skipped; an unknown region aborts.
    print "Loading Districts from %s" % (in_file)
    # NOTE(review): the file handle is never closed; `entries` is unused.
    f = open(in_file, 'rU' )
    reader = csv.reader( f )
    header_row = []
    entries = []
    count = 0
    skipped = 0
    for row in reader:
        # First row is the header; remember it and skip.
        if not header_row:
            header_row = row
            continue
        district_name = row[2].upper()
        if not district_name or district_name == 0:
            continue
        existing_sdp = District.objects.filter(name=district_name)
        if existing_sdp:
            skipped = skipped + 1
            continue
        sdp = District()
        parent_name = row[1].upper()
        parent_regions = ServiceDeliveryPoint.objects.filter(name=parent_name)
        # A district whose region is unknown is a data error: abort the load.
        if not parent_regions:
            print "Invalid Region Name: %s" % parent_name
            print "Please correct and retry"
            sys.exit(1)
        sdp.parent_id = parent_regions[0].id
        sdp.parent_type = ContentType.objects.get_for_model(Region)
        sdp.pk = row[0]
        sdp.name = district_name
        longitude = row[4]
        latitude = row[5]
        # Only create a Point when both coordinates are present and nonzero.
        p = None
        if longitude and longitude != '0' and latitude and latitude != '0':
            p = Point(latitude=latitude, longitude=longitude)
            p.save()
        sdp.point = p
        sdp.service_delivery_point_type_id=3
        sdp.save()
        print sdp
        count = count + 1
    print "Loaded %d new District(s), skipped %d" % (count, skipped)
def LoadFacilities(in_file):
print "Loading Facilities from %s" % (in_file)
f = open(in_file, 'rU' )
reader = csv.reader( f )
header_row = []
entries = []
count = 0
skipped = 0
for row in reader:
if not header_row:
header_row = row
continue
facility_name = row[4].upper()
if not facility_name or facility_name == 0:
continue
parent_name = row[3].upper()
parent_districts = District.objects.filter(name=parent_name)
if not parent_districts:
print "Invalid District Name: %s" % parent_name
print "Please correct and retry"
print row
sys.exit(1)
msd_code = row[1].upper()
if not re.match('D\d+', msd_code):
print "Invalid MSD code format: %s" % msd_code
sys.exit(1)
existing_sdp = Facility.objects.filter(msd_code=msd_code)
if existing_sdp:
print "Facility with MSD Code %s already exists - skipping" % msd_code
skipped = skipped + 1
continue
sdp = Facility()
sdp.pk = row[0]
sdp.parent_id = parent_districts[0].id
sdp.parent_type = ContentType.objects.get_for_model(District)
sdp.name = facility_name
sdp.msd_code = row[1]
delivery_group_name = row[5].upper()
delivery_groups = DeliveryGroup.objects.filter(name__iexact=delivery_group_name)
if not delivery_groups:
print "Invalid Delivery Group: %s" % delivery_group_name
sdp.delivery_group = delivery_groups[0]
longitude = 0
latitude = 0
if len(row) > 6:
longitude = row[6]
if len(row) > 7:
latitude = row[7]
p = None
if longitude and longitude != '0' and latitude and latitude != '0':
p = Point(latitude=latitude, longitude=longitude)
p.save()
sdp.point = p
sdp.service_delivery_point_type_id=4
sdp.save()
for product in Product.objects.all():
ActiveProduct.objects.create(product=product, service_delivery_point=sdp)
print sdp, longitude, latitude
count = count + 1
print "Loaded %d new Facilities, skipped %d" % (count, skipped)
def LoadSchedules():
count = 0
callbacks = ['ilsgateway.callbacks.run_reminders']
# 'ilsgateway.callbacks.district_randr_reminder',
# 'ilsgateway.callbacks.facility_delivery_reminder',
# 'ilsgateway.callbacks.district_delivery_reminder',
# 'ilsgateway.callbacks.district_delinquent_deliveries_summary',
# 'ilsgateway.callbacks.facility_soh_reminder',]
for callback in callbacks:
if not EventSchedule.objects.filter(callback=callback):
e = EventSchedule(callback=callback,
description=callback,
days_of_month='*',
hours='*',
minutes=[0,15,30,45])
e.save()
count = count + 1
print "Loaded %d EventSchedules" % count
# Script entry point: configure django, extend sys.path for the project's
# lib/apps directories, then run the CSV fixture loaders in dependency order
# (regions -> districts -> facilities -> schedules).
# NOTE(review): `execute_manager` is imported but never used.
from django.core.management import execute_manager
import sys, os
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
try:
    import settings # Assumed to be in the same directory.
except ImportError:
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r." % __file__)
    sys.exit(1)
if __name__ == "__main__":
    import os
    project_root = os.path.abspath(os.path.dirname(__file__))
    # NOTE(review): `dir` shadows the builtin of the same name.
    for dir in ["lib", "apps"]:
        path = os.path.join(project_root, dir)
        sys.path.insert(0, path)
    sys.path.insert(0, project_root)
    # Model imports must happen after sys.path/django setup above.
    from ilsgateway.models import ServiceDeliveryPoint, DeliveryGroup, Facility, District, Region, MinistryOfHealth, Product, ActiveProduct
    from django.contrib.contenttypes.models import ContentType
    from rapidsms.contrib.locations.models import Point
    from rapidsms.contrib.scheduler.models import EventSchedule
    project_root = os.path.abspath(os.path.dirname(__file__))
    import csv
    import re
    # NOTE(review): `model_name` is assigned but never used.
    model_name = "ServiceDeliveryPoint"
    regions_file = os.path.join(project_root, "apps", "ilsgateway", "fixtures", "regions.csv")
    districts_file = os.path.join(project_root, "apps", "ilsgateway", "fixtures", "districts.csv")
    facilities_file = os.path.join(project_root, "apps", "ilsgateway", "fixtures", "facilities.csv")
    LoadRegions(regions_file)
    LoadDistricts(districts_file)
    LoadFacilities(facilities_file)
    LoadSchedules()
|
UTF-8
|
Python
| false | false | 2,011 |
77,309,443,372 |
6015d8e3567e15d6e0f343e655fe5ef1d76d098e
|
dbf35ec4f640799ffa25d0d30c002fb4e7c82efa
|
/test/defects/enthalpy_diagram.py
|
b7586c211f0a2bbbd4e95235a80bda5bac15f050
|
[] |
no_license
|
Shibu778/LaDa
|
https://github.com/Shibu778/LaDa
|
8962553c2f62882960b402f1a0761a9b80410ca6
|
9c0ab667f94dc4629404a8ec99cbeaa323f0c8b3
|
refs/heads/master
| 2023-03-16T19:58:44.411851 | 2013-06-16T10:59:11 | 2013-06-16T10:59:11 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""" Creates diagram of formation enthalpies with respect to Fermi energy. """
from pylada.jobs import AbstractMassExtract
from pylada.opt.decorators import make_cached
class PointDefectExtactor(object):
  """ Extracts output across all charged states of a defect.

      NOTE(review): the class below inherits from ``PointDefectExtractorImpl``,
      which is not defined anywhere in this file — this class's name (note the
      "Extactor" spelling) is likely the intended base; confirm before fixing.
  """
  def __init__(self, extract, epsilon = 1e0, host = None, pa_maxdiff=-8):
    """ Initializes an enthalpy function. """
    # extraction object.
    self.extract = extract.copy(unix_re=False)
    # Exclude relaxation sub-runs from the job tree.
    if self.extract.excludes is None: self.extract.excludes = [".*relax_*"]
    else: self.extract.excludes.append(".*relax_*$")
    self.epsilon = epsilon
    """ Dimensionless dielectric constant. """
    self._host = host
    """ Host extraction object.

        If None, will determine it from the directory structure.
    """
    self.pa_maxdiff = pa_maxdiff
    """ Potential alignment parameter. """
  @property
  def rootdir(self):
    """ Root directory of defects. """
    return self.extract.rootdir
  def _all_jobs(self):
    """ Loops over all jobs in special way. """
    for child in self.extract.children: # Each child is a different charge state.
      for job in child.itervalues(): yield job
  @property
  @make_cached
  def _charge_correction(self):
    """ Returns the charge corrections.

        Tries and minimizes the number of calculations by checking if performed
        in same cell.
    """
    from numpy.linalg import inv, det
    from numpy import array
    from quantities import eV
    result, cells = [], []
    # loops of all jobs.
    for job in self._all_jobs():
      cell = job.structure.cell * job.structure.scale
      invcell = inv(cell)
      found = None
      # looks if already exists.
      # NOTE(review): `continue` is taken when |det| is ~1, i.e. when the
      # cells look *equivalent* — this condition may be inverted; confirm.
      for i, other in enumerate(cells):
        rotmat = other * cell
        d = abs(det(rotmat))
        if abs(d - 1e0) < 1e-8: continue
        invrotmat = inv(rotmat)
        if all( abs(rotmat.T - invrotmat) < 1e-8 ): found = i; break
      if found is None:
        cells.append(inv(cell))
        # Scale by the dielectric constant on first computation.
        result.append( job.charge_corrections / self.epsilon )
      else: result.append(result[found])
    return array(result) * eV
  @property
  @make_cached
  def _potential_alignment(self):
    """ Potential alignments for all jobs. """
    from numpy import array
    from quantities import eV
    from pylada.crystal.point_defects import potential_alignment
    return array([ potential_alignment(state, self.host, self.pa_maxdiff) \
                   for state in self._all_jobs() ]) * eV
  @property
  @make_cached
  def _band_filling(self):
    """ Band-filling for all jobs. """
    from numpy import array
    from quantities import eV
    from pylada.crystal.point_defects import band_filling
    return array([ band_filling(state, self.host, maxdiff=self.pa_maxdiff) \
                   for state in self._all_jobs() ]) * eV
  @property
  @make_cached
  def _uncorrected(self):
    """ Uncorrected formation enthalpy. """
    from numpy.linalg import det
    from numpy import array
    from quantities import eV
    energies = []
    for state in self._all_jobs():
      # n = supercell multiplicity of the defect cell w.r.t. the host cell.
      n = int(det(state.structure.cell)/det(self.host.structure.cell) + 1.e-3) + 0.
      energies.append(state.total_energy - self.host.total_energy * n)
    return array(energies) * eV
  @property
  def _corrected(self):
    """ Corrected formation enthalpy. """
    return self._uncorrected \
           + self._charge_correction\
           + self._potential_alignment\
           + self._band_filling
  @property
  @make_cached
  def _charged_states(self):
    """ Yields extraction routine toward each charge states.

        If a charge state has children, then only the lowest energy calculation
        is returned.
    """
    from os.path import basename
    from operator import itemgetter
    # Map each job directory to its (raw, charge, alignment, filling,
    # corrected) energy tuple.
    alles = {}
    names = [child.directory for child in self._all_jobs()]
    for n, u, c, p, b, corr in zip( names, self._uncorrected, \
                                    self._charge_correction, \
                                    self._potential_alignment, \
                                    self._band_filling,\
                                    self._corrected ):
      alles[n] = u, c, p, b, corr
    corrected = self._corrected
    charges = [u.charge for u in self._all_jobs()]
    children = [u for u in self._all_jobs()]
    result = []
    # For each distinct charge, keep the lowest corrected-energy job and
    # decorate it with its energy components.
    for charge in sorted(list(set(charges))):
      sequence = [(child, u) for child, u, c in zip(children, corrected, charges) if c == charge]
      child = sorted(sequence, key=itemgetter(1))[0][0].copy()
      child.__dict__['raw_DeltaH'] = alles[child.directory][0]
      child.__dict__['charge_corrections'] = alles[child.directory][1]
      child.__dict__['potential_alignment'] = alles[child.directory][2]
      child.__dict__['band_filling'] = alles[child.directory][3]
      child.__dict__['DeltaH'] = alles[child.directory][4]
      result.append(child)
    return result
  @property
  def _all_energies(self):
    """ Dictionary with all energies.

        NOTE(review): BROKEN — `result` is not defined in this scope, so
        accessing this property raises NameError.  It likely meant to return
        the `alles` mapping built in ``_charged_states``.
    """
    return result
  @property
  def host(self):
    """ Returns extraction object towards the host. """
    # NOTE(review): this branch looks broken: `self._is_site` does not exist
    # (the property is `_site`), the first `host` assignment is immediately
    # overwritten, and `itemgetter`/`child` are not defined here.  Confirm
    # before relying on lazy host discovery.
    if self._host is None:
      host = self.extract['../..' if self._is_site is None else '../../..']
      host = self.copy(excludes=[".*PointDefects"], naked_end=False)
      host.excludes.extend(host.excludes)
      lowest = sorted(child.total_energies.iteritems(), key=itemgetter(1))[0][0]
      self._host = [u for u in self.extract[lowest].itervalues()]
      assert len(self._host) == 1
      self._host = self._host[0]
    return self._host
  @property
  def _site(self):
    """ Returns site number or None. """
    from re import match
    regex = match(r"site_(\d+)", self.extract.view.split('/')[-1])
    return int(regex.group(1)) if regex is not None else None
  @property
  def name(self):
    """ Name of the defect. """
    return self.extract.view.split('/')[-2 if self._site is not None else -1]
  @property
  def is_vacancy(self):
    """ True if this is a vacancy. """
    from re import match
    return match("vacancy_[A-Z][a-z]?", self.name) is not None
  @property
  def is_interstitial(self):
    """ True if this is an interstitial. """
    from re import match
    return match("[A-Z][a-z]?_interstitial_\S+", self.name) is not None
  @property
  def is_substitution(self):
    """ True if this is a substitution. """
    from re import match
    return match("[A-Z][a-z]?_on_[A-Z][a-z]?", self.name) is not None
  @property
  def n(self):
    """ Number of atoms added/removed from system.

        This is a dictionary.

        NOTE(review): the substitution branch calls ``match.group`` on the
        *function* instead of the ``found`` match object (TypeError), and the
        interstitial branch maps to -1 where +1 (an added atom) seems
        intended; confirm before fixing.
    """
    from re import match
    if self.is_vacancy:
      return {match("vacancy_([A-Z][a-z])?", self.name).group(1): -1}
    elif self.is_interstitial:
      return {match("[A-Z][a-z]?_interstitial_(\S+)", self.name).group(1): -1}
    else:
      found = match("([A-Z][a-z])?_on_([A-Z][a-z])?", self.name)
      return {match.group(1): 1, match.group(2): -1}
  def uncache(self):
    """ Uncaches result. """
    from opt import uncache as opt_uncache
    opt_uncache(self)
    self.extract.uncache()
    # NOTE(review): "unchache" looks like a typo for "uncache" and would
    # raise AttributeError at runtime; confirm against the host object API.
    self.host.unchache()
class PointDefectExtractor(PointDefectExtractorImpl):
""" Properties of a single defect. """
  def __init__(self, extract, epsilon = 1e0, host = None, pa_maxdiff=-8):
    """ Initializes an enthalpy function.

        Simply forwards all arguments to the base-class constructor.
    """
    super(PointDefectExtractor, self).__init__(extract, epsilon, host, pa_maxdiff)
  def chempot(self, mu):
    """ Computes sum of chemical potential from dictionary ``mu``.

        :Param mu: Dictionary of chemical potentials. If no units, assumes eV.
        :return: Chemical potential of this defect. Value is always in eV.

        NOTE(review): the loop iterates ``self.n`` directly, which yields
        dictionary *keys*, not (specie, value) pairs — ``self.n.items()``
        looks intended; also local ``n`` is assigned but unused.  Confirm.
    """
    from quantities import eV
    if mu is None: return 0 * eV
    result = 0e0 * eV
    n = self.n
    for specie, value in self.n:
      assert specie in mu,\
             ValueError("Specie {0} not in input chemical potentials {1}.".format(specie, mu))
      chem = mu[specie]
      # Bare numbers are interpreted as eV.
      if not hasattr(chem, 'units'): chem = chem * eV
      result += value * chem
    return result.rescale(eV)
  def _lines(self):
    """ Returns lines composed by the different charge states.

        Each line is ``(b, a)`` such that the formation enthalpy of that
        charge state is ``a * E_fermi + b``.
    """
    from numpy import array
    from quantities import elementary_charge as e, eV
    lines = []
    states = set()
    for state in self._charged_states:
      # A charge state must be represented by exactly one calculation.
      assert state.charge not in states,\
             RuntimeError("Found more than one calculation for the same charge state.")
      states.add(state.charge)
      lines.append((state.DeltaH.rescale(eV), state.charge))
    return lines
  def _all_intersections(self, _lines):
    """ Returns all intersection points between vbm and cbm, ordered.

        Pairwise intersections of the charge-state lines are kept only if
        they fall strictly inside the (vbm, cbm) window; the window edges
        themselves are prepended/appended.
    """
    from numpy import array
    from quantities import eV
    vbm = 0.*eV
    cbm = (self.host.cbm - self.host.vbm).rescale(eV)
    result = []
    # Intersection of a0*x+b0 and a1*x+b1 is at (b0-b1)/(a1-a0).
    for i, (b0, a0) in enumerate(_lines[:-1]):
      for b1, a1 in _lines[i+1:]: result.append( (b0 - b1) / (a1 - a0) )
    result = [u for u in sorted(result) if u - 1e-6 * eV > vbm]
    result = [u for u in result if u + 1e-6 * eV < cbm]
    result.append(cbm)
    result.insert(0, vbm)
    return array([array(u.rescale(eV)) for u in result]) * eV
  def lines(self):
    """ Lines forming the formation enthalpy diagram.

        :return: A list of 2-tuples with the first item b and the second a (a*x+b).

        Walks the Fermi window segment by segment (between consecutive
        intersections), keeping the minimal line on each segment and only
        recording a line when it differs from the previous one.
    """
    from numpy import array
    from quantities import eV
    _lines = self._lines()
    intersections = self._all_intersections(_lines)
    # adds line before vbm
    func = lambda x: x[0] + (intersections[0]-eV)*x[1]
    lines = [ min(_lines, key=func) ]
    # now look for lines up to cbm
    for i, intersection in enumerate(intersections[1:]):
      func = lambda x: x[0] + (intersection-intersections[i])*x[1]
      min_line = min(lines, key=func)
      if abs(min_line[0] - lines[-1][0]) > 1e-12*eV \
         or abs(min_line[1] - lines[-1][1]) > 1e-12:
        lines.append([min_line[0].rescale(eV), min_line[1]])
    # adds line after cbm
    func = lambda x: x[0] + (intersections[-1]+eV)*x[1]
    min_line = min(_lines, key=func)
    if abs(min_line[0] - lines[-1][0]) > 1e-12*eV \
       or abs(min_line[1] - lines[-1][1]) > 1e-12:
      lines.append([min_line[0].rescale(eV), min_line[1]])
    return lines
def enthalpy(self, fermi, mu = None):
    """ Point-defect formation enthalpy.

        :Parameters:
          fermi
            Fermi energy with respect to the host's VBM. If without
            units, assumes eV.
          mu : dictionary or None
            Dictionary of chemical potentials. If without units, assumes eV.
            If None, chemical potential part of the formation enthalpy is
            assumed zero.

        :return: Lowest formation enthalpy for all charged states.
    """
    from quantities import eV
    fermi = fermi.rescale(eV) if hasattr(fermi, "rescale") else fermi * eV
    lowest = min(b + fermi * a for b, a in self.lines())
    return (lowest + self.chempot(mu)).rescale(eV)
@property
def latex_label(self):
    """ A label in LaTex format.

        Format depends on defect kind (interstitial / substitution /
        vacancy) and on whether a specific lattice site is known.
    """
    from re import match
    if self.is_interstitial:
        found = match("([A-Z][a-z]?)_interstitial_(.+)$", self.name)
        specie = found.group(1)
        where = found.group(2).replace('_', r"\_")
        site = self._site
        if site is None:
            return r"{0}$^{{(i)}}_{{ \mathrm{{ {1} }} }}$".format(specie, where)
        return r"{0}$^{{(i,{2})}}_{{ \mathrm{{ {1} }} }}$".format(specie, where, site)
    if self.is_substitution:
        found = match("([A-Z][a-z]?)_on_([A-Z][a-z]?)", self.name)
        site = self._site
        if site is None:
            return r"{0}$_{{ \mathrm{{ {1} }} }}$".format(found.group(1), found.group(2))
        return r"{0}$_{{ \mathrm{{ {1} }}_{{ {2} }} }}$"\
               .format(found.group(1), found.group(2), site)
    if self.is_vacancy:
        found = match("vacancy_([A-Z][a-z]?)", self.name)
        site = self._site
        if site is None:
            return r"$\square_{{ \mathrm{{ {0} }} }}$".format(found.group(1))
        return r"$\square_{{ \mathrm{{ {0} }}_{{{1}}} }}$".format(found.group(1), site)
def __str__(self):
    """ Energy and corrections for each charge defect.

        One line per charged state, sorted by charge, decomposing DeltaH
        into raw enthalpy + charge correction + potential alignment +
        band filling, followed by the calculation directory.
    """
    from operator import itemgetter
    from os.path import relpath
    from quantities import eV
    result = "{0}: \n".format(self.name)
    states = sorted(((c, c.charge) for c in self._charged_states), key = itemgetter(1))
    # Removed a dead local that computed a cell-size ratio but was never used.
    for extract, charge in states:
        raw = float(extract.raw_DeltaH.rescale(eV))
        charge_corr = float(extract.charge_corrections.rescale(eV))
        alignment = float(extract.potential_alignment.rescale(eV))
        filling = float(extract.band_filling.rescale(eV))
        where = relpath(extract.directory, extract.directory + "/../../")
        result += " - charge {0:>3}: DeltaH = {1:8.4f} + {2:8.4f} + {3:8.4f}"\
                  "+ {4:8.4f} = {5:8.4f} eV # {6}.\n"\
                  .format(int(charge), raw, charge_corr, alignment, filling,
                          raw + charge_corr + alignment + filling, where)
    return result
class PointDefectMassExtractorImpl(AbstractMassExtract):
    """ Enthalpy for a series of defects for a given material and lattice. """
    def __init__(self, path=None, epsilon = 1e0, pa_maxdiff=0.5, Extractor=None, **kwargs):
        """ Initializes an enthalpy function.

            :param path: Root directory holding the point-defect calculations.
            :param epsilon: Dimensionless dielectric constant of the host.
            :param pa_maxdiff: Potential-alignment cutoff passed to extractors.
            :param Extractor: Class used to extract data from a single defect;
                defaults to PointDefectExtractor.
        """
        from pylada.vasp import MassExtract
        # Bug fix: super() must name *this* class. It previously named the
        # subclass defined later in the file, which made instantiating that
        # subclass recurse through this __init__ forever (and raised
        # NameError when this class was instantiated directly).
        super(PointDefectMassExtractorImpl, self).__init__(**kwargs)
        self.Extractor = Extractor
        """ Class for extracting data from a single defect. """
        if self.Extractor is None: self.Extractor = PointDefectExtractor
        self.massextract = MassExtract(path, unix_re=False, excludes=[".*relax_*"])
        """ Mass extraction object from which all results are pulled. """
        self.host = self._get_host()
        """ Result of host calculations. """
        # must be last. Can't use property setter.
        self._epsilon = epsilon
        self._pa_maxdiff = pa_maxdiff
    @property
    def epsilon(self):
        """ Dimensionless dielectric constant. """
        return self._epsilon
    @epsilon.setter
    def epsilon(self, value):
        # Propagate to every per-defect extractor already created.
        self._epsilon = value
        for v in self.itervalues(): v.epsilon = self._epsilon
    @property
    def pa_maxdiff(self):
        """ Maximum potential-alignment difference.

            (The previous docstring, "dimensionless dielectric constant",
            was a copy-paste error from the epsilon property.)
        """
        return self._pa_maxdiff
    @pa_maxdiff.setter
    def pa_maxdiff(self, value):
        # Propagate to every per-defect extractor already created.
        self._pa_maxdiff = value
        for v in self.itervalues(): v.pa_maxdiff = self._pa_maxdiff
    @property
    def rootdir(self):
        """ Path to the root-directory containing the poin-defects. """
        return self.massextract.rootdir
    @rootdir.setter
    def rootdir(self, value): self.massextract.rootdir = value
    def _get_host(self):
        """ Returns extraction object towards the host.

            The host is the lowest-total-energy calculation found outside
            the PointDefects sub-directories; exactly one is expected.
        """
        from operator import itemgetter
        host = self.massextract.copy(excludes=[".*PointDefects"])
        host.excludes.extend(self.massextract.excludes)
        lowest = sorted(host.total_energies.iteritems(), key=itemgetter(1))[0][0]
        host = [u for u in host[lowest].itervalues()]
        assert len(host) == 1
        return host[0]
    def __iter_alljobs__(self):
        """ Walks through point-defects only. """
        for child in self.massextract["PointDefects"].children:
            # looks for site_n sub-directories (site-specific defects).
            if len(child["site_\d+"].jobs) != 0:
                assert len(child["site_\d+"].jobs) == len(child.jobs),\
                       RuntimeError("Don't understand directory structure of {0}.".format(child.view))
                for site in child.children: # should site specific defects.
                    result = self.Extractor(site, self.epsilon, self.host, self.pa_maxdiff)
                    # checks this is a point-defect.
                    if result.is_interstitial or result.is_vacancy or result.is_substitution:
                        yield site.view, result
            else:
                result = self.Extractor(child, host=self.host, pa_maxdiff=self.pa_maxdiff,\
                                        epsilon = self.epsilon)
                # checks if this is a valid point-defect.
                if result.is_interstitial or result.is_vacancy or result.is_substitution:
                    yield child.view, result
    def ordered_items(self):
        """ Returns items ordered by substitution, vacancy, and interstitial. """
        from operator import itemgetter
        interstitials = (u for u in self.iteritems() if u[1].is_interstitial)
        substitution = (u for u in self.iteritems() if u[1].is_substitution)
        vacancy = (u for u in self.iteritems() if u[1].is_vacancy)
        result = sorted(substitution, key = itemgetter(0))
        result.extend(sorted(vacancy, key = itemgetter(0)))
        result.extend(sorted(interstitials, key = itemgetter(0)))
        return result
    def ordered_keys(self):
        """ Returns keys ordered by substitution, vacancy, and interstitial. """
        return [u[0] for u in self.ordered_items()]
    def ordered_values(self):
        """ Returns values ordered by substitution, vacancy, and interstitial. """
        return [u[1] for u in self.ordered_items()]
    def __str__(self):
        """ Prints out all energies and corrections. """
        return "".join( str(value) for value in self.ordered_values() )
class PointDefectMassExtractor(PointDefectMassExtractImpl):
""" Enthalpy for a series of defects for a given material and lattice. """
def __init__(self, **kwargs):
""" Initializes an enthalpy function. """
super(PointDefectMassExtractor, self).__init__(**kwargs)
def enthalpies(self, fermi, mu=None):
""" Dictionary of point-defect formation enthalpies.
:Parameters:
fermi
Fermi energy with respect to the host's VBM. If without
units, assumes eV.
mu : dictionary or None
Dictionary of chemical potentials. If without units, assumes eV.
If None, chemical potential part of the formation enthalpy is
assumed zero.
:return: Dictionary where keys are the name of the defects, and the
values the formation enthalpy.
"""
from quantities import eV
results = {}
for name, defect in self.iteritems():
results[name] = defect.enthalpy(fermi, mu).rescale(eV)
return results
def plot_enthalpies(self, mu=None, **kwargs):
""" Plots diagrams using matplotlib. """
from quantities import eV
try: import matplotlib.pyplot as plt
except ImportError:
print "No matplotlib module."
return
from operator import itemgetter
# sets up some stuff for legends.
plt.rc('text', usetex=True)
plt.rc('text.latex', preamble="\usepackage{amssymb}")
# finds limits of figure
xlim = 0., float( (self.host.cbm-self.host.vbm).rescale(eV) )
all_ys = [float(val.rescale(eV)) for x in xlim for val in self.enthalpies(x, mu).itervalues()]
ylim = min(all_ys), max(all_ys)
# creates figures and axes.
figure = plt.figure()
axes = figure.add_subplot(111, xlim=(self.host.vbm, self.host.cbm), ylim=ylim)
# loop over defects.
for defect in self.ordered_values():
# finds intersection points.
x = [-5e0*eV]
lines = defect.lines()
for i in range(len(lines)-1):
(b0, a0), (b1, a1) = lines[i], lines[i+1]
x.append( ((b0-b1)/(a1-a0)).rescale(eV) - self.host.vbm)
x.append(5e0*eV)
# Now draws lines.
lines.append(lines[-1])
y = [u[0] + u[1] * xx for u, xx in zip(lines, x)]
axes.plot(x, y, label=defect.latex_label, **kwargs)
axes.set_xlim(xlim)
axes.set_ylim(ylim)
plt.legend()
plt.draw()
|
UTF-8
|
Python
| false | false | 2,013 |
10,402,410,835,312 |
3efbb27b377d5fb786c30ed720ef090a76f9dcde
|
0c763e1bd336419d9dabca3fcabb39538dee8181
|
/modules/system_checks.py
|
7005758d80e5991f53899098b65c0c654e4e9230
|
[] |
no_license
|
cryzlasm/PySC
|
https://github.com/cryzlasm/PySC
|
a95101f4749e01c8ac8b9f39f6b5367d36f6ddc6
|
b8878f0fcbe51b882774bfef67f8941eadc9aae6
|
refs/heads/master
| 2020-03-31T12:20:04.911493 | 2013-12-27T00:15:51 | 2013-12-27T00:15:51 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# !/usr/bin/python
# -*- coding: utf-8 -*-
#
"""
PySc System Checks
This module contains the logic for the following checks .:
- check_platform --> Checks for Windows system
- check_network --> Checks network connectivity
"""
from sys import exit, maxsize
import platform
from ctypes import *
from ctypes.wintypes import DWORD
wininet = windll.wininet
def check_platform(debug):
# check target is 'winXX'
if not platform.system().lower() == 'windows':
if debug:
print '\n [!] Not a Windows system!, exiting'
exit(1)
else:
if debug:
print '\n [>] Windows detected - %s' \
% platform.system(), platform.version(), platform.architecture()[0]
# check 32bit / 64bit
is_64bits = maxsize > 2**32
if is_64bits and debug:
print ' [!] Injection into 64bit processes is not currently supported'
def check_network(debug):
# check network connection
flags = DWORD()
if debug:
print '\n [>] Checking connection'
connected = wininet.InternetGetConnectedState(
byref(flags),
None,
)
if not connected:
if debug:
print ' [!] No internet connection, cannot retrieve data'
exit(1)
else:
if debug:
print ' [>] Connection check confirmed\n'
|
UTF-8
|
Python
| false | false | 2,013 |
4,964,982,213,019 |
216765de51d3c30165908cd0beec1c11efe6c454
|
44d94fee19a8b904042e0480e16a62fad9b5f93c
|
/randomForm.py
|
a40999a06112a812581ca37d437652c82f042799
|
[] |
no_license
|
yayshine/pyFolder
|
https://github.com/yayshine/pyFolder
|
8e02360b9a209ff211456ead2e55014c402cc8f9
|
3ec284ac3577acf1f03cb726ae7c30f1c7363442
|
refs/heads/master
| 2016-09-06T05:31:30.654986 | 2014-11-10T10:26:01 | 2014-11-10T10:26:01 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
answer = "*** "
print "What is your name?"
name = raw_input(answer)
print "What do you want to be called?"
nick_name = raw_input(answer)
print "Your name is %r but you want to be called by %r" % (
name, nick_name)
|
UTF-8
|
Python
| false | false | 2,014 |
19,061,064,901,188 |
e063eddbed5a5bc8b8b849580fdca0200a733639
|
db639c29e5c196af7aa543ecf9b4b0696ef9a9d1
|
/tests/api/http/test_create_or_list_accounts.py
|
8ec9af63981d57feb855c94d6d29b251b873980e
|
[] |
no_license
|
hydroshare/hs_core
|
https://github.com/hydroshare/hs_core
|
d938afa528e1f6537f21f42c11ece198fc68ec11
|
a2340fc0efdbe4ad600eefac946dc4ed190b5a96
|
refs/heads/master
| 2021-01-23T13:18:39.498523 | 2014-10-10T16:23:41 | 2014-10-10T16:23:41 | 16,313,579 | 0 | 1 | null | false | 2018-03-21T21:57:13 | 2014-01-28T14:12:19 | 2014-09-04T00:06:24 | 2014-10-10T17:00:37 | 2,391 | 1 | 3 | 3 |
Python
| false | null |
__author__ = 'shaunjl'
"""
Tastypie REST API tests for CreateOrListAccounts.as_view() modeled after: https://github.com/hydroshare/hs_core/blob/master/tests/api/http/test_resource.py
comments-
post returns TypeError, put returns HttpResponseForbidden (403)
get expects a json query in a dictionary like data={'query': self.serialize({'email': '[email protected]})}
"""
from tastypie.test import ResourceTestCase, TestApiClient
from tastypie.serializers import Serializer
from django.contrib.auth.models import User
from hs_core import hydroshare
from hs_core.views.users_api import CreateOrListAccounts
class CreateOrListAccountsTest(ResourceTestCase):
    serializer = Serializer()

    def setUp(self):
        self.account_url_base = '/hsapi/accounts/'
        self.sudo = hydroshare.create_account('[email protected]','hs','hydro','share',True,password='hs')
        self.api_client = TestApiClient()
        # Bug fix: self.sudo.password holds Django's *hashed* password, so
        # logging in with it always failed; use the plaintext given to
        # create_account above.
        self.api_client.client.login(username=self.sudo.username, password='hs')

    def tearDown(self):
        User.objects.all().delete()

    def test_create_account(self):
        """Posting with 'superuser' is forbidden; without it an account is created."""
        username = 'creator'
        password = 'password'
        post_data_should_fail = CreateOrListAccounts.CreateAccountForm({
            'email': '[email protected]',
            'username': username,
            'first_name': 'shaun',
            'last_name': 'livingston',
            'password': password,
            'superuser': True
        })
        resp=self.api_client.post(self.account_url_base, data=post_data_should_fail)
        self.assertHttpForbidden(resp)

        post_data_should_succeed = CreateOrListAccounts.CreateAccountForm({
            'email': '[email protected]',
            'username': username,
            'first_name': 'shaun',
            'last_name': 'livingston',
            'password': password
        })
        resp=self.api_client.post(self.account_url_base, data=post_data_should_succeed)
        self.assertHttpCreated(resp)
        self.assertTrue(User.objects.filter(email='[email protected]').exists())
        self.assertTrue(User.objects.filter(username=username).exists())
        self.assertTrue(User.objects.filter(first_name='shaun').exists())
        self.assertTrue(User.objects.filter(last_name='livingston').exists())
        # Bug fix: Django's auth.User field is 'is_superuser'; filtering on
        # 'superuser' raised FieldError.
        self.assertTrue(User.objects.filter(is_superuser=True).exists())

    def test_list_users(self):
        """GET with a serialized email query returns every matching account."""
        hydroshare.create_account(
            '[email protected]',
            username='user0',
            first_name='User0_FirstName',
            last_name='User0_LastName',
        )
        hydroshare.create_account(
            '[email protected]',
            username='user1',
            first_name='User1_FirstName',
            last_name='User1_LastName',
        )
        hydroshare.create_account(
            '[email protected]',
            username='user2',
            first_name='User2_FirstName',
            last_name='User2_LastName',
        )
        num_of_accounts = len(User.objects.filter(email= '[email protected]'))
        query = self.serialize({
            'email': '[email protected]',
        })
        get_data = {'query': query}
        resp = self.api_client.get(self.account_url_base, data=get_data)
        self.assertEqual(resp.status_code,200)
        users = self.deserialize(resp)
        self.assertTrue(len(users)==num_of_accounts)
        for num in range(num_of_accounts):
            self.assertEqual(str(users[num]['email']), '[email protected]')
            self.assertEqual(str(users[num]['username']), 'user{0}'.format(num))
            self.assertEqual(str(users[num]['first_name']), 'User{0}_FirstName'.format(num))
            self.assertEqual(str(users[num]['last_name']), 'User{0}_LastName'.format(num))
'''
from list_users-
print resp
Vary: Accept-Language, Cookie
Content-Type: application/json
Content-Language: en
[{"date_joined": "2014-05-20T20:05:13.755078", "email": "[email protected]", "first_name": "User0_FirstName",
"id": 55, "last_login": "2014-05-20T20:05:13.755078", "last_name": "User0_LastName",
"resource_uri": "/api/v1/user/55/", "username": "user0"}, {"date_joined": "2014-05-20T20:05:18.188848",
"email": "[email protected]", "first_name": "User1_FirstName", "id": 56, "last_login": "2014-05-20T20:05:18.188848",
"last_name": "User1_LastName", "resource_uri": "/api/v1/user/56/", "username": "user1"},
{"date_joined": "2014-05-20T20:05:21.802584", "email": "[email protected]", "first_name": "User2_FirstName",
"id": 57, "last_login": "2014-05-20T20:05:21.802584", "last_name": "User2_LastName",
"resource_uri": "/api/v1/user/57/", "username": "user2"}]
'''
|
UTF-8
|
Python
| false | false | 2,014 |
1,099,511,669,117 |
6de3fa8e61f304ab43c6923392d7f5ebd83e298b
|
03512f792f465d6afda403af27ac9ab22be51426
|
/paydollar/membership.py
|
091703d8d5ae1a4023589fdbfa98a84107f2cf40
|
[] |
no_license
|
siutin/py-paydollar
|
https://github.com/siutin/py-paydollar
|
0832bd32133a87a57e27bacadc300520c80846d6
|
e1e42692dd48ffb20758c93c674455250fa9cf7c
|
refs/heads/master
| 2020-12-25T12:17:12.793627 | 2012-09-02T09:43:22 | 2012-09-02T09:43:22 | 31,431,591 | 1 | 0 | null | true | 2015-02-27T17:47:51 | 2015-02-27T17:47:51 | 2013-11-08T01:31:54 | 2012-09-02T09:43:43 | 108 | 0 | 0 | 0 | null | null | null |
"""
Membership APIs
"""
import requests
import urllib
from lxml import etree
from . import settings, logger
def query_membership():
    """Placeholder for the PayDollar membership 'Query' action (not implemented)."""
    pass
def add_membership(merchant_id, merchant_api_id, merchant_api_password,
        member_id, first_name, last_name, url=settings.MEMBERSHIP_API_PROD_URL):
    """
    API for adding membership.

    Posts an 'Add' action to the PayDollar membership endpoint and returns
    a dict with 'status_code' and 'status_message' (plus 'static_token'
    when the response message is 'OK').
    """
    payload = urllib.urlencode({
        'merchantId': merchant_id,
        'merchantApiId': merchant_api_id,
        'password': merchant_api_password,
        'actionType': 'Add',
        'memberId': member_id,
        'firstName': first_name,
        'lastName': last_name,
        'status': 'A',
    })
    logger.info('adding membership...')
    headers = {"Content-type": "application/x-www-form-urlencoded"}
    response = requests.post(url, data=payload, headers=headers, verify=True)
    dom = etree.fromstring(response.text)
    result = {'status_code': dom.find('.//responsecode').text,
              'status_message': dom.find('.//responsemessage').text}
    if result['status_message'] == 'OK':
        result['static_token'] = dom.find('.//statictoken').text
    return result
def update_membership():
    """Placeholder for the PayDollar membership 'Update' action (not implemented)."""
    pass
def delete_membership():
    """Placeholder for the PayDollar membership 'Delete' action (not implemented)."""
    pass
def verify_membership():
    """Placeholder for the PayDollar membership 'Verify' action (not implemented)."""
    pass
|
UTF-8
|
Python
| false | false | 2,012 |
6,640,019,468,492 |
01918bd9c6a37b871f0fe9d2a4a715b5b8232f47
|
e04f260a66dba95e754cdec3254a833d81cbec6d
|
/gitconfig_parser/__init__.py
|
e577a886f1101bedc5c5b500012ca841baf31120
|
[
"MIT"
] |
permissive
|
seanfisk/gitconfig-parser
|
https://github.com/seanfisk/gitconfig-parser
|
852caa458c17d52cb100fd95f63678d756f763b7
|
f747a4263bdd7fc4ffd6b6a0fe34ef7cf7e935a8
|
refs/heads/master
| 2021-01-18T15:23:54.900025 | 2013-07-12T06:19:12 | 2013-07-12T06:19:12 | 11,370,534 | 0 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null |
""":mod:`gitconfig_parser` -- Parse a .gitconfig file.
"""
from gitconfig_parser import metadata

# Re-export the canonical package metadata under the conventional module
# dunders so tools and setup scripts can read them from the package itself.
__version__ = metadata.version
__author__ = metadata.authors[0]
__license__ = metadata.license
__copyright__ = metadata.copyright
|
UTF-8
|
Python
| false | false | 2,013 |
3,135,326,162,995 |
bb5bed80f26d8d6e54f37434397a19058082ed3e
|
d61e5d9a63bb81bef9473c34ab8eb13757f78159
|
/simplecloud/template/__init__.py
|
abb9be7ea6ee3889e1c4a7b937c5757454ee423d
|
[
"Apache-2.0"
] |
permissive
|
lzufalcon/simplecloud
|
https://github.com/lzufalcon/simplecloud
|
2c3bfbb60ce9b2adaf5baec942a7fdb267ccd521
|
6cee961ece603ee772db3b62c9995569df40db6b
|
refs/heads/master
| 2020-05-27T12:23:46.329622 | 2013-08-11T09:08:32 | 2013-08-11T09:08:32 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
from .views import template
from .models import Template
from .constants import (TEMPLATE_STATUS, TEMPLATE_OK, TEMPLATE_INVALID)
|
UTF-8
|
Python
| false | false | 2,013 |
14,173,392,116,778 |
bf1f92d4b9466dbc4964f12f78775b6ff97b647f
|
0692d2425d810e7ea40b6d30b2de6981b60e60c7
|
/plugins/config/jplaylist.py
|
3073f7f92eef18f6a21e8d481c4ff993df69590e
|
[
"MIT"
] |
permissive
|
Exaktus/aceproxy
|
https://github.com/Exaktus/aceproxy
|
ad59d17aed2333be385c158fe995da713eabf805
|
1e486664780dbd8401c075e2cdf5ab48beb3d55d
|
refs/heads/master
| 2021-01-18T06:22:10.474730 | 2014-10-06T13:58:42 | 2014-10-06T13:58:42 | 20,812,394 | 1 | 0 | null | true | 2014-10-06T13:58:43 | 2014-06-13T17:39:38 | 2014-09-10T11:40:37 | 2014-10-06T13:58:42 | 403 | 0 | 0 | 0 |
Python
| null | null |
# Playlist sources for the jplaylist config plugin. The '{AceProxy}'
# token is presumably substituted with the proxy's host:port by whatever
# consumes this tuple -- confirm against the plugin loader.
urls = ('http://{AceProxy}/torrenttv/playlist.m3u',
        'http://{AceProxy}/ytv/playlist.m3u'
        #,"file:///d|/playlist.m3u"
        #,"file:///etc/playlist.m3u"
        ,)
|
UTF-8
|
Python
| false | false | 2,014 |
4,827,543,265,353 |
8814e8bf0e76f21ed9cbcce77aa49e2d71117a3b
|
7860ed6d27512c4601400f89c70c6ccbf654ff99
|
/claritick/rappel/admin.py
|
2aac9d8cf07c1d3850bb821b7934f0a5cc3ba0ea
|
[] |
no_license
|
zehome/claritick
|
https://github.com/zehome/claritick
|
a7e4ed39e535163bc54e58e9611b84122de298c6
|
69290d639df55aba6f17526c97868c2238cd962f
|
refs/heads/master
| 2020-06-03T05:03:22.433046 | 2014-02-07T23:20:37 | 2014-02-07T23:20:37 | 5,178,975 | 0 | 1 | null | false | 2022-10-26T08:16:16 | 2012-07-25T12:49:27 | 2014-02-11T20:19:11 | 2014-02-11T20:19:10 | 14,618 | 2 | 2 | 0 |
JavaScript
| false | false |
#-*- coding: utf-8 -*-
from django.contrib import admin
from rappel.models import Rappel
class ListRappel(admin.ModelAdmin):
    # Columns shown on the Rappel change-list page in the Django admin.
    list_display = ('ticket', 'date_email', 'date')

admin.site.register(Rappel, ListRappel)
|
UTF-8
|
Python
| false | false | 2,014 |
4,028,679,324,901 |
bb689ce7c24ef2467186f4af9aae05f17ed1f741
|
08473b7aa18fa1e47a4bd4c1ae44fafc1d63926c
|
/StockTradeRobot/src/StockStrategyReport/StockStrategyHelper.py
|
bb1d234fe68399441b86fcc776b8d9b7298ee615
|
[] |
no_license
|
phymach/StockTradeRobot
|
https://github.com/phymach/StockTradeRobot
|
ff28d8a3f9b3801b9e69003cb497305bb1053fb5
|
3a6a33eff3cce6ffe749374f248f8952217d6208
|
refs/heads/master
| 2016-05-27T16:50:20.604367 | 2014-04-19T08:20:52 | 2014-04-19T08:20:52 | 18,936,857 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import re, codecs
import Common.dictToObj as dictToObj
import logging
import StockInfoLoader.ystockquote as ystockquote
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('StockPortfolio')
class StockPortfolio(object):
    """Builds a stock portfolio by filtering the ticker list loaded from
    stock_id_list.txt through successive screens (price, dividend,
    closeness to the 52-week low) and saving the survivors.
    """
    # Class-level default; __init__ replaces it with a per-instance dict.
    dict_stock_id = {}

    def __init__(self):
        self.dict_stock_id = self.getStockList()

    def getStockInfo(self, strStockId):
        """Return the full Yahoo quote record for a .TW ticker."""
        return ystockquote.get_all('%s.TW' % strStockId)

    def getStockList(self):
        """Parse stock_id_list.txt ('<4-digit id>\\t<name>' lines) into a dict."""
        f = codecs.open('stock_id_list.txt', encoding='utf-8', mode='r')
        text = f.read()
        f.close()
        dict_stock_id = {}
        for stock in re.findall('\d{4}\t[\S ]+', text):
            stock_id, stock_name = stock.split('\t', 1)
            dict_stock_id[stock_id] = stock_name
        return dict_stock_id

    def priceBelow(self, floatPrice):
        """Drop stocks priced at 0 (no data) or above floatPrice."""
        # list() so deleting entries during iteration stays safe.
        for stock_id in list(self.dict_stock_id.keys()):
            # float() instead of eval(): never execute remote quote data as code.
            stock_price = float(ystockquote.get_price('%s.TW' % stock_id))
            if stock_price == 0.0 or stock_price > floatPrice:
                del self.dict_stock_id[stock_id]

    def dividendAbove(self, floatDividend):
        """Drop stocks paying less than floatDividend per share."""
        for stock_id in list(self.dict_stock_id.keys()):
            stock_dividend = float(ystockquote.get_dividend_per_share('%s.TW' % stock_id))
            if stock_dividend < floatDividend:
                del self.dict_stock_id[stock_id]

    def near52WeekLow(self, floatPercentage):
        """Keep stocks satisfying (Price - L) <= (H - L) * floatPercentage,
        i.e. trading close to their 52-week low.
        """
        for stock_id in list(self.dict_stock_id.keys()):
            stock_price = float(ystockquote.get_price('%s.TW' % stock_id))
            stock_high = float(ystockquote.get_52_week_high('%s.TW' % stock_id))
            # Bug fix: the low must come from get_52_week_low -- the old code
            # called get_52_week_high here, making H - L always zero.
            stock_low = float(ystockquote.get_52_week_low('%s.TW' % stock_id))
            if (stock_price - stock_low) > (stock_high - stock_low) * floatPercentage:
                del self.dict_stock_id[stock_id]

    def savePortfolio(self):
        """Write the surviving '<id> <name>' pairs to portfolio_list.txt."""
        f = codecs.open('portfolio_list.txt', encoding='utf-8', mode='w')
        for k in self.dict_stock_id.keys():
            f.writelines('%s %s\n' % (k, self.dict_stock_id[k]))
        f.close()
def checkStockExists(stock_info):
    """True when the quote's stock_exchange field names a real exchange
    (the source reports the literal string '"N/A"' for unknown tickers)."""
    return str(stock_info['stock_exchange']) != '"N/A"'
if __name__=="__main__":
    # Screening chain: price <= 20, dividend per share >= 1.0, then
    # proximity to the 52-week low.
    portfolio = StockPortfolio()
    portfolio.priceBelow(20.0)
    portfolio.dividendAbove(1.0)
    # NOTE(review): 10.0 looks like it should be a fraction (e.g. 0.10);
    # as written the 52-week-low screen removes almost nothing -- confirm.
    portfolio.near52WeekLow(10.0)
    for stock_id in sorted(portfolio.dict_stock_id.keys()):
        print stock_id, portfolio.dict_stock_id[stock_id]
        logger.debug('%s %s: %s' % (stock_id, portfolio.dict_stock_id[stock_id], str(portfolio.getStockInfo(stock_id))))
    portfolio.savePortfolio()
|
UTF-8
|
Python
| false | false | 2,014 |
9,208,409,886,513 |
61a216e79ba50aee1ffb52f8f5a1281bb764472a
|
9fa0e086cb9a710afdc001df405856fb92fa6184
|
/exif_renamer/models.py
|
1e23414b3fdaadd7b8029cfc729c1c40875d02cf
|
[
"GPL-3.0-only"
] |
non_permissive
|
abits/exif-renamer
|
https://github.com/abits/exif-renamer
|
239f46e2d25f355ae577a33c842a74617d1f7b9b
|
8e49c5e5c447f478939cc3d1f7491f601edbc5c5
|
refs/heads/master
| 2021-01-21T11:45:28.894757 | 2012-12-13T13:50:54 | 2012-12-13T13:50:54 | 4,115,689 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from PIL import Image
from PIL.ExifTags import TAGS
from datetime import datetime
from settings import Settings
import ConfigParser
import os
class Photo():
    """A photo file whose new name is derived from its EXIF timestamp."""

    def __init__(self, file_name):
        self.original_base_dir = os.path.dirname(file_name)
        self.original_file_name = os.path.basename(file_name)
        self.original_file_name_noext = os.path.splitext(self.original_file_name)[0]
        self.renamed_file_name = ''
        self.raw_exif_data = {}   # decoded EXIF tag name -> value
        self.date = None          # datetime taken; set by update_exif_data()
        self.dirty = False

    def rename(self):
        """Compute the timestamp-based name: YYYYMMDD_HHMM_<original>.jpg.

        Requires self.date to be set (see update_exif_data).
        """
        extension = 'jpg'
        # {:04d} for the year is explicit; 4-digit years rendered identically
        # under the previous {:02d} (a format width is only a minimum).
        self.renamed_file_name = \
            '{:04d}{:02d}{:02d}_{:02d}{:02d}_{:s}.{:s}'.format(
                self.date.year,
                self.date.month,
                self.date.day,
                self.date.hour,
                self.date.minute,
                self.original_file_name_noext,
                extension)

    def rename_and_save(self):
        # NOTE(review): despite the name this only computes the new name;
        # nothing is written or moved here -- confirm intended behavior.
        self.rename()

    def get_original_path(self):
        """Full path of the photo as it currently exists on disk."""
        return os.path.join(self.original_base_dir, self.original_file_name)

    def get_rename_path(self):
        """Full path the photo would have after renaming."""
        return os.path.join(self.original_base_dir, self.renamed_file_name)

    def update_exif_data(self):
        """Read EXIF from the file and derive self.date.

        The EXIF key holding the original timestamp is resolved through a
        per-manufacturer .ini under Settings.user_data/manufacturers, using
        the camera's Make/Model tags.
        """
        self.raw_exif_data = self.get_exif(self.get_original_path())
        config = ConfigParser.ConfigParser()
        ini_file = os.path.join(Settings.user_data, 'manufacturers',
                                self.raw_exif_data['Make'] + '.ini')
        config.read(ini_file)
        ini_section = self.raw_exif_data['Model']
        exif_index = config.get(ini_section, 'datetimeoriginal')
        exif_timestamp = config.get(ini_section, exif_index)
        format_string = '%Y:%m:%d %H:%M:%S'
        self.date = datetime.strptime(self.raw_exif_data[exif_timestamp], format_string)

    def get_exif(self, file_name):
        """Return the image's EXIF data keyed by decoded tag name.

        Raises ValueError when the file carries no EXIF block.
        """
        return_value = {}
        i = Image.open(file_name)
        info = i._getexif()
        # Bug fix: _getexif() returns None for images without EXIF, which
        # previously crashed with an opaque AttributeError on .items().
        if info is None:
            raise ValueError('No EXIF data in {0}'.format(file_name))
        for tag, value in info.items():
            decoded = TAGS.get(tag, tag)
            return_value[decoded] = value
        return return_value
|
UTF-8
|
Python
| false | false | 2,012 |
17,798,344,474,762 |
60c334ffdd89ce067a81ae6e369cb2b1c9fb1790
|
0716ae964395f02ccd157199541eb44ce7c56022
|
/studentAPI/admin.py
|
83a15c997976d66ed023cd61d8afdb9409906298
|
[] |
no_license
|
patnaikshekhar/SampleAnayticsDashboard
|
https://github.com/patnaikshekhar/SampleAnayticsDashboard
|
20cbe3d84f45b68ff9dc7274a32c8455484e2c9c
|
4702a8a4040e1d3db38523eda4b158115af290c4
|
refs/heads/master
| 2021-01-21T10:06:01.330622 | 2014-06-17T16:16:52 | 2014-06-17T16:16:52 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.contrib import admin
from studentAPI.models import Candidate, Department
from studentAPI.models import StudentQualification, StudentAppliedDepartments
class StudentQualificationInline(admin.StackedInline):
    """
    Inline class for Qualifications
    """
    model = StudentQualification
    extra = 3


class StudentAppliedDepartmentsInline(admin.StackedInline):
    """
    Inline class for Applied Departments

    Renamed from StudentAppliedDepartments: the old name shadowed the
    imported model of the same name at module level, so the admin class
    only worked because class bodies resolve names before the class is
    bound.
    """
    model = StudentAppliedDepartments
    extra = 3


class CandidateAdmin(admin.ModelAdmin):
    """
    Main Candidate class to show on the model. Used to define
    the inlines for easy data entry
    """
    inlines = [StudentQualificationInline, StudentAppliedDepartmentsInline]


# Register the Candidate Model
admin.site.register(Candidate, CandidateAdmin)

# Register the Department Model
admin.site.register(Department)
|
UTF-8
|
Python
| false | false | 2,014 |
6,648,609,414,840 |
399e763bf13bb3f6161a2ee15feedc952ea4839e
|
d9cdb175fb4a336e9fd548e9b2999c6f33f341cb
|
/code/3rdParty/sleekxmpp/stanza/__init__.py
|
765748ca0995ac5d72b4f929806599283a1533c3
|
[] |
no_license
|
remko/xmpp-tdg-examples
|
https://github.com/remko/xmpp-tdg-examples
|
b31db13b89fbf69303cc4cf841f77995e926825a
|
526e71ac5c885888667e6a93e3d45cea7b2feee2
|
refs/heads/master
| 2020-04-06T04:00:40.050557 | 2014-11-14T13:43:49 | 2014-11-14T13:43:49 | 247,854 | 5 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
__all__ = ['presence']
|
UTF-8
|
Python
| false | false | 2,014 |
4,552,665,344,558 |
59e2111e7508ffedbb552cc2573f35ef6f04d273
|
a5a1a4a34d5e404d483cd442527ed154cdc4ab54
|
/scripts/mbi/mbi_eramsey.py
|
bf5416359957d097b1949fd6230069127b9c0998
|
[] |
no_license
|
AdriaanRol/measurement
|
https://github.com/AdriaanRol/measurement
|
c0abb9cfb2e7061a060c109f6be61a420ca8586e
|
32e0912b83d5ceedf00378df1d6a48feb9ab8f17
|
refs/heads/master
| 2021-01-20T16:48:03.044302 | 2014-03-26T15:42:09 | 2014-03-26T15:42:09 | 18,175,928 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import qt
import numpy as np
from measurement.lib.measurement2.adwin_ssro import pulsar_mbi_espin
import mbi.mbi_funcs as funcs
reload(funcs)
SIL_NAME = 'hans-sil1'
def run(name):
    """Configure and start an electron Ramsey (MBI) measurement.

    Sweeps the delay between the two fast pi/2 pulses from 0 to 10 us over
    81 points; the 1 MHz artificial detuning is applied through a
    delay-dependent phase on the second pulse rather than by moving the
    microwave frequency.
    """
    m = pulsar_mbi_espin.ElectronRamsey(name)
    funcs.prepare(m, SIL_NAME)
    pts = 81
    m.params['pts'] = pts
    m.params['reps_per_ROsequence'] = 500
    m.params['MW_pulse_delays'] = np.linspace(0,10e-6,pts)
    m.params['detuning'] = 1e6 #artificial detuning
    # MW pulses: both pulses share the fast pi/2 duration, amplitude and
    # modulation frequency.
    m.params['MW_pulse_durations'] = np.ones(pts) * m.params['fast_pi2_duration']
    m.params['MW_pulse_amps'] = np.ones(pts) * m.params['fast_pi2_amp']
    m.params['MW_pulse_mod_frqs'] = np.ones(pts) * \
        m.params['AWG_MBI_MW_pulse_mod_frq']
    m.params['MW_pulse_1_phases'] = np.ones(pts) * 0
    # Second pulse: 180 deg baseline plus a 360*detuning*t phase ramp.
    m.params['MW_pulse_2_phases'] = np.ones(pts) * 180 + 360*m.params['detuning']*m.params['MW_pulse_delays']
    # for the autoanalysis
    m.params['sweep_name'] = 'evolution time (ns)'
    m.params['sweep_pts'] = m.params['MW_pulse_delays'] /1e-9
    funcs.finish(m, debug=False)

if __name__ == '__main__':
    run(SIL_NAME+'mbi_eramsey')
|
UTF-8
|
Python
| false | false | 2,014 |
15,058,155,372,182 |
58077302ee4aa07375b6a9f6d3d77b3504427ca7
|
d0960d64efc42587f465397740ad26bb280c4488
|
/python/sorting/mergesort.py
|
1ac8e27841e2f4d27be386b4f812e016b02d8b41
|
[] |
no_license
|
noudald/programming-etudes
|
https://github.com/noudald/programming-etudes
|
77fc84dcc7cdffaa306b8bc5b0ce25976aff0b29
|
211f7ab92b773af2123bf5c3523dcdec84eb3820
|
refs/heads/master
| 2021-05-27T13:52:19.232294 | 2013-12-28T12:20:17 | 2013-12-28T12:20:17 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
def merge_sort(l):
    '''Return a new list with the elements of *l* in ascending order.

    Classic top-down merge sort: O(n log n) time, O(n) extra space.
    Raises TypeError when *l* is not a list.
    '''
    # isinstance also accepts list subclasses; the parenthesized raise form
    # is valid in both Python 2 and 3 (the old "raise TypeError, msg"
    # syntax was Python-2 only).
    if not isinstance(l, list):
        raise TypeError('Expected list.')
    if len(l) <= 1:
        return l
    # Floor division keeps this correct under Python 3 as well.
    middle = len(l) // 2
    left = merge_sort(l[:middle])
    right = merge_sort(l[middle:])
    # Merge the two sorted halves.
    ret = []
    i, j = 0, 0
    while i < len(left) and j < len(right):
        if left[i] < right[j]:
            ret.append(left[i])
            i += 1
        else:
            ret.append(right[j])
            j += 1
    # One side may still hold a sorted tail; append whichever remains.
    ret += left[i:] + right[j:]
    return ret
|
UTF-8
|
Python
| false | false | 2,013 |
13,666,585,983,105 |
bd05b5543c8846351e883fa52f8121816651235a
|
fec89aa1967511f333ee5b58a835522e7a4edc1a
|
/Minecraft/plugin.py
|
339bf0065d2a1b513b98065a089468ada38f3cd5
|
[] |
no_license
|
ZeeCrazyAtheist/IotaBot
|
https://github.com/ZeeCrazyAtheist/IotaBot
|
f750380ef567d902fedd309c2ff56a959e1d0b34
|
b0196ef6201062175534437ad190000452a3d521
|
refs/heads/master
| 2016-09-02T15:58:05.784513 | 2013-11-17T21:19:26 | 2013-11-17T21:19:26 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
###
# Copyright (c) 2013, Ken Spencer
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import supybot.utils as utils
import supybot.conf as conf
from supybot.commands import *
import supybot.plugins as plugins
import supybot.ircutils as ircutils
import supybot.callbacks as callbacks
import supybot.ircmsgs as ircmsgs
import sqlite3 as lite
import urllib2 as urllib
try:
from supybot.i18n import PluginInternationalization
_ = PluginInternationalization('Minecraft')
except:
# Placeholder that allows to run the plugin on a bot
# without the i18n module
_ = lambda x:x
class Minecraft(callbacks.Plugin):
    """recipe add, recipe get, recipe search, recipe count, haspaid"""
    # Commands run in worker threads, so they must not share mutable
    # module/class state between calls (the old `global nick` did).
    threaded = True

    def recipeadd(self, irc, msg, args, item, line1, line2, line3):
        """Item Line1 Line2 Line3
        Add a recipe in, you must be a "TrustedMCer" to add recipes though"""
        dbpath = self.registryValue('DBPath')
        con = lite.connect(dbpath)
        quiet = self.registryValue('quiet', msg.args[0])
        # `with con` commits on success / rolls back on error.
        with con:
            con.text_factory = str
            cur = con.cursor()
            cur.execute("INSERT INTO recipes VALUES(?, ?, ?, ?);",
                        (item, line1, line2, line3))
            # Read the row back so the caller sees exactly what was stored.
            cur.execute("SELECT * FROM recipes WHERE item LIKE ?;", (item,))
            result = str(cur.fetchall()).translate(None, '[](),')
            if quiet or not irc.isChannel(msg.args[0]):
                irc.sendMsg(ircmsgs.privmsg(msg.nick, result))
            else:
                irc.sendMsg(ircmsgs.privmsg(msg.args[0], result))
    recipeadd = wrap(recipeadd, [("checkCapability", "TrustedMCer"),
                                 "something", "something", "something",
                                 "something"])

    def recipeget(self, irc, msg, args, citem):
        """Item
        Get a crafting recipe from the bot, atm if it doesn't reply, that means there isn't a
        recipe in the bot for it yet. Ask the Owner or a 'TrustedMCer'"""
        dbpath = self.registryValue('DBPath')
        con = lite.connect(dbpath)
        quiet = self.registryValue('quiet', msg.args[0])
        with con:
            # Each fetchone() yields a 1-tuple or None when not found.
            line1 = con.execute(
                "SELECT recipe_line_1 FROM recipes WHERE item LIKE ?;",
                (citem,)).fetchone()
            line2 = con.execute(
                "SELECT recipe_line_2 FROM recipes WHERE item LIKE ?;",
                (citem,)).fetchone()
            line3 = con.execute(
                "SELECT recipe_line_3 FROM recipes WHERE item LIKE ?;",
                (citem,)).fetchone()
        if line1 is None or line2 is None or line3 is None:
            irc.reply("That recipe does not exist in the database. Please get a TrustedMCer or the owner to add it, make sure to give the recipe :P")
        elif quiet or not irc.isChannel(msg.args[0]):
            for line in (line1, line2, line3):
                # `line` is a 1-tuple, so %-formatting emits the bare text.
                irc.sendMsg(ircmsgs.notice(msg.nick, "%s" % line))
        else:
            for line in (line1, line2, line3):
                irc.reply("%s" % line, private=False)
    recipeget = wrap(recipeget, ["text"])

    def recipelist(self, irc, msg, args):
        """
        lists the recipes in the database"""
        dbpath = self.registryValue('DBPath')
        con = lite.connect(dbpath)
        quiet = self.registryValue('quiet', msg.args[0])
        private = quiet or not irc.isChannel(msg.args[0])
        with con:
            count = con.execute("SELECT COUNT(item) FROM recipes;").fetchone()
            if private:
                irc.reply("There are %s recipes in the database. =)" % count,
                          private=True, to=msg.nick)
            else:
                irc.reply("There are %s recipes in the database. =)" % count)
            con.text_factory = str
            rows = con.execute(
                "SELECT item FROM recipes WHERE item LIKE '%';").fetchall()
            items = str(rows).translate(None, '(),[]')
            if private:
                irc.reply("%s" % items, prefixNick=False, private=True,
                          to=msg.nick)
            else:
                irc.reply("%s" % items, prefixNick=True)
    recipelist = wrap(recipelist, [("checkCapability", "owner")])

    def recipecount(self, irc, msg, args):
        """
        returns the total number of recipes in the database!"""
        dbpath = self.registryValue('DBPath')
        quiet = self.registryValue('quiet', msg.args[0])
        con = lite.connect(dbpath)
        with con:
            result = con.execute("SELECT COUNT(item) FROM recipes;").fetchone()
        if quiet or not irc.isChannel(msg.args[0]):
            irc.reply("There are %s crafting recipes inside the database. =)" % result,
                      private=True, to=msg.nick)
        else:
            irc.reply("There are %s crafting recipes inside the database. =)" % result,
                      private=False)
    recipecount = wrap(recipecount)

    def recipesearch(self, irc, msg, args, query):
        """Item
        Please include your search, use sqlite wildcards."""
        dbpath = self.registryValue('DBPath')
        con = lite.connect(dbpath)
        quiet = self.registryValue('quiet', msg.args[0])
        private = quiet or not irc.isChannel(msg.args[0])
        with con:
            con.text_factory = str
            count = con.execute(
                "SELECT COUNT(item) FROM recipes WHERE item LIKE ?;",
                (query,)).fetchone()
            count = str(count).translate(None, '(),')
            if private:
                irc.reply("%s gives %s results." % (query, count),
                          prefixNick=False, private=True, to=msg.nick)
            else:
                irc.reply("%s gives %s results." % (query, count),
                          private=False)
            result = con.execute(
                "SELECT item FROM recipes WHERE item LIKE ?",
                (query,)).fetchall()
            result = str(result).translate(None, '[]\(\)\,')
            if count == '0':
                irc.noReply()
            elif private:
                irc.reply("%s gives the following results, %s" % (query, result))
            else:
                irc.reply("%s gives the following results, %s" % (query, result),
                          private=False)
    recipesearch = wrap(recipesearch, ["something"])

    def haspaid(self, irc, msg, args, user):
        """User
        Use to determine whether not a certain user is premium."""
        quiet = self.registryValue('quiet', msg.args[0])
        req = urllib.Request(url="http://minecraft.net/haspaid.jsp?user=%s" % (user))
        result = urllib.urlopen(req).read()
        if result == "true":
            text = "%s is a premium account." % (user)
        elif result == "false":
            text = "%s is not a premium account." % (user)
        else:
            # Unexpected answer from minecraft.net: say nothing,
            # matching the original behavior.
            return
        if quiet or not irc.isChannel(msg.args[0]):
            irc.reply(text, prefixNick=False, private=True)
        else:
            irc.reply(text, prefixNick=False)
    haspaid = wrap(haspaid, ["something"])
# supybot discovers the plugin through this module-level name.
Class = Minecraft

# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
UTF-8
|
Python
| false | false | 2,013 |
712,964,608,464 |
8fde1c9faa46f10d5d141b1c3d83f9de9b0ebd9b
|
4204b56cbfb2ac8a21cbd271a9d22e9cf1afd2f2
|
/Tools/GardeningServer/alerts.py
|
8679c118b65dddf655ceb1c8f5e8c7cc6d3c00a6
|
[
"BSD-3-Clause"
] |
permissive
|
jtg-gg/blink
|
https://github.com/jtg-gg/blink
|
9fd9effda9bc78b0db3cf8113d5de3d1770e4a03
|
c3ef97cc9aaaf903a9b2977d4a3799fe2a8584a9
|
refs/heads/development11
| 2023-03-07T10:30:16.588846 | 2014-08-19T06:54:16 | 2015-03-06T06:47:58 | 21,159,392 | 0 | 1 | null | true | 2015-07-31T12:42:46 | 2014-06-24T09:32:31 | 2015-03-06T06:51:36 | 2015-07-31T12:42:45 | 5,761,072 | 0 | 0 | 0 |
HTML
| null | null |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import calendar
import datetime
import json
import webapp2
import zlib
from google.appengine.api import memcache
class DateTimeEncoder(json.JSONEncoder):
    """JSON encoder that serializes datetimes as Unix timestamps (UTC)."""

    def default(self, obj):
        """Return a JSON-serializable value for *obj*.

        datetime instances become integer seconds since the epoch; any
        other unhandled type is delegated to the base class, which
        raises the usual TypeError.
        """
        if not isinstance(obj, datetime.datetime):
            # Let JSONEncoder raise TypeError for unsupported types.
            return super(DateTimeEncoder, self).default(obj)
        return calendar.timegm(obj.timetuple())
class AlertsHandler(webapp2.RequestHandler):
    """Caches the latest alerts blob (zlib-compressed) in memcache and
    serves it back as JSON."""

    MEMCACHE_ALERTS_KEY = 'alerts'

    def get(self):
        """Serve the most recently posted alerts as JSON.

        Replies with an empty 200 body when nothing has been cached yet.
        """
        self.response.headers.add_header('Access-Control-Allow-Origin', '*')
        self.response.headers['Content-Type'] = 'application/json'
        cached = memcache.get(AlertsHandler.MEMCACHE_ALERTS_KEY)
        if not cached:
            return
        self.response.write(zlib.decompress(cached))

    def post(self):
        """Accept a JSON alerts payload, timestamp it, and cache it
        compressed."""
        try:
            payload = json.loads(self.request.get('content'))
        except ValueError:
            self.response.set_status(400, 'content field was not JSON')
            return
        payload.update({
            'date': datetime.datetime.utcnow(),
            'alerts': payload['alerts'],
        })
        serialized = json.dumps(payload, cls=DateTimeEncoder, indent=1)
        # Level 1: fastest compression; these blobs are small.
        memcache.set(AlertsHandler.MEMCACHE_ALERTS_KEY,
                     zlib.compress(serialized, 1))
# WSGI entry point: route /alerts to the handler above.
app = webapp2.WSGIApplication([
    ('/alerts', AlertsHandler)
])
|
UTF-8
|
Python
| false | false | 2,014 |
11,476,152,639,107 |
516de61d5ed764a2c4feeae85c9ee47aa7db92d7
|
7d791732babbdd5834303c780e0f3a8e7380a150
|
/bin/alerta-dashboard
|
74df265c5a559849ac432cd737f7ad25091ea26a
|
[
"Apache-2.0"
] |
permissive
|
iapilgrim/alerta
|
https://github.com/iapilgrim/alerta
|
b39e6ad1a16de68003f2216a5d4f7c41f1b9dae1
|
5e4ef7be564068058b9c270f7a4c3f84b51180b5
|
refs/heads/master
| 2020-12-11T09:03:51.556948 | 2014-04-15T14:02:15 | 2014-04-15T14:02:15 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
import sys
from alerta.common import config
from alerta.common import log as logging
from alerta.dashboard.v2 import app, __version__
# Module-level logger and the shared alerta configuration object.
LOG = logging.getLogger('alerta.dashboard')
CONF = config.CONF

if __name__ == '__main__':
    # Parse CLI flags/config file, set up logging, then serve the
    # dashboard web app on all interfaces; debug mode comes from config.
    config.parse_args(version=__version__)
    logging.setup('alerta')
    app.run(host='0.0.0.0', debug=CONF.debug)
|
UTF-8
|
Python
| false | false | 2,014 |
11,605,001,644,829 |
a7a284ef2279d5878aac0da9f39a11b26582c658
|
5608c92bf836f7203aedb8f924884c5b6d27830b
|
/cetacean_incidents/apps/csv_import/strandings_parse.py
|
e0510eddd21af76bdc98bf2bef2c783da1c35b12
|
[
"X11",
"GPL-1.0-or-later",
"GPL-3.0-only"
] |
non_permissive
|
morganwahl/Cetacean-Injuries-Database
|
https://github.com/morganwahl/Cetacean-Injuries-Database
|
69a90adf85e94b2ef4328f6579d8a33b8ca91697
|
bf89afe744b28af52b487f2c3ba60a862fe762e1
|
refs/heads/master
| 2020-06-01T19:41:40.289971 | 2011-10-05T17:18:58 | 2011-10-05T17:18:58 | 2,514,816 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from copy import copy
import csv
import datetime
from decimal import Decimal
import os
import pytz
import re
from django.db.models import Q
from django.forms import ValidationError
from django.utils.html import conditional_escape as esc
from django.contrib.localflavor.us.us_states import STATES_NORMALIZED
from cetacean_incidents.apps.countries.models import Country
from cetacean_incidents.apps.documents.models import (
Document,
DocumentType,
)
from cetacean_incidents.apps.entanglements.models import (
Entanglement,
EntanglementObservation,
)
from cetacean_incidents.apps.locations.forms import NiceLocationForm
from cetacean_incidents.apps.locations.models import Location
from cetacean_incidents.apps.locations.utils import dms_to_dec
from cetacean_incidents.apps.incidents.models import (
Animal,
Case,
Observation,
)
from cetacean_incidents.apps.shipstrikes.models import (
Shipstrike,
ShipstrikeObservation,
)
from cetacean_incidents.apps.tags.models import Tag
from cetacean_incidents.apps.taxons.models import Taxon
from cetacean_incidents.apps.uncertain_datetimes import UncertainDateTime
from . import CURRENT_IMPORT_TAG
# Every spreadsheet column header this importer recognizes, across all
# years of the strandings data.  Duplicated-looking entries are
# intentional: headers varied between years, and some entries that
# render identically here likely differ only in non-ASCII whitespace
# from the source spreadsheets -- NOTE(review): confirm against the
# original CSV files before editing any of these strings.
FIELDNAMES = set((
    # ignorable
    'New event ?',
    'New Case ?',
    'File complete? (internal use)',
    'Strike form?',
    'CCS Forms',

    # animal
    'Field # ', # field_number (regional database # in 2005 data)
    'Field # (S)', # field_number in 2005 data
    'Individual', # name
    'Indiv.', # name
    'Sp Ver?', # if yes, set taxon
    'Common Name', # determined_taxon (also used for Observation.taxon)
    'Alive?', # determined_dead_before (also used for Observation.condition)
    ' Alive? Was the animal ever seen alive during this event? 0=No,1=Yes ', # " used in 2002
    'Date', # determined_dead_before (if not alive) (also used for Observation.datetime_observed and Observation.datetime_reported)
    'Necropsy?', # necropsy, partial_necropsy
    'Full necropsy', # necropsy, partial_necropsy
    'Full Necropsy', # " used in 2002
    'Partial necropsy', # necropsy, partial_necropsy
    'Partial Necropsy', # " used in 2002
    'Carcass Dispossed Y/N', # carcass_disposed
    'Ceta data rec', # document on an animal with type 'Cetacean Data Record'
    'Cetacean Data Record', # " used in 2002
    'Histo results', # document on an animal with type 'Histological Findings'

    # case
    'Classification', # case_type
    'Class', # " used in 2001
    'Re-sight?', # just note in import_notes
    'Resight: Date 1st Seen', # just note in import_notes
    "Date 1st Seen: If there is a '1' in the Resight column indicating this is a whale with prior events, fill in the date of the whale's initial event in this column.", # " used in 2002
    'Event Confirmed?', # valid
    'NMFS Database Regional #', # just note in import_notes
    'Field #', # 2005 column name for 'NMFS Database Regional'
    'NMFS Database # ', # just note in import_notes
    'NMFS # ', # 2005 column name for 'NMFS Database # '
    'NMFS #', # 2003 column name for 'NMFS Database # '
    'Additional Identifier', # just note in import_notes
    'Entanglement or Collision #', # just note in import_notes
    'CCS web page', # document attached to case with type 'CCS web page'
    'Hi Form?', # document attached to case with type 'Human-Interaction Form'
    'Lg Whale email', # document attached to case with type 'Large Whale email'
    'Stranding Rept?', # document attached to case with type 'Stranding Report (Level-A)'
    'Level A', # " used in 2002
    'Last Sighting Prior to Entanglement (for Entangled whales only)', # happened_after

    # observation
    'Comments', # narrative
    'Date', # datetime_observed and datetime_reported (just the year)
    'Initial report', # just note in import_notes
    'Disentanglement or Response Agencies', # just note in import_notes
    'Common Name', # taxon
    'Sp Ver?', # just note in import_notes
    'Sex', # gender
    ' Sex M=male F=female U=unknown', # " used in 2002
    'Age (at time of event) *PRD indicated age by length as presented in field guides ', # age_class
    'Age (at time of event)', # age_class (in 2004 data)
    'Est. Age (at time of event) - A=adult, S=sub-adult, Y=Yearling, C=calf, U=unknown, NI=not indicated', # " used in 2002
    'Total Length (cm) *=est', # animal_description
    'Total Length ', # " used in 2002
    ' Ashore? - Did the whale/carcass ultimately come ashore', # ashore
    ' Ashore? - Did the whale/carcass ultimately come ashore', # ashore
    ' Ashore? - Did the whale/carcass ultimately come ashore', # ashore
    'Ashore - Did the whale/carcass ultimately come ashore', # ashore
    'Ashore? 0=No,Floater that never landed 1=Yes, came to shore', # " used in 2002
    'Alive?', # condition
    'Initial condtion', # condition, observation splitting
    'InitialCondition Code 1=Alive, Code 2=Fresh dead, Code 3=Moderate Decomposition, Code 4=Advanced Decomposition, Code 5=Mummified/Skeletal 6=Dead-Condition Unknown', # " used in 2002
    'Exam condtion', # condition, observation splitting
    'Exam Condition', # " used in 2002
    'Photo w/file', # if yes, there's documentation, otherwise unknown
    'Pictures', # documentation
    'Genetics Sample', # genetic_sample
    'Genetics sampled', # " used in 2002
    'HI ?', # human_interaction
    ' HI? 1=Yes 2=No CBD=Cannot be determined', # " used in 2002
    'Indication of Entanglement', # indication_entanglement
    'Indication of Ent anglement', # " used in 2002
    'Indication of Ship Strike', # indication_shipstrike
    'Phone Log', # document(s) attached to observation with type 'Phone Log entry'. just note for now.

    # entanglement observations
    'Gear', # entanglement_details
    'Disentangle status of live whale', # disentanglement_outcome
    'Disent status of live whale', # disentanglement_outcome
    'Disent status', # " used in 2002
    'Disent attempt on live whale?', # disentanglement_outcome
    'Disentangle attempt on live whale?', # disentanglement_outcome
    'Disent attempt?', # " used in 2002

    # location
    'LATITUDE', # coordinates
    'LONGITUDE', # coordinates
    'Location', # coordinates
    'Location *Location*=estimated', # " used in 2002
    'General location', # description
    'State/EZ', # state, waters, country
    'Region', # just note in import_notes
))
# Raw classification codes that map to case types.
CLASSIFICATIONS = set((
    'M', # mortality
    'E', # entanglement
    'Injury', # other injury
))

# various length units, in meters
CENTIMETER = Decimal('0.01')
INCH = Decimal('2.54') * CENTIMETER
FOOT = 12 * INCH

def parse_length(length):
    '''\
    Returns a tuple of length in meters and sigdigs of original.

    Accepts a bare number (assumed centimeters) or a number suffixed
    with cm, in/", or ft/'.  Raises ValueError for anything else.
    '''
    # trim
    length = length.strip()
    m = None
    for unit_match, unit_factor in (
        (r'(cm)?', CENTIMETER), # note that this matches no-unit
        (r'(in|")', INCH),
        (r"(ft|')", FOOT),
    ):
        match = re.match(r'(?i)(?P<length>[0-9.]+)\s*' + unit_match + '$', length)
        if match:
            length_string = match.group('length')
            length_decimal = Decimal(length_string)
            m = length_decimal * unit_factor
            break
    # Compare against None, not truthiness: a measured length of zero
    # ("0 cm") parses to a falsy Decimal but is not a parse failure.
    if m is None:
        raise ValueError("can't figure out length: %s" % length)
    (sign, digits, exponent) = length_decimal.as_tuple()
    # is there a decimal?
    if '.' in length_string:
        # count all the digits as significant
        sigdigs = len(digits)
    else:
        # don't count trailing zeros
        sigdigs = len(length_string.strip('0'))
    return m, sigdigs
def parse_date(date):
    """Parse *date* with each known spreadsheet date format in turn.

    Returns a datetime.date; raises ValueError if no format matches.
    """
    known_formats = (
        '%Y/%m/%d',
        '%d-%b-%y',
        '%m/%d/%Y',
        '%m/%d/%y',
        '%d-%b-%y*',
        '%B %d.%Y',
    )
    for fmt in known_formats:
        try:
            return datetime.datetime.strptime(date, fmt).date()
        except ValueError:
            continue
    raise ValueError("can't parse datetime %s" % date)
# Column headers (varying by year) for the "did it come ashore" field.
# NOTE(review): several entries render identically here; they likely
# differ only in non-ASCII whitespace in the source spreadsheets --
# confirm against the original CSV headers.
ASHORE_KEYS = (
    ' Ashore? - Did the whale/carcass ultimately come ashore',
    ' Ashore? - Did the whale/carcass ultimately come ashore',
    ' Ashore? - Did the whale/carcass ultimately come ashore',
    'Ashore - Did the whale/carcass ultimately come ashore',
    'Ashore? 0=No,Floater that never landed 1=Yes, came to shore',
)

def get_ashore(row):
    """Translate the row's ashore column to True/False/None (unknown).

    Returns None when the row has no ashore column at all; raises
    KeyError for an unrecognized column value.
    """
    # The header varies between years; use whichever is present.
    for column in ASHORE_KEYS:
        if column in row:
            break
    else:
        return None
    return {
        "": None,
        '0-1': None,
        "0": False,
        "1": True,
    }[row[column]]
def translate_taxon(data, data_key, row):
    """Store the Taxon matching row['Common Name'] under data[data_key].

    Raises KeyError for any code not listed below.
    NOTE(review): the whole lookup dict -- and therefore every
    Taxon.objects.get() database query in it -- is rebuilt on each
    call; consider hoisting it to a module-level lazy cache.
    """
    data[data_key] = {
        'BEWH': Taxon.objects.get(tsn=180506), # beaked whales
        'BOWH': Taxon.objects.get(tsn=180533), # bowhead whale
        'BRWH': Taxon.objects.get(tsn=612597), # bryde's whale
        'FIWH': Taxon.objects.get(tsn=180527), # finback
        'Fin': Taxon.objects.get(tsn=180527), # finback
        'HUWH': Taxon.objects.get(tsn=180530), # humpback
        'HUWH?': Taxon.objects.get(tsn=180530), # humpback
        'Humpback': Taxon.objects.get(tsn=180530), # humpback
        'MIWH': Taxon.objects.get(tsn=180524), # minke
        'Minke': Taxon.objects.get(tsn=180524), # minke
        'RIWH': Taxon.objects.get(tsn=180537), # right
        'RIWH?': Taxon.objects.get(tsn=180537), # right
        'Right': Taxon.objects.get(tsn=180537), # right
        'SEWH': Taxon.objects.get(tsn=180526), # sei whale
        'Sei': Taxon.objects.get(tsn=180526), # sei whale
        'SPWH': Taxon.objects.get(tsn=180488), # sperm whale
        'Sperm': Taxon.objects.get(tsn=180488), # sperm whale
        'UNAN': None, # unknown animal
        'Unk': None,
        'UNBA': Taxon.objects.get(tsn=552298), # unknown baleen whale
        'UNRW': Taxon.objects.get(tsn=552298), # unknown rorqual
        'UNFS': Taxon.objects.get(tsn=180523), # finback or sei whale
        'FI/SEWH': Taxon.objects.get(tsn=180523), # finback or sei whale
        'FI-SEWH': Taxon.objects.get(tsn=180523), # finback or sei whale
        'FIN/SEI': Taxon.objects.get(tsn=180523), # finback or sei whale
        'Fin/sei': Taxon.objects.get(tsn=180523), # finback or sei whale
        'UNWH': Taxon.objects.get(tsn=180403), # unknown whale
    }[row['Common Name']]
    # Codes whose species identification is ambiguous get flagged.
    if row['Common Name'] in set(('UNWH', 'UNRW', 'FI/SEWH', 'RIWH?', 'UNFS')):
        odd_value(data, 'Common Name')
### Three types of import problems
def note_error(key, column_name, notes):
    """Record *column_name* under notes[key], creating the set on first use."""
    notes.setdefault(key, set()).add(column_name)

## - a column is ignored
def ignored_column(data, column_name):
    note_error('ignored_column', column_name, data['import_notes'])

## - a column can't be represented
def unimportable_column(data, column_name):
    note_error('unimportable_column', column_name, data['import_notes'])

## - a column's value can't be represented
def unimportable_value(data, column_name):
    note_error('unimportable_value', column_name, data['import_notes'])

## - a column's value isn't understood
def unknown_value(data, column_name):
    note_error('unknown_value', column_name, data['import_notes'])

## - a combination of columns' values isn't understood
def unknown_values(data, column_names):
    note_error('unknown_values', column_names, data['import_notes'])

## - a column's value is recognized, but can't be fully represented
def odd_value(data, column_name):
    note_error('odd_value', column_name, data['import_notes'])
def parse_animal(row):
    """Build a dict of Animal-model field values from one CSV row.

    Columns that cannot be represented (or whose values are not
    understood) are recorded under a['import_notes'] via the
    note_error helpers instead of raising.
    """
    a = {
        'import_notes': {},
    }

    # Identifier columns that have no model field; just note them.
    if 'NMFS Database # ' in row and row['NMFS Database # ']:
        unimportable_column(a, 'NMFS Database # ')
    if 'NMFS # ' in row and row['NMFS # ']:
        unimportable_column(a, 'NMFS # ')
    if 'NMFS Database Regional #' in row and row['NMFS Database Regional #']:
        unimportable_column(a, 'NMFS Database Regional #')
    #if 'Field #' in row and row['Field #']:
    #    unimportable_column(a, 'Field #')
    if 'Additional Identifier' in row and row['Additional Identifier']:
        unimportable_column(a, 'Additional Identifier')

    # field_number
    # 2005 has different column names
    if 'Field # (S)' in row and row['Field # (S)']:
        a['field_number'] = row['Field # (S)']
    elif 'Field # ' in row and row['Field # ']:
        a['field_number'] = row['Field # ']
    elif 'Field #' in row and row['Field #']:
        a['field_number'] = row['Field #']

    # name: column header varies by year.
    name_key = None
    for k in ('Individual', 'Indiv.'):
        if k in row and row[k]:
            name_key = k
            break
    if not name_key is None:
        # filter 'unknown'
        if row[name_key] not in set(('U', 'Unknown')):
            a['name'] = row[name_key]

    # determined_taxon: only when the species was verified ('1').
    if {
        '': False,
        '?': False,
        '0': False,
        '1': True,
    }[row['Sp Ver?']]:
        translate_taxon(a, 'determined_taxon', row)
    # the value isn't understood
    if row['Sp Ver?'] not in set(('', '0', '1')):
        unknown_value(a, 'Sp Ver?')

    # determined_gender defaults to ''

    # determined_dead_before: header varies by year.
    alive_key = None
    for kn in (
        'Alive?',
        ' Alive? Was the animal ever seen alive during this event? 0=No,1=Yes ',
    ):
        if kn in row:
            alive_key = kn
            break
    if alive_key:
        dead = {
            '': False,
            '?': False,
            '1-0': False,
            '0-1': False,
            '0': True,
            '1': False,
        }[row[alive_key]]
        if dead:
            # a one-off exception
            if row['Date'] == 'unk-Sep07':
                a['determined_dead_before'] = datetime.date(2007, 10, 1)
            elif row['Date'] == '7/1/2003*':
                a['determined_dead_before'] = datetime.date(2003, 7, 1)
                odd_value(a, 'Date')
            else:
                a['determined_dead_before'] = parse_date(row['Date'])
        # the value isn't understood
        if row[alive_key] not in set(('', '0', '1')):
            unknown_value(a, alive_key)

    # carcass_disposed
    if 'Carcass Dispossed Y/N' in row:
        a['carcass_disposed'] = {
            '': None,
            'U': None,
            '0': False,
            'N': False,
            '1': True,
            'Y': True,
        }[row['Carcass Dispossed Y/N']]

    # partial_necropsy
    # necropsy
    # Header spelling varies by year; default a missing column to ''.
    full_necropsy_key = None
    for kn in (
        'Full Necropsy',
        'Full necropsy',
    ):
        if kn in row:
            full_necropsy_key = kn
            break
    if full_necropsy_key is None:
        full_necropsy_key = 'Full Necropsy'
        row['Full Necropsy'] = ''

    partial_necropsy_key = None
    for kn in (
        'Partial Necropsy',
        'Partial necropsy',
    ):
        if kn in row:
            partial_necropsy_key = kn
            break
    if partial_necropsy_key is None:
        partial_necropsy_key = 'Partial Necropsy'
        row['Partial Necropsy'] = ''

    # Decision table keyed on the (necropsy?, full, partial) triple,
    # each normalized to None/False/True first.  The third element of
    # the value marks whether the combination is fully understood.
    a['necropsy'], a['partial_necropsy'], understood = {
        (None, None, None ): (False, False, True),
        (None, False, False): (False, False, True),
        (False, None, None ): (False, False, True),
        (False, False, None ): (False, False, True),
        (False, False, False): (False, False, True),

        (None, True, None ): (True, False, True),
        (True, None, None ): (True, False, False),
        (True, True, None ): (True, False, True),
        (True, False, False): (True, False, False),
        (True, True, False): (True, False, True),

        (None, False, True ): (False, True, True),
        (False, False, True ): (False, True, False),
        (False, None, True ): (False, True, False),
        (True, None, True ): (False, True, False),
        (True, False, True ): (False, True, False),

        (None, True, True ): (True, True, True),
        (False, True, True ): (True, True, False),
        (True, True, True ): (True, True, True),
    }[
        {
            '': None,
            'X': None,
            'Pending': None,
            '0': False,
            '1': True,
            'Performed by Bob Bonde, no report in file': True,
            'yes- but no report': True,
            '0-Kim Durham': False,
        }[row['Necropsy?']],
        {
            '': None,
            '?': None,
            '0': False,
            'na': False,
            'N/A': False,
            '1': True,
        }[row[full_necropsy_key]],
        {
            '': None,
            '0': False,
            'na': False,
            'N/A': False,
            '1': True,
        }[row[partial_necropsy_key]],
    ]
    if not understood:
        unknown_values(a, ('Necropsy?', full_necropsy_key, partial_necropsy_key))
    if row['Necropsy?'] not in set(('', '0', '1')):
        odd_value(a, 'Necropsy?')

    # cause_of_death defaults to ''

    return a
def parse_case(row):
    """Build a dict of Case-model field values from one CSV row.

    The result carries '__class__' -- either a single case class name
    or a set of names for combined events -- plus 'import_notes'
    entries for any column that could not be fully represented.
    """
    # animal
    c = {
        'import_notes': {},
    }

    # case_type: the classification column name varies by year.
    class_key = None
    for kn in (
        'Classification',
        'Class',
    ):
        if kn in row:
            class_key = kn
            break
    cls = row[class_key]
    # Strip any "(resight)" annotation; remember whether it was there.
    cls, resight = re.subn(r'(?i)\s*\(?(likely )?resight\??\)?', '', cls)
    resight = bool(resight)
    c['__class__'] = {
        '': 'Case',
        'Injury': 'Case',
        'M': 'Case',
        'M ?': 'Case', # TODO mark as suspected?
        'M(resight?)': 'Case', # TODO mark in import notes
        'M(Likely resight)': 'Case', # TODO mark in import notes
        'M(Incidental Take)': 'Case',
        'C': 'Shipstrike',
        'SS': 'Shipstrike',
        'C (injury)': 'Shipstrike',
        'E': 'Entanglement',
        'E,': 'Entanglement',
        'E (CAN)': 'Entanglement',
        'E (lures)': 'Entanglement',
        'E (entrapped)': 'Entanglement',
        'C,M': set(('Case', 'Shipstrike')),
        'M,C': set(('Case', 'Shipstrike')),
        'M,SS': set(('Case', 'Shipstrike')),
        'M, C': set(('Case', 'Shipstrike')),
        'C, M': set(('Case', 'Shipstrike')),
        'M, SS': set(('Case', 'Shipstrike')),
        'SS, M': set(('Case', 'Shipstrike')),
        'M,E': set(('Case', 'Entanglement')),
        'M, E': set(('Case', 'Entanglement')),
        'E, M': set(('Case', 'Entanglement')),
        'E,M': set(('Case', 'Entanglement')),
        'E, C, M': set(('Case', 'Shipstrike', 'Entanglement')),
    }[cls]
    if resight or row[class_key] in set((
        '',
        'M ?',
        'M(resight?)',
        'M(Likely resight)',
        'M(Incidental Take)',
        'C (injury)',
        'E (CAN)',
        'E (lures)',
        'E (entrapped)',
    )):
        odd_value(c, class_key)
    c['classification'] = cls

    # Resight columns carry no importable data; just note them.
    for kn in (
        'Re-sight?',
        'Resight: Date 1st Seen',
        "Date 1st Seen: If there is a '1' in the Resight column indicating this is a whale with prior events, fill in the date of the whale's initial event in this column.",
    ):
        if kn in row and row[kn]:
            unimportable_column(c, kn)

    # valid: 1 = suspected, 2 = confirmed.
    c['valid'] = {
        '': 1,
        '1,0': 1,
        '?': 1,
        '0': 1,
        '0?': 1,
        '1': 2,
    }[row['Event Confirmed?']]
    # Flag the ambiguous raw value.  (Bug fix: the original tested
    # c['valid'], which is always an int by now and could never equal
    # '0?'.)
    if row['Event Confirmed?'] in set(('0?',)):
        odd_value(c, 'valid')

    # happened_after defaults to None
    last_seen_key = 'Last Sighting Prior to Entanglement (for Entangled whales only)'
    if last_seen_key in row and row[last_seen_key]:
        m = re.search(r'^(?P<date>(August )?[\d/Aug\-.]+)\s*(?P<other>[^\s]+.*)?$', row[last_seen_key])
        if m:
            c['happened_after'] = parse_date(m.group('date'))
            if m.group('other'):
                odd_value(c, last_seen_key)
        else:
            if row[last_seen_key] not in set((
                'Unk',
                'N/A',
                'U',
                'N/A, Previously unknown individual'
            )):
                raise ValueError("can't parse last-sighted field: '%s'" % row[last_seen_key])

    # human_interaction
    # choices:
    #  ('unk', 'not yet determined'),
    #  ('yes', 'yes'),
    #  ('no' , 'no'),
    #  ('cbd', 'can\'t be determined'),
    hi_key = None
    for kn in (
        'HI ?',
        ' HI? 1=Yes 2=No CBD=Cannot be determined',
    ):
        if kn in row:
            hi_key = kn
            break
    if not hi_key is None:
        c['human_interaction'] = {
            '': 'unk',
            '?': 'unk',
            'X': 'unk',
            '1/cbd?': 'unk',
            '1?': 'unk',
            'PENDING HISTO': 'unk',
            'No': 'no',
            '0': 'no',
            '1': 'yes',
            'cbd': 'cbd',
            'CBD': 'cbd',
            'CBD`': 'cbd',
        }[row[hi_key]]
        # Flag raw values that only ambiguously map to 'unk'.  (Bug
        # fix: the original tested the already-translated value, which
        # can never be one of these raw strings.)
        if row[hi_key] in set(('1/cbd?', '1?', 'PENDING HISTO', 'X')):
            unknown_value(c, 'human_interaction')

    # ole_investigation defaults to False
    c['ole_investigation'] = None

    ## Entanglement
    # Bug fix: the original read "c['__class__'] is set", comparing an
    # instance to the builtin type (always False), so combined cases
    # (e.g. 'M,E') never got their entanglement sub-dict.
    if c['__class__'] == 'Entanglement' or (
            isinstance(c['__class__'], set) and 'Entanglement' in c['__class__']):
        e = {}
        # nmfs_id defaults to ''
        # gear_fieldnumber defaults to ''
        # gear_analyzed defaults to False; None here means "unknown"
        e['gear_analyzed'] = None
        # analyzed_date defaults to None
        # analyzed_by defaults to None
        # analyzed_gear_attributes defaults to []
        # gear_owner_info defaults to None
        c['entanglement'] = e

    ## Shipstrike has no additional fields

    return c
def parse_location(row, observation_data):
    """Build a dict of Location-model field values from one CSV row.

    Import problems are noted on *observation_data* (the observation
    under construction), not on the returned location dict.
    NOTE: mutates *row* -- it back-fills 'LATITUDE'/'LONGITUDE' when
    they can be split out of a combined 'Location' column.
    """
    l = {}

    # description
    if row['General location']:
        l['description'] = row['General location']

    # country
    # waters
    # state
    ashore = get_ashore(row)
    state_input = row['State/EZ'].lower() if 'State/EZ' in row else ''
    if state_input == '':
        country = None
        eez = None
        state = None
    elif state_input in (('ez',)):
        # US Exclusive Economic Zone
        country = Country.objects.get(iso='US')
        eez = True
        state = None
    elif state_input in (('ber', 'can', 'cn', 'dr',)):
        # non-US codes: Bermuda, Canada, Dominican Republic
        country = Country.objects.get(iso={
            'ber': 'BM',
            'can': 'CA',
            'cn': 'CA',
            'dr': 'DO',
        }[state_input])
        eez = None
        state = None
    elif state_input in STATES_NORMALIZED.keys():
        country = Country.objects.get(iso='US')
        eez = False
        state = STATES_NORMALIZED[state_input]
    # one-off errors
    elif state_input in set(('ma eez',)):
        unknown_value(observation_data, 'State/EZ')
        country = None
        eez = None
        state = None
    else:
        raise KeyError(row['State/EZ'])

    # country
    l['country'] = country

    # waters: 0 = unknown, 1 = ashore, 2 = state waters, 3 = federal/EZ
    l['waters'] = 0
    if ashore:
        l['waters'] = 1
    elif not eez and ashore is None:
        l['waters'] = 0
    else:
        if country and not state:
            l['waters'] = 3
        elif state:
            l['waters'] = 2

    # state
    if state:
        l['state'] = state

    # coordinates
    for kn in (
        'Location',
        'Location *Location*=estimated',
    ):
        if kn in row and row[kn]:
            # split 'Location' into a lat and long
            number = r'[\d.+\-]+[^\s]*'
            # Try 1, 2, or 3 whitespace-separated number groups per
            # coordinate (decimal degrees, deg/min, deg/min/sec).
            for regex in (
                r'^(?P<lat>' + number + '[^\d.+\-]*)(?P<lon>' + number + '[^\d.+\-]*)$',
                r'^(?P<lat>' + '\s+'.join([number] * 2) + '[^\d.+\-]*)(?P<lon>' + '\s+'.join([number] * 1) + '[^\d.+\-]*)$',
                r'^(?P<lat>' + '\s+'.join([number] * 2) + '[^\d.+\-]*)(?P<lon>' + '\s+'.join([number] * 2) + '[^\d.+\-]*)$',
                r'^(?P<lat>' + '\s+'.join([number] * 3) + '[^\d.+\-]*)(?P<lon>' + '\s+'.join([number] * 3) + '[^\d.+\-]*)$',
            ):
                m = re.search(regex, row[kn])
                if m:
                    # Only back-fill; explicit columns take precedence.
                    if 'LATITUDE' not in row or not row['LATITUDE']:
                        row['LATITUDE'] = m.group('lat')
                    if 'LONGITUDE' not in row or not row['LONGITUDE']:
                        row['LONGITUDE'] = m.group('lon')
                    break
            if not m:
                if row[kn] in set((
                    '44 17 49 66 26 43 (7/10/03 sighting)',
                    'est: 4405.532 6837.161',
                    '*4128.52 7116.27*',
                )):
                    unknown_value(observation_data, kn)
                elif row[kn] not in set(('?', 'unknown', '??', 'None')):
                    raise ValueError("can't parse Location: '%s'" % row[kn])

    lat = None
    lon = None
    if 'LATITUDE' in row and row['LATITUDE']:
        try:
            lat = NiceLocationForm._clean_coordinate(row['LATITUDE'], is_lat=True)
            lat = dms_to_dec(lat)
        except ValidationError:
            unknown_value(observation_data, 'LATITUDE')
    if 'LONGITUDE' in row and row['LONGITUDE']:
        try:
            lon = NiceLocationForm._clean_coordinate(row['LONGITUDE'], is_lat=False)
            lon = dms_to_dec(lon)
            # assume west
            if lon > 0:
                odd_value(observation_data, 'LONGITUDE')
            lon = - abs(lon)
        except ValidationError:
            unknown_value(observation_data, 'LONGITUDE')
    # A lone lat or lon is useless; flag the pair.
    if (lat is None) != (lon is None):
        unknown_values(observation_data, ('LATITUDE', 'LONGITUDE'))
    if (not lat is None) and (not lon is None):
        l['coordinates'] = "%s,%s" % (lat, lon)

    if 'Region' in row and row['Region']:
        unimportable_column(observation_data, 'Region')

    return l
def parse_observation(row, case_data):
    """
    Build a dict of Observation-model field values from a CSV *row*.

    *case_data* is the dict returned by parse_case for the same row; its
    '__class__' entry decides whether entanglement- or shipstrike-specific
    data is parsed into o['observation_extensions']. Anything that can't be
    imported cleanly is recorded under o['import_notes'] via the
    odd_value/unknown_value/unimportable_* helpers.
    """
    o = {
        'import_notes': {},
    }
    # animal
    #o['animal'] = case_data['animal']
    # cases
    #o['cases'] = case_data['id']
    # narrative
    if row['Comments']:
        o['narrative'] = row['Comments']
    # observer defaults to None
    if row['Initial report']:
        unimportable_column(o, 'Initial report')
    if 'Disentanglement or Response Agencies' in row and row['Disentanglement or Response Agencies']:
        unimportable_column(o, 'Disentanglement or Response Agencies')
    # datetime_observed
    if not row['Date']:
        unimportable_value(o, 'Date')
    # a one-off exception
    if row['Date'] == 'unk-Sep07':
        uncertain_datetime = UncertainDateTime(2007, 9)
    elif row['Date'] == '7/1/2003*':
        uncertain_datetime = UncertainDateTime(2003, 7, 1)
        odd_value(o, 'Date')
    else:
        date = parse_date(row['Date'])
        uncertain_datetime = UncertainDateTime(date.year, date.month, date.day)
    o['datetime_observed'] = uncertain_datetime
    # location
    #o['location'] = location_data['id']
    # observer_vessel defaults to None
    # reporter defaults to None
    # datetime_reported
    # only the year of the report is assumed known
    o['datetime_reported'] = UncertainDateTime(uncertain_datetime.year)
    # taxon
    translate_taxon(o, 'taxon', row)
    if row['Sp Ver?']:
        unimportable_column(o, 'Sp Ver?')
    # animal_length
    # different source spreadsheets use different column headers for length
    length_key = None
    for kn in (
        'Total Length ',
        'Total Length (cm) *=est',
    ):
        if kn in row:
            length_key = kn
            break;
    if not length_key is None:
        if row[length_key] not in set(('', 'U')):
            try:
                length, sigdigs = parse_length(row[length_key])
                o['animal_length'] = length
                o['animal_length_sigdigs'] = sigdigs
            except ValueError:
                unimportable_value(o, length_key)
    # age_class
    age_keys = (
        'Age (at time of event) *PRD indicated age by length as presented in field guides ',
        'Age (at time of event)',
        'Est. Age (at time of event) - A=adult, S=sub-adult, Y=Yearling, C=calf, U=unknown, NI=not indicated',
    )
    for age_key in age_keys:
        if age_key in row:
            # map the spreadsheet's free-form age codes onto the model's
            # age_class choices ('' = unknown, ca = calf, ju = juvenile,
            # ad = adult)
            o['age_class'] = {
                '': '',
                '?': '',
                'Born in 2001': '',
                '6 tons*': '',
                'Y': '',
                'Y*': '',
                'U': '',
                'Unk.': '',
                'U-Aknowlton': '',
                'First id in 1992': '',
                'M': '',
                'X': '',
                'C': 'ca',
                'Calf': 'ca',
                'J': 'ju',
                'SA': 'ju',
                'S': 'ju',
                'S*': 'ju',
                'Subadult': 'ju',
                'A': 'ad',
                'A*': 'ad',
            }[row[age_key]]
            if row[age_key] in set(('S*','A*', 'U-Aknowlton',)):
                odd_value(o, age_key)
            if row[age_key] in set(('Y', 'Y*', 'S', 'M', 'First id in 1992', '6 tons*', 'Born in 2001', 'X')):
                unknown_value(o, age_key)
            break
    # gender
    sex_key = None
    for kn in (
        ' Sex M=male F=female U=unknown',
        'Sex',
    ):
        if kn in row:
            sex_key = kn
            break;
    if not sex_key is None:
        o['gender'] = {
            '': '',
            '0': '',
            '4': '',
            'U': '',
            'U-Aknowlton': '',
            'X': '',
            '?': '',
            'CBD': '',
            'm': 'm',
            'M': 'm',
            'f': 'f',
            'F': 'f',
        }[row[sex_key]]
        if row[sex_key] in set(('CBD', 'U-Aknowlton')):
            odd_value(o, sex_key)
        if row[sex_key] in set(('4', 'X', '0')):
            unknown_value(o, sex_key)
    # animal_description defaults to ''
    # ashore
    o['ashore'] = get_ashore(row)
    # condition
    # (0, 'unknown'),
    # (1, 'alive'),
    # (6, 'dead, carcass condition unknown'),
    # (2, 'fresh dead'),
    # (3, 'moderate decomposition'),
    # (4, 'advanced decomposition'),
    # (5, 'skeletal'),
    alive_key = None
    for kn in (
        'Alive?',
        ' Alive? Was the animal ever seen alive during this event? 0=No,1=Yes ',
    ):
        if kn in row:
            alive_key = kn
            break
    initial_key = None
    for kn in (
        'Initial condtion',
        'InitialCondition Code 1=Alive, Code 2=Fresh dead, Code 3=Moderate Decomposition, Code 4=Advanced Decomposition, Code 5=Mummified/Skeletal 6=Dead-Condition Unknown',
    ):
        if kn in row:
            initial_key = kn
            break
    exam_key = None
    for kn in (
        'Exam condtion',
        'Exam Condition',
    ):
        if kn in row:
            exam_key = kn
            break
    # shared mapping of spreadsheet condition codes to model condition values
    conditions = {
        '': 0,
        '?': 0,
        'X': 0,
        '0': 0,
        'U': 0,
        'na': 0,
        'NE': 0,
        '1': 1,
        '1/2': 2,
        '2': 2,
        '3': 3,
        '3+': 3,
        '3 or 4': 3,
        '3 to 4': 3,
        '4': 4,
        '4?': 4,
        '4+': 4,
        '~4': 4,
        '5': 5,
        '6': 6,
    }
    if not initial_key is None:
        o['initial_condition'] = conditions[row[initial_key]]
    else:
        o['initial_condition'] = 0
    if not exam_key is None:
        o['exam_condition'] = conditions[row[exam_key]]
    else:
        o['exam_condition'] = 0
    # NOTE(review): unlike initial_key/exam_key above, alive_key is used here
    # without a None-guard — if neither 'Alive?' column variant is present
    # this raises KeyError(None). Presumably every source sheet has one of
    # the two columns; confirm before reusing this importer on new data.
    o['alive_condition'] = {
        '': 0,
        '?': 0,
        '1-0': 0,
        '0-1': 0,
        '0': 6,
        '1': 1,
    }[row[alive_key]]
    if row[alive_key] not in set(('', '0', '1')):
        odd_value(o, alive_key)
    # let the alive/dead column fill in conditions the condition columns left
    # unknown
    if o['initial_condition'] == 0 and o['alive_condition'] != 0:
        o['initial_condition'] = o['alive_condition']
    if o['exam_condition'] == 0 and o['alive_condition'] != 0:
        o['exam_condition'] = o['alive_condition']
    # 'split' means initial and exam disagree, so two Observations get created
    o['split'] = False
    o['condition'] = o['initial_condition']
    if o['initial_condition'] != o['exam_condition'] and o['exam_condition'] != 0:
        o['split'] = True
    condition_expected = set(('', '1', '2', '3', '4', '5', '6'))
    for model_key, row_key in (
        ('initial', initial_key),
        ('exam', exam_key),
    ):
        o[model_key] = bool(o[model_key + '_condition'])
        if not row_key is None:
            if row[row_key] not in condition_expected:
                odd_value(o, row_key)
    # wounded defaults to None
    # wound_description defaults to ''
    # documentation
    # if 'Pictures' starts with 'yes' or 'no', strip out the rest
    pictures = row['Pictures']
    # don't forget 'match' only matches at the beginning of the string
    yesno_match = re.match(r'(yes|no)\s*[^\s]+', row['Pictures'], re.I)
    if yesno_match:
        pictures = yesno_match.group(1)
        odd_value(o, 'Pictures')
    # combine the 'Pictures' and 'Photo w/file' columns (each mapped to a
    # tri-state None/False/True) into a single documentation flag
    o['documentation'] = {
        (None, None ): None,
        (None, False): None,
        (None, True ): True,
        (False, None ): False,
        (False, False): False,
        (False, True): None,
        (True, None ): True,
        (True, True ): True,
        (True, False): True,
    }[
        {
            '': None,
            '?': None,
            'Unk': None,
            'Unknown': None,
            'Maybe, WW Vessel, but NOAA never received. Photos with file are from a separate 8/11 sighting of Sickle': None,
            'Uncertain - Canadian CG, Fundy Voyager.': None,
            'Unknown - perhaps w/ S. Dufault': None,
            'Unknown- DFO?': None,
            'No': False,
            'NO': False,
            'Yes': True,
            'yes': True,
            '1': True,
            'video': True,
            'Video -NMFS': True,
            'Video, USCG': True,
            'pending': True,
            'Pending': True,
            'Pending -from Navy': True,
            'Gear only': True,
            'Video- Grisel Rodrigues': True,
            'CCSN': True,
        }[pictures],
        {
            '': None,
            '?': None,
            'X': None,
            'PG': None,
            '0': False,
            '1': True,
        }[row['Photo w/file']],
    ]
    if row['Pictures'] not in set(('', 'No', 'NO', 'Yes', 'yes', '1')):
        odd_value(o, 'Pictures')
    if row['Photo w/file'] not in set(('', '0', '1')):
        odd_value(o, 'Photo w/file')
    # tagged deafults to None
    # biopsy defaults to None
    # genetic_sample
    genetic_key = None
    for kn in (
        'Genetics Sample',
        'Genetics sampled',
    ):
        if kn in row:
            genetic_key = kn
            break
    if not genetic_key is None:
        o['genetic_sample'] = {
            "": None,
            "0": False,
            "1": True,
        }[row[genetic_key]]
    # indication_entanglement
    indication_entanglement_key = None
    for kn in (
        'Indication of Entanglement',
        'Indication of Ent anglement',
    ):
        if kn in row:
            indication_entanglement_key = kn
            break
    if not indication_entanglement_key is None:
        o['indication_entanglement'] = {
            '': None,
            '1?': None,
            '0': False,
            '1': True,
        }[row[indication_entanglement_key]]
        if row[indication_entanglement_key] in set(('1?',)):
            unknown_value(o, indication_entanglement_key)
    # indication_shipstrike
    if 'Indication of Ship Strike' in row:
        o['indication_shipstrike'] = {
            '': None,
            '0': False,
            '1': True,
            '1*': True,
        }[row['Indication of Ship Strike']]
        if not row['Indication of Ship Strike'] in set(('', '0', '1')):
            odd_value(o, 'Indication of Ship Strike')
    ### ObservationExtensions
    o['observation_extensions'] = {}
    ## EntanglementObservation
    if case_data['__class__'] == 'Entanglement' or isinstance(case_data['__class__'], set) and 'Entanglement' in case_data['__class__']:
        eo = {}
        # anchored defaults to None
        # gear_description defaults to ''
        # gear_body_location defaults to []
        # entanglement_details
        if row['Gear']:
            eo['entanglement_details'] = row['Gear']
        # gear_retrieved defaults to None
        disent_attempt_key = None
        for k in (
            'Disentangle attempt on live whale?',
            'Disent attempt on live whale?',
            'Disent attempt?',
        ):
            if k in row:
                disent_attempt_key = k
                break
        disent_status_key = None
        for k in (
            'Disentangle status of live whale',
            'Disent status of live whale',
            'Disent status',
        ):
            if k in row:
                disent_status_key = k
                break
        # disentanglement_outcome
        # normalize the attempt column to a tri-state None/False/True
        attempt = {
            '': None,
            '.': None,
            'N/A': None,
            'Document and tag if possible': None,
            '1-F/V': None,
            '0': False,
            'no': False,
            'No': False,
            '1': True,
            'Yes': True,
        }[row[disent_attempt_key]]
        if row[disent_attempt_key] in set((
            '.',
            'Document and tag if possible',
        )):
            unknown_value(o, disent_attempt_key)
        # normalize the free-form status column to a small vocabulary
        outcome = {
            '': 'unknown',
            '?': 'unknown',
            'N/A': 'unknown',
            'Animal releases': 'unknown',
            'Carrying gear': 'gear',
            'disentangled': 'no gear',
            'Disentangled': 'no gear',
            '"Disentangled"': 'no gear',
            'Disentangled (presumed)': 'no gear',
            'Completely disentangled': 'no gear',
            'fully disentangled': 'no gear',
            'disentangled by bystander': 'no gear',
            'Entangled': 'entangled',
            'entangled': 'entangled',
            'Entangled, needs full assessment': 'entangled, needs assesment',
            'Entangled -suspect but cannot confirm that gear shed': 'entangled, suspected shed',
            'Presumed Entangled': 'entangled',
            'Unsuccessful': 'unsuccessful',
            'Gear free': 'no gear',
            'Gear shed': 'gear shed',
            'Potentially gear free': 'unknown',
            'Minor': 'unknown',
            'No attempt made': 'unknown',
            'NOAA, GA': 'unknown',
            'Partial Disentanglement': 'partly entangled',
            'partial disentanglement': 'partly entangled',
            'Partial disentanglement': 'partly entangled',
            'Disentangled from most gear': 'partly entangled',
            'some line still embedded in dorsal peduncle': 'some gear',
            'unable to relocate': 'unknown',
            'Unable to relocate': 'unknown',
            'Animal could not be relocated': 'unknown',
            'Unknown': 'unknown',
            'unknown': 'unknown',
            'Unknown/unconfirmed': 'unknown',
            'Lost/Unidentifiable': 'unknown',
            'Freed by fisherman': 'no gear by fisherman',
            'Animal freed itself': 'gear shed',
            'Presumed Entangled - Gear shed': 'unknown',
        }[row[disent_status_key]]
        if row[disent_status_key] in set((
            '?',
            'Carrying gear',
            'some line still embedded in dorsal peduncle',
            'disentangled by bystander',
            'Disentangled (presumed)',
        )):
            odd_value(o, disent_status_key)
        if row[disent_status_key] in set((
            'Minor',
            'No attempt made',
            'NOAA, GA',
            'unable to relocate',
            'Unable to relocate',
            'Animal could not be relocated',
            'Potentially gear free',
            'Animal releases',
            'Lost/Unidentifiable',
        )):
            unknown_value(o, disent_status_key)
        # disentanglement_outcome choices:
        # #('', 'unknown'),
        # ('shed', 'gear shed'),
        # ('mntr', 'monitor'),
        # ('entg', 'entangled'),
        # ('part', 'partial'),
        # ('cmpl', 'complete'),
        #  <em>Was a disentanglement attempted and if so, what was the outcome?<em>
        #  <dl>
        #  <dt>gear shed</dt>
        #  <dd>No disentanglement was attempted since the animal had disentangled itself.</dd>
        #  <dt>monitor</dt>
        #  <dd>No disentanglement was attempted since the entanglement wasn't severe enough to warrant it.</dd>
        #  <dt>entangled</dt>
        #  <dd>A disentanglement was needed, but either couldn't be attempted or was unsuccessful.</dd>
        #  <dt>partial</dt>
        #  <dd>A disentanglement was attempted and the gear was partly removed.</dd>
        #  <dt>complete</dt>
        #  <dd>A disentanglement was attempted and the gear was completely removed.</dd>
        #  </dl>
        # combine (attempt, outcome) into a model choice; 'understood' is
        # False when the combination is ambiguous, which files an
        # unknown_values note below
        eo['disentanglement_outcome'], understood = {
            (None, 'unknown'): ('', True),
            (None, 'gear'): ('', False),
            (None, 'entangled'): ('', False),
            (None, 'partly entangled'): ('', False),
            (None, 'no gear'): ('', False), # could be 'shed' or 'cmpl'
            (False, 'unknown'): ('', False), # could be 'shed' or 'mntr'
            (False, 'no gear'): ('shed', True),
            (False, 'no gear by fisherman'): ('', False),
            (False, 'entangled'): ('entg', True),
            (False, 'entangled, needs assesment'): ('entg', False),
            (False, 'gear shed'): ('shed', True),
            (True, 'entangled'): ('entg', True),
            (True, 'entangled, needs assesment'): ('entg', False),
            (True, 'entangled, suspected shed'): ('entg', False),
            (True, 'unsuccessful'): ('entg', True),
            (True, 'partly entangled'): ('part', True),
            (True, 'some gear'): ('', False),
            (True, 'gear shed'): ('', False),
            (True, 'no gear'): ('cmpl', True),
            (True, 'unknown'): ('', False),
        }[(attempt, outcome)]
        if not understood:
            unknown_values(o, (disent_attempt_key, disent_status_key))
        o['observation_extensions']['entanglement_observation'] = eo
    ## ShipstrikeObservation
    # striking_vessel defaults to None
    if case_data['__class__'] == 'Shipstrike' or isinstance(case_data['__class__'], set) and 'Shipstrike' in case_data['__class__']:
        o['observation_extensions']['shipstrike_observation'] = {}
    return o
def parse_documents(row, animal_data, case_data):
    """
    Build a list of document dicts for the known document columns of *row*.

    Each returned dict has 'attach_to' ('animal' or 'case') and a
    'document_type' model instance. A column value of '1' creates a
    document; '' and '0' mean none; anything else files an odd_value note
    on the corresponding animal/case data dict.
    """
    yes_values = frozenset(('1',))
    no_values = frozenset(('', '0'))
    # (possible column names, what to attach to, notes target, DocumentType name)
    specs = (
        (('Ceta data rec', 'Cetacean Data Record'), 'animal', animal_data, 'Cetacean Data Record'),
        (('Histo results',), 'animal', animal_data, 'Histological Findings'),
        (('CCS web page',), 'case', case_data, 'CCS web page'),
        (('Hi Form?',), 'case', case_data, 'Human-Interaction Form'),
        (('Lg Whale email',), 'case', case_data, 'Large Whale email'),
        (('Stranding Rept?', 'Level A'), 'case', case_data, 'Stranding Report (Level-A)'),
    )
    docs = []
    for doc_keys, attach_to, data, doctype_name in specs:
        for doc_key in doc_keys:
            if doc_key not in row or not row[doc_key]:
                continue
            if row[doc_key] in yes_values:
                # create a new document entry
                docs.append({
                    'attach_to': attach_to,
                    'document_type': DocumentType.objects.get(name=doctype_name),
                })
            elif row[doc_key] not in no_values:
                odd_value(data, doc_key)
    return docs
def parse_csv(csv_file):
'''\
Given a file-like object with CSV data, return a tuple with one item for
each row. The items are a dictionary like so:
{
'animals': (<animal>,),
'cases': (<case>,),
etc...
}
Where <animal>, <case> etc. are dictionaries with model fieldnames as keys.
May also throw an exception if the CSV data isn't understood.
'''
data = csv.DictReader(csv_file, dialect='excel')
row_results = []
for i, row in enumerate(data):
# normalize cell values and check for unhandled fieldnames
empty_row = True
for k in row.keys():
if row[k] is None:
row[k] = ''
row[k] = row[k].strip()
if row[k] != '':
empty_row = False
if k not in FIELDNAMES:
#raise UnrecognizedFieldError("%s:%s" % (k, row[k]))
print u"""Warning: unrecognized field "%s": "%s\"""" % (k, row[k])
if empty_row:
continue
# ignore beaked whales
if row['Common Name'] == 'BEWH':
continue
new = {}
a = parse_animal(row)
new['animal'] = a
c = parse_case(row)
new['case'] = c
o = parse_observation(row, c)
new['observation'] = o
l = parse_location(row, o)
new['location'] = l
docs = parse_documents(row, a, c)
if docs:
new['documents'] = docs
row_results.append({'row_num': i, 'row': row, 'data': new})
return tuple(row_results)
def _process_import_notes(notes, row, filename):
    """
    Render the import-notes dict collected while parsing a row into the HTML
    blob stored on the imported model's import_notes field.

    The result is a two-column layout: a header (import timestamp and source
    filename) plus one section per note category on the left, and a table of
    the row's original non-blank fields on the right.
    """
    # timestamp the import in the local timezone when TZ is set, else UTC
    if 'TZ' in os.environ:
        timezone = pytz.timezone(os.environ['TZ'])
    else:
        timezone = pytz.utc
    header = u"""<p>Imported on <span class="date">%s</span> from <span class="filename">%s</span>.</p>\n""" % (
        esc(datetime.datetime.now(timezone).strftime('%Y-%m-%d %H:%M:%S %z')),
        esc(filename),
    )
    err = u""
    # possible duplicate models found during import (see _save_row)
    if 'mergeable' in notes:
        err += u"""<div class="section">Found these possible duplicates when importing:\n"""
        err += " <ul>\n"
        for dup in notes['mergeable']:
            err += u" <li>%06d. %s</li>\n" % (
                dup.pk,
                esc(unicode(dup)),
            )
        err += " </ul>\n"
        err += "</div>\n"
    def _error_columns(error_message, columns):
        # render one note section: a message plus a table of the offending
        # (non-blank) columns and their original values
        o = u"""<div class="section">%s\n""" % error_message
        o += u""" <table class="imported_entry">\n"""
        for key in columns:
            value = row[key]
            if value != '':
                o += u" <tr><th>%s</th><td>%s</td></tr>\n" % (
                    esc(key),
                    esc(value),
                )
        o += u" </table>\n"
        o += u"</div>"
        return o
    if 'ignored_column' in notes:
        err += _error_columns(
            'Some non-blank columns were ignored:',
            sorted(notes['ignored_column']),
        )
    if 'unimportable_column' in notes:
        err += _error_columns(
            'Some non-blank columns don\'t correspond to any field in the database:',
            sorted(notes['unimportable_column']),
        )
    if 'unimportable_value' in notes:
        err += _error_columns(
            'Some non-blank columns\' values don\'t correspond to any field in the database:',
            sorted(notes['unimportable_value']),
        )
    if 'unknown_value' in notes:
        err += _error_columns(
            'Some non-blank columns\' values weren\'t understood:',
            sorted(notes['unknown_value']),
        )
    if 'unknown_values' in notes:
        # each entry is a set of columns whose combination wasn't understood
        for col_set in notes['unknown_values']:
            err += _error_columns(
                'Some non-blank columns\' combined values weren\'t understood:',
                sorted(col_set),
            )
    if 'odd_value' in notes:
        err += _error_columns(
            'Some non-blank columns\' values were only partly understood:',
            sorted(notes['odd_value']),
        )
    # add a table of the original fields
    orig = u"""<div class="section">Original entry:\n"""
    orig += u""" <table class="imported_entry">\n"""
    for key in sorted(row.keys()):
        value = row[key]
        if value != '':
            orig += u" <tr><th>%s</th><td>%s</td></tr>\n" % (
                esc(key),
                esc(value),
            )
    orig += u" </table>\n"
    orig += u"</div>\n"
    result = u"""<table class=layout><tr><td class="layout left_side">\n""" + header + err + u"""\n</td><td class="layout right_side">""" + orig + u"</td></tr></table>\n"
    return result
def _make_tag(thing, user):
    """Tag *thing* with the current-import tag on behalf of *user*."""
    new_tag = Tag(
        entry=thing,
        user=user,
        tag_text=CURRENT_IMPORT_TAG,
    )
    new_tag.clean()
    new_tag.save()
def _save_row(r, filename, user):
    """
    Persist one parsed CSV row (*r*, as produced by parse_csv) as Django
    models: an Animal, one Case per case class, one or two Observations
    (each with its own Location), any case/entanglement/shipstrike
    extensions, and any Documents. Everything created gets tagged via
    _make_tag for *user*.
    """
    ### animal
    a = r['data']['animal']
    # check for existing animals
    animal_query = Q()
    if 'field_number' in a:
        animal_query |= Q(field_number__iexact=a['field_number'])
    if 'name' in a:
        animal_query |= Q(name__icontains=a['name'])
    if animal_query: # bool(Q()) is False
        animal_matches = Animal.objects.filter(animal_query)
        if animal_matches:
            # recorded so _process_import_notes can list possible duplicates
            a['import_notes']['mergeable'] = animal_matches
    animal_kwargs = copy(a)
    animal_kwargs['import_notes'] = _process_import_notes(a['import_notes'], r['row'], filename)
    animal = Animal(**animal_kwargs)
    animal.clean()
    animal.save()
    _make_tag(animal, user)
    ### case(s)
    c = r['data']['case']
    c['animal'] = animal
    # '__class__' may be a single name or a set of names; normalize to a set
    if not isinstance(c['__class__'], set):
        c['__class__'] = set((c['__class__'],))
    cases_classes = set()
    for cls in c['__class__']:
        cases_classes.add({
            'Case': Case,
            'Shipstrike': Shipstrike,
            'Entanglement': Entanglement,
        }[cls])
    cases = []
    for cls in cases_classes:
        kwargs = copy(c)
        kwargs['import_notes'] = _process_import_notes(c['import_notes'], r['row'], filename)
        del kwargs['__class__']
        del kwargs['classification']
        # entanglement-specific fields only apply to the Entanglement case
        if 'entanglement' in c:
            if cls is Entanglement:
                kwargs.update(c['entanglement'])
            del kwargs['entanglement']
        cases.append(cls(**kwargs))
    for case in cases:
        case.clean()
        case.save()
        _make_tag(case, user)
    ### observations(s)
    l = r['data']['location']
    o = r['data']['observation']
    l_kwargs = copy(l)
    observations = []
    def _make_observation(kwargs):
        # strip the import-only bookkeeping keys, give the observation its
        # own freshly-saved Location, and queue it for saving below
        del kwargs['split']
        del kwargs['alive_condition']
        del kwargs['exam_condition']
        del kwargs['initial_condition']
        del kwargs['observation_extensions']
        kwargs['animal'] = animal
        loc = Location(**l_kwargs)
        loc.clean()
        loc.save()
        kwargs['location'] = loc
        kwargs['import_notes'] = _process_import_notes(o['import_notes'], r['row'], filename)
        observations.append(Observation(**kwargs))
    if o['split']:
        # create seperate initial and exam observations
        if o['initial']:
            i_kwargs = copy(o)
            i_kwargs['condition'] = o['initial_condition']
            _make_observation(i_kwargs)
        if o['exam']:
            e_kwargs = copy(o)
            e_kwargs['condition'] = o['exam_condition']
            _make_observation(e_kwargs)
        if len(observations) != 2:
            raise ValueError("Observation data has 'split' but is missing 'exam' and 'initial'!")
    else:
        _make_observation(copy(o))
    for obs in observations:
        obs.clean()
        obs.save()
        obs.cases = cases
        _make_tag(obs, user)
        # attach per-case-type extension records to each observation
        if 'entanglement_observation' in o['observation_extensions']:
            eo_kwargs = copy(o['observation_extensions']['entanglement_observation'])
            eo_kwargs['observation_ptr'] = obs
            eo = EntanglementObservation(**eo_kwargs)
            eo.clean()
            eo.save()
        if 'shipstrike_observation' in o['observation_extensions']:
            sso_kwargs = copy(o['observation_extensions']['shipstrike_observation'])
            sso_kwargs['observation_ptr'] = obs
            sso = ShipstrikeObservation(**sso_kwargs)
            sso.clean()
            sso.save()
    ### documents
    if 'documents' in r['data']:
        for doc in r['data']['documents']:
            kwargs = copy(doc)
            del kwargs['attach_to']
            if doc['attach_to'] == 'case':
                # one Document per case
                # NOTE(review): unlike the 'animal' branch below, this branch
                # never calls d.clean() before saving — confirm whether that
                # is intentional.
                for c in cases:
                    kwargs['attached_to'] = c
                    d = Document(**kwargs)
                    d.save()
            elif doc['attach_to'] == 'animal':
                kwargs['attached_to'] = animal
                d = Document(**kwargs)
                d.clean()
                d.save()
def process_results(results, filename, user):
'''\
Create all the new models described in results in a single transaction and
a single revision.
'''
# process the results
for r in results:
print r['row_num']
_save_row(r, filename, user)
|
UTF-8
|
Python
| false | false | 2,011 |
1,468,878,860,237 |
c2fd6f7fe45455ab12f9b181af90a2737730c818
|
d289f566d23bbcb8447b71a317d52193784d4e0b
|
/hamm.py
|
71f203a03fa6ec81a870e852615c95f93cf5ae32
|
[] |
no_license
|
rulises/ComSci
|
https://github.com/rulises/ComSci
|
6906351854fcee5ad7e070ed08813cdb6595e86b
|
eff31a72920716eca4e0f5b1f2b2b31958fafeb8
|
refs/heads/master
| 2021-01-16T21:48:42.418277 | 2013-05-20T21:03:44 | 2013-05-20T21:03:44 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
Given two strings s and t of equal length, the Hamming distance between s and t, denoted dH(s,t), is the number of corresponding symbols that differ in s and t. See Figure 2.
Given: Two DNA strings s and t of equal length (not exceeding 1 kbp).
Return: The Hamming distance dH(s,t).
Sample Dataset
GAGCCTACTAACGGGAT CATCGTAATGACGGCCT
Sample Output
7
"""
f = open('rosalind_hamm.txt')
s = list(f.next().strip())
t = list(f.next().strip())
comp = zip(s,t)
ss = 0
for (x, y) in comp:
if x != y:
ss+=1
print ss
|
UTF-8
|
Python
| false | false | 2,013 |
19,473,381,751,560 |
29f20ba9f6c32a8d6cbb2359215a8e8b777313c6
|
2dc9ed7fc09dae24600edc4fa4c949f6eba3e339
|
/trunk/src/python/pyasynchio/test/__init__.py
|
5edf615c17e334d725f56182b083f8a9def77071
|
[
"MIT"
] |
permissive
|
BackupTheBerlios/pyasynchio-svn
|
https://github.com/BackupTheBerlios/pyasynchio-svn
|
e7f9e5043c828740599113f622fac17a6522fb4b
|
691051d6a6f6261e66263785f0ec2f1a30b854eb
|
refs/heads/master
| 2016-09-07T19:04:33.800780 | 2005-09-15T16:53:47 | 2005-09-15T16:53:47 | 40,805,133 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Copyright (c) 2005 Vladimir Sukhoy
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is furnished
# to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import unittest
# Package-level aggregate suite: each test module exposes its own `suite`
# attribute, which is collected here so the whole package can run as one.
suite = unittest.TestSuite()
import echo
suite.addTest(echo.suite)
import files
suite.addTest(files.suite)
|
UTF-8
|
Python
| false | false | 2,005 |
3,934,190,083,568 |
b8a798d8a648b4a59a9627d290843b8910983165
|
225554eb191a29f0e7b325b7b05196994d28bf78
|
/wwwold/documentation/pymaemo_tutorial/examples/hello_world_1.py
|
37064201624fe70c136d6c3ebe822d8c387526cc
|
[
"LicenseRef-scancode-public-domain"
] |
non_permissive
|
maemo-leste/pymaemo-svn
|
https://github.com/maemo-leste/pymaemo-svn
|
61d4cc397eb852a55f71837427c3f93602fe4a02
|
ff78e5e99fbbbe1f0dfff0a85748481c89023b1c
|
refs/heads/master
| 2020-07-10T02:39:53.326803 | 2010-07-02T14:15:35 | 2010-07-02T14:15:35 | 204,144,269 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python2.5
# Minimal PyMaemo/Hildon "Hello World": a window containing a single label.
import gtk
import hildon
window = hildon.Window()
# quit the GTK main loop when the window is closed
window.connect("destroy", gtk.main_quit)
label = gtk.Label("Hello World!")
window.add(label)
label.show()
window.show()
# enter the GTK event loop (blocks until the window is destroyed)
gtk.main()
|
UTF-8
|
Python
| false | false | 2,010 |
4,810,363,402,739 |
643e6e234140e3f9de1ae1210ae7267f94d63951
|
899dcb9eb1e1cfd7e16f4c0966ecb7745393b01d
|
/src/paper/results/analyze.py
|
a0568873d052053c15783835ca4ebf2a41f9e469
|
[] |
no_license
|
baubie/DTNSpecies
|
https://github.com/baubie/DTNSpecies
|
115b13372e970db6b914f5a06969423851f4c4bf
|
68731c67c907d615e668e3437db8500e93e7c3f4
|
refs/heads/master
| 2021-01-23T11:47:51.098025 | 2011-08-16T16:43:20 | 2011-08-16T16:43:20 | 851,687 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import sys
from subprocess import call
if len(sys.argv) < 2:
print "Error: Please provide a file to analze."
quit()
import csv
def unique(seq):
    """Return the items of *seq* in first-seen order with duplicates dropped.

    Uses a set for O(1) membership tests on hashable items (the original
    scanned the result list, which is O(n^2)), falling back to a linear scan
    for unhashable items so any input the original accepted still works.
    """
    seen = set()
    unhashable = []
    result = []
    for item in seq:
        try:
            if item in seen:
                continue
            seen.add(item)
        except TypeError:
            # unhashable item (e.g. a list): fall back to a linear scan
            if item in unhashable:
                continue
            unhashable.append(item)
        result.append(item)
    return result
# Capture raw data
# First CSV row holds the series names (python literals); the rest hold the
# x value followed by one y value per series.
filename = sys.argv[1]
data = csv.reader(open(filename+".dat", 'rb'), delimiter=',')
x = []
y = []
y_names = []
first = True
for d in data:
    if first:
        first = False
        y_names = d[1:]
        y = [[] for i in range(len(y_names))]
    else:
        x.append(float(d[0]))
        for i in range(len(y_names)):
            y[i].append(float(d[i+1]))
# NOTE(review): eval() on CSV header cells — fine for trusted, self-produced
# data files, but unsafe on untrusted input.
for i in range(len(y_names)):
    y_names[i] = eval(y_names[i])
listMode = False
numParams = 1
# When headers are lists, each series is parameterized by several values and
# the user must slice the data down to one free parameter.
if isinstance(y_names[0],list):
    numParams = len(y_names[0])
    listMode = True
print "Found %d parameters" % numParams
if listMode:
    # collect the distinct values each parameter takes across all series
    y_params = [[] for i in range(numParams)]
    for yi in y_names:
        for i in range(numParams):
            y_params[i].append(yi[i])
    for i in range(numParams):
        y_params[i] = unique(y_params[i])
    # one CLI argument per parameter: an index constrains it, "-" leaves it free
    y_slice = sys.argv[2:]
    if len(y_slice) != numParams:
        print "Error: Require %d parameters found %d" % (numParams, len(y_slice))
        print "Parameters: "
        for p in y_params:
            print p
        quit()
    new_y_names = []
    new_y = []
    freeparam = None
    for i in range(numParams):
        if y_slice[i] == "-": freeparam = i
    for i in range(numParams):
        if i != freeparam:
            print "Parameter %d constrained" % i
            print y_params[i][int(y_slice[i])]
        else:
            print "Parameter %d free" % i
            print y_params[i]
            new_y_names = y_params[i]
    # keep only the series whose constrained parameters match the requested values
    key = []
    for i in range(len(y_names)):
        goodparam = True
        for j in range(numParams):
            if j != freeparam and y_names[i][j] != y_params[j][int(y_slice[j])]:
                goodparam = False
        if goodparam:
            key.append(i)
    for yi in range(len(y)):
        if yi in key:
            new_y.append(y[yi])
    y_names = new_y_names
    y = new_y
# Per-series statistics: tuning-curve width at half max, best duration
# (midpoint of the peak plateau) and peak spike count.
tuning_widths = []
best_duration = []
most_spikes = []
for series in range(len(y)):
    # Calculate BD
    first = y[series].index(max(y[series]))
    last = first
    for i in range(first, len(y[series])):
        if y[series][i] == max(y[series]): last = i
    BD = (x[first]+x[last])/2.0 # Middle of curves
    best_duration.append(BD)
    most_spikes.append(max(y[series]))
    max_pos = y[series].index(max(y[series]))
    half = max(y[series]) / 2.0
    half_pos = -1
    width = 0
    if half in y[series]:
        # half-max is an actual sample: walk right past the peak to the last
        # sample still at half-max
        found = False
        for i in range(max_pos, len(y[series])):
            if found == True:
                if y[series][i] != half:
                    half_pos = i-1
                    break
            if found == False:
                if y[series][i] == half: found = True
        width = x[half_pos] - x[max_pos]
    else:
        # otherwise linearly interpolate the x position where the curve
        # crosses half-max on the way down
        for i in range(max_pos, len(y[series])):
            if y[series][i] < half:
                m = (y[series][i]-y[series][i-1])/(x[i]-x[i-1])
                distance = (half-y[series][i-1]) / m
                width = x[i-1]+distance - x[max_pos]
                break
    if max(y[series]) == 0: width = 0
    tuning_widths.append(width)
if len(y_names) == 0: y_names = ["Sliced"]
# Write the sliced series to <filename>.sliced (CSV with quoted headers).
f = open(filename+".sliced", 'w')
f.write('"x-axis"')
for i in range(len(y_names)):
    f.write(',"%s"' % str(y_names[i]))
f.write("\n")
for xi in range(len(x)):
    f.write(str(x[xi]))
    for yi in y:
        f.write(",%s" % str(yi[xi]))
    f.write("\n")
f.close()
# Write the per-series statistics to <filename>.analyze.
f = open(filename+".analyze", 'w')
f.write('"Series","Width","Best Duration","Peak Spikes"\n')
for i in range(len(y_names)):
    f.write("%s," % y_names[i])
    f.write("%s," % tuning_widths[i])
    f.write("%s," % best_duration[i])
    f.write("%s" % most_spikes[i])
    f.write("\n")
f.close()
def color(i, total):
    """Return a GLE 'rgb(r,g,b)' color string for series *i* of *total*.

    The color is linearly interpolated along a fixed gradient of anchor
    points (red -> cyan -> blue -> magenta -> black) according to the
    fraction i/total.
    """
    points = [ [0.00, [1,0,0]],
               [0.25, [0,1,1]],
               [0.50, [0,0,1]],
               [0.75, [1,0,1]],
               [1.00, [0,0,0]],
             ]
    pos = float(i)/float(total)
    # default to the final anchor; the original left col == [] for pos > 1
    # and crashed on col[0]
    col = points[-1][1]
    last = points[0]
    for p in points:
        if p[0] == pos:
            col = p[1]
            break
        if pos < p[0]:
            # Fraction of the way from `last` to `p`. Fix: the original used
            # (p[0]-pos)/(p[0]-last[0]), which inverted every gradient
            # segment and made the color jump discontinuously at anchors.
            t = (pos - last[0]) / (p[0] - last[0])
            col = [last[1][k] + (p[1][k] - last[1][k]) * t for k in range(3)]
            break
        last = p
    return 'rgb('+str(col[0])+','+str(col[1])+','+str(col[2])+')'
# Determine the highest responding x value over all y datasets
# (used to pad the x-axis range of the tuning-curve plot)
highest = -1
for ds in y:
    for yi in range(len(ds)-1):
        if ds[yi] > 0:
            highest = max(highest, x[yi+1])
if highest == -1: highest = 10
# Output GLE file
# Left graph: the sliced tuning curves; right graph: the per-series stats.
f = open(filename+".gle", "w")
f.write('size 8 3\n')
f.write('set font psh\n')
f.write('set hei 0.15\n')
f.write('set lwidth 0.02\n')
f.write('begin graph\n')
f.write('   size 4 3\n')
f.write('   scale auto\n')
f.write('   xtitle "Stimulus Duration (ms)"\n')
f.write('   ytitle "Mean Spikes per Trial"\n')
f.write('   x2axis off\n')
f.write('   y2axis off\n')
f.write('   yaxis min 0\n')
f.write('   xaxis min 0 max '+str(highest+int(highest*0.25))+'\n')
f.write('   data "'+filename+'.sliced"\n')
f.write('   xticks length -0.05\n')
f.write('   yticks length -0.05\n')
# one line per series, colored along the gradient from color()
for i in range(len(y)):
    f.write('   d'+str(int(i+1))+' line color '+color(i,len(y))+' smooth'+'\n')
f.write('   key compact nobox\n')
f.write('end graph\n')
f.write('amove 4 0\n')
f.write('begin graph\n')
f.write('   size 4 3\n')
f.write('   scale auto\n')
f.write('   xtitle " (ms)"\n')
f.write('   ytitle "Width (ms) / Number of Spikes"\n')
f.write('   x2axis off\n')
f.write('   y2axis off\n')
f.write('   data "'+filename+'.analyze"\n')
f.write('   xticks length -0.05\n')
f.write('   yticks length -0.05\n')
f.write('   let d4 = x, d1/d3\n')
f.write('   d1 line marker fcircle msize 0.1 color rgb(1,0,0)\n')
f.write('   d2 line marker fsquare msize 0.1 color rgb(0,1,0)\n')
f.write('   d3 line marker fdiamond msize 0.1 color rgb(0,0,1)\n')
f.write('   d4 line marker ftriangle msize 0.1 color rgb(0,0,0) key "Norm Width"\n')
f.write('   key compact nobox pos br\n')
f.write('end graph\n')
f.close()
# View results in QGLE
call(["qgle", filename+".gle"])
|
UTF-8
|
Python
| false | false | 2,011 |
171,798,726,746 |
e7e0e1462832a9a26992751b7f4804d802e123ff
|
cd93962023e5b0fd1038569c496ae6a35af31ffd
|
/scripts/final_graphs.py
|
e911140f70f165770c9d05658cc95dea4e30428e
|
[] |
no_license
|
DevanR/RailwaySimulator
|
https://github.com/DevanR/RailwaySimulator
|
63e466db542b8bf714a31ee18dcc3815c0a72765
|
d68ef147832e6a3efb6b473c3683a6292f712323
|
refs/heads/master
| 2021-01-25T08:42:21.049962 | 2013-10-08T16:09:53 | 2013-10-08T16:09:53 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import string, re, math, sys

# Per-protocol signalling message sizes in bytes: VMN registration, VMN
# handover and MR handover messages for each mobility scheme.
NEMO_VMN_reg = 284
NEMO_VMN_hand = 836
NEMO_MR_hand = 228
ILNP_VMN_reg = 1362
ILNP_VMN_hand = 144
ILNP_MR_hand = 1362
OPTI_VMN_reg = 284
OPTI_VMN_hand = 604
OPTI_MR_hand = 488
Number_CN = 2
Number_STN = 27
Hand_period = 60

folder = sys.argv[1]

mean_file = open(folder + '/mean_hop.txt', 'r')
# NOTE(review): std_hop.txt was opened but never read in the original;
# kept so a missing file still fails the same way.
std_file = open(folder + '/std_hop.txt', 'r')
Number_Hop = float(mean_file.read())
mean_file.close()
std_file.close()

input_reg = open(folder + '/train_0_reg_plot.txt', 'r')
input_hand = open(folder + '/train_0_hand_plot.txt', 'r')
nemo = open(folder + '/nemo.txt', 'a')
ilnp = open(folder + '/ilnp.txt', 'a')
opti = open(folder + '/opti.txt', 'a')
try:
    while 1:
        # Read both reg and hand files simultaneously, line by line; stop
        # once both are exhausted.
        line1 = input_reg.readline()
        columns1 = line1.split(" ")
        line2 = input_hand.readline()
        if not line1 and not line2:
            break
        columns2 = line2.split(" ")
        # Remove end of line from the last column of each row.
        columns1[2] = columns1[2][:-1]
        columns2[2] = columns2[2][:-1]
        # Calculate for NEMO: registration traffic plus per-STN MR handover
        # overhead, converted to KB over the handover period.
        total_sum = ( (float(columns1[1])*float(NEMO_VMN_reg)) + (Number_STN*NEMO_MR_hand)) / (1024*Hand_period)
        # 95% upper/lower limit
        total_error = (float(columns1[2])) + (float(columns2[2]))
        nemo.write(str(columns1[0]) + '\t' + str(total_sum) + '\t' + str(total_error) + '\n')
        # Calculate for ILNP: additionally counts per-handover VMN signalling
        # to each correspondent node across the measured hop count.
        total_sum = ( (float(columns1[1])*float(ILNP_VMN_reg)) + (Number_STN*ILNP_MR_hand) + ((float(columns2[1])*float(ILNP_VMN_hand)*Number_CN*Number_Hop)) ) / (1024*Hand_period)
        total_error = (float(columns1[2])) + (float(columns2[2]))
        ilnp.write(str(columns1[0]) + '\t' + str(total_sum) + '\t' + str(total_error) + '\n')
        # Calculate for OPTI (optimized scheme, same structure as ILNP).
        total_sum = ( (float(columns1[1])*float(OPTI_VMN_reg)) + (Number_STN*OPTI_MR_hand) + ((float(columns2[1])*float(OPTI_VMN_hand)*Number_CN*Number_Hop)) )/ (1024*Hand_period)
        total_error = (float(columns1[2])) + (float(columns2[2]))
        opti.write(str(columns1[0]) + '\t' + str(total_sum) + '\t' + str(total_error) + '\n')
finally:
    # fix: the original never closed any of these handles
    input_reg.close()
    input_hand.close()
    nemo.close()
    ilnp.close()
    opti.close()
|
UTF-8
|
Python
| false | false | 2,013 |
5,574,867,577,916 |
6b83837476c9c13b01cea605d59304e2cb07272e
|
2eb6d57b4f97fe2ea2cd6ab78512dd2c7a6e6ecc
|
/chapter4/exercise_2.py
|
4f101e62aa3232ada77c48374a21e34cc2a7bb41
|
[] |
no_license
|
afcarl/PythonDataStructures
|
https://github.com/afcarl/PythonDataStructures
|
4ba98bca168f535dc9c8ed9392ed313592850101
|
a620af0a1e0d707556a8883ecb5b79a6f1df56c7
|
refs/heads/master
| 2020-03-26T06:26:38.637461 | 2014-04-26T15:53:46 | 2014-04-26T15:53:46 | 144,605,553 | 1 | 0 | null | true | 2018-08-13T16:26:42 | 2018-08-13T16:26:42 | 2014-04-26T16:23:28 | 2014-04-26T16:23:27 | 436 | 0 | 0 | 0 | null | false | null |
def compare(x, y):
    """Print how *x* compares to *y*: less than, greater than, or equal.

    Modernized from Python 2 `print` statements to the Python 3 print()
    function; the printed text is unchanged.
    """
    if x < y:
        print(x, "is less than", y)
    elif x > y:
        print(x, "is greater than", y)
    else:
        print(x, "and", y, "are equal")
# Demonstration calls exercising all three branches of compare().
compare(4,5)
compare(5,4)
compare(5,5)
|
UTF-8
|
Python
| false | false | 2,014 |
283,467,875,575 |
212ebbbb8a2b93dd12ac5a8b008944082fe58787
|
42c9ff50c2d3235aba6060d3b41adb93f3435a61
|
/lecture/w4_data_type/yesterday_count_ex.py
|
a383a560413b16badc0bb6d10a237cc9ccc1b417
|
[] |
no_license
|
TeamLab/gachon_python_class
|
https://github.com/TeamLab/gachon_python_class
|
35196efcfd92be9de389f37bb31efb4105895d20
|
f52f62fc1635b277311b25ded007976c24903926
|
refs/heads/master
| 2016-08-04T08:09:10.092402 | 2014-12-01T10:43:10 | 2014-12-01T10:43:10 | 24,279,051 | 9 | 4 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Count how many times "Yesterday" and "yesterday" occur in the lyrics file.
# Improvements over the original: `with` closes the file even on error, the
# file object is iterated directly instead of a manual readline() loop, the
# lyric is assembled with join() instead of quadratic `+` concatenation, and
# the Python 2 print statements are converted to print() calls.
with open("yesterday.song", 'r') as f:
    yesterday_lyric = "".join(line.strip() + "\n" for line in f)
n_of_yesterday = yesterday_lyric.count("Yesterday")
n2_of_yesterday = yesterday_lyric.count("yesterday")
print("Number of A Word 'Yesterday'", n_of_yesterday)
print("Number of A Word 'yesterday'", n2_of_yesterday)
|
UTF-8
|
Python
| false | false | 2,014 |
6,193,342,861,057 |
e95d41795ff4fc9cb8fe78e1c2adfc4089e677f8
|
b809c978ea607e74a04c27ead4e68db0fa82a06d
|
/clonedigger/logilab/common/interface.py
|
358921205ba75db5cd4da5f97b71a469c7aa0563
|
[
"GPL-3.0-only"
] |
non_permissive
|
evvers/clonedigger
|
https://github.com/evvers/clonedigger
|
6e4417a812488c85f9292f21513deda3d49ca746
|
959fcb845c919254fe0d25e612ea864e7797975c
|
refs/heads/master
| 2021-01-10T20:14:29.822002 | 2014-05-06T05:36:36 | 2014-05-06T05:36:36 | 19,551,876 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Copyright (c) 2002-2007 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:[email protected]
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""
bases class for interfaces to provide "light" interface handling.
TODO:
_ implements a check method which check that an object implements the
interface
_ Attribute objects
This module requires at least python 2.2
"""
from types import ListType, TupleType
class Interface:
    """Base class for interfaces.

    Subclasses can ask whether a given object implements them through
    the `is_implemented_by` class method.
    """

    @classmethod
    def is_implemented_by(cls, instance):
        """Return True when *instance* declares this interface."""
        return implements(instance, cls)
def implements(obj, interface):
    """Return True if *obj* (an instance or a class) declares *interface*.

    The declaration is read from the ``__implements__`` attribute, which may
    be a single interface class or a list/tuple of them; subclasses of
    *interface* count as implementations.
    """
    declared = getattr(obj, '__implements__', ())
    if not isinstance(declared, (list, tuple)):
        declared = (declared,)
    return any(issubclass(iface, interface) for iface in declared)
def extend(klass, interface, _recurs=False):
    """Record *interface* in ``klass.__implements__`` unless already present.

    The container type (list vs tuple) of an existing ``__implements__`` is
    preserved.  Subclasses of *klass* are updated recursively so the whole
    hierarchy stays consistent.  NOTE: klass should be a new-style class.
    """
    if not implements(klass, interface):
        try:
            declared = klass.__implements__
        except AttributeError:
            container, declared = tuple, []
        else:
            container, declared = type(declared), list(declared)
        declared.append(interface)
        klass.__implements__ = container(declared)
        for child in klass.__subclasses__():
            extend(child, interface, _recurs=True)
    elif _recurs:
        # Already declared here, but a recursive call must still visit the
        # subclasses so they pick up the interface as well.
        for child in klass.__subclasses__():
            extend(child, interface, _recurs=True)
|
UTF-8
|
Python
| false | false | 2,014 |
12,240,656,804,020 |
3006896f1ff9c9367d47cdd5354caab51237f375
|
c7eb6ff4963f9091cf25e4d3b157ef2a50096111
|
/pl_autobuild/pl_sliceadder.py
|
2b70ecf2c9f9098de50f3f189bf85fa0061026d3
|
[] |
no_license
|
nanodayo/sweets
|
https://github.com/nanodayo/sweets
|
87309f10b9b2db53c5254866bd26ee335efe4496
|
21bb23776320b8ef0dd642bc005579d7c3039558
|
refs/heads/master
| 2020-05-19T05:24:01.472888 | 2011-02-18T06:18:01 | 2011-02-18T06:18:01 | 1,080,462 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
import xmlrpclib
import commands
import sys
import os
pwd = os.getcwd()
os.chdir('/etc/planetlab/')
import plc_config
os.chdir(pwd)
apiurl = 'https://' + plc_config.PLC_API_HOST + ':' + str(plc_config.PLC_API_PORT) + plc_config.PLC_API_PATH
api = xmlrpclib.ServerProxy(apiurl)
argv = sys.argv
argc = len(argv)
slice = 'pl_nanodayo'
#if (argc < 3):
# print 'Usage: # python %s <slice_name> <public_keyfile>' % argv[0]
# quit()
if(argc > 2):
slice = argv[1]
keyfile = argv[2]
elif(argc == 2):
slice = argv[1]
keyfile = slice + '.pub'
if not os.path.exists(slice):
commands.getstatusoutput('ssh-keygen -b 2048 -t rsa -f ' + slice)
else:
print 'Usage: # python %s <slice_name> (<public_keyfile>)' % argv[0]
quit()
f = open(keyfile, 'r')
for line in f:
key = line
f.close
# user
user = '[email protected]'
first = 'Daisuke'
last = 'Matsui'
password = 'root'
description = 'Created via script'
slice_url = 'http://dummy.nanodayo.org'
auth = {}
auth['Username'] = '[email protected]'
auth['AuthString'] = 'root'
auth['AuthMethod'] = 'password'
auth['role'] = 'admin'
nodes = []
result = api.GetNodes(auth,{},['hostname'])
for node in result:
nodes.append(node['hostname'])
result = api.AddSlice(auth,{'name':slice, 'url':slice_url,'description':description})
print result
pid = api.AddPerson(auth, {'first_name':first,'last_name':last,'email':user,'password':password,'enabled':True})
print result
result = api.AddPersonToSlice(auth, user ,slice)
print result
result = api.AddPersonKey(auth, user, {'key_type':'ssh','key':key})
print result
result = api.AddSliceToNodes(auth, slice,nodes)
result = api.UpdatePerson(auth, pid, {'enabled':True})
print result
|
UTF-8
|
Python
| false | false | 2,011 |
13,400,298,005,043 |
7379b64c9e45bb3903deb89f6e23263d237d7ac3
|
f9275037f2bc6e2ab2c745423461b63855513b79
|
/main.py
|
ba4621b6d9459b94be724503cd64e4c53a8a72dc
|
[] |
no_license
|
pfiziev/solvemenext
|
https://github.com/pfiziev/solvemenext
|
8c33fdec2bdfd49cb7332d62c3cbe2796713b848
|
875e1045e7e81532662183638dcbd7058d0b6c16
|
refs/heads/master
| 2020-05-17T17:38:19.970379 | 2013-04-29T09:34:48 | 2013-04-29T09:34:48 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import datetime
import urllib
import webapp2
import jinja2
import os
from google.appengine.ext import db
from google.appengine.api import users
JINJA_ENVIRONMENT = jinja2.Environment(loader=jinja2.FileSystemLoader(
os.path.dirname(__file__)))
JINJA_ENVIRONMENT.globals.update(zip=zip)
class BaseRequestHandler(webapp2.RequestHandler):
    """Common helpers for all handlers: redirects, 404 replies, and cookies."""

    def seeother(self, uri):
        """Redirect with 303 See Other to *uri*, resolved against the request URL."""
        self.response.set_status(303)
        import urlparse
        absolute_url = urlparse.urljoin(self.request.uri, uri)
        self.response.headers['Location'] = str(absolute_url)
        self.response.clear()

    def notfound(self):
        """Reply with an empty 404 response."""
        self.response.set_status(404)
        self.response.clear()

    def set_cookie(self, key, value='', max_age=None, path='/', domain=None, secure=None):
        """
        Set (add) a cookie for the response.

        max_age is in seconds; path/domain/secure follow the Set-Cookie
        attribute semantics.
        """
        header = key + '=' + urllib.quote(value)
        if max_age:
            expires = (datetime.datetime.now() +
                       datetime.timedelta(seconds=max_age)).strftime("%a, %d %b %Y %H:%M:%S GMT")
            header += '; expires=' + expires
        if path != '/':
            header += '; path=' + path
        if domain is not None:
            # BUG FIX: the original used `header = ...`, discarding the
            # cookie text built so far whenever a domain was given.
            header += '; domain=' + domain
        if secure is not None:
            # BUG FIX: same assignment-instead-of-append defect as `domain`.
            header += '; secure=' + str(secure)
        self.response.headers.add_header('Set-Cookie', header)

    def clear_cookie(self, key):
        """Expire the cookie *key* by sending a date in the past."""
        self.response.headers.add_header('Set-Cookie', key + '=; expires=Tue, 01 Jan 2008 00:00:00 GMT')
class Upvote(BaseRequestHandler):
    """Stub upvote endpoint; real vote handling is not implemented yet."""

    def get(self):
        response = self.response
        response.headers['Content-Type'] = 'text/plain'
        response.write('Hello, webapp2 World!')
class Downvote(BaseRequestHandler):
    """Stub downvote endpoint; currently just echoes a greeting."""

    def get(self):
        out = self.response
        out.headers['Content-Type'] = 'text/plain'
        out.write('Hello, webapp2 World!')
class Poll(db.Model):
    """Datastore model for one poll: a title plus per-problem vote counts.

    NOTE(review): the original docstring ("Guestbook entry with author,
    content, and date") was copied from a sample app and did not match
    these fields.
    """
    title = db.StringProperty()            # poll display title
    n_problems = db.IntegerProperty()      # set to len(problem_titles) by AddPoll
    problem_titles = db.StringListProperty()
    votes = db.ListProperty(int)           # one counter per problem, parallel to problem_titles
    created = db.DateTimeProperty(auto_now_add=True)  # used for newest-first ordering
class AddPoll(BaseRequestHandler):
    """Admin-only creation form for new polls."""

    def get(self):
        # Only admins may see the creation form; everyone else is sent home.
        # (The original also fetched get_current_user() into an unused local.)
        if users.is_current_user_admin():
            self.response.write(JINJA_ENVIRONMENT.get_template('add.html').render({}))
        else:
            self.seeother('/')

    def post(self):
        if users.is_current_user_admin():
            title = self.request.get('title')
            # Drop blank lines.  A list comprehension (unlike filter()) is a
            # sized list on both Python 2 and 3, so the len() calls below
            # always work.
            problem_titles = [t for t in self.request.get('problem_titles').split('\n')
                              if t.strip() != '']
            Poll(title=title,
                 n_problems=len(problem_titles),
                 problem_titles=problem_titles,
                 votes=[0] * len(problem_titles)).put()
        self.seeother('/')
class DeletePoll(BaseRequestHandler):
    """Deletes the poll named by the ``pkey`` query parameter (admins only).

    Every caller is redirected back to the index afterwards.
    """

    def get(self):
        if users.is_current_user_admin():
            target = Poll.get(self.request.get('pkey'))
            if target:
                target.delete()
        self.seeother('/')
class ViewPoll(BaseRequestHandler):
    """Renders the detail page for a single poll; 404 when it doesn't exist."""

    def get(self):
        poll = Poll.get(self.request.get('pkey'))
        if poll:
            page = JINJA_ENVIRONMENT.get_template('view.html')
            self.response.write(page.render({'poll': poll}))
        else:
            self.notfound()
class MainPage(BaseRequestHandler):
    """Front page: the 100 newest polls plus a login/logout ("admin") link."""

    def get(self):
        user = users.get_current_user()
        if user:
            auth_link = users.create_logout_url('/')
            auth_text = 'logout'
        else:
            auth_link = users.create_login_url(self.request.uri)
            auth_text = 'admin'
        context = {
            'login_link': auth_link,
            'login_text': auth_text,
            'all_polls': Poll.all().order('-created').fetch(100),
            'admin': users.is_current_user_admin(),
        }
        self.response.write(JINJA_ENVIRONMENT.get_template('index.html').render(context))
# WSGI application: URL routes for the polling app.
# (The /upvote and /downvote stubs are not routed yet.)
app = webapp2.WSGIApplication([('/', MainPage),
                               ('/add', AddPoll),
                               ('/del', DeletePoll),
                               ('/view', ViewPoll)
                               ],
                              debug=True)
|
UTF-8
|
Python
| false | false | 2,013 |
16,698,832,852,717 |
2fce43be19f46ebd281149e323dfab1e02f5978d
|
825d882aa04cd375457fc6750fea728026a5ab73
|
/gor0x/ga/tests/context.py
|
658f6cac4db3eda1baff7d0775fa6263d8b7f03b
|
[] |
no_license
|
beng/gor0x
|
https://github.com/beng/gor0x
|
60363d5e71a39c61e5bff25272866586a212e69d
|
1602cc54ec009cb9abeeeacf210e8bc56d788f09
|
refs/heads/master
| 2021-01-23T02:23:53.586659 | 2013-08-03T16:43:28 | 2013-08-03T16:43:28 | 3,583,336 | 2 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import sys
import os
# Make the package root (one directory up) importable so the test suite can
# `import gor0x` without installing the package first.
sys.path.insert(0, os.path.abspath('..'))
from gor0x import ga, chromosome, markov
|
UTF-8
|
Python
| false | false | 2,013 |
14,817,637,193,560 |
81171507758c208ccec94640c9da67b9304ff851
|
614cc01bdea791ac713e832182705322ab57cd16
|
/gcalendar.py
|
973afb6ecac4b5228df8a3adff15391c57fdd7de
|
[] |
no_license
|
annielytical/gcalendar
|
https://github.com/annielytical/gcalendar
|
f866a3caf8d86c5d4f885d1e265022babe85fd8b
|
c06744b04f288c63ffd12040a7de90ff87afe0b4
|
refs/heads/master
| 2021-01-21T02:56:01.889866 | 2014-10-04T14:53:02 | 2014-10-04T14:53:02 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# modified from:
# https://developers.google.com/api-client-library/python/samples/authorized_api_cmd_line_calendar.py
import httplib2
import sys
import time
import datetime
from apiclient.discovery import build
from oauth2client.file import Storage
from oauth2client.client import AccessTokenRefreshError
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.tools import run_flow
from oauth2client.client import flow_from_clientsecrets
def main():
    """Authorize against Google Calendar via OAuth2, then print the summary
    of every primary-calendar event between one hour ago and twelve hours
    from now.  (Python 2 script; credentials are cached in credentials.dat.)
    """
    scope = 'https://www.googleapis.com/auth/calendar'
    flow = flow_from_clientsecrets('client_secret.json', scope=scope)
    storage = Storage('credentials.dat')
    credentials = storage.get()
    # run_flow() expects argparse-style flags; this stub forces the
    # out-of-band (no local webserver) auth flow and quiet logging.
    class fakeargparse(object):  # fake argparse.Namespace
        noauth_local_webserver = True
        logging_level = "ERROR"
    flags = fakeargparse()
    if credentials is None or credentials.invalid:
        # No (valid) cached credentials: run the OAuth dance and cache them.
        credentials = run_flow(flow, storage, flags)
    http = httplib2.Http()
    http = credentials.authorize(http)
    service = build('calendar', 'v3', http=http)
    # get the next 12 hours of events
    epoch_time = time.time()
    start_time = epoch_time - 3600  # 1 hour ago
    end_time = epoch_time + 12 * 3600  # 12 hours in the future
    # Build a "+HHMM"/"-HHMM" local-UTC-offset suffix for RFC3339 timestamps.
    # NOTE(review): time.altzone is the DST offset; confirm behaviour when
    # DST is not in effect (time.timezone may be intended).
    tz_offset = - time.altzone / 3600
    if tz_offset < 0:
        tz_offset_str = "-%02d00" % abs(tz_offset)
    else:
        tz_offset_str = "+%02d00" % abs(tz_offset)
    start_time = datetime.datetime.fromtimestamp(start_time).strftime("%Y-%m-%dT%H:%M:%S") + tz_offset_str
    end_time = datetime.datetime.fromtimestamp(end_time).strftime("%Y-%m-%dT%H:%M:%S") + tz_offset_str
    print "Getting calendar events between: " + start_time + " and " + end_time
    events = service.events().list(calendarId='primary', timeMin=start_time, timeMax=end_time, singleEvents=True).execute()
    #pprint.pprint(events)
    for event in events['items']:
        print event["summary"]
# Script entry point.
if __name__ == '__main__':
    main()
|
UTF-8
|
Python
| false | false | 2,014 |
8,890,582,303,740 |
937322acf7fcbe6b78f5f277ef5cf09b243bcf3d
|
3ed3d4d21909837c26c61c08b47749c6ba7f84ce
|
/data/level_data/level.py
|
1b12d8ea28823245a81505ad13ddbf3c5d73ec34
|
[] |
no_license
|
lisp-ceo/SmallCat
|
https://github.com/lisp-ceo/SmallCat
|
8c7156b197c0f29c4f072046f3980afbcb3c3982
|
44a50e38e3376211f3c2e8f9437d62584068bd00
|
refs/heads/master
| 2021-05-27T02:06:43.116955 | 2013-01-23T20:34:05 | 2013-01-23T20:34:05 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
Parent class for all Level and Stage objects
"""
class LevelData(object):
    """Shared data for all levels: canvas limits, tile images, and the map
    from level-file characters to tiles.

    Map legend (used in BLOCKDATA):
        '#' - Impassible
        ' ' - Generic
        '$' - Hiding spot
    """
    def __init__(self):
        self.stages = []
        # Canvas and tile-block size limits (pixels).
        self.CANVAS_MAX_H = 600
        self.CANVAS_MAX_W = 800
        self.CANVAS_MIN_H = 20
        self.CANVAS_MIN_W = 20
        self.BLOCKS_MAX_H = 20
        self.BLOCKS_MAX_W = 20
        self.BLOCKS_MIN_H = 20
        self.BLOCKS_MIN_W = 20
        self.canvas_size = None
        self.BLOCKS = {} # Dictionary of images used as tiles
        self.BLOCKDATA = [] # Array of character codes representing the blocks
        # BUG FIX: the original dict listed 'IMPASSIBLE' twice ('#' then '$'),
        # so the impassible marker was silently overwritten by '$'.  Per the
        # legend above, '$' is the hiding spot and now has its own key.
        self.MAPPING = {
            'IMPASSIBLE':'#',
            'GENERIC':' ',
            'HIDING_SPOT':'$',
            'BIGTESTICLES' :'@'
        } # Dictionary that maps file data to images
        self.canvas = None # This attribute is filled in by the caller
class StageData(object):
    """Placeholder base class for per-stage data; no behaviour defined yet."""
    pass
|
UTF-8
|
Python
| false | false | 2,013 |
9,878,424,829,355 |
c2e4cf475b4a33955e12961ffcf8695a690478f5
|
ceebd0cc47512abf28250bf867d459d3e4009f80
|
/figure5.py
|
7818fea9fd64b584c72cf94b93626f3f38643aaa
|
[] |
no_license
|
AUESG/european_grid_ext
|
https://github.com/AUESG/european_grid_ext
|
2cfdf40feb9f45a450797a34d8048ac39a385caf
|
cd38b48f828517e546924af130cc0ecf97d3c589
|
refs/heads/master
| 2019-02-01T07:15:29.259225 | 2013-11-27T17:01:20 | 2013-11-27T17:01:20 | 13,417,060 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import multiprocessing as mp
import numpy as np
import magnus_figutils as fig
# Solve the figure-5 interpolation problems in parallel with 8 workers.
pool = mp.Pool(8)
scalefactorsA = [0.5, 1, 2, 4, 6, 8, 10, 12, 14]
#scalefactorsA = np.linspace(0,1,11) # for use with the alternative A rule
                                    # that is downscaling the unsconst.
                                    # flow.
pool.map(fig.solve_lin_interpol, scalefactorsA)
# The linquant and pure-quantile sweeps below are currently disabled;
# uncomment the pool.map lines to regenerate those data sets.
scalefactorsB = np.linspace(0, 2.5, 10)
#pool.map(fig.solve_linquant_interpol, scalefactorsB)
quantiles = [0.5, 0.8, 0.9, 0.95, 0.97, 0.99, 0.995, 0.999, 0.9995, 0.9999, 1]
#pool.map(fig.solve_quant_interpol, quantiles)
|
UTF-8
|
Python
| false | false | 2,013 |
8,821,862,851,086 |
aeadeb55cfa55e1e07e44f6a61e88feb90d71536
|
a3e736623a85b441d1a7201e038f222815ae27e6
|
/nix/test/test_dimensions.py
|
7059b30581602b78a16bd4cf93a2bf01cb0a05e2
|
[
"BSD-2-Clause",
"LicenseRef-scancode-warranty-disclaimer"
] |
non_permissive
|
balint42/nixpy
|
https://github.com/balint42/nixpy
|
d660923bbd76e959dd53e2bd77073d68ce941cd8
|
dd13ac151cd3829d56e5abc28df91ccc0666b1b9
|
refs/heads/master
| 2021-01-15T22:46:41.409204 | 2014-09-04T17:45:51 | 2014-09-04T17:49:27 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Copyright (c) 2014, German Neuroinformatics Node (G-Node)
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted under the terms of the BSD License. See
# LICENSE file in the root of the Project.
import unittest
from nix import *
# Shared fixtures for the dimension tests: a 10-tick range, a sampling
# interval, and label strings.
test_range = tuple(float(i) for i in range(10))
test_sampl = 0.1
test_label = "test label"
test_labels = tuple("%d_label" % i for i in range(10))
class TestDimensions(unittest.TestCase):
    """Exercises the three NIX dimension descriptors (set, sampled, range)
    attached to a DataArray.

    NOTE(review): left byte-identical apart from documentation -- the
    assertions depend on the exact call order against the HDF5-backed
    `nix` objects.
    """
    def setUp(self):
        # Fresh file per test; Overwrite discards any previous unittest.h5.
        self.file = File.open("unittest.h5", FileMode.Overwrite)
        self.block = self.file.create_block("test block", "recordingsession")
        self.array = self.block.create_data_array("test array", "signal", DataType.Float, (0, ))
        # Append order determines each dimension's 1-based index asserted below.
        self.set_dim = self.array.append_set_dimension()
        self.sample_dim = self.array.append_sampled_dimension(test_sampl)
        self.range_dim = self.array.append_range_dimension(test_range)
    def tearDown(self):
        # Drop the block so nothing leaks between tests, then close the file.
        del self.file.blocks[self.block.id]
        self.file.close()
    def test_set_dimension(self):
        # Set dimension: index/type are fixed; labels round-trip.
        assert(self.set_dim.index == 1)
        assert(self.set_dim.dimension_type == DimensionType.Set)
        assert(self.set_dim.labels == ())
        self.set_dim.labels = test_labels
        assert(self.set_dim.labels == test_labels)
    def test_sample_dimension(self):
        # Sampled dimension: label/unit/offset are optional (None clears),
        # sampling_interval is mandatory and mutable.
        assert(self.sample_dim.index == 2)
        assert(self.sample_dim.dimension_type == DimensionType.Sample)
        assert(self.sample_dim.label is None)
        self.sample_dim.label = test_label
        assert(self.sample_dim.label == test_label)
        self.sample_dim.label = None
        assert(self.sample_dim.label is None)
        assert(self.sample_dim.unit is None)
        self.sample_dim.unit = "mV"
        assert(self.sample_dim.unit == "mV")
        self.sample_dim.unit = None
        assert(self.sample_dim.unit is None)
        assert(self.sample_dim.sampling_interval == test_sampl)
        self.sample_dim.sampling_interval = 1.123
        assert(self.sample_dim.sampling_interval == 1.123)
        assert(self.sample_dim.offset is None)
        self.sample_dim.offset = 0.3
        assert(self.sample_dim.offset == 0.3)
        self.sample_dim.offset = None
        assert(self.sample_dim.offset is None)
    def test_range_dimension(self):
        # Range dimension: label/unit optional, ticks round-trip.
        assert(self.range_dim.index == 3)
        assert(self.range_dim.dimension_type == DimensionType.Range)
        assert(self.range_dim.label is None)
        self.range_dim.label = test_label
        assert(self.range_dim.label == test_label)
        self.range_dim.label = None
        assert(self.range_dim.label is None)
        assert(self.range_dim.unit is None)
        self.range_dim.unit = "mV"
        assert(self.range_dim.unit == "mV")
        self.range_dim.unit = None
        assert(self.range_dim.unit is None)
        assert(self.range_dim.ticks == test_range)
        other = tuple([i*3.14 for i in range(10)])
        self.range_dim.ticks = other
        assert(self.range_dim.ticks == other)
|
UTF-8
|
Python
| false | false | 2,014 |
15,032,385,545,341 |
f3065e0565aa23cd7395124fc9103288ae40a691
|
634bc610b84b3af0d3280106910f050a653b2dc8
|
/setup.py
|
fe5a56d306ef15385319c830fde7ddeb5ab43136
|
[
"MIT"
] |
permissive
|
mabotech/mabolab
|
https://github.com/mabotech/mabolab
|
37aec8d501880ebd398a63d099aebe3f3c46dd94
|
9d70781e438d5597cbb98e3ff3702658036262a3
|
refs/heads/master
| 2020-06-04T17:48:03.790904 | 2014-04-05T08:37:07 | 2014-04-05T08:37:07 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Build script for the mabolab package (setuptools).
import os
import sys
from setuptools import setup, find_packages
# Long description is assembled from the README and changelog next to setup.py.
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.txt')).read()
CHANGES = open(os.path.join(here, 'CHANGES.txt')).read()
# Runtime dependencies installed alongside the package.
requires = [
    'simplejson',
    'SQLAlchemy',
    'zope.interface',
    'flask',
    'pyro',
    ]
setup(name='mabolab',
      version='0.0.1',
      description='mabolab',
      long_description=README + '\n\n' + CHANGES,
      classifiers=[
        "Programming Language :: Python",
        "Framework :: ",  # NOTE(review): incomplete trove classifier -- fill in or drop
        "Topic :: Internet :: WWW/HTTP",
        "Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
        ],
      author='MaboTech',
      license='MIT',
      author_email='[email protected]',
      url='http://www.mabotech.com',
      keywords='mabotech lab lib web',
      packages=find_packages(),
      include_package_data=True,
      zip_safe=False,
      test_suite='mabolab',
      install_requires = requires,
      #data_files=[]
      )
|
UTF-8
|
Python
| false | false | 2,014 |
6,047,313,971,468 |
12ba201b27eff6b23194a4fc7438be68060fd0bf
|
bbf5a865d850eb6ed3a914c76360c5dbabfc70e6
|
/Ejercicio13.py
|
88b2c5a740162f32d9818dee8f3c5b9461820bcd
|
[] |
no_license
|
elyex/elyexPythonPygameEjercicios
|
https://github.com/elyex/elyexPythonPygameEjercicios
|
ad44101728fd0701c160067183a0062925061be2
|
c289aacc147dfd443e2db993f47d87d61caeb932
|
refs/heads/master
| 2016-09-10T00:02:02.677743 | 2013-09-18T01:22:38 | 2013-09-18T01:22:38 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#--------------------------------------------
# Author: Mauricio Eyx
# GitHub: https://github.com/elyex
# Twitter: @MauEyx
# Facebook: https://www.facebook.com/MauEyx
#--------------------------------------------
# Program: move an image left/right with the keyboard arrow keys
#--------------------------------------------
import pygame # import the Pygame module
pygame.init() # initialise the pygame libraries
pantalla=pygame.display.set_mode((480,300))
salir=False
reloj1=pygame.time.Clock()
imagen1=pygame.image.load("monster.png")
(x,y) = (100,100)
vx=0
# Static black rectangle drawn every frame.  NOTE(review): no collision test
# against r1 is performed -- presumably intended as an obstacle; confirm.
r1=pygame.Rect(250,80,10,400)
while salir!=True: # main game loop
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            salir=True
        # Arrow key pressed: move 10 px per frame in that direction.
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_LEFT:
                vx-=10
            if event.key == pygame.K_RIGHT:
                vx+=10
        # Key released: stop horizontal movement.
        if event.type == pygame.KEYUP:
            if event.key == pygame.K_LEFT:
                vx=0
            if event.key == pygame.K_RIGHT:
                vx=0
    x+=vx
    reloj1.tick(15) # cap the loop at 15 frames per second
    pantalla.fill((255,255,255))
    pantalla.blit(imagen1,(x,y))
    pygame.draw.rect(pantalla,(0,0,0),r1)
    pygame.display.update()
pygame.quit()
|
UTF-8
|
Python
| false | false | 2,013 |
13,761,075,243,113 |
ffb99f7c4cb479279718c2afd82fc5def63e44eb
|
86144988503f1063cb309d66245997dae88cd5d0
|
/packages/Compass/AutoBuildOnSave.py
|
d94a9a4eaa79e2c4f6e68a6b151f067ab9f4d099
|
[] |
no_license
|
vsxed/sublime-set
|
https://github.com/vsxed/sublime-set
|
96240fc2924c4b45af2af289ffbd32e63ec50988
|
28bd3dd6c1bc2ef6cd1dc20e89e2e97c5d5d8f31
|
refs/heads/master
| 2020-02-27T22:02:53.562969 | 2014-03-06T21:03:35 | 2014-03-06T21:03:35 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from os.path import splitext, basename
import sublime_plugin
class AutoBuildOnSave(sublime_plugin.EventListener):
    """Runs the window's build command whenever a Sass/SCSS file is saved."""

    def on_post_save(self, view):
        saved_path = view.file_name()
        _, extension = splitext(basename(saved_path))
        if extension not in (".sass", ".scss"):
            return []
        view.window().run_command("build")
|
UTF-8
|
Python
| false | false | 2,014 |
12,197,707,146,089 |
4155ae93edf4f13deb5cc935a87f7a33242f57e4
|
8060d121f446c7f484e95a155645710e0f3b30de
|
/Products/ATSchemaEditorNG/__init__.py
|
0790f4525d7d6614ab7ac201cc80b4b3f5ec3b13
|
[
"LicenseRef-scancode-unknown-license-reference",
"LGPL-2.1-only"
] |
non_permissive
|
a-pasquale/Products.ATSchemaEditorNG
|
https://github.com/a-pasquale/Products.ATSchemaEditorNG
|
e2c9a9f86d4a824732be7c89e580cb6869449273
|
b06491265aa3fbda061ecc9513f2a87ae15f8181
|
refs/heads/master
| 2021-03-12T23:25:40.447268 | 2012-03-02T17:39:38 | 2012-03-02T17:39:38 | 3,471,589 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: iso-8859-1 -*-
"""
ATSchemaEditorNG
License: see LICENSE.txt
$Id: __init__.py 52127 2007-10-21 11:42:29Z naro $
"""
from Products.CMFCore.DirectoryView import registerDirectory
from config import SKINS_DIR, GLOBALS
# Expose the skins/ directory contents to the CMF skin machinery.
registerDirectory(SKINS_DIR, GLOBALS)
# make refresh possible
from SchemaEditor import SchemaEditor
from ParentManagedSchema import ParentManagedSchema
import Products.CMFCore
from Products.Archetypes import process_types
from Products.Archetypes.public import listTypes
from config import *
from Products.CMFCore.permissions import AddPortalContent
import patches
def initialize(context):
    """Zope product initializer: registers the ATSE demo content types
    (optional) and the two schema-editor tools with the CMF."""
    # install ATSE Tool
    import SchemaEditorTool
    import ATSETemplateTool
    if INSTALL_DEMO_TYPES:
        import examples.content
    content_types, constructors, ftis = process_types(listTypes(PROJECT_NAME),
                                                      PROJECT_NAME)
    Products.CMFCore.utils.ContentInit(
        '%s Content' % PKG_NAME,
        content_types = content_types,
        permission = AddPortalContent,
        extra_constructors = constructors,
        fti = ftis,
        ).initialize(context)
    tools = (SchemaEditorTool.SchemaEditorTool, ATSETemplateTool.ATSETemplateTool )
    # NOTE(review): both ToolInit calls receive the full `tools` tuple, so
    # each tool appears to be initialized twice -- confirm this is intended.
    Products.CMFCore.utils.ToolInit(meta_type=SchemaEditorTool.SchemaEditorTool.meta_type,
                                    tools=tools,
                                    icon='tool.jpg',
                                    ).initialize(context)
    Products.CMFCore.utils.ToolInit(meta_type=ATSETemplateTool.ATSETemplateTool.meta_type,
                                    tools=tools,
                                    icon='tool.jpg',
                                    ).initialize(context)
|
UTF-8
|
Python
| false | false | 2,012 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.