repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (string) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool)
---|---|---|---|---|---|---|---|---|---|---
petezybrick/iote2e
|
iote2e-pyclient/src/iote2epyclient/schema/iote2erequest.py
|
1
|
2136
|
# Copyright 2016, 2017 Peter Zybrick and others.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Iote2eRequest
:author: Pete Zybrick
:contact: [email protected]
:version: 1.0.0
"""
from iote2epyclient.schema.iote2ebase import Iote2eBase
class Iote2eRequest(Iote2eBase):
'''
Avro schema for Request
'''
def __init__(self, login_name, source_name, source_type, request_uuid, request_timestamp, pairs, operation, metadata={} ):
self.login_name = login_name
self.source_name = source_name
self.source_type = source_type
self.request_uuid = request_uuid
self.request_timestamp = request_timestamp
self.pairs = pairs
self.operation = operation
self.metadata = metadata
def __str__(self):
return 'login_name={}, source_name={}, source_type={}, request_uuid={}, request_timestamp={}, pairs={}, operation={}, metadata={}'.format(self.login_name, self.source_name, self.source_type, self.request_uuid, self.request_timestamp, self.pairs, self.operation, self.metadata)
@staticmethod
def requestFromAvroBinarySchema( schema, rawBytes ):
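# decode the Avro binary payload into a dict, then rebuild the request object from its fields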
obj = Iote2eBase.commonFromAvroBinarySchema( schema, rawBytes)
iote2eRequest = Iote2eRequest( login_name=obj['login_name'], source_name=obj['source_name'], source_type =obj['source_type'],
request_uuid=obj['request_uuid'],request_timestamp=obj['request_timestamp'],pairs=obj['pairs'],
operation=obj['operation'],metadata=obj['metadata'])
return iote2eRequest
|
apache-2.0
| 8,848,055,591,956,890,000 | 40.076923 | 284 | 0.672285 | false |
superbatlc/phonecab
|
archives/views.py
|
1
|
24415
|
from urllib import urlencode
from django.http import Http404
from django.http import HttpResponse
from django.conf import settings
from django.db.models import Q
from django.shortcuts import render_to_response, redirect
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist
from django.template import RequestContext
from django.template.loader import render_to_string
from django.contrib.auth.models import User
from acls.models import Acl
from archives.models import *
from helper.Helper import Helper
from audits.models import Audit
from prefs.models import Pref, Extension
from cdrs.models import SuperbaCDR
@login_required
def archive_phoneuser_home(request):
"""Phoneusers page"""
import time
d = request.GET.dict()
user = request.user
variables = Acl.get_permissions_for_user(user.id, user.is_staff)
variables['phoneusers'] = archive_phoneuser_items(request)
variables['d'] = d
return render_to_response(
'archives/phoneusers/home.html', RequestContext(request, variables))
def archive_phoneuser_items(request):
"""Archived Phoneuser Items List"""
variables = Acl.get_permissions_for_user(request.user.id, request.user.is_staff)
items_per_page = settings.ITEMS_PER_PAGE
keyword = request.GET.get("keyword", "")
page = int(request.GET.get("page", "1"))
d = request.GET.dict()
page = 1
if 'page' in d.keys():
page = int(d['page'])
# remove the page key from the dict
del d['page']
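# case-insensitive OR filter on last name, pincode and serial number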
q_obj = Q(last_name__icontains=keyword)
q_obj.add(Q(pincode__icontains=keyword), Q.OR)
q_obj.add(Q(serial_no__icontains=keyword), Q.OR)
items_list = ArchivedPhoneUser.objects.filter(q_obj).order_by('last_name')
total_items = items_list.count()
items, items_range, items_next_page = Helper.make_pagination(
items_list, page, items_per_page)
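# work out previous/next page numbers and the item range shown on the current page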
prev_page = page - 1
prev_page_disabled = ''
if prev_page < 1:
prev_page = 1
prev_page_disabled = 'disabled'
next_page = 1
next_page_disabled = ''
if items:
next_page = page + 1
if next_page > items.paginator.num_pages:
next_page = items.paginator.num_pages
next_page_disabled = 'disabled'
start_item = 1
if page > 0:
start_item = (page - 1) * items_per_page + 1
end_item = start_item + items_per_page - 1
if end_item > total_items:
end_item = total_items
variables['items'] = items
variables['total_items'] = total_items
variables['prev_page'] = prev_page
variables['next_page'] = next_page
variables['prev_page_disabled'] = prev_page_disabled
variables['next_page_disabled'] = next_page_disabled
variables['current_page'] = page
variables['start_item'] = start_item
variables['end_item'] = end_item
variables['query_string'] = urlencode(d)
variables['d'] = d
if request.is_ajax():
return render_to_response(
'archives/phoneusers/table.html', RequestContext(request, variables))
return render_to_string(
'archives/phoneusers/table.html', RequestContext(request, variables))
@login_required
def archive_phoneuser_view(request, archived_phoneuser_id):
"""Visualizza la pagina di anagrafica"""
archived_phoneuser_id = int(archived_phoneuser_id)
if not archived_phoneuser_id:
raise Http404
variables = Acl.get_permissions_for_user(request.user.id, request.user.is_staff)
phoneuser = archive_phoneuser_data(request, archived_phoneuser_id)
whitelists = archive_whitelist_items(request, archived_phoneuser_id)
credits = archive_credit_items(request, archived_phoneuser_id)
variables['phoneuser'] = phoneuser
variables['whitelists'] = whitelists
variables['credits'] = credits
return render_to_response('archives/phoneusers/page.html',
RequestContext(request,variables))
@login_required
def archive_phoneuser_data(request, archived_phoneuser_id):
"""Recupera e visualizza le informazioni sul phoneuser archiviato"""
variables = Acl.get_permissions_for_user(request.user.id, request.user.is_staff)
if int(archived_phoneuser_id):
try:
archived_phoneuser = ArchivedPhoneUser.objects.get(pk=archived_phoneuser_id)
except:
raise Http404
variables['phoneuser'] = archived_phoneuser
if request.is_ajax():
return render_to_response(
'archives/phoneusers/phoneuser.html', RequestContext(request, variables))
return render_to_string(
'archives/phoneusers/phoneuser.html', RequestContext(request, variables))
@login_required
def archive_whitelist_items(request, archived_phoneuser_id):
archived_phoneuser_id = int(archived_phoneuser_id)
user = request.user
variables = Acl.get_permissions_for_user(user.id, user.is_staff)
whitelists = ArchivedWhitelist.objects.filter(
archived_phoneuser_id=archived_phoneuser_id).order_by('label')
variables['whitelists'] = whitelists
if request.is_ajax():
return render_to_response(
'archives/phoneusers/whitelists/table.html', RequestContext(request, variables))
return render_to_string(
'archives/phoneusers/whitelists/table.html', RequestContext(request, variables))
@login_required
def archive_credit_items(request, archived_phoneuser_id):
archived_phoneuser_id = int(archived_phoneuser_id)
archived_phoneuser = ArchivedPhoneUser.objects.get(pk=archived_phoneuser_id)
user = request.user
variables = Acl.get_permissions_for_user(user.id, user.is_staff)
credits = ArchivedCredit.objects.filter(
archived_phoneuser=archived_phoneuser).order_by('-recharge_date')
variables['items'] = credits
variables['balance'] = archived_phoneuser.balance
variables['archived_phoneuser_id'] = archived_phoneuser_id
if request.is_ajax():
return render_to_response(
'archives/phoneusers/credits/table.html', RequestContext(request, variables))
return render_to_string(
'archives/phoneusers/credits/table.html', RequestContext(request, variables))
@login_required
def archive_cdrs_home(request):
import time
d = request.GET.dict()
user = request.user
variables = Acl.get_permissions_for_user(user.id, user.is_staff)
variables['cdr'] = archive_cdrs_items(request)
variables['d'] = d
data_inizio_cal = time.strftime("%d-%m-%Y")
if 'start_date' in d.keys():
data_inizio_cal = d['start_date']
data_fine_cal = time.strftime("%d-%m-%Y")
if 'end_date' in d.keys():
data_fine_cal = d['end_date']
variables['data_inizio_cal'] = data_inizio_cal
variables['data_fine_cal'] = data_fine_cal
return render_to_response(
'archives/cdrs/home.html', RequestContext(request, variables))
@login_required
def archive_cdrs_items(request):
variables = {}
items_per_page = settings.ITEMS_PER_PAGE
start_date = request.GET.get("start_date", "")
end_date = request.GET.get("end_date", "")
start_time = request.GET.get("start_time", None)
end_time = request.GET.get("end_time", None)
pincode = request.GET.get("pincode", "")
archived_phoneuser_id = request.GET.get("archived_phoneuser_id", "")
dst = request.GET.get("dst", "")
calltype = request.GET.get("calltype", None)
page = int(request.GET.get("page", "1"))
d = request.GET.dict()
page = 1
if 'page' in d.keys():
page = int(d['page'])
# remove the page key from the dict
del d['page']
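# build the CDR filter: pincode and destination substrings, optional phoneuser, date/time range and call type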
q_obj = Q(pincode__icontains=pincode)
q_obj.add(Q(dst__icontains=dst), Q.AND)
if archived_phoneuser_id:
q_obj.add(Q(archived_phoneuser_id=archived_phoneuser_id), Q.AND)
if start_date != '':
start_date = Helper.convert_datestring_format(
start_date, "%d-%m-%Y", "%Y-%m-%d")
if start_time:
start_time = "%s:00" % start_time
else:
start_time = "00:00:00"
start_date = "%s %s" % (start_date, start_time)
q_obj.add(Q(calldate__gte=start_date), Q.AND)
if end_date != '':
end_date = Helper.convert_datestring_format(
end_date, "%d-%m-%Y", "%Y-%m-%d")
if end_time:
end_time = "%s:59" % end_time
else:
end_time = "23:59:59"
end_date = "%s %s" % (end_date, end_time)
q_obj.add(Q(calldate__lte=end_date), Q.AND)
if calltype:
q_obj.add(Q(calltype=calltype), Q.AND)
items_list = ArchivedDetail.objects.filter(q_obj).order_by('-calldate')
total_items = items_list.count()
total_costs = 0.0
# compute the overall call count and total cost
for item in items_list:
if float(item.price) > 0:
total_costs += float(item.price)
items, items_range, items_next_page = Helper.make_pagination(
items_list, page, items_per_page)
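# clamp negative prices to zero, attach the matching whitelist entry and resolve the source extension name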
for item in items:
if item.price < 0:
item.price = "0.00"
try:
item.whitelist = ArchivedWhitelist.objects.filter(
archived_phoneuser_id=item.archived_phoneuser_id, phonenumber=item.dst)[0]
except Exception as e:
item.whitelist = '-'
item.src = Extension.get_extension_name(item.src)
prev_page = page - 1
prev_page_disabled = ''
if prev_page < 1:
prev_page = 1
prev_page_disabled = 'disabled'
next_page = 1
next_page_disabled = ''
if items:
next_page = page + 1
if next_page > items.paginator.num_pages:
next_page = items.paginator.num_pages
next_page_disabled = 'disabled'
start_item = 1
if page > 0:
start_item = (page - 1) * items_per_page + 1
end_item = start_item + items_per_page - 1
if end_item > total_items:
end_item = total_items
variables['items'] = items
variables['total_costs'] = total_costs
variables['total_items'] = total_items
variables['prev_page'] = prev_page
variables['next_page'] = next_page
variables['prev_page_disabled'] = prev_page_disabled
variables['next_page_disabled'] = next_page_disabled
variables['current_page'] = page
variables['start_item'] = start_item
variables['end_item'] = end_item
variables['query_string'] = urlencode(d)
variables['d'] = d
variables['archived_phoneuser_id'] = archived_phoneuser_id
if request.is_ajax():
return render_to_response(
'archives/cdrs/table.html', RequestContext(request, variables))
return render_to_string(
'archives/cdrs/table.html', RequestContext(request, variables))
@login_required
def archive_records_home(request):
import time
d = request.GET.dict()
user = request.user
variables = Acl.get_permissions_for_user(user.id, user.is_staff)
variables['records'] = archive_records_items(request)
variables['d'] = d
data_inizio_cal = time.strftime("%d-%m-%Y")
if 'start_date' in d.keys():
data_inizio_cal = d['start_date']
data_fine_cal = time.strftime("%d-%m-%Y")
if 'end_date' in d.keys():
data_fine_cal = d['end_date']
variables['data_inizio_cal'] = data_inizio_cal
variables['data_fine_cal'] = data_fine_cal
return render_to_response(
'archives/records/home.html', RequestContext(request, variables))
@login_required
def archive_records_items(request):
"""Record Items"""
from urllib import urlencode
variables = {}
items_per_page = settings.ITEMS_PER_PAGE
start_date = request.GET.get("start_date", "")
end_date = request.GET.get("end_date", "")
start_time = request.GET.get("start_time", None)
end_time = request.GET.get("end_time", None)
archived_phoneuser_id = request.GET.get("archived_phoneuser_id", "")
pincode = request.GET.get("pincode", "")
page = int(request.GET.get("page", "1"))
d = request.GET.dict()
page = 1
if 'page' in d.keys():
page = int(d['page'])
# remove the page key from the dict
del d['page']
q_obj = Q(pincode__icontains=pincode)
if archived_phoneuser_id:
q_obj.add(Q(archived_phoneuser_id=archived_phoneuser_id), Q.AND)
if start_date != '':
start_date = Helper.convert_datestring_format(
start_date, "%d-%m-%Y", "%Y-%m-%d")
if start_time:
start_time = "%s:00" % start_time
else:
start_time = "00:00:00"
start_date = "%s %s" % (start_date, start_time)
q_obj.add(Q(calldate__gte=start_date), Q.AND)
if end_date != '':
end_date = Helper.convert_datestring_format(
end_date, "%d-%m-%Y", "%Y-%m-%d")
if end_time:
end_time = "%s:59" % end_time
else:
end_time = "23:59:59"
end_date = "%s %s" % (end_date, end_time)
q_obj.add(Q(calldate__lte=end_date), Q.AND)
items_list = ArchivedRecord.objects.filter(q_obj).order_by('-calldate')
total_items = items_list.count()
items, items_range, items_next_page = Helper.make_pagination(
items_list, page, items_per_page)
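# attach the matching CDR detail and whitelist entry to each recording and build its playback path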
for item in items:
try:
details = ArchivedDetail.objects.filter(uniqueid=item.uniqueid)
if not details:
item.detail = SuperbaCDR
item.detail.dst = ''
else:
item.detail = details[0]
try:
item.whitelist = ArchivedWhitelist.objects.get(
archived_phoneuser_id=item.archived_phoneuser_id,
phonenumber=item.detail.dst)
except:
item.whitelist = None
except Exception as e:
pass # TODO: handle this failure properly
print "Errore nel recupero delle informazioni sulla chiamata"
if item.filename != '':
item.filename = "/recordings/%s" % item.filename
prev_page = page - 1
prev_page_disabled = ''
if prev_page < 1:
prev_page = 1
prev_page_disabled = 'disabled'
next_page = 1
next_page_disabled = ''
if items:
next_page = page + 1
if next_page > items.paginator.num_pages:
next_page = items.paginator.num_pages
next_page_disabled = 'disabled'
start_item = 1
if page > 0:
start_item = (page - 1) * items_per_page + 1
end_item = start_item + items_per_page - 1
if end_item > total_items:
end_item = total_items
variables['items'] = items
variables['total_items'] = total_items
variables['prev_page'] = prev_page
variables['next_page'] = next_page
variables['prev_page_disabled'] = prev_page_disabled
variables['next_page_disabled'] = next_page_disabled
variables['current_page'] = page
variables['start_item'] = start_item
variables['end_item'] = end_item
variables['d'] = d
variables['query_string'] = urlencode(d)
if request.is_ajax():
return render_to_response(
'archives/records/table.html', RequestContext(request, variables))
return render_to_string(
'archives/records/table.html', RequestContext(request, variables))
@login_required
def archive_cdrs_export_excel(request):
import time
import xlwt
book = xlwt.Workbook(encoding='utf8')
sheet = book.add_sheet('Esportazione')
default_style = xlwt.Style.default_style
datetime_style = xlwt.easyxf(num_format_str='dd/mm/yyyy hh:mm')
d = request.GET.dict()
start_date = request.GET.get("start_date", "")
end_date = request.GET.get("end_date", "")
start_time = request.GET.get("start_time", None)
end_time = request.GET.get("end_time", None)
pincode = request.GET.get("pincode", "")
dst = request.GET.get("dst", "")
q_obj = Q(pincode__icontains=pincode)
q_obj.add(Q(dst__icontains=dst), Q.AND)
q_obj.add(Q(valid=True), Q.AND) # export only calls flagged as valid
if start_date != '':
start_date = Helper.convert_datestring_format(
start_date, "%d-%m-%Y", "%Y-%m-%d")
if start_time:
start_time = "%s:00" % start_time
else:
start_time = "00:00:00"
start_date = "%s %s" % (start_date, start_time)
q_obj.add(Q(calldate__gte=start_date), Q.AND)
if end_date != '':
end_date = Helper.convert_datestring_format(
end_date, "%d-%m-%Y", "%Y-%m-%d")
if end_time:
end_time = "%s:59" % end_time
else:
end_time = "23:59:59"
end_date = "%s %s" % (end_date, end_time)
q_obj.add(Q(calldate__lte=end_date), Q.AND)
details = ArchivedDetail.objects.filter(q_obj).order_by('-calldate')
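# write the spreadsheet header row (column labels are part of the exported report)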
sheet.write(0, 0, "Data e ora", style=default_style)
sheet.write(0, 1, "Codice", style=default_style)
sheet.write(0, 2, "Matricola", style=default_style)
sheet.write(0, 3, "Cognome e Nome", style=default_style)
sheet.write(0, 4, "Sorgente", style=default_style)
sheet.write(0, 5, "Destinazione", style=default_style)
sheet.write(0, 6, "Numero Autorizzato", style=default_style)
sheet.write(0, 7, "Durata", style=default_style)
sheet.write(0, 8, "Costo", style=default_style)
for row, rowdata in enumerate(details):
try:
archived_phoneuser = ArchivedPhoneUser.objects.get(id=rowdata.archived_phoneuser_id)
print archived_phoneuser
fullname = archived_phoneuser.get_full_name()
matricola = archived_phoneuser.serial_no
try:
whitelist = ArchivedWhitelist.objects.get(phonenumber=rowdata.dst,
archived_phoneuser=archived_phoneuser)
whitelist_label = whitelist.label
except:
whitelist_label = '-'
except:
fullname = '-'
matricola = '-'
whitelist_label = '-'
calldate = time.strftime("%d-%m-%Y %H:%M:%S",
time.strptime(str(rowdata.calldate),
"%Y-%m-%d %H:%M:%S"))
billsec = "%sm %ss" % (int(rowdata.billsec / 60), rowdata.billsec % 60)
rowdata.price = rowdata.price > 0 and rowdata.price or 0
sheet.write(row + 1, 0, calldate, style=datetime_style)
sheet.write(row + 1, 1, rowdata.pincode, style=default_style)
sheet.write(row + 1, 2, matricola, style=default_style)
sheet.write(row + 1, 3, fullname, style=default_style)
sheet.write(row + 1, 4, rowdata.src, style=default_style)
sheet.write(row + 1, 5, rowdata.dst, style=default_style)
sheet.write(row + 1, 6, whitelist_label, style=default_style)
sheet.write(row + 1, 7, billsec, style=default_style)
sheet.write(row + 1, 8, rowdata.price, style=default_style)
response = HttpResponse(content_type='application/vnd.ms-excel')
filename = 'Dettaglio_chiamate_archiviate.xls'
response[
'Content-Disposition'] = 'attachment; filename=%s' % filename
book.save(response)
# log the action
audit = Audit()
detail = Helper.get_filter_detail(d)
what = "Esportazione lista chiamate archiviate corrispondenti ai seguenti criteri: %s" \
% detail
audit.log(user=request.user, what=what)
return response
@login_required
def archive_record_action(request, action, item, archived_record_id=0):
"""Unica funzione per gestire azioni diverse"""
# check that the user actually has the required privileges
# and has not simply typed the URL by hand
if Acl.get_permission_for_function(
request.user.id, Acl.FUNCTION_RECORD) or request.user.is_staff:
if action == 'remove':
if item == 'single':
return _single_record_remove(request, archived_record_id)
else:
return _multi_record_remove(request)
elif action == 'download':
if item == 'single':
return _single_record_export(request, archived_record_id)
else:
return _multi_record_export_as_zip_file(request)
else:
raise Http404
else:
raise Http403
def _multi_record_export_as_zip_file(request):
"Esportazione multifile in formato zip"""
import os
import contextlib
import zipfile
d = request.GET.dict()
start_date = request.GET.get("start_date", "")
end_date = request.GET.get("end_date", "")
start_time = request.GET.get("start_time", None)
end_time = request.GET.get("end_time", None)
pincode = request.GET.get("pincode", "")
q_obj = Q(pincode__icontains=pincode)
if start_date != '':
start_date = Helper.convert_datestring_format(
start_date, "%d-%m-%Y", "%Y-%m-%d")
if start_time:
start_time = "%s:00" % start_time
else:
start_time = "00:00:00"
start_date = "%s %s" % (start_date, start_time)
q_obj.add(Q(calldate__gte=start_date), Q.AND)
if end_date != '':
end_date = Helper.convert_datestring_format(
end_date, "%d-%m-%Y", "%Y-%m-%d")
if end_time:
end_time = "%s:59" % end_time
else:
end_time = "23:59:59"
end_date = "%s %s" % (end_date, end_time)
q_obj.add(Q(calldate__lte=end_date), Q.AND)
items_list = ArchivedRecord.objects.filter(q_obj).order_by('-calldate')
filename = 'registrazioni'
if pincode != '':
try:
phoneuser = ArchivedPhoneUser.objects.get(pincode=pincode)
filename = 'registrazioni %s' % phoneuser
except:
pass
zipname = "%s.zip" % filename
tmpzippath = os.path.join(settings.TMP_ZIP_ROOT, zipname)
file_counter = 0
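# add to the archive only the recordings whose CDR detail is marked as valid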
with contextlib.closing(zipfile.ZipFile(tmpzippath, 'w')) as myzip:
for item in items_list:
detail = ArchivedDetail.objects.filter(uniqueid=item.uniqueid)[0]
if detail.valid:
file_counter += 1
path = os.path.join(settings.RECORDS_ROOT, item.filename)
myzip.write(path, arcname = item.filename)
if not file_counter:
return redirect("/archives/records/?err=1&err_msg=Nessuno dei file soddisfa i criteri per l'esportazione&%s" % urlencode(d))
response = Helper.file_export(tmpzippath)
# log the action
audit = Audit()
detail = Helper.get_filter_detail(d)
what = "Esportazione registrazioni archiviate corrispondenti ai seguenti criteri: %s" \
% (detail)
audit.log(user=request.user, what=what)
audit.save()
return response
@login_required
def archive_credit_print_recharge(request, archived_credit_id):
"""Stampa Singola Ricarica"""
import datetime
archived_credit_id = int(archived_credit_id)
if archived_credit_id:
try:
archived_credit = ArchivedCredit.objects.get(pk=archived_credit_id)
archived_phoneuser = ArchivedPhoneUser.objects.get(pk=archived_credit.archived_phoneuser_id)
except:
raise Http404
variables = {
'header': Pref.header(),
'phoneuser': archived_phoneuser,
'today': datetime.date.today().strftime("%d-%m-%Y"),
'credit': archived_credit,
}
return render_to_response('phoneusers/credits/print_receipt.html', variables)
else:
raise Http404
@login_required
def archive_credit_export(request, archived_phoneuser_id=0):
"""Stampa bilancio"""
import datetime
archived_phoneuser_id = int(archived_phoneuser_id)
if archived_phoneuser_id:
try:
archived_phoneuser = ArchivedPhoneUser.objects.get(pk=archived_phoneuser_id)
except:
raise Http404
recharges = ArchivedCredit.objects.filter(archived_phoneuser_id=archived_phoneuser_id)
tot_recharges = ArchivedCredit.get_total(archived_phoneuser)
tot_cost = ArchivedDetail.get_cost(archived_phoneuser)
variables = {
'header': Pref.header(),
'phoneuser': archived_phoneuser,
'today': datetime.date.today().strftime("%d-%m-%Y"),
'recharges': recharges,
'tot_recharges': tot_recharges,
'tot_cost': tot_cost,
}
return render_to_response('phoneusers/credits/report.html', variables)
else:
raise Http404
|
gpl-2.0
| 263,253,237,872,637,730 | 34.23088 | 132 | 0.62179 | false |
alexfalcucc/douglas_bot
|
external/jokes.py
|
1
|
2418
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import webdriverplus as webdriver
import os
import pickledb
# simulate GUI
from pyvirtualdisplay import Display
# need selenium webdriver to set the firefoxprofile
from selenium import webdriver as old_webdriver
# webdriverplus is a fork of selenium2 webdriver with added features
class Joke(object):
"""return a joke for the user"""
def __init__(self, db):
self.db = db
self.display = Display(visible=0, size=(1024, 768))
# Latest Chrome on Windows
self.fake_browser = 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'
# be able to setup a firefox profile
self.ff_profile = old_webdriver.firefox.firefox_profile.FirefoxProfile()
# sets the download notification off
self.ff_profile.set_preference('browser.download.manager.showWhenStarting', False)
# sets the user agent, latest windows chrome is most common
self.ff_profile.set_preference('general.useragent.override', self.fake_browser)
# sets to not show annoying download panels
# set driver
self.display.start()
self.browser = webdriver.WebDriver(firefox_profile=self.ff_profile)
self.urls = self.get_urls()
def run(self):
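"""Scrape every listing page and store jokes with at least 200 votes in the database."""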
print "Updating..."
jokes = []
for url in self.urls:
driver = self.browser.get(url)
print driver.current_url
for joke_element in driver.find('#main article').find('.row'):
try:
vote = joke_element.find('.votes').text.split()
if int(vote[1]) >= 200:
jokes.append(joke_element.find('.joke').text)
except AttributeError:
print "I am not an element."
print jokes
self.db.set('jokes', jokes)
print '*'*100
print 'Updated!'
self.browser.close()
self.display.stop()
def get_urls(self):
default_url = 'http://www.osvigaristas.com.br/piadas/curtas/'
urls = [default_url + 'pagina{}.html'.format(i)
for i in range(2, 51)]
urls.append(default_url)
return urls
def get_jokes(self):
return self.db.get('jokes')
if __name__ == '__main__':
Joke(db=pickledb.load(os.environ['HOME'] + '/douglas_db/douglas.db', True)).run()
|
mit
| -892,453,854,334,419,600 | 35.636364 | 130 | 0.608768 | false |
akalicki/genomics-csi
|
quality-assessment/group3_report2_question1.py
|
1
|
1287
|
#!/usr/bin/env python
"""
Usage: (python) group3_report2_question1.py <PASS FILE> <FAIL FILE>
Reads a pair of input FASTQ files as processed by poretools and counts
the number of 2D reads that were found in each.
"""
import sys
import re
filename_re = re.compile(r"ch(\d+)_file(\d+)_strand_(.*):")
def parse_filename_line(s):
"""Returns a 3-tuple consisting of channel number, file number,
and boolean flag true if 2D read"""
matches = filename_re.search(s)
if matches:
channel_number = int(matches.group(1))
file_number = int(matches.group(2))
twodirections = (matches.group(3) == "twodirections")
return (channel_number, file_number, twodirections)
def get_reads(f):
"""Creates a set of 2d reads found in f."""
reads2d = set([])
for line in f.readlines():
if line[0] == '@': # skip most irrelevant lines
z = parse_filename_line(line)
if z and z[2]:
reads2d.add((z[0], z[1]))
return reads2d
if __name__ == '__main__':
with open(sys.argv[1]) as passfile, open(sys.argv[2]) as failfile:
pass2d = get_reads(passfile)
fail2d = get_reads(failfile)
print ("Passed: %d 2D reads" % len(pass2d))
print ("Failed: %d 2D reads" % len(fail2d))
|
gpl-2.0
| 2,741,729,582,916,814,000 | 31.175 | 70 | 0.613831 | false |
GjjvdBurg/labella.py
|
labella/tex.py
|
1
|
4268
|
# -*- coding: utf-8 -*-
"""
This file is part of labella.py.
Author: G.J.J. van den Burg
License: Apache-2.0
"""
import os
import shutil
import subprocess
import tempfile
import unicodedata
def uni2tex(text):
# Courtesy of https://tex.stackexchange.com/q/23410
accents = {
0x0300: "`",
0x0301: "'",
0x0302: "^",
0x0308: '"',
0x030B: "H",
0x0303: "~",
0x0327: "c",
0x0328: "k",
0x0304: "=",
0x0331: "b",
0x0307: ".",
0x0323: "d",
0x030A: "r",
0x0306: "u",
0x030C: "v",
}
out = ""
txt = tuple(text)
i = 0
while i < len(txt):
char = text[i]
code = ord(char)
# combining marks
if unicodedata.category(char) in ("Mn", "Mc") and code in accents:
out += "\\%s{%s}" % (accents[code], txt[i + 1])
i += 1
# precomposed characters
elif unicodedata.decomposition(char):
base, acc = unicodedata.decomposition(char).split()
acc = int(acc, 16)
base = int(base, 16)
if acc in accents:
out += "\\%s{%s}" % (accents[acc], chr(base))
else:
out += char
else:
out += char
i += 1
return out
def get_latex_fontdoc(text, fontsize="11pt", preamble=""):
tex = r"""\documentclass[preview, {fontsize}]{{standalone}}
{preamble}%
\begin{{document}}
{text}%
\newlength{{\lblwidth}}%
\newlength{{\lblheight}}%
\settowidth{{\lblwidth}}{{{text}}}%
\settoheight{{\lblheight}}{{{text}}}%
\typeout{{LABELWIDTH: \the\lblwidth}}%
\typeout{{LABELHEIGHT: \the\lblheight}}%
\end{{document}}
""".format(
fontsize=fontsize, text=uni2tex(text), preamble=uni2tex(preamble)
)
return tex
def compile_latex(fname, tmpdirname, latexmk_options, silent=True):
compiler = "latexmk"
if latexmk_options:
compiler_args = latexmk_options + [
"--outdir=" + tmpdirname,
"--interaction=nonstopmode",
fname,
]
else:
compiler_args = [
"--pdf",
"--outdir=" + tmpdirname,
"--interaction=nonstopmode",
fname,
]
command = [compiler] + compiler_args
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
except (OSError, IOError) as e:
raise (e)
except subprocess.CalledProcessError as e:
print(e.output.decode())
raise (e)
else:
if not silent:
print(output.decode())
def get_latex_dims(tex, latexmk_options, silent=True):
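"""Compile the probe document and read the label width and height back from the LaTeX log."""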
with tempfile.TemporaryDirectory() as tmpdirname:
basename = "labella_text"
fname = os.path.join(tmpdirname, basename + ".tex")
with open(fname, "w") as fid:
fid.write(tex)
compile_latex(fname, tmpdirname, latexmk_options, silent=silent)
logname = os.path.join(tmpdirname, basename + ".log")
with open(logname, "r") as fid:
lines = fid.readlines()
line_width = next(
(l for l in lines if l.startswith("LABELWIDTH")), None
)
line_height = next(
(l for l in lines if l.startswith("LABELHEIGHT")), None
)
width = line_width.strip().split(":")[-1].strip().rstrip("pt")
height = line_height.strip().split(":")[-1].strip().rstrip("pt")
return float(width), float(height)
def build_latex_doc(tex, latexmk_options, output_name=None, silent=True):
with tempfile.TemporaryDirectory() as tmpdirname:
basename = "labella_text"
fname = os.path.join(tmpdirname, basename + ".tex")
with open(fname, "w") as fid:
fid.write(tex)
compile_latex(fname, tmpdirname, latexmk_options, silent=silent)
pdfname = os.path.join(tmpdirname, basename + ".pdf")
if output_name:
shutil.copy2(pdfname, output_name)
def text_dimensions(
text, fontsize="11pt", preamble="", silent=True, latexmk_options=None
):
tex = get_latex_fontdoc(text, fontsize=fontsize, preamble=preamble)
width, height = get_latex_dims(
tex, silent=silent, latexmk_options=latexmk_options
)
return width, height
|
apache-2.0
| 6,099,058,062,391,097,000 | 26.714286 | 75 | 0.561387 | false |
TravelModellingGroup/TMGToolbox
|
TMGToolbox/src/analysis/traffic/export_countpost_results.py
|
1
|
13131
|
#---LICENSE----------------------
'''
Copyright 2014-2017 Travel Modelling Group, Department of Civil Engineering, University of Toronto
This file is part of the TMG Toolbox.
The TMG Toolbox is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
The TMG Toolbox is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with the TMG Toolbox. If not, see <http://www.gnu.org/licenses/>.
'''
#---METADATA---------------------
'''
Export Countpost Results
Authors: pkucirek
Latest revision by: James Vaughan
[Description]
'''
#---VERSION HISTORY
'''
0.0.1 Created on 2014-01-16 by pkucirek
0.0.2 Modified to properly append the file extension.
1.0.0 Upgraded for release: Now works with util.fastLoadLinkAttributes.
1.1.0 Added new feature to optionally select an alternate countpost attribute.
Also, now countpost results will be reported in increasing order.
1.1.1 Fixed a bug in the tool page Javascript
1.1.2 Added additional functionality for XTMF
1.1.3 Added checks to make sure the alternative countpost attribute is not used from XTMF if
there is a blank string.
'''
import inro.modeller as _m
import traceback as _traceback
from contextlib import contextmanager
from contextlib import nested
from os.path import splitext
_MODELLER = _m.Modeller() #Instantiate Modeller once.
_util = _MODELLER.module('tmg.common.utilities')
_tmgTPB = _MODELLER.module('tmg.common.TMG_tool_page_builder')
NullPointerException = _util.NullPointerException
##########################################################################################################
class ExportCountpostResults(_m.Tool()):
version = '1.1.3'
tool_run_msg = ""
number_of_tasks = 1 # For progress reporting, enter the integer number of tasks here
# Tool Input Parameters
# Only those parameters neccessary for Modeller and/or XTMF to dock with
# need to be placed here. Internal parameters (such as lists and dicts)
# get intitialized during construction (__init__)
xtmf_ScenarioNumber = _m.Attribute(int) # parameter used by XTMF only
Scenario = _m.Attribute(_m.InstanceType) # common variable or parameter
CountpostAttributeId = _m.Attribute(str)
AlternateCountpostAttributeId = _m.Attribute(str)
ExportFile = _m.Attribute(str)
def __init__(self):
#---Init internal variables
self.TRACKER = _util.ProgressTracker(self.number_of_tasks) #init the ProgressTracker
#---Set the defaults of parameters used by Modeller
self.Scenario = _MODELLER.scenario #Default is primary scenario
self.CountpostAttributeId = "@stn1"
self.AlternateCountpostAttributeId = "@stn2"
def page(self):
pb = _tmgTPB.TmgToolPageBuilder(self, title="Export Countpost Results v%s" %self.version,
description="Exports traffic assignment results on links flagged with \
a countpost number.",
branding_text="- TMG Toolbox")
if self.tool_run_msg != "": # to display messages in the page
pb.tool_run_status(self.tool_run_msg_status)
pb.add_select_scenario(tool_attribute_name='Scenario',
title='Scenario:',
allow_none=False)
keyval1 = []
keyval2 = [(-1, 'None - No attribute')]
for att in _MODELLER.scenario.extra_attributes():
if att.type == 'LINK':
text = "%s - %s" %(att.id, att.description)
keyval1.append((att.id, text))
keyval2.append((att.id, text))
pb.add_select(tool_attribute_name='CountpostAttributeId',
keyvalues=keyval1,
title="Countpost Attribute",
note="LINK attribute containing countpost id numbers")
pb.add_select(tool_attribute_name='AlternateCountpostAttributeId',
keyvalues=keyval2,
title="Alternate Countpost Attribute",
note="<font color='green'><b>Optional:</b></font> Alternate countpost attribute \
for multiple post per link")
pb.add_select_file(tool_attribute_name='ExportFile',
window_type='save_file',
file_filter="*.csv",
title="Export File")
pb.add_html("""
<script type="text/javascript">
$(document).ready( function ()
{
var tool = new inro.modeller.util.Proxy(%s) ;
$("#Scenario").bind('change', function()
{
$(this).commit();
$("#CountpostAttributeId")
.empty()
.append(tool.preload_scenario_attributes())
inro.modeller.page.preload("#CountpostAttributeId");
$("#CountpostAttributeId").trigger('change');
$("#AlternateCountpostAttributeId")
.empty()
.append("<option value='-1'>None - No attribute</option>")
.append(tool.preload_scenario_attributes())
inro.modeller.page.preload("#AlternateCountpostAttributeId");
$("#AlternateCountpostAttributeId").trigger('change');
});
});
</script>""" % pb.tool_proxy_tag)
return pb.render()
@_m.method(return_type=_m.TupleType)
def percent_completed(self):
return self.TRACKER.getProgress()
@_m.method(return_type=unicode)
def tool_run_msg_status(self):
return self.tool_run_msg
@_m.method(return_type=unicode)
def preload_scenario_attributes(self):
list = []
for att in self.Scenario.extra_attributes():
label = "{id} - {name}".format(id=att.name, name=att.description)
html = unicode('<option value="{id}">{text}</option>'.format(id=att.name, text=label))
list.append(html)
return "\n".join(list)
##########################################################################################################
def run(self):
self.tool_run_msg = ""
try:
if not self.Scenario.has_traffic_results:
raise Exception("Scenario %s has no traffic assignment results" %self.Scenario.number)
if self.CountpostAttributeId is None: raise NullPointerException("Countpost Attribute not specified")
if self.ExportFile is None: raise NullPointerException("Export File not specified")
except Exception as e:
self.tool_run_msg = _m.PageBuilder.format_exception(
e, _traceback.format_exc())
raise
try:
self._Execute()
except Exception as e:
self.tool_run_msg = _m.PageBuilder.format_exception(
e, _traceback.format_exc())
raise
self.tool_run_msg = _m.PageBuilder.format_info("Done.")
def __call__(self, xtmf_ScenarioNumber, CountpostAttributeId, AlternateCountpostAttributeId,
ExportFile):
#---1 Set up scenario
self.Scenario = _m.Modeller().emmebank.scenario(xtmf_ScenarioNumber)
if (self.Scenario is None):
raise Exception("Scenario %s was not found!" %xtmf_ScenarioNumber)
if not self.Scenario.has_traffic_results:
raise Exception("Scenario %s has no traffic assignment results" %self.Scenario.number)
linkAtts = set([att.id for att in self.Scenario.extra_attributes() if att.type == 'LINK'])
if not CountpostAttributeId in linkAtts:
raise NullPointerException("'%s' is not a valid link attribute" %CountpostAttributeId)
if AlternateCountpostAttributeId != "" and not AlternateCountpostAttributeId in linkAtts:
raise NullPointerException("'%s' is not a valid link attribute" %AlternateCountpostAttributeId)
self.CountpostAttributeId = CountpostAttributeId
self.AlternateCountpostAttributeId = AlternateCountpostAttributeId
self.ExportFile = ExportFile
try:
self._Execute()
except Exception as e:
msg = str(e) + "\n" + _traceback.format_exc()
raise Exception(msg)
##########################################################################################################
def _Execute(self):
with _m.logbook_trace(name="{classname} v{version}".format(classname=(self.__class__.__name__), version=self.version),
attributes=self._GetAtts()):
self.TRACKER.reset()
linkResults = _util.fastLoadLinkAttributes(self.Scenario, [self.CountpostAttributeId,
'auto_volume',
'additional_volume',
'auto_time'])
alternateLinkResults = {}
if self.AlternateCountpostAttributeId and self.AlternateCountpostAttributeId != "":
alternateLinkResults = _util.fastLoadLinkAttributes(self.Scenario,
[self.AlternateCountpostAttributeId])
#Remove entries not flagged with a countpost
self._CleanResults(linkResults, alternateLinkResults)
#Get the countpost data, sorted
lines = self._ProcessResults(linkResults, alternateLinkResults)
#Write countpost data to file
self._WriteReport(lines)
##########################################################################################################
#----SUB FUNCTIONS---------------------------------------------------------------------------------
def _GetAtts(self):
atts = {
"Scenario" : str(self.Scenario.id),
"Countpost Attribute": self.CountpostAttributeId,
"Alternate Countpost Attribute": self.AlternateCountpostAttributeId,
"Export File": self.ExportFile,
"Version": self.version,
"self": self.__MODELLER_NAMESPACE__}
return atts
def _CleanResults(self, linkResults, alternateLinkResults):
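"""Drop links that are not flagged with a countpost in either attribute."""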
idsToRemove = []
for linkId, attributes in linkResults.iteritems():
post1 = attributes[self.CountpostAttributeId]
post2 = 0
if linkId in alternateLinkResults:
post2 = alternateLinkResults[linkId][self.AlternateCountpostAttributeId]
if not post1 and not post2:
idsToRemove.append(linkId)
for key in idsToRemove:
linkResults.pop(key)
def _ProcessResults(self, linkResults, alternateLinkResults):
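"""Assemble sorted (countpost, link, auto volume, additional volume, auto time) rows."""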
lines = []
posts = 0
self.TRACKER.startProcess(len(linkResults))
for linkIdTuple, attributes in linkResults.iteritems():
linkId = "%s-%s" %linkIdTuple
post1 = attributes[self.CountpostAttributeId]
post2 = 0
if linkIdTuple in alternateLinkResults:
post2 = alternateLinkResults[linkIdTuple][self.AlternateCountpostAttributeId]
volau = attributes['auto_volume']
volad = attributes['additional_volume']
timau = attributes['auto_time']
data = [linkId, volau, volad, timau]
if post1:
lines.append((post1, linkId, volau, volad, timau))
posts += 1
if post2:
lines.append((post2, linkId, volau, volad, timau))
posts += 1
self.TRACKER.completeSubtask()
_m.logbook_write("Found %s countposts in network" %posts)
lines.sort()
return lines
def _WriteReport(self, lines):
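"""Write the countpost rows to the CSV export file."""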
with open(self.ExportFile, 'w') as writer:
writer.write("Countpost,Link,Auto Volume,Additional Volume,Auto Time")
for line in lines:
line = [str(c) for c in line]
writer.write("\n" + ','.join(line))
_m.logbook_write("Wrote report to %s" %self.ExportFile)
|
gpl-3.0
| 2,219,207,741,489,187,000 | 40.0375 | 126 | 0.556393 | false |
googleapis/python-pubsub
|
tests/unit/pubsub_v1/subscriber/test_scheduler.py
|
1
|
5542
|
# Copyright 2018, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import concurrent.futures
import queue
import threading
import time
import warnings
import mock
from google.cloud.pubsub_v1.subscriber import scheduler
def test_subclasses_base_abc():
assert issubclass(scheduler.ThreadScheduler, scheduler.Scheduler)
def test_constructor_defaults():
scheduler_ = scheduler.ThreadScheduler()
assert isinstance(scheduler_.queue, queue.Queue)
assert isinstance(scheduler_._executor, concurrent.futures.Executor)
def test_constructor_options():
scheduler_ = scheduler.ThreadScheduler(executor=mock.sentinel.executor)
assert scheduler_._executor == mock.sentinel.executor
def test_schedule_executes_submitted_items():
called_with = []
callback_done_twice = threading.Barrier(3) # 3 == 2x callback + 1x main thread
def callback(*args, **kwargs):
called_with.append((args, kwargs)) # appends are thread-safe
callback_done_twice.wait()
scheduler_ = scheduler.ThreadScheduler()
scheduler_.schedule(callback, "arg1", kwarg1="meep")
scheduler_.schedule(callback, "arg2", kwarg2="boop")
callback_done_twice.wait(timeout=3.0)
result = scheduler_.shutdown()
assert result == [] # no scheduled items dropped
expected_calls = [(("arg1",), {"kwarg1": "meep"}), (("arg2",), {"kwarg2": "boop"})]
assert sorted(called_with) == expected_calls
def test_schedule_after_executor_shutdown_warning():
def callback(*args, **kwargs):
pass
scheduler_ = scheduler.ThreadScheduler()
scheduler_.schedule(callback, "arg1", kwarg1="meep")
scheduler_._executor.shutdown()
with warnings.catch_warnings(record=True) as warned:
scheduler_.schedule(callback, "arg2", kwarg2="boop")
assert len(warned) == 1
assert issubclass(warned[0].category, RuntimeWarning)
warning_msg = str(warned[0].message)
assert "after executor shutdown" in warning_msg
def test_shutdown_nonblocking_by_default():
called_with = []
at_least_one_called = threading.Event()
at_least_one_completed = threading.Event()
def callback(message):
called_with.append(message) # appends are thread-safe
at_least_one_called.set()
time.sleep(1.0)
at_least_one_completed.set()
executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
scheduler_ = scheduler.ThreadScheduler(executor=executor)
scheduler_.schedule(callback, "message_1")
scheduler_.schedule(callback, "message_2")
at_least_one_called.wait()
dropped = scheduler_.shutdown()
assert len(called_with) == 1
assert called_with[0] in {"message_1", "message_2"}
assert len(dropped) == 1
assert dropped[0] in {"message_1", "message_2"}
assert dropped[0] != called_with[0] # the dropped message was not the processed one
err_msg = (
"Shutdown should not have waited "
"for the already running callbacks to complete."
)
assert not at_least_one_completed.is_set(), err_msg
def test_shutdown_blocking_awaits_running_callbacks():
called_with = []
at_least_one_called = threading.Event()
at_least_one_completed = threading.Event()
def callback(message):
called_with.append(message) # appends are thread-safe
at_least_one_called.set()
time.sleep(1.0)
at_least_one_completed.set()
executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
scheduler_ = scheduler.ThreadScheduler(executor=executor)
scheduler_.schedule(callback, "message_1")
scheduler_.schedule(callback, "message_2")
at_least_one_called.wait()
dropped = scheduler_.shutdown(await_msg_callbacks=True)
assert len(called_with) == 1
assert called_with[0] in {"message_1", "message_2"}
# The work items that have not been started yet should still be dropped.
assert len(dropped) == 1
assert dropped[0] in {"message_1", "message_2"}
assert dropped[0] != called_with[0] # the dropped message was not the processed one
err_msg = "Shutdown did not wait for the already running callbacks to complete."
assert at_least_one_completed.is_set(), err_msg
def test_shutdown_handles_executor_queue_sentinels():
at_least_one_called = threading.Event()
def callback(_):
at_least_one_called.set()
time.sleep(1.0)
executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
scheduler_ = scheduler.ThreadScheduler(executor=executor)
scheduler_.schedule(callback, "message_1")
scheduler_.schedule(callback, "message_2")
scheduler_.schedule(callback, "message_3")
# Simulate executor shutdown from another thread.
executor._work_queue.put(None)
executor._work_queue.put(None)
at_least_one_called.wait()
dropped = scheduler_.shutdown(await_msg_callbacks=True)
assert len(set(dropped)) == 2 # Also test for item uniqueness.
for msg in dropped:
assert msg is not None
assert msg.startswith("message_")
|
apache-2.0
| 6,949,379,709,625,790,000 | 31.034682 | 88 | 0.695417 | false |
followcat/predator
|
industry.py
|
1
|
7111
|
# encoding: utf-8
import jieba
import jieba.posseg
from sources.yingcai_industry import industry_list as yingcai_industry
from sources.liepin_industry import industry_list as liepin_industry
from sources.jingying_industry import industry_list as jingying_industry
from sources.zhilian_industry import industry_list as zhilian_industry
FLAGS = ['x', # spaces
'm', # number and date
'i', 'j',
'u', # unclassified (eg. etc)
'f', # time and place
'q', # quantifier
'p', # preposition
'v', # vernicular expression
]
def InitIndustry(industry_dict = None):
industry_list=[]
for index in industry_dict.keys():
cn = industry_dict[index].strip()
industry_list.append(cn)
return industry_list
def CompeleteMatch(industry_list, industry_dict):
matchdict={}
for value in industry_list:
for key in industry_dict.keys():
if value == industry_dict[key].strip():
matchdict[value] = key
return matchdict
def DeleteMatchItem(industry_dict, matchdict):
for key in matchdict.keys():
industry_dict.pop(matchdict[key])
return industry_dict
def JiebaFenci(string):
fenci_list=jieba.cut_for_search(string)
return fenci_list
def FenciMatch(industry_list, industry_dict):
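"""Count, for every source industry label, how many of its jieba tokens appear in each reference label."""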
fenci_match = {}
for key in industry_dict.keys():
fenci_match[key]={}
fenci_match[key][industry_dict[key]]={}
for value in industry_list:
tmp_dict={value:0}
for word in jieba.posseg.cut(industry_dict[key]):
word.word = word.word.strip().encode('utf-8')
if (word.flag not in FLAGS) and (value.find(word.word)!=-1):
#import ipdb;ipdb.set_trace()
tmp_dict[value]+=1
fenci_match[key][industry_dict[key]][value]=tmp_dict[value]
else:
continue
return fenci_match
def FenciMatch2(fenci_match):
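"""Keep only the reference label(s) with the highest token overlap for each source label."""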
partialmatch_dict={}
for key in fenci_match.keys():
#import ipdb;ipdb.set_trace()
for key_key in fenci_match[key].keys():
maxnum = -1
max_index = []
for key_key_key in fenci_match[key][key_key].keys():
if fenci_match[key][key_key][key_key_key] >= maxnum:
if fenci_match[key][key_key][key_key_key] > maxnum:
maxnum = fenci_match[key][key_key][key_key_key]
max_index = []
max_index.append(key_key_key)
else:
max_index.append(key_key_key)
for index in range(len(max_index)):
partialmatch_dict[max_index[index]]={}
partialmatch_dict[max_index[index]][key_key]=key
return partialmatch_dict
def NoneMatch(industry_dict, matchdict, partialmatch,rematchdict):
nonmatch={}
tmplist=[]
tmplist2=[]
for key2 in partialmatch.keys():
for key21 in partialmatch[key2].keys():
tmplist.append(key21)
for key3 in rematchdict.keys():
for key31 in rematchdict[key3].keys():
tmplist2.append(key31)
for key in industry_dict.keys():
temp = industry_dict[key]
if (temp in matchdict.keys()) or (temp in tmplist) or (temp in tmplist2):
continue
else:
nonmatch[key]=temp
print temp
return nonmatch
def Match(tags,industrylist, input_industry):
matchdict = {}
fencidict = {}
partialmatchdict = {}
nonmatch = {}
refencidict ={}
rematchdict = {}
final_nonmatch = {}
industry = input_industry
for key in industry.keys():
industry[key] = industry[key].encode('utf-8')
origin_dict = industry
matchdict = CompeleteMatch(industrylist, origin_dict)
DeleteMatchItem(origin_dict, matchdict)
fencidict = FenciMatch(industrylist, origin_dict)
partialmatchdict = FenciMatch2(fencidict)
nonmatch = NoneMatch(industry, matchdict, partialmatchdict,rematchdict)
refencidict = FenciMatch(industrylist, nonmatch)
rematchdict = FenciMatch2(refencidict)
final_nonmatch = NoneMatch(industry, matchdict, partialmatchdict,rematchdict)
final_nonmatch = {v:k for k, v in final_nonmatch.items()}
return matchdict, partialmatchdict, rematchdict, final_nonmatch
def Merge(tags,industrylist,final_dict,process_industry):
matchdict = {}
partialmatchdict ={}
rematchdict = {}
final_nonmatch = {}
matchdict, partialmatchdict, rematchdict, final_nonmatch = Match(tags,industrylist, process_industry)
for value in industrylist:
final_dict[value][tags]=[]
if value in matchdict.keys():
tmp_item = [matchdict[value],value]
final_dict[value][tags].append(tmp_item)
if value in partialmatchdict.keys():
for key_tmp in partialmatchdict[value].keys():
value_tmp = partialmatchdict[value][key_tmp]
tmp_item2 = [value_tmp,key_tmp]
final_dict[value][tags].append(tmp_item2)
if value in rematchdict.keys():
for key_tmp1 in rematchdict[value].keys():
value_tmp1 = rematchdict[value][key_tmp1]
tmp_item3 = [value_tmp1,key_tmp1]
final_dict[value][tags].append(tmp_item3)
for key in final_nonmatch.keys():
tmp_item4 = [final_nonmatch[key],key]
final_dict['其他行业'][tags].append(tmp_item4)
return final_dict
industrylist = []
init_dict = {}
for key in yingcai_industry.keys():
init_dict[key] = yingcai_industry[key].encode('utf-8')
industrylist = InitIndustry(init_dict)
final_dict = {}
for value in industrylist:
final_dict[value]={}
industry_list = [
['yingcai', yingcai_industry],
['liepin', liepin_industry],
['jingying', jingying_industry],
['zhilian', zhilian_industry]
]
for process_industry in industry_list:
print process_industry[0]
final_dict = Merge(process_industry[0],industrylist,final_dict,process_industry[1])
filepath = '/home/winky/predator/sources/source.py'
sources=open(filepath,'w')
sources.write('#encoding: utf-8\n')
sources.write('industry_dict = {\n')
for key1 in final_dict.keys():
sources.write(' \'{0}\':{1}\n'.format(key1,'{'))
for key2 in final_dict[key1].keys():
sources.write(' \'{}\': ['.format(key2))
for index in range(len(final_dict[key1][key2])):
if index != (len(final_dict[key1][key2])-1):
sources.write('[\'{0}\',\'{1}\'],'.format(final_dict[key1][key2][index][0],
final_dict[key1][key2][index][1]))
else:
sources.write('[\'{0}\',\'{1}\']'.format(final_dict[key1][key2][index][0],
final_dict[key1][key2][index][1]))
sources.write('],\n')
sources.write(' },\n')
sources.write(' }\n')
sources.close()
|
lgpl-3.0
| 1,789,611,919,118,132,500 | 35.055838 | 105 | 0.59313 | false |
mitodl/bootcamp-ecommerce
|
cms/migrations/0022_model_rename_page_to_section.py
|
1
|
1530
|
# Generated by Django 2.2.13 on 2020-06-22 09:38
from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("wagtailforms", "0004_add_verbose_name_plural"),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("wagtailimages", "0001_squashed_0021"),
("wagtailredirects", "0006_redirect_increase_max_length"),
("wagtailcore", "0045_assign_unlock_grouppagepermission"),
("contenttypes", "0002_remove_content_type_name"),
("cms", "0021_remove_subheading_resource_page"),
]
operations = [
migrations.RenameModel(
old_name="AdmissionsSectionPage", new_name="AdmissionsSection"
),
migrations.RenameModel(old_name="AlumniPage", new_name="AlumniSection"),
migrations.RenameModel(
old_name="CatalogGridPage", new_name="CatalogGridSection"
),
migrations.RenameModel(old_name="HomeAlumniPage", new_name="HomeAlumniSection"),
migrations.RenameModel(
old_name="InstructorsPage", new_name="InstructorsSection"
),
migrations.RenameModel(
old_name="LearningResourcePage", new_name="LearningResourceSection"
),
migrations.RenameModel(
old_name="ProgramDescriptionPage", new_name="ProgramDescriptionSection"
),
migrations.RenameModel(
old_name="ThreeColumnImageTextPage", new_name="ThreeColumnImageTextSection"
),
]
|
bsd-3-clause
| 5,187,569,924,189,244,000 | 37.25 | 88 | 0.655556 | false |
asm-products/spaceship-build
|
server.py
|
1
|
1523
|
__author__ = 'xXxH3LIOSxXx'
from builtins import print
import socket
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Bind the socket to 6969
server_address = ('localhost', 6969)
print('\nSpaceShip build server starting up on %s port %s' % server_address)
sock.bind(server_address)
# Listen for incoming connections, just 1 now for testing
sock.listen(1)
while True:
# Wait for a connection
print('\nWaiting for connections...')
connection, client_address = sock.accept()
# Pre-loading buffer with a MOTD
welcome = bytes('+++++++++++++++++++++++++++++++++++++++++++++++\n++++++++++Welcome to SpaceShip build!++++++++++\n+++++++++++++++++++++++++++++++++++++++++++++++\n\nRight now all this server does is echo your input a single time!\nType something now to try it out...', 'UTF-8')
connection.sendall(welcome)
try:
print('\nClient connecting from', client_address)
# Receive the data and for the time being re-transmit it
while True:
data = connection.recv(1024)
print('Received >> "%s"' % data.decode("utf-8"))
if data:
print('Sending data back to', client_address)
connection.sendall(data)
else:
print('No more data from', client_address)
break
finally:
# Clean up the connection
print('Closing connection from', client_address)
connection.close()
print('Connection closed!')
|
agpl-3.0
| -5,877,231,379,056,113,000 | 33.636364 | 282 | 0.592252 | false |
ENCODE-DCC/WranglerScripts
|
chip_utilities/rerun_analysis_applet.py
|
1
|
8097
|
#!/usr/bin/env python
from __future__ import print_function
import dxpy
from pprint import pformat
import sys
import re
import traceback
import logging
from time import sleep
logging.basicConfig()
logger = logging.getLogger(__name__)
# E3_PROJECT_ID = 'project-BKp5K980bpZZ096Xp1XQ02fZ'
# FRIP_DEV_PROJECT_ID = 'project-F3BpKqj07z6y979Z4X36P6z9'
# FRIP_PROJECT_ID = 'project-F3Bvp4004vxZxbpZBBJGPyYy'
# TEST_ANALYSIS_ID = 'analysis-F2v67b80bpZV0p9q788kgBGp'
# TEST_ANALYSIS_ID = 'analysis-F3BZ8v8036977yg98x815zB3'\
ACCESSION_OUTPUT_FOLDER = "/accession_log/"
# APPLETS_PROJECT_ID = next(dxpy.find_projects(
# name='ENCODE - ChIP Production',
# return_handler=True)).get_id()
APPLETS_PROJECT_ID = dxpy.PROJECT_CONTEXT_ID
APPLETS = {}
EPILOG = '''Notes:
Examples:
%(prog)s
'''
def get_args():
import argparse
parser = argparse.ArgumentParser(
description=__doc__, epilog=EPILOG,
formatter_class=argparse.RawDescriptionHelpFormatter)
def t_or_f(arg):
ua = str(arg).upper()
if ua == 'TRUE'[:len(ua)]:
return True
elif ua == 'FALSE'[:len(ua)]:
return False
else:
assert not (True or False), "Cannot parse %s to boolean" % (arg)
parser.add_argument('analysis_ids', help='List of analysis IDs to rerun', nargs='*', default=None)
parser.add_argument('--stage', help='Name of stage to replace', required=True)
parser.add_argument('--applet', help='Name of new applet to replace with', required=True)
parser.add_argument('--infile', help='File containing analysis IDs', type=argparse.FileType('r'), default=sys.stdin)
parser.add_argument('--folder', help='New output folder on DNAnexus. Default is same folder.')
parser.add_argument('--accession', help='Automatically accession the results to the ENCODE Portal', type=t_or_f, default=None)
parser.add_argument('--debug', help="Print debug messages", type=t_or_f, default=None)
return parser.parse_args()
def find_applet_by_name(applet_name, applets_project_id=APPLETS_PROJECT_ID):
if (applet_name, applets_project_id) not in APPLETS:
found = dxpy.find_one_data_object(
classname="applet",
name=applet_name,
project=applets_project_id,
zero_ok=False,
more_ok=False,
return_handler=True)
APPLETS[(applet_name, applets_project_id)] = found
return APPLETS[(applet_name, applets_project_id)]
def stage_named(name, analysis):
try:
stage = next(
stage for stage in analysis.describe()['stages']
if stage['execution']['name'] == name
)
except StopIteration:
stage = None
except:
raise
return stage
def rerun_with_applet(analysis_id, stage_name, applet_name, folder=None):
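"""Clone the analysis into a temporary workflow, swap in the new applet for the named stage (dropping any accessioning stage) and launch it."""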
logger.debug(
'rerun_with_applet: analysis_id %s new_applet_name %s'
% (analysis_id, applet_name))
analysis = dxpy.DXAnalysis(analysis_id)
old_workflow_description = analysis.describe().get('workflow')
old_workflow = dxpy.DXWorkflow(old_workflow_description['id'])
project_id = analysis.describe()['project']
temp = dxpy.api.workflow_new({
'name': analysis.describe()['executableName'],
'project': project_id,
'initializeFrom': {'id': analysis.get_id()},
'properties': old_workflow.get_properties(),
'temporary': True})
new_workflow = dxpy.DXWorkflow(temp['id'])
logger.debug(
'rerun_with_applet: new_workflow %s %s'
% (new_workflow.get_id(), new_workflow.name))
old_stage = stage_named(stage_name, analysis)
accessioning_stage = stage_named('Accession results', analysis)
if accessioning_stage:
new_workflow.remove_stage(accessioning_stage['id'])
new_applet = find_applet_by_name(applet_name)
logger.debug(
'rerun_with_applet: new_applet %s %s'
% (new_applet.get_id(), new_applet.name))
same_input = old_stage['execution']['input']
logger.debug(
'rerun_with_applet: same_input \n%s'
% (pformat(same_input)))
new_workflow.update_stage(
old_stage['id'],
executable=new_applet.get_id(),
stage_input=same_input,
force=True)
m = re.match('ENCSR.{6}', analysis.name)
accession = m.group(0)
analysis_properties = analysis.describe()['properties']
analysis_properties.update({
'experiment_accession': accession,
'original_analysis': analysis_id
})
logger.debug(
'rerun_with_applet: running workflow')
runargs = {
# 'executable_input': {},
'project': project_id,
'name': "%s %s" % (analysis.name, new_applet.name),
'properties': analysis_properties
}
if folder is not None:
runargs.update({'folder': folder})
logger.debug("running new_workflow with args: \n%s" % (pformat(runargs)))
return new_workflow.run({}, **runargs)
def accession_analysis(analysis):
accession_analysis_applet = find_applet_by_name('accession_analysis')
logger.debug(
'accession_analysis: found accession_analysis_applet %s'
% (accession_analysis_applet.name))
accession_output_folder = ACCESSION_OUTPUT_FOLDER
accession_job_input = {
'analysis_ids': [analysis.get_id()],
'wait_on_files': [],
'fqcheck': False,
'skip_control': True,
'force_patch': True,
'encoded_check': False
}
sleep(5)
max_retries = 10
retries = max_retries
    accession_job = None
    last_error = None
while retries:
logger.debug('accession_analysis: running accession_analysis_applet with input %s' % (accession_job_input))
try:
accession_job = accession_analysis_applet.run(
accession_job_input,
name='Accession %s' % (analysis.name),
folder=accession_output_folder,
depends_on=[analysis.get_id()],
project=analysis.describe()['project']
)
        except Exception as e:
            last_error = e
logger.error(
"%s launching auto-accession ... %d retries left"
% (e, retries))
sleep(5)
retries -= 1
continue
else:
logger.debug(
"Auto-accession will run as %s %s"
% (accession_job.name, accession_job.get_id()))
break
else:
logging.error("Auto-accession failed with %s" % (e))
return accession_job
def main():
args = get_args()
if args.debug:
logger.setLevel(logging.DEBUG)
logger.debug("Logging level set to DEBUG")
else:
logger.setLevel(logging.INFO)
if args.analysis_ids:
ids = args.analysis_ids
else:
ids = args.infile
first_row = True
for string in ids:
analysis_id = string.strip()
if not analysis_id:
continue
try:
new_analysis = rerun_with_applet(analysis_id, args.stage, args.applet, args.folder)
except:
row = "%s\terror" % (analysis_id)
print("%s\t%s" % (analysis_id, "error"), file=sys.stderr)
traceback.print_exc(file=sys.stderr)
else:
project = dxpy.DXProject(new_analysis.describe()['project'])
row = "%s\t%s\t%s" % (
analysis_id,
new_analysis.get_id(),
project.name
)
if args.accession:
try:
accessioning_job = accession_analysis(new_analysis)
except Exception as e:
logger.error("acccession_analysis failed with %s" % (e))
row += "\tfailed"
else:
row += "\t%s" % (None if not accessioning_job else accessioning_job.get_id())
else:
row += "\tmanually"
if first_row:
print("old_analysis\tnew_analysis\tproject\taccession_job")
print(row)
first_row = False
if __name__ == '__main__':
main()
|
mit
| 5,388,239,026,137,083,000 | 32.04898 | 130 | 0.598864 | false |
fffy2366/image-processing
|
tests/python/deskew.py
|
1
|
1152
|
import cv2
import numpy as np
from matplotlib import pyplot as plt
import copy
'''
https://gist.github.com/mweibel/bd2d6c2271e42ed97b97
'''
img = cv2.imread('/Users/fengxuting/Downloads/1463815812385A98C108', cv2.IMREAD_GRAYSCALE)
def compute_skew(image):
image = cv2.bitwise_not(image)
height, width = image.shape
edges = cv2.Canny(image, 150, 200, 3, 5)
lines = cv2.HoughLinesP(edges, 1, cv2.cv.CV_PI/180, 100, minLineLength=width / 2.0, maxLineGap=20)
angle = 0.0
    # HoughLinesP returns an array of shape (1, N, 4) in OpenCV 2.x, so count
    # the line segments themselves rather than every element of the array.
    nlines = len(lines[0])
for x1, y1, x2, y2 in lines[0]:
# angle += np.arctan2(x2 - x1, y2 - y1)
angle += np.arctan2(y2 - y1, x2 - x1)
return angle / nlines
def deskew(image, angle):
image = cv2.bitwise_not(image)
non_zero_pixels = cv2.findNonZero(image)
center, wh, theta = cv2.minAreaRect(non_zero_pixels)
root_mat = cv2.getRotationMatrix2D(center, angle, 1)
# cols, rows = image.shape
rows, cols = image.shape
rotated = cv2.warpAffine(image, root_mat, (cols, rows), flags=cv2.INTER_CUBIC)
return cv2.getRectSubPix(rotated, (cols, rows), center)
# compute_skew() returns radians, while cv2.getRotationMatrix2D expects degrees.
deskewed_image = deskew(img.copy(), np.degrees(compute_skew(img)))
|
mit
| 7,231,058,920,349,087,000 | 30.162162 | 102 | 0.676215 | false |
geekbert/HelloPythonSourceCode
|
chapter 09/ship-5b-landing.py
|
1
|
6572
|
#!/usr/bin/python
import pyglet
from pyglet.gl import *
from pyglet.window import key
import math
window = pyglet.window.Window(fullscreen=True)
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
pyglet.resource.path.append('./images')
pyglet.resource.reindex()
def center_anchor(img):
img.anchor_x = img.width // 2
img.anchor_y = img.height // 2
planet_image = pyglet.resource.image('mars.png')
center_anchor(planet_image)
ship_image = pyglet.resource.image('ship.png')
center_anchor(ship_image)
ship_image_on = pyglet.resource.image('ship_on.png')
center_anchor(ship_image_on)
def wrap(value, width):
while value > width:
value -= width
while value < 0:
value += width
return value
radians_in_circle = math.pi * 2
def to_radians(degrees):
return math.pi * degrees / 180.0
def to_degrees(radians):
return (180 * radians) / math.pi
def xy_to_vec(x, y):
"""distance and angle from (0,0) to (x,y)"""
distance = math.sqrt(x**2 + y**2)
if distance == 0:
return (0,0)
angle = math.acos(float(x) / distance)
if y < 0:
angle = 2*math.pi - angle
return (distance, angle)
def vec_to_xy(distance, angle):
x = distance * math.cos(angle)
y = distance * math.sin(angle)
return (x,y)
def pyglet_to_math(degrees):
x, y = vec_to_xy(1, to_radians(degrees))
x = -x
return xy_to_vec(x,y)[1]
def math_to_pyglet(radians):
x, y = vec_to_xy(1, radians)
x = -x
return to_degrees(xy_to_vec(x,y)[1])
def degree_angle_diff(angle1, angle2):
# assumes degrees
diff = abs(wrap(angle1, 360.) - wrap(angle2, 360.))
if diff > 180:
diff = 360.0 - diff
return diff
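# e.g. degree_angle_diff(350, 10) == 20.0 -- always the smaller arc between the
# two headings, never more than 180 degrees.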
class Planet(pyglet.sprite.Sprite):
def __init__(self, image, x=0, y=0, batch=None):
super(Planet, self).__init__(image, x, y, batch=batch)
self.x = x
self.y = y
self.mass = 5000000 # experiment!
self.radius = (self.image.height + self.image.width) / 4
# Planet pulls ship in with gravity
def dist_vec_to(self, target):
dx = self.x - target.x
dy = self.y - target.y
return xy_to_vec(dx, dy)
def force_on(self, target):
G = 1 # experiment!
distance, angle = self.dist_vec_to(target)
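        # Newtonian point-mass attraction per unit ship mass: |F| = G * M / d^2,
        # with the angle pointing from the ship toward the planet (dist_vec_to
        # measures from the target back to this planet).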
return ((G * self.mass) / (distance**2), angle)
def update(self, dt):
# Check collisions
distance, angle = self.dist_vec_to(ship)
#print "**", distance, '\t', ship.radius, self.radius
if distance <= ship.radius + self.radius:
if not ship.landed:
velocity, ship_angle = xy_to_vec(ship.dx, ship.dy)
if velocity > ship.max_speed:
ship.reset()
ship.alive = False
elif degree_angle_diff(ship.rotation, math_to_pyglet(angle)) > 30:
ship.reset()
ship.alive = False
else:
ship.dx = 0
ship.dy = 0
ship.landed = True
print "Landed v2! ship angle = %s (%s), actual angle = %s" % (to_degrees(ship_angle), ship.rotation, to_degrees(angle))
else:
ship.landed = False
if not ship.landed:
# Gravity!
force, angle = self.force_on(ship)
force_x, force_y = vec_to_xy(force * dt, angle)
ship.dx += force_x
ship.dy += force_y
class Ship(pyglet.sprite.Sprite, key.KeyStateHandler):
def __init__(self, image, x=0, y=0, dx=0, dy=0, rotv=0, batch=None):
super(Ship, self).__init__(image, x, y, batch=batch)
self.x = x
self.y = y
self.dx = dx
self.dy = dy
self.rotation = rotv
self.alive = True
self.landed = False
self.radius = self.image.width / 2
self.thrust = 150.0
self.rot_spd = 100.0
self.max_speed = 100
def reset(self):
        self.life_timer = 2.0 # seconds until respawn
self.x = center_x + 300; self.y = center_y
self.dx = 0; self.dy = 150
self.rotation = -90
def update(self, dt):
self.image = ship_image
if not self.alive:
# print "Dead! Respawn in %s" % self.life_timer
self.life_timer -= dt
if self.life_timer > 0:
return
else:
self.reset()
self.alive = True
# Update rotation
if not self.landed:
if self[key.LEFT]:
self.rotation -= self.rot_spd * dt
if self[key.RIGHT]:
self.rotation += self.rot_spd * dt
self.rotation = wrap(self.rotation, 360.)
# Get x/y components of orientation
rotation_x = math.cos(to_radians(self.rotation))
rotation_y = math.sin(to_radians(-self.rotation))
# Update velocity
if self[key.UP]:
self.image = ship_image_on
self.dx += self.thrust * rotation_x * dt
self.dy += self.thrust * rotation_y * dt
self.x += self.dx * dt
self.y += self.dy * dt
self.x = wrap(self.x, window.width)
self.y = wrap(self.y, window.height)
        self.velocity, vel_angle = xy_to_vec(self.dx, self.dy)
speedometer.text = "Speed: %.02f" % self.velocity
distance, angle = planet.dist_vec_to(self)
speedometer.text += " (rot: %.02f, angle: %.02f)" % (self.rotation, math_to_pyglet(angle))
if self.velocity < self.max_speed * 0.8:
speedometer.color = (0, 255, 0, 255)
elif self.velocity < self.max_speed:
speedometer.color = (255, 255, 0, 255)
else:
speedometer.color = (255, 0, 0, 255)
center_x = int(window.width/2)
center_y = int(window.height/2)
planet = Planet(planet_image, center_x, center_y)
ship = Ship(ship_image)
ship.reset()
speedometer = pyglet.text.Label('Speed: 0',
font_name='Arial',
font_size=36,
x=10, y=10,
anchor_x='left', anchor_y='bottom')
@window.event
def on_draw():
window.clear()
planet.draw()
speedometer.draw()
if ship.alive:
ship.draw()
# Call update 60 times a second
def update(dt):
planet.update(dt)
ship.update(dt)
window.push_handlers(ship)
pyglet.clock.schedule_interval(update, 1/60.0)
pyglet.app.run()
|
bsd-3-clause
| -4,076,408,603,941,285,400 | 28.872727 | 139 | 0.546409 | false |
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/test/tool_shed/functional/test_0470_tool_dependency_repository_type.py
|
1
|
17968
|
from tool_shed.base.twilltestcase import ShedTwillTestCase, common, os
import logging
log = logging.getLogger( __name__ )
category_name = 'Test 0470 Tool dependency repository type'
category_description = 'Test script 0470 for changing repository types.'
package_libx11_repository_name = 'package_x11_client_1_5_proto_7_0_0470'
package_libx11_repository_description = "Contains a tool dependency definition that provides the X11 client libraries and core protocol header files."
package_libx11_repository_long_description = "Xlib is an X Window System protocol client library written in the C programming language."
package_emboss_repository_name = 'package_emboss_5_0_0_0470'
package_emboss_repository_description = "Contains a tool dependency definition that downloads and compiles version 5.0.0 of the EMBOSS tool suite."
package_emboss_repository_long_description = 'EMBOSS is "The European Molecular Biology Open Software Suite".'
datatypes_repository_name = 'emboss_datatypes_0470'
datatypes_repository_description = 'Galaxy applicable data formats used by Emboss tools.'
datatypes_repository_long_description = 'Galaxy applicable data formats used by Emboss tools. This repository contains no tools.'
emboss_repository_name = 'emboss_5_0470'
emboss_repository_description = "Galaxy wrappers for Emboss version 5.0.0 tools"
emboss_repository_long_description = "Galaxy wrappers for Emboss version 5.0.0 tools"
'''
1. Create and populate a repository named package_x11_client_1_5_proto_7_0 that contains only a single file named tool_dependencies.xml.
Keep the repository type as the default "Unrestricted".
2. Create a repository named package_emboss_5_0_0 of type "Unrestricted" that has a repository dependency definition that defines the
above package_x11_client_1_5_proto_7_0 repository. Upload the tool_dependencies.xml file such that it does not have a changeset_revision
defined so it will get automatically populated.
3. Create a repository named emboss_5 of type "Unrestricted" that has a tool_dependencies.xml file defining a complex repository dependency
   on the package_emboss_5_0_0 repository above. Upload the tool_dependencies.xml file such that it does not have a changeset_revision defined
   so it will get automatically populated.
4. Add a comment to the tool_dependencies.xml file to be uploaded to the package_x11_client_1_5_proto_7_0 repository, creating a new installable
   changeset revision at the repository tip.
5. Add a comment to the tool_dependencies.xml file for the package_emboss_5_0_0 repository, eliminating the changeset_revision attribute so
that it gets automatically populated when uploaded. After uploading the file, the package_emboss_5_0_0 repository should have 2
installable changeset revisions.
6. Add a comment to the tool_dependencies.xml file in the emboss_5 repository, eliminating the changeset_revision attribute so that it gets
automatically populated when uploaded. After uploading the file, the emboss5 repository should have 2 installable metadata revisions.
7. Change the repository type of the package_x11_client_1_5_proto_7_0 repository to be tool_dependency_definition.
8. Change the repository type of the package_emboss_5_0_0 repository to be tool_dependency_definition.
9. Reset metadata on the package_emboss_5_0_0 repository. It should now have only its tip as the installable revision.
10. Reset metadata on the emboss_5 repository. It should now have only its tip as the installable revision.
'''
class TestEnvironmentInheritance( ShedTwillTestCase ):
    '''Test changing repository types to tool_dependency_definition and resetting metadata.'''
def test_0000_initiate_users_and_category( self ):
"""Create necessary user accounts and login as an admin user."""
self.logout()
self.login( email=common.admin_email, username=common.admin_username )
admin_user = self.test_db_util.get_user( common.admin_email )
assert admin_user is not None, 'Problem retrieving user with email %s from the database' % common.admin_email
admin_user_private_role = self.test_db_util.get_private_role( admin_user )
self.create_category( name=category_name, description=category_description )
self.logout()
self.login( email=common.test_user_2_email, username=common.test_user_2_name )
test_user_2 = self.test_db_util.get_user( common.test_user_2_email )
assert test_user_2 is not None, 'Problem retrieving user with email %s from the database' % common.test_user_2_email
test_user_2_private_role = self.test_db_util.get_private_role( test_user_2 )
self.logout()
self.login( email=common.test_user_1_email, username=common.test_user_1_name )
test_user_1 = self.test_db_util.get_user( common.test_user_1_email )
assert test_user_1 is not None, 'Problem retrieving user with email %s from the database' % common.test_user_1_email
test_user_1_private_role = self.test_db_util.get_private_role( test_user_1 )
def test_0005_create_libx11_repository( self ):
'''Create and populate package_x11_client_1_5_proto_7_0_0470.'''
'''
This is step 1 - Create and populate a repository named package_x11_client_1_5_proto_7_0.
Create and populate a repository named package_x11_client_1_5_proto_7_0 that contains only a single file named tool_dependencies.xml.
Keep the repository type as the default "Unrestricted".
'''
category = self.test_db_util.get_category_by_name( category_name )
repository = self.get_or_create_repository( name=package_libx11_repository_name,
description=package_libx11_repository_description,
long_description=package_libx11_repository_long_description,
owner=common.test_user_1_name,
category_id=self.security.encode_id( category.id ),
strings_displayed=[] )
# Upload the tool dependency definition to the package_x11_client_1_5_proto_7_0_0470 repository.
self.upload_file( repository,
filename='emboss/libx11_proto/first_tool_dependency/tool_dependencies.xml',
filepath=None,
valid_tools_only=True,
uncompress_file=False,
remove_repo_files_not_in_tar=False,
commit_message='Populate package_x11_client_1_5_proto_7_0_0470 with tool dependency definitions.',
strings_displayed=[],
strings_not_displayed=[] )
def test_0010_create_emboss_5_0_0_repository( self ):
'''Create and populate package_emboss_5_0_0_0470.'''
'''
This is step 2 - Create a repository named package_emboss_5_0_0 of type "Unrestricted".
Create a repository named package_emboss_5_0_0 of type "Unrestricted" that has a repository dependency definition that defines the
        above package_x11_client_1_5_proto_7_0 repository. Upload the tool_dependencies.xml file such that it does not have a changeset_revision
defined so it will get automatically populated.
'''
category = self.test_db_util.get_category_by_name( category_name )
repository = self.get_or_create_repository( name=package_emboss_repository_name,
description=package_emboss_repository_description,
long_description=package_emboss_repository_long_description,
owner=common.test_user_1_name,
category_id=self.security.encode_id( category.id ),
strings_displayed=[] )
# Upload the edited tool dependency definition to the package_emboss_5_0_0 repository.
self.upload_file( repository,
filename='emboss/emboss_5_0_0/first_tool_dependency/tool_dependencies.xml',
filepath=None,
valid_tools_only=True,
uncompress_file=False,
remove_repo_files_not_in_tar=False,
commit_message='Populate package_emboss_5_0_0_0470 with tool dependency definitions.',
strings_displayed=[],
strings_not_displayed=[] )
def test_0015_create_emboss_5_repository( self ):
'''Create and populate emboss_5_0470.'''
'''
This is step 3 - Create a repository named emboss_5 of type "Unrestricted".
        Create a repository named emboss_5 of type "Unrestricted" that has a tool_dependencies.xml file defining a complex repository dependency
        on the package_emboss_5_0_0 repository above. Upload the tool_dependencies.xml file such that it does not have a changeset_revision defined
so it will get automatically populated.
'''
category = self.test_db_util.get_category_by_name( category_name )
repository = self.get_or_create_repository( name=emboss_repository_name,
description=emboss_repository_description,
long_description=emboss_repository_long_description,
owner=common.test_user_1_name,
category_id=self.security.encode_id( category.id ),
strings_displayed=[] )
# Populate emboss_5 with tool and dependency definitions.
self.upload_file( repository,
filename='emboss/0470_files/emboss_complex_dependency.tar',
filepath=None,
valid_tools_only=True,
uncompress_file=True,
remove_repo_files_not_in_tar=False,
commit_message='Populate emboss_5 with tool and dependency definitions.',
strings_displayed=[],
strings_not_displayed=[] )
def test_0020_upload_updated_tool_dependency_to_package_x11( self ):
'''Upload a new tool_dependencies.xml to package_x11_client_1_5_proto_7_0_0470.'''
'''
        This is step 4 - Add a comment to the tool_dependencies.xml file to be uploaded to the package_x11_client_1_5_proto_7_0 repository, creating
a new installable changeset revision at the repository tip.
'''
package_x11_repository = self.test_db_util.get_repository_by_name_and_owner( package_libx11_repository_name, common.test_user_1_name )
# Upload the tool dependency definition to the package_x11_client_1_5_proto_7_0_0470 repository.
self.upload_file( package_x11_repository,
filename='emboss/libx11_proto/second_tool_dependency/tool_dependencies.xml',
filepath=None,
valid_tools_only=True,
uncompress_file=False,
remove_repo_files_not_in_tar=False,
commit_message='Populate package_x11_client_1_5_proto_7_0_0470 with tool dependency definitions.',
strings_displayed=[],
strings_not_displayed=[] )
assert len( package_x11_repository.metadata_revisions ) == 1, \
'package_x11_client_1_5_proto_7_0_0470 has incorrect number of metadata revisions, expected 1 but found %d' % \
len( package_x11_repository.metadata_revisions )
def test_0025_upload_updated_tool_dependency_to_package_emboss( self ):
'''Upload a new tool_dependencies.xml to package_emboss_5_0_0_0470.'''
'''
This is step 5 - Add a comment to the tool_dependencies.xml file for the package_emboss_5_0_0 repository, eliminating
        the changeset_revision attribute so that it gets automatically populated when uploaded. After uploading the file,
the package_emboss_5_0_0 repository should have 2 installable changeset revisions.
'''
package_emboss_repository = self.test_db_util.get_repository_by_name_and_owner( package_emboss_repository_name, common.test_user_1_name )
# Populate package_emboss_5_0_0_0470 with updated tool dependency definition.
self.upload_file( package_emboss_repository,
filename='emboss/emboss_5_0_0/second_tool_dependency/tool_dependencies.xml',
filepath=None,
valid_tools_only=True,
uncompress_file=False,
remove_repo_files_not_in_tar=False,
commit_message='Populate package_emboss_5_0_0_0470 with tool dependency definitions.',
strings_displayed=[],
strings_not_displayed=[] )
assert len( package_emboss_repository.metadata_revisions ) == 2, \
'package_emboss_5_0_0_0470 has incorrect number of metadata revisions, expected 2 but found %d' % \
len( package_emboss_repository.metadata_revisions )
def test_0030_upload_updated_tool_dependency_to_emboss_5_repository( self ):
'''Upload a new tool_dependencies.xml to emboss_5_0470.'''
'''
This is step 6 - Add a comment to the tool_dependencies.xml file in the emboss_5 repository, eliminating the
changeset_revision attribute so that it gets automatically populated when uploaded. After uploading the file,
the emboss5 repository should have 2 installable metadata revisions.
'''
emboss_repository = self.test_db_util.get_repository_by_name_and_owner( emboss_repository_name, common.test_user_1_name )
        # Populate emboss_5_0470 with the updated complex repository dependency definition.
self.upload_file( emboss_repository,
filename='emboss/0470_files/tool_dependencies.xml',
filepath=None,
valid_tools_only=True,
uncompress_file=False,
remove_repo_files_not_in_tar=False,
commit_message='Upload updated complex repository dependency definition to emboss_5_0470.',
strings_displayed=[],
strings_not_displayed=[] )
        assert len( emboss_repository.metadata_revisions ) == 2, 'emboss_5_0470 has incorrect number of metadata revisions'
def test_0035_modify_package_x11_repository_type( self ):
'''Set package_x11_client_1_5_proto_7_0 type tool_dependency_definition.'''
'''
This is step 7 - Change the repository type of the package_x11_client_1_5_proto_7_0 repository to be tool_dependency_definition.
'''
package_x11_repository = self.test_db_util.get_repository_by_name_and_owner( package_libx11_repository_name, common.test_user_1_name )
self.edit_repository_information( package_x11_repository, repository_type='tool_dependency_definition' )
def test_0040_modify_package_emboss_repository_type( self ):
'''Set package_emboss_5_0_0 to type tool_dependency_definition.'''
'''
This is step 8 - Change the repository type of the package_emboss_5_0_0 repository to be tool_dependency_definition.
'''
package_emboss_repository = self.test_db_util.get_repository_by_name_and_owner( package_emboss_repository_name, common.test_user_1_name )
self.edit_repository_information( package_emboss_repository, repository_type='tool_dependency_definition' )
def test_0045_reset_repository_metadata( self ):
'''Reset metadata on package_emboss_5_0_0_0470 and package_x11_client_1_5_proto_7_0.'''
'''
This is step 9 - Reset metadata on the package_emboss_5_0_0 and package_x11_client_1_5_proto_7_0 repositories. They should
now have only their tip as the installable revision.
'''
package_emboss_repository = self.test_db_util.get_repository_by_name_and_owner( package_emboss_repository_name, common.test_user_1_name )
package_x11_repository = self.test_db_util.get_repository_by_name_and_owner( package_libx11_repository_name, common.test_user_1_name )
self.reset_repository_metadata( package_emboss_repository )
self.reset_repository_metadata( package_x11_repository )
assert len( package_emboss_repository.metadata_revisions ) == 1, 'Repository package_emboss_5_0_0 has %d installable revisions, expected 1.' % \
len( package_emboss_repository.metadata_revisions )
assert len( package_x11_repository.metadata_revisions ) == 1, 'Repository package_x11_client_1_5_proto_7_0 has %d installable revisions, expected 1.' % \
len( package_x11_repository.metadata_revisions )
def test_0050_reset_emboss_5_metadata( self ):
'''Reset metadata on emboss_5.'''
'''
This is step 10 - Reset metadata on the emboss_5 repository. It should now have only its tip as the installable revision.
'''
emboss_repository = self.test_db_util.get_repository_by_name_and_owner( emboss_repository_name, common.test_user_1_name )
self.reset_repository_metadata( emboss_repository )
assert len( emboss_repository.metadata_revisions ) == 1, 'Repository emboss_5 has %d installable revisions, expected 1.' % \
len( emboss_repository.metadata_revisions )
|
gpl-3.0
| -8,324,170,802,987,783,000 | 68.914397 | 161 | 0.647818 | false |
ActiveState/code
|
recipes/Python/437116_Wrapper_class_heapq/recipe-437116.py
|
1
|
3197
|
import heapq
class Heap(list):
"""This is a wrapper class for the heap functions provided
by the heapq module.
"""
__slots__ = ()
def __init__(self, t=[]):
self.extend(t)
self.heapify()
push = heapq.heappush
popmin = heapq.heappop
replace = heapq.heapreplace
heapify = heapq.heapify
def pushpop(self, item):
"Push the item onto the heap and then pop the smallest value"
if self and self[0] < item:
return heapq.heapreplace(self, item)
return item
def __iter__(self):
"Return a destructive iterator over the heap's elements"
try:
while True:
yield self.popmin()
except IndexError:
pass
def reduce(self, pos, newitem):
"Replace self[pos] with a lower value item and then reheapify"
while pos > 0:
parentpos = (pos - 1) >> 1
parent = self[parentpos]
if parent <= newitem:
break
self[pos] = parent
pos = parentpos
self[pos] = newitem
def is_heap(self):
"Return True if the heap has the heap property; False otherwise"
n = len(self)
# The largest index there's any point to looking at
# is the largest with a child index in-range, so must have 2*i + 1 < n,
# or i < (n-1)/2. If n is even = 2*j, this is (2*j-1)/2 = j-1/2 so
# j-1 is the largest, which is n//2 - 1. If n is odd = 2*j+1, this is
# (2*j+1-1)/2 = j so j-1 is the largest, and that's again n//2-1.
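        # Worked example: n = 15 gives xrange(7), i.e. parents 0..6; index 6 is
        # the last parent, with children at indices 13 and 14.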
try:
for i in xrange(n//2):
if self[i] > self[2*i+1]: return False
if self[i] > self[2*i+2]: return False
except IndexError:
pass
return True
def heapsort(seq):
return [x for x in Heap(seq)]
if __name__ == '__main__':
from random import randint, shuffle
# generate a random test case
n = 15
data = [randint(1,n) for i in xrange(n)]
shuffle(data)
print data
# test the constructor
heap = Heap(data)
print heap, heap.is_heap()
# test popmin
sorted = []
while heap:
sorted.append(heap.popmin())
data.sort()
print heap, heap.is_heap()
print data == sorted
# test 2
shuffle(data)
print data
# test push
for item in data:
heap.push(item)
print heap, heap.is_heap()
# test __iter__
sorted = [x for x in heap]
data.sort()
print data == sorted
# test 3
shuffle(data)
print data
heap = Heap(data)
print heap, heap.is_heap()
# test reduce
for i in range(5):
pos = randint(0,n-1)
decr = randint(1,10)
item = heap[pos] - decr
heap.reduce(pos, item)
# test is_heap
heap = Heap(data)
count = 0
while 1:
shuffle(heap)
if heap.is_heap():
print heap
break
else:
count += 1
print 'It took', count, 'tries to find a heap by chance.'
print heapsort(data)
try:
heap.x = 5
except AttributeError:
print "Can't add attributes."
|
mit
| -3,530,894,290,419,048,400 | 23.40458 | 79 | 0.533625 | false |
phil888/Design3
|
Code/GEL/ControleurDeCharge/ControleurCharge.py
|
1
|
1825
|
__author__ = 'Miary'
from time import sleep
import serial
import sys
class ControlleurCharge():
PORT_CMD = "/dev/ttyACM3"
def __init__(self, port = PORT_CMD):
self.portSerial = serial.Serial(port,115200 , timeout=4)
sleep(1)
    #Recharge(0,0); (Charge the capacitor)
def recharge(self,rien1,rien2):
commande="Recharge"
self._send_command(commande,rien1,rien2)
    #DischargeTake(0,0); (Pick up the treasure)
def dischargeTake(self,rien1,rien2):
commande="DischargeTake"
self._send_command(commande,rien1,rien2)
    #DischargeKeep(0,0); (Hold the treasure while moving)
    def dischargeKeep(self,rien1,rien2):
commande="DischargeKeep"
self._send_command(commande,rien1,rien2)
    #Open(0,0); (Keep the charge after charging / release the treasure)
def open(self,rien1,rien2):
commande="Open"
self._send_command(commande,rien1,rien2)
    #StationOn(0,0); (Turn on the charging station)
def stationOn(self,rien1,rien2):
commande="StationOn"
self._send_command(commande,rien1,rien2)
    #StationOff(0,0); (Turn off the charging station)
def stationOff(self,rien1,rien2):
commande="StationOff"
self._send_command(commande,rien1,rien2)
def _send_command(self, commande, args1, args2, retry_countdown=3):
self.portSerial.flushInput()
write = "{}({},{});".format(commande, args1, args2)
write1=bytes(write,encoding="UTF-8")
#write1=bytes(write)
self.portSerial.write(write1)
self.portSerial.flushOutput()
#commande_retour = self.portSerial.readline()
#print("CommandeDeRetour:"+str(commande_retour))
#commande_retour1=str(commande_retour)
#print("CommandeDeRetour1:"+commande_retour1)
|
gpl-3.0
| 8,398,340,946,723,164,000 | 28.435484 | 71 | 0.659178 | false |
pqtoan/mathics
|
mathics/builtin/numeric.py
|
3
|
28203
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Numeric evaluation
Support for numeric evaluation with arbitrary precision is just a proof-of-concept.
Precision is not "guarded" through the evaluation process. Only integer precision is supported.
However, things like 'N[Pi, 100]' should work as expected.
"""
from __future__ import unicode_literals
from __future__ import absolute_import
import sympy
import mpmath
from mpmath import mpf
import math
import hashlib
import zlib
from six.moves import range
from collections import namedtuple
from contextlib import contextmanager
from itertools import chain
from mathics.builtin.base import Builtin, Predefined
from mathics.core.numbers import (
dps, convert_int_to_digit_list, machine_precision, machine_epsilon,
get_precision, PrecisionValueError)
from mathics.core.expression import (
Integer, Real, Complex, Expression, Number, Symbol, Rational, from_python,
MachineReal, PrecisionReal)
from mathics.core.convert import from_sympy
class N(Builtin):
"""
<dl>
<dt>'N[$expr$, $prec$]'
<dd>evaluates $expr$ numerically with a precision of $prec$ digits.
</dl>
>> N[Pi, 50]
= 3.1415926535897932384626433832795028841971693993751
>> N[1/7]
= 0.142857
>> N[1/7, 5]
= 0.14286
You can manually assign numerical values to symbols.
When you do not specify a precision, 'MachinePrecision' is taken.
>> N[a] = 10.9
= 10.9
>> a
= a
'N' automatically threads over expressions, except when a symbol has attributes 'NHoldAll', 'NHoldFirst', or 'NHoldRest'.
>> N[a + b]
= 10.9 + b
>> N[a, 20]
= a
>> N[a, 20] = 11;
>> N[a + b, 20]
= 11.000000000000000000 + b
>> N[f[a, b]]
= f[10.9, b]
>> SetAttributes[f, NHoldAll]
>> N[f[a, b]]
= f[a, b]
The precision can be a pattern:
>> N[c, p_?(#>10&)] := p
>> N[c, 3]
= c
>> N[c, 11]
= 11.000000000
You can also use 'UpSet' or 'TagSet' to specify values for 'N':
>> N[d] ^= 5;
However, the value will not be stored in 'UpValues', but in 'NValues' (as for 'Set'):
>> UpValues[d]
= {}
>> NValues[d]
= {HoldPattern[N[d, MachinePrecision]] :> 5}
>> e /: N[e] = 6;
>> N[e]
= 6.
Values for 'N[$expr$]' must be associated with the head of $expr$:
>> f /: N[e[f]] = 7;
: Tag f not found or too deep for an assigned rule.
You can use 'Condition':
>> N[g[x_, y_], p_] := x + y * Pi /; x + y > 3
>> SetAttributes[g, NHoldRest]
>> N[g[1, 1]]
= g[1., 1]
>> N[g[2, 2]] // InputForm
= 8.283185307179586
The precision of the result is no higher than the precision of the input
>> N[Exp[0.1], 100]
= 1.10517
>> % // Precision
= MachinePrecision
>> N[Exp[1/10], 100]
= 1.105170918075647624811707826490246668224547194737518718792863289440967966747654302989143318970748654
>> % // Precision
= 100.
>> N[Exp[1.0`20], 100]
= 2.7182818284590452354
>> % // Precision
= 20.
#> p=N[Pi,100]
= 3.141592653589793238462643383279502884197169399375105820974944592307816406286208998628034825342117068
#> ToString[p]
= 3.141592653589793238462643383279502884197169399375105820974944592307816406286208998628034825342117068
#> 3.14159 * "a string"
= 3.14159 a string
#> N[Pi, Pi]
= 3.14
#> N[1/9, 30]
= 0.111111111111111111111111111111
#> Precision[%]
= 30.
#> N[1.5, 30]
= 1.5
#> Precision[%]
= MachinePrecision
#> N[1.5, 5]
= 1.5
#> Precision[%]
= MachinePrecision
#> {N[x], N[x, 30], N["abc"], N["abc", 30]}
= {x, x, abc, abc}
#> N[I, 30]
= 1.00000000000000000000000000000 I
#> N[1.01234567890123456789]
= 1.01235
#> N[1.012345678901234567890123, 20]
= 1.0123456789012345679
#> N[1.012345678901234567890123, 5]
= 1.0123
#> % // Precision
= 5.
#> N[1.012345678901234567890123, 50]
= 1.01234567890123456789012
#> % // Precision
= 24.
#> N[1.01234567890123456789`]
= 1.01235
#> N[1.01234567890123456789`, 20]
= 1.01235
#> % // Precision
= MachinePrecision
#> N[1.01234567890123456789`, 2]
= 1.01235
#> % // Precision
= MachinePrecision
"""
messages = {
'precbd': "Requested precision `1` is not a machine-sized real number.",
'preclg': ('Requested precision `1` is larger than $MaxPrecision. '
'Using current $MaxPrecision of `2` instead. '
'$MaxPrecision = Infinity specifies that any precision should be allowed.'),
'precsm': 'Requested precision `1` is smaller than $MinPrecision. Using current $MinPrecision of `2` instead.',
}
rules = {
'N[expr_]': 'N[expr, MachinePrecision]',
}
def apply_other(self, expr, prec, evaluation):
'N[expr_, prec_]'
try:
d = get_precision(prec, evaluation)
except PrecisionValueError:
return
if expr.get_head_name() in ('System`List', 'System`Rule'):
return Expression(
expr.head, *[self.apply_other(leaf, prec, evaluation)
for leaf in expr.leaves])
if isinstance(expr, Number):
return expr.round(d)
name = expr.get_lookup_name()
if name != '':
nexpr = Expression('N', expr, prec)
result = evaluation.definitions.get_value(
name, 'System`NValues', nexpr, evaluation)
if result is not None:
if not result.same(nexpr):
result = Expression('N', result, prec).evaluate(evaluation)
return result
if expr.is_atom():
return expr
else:
attributes = expr.head.get_attributes(evaluation.definitions)
if 'System`NHoldAll' in attributes:
eval_range = ()
elif 'System`NHoldFirst' in attributes:
eval_range = range(1, len(expr.leaves))
elif 'System`NHoldRest' in attributes:
if len(expr.leaves) > 0:
eval_range = (0,)
else:
eval_range = ()
else:
eval_range = range(len(expr.leaves))
head = Expression('N', expr.head, prec).evaluate(evaluation)
leaves = expr.leaves[:]
for index in eval_range:
leaves[index] = Expression(
'N', leaves[index], prec).evaluate(evaluation)
return Expression(head, *leaves)
class MachinePrecision(Predefined):
"""
<dl>
<dt>'MachinePrecision'
<dd>represents the precision of machine precision numbers.
</dl>
>> N[MachinePrecision]
= 15.9546
>> N[MachinePrecision, 30]
= 15.9545897701910033463281614204
#> N[E, MachinePrecision]
= 2.71828
#> Round[MachinePrecision]
= 16
"""
rules = {
'N[MachinePrecision, prec_]': 'N[Log[10, 2] * %i, prec]' % machine_precision,
}
class MachineEpsilon_(Predefined):
'''
<dl>
<dt>'$MachineEpsilon'
<dd>is the distance between '1.0' and the next nearest representable machine-precision number.
</dl>
>> $MachineEpsilon
= 2.22045*^-16
>> x = 1.0 + {0.4, 0.5, 0.6} $MachineEpsilon;
>> x - 1
= {0., 0., 2.22045*^-16}
'''
name = '$MachineEpsilon'
def evaluate(self, evaluation):
return MachineReal(machine_epsilon)
class MachinePrecision_(Predefined):
'''
<dl>
<dt>'$MachinePrecision'
<dd>is the number of decimal digits of precision for machine-precision numbers.
</dl>
>> $MachinePrecision
= 15.9546
'''
name = '$MachinePrecision'
rules = {
'$MachinePrecision': 'N[MachinePrecision]',
}
class Precision(Builtin):
"""
<dl>
<dt>'Precision[$expr$]'
<dd>examines the number of significant digits of $expr$.
</dl>
    This is more a proof-of-concept than a full implementation. Precision of
    compound expressions is not supported yet.
>> Precision[1]
= Infinity
>> Precision[1/2]
= Infinity
>> Precision[0.5]
= MachinePrecision
#> Precision[0.0]
= MachinePrecision
#> Precision[0.000000000000000000000000000000000000]
= 0.
#> Precision[-0.0]
= MachinePrecision
#> Precision[-0.000000000000000000000000000000000000]
= 0.
#> 1.0000000000000000 // Precision
= MachinePrecision
#> 1.00000000000000000 // Precision
= 17.
#> 0.4 + 2.4 I // Precision
= MachinePrecision
#> Precision[2 + 3 I]
= Infinity
#> Precision["abc"]
= Infinity
"""
rules = {
'Precision[z_?MachineNumberQ]': 'MachinePrecision',
}
def apply(self, z, evaluation):
'Precision[z_]'
if not z.is_inexact():
return Symbol('Infinity')
elif z.to_sympy().is_zero:
return Real(0)
else:
return Real(dps(z.get_precision()))
class MinPrecision(Builtin):
'''
<dl>
<dt>'$MinPrecision'
        <dd>represents the minimum number of digits of precision permitted in arbitrary-precision numbers.
</dl>
>> $MinPrecision
= 0
>> $MinPrecision = 10;
>> N[Pi, 9]
: Requested precision 9 is smaller than $MinPrecision. Using current $MinPrecision of 10. instead.
= 3.141592654
#> N[Pi, 10]
= 3.141592654
#> $MinPrecision = x
: Cannot set $MinPrecision to x; value must be a non-negative number.
= x
#> $MinPrecision = -Infinity
: Cannot set $MinPrecision to -Infinity; value must be a non-negative number.
= -Infinity
#> $MinPrecision = -1
: Cannot set $MinPrecision to -1; value must be a non-negative number.
= -1
#> $MinPrecision = 0;
#> $MaxPrecision = 10;
#> $MinPrecision = 15
: Cannot set $MinPrecision such that $MaxPrecision < $MinPrecision.
= 15
#> $MinPrecision
= 0
#> $MaxPrecision = Infinity;
'''
name = '$MinPrecision'
rules = {
'$MinPrecision': '0',
}
messages = {
'precset': 'Cannot set `1` to `2`; value must be a non-negative number.',
'preccon': 'Cannot set `1` such that $MaxPrecision < $MinPrecision.',
}
class MaxPrecision(Predefined):
'''
<dl>
<dt>'$MaxPrecision'
        <dd>represents the maximum number of digits of precision permitted in arbitrary-precision numbers.
</dl>
>> $MaxPrecision
= Infinity
>> $MaxPrecision = 10;
>> N[Pi, 11]
: Requested precision 11 is larger than $MaxPrecision. Using current $MaxPrecision of 10. instead. $MaxPrecision = Infinity specifies that any precision should be allowed.
= 3.141592654
#> N[Pi, 10]
= 3.141592654
#> $MaxPrecision = x
: Cannot set $MaxPrecision to x; value must be a positive number or Infinity.
= x
#> $MaxPrecision = -Infinity
: Cannot set $MaxPrecision to -Infinity; value must be a positive number or Infinity.
= -Infinity
#> $MaxPrecision = 0
: Cannot set $MaxPrecision to 0; value must be a positive number or Infinity.
= 0
#> $MaxPrecision = Infinity;
#> $MinPrecision = 15;
#> $MaxPrecision = 10
: Cannot set $MaxPrecision such that $MaxPrecision < $MinPrecision.
= 10
#> $MaxPrecision
= Infinity
#> $MinPrecision = 0;
'''
name = '$MaxPrecision'
rules = {
'$MaxPrecision': 'Infinity',
}
messages = {
'precset': 'Cannot set `1` to `2`; value must be a positive number or Infinity.',
'preccon': 'Cannot set `1` such that $MaxPrecision < $MinPrecision.',
}
class Round(Builtin):
"""
<dl>
<dt>'Round[$expr$]'
<dd>rounds $expr$ to the nearest integer.
<dt>'Round[$expr$, $k$]'
<dd>rounds $expr$ to the closest multiple of $k$.
</dl>
>> Round[10.6]
= 11
>> Round[0.06, 0.1]
= 0.1
>> Round[0.04, 0.1]
= 0.
Constants can be rounded too
>> Round[Pi, .5]
= 3.
>> Round[Pi^2]
= 10
Round to exact value
>> Round[2.6, 1/3]
= 8 / 3
>> Round[10, Pi]
= 3 Pi
Round complex numbers
>> Round[6/(2 + 3 I)]
= 1 - I
>> Round[1 + 2 I, 2 I]
= 2 I
Round Negative numbers too
>> Round[-1.4]
= -1
Expressions other than numbers remain unevaluated:
>> Round[x]
= Round[x]
>> Round[1.5, k]
= Round[1.5, k]
"""
attributes = ('Listable', 'NumericFunction')
rules = {
'Round[expr_?NumericQ]': 'Round[Re[expr], 1] + I * Round[Im[expr], 1]',
'Round[expr_Complex, k_?RealNumberQ]': (
'Round[Re[expr], k] + I * Round[Im[expr], k]'),
}
def apply(self, expr, k, evaluation):
"Round[expr_?NumericQ, k_?NumericQ]"
n = Expression('Divide', expr, k).round_to_float(evaluation, permit_complex=True)
if n is None:
return
elif isinstance(n, complex):
n = round(n.real)
else:
n = round(n)
n = int(n)
return Expression('Times', Integer(n), k)
class Rationalize(Builtin):
'''
<dl>
<dt>'Rationalize[$x$]'
<dd>converts a real number $x$ to a nearby rational number.
<dt>'Rationalize[$x$, $dx$]'
<dd>finds the rational number within $dx$ of $x$ with the smallest denominator.
</dl>
>> Rationalize[2.2]
= 11 / 5
Not all numbers can be well approximated.
>> Rationalize[N[Pi]]
= 3.14159
Find the exact rational representation of 'N[Pi]'
>> Rationalize[N[Pi], 0]
= 245850922 / 78256779
#> Rationalize[1.6 + 0.8 I]
= 8 / 5 + 4 I / 5
#> Rationalize[N[Pi] + 0.8 I, 1*^-6]
= 355 / 113 + 4 I / 5
#> Rationalize[N[Pi] + 0.8 I, x]
: Tolerance specification x must be a non-negative number.
= Rationalize[3.14159 + 0.8 I, x]
#> Rationalize[N[Pi] + 0.8 I, -1]
: Tolerance specification -1 must be a non-negative number.
= Rationalize[3.14159 + 0.8 I, -1]
#> Rationalize[N[Pi] + 0.8 I, 0]
= 245850922 / 78256779 + 4 I / 5
#> Rationalize[17 / 7]
= 17 / 7
#> Rationalize[x]
= x
#> Table[Rationalize[E, 0.1^n], {n, 1, 10}]
= {8 / 3, 19 / 7, 87 / 32, 193 / 71, 1071 / 394, 2721 / 1001, 15062 / 5541, 23225 / 8544, 49171 / 18089, 419314 / 154257}
#> Rationalize[x, y]
: Tolerance specification y must be a non-negative number.
= Rationalize[x, y]
'''
messages = {
'tolnn': 'Tolerance specification `1` must be a non-negative number.',
}
rules = {
'Rationalize[z_Complex]': 'Rationalize[Re[z]] + I Rationalize[Im[z]]',
'Rationalize[z_Complex, dx_?Internal`RealValuedNumberQ]/;dx >= 0': 'Rationalize[Re[z], dx] + I Rationalize[Im[z], dx]',
}
def apply(self, x, evaluation):
'Rationalize[x_]'
py_x = x.to_sympy()
if py_x is None or (not py_x.is_number) or (not py_x.is_real):
return x
return from_sympy(self.find_approximant(py_x))
@staticmethod
def find_approximant(x):
c = 1e-4
it = sympy.ntheory.continued_fraction_convergents(sympy.ntheory.continued_fraction_iterator(x))
for i in it:
p, q = i.as_numer_denom()
tol = c / q**2
if abs(i - x) <= tol:
return i
if tol < machine_epsilon:
break
return x
@staticmethod
def find_exact(x):
p, q = x.as_numer_denom()
it = sympy.ntheory.continued_fraction_convergents(sympy.ntheory.continued_fraction_iterator(x))
for i in it:
p, q = i.as_numer_denom()
if abs(x - i) < machine_epsilon:
return i
def apply_dx(self, x, dx, evaluation):
'Rationalize[x_, dx_]'
py_x = x.to_sympy()
if py_x is None:
return x
py_dx = dx.to_sympy()
if py_dx is None or (not py_dx.is_number) or (not py_dx.is_real) or py_dx.is_negative:
return evaluation.message('Rationalize', 'tolnn', dx)
elif py_dx == 0:
return from_sympy(self.find_exact(py_x))
a = self.approx_interval_continued_fraction(py_x - py_dx, py_x + py_dx)
sym_x = sympy.ntheory.continued_fraction_reduce(a)
return Rational(sym_x)
@staticmethod
def approx_interval_continued_fraction(xmin, xmax):
result = []
a_gen = sympy.ntheory.continued_fraction_iterator(xmin)
b_gen = sympy.ntheory.continued_fraction_iterator(xmax)
while True:
a, b = next(a_gen), next(b_gen)
if a == b:
result.append(a)
else:
result.append(min(a, b) + 1)
break
return result
def chop(expr, delta=10.0 ** (-10.0)):
if isinstance(expr, Real):
if -delta < expr.get_float_value() < delta:
return Integer(0)
elif isinstance(expr, Complex) and expr.is_inexact():
real, imag = expr.real, expr.imag
if -delta < real.get_float_value() < delta:
real = Integer(0)
if -delta < imag.get_float_value() < delta:
imag = Integer(0)
return Complex(real, imag)
elif isinstance(expr, Expression):
return Expression(chop(expr.head), *[
chop(leaf) for leaf in expr.leaves])
return expr
class Chop(Builtin):
"""
<dl>
<dt>'Chop[$expr$]'
<dd>replaces floating point numbers close to 0 by 0.
<dt>'Chop[$expr$, $delta$]'
<dd>uses a tolerance of $delta$. The default tolerance is '10^-10'.
</dl>
>> Chop[10.0 ^ -16]
= 0
>> Chop[10.0 ^ -9]
= 1.*^-9
>> Chop[10 ^ -11 I]
= I / 100000000000
>> Chop[0. + 10 ^ -11 I]
= 0
"""
messages = {
'tolnn': "Tolerance specification a must be a non-negative number.",
}
rules = {
'Chop[expr_]': 'Chop[expr, 10^-10]',
}
def apply(self, expr, delta, evaluation):
'Chop[expr_, delta_:(10^-10)]'
delta = delta.round_to_float(evaluation)
if delta is None or delta < 0:
return evaluation.message('Chop', 'tolnn')
return chop(expr, delta=delta)
class NumericQ(Builtin):
"""
<dl>
<dt>'NumericQ[$expr$]'
<dd>tests whether $expr$ represents a numeric quantity.
</dl>
>> NumericQ[2]
= True
>> NumericQ[Sqrt[Pi]]
= True
>> NumberQ[Sqrt[Pi]]
= False
"""
def apply(self, expr, evaluation):
'NumericQ[expr_]'
def test(expr):
if isinstance(expr, Expression):
attr = evaluation.definitions.get_attributes(
expr.head.get_name())
return 'System`NumericFunction' in attr and all(
test(leaf) for leaf in expr.leaves)
else:
return expr.is_numeric()
return Symbol('True') if test(expr) else Symbol('False')
class RealValuedNumericQ(Builtin):
'''
#> Internal`RealValuedNumericQ /@ {1, N[Pi], 1/2, Sin[1.], Pi, 3/4, aa, I}
= {True, True, True, True, True, True, False, False}
'''
context = 'Internal`'
rules = {
'Internal`RealValuedNumericQ[x_]': 'Head[N[x]] === Real',
}
class RealValuedNumberQ(Builtin):
'''
#> Internal`RealValuedNumberQ /@ {1, N[Pi], 1/2, Sin[1.], Pi, 3/4, aa, I}
= {True, True, True, True, False, True, False, False}
'''
context = 'Internal`'
rules = {
'Internal`RealValuedNumberQ[x_Real]': 'True',
'Internal`RealValuedNumberQ[x_Integer]': 'True',
'Internal`RealValuedNumberQ[x_Rational]': 'True',
'Internal`RealValuedNumberQ[x_]': 'False',
}
class IntegerDigits(Builtin):
"""
<dl>
<dt>'IntegerDigits[$n$]'
<dd>returns a list of the base-10 digits in the integer $n$.
<dt>'IntegerDigits[$n$, $base$]'
<dd>returns a list of the base-$base$ digits in $n$.
<dt>'IntegerDigits[$n$, $base$, $length$]'
<dd>returns a list of length $length$, truncating or padding
with zeroes on the left as necessary.
</dl>
>> IntegerDigits[76543]
= {7, 6, 5, 4, 3}
The sign of $n$ is discarded:
>> IntegerDigits[-76543]
= {7, 6, 5, 4, 3}
>> IntegerDigits[15, 16]
= {15}
>> IntegerDigits[1234, 16]
= {4, 13, 2}
>> IntegerDigits[1234, 10, 5]
= {0, 1, 2, 3, 4}
#> IntegerDigits[1000, 10]
= {1, 0, 0, 0}
#> IntegerDigits[0]
= {0}
"""
attributes = ('Listable',)
messages = {
'int': 'Integer expected at position 1 in `1`',
'ibase': 'Base `1` is not an integer greater than 1.',
}
rules = {
'IntegerDigits[n_]': 'IntegerDigits[n, 10]',
}
def apply_len(self, n, base, length, evaluation):
'IntegerDigits[n_, base_, length_]'
if not(isinstance(length, Integer) and length.get_int_value() >= 0):
return evaluation.message('IntegerDigits', 'intnn')
return self.apply(n, base, evaluation,
nr_elements=length.get_int_value())
def apply(self, n, base, evaluation, nr_elements=None):
'IntegerDigits[n_, base_]'
if not(isinstance(n, Integer)):
return evaluation.message('IntegerDigits', 'int',
Expression('IntegerDigits', n, base))
if not(isinstance(base, Integer) and base.get_int_value() > 1):
return evaluation.message('IntegerDigits', 'ibase', base)
if nr_elements == 0:
# trivial case: we don't want any digits
return Expression('List')
digits = convert_int_to_digit_list(
n.get_int_value(), base.get_int_value())
if nr_elements is not None:
if len(digits) >= nr_elements:
# Truncate, preserving the digits on the right
digits = digits[-nr_elements:]
else:
# Pad with zeroes
digits = [0] * (nr_elements - len(digits)) + digits
return Expression('List', *digits)
class _ZLibHash: # make zlib hashes behave as if they were from hashlib
def __init__(self, fn):
self._bytes = b''
self._fn = fn
def update(self, bytes):
self._bytes += bytes
def hexdigest(self):
return format(self._fn(self._bytes), 'x')
class Hash(Builtin):
"""
<dl>
<dt>'Hash[$expr$]'
<dd>returns an integer hash for the given $expr$.
<dt>'Hash[$expr$, $type$]'
<dd>returns an integer hash of the specified $type$ for the given $expr$.</dd>
<dd>The types supported are "MD5", "Adler32", "CRC32", "SHA", "SHA224", "SHA256", "SHA384", and "SHA512".</dd>
</dl>
> Hash["The Adventures of Huckleberry Finn"]
= 213425047836523694663619736686226550816
> Hash["The Adventures of Huckleberry Finn", "SHA256"]
= 95092649594590384288057183408609254918934351811669818342876362244564858646638
> Hash[1/3]
= 56073172797010645108327809727054836008
> Hash[{a, b, {c, {d, e, f}}}]
= 135682164776235407777080772547528225284
> Hash[SomeHead[3.1415]]
= 58042316473471877315442015469706095084
>> Hash[{a, b, c}, "xyzstr"]
= Hash[{a, b, c}, xyzstr]
"""
rules = {
'Hash[expr_]': 'Hash[expr, "MD5"]',
}
attributes = ('Protected', 'ReadProtected')
# FIXME md2
_supported_hashes = {
'Adler32': lambda: _ZLibHash(zlib.adler32),
'CRC32': lambda: _ZLibHash(zlib.crc32),
'MD5': hashlib.md5,
'SHA': hashlib.sha1,
'SHA224': hashlib.sha224,
'SHA256': hashlib.sha256,
'SHA384': hashlib.sha384,
'SHA512': hashlib.sha512,
}
@staticmethod
def compute(user_hash, py_hashtype):
hash_func = Hash._supported_hashes.get(py_hashtype)
if hash_func is None: # unknown hash function?
return # in order to return original Expression
h = hash_func()
user_hash(h.update)
return from_python(int(h.hexdigest(), 16))
def apply(self, expr, hashtype, evaluation):
'Hash[expr_, hashtype_String]'
return Hash.compute(expr.user_hash, hashtype.get_string_value())
class TypeEscalation(Exception):
def __init__(self, mode):
self.mode = mode
class Fold(object):
# allows inherited classes to specify a single algorithm implementation that
# can be called with machine precision, arbitrary precision or symbolically.
ComputationFunctions = namedtuple(
'ComputationFunctions', ('sin', 'cos'))
FLOAT = 0
MPMATH = 1
SYMBOLIC = 2
math = {
FLOAT: ComputationFunctions(
cos=math.cos,
sin=math.sin,
),
MPMATH: ComputationFunctions(
cos=mpmath.cos,
sin=mpmath.sin,
),
SYMBOLIC: ComputationFunctions(
cos=lambda x: Expression('Cos', x),
sin=lambda x: Expression('Sin', x),
)
}
operands = {
FLOAT: lambda x: None if x is None else x.round_to_float(),
MPMATH: lambda x: None if x is None else x.to_mpmath(),
SYMBOLIC: lambda x: x,
}
def _operands(self, state, steps):
raise NotImplementedError
def _fold(self, state, steps, math):
raise NotImplementedError
def _spans(self, operands):
spans = {}
k = 0
j = 0
for mode in (self.FLOAT, self.MPMATH):
for i, operand in enumerate(operands[k:]):
if operand[0] > mode:
break
j = i + k + 1
if k == 0 and j == 1: # only init state? then ignore.
j = 0
spans[mode] = slice(k, j)
k = j
spans[self.SYMBOLIC] = slice(k, len(operands))
return spans
def fold(self, x, l):
        # computes fold(x, l) with the internal _fold function. will start
        # its evaluation at machine precision, and will escalate to arbitrary
        # precision or symbolic evaluation only if necessary. folded
        # items already computed are carried over to new evaluation modes.
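        # Rough flow (a sketch of the mechanism, not extra behaviour): _operands()
        # tags each operand with the cheapest mode that can represent it
        # (FLOAT < MPMATH < SYMBOLIC), _spans() cuts that list into contiguous
        # per-mode slices, and each slice is folded with the matching math
        # functions while the partial result is carried forward between modes.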
yield x # initial state
init = None
operands = list(self._operands(x, l))
spans = self._spans(operands)
for mode in (self.FLOAT, self.MPMATH, self.SYMBOLIC):
s_operands = [y[1:] for y in operands[spans[mode]]]
if not s_operands:
continue
if mode == self.MPMATH:
from mathics.core.numbers import min_prec
precision = min_prec(*[t for t in chain(*s_operands) if t is not None])
working_precision = mpmath.workprec
else:
@contextmanager
def working_precision(_):
yield
precision = None
if mode == self.FLOAT:
def out(z):
return Real(z)
elif mode == self.MPMATH:
def out(z):
return Real(z, precision)
else:
def out(z):
return z
as_operand = self.operands.get(mode)
def converted_operands():
for y in s_operands:
yield tuple(as_operand(t) for t in y)
with working_precision(precision):
c_operands = converted_operands()
if init is not None:
c_init = tuple((None if t is None else as_operand(from_python(t))) for t in init)
else:
c_init = next(c_operands)
init = tuple((None if t is None else out(t)) for t in c_init)
generator = self._fold(
c_init, c_operands, self.math.get(mode))
for y in generator:
y = tuple(out(t) for t in y)
yield y
init = y
|
gpl-3.0
| -3,055,016,242,957,004,300 | 26.541992 | 176 | 0.556962 | false |
dahebolangkuan/ToughRADIUS
|
radiusd/utils.py
|
1
|
14226
|
#!/usr/bin/env python
#coding=utf-8
from pyrad import tools
from twisted.internet.defer import Deferred
from pyrad.packet import AuthPacket
from pyrad.packet import AcctPacket
from pyrad.packet import CoAPacket
from pyrad.packet import AccessRequest
from pyrad.packet import AccessAccept
from pyrad.packet import AccountingRequest
from pyrad.packet import AccountingResponse
from pyrad.packet import CoARequest
from twisted.python import log
from Crypto.Cipher import AES
from Crypto import Random
import binascii
import base64
import datetime
import hashlib
import six
md5_constructor = hashlib.md5
aes_key = 't_o_u_g_h_radius'
PacketStatusTypeMap = {
1 : 'AccessRequest',
2 : 'AccessAccept',
3 : 'AccessReject',
4 : 'AccountingRequest',
5 : 'AccountingResponse',
40 : 'DisconnectRequest',
41 : 'DisconnectACK',
42 : 'DisconnectNAK',
43 : 'CoARequest',
44 : 'CoAACK',
45 : 'CoANAK',
}
def ndebug():
import pdb
pdb.set_trace()
class AESCipher:
def __init__(self, key):
self.bs = 32
self.key = hashlib.sha256(key.encode()).digest()
def encrypt(self, raw):
raw = self._pad(raw)
iv = Random.new().read(AES.block_size)
cipher = AES.new(self.key, AES.MODE_CBC, iv)
return base64.b64encode(iv + cipher.encrypt(raw))
def decrypt(self, enc):
enc = base64.b64decode(enc)
iv = enc[:AES.block_size]
cipher = AES.new(self.key, AES.MODE_CBC, iv)
return self._unpad(cipher.decrypt(enc[AES.block_size:])).decode('utf-8')
def _pad(self, s):
return s + (self.bs - len(s) % self.bs) * chr(self.bs - len(s) % self.bs)
@staticmethod
def _unpad(s):
return s[:-ord(s[len(s)-1:])]
aescipher = AESCipher(aes_key)
encrypt = aescipher.encrypt
decrypt = aescipher.decrypt
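# Usage sketch: token = encrypt('user-password'); plain = decrypt(token)
# (AES-256-CBC through the shared module-level AESCipher). Note that
# update_secret() below rebinds `aescipher` but not these two aliases,
# which stay bound to the instance created at import time.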
def update_secret(secret):
global aescipher
aescipher = AESCipher(secret)
def is_expire(dstr):
if not dstr:
return False
expire_date = datetime.datetime.strptime("%s 23:59:59"%dstr,"%Y-%m-%d %H:%M:%S")
now = datetime.datetime.now()
return expire_date < now
class Storage(dict):
def __getattr__(self, key):
try:
return self[key]
except KeyError, k:
raise AttributeError, k
def __setattr__(self, key, value):
self[key] = value
def __delattr__(self, key):
try:
del self[key]
except KeyError, k:
raise AttributeError, k
def __repr__(self):
return '<Storage ' + dict.__repr__(self) + '>'
class AuthDelay():
def __init__(self,reject_delay=0):
self.reject_delay = reject_delay
self.rosters = {}
self.delay_cache = []
def delay_len(self):
return len(self.delay_cache)
def add_roster(self,mac_addr):
if not mac_addr:
return
if mac_addr not in self.rosters:
self.rosters.setdefault(mac_addr,1)
else:
self.rosters[mac_addr] += 1
def del_roster(self,mac_addr):
if mac_addr in self.rosters:
del self.rosters[mac_addr]
def over_reject(self,mac_addr):
return self.reject_delay>0 and self.rosters.get(mac_addr,0)>6
def add_delay_reject(self,reject):
self.delay_cache.append(reject)
def get_delay_reject(self,idx):
return self.delay_cache[idx]
def pop_delay_reject(self):
try:
return self.delay_cache.pop(0)
except:
return None
def format_packet_str(pkt):
attr_keys = pkt.keys()
_str = "\nRadius Packet::%s"%PacketStatusTypeMap[pkt.code]
_str += "\nhost:%s:%s" % pkt.source
_str += "\nid:%s" % pkt.id
_str += "\ncode:%s" % pkt.code
_str += "\nAttributes: "
for attr in attr_keys:
try:
_str += "\n\t%s: %s" % (attr, pkt[attr][0])
except:
try:_str += "\n\t%s: no display" % (attr)
except:pass
return _str
class CoAPacket2(CoAPacket):
def __init__(self, code=CoARequest, id=None, secret=six.b(''),
authenticator=None, **attributes):
CoAPacket.__init__(self, code, id, secret, authenticator, **attributes)
self.deferred = Deferred()
self.source_user = None
self.vendor_id = 0
self.client_macaddr = None
self.created = datetime.datetime.now()
def format_str(self):
return format_packet_str(self)
class AuthPacket2(AuthPacket):
def __init__(self, code=AccessRequest, id=None, secret=six.b(''),
authenticator=None, **attributes):
AuthPacket.__init__(self, code, id, secret, authenticator, **attributes)
self.deferred = Deferred()
self.source_user = None
self.vendor_id = 0
self.vlanid = 0
self.vlanid2 = 0
self.client_macaddr = None
self.created = datetime.datetime.now()
def format_str(self):
return format_packet_str(self)
def __str__(self):
_str = PacketStatusTypeMap[self.code]
_str += " host=%s:%s" % self.source
_str += ",id=%s"%self.id
if self.code == 1:
_str += ",username=%s,mac_addr=%s" % (self.get_user_name(),self.get_mac_addr())
if 'Reply-Message' in self:
_str += ',Reply-Message="%s"' % self['Reply-Message'][0]
return _str
def CreateReply(self, msg=None,**attributes):
reply = AuthPacket2(AccessAccept, self.id,
self.secret, self.authenticator, dict=self.dict,
**attributes)
if msg:
reply.set_reply_msg(tools.EncodeString(msg))
reply.source_user = self.get_user_name()
return reply
def set_reply_msg(self,msg):
if msg:self.AddAttribute(18,msg)
def set_framed_ip_addr(self,ipaddr):
if ipaddr:self.AddAttribute(8,tools.EncodeAddress(ipaddr))
def set_session_timeout(self,timeout):
if timeout:self.AddAttribute(27,tools.EncodeInteger(timeout))
def get_nas_addr(self):
try:return tools.DecodeAddress(self.get(4)[0])
except:return None
def get_mac_addr(self):
if self.client_macaddr:return self.client_macaddr
try:return tools.DecodeString(self.get(31)[0]).replace("-",":")
except:return None
def get_user_name(self):
try:
user_name = tools.DecodeString(self.get(1)[0])
if "@" in user_name:
user_name = user_name[:user_name.index("@")]
return user_name
except:
return None
def get_domain(self):
try:
user_name = tools.DecodeString(self.get(1)[0])
if "@" in user_name:
return user_name[user_name.index("@")+1:]
except:
return None
def get_vlanids(self):
return self.vlanid,self.vlanid2
def get_passwd(self):
try:return self.PwDecrypt(self.get(2)[0])
except:
import traceback
traceback.print_exc()
return None
def get_chappwd(self):
try:return tools.DecodeOctets(self.get(3)[0])
except:return None
def verifyChapEcrypt(self,userpwd):
if isinstance(userpwd, six.text_type):
userpwd = userpwd.strip().encode('utf-8')
_password = self.get_chappwd()
if len(_password) != 17:
return False
chapid = _password[0]
password = _password[1:]
if not self.authenticator:
self.authenticator = self.CreateAuthenticator()
challenge = self.authenticator
if 'CHAP-Challenge' in self:
challenge = self['CHAP-Challenge'][0]
_pwd = md5_constructor("%s%s%s"%(chapid,userpwd,challenge)).digest()
return password == _pwd
def is_valid_pwd(self,userpwd):
if not self.get_chappwd():
return userpwd == self.get_passwd()
else:
return self.verifyChapEcrypt(userpwd)
class AcctPacket2(AcctPacket):
def __init__(self, code=AccountingRequest, id=None, secret=six.b(''),
authenticator=None, **attributes):
AcctPacket.__init__(self, code, id, secret, authenticator, **attributes)
self.deferred = Deferred()
self.source_user = None
self.vendor_id = 0
self.client_macaddr = None
self.ticket = {}
self.created = datetime.datetime.now()
def format_str(self):
return format_packet_str(self)
def __str__(self):
_str = PacketStatusTypeMap[self.code]
_str += " host=%s:%s" % self.source
_str += ",id=%s"%self.id
if self.code == 4:
_str += ",username=%s,mac_addr=%s" % (self.get_user_name(),self.get_mac_addr())
return _str
def CreateReply(self,**attributes):
reply = AcctPacket2(AccountingResponse, self.id,
self.secret, self.authenticator, dict=self.dict,
**attributes)
reply.source_user = self.get_user_name()
return reply
def get_user_name(self):
try:
user_name = tools.DecodeString(self.get(1)[0])
if "@" in user_name:
return user_name[:user_name.index("@")]
else:
return user_name
except:
return None
def get_mac_addr(self):
if self.client_macaddr:return self.client_macaddr
try:return tools.DecodeString(self.get(31)[0]).replace("-",":")
except:return None
def get_nas_addr(self):
try:return tools.DecodeAddress(self.get(4)[0])
except:return None
def get_nas_port(self):
try:return tools.DecodeInteger(self.get(5)[0]) or 0
except:return 0
def get_service_type(self):
try:return tools.DecodeInteger(self.get(0)[0]) or 0
except:return 0
def get_framed_ipaddr(self):
try:return tools.DecodeAddress(self.get(8)[0])
except:return None
def get_framed_netmask(self):
try:return tools.DecodeAddress(self.get(9)[0])
except:return None
def get_nas_class(self):
try:return tools.DecodeString(self.get(25)[0])
except:return None
def get_session_timeout(self):
try:return tools.DecodeInteger(self.get(27)[0]) or 0
except:return 0
def get_calling_stationid(self):
try:return tools.DecodeString(self.get(31)[0])
except:return None
def get_acct_status_type(self):
try:return tools.DecodeInteger(self.get(40)[0])
except:return None
def get_acct_input_octets(self):
try:return tools.DecodeInteger(self.get(42)[0]) or 0
except:return 0
def get_acct_output_octets(self):
try:return tools.DecodeInteger(self.get(43)[0]) or 0
except:return 0
def get_acct_sessionid(self):
try:return tools.DecodeString(self.get(44)[0])
except:return None
def get_acct_sessiontime(self):
try:return tools.DecodeInteger(self.get(46)[0]) or 0
except:return 0
def get_acct_input_packets(self):
try:return tools.DecodeInteger(self.get(47)[0]) or 0
except:return 0
def get_acct_output_packets(self):
try:return tools.DecodeInteger(self.get(48)[0]) or 0
except:return 0
def get_acct_terminate_cause(self):
try:return tools.DecodeInteger(self.get(49)[0]) or 0
except:return 0
def get_acct_input_gigawords(self):
try:return tools.DecodeInteger(self.get(52)[0]) or 0
except:return 0
def get_acct_output_gigawords(self):
try:return tools.DecodeInteger(self.get(53)[0]) or 0
except:return 0
def get_event_timestamp(self,timetype=0):
try:
_time = tools.DecodeDate(self.get(55)[0])
if timetype == 0:
return datetime.datetime.fromtimestamp(_time).strftime("%Y-%m-%d %H:%M:%S")
else:
return datetime.datetime.fromtimestamp(_time-(8*3600)).strftime("%Y-%m-%d %H:%M:%S")
except:
return None
def get_nas_port_type(self):
try:return tools.DecodeInteger(self.get(61)[0]) or 0
except:return 0
def get_nas_portid(self):
try:return tools.DecodeString(self.get(87)[0])
except:return None
def get_ticket(self):
if self.ticket:return self.ticket
self.ticket = Storage(
account_number = self.get_user_name(),
mac_addr = self.get_mac_addr(),
nas_addr = self.get_nas_addr(),
nas_port = self.get_nas_port(),
service_type = self.get_service_type(),
framed_ipaddr = self.get_framed_ipaddr(),
framed_netmask = self.get_framed_netmask(),
nas_class = self.get_nas_class(),
session_timeout = self.get_session_timeout(),
calling_stationid = self.get_calling_stationid(),
acct_status_type = self.get_acct_status_type(),
acct_input_octets = self.get_acct_input_octets(),
acct_output_octets = self.get_acct_output_octets(),
acct_session_id = self.get_acct_sessionid(),
acct_session_time = self.get_acct_sessiontime(),
acct_input_packets = self.get_acct_input_packets(),
acct_output_packets = self.get_acct_output_packets(),
acct_terminate_cause = self.get_acct_terminate_cause(),
acct_input_gigawords = self.get_acct_input_gigawords(),
acct_output_gigawords = self.get_acct_output_gigawords(),
event_timestamp = self.get_event_timestamp(),
nas_port_type=self.get_nas_port_type(),
nas_port_id=self.get_nas_portid()
)
return self.ticket
if __name__ == '__main__':
print AES.block_size
a = encrypt('888888')
print a
print decrypt(a)
|
bsd-2-clause
| 8,013,479,071,721,010,000 | 29.928261 | 100 | 0.575566 | false |
andyvand/cygsystem-config-llvm
|
src/BlockDeviceModel.py
|
1
|
3363
|
import fdisk_wrapper
from Partition import ID_EMPTY, ID_UNKNOWN
from Partition import ID_EXTENDS, ID_SWAPS
from Partition import PARTITION_IDs
from Partition import ID_LINUX_LVM
from BlockDevice import *
class BlockDeviceModel:
def __init__(self):
self.__views = dict()
self.__devices = dict()
fd = fdisk_wrapper.FDisk()
for devname in fd.getDeviceNames():
try:
bd = BlockDevice(devname)
self.__devices[devname] = bd
except:
pass
def reload(self):
for devname in self.__devices:
self.reloadDisk(devname)
def reloadDisk(self, devname):
self.__devices[devname].reload()
self.__notifyViews()
# returns {devname : [Segment1, Segment2, Segment3, ...], ...}
def getDevices(self):
data = dict()
for devname in self.__devices:
data[devname] = self.getDevice(devname)
return data
# returns [Segment1, Segment2, Segment3, ...]
def getDevice(self, devname):
return self.__devices[devname].getSegments()
def getPartition(self, devname, partnum):
segs = self.getDevice(devname)
return self.__getPartition(partnum, segs)
def __getPartition(self, partnum, segs):
for seg in segs:
if seg.id == ID_EMPTY:
continue
if seg.num == partnum:
return seg
elif seg.id in ID_EXTENDS:
part = self.__getPartition(partnum, seg.children)
if part != None:
return part
return None
def add(self, devname, part):
beg = part.beg
end = part.end
id = part.id
boot = part.bootable
num = self.__devices[devname].addAlign(beg, end, id, boot, part.num)
self.__notifyViews()
return num
def addNoAlign(self, devname, part):
beg = part.beg
end = part.end
id = part.id
boot = part.bootable
num = self.__devices[devname].addNoAlign(beg, end, id, boot, part.num)
self.__notifyViews()
return num
def remove(self, devname, partnum):
self.__devices[devname].remove(partnum)
self.__notifyViews()
### commit changes to disk !!! ###
def saveTable(self, devname):
self.__devices[devname].saveTable()
self.reloadDisk(devname)
self.__notifyViews()
def renumberExtends(self, devname):
self.__devices[devname].renumberExtends()
self.__notifyViews()
def getSectorSize(self, devname):
return self.__devices[devname].sectorSize
def printDevicesDebug(self):
for devname in self.__devices:
self.__devices[devname].printout()
def printDevices(self):
devs = self.getDevices()
for dev in devs:
print 'device:', dev
for part in devs[dev]:
part.printout()
# will call obj.funct(self.getDevices()) on any changes
def registerView(self, obj, funct):
self.__views[obj] = funct
def removeView(self, obj):
del(self.__views[obj])
def __notifyViews(self):
for obj in self.__views:
(self.__views[obj])(obj, self.getDevices())
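# Minimal sketch of the observer hook-up (class and method names are invented
# for illustration): a view registers a callback that receives the full
# {devname: [Segment, ...]} mapping whenever the model changes.
#   model = BlockDeviceModel()
#   class DiskView(object):
#       def refresh(self, devices):
#           print 'devices changed:', devices.keys()
#   view = DiskView()
#   model.registerView(view, DiskView.refresh)   # later: model.removeView(view)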
|
gpl-2.0
| 9,137,078,317,026,784,000 | 28.761062 | 78 | 0.558727 | false |
DavisPoGo/Monocle
|
monocle/shared.py
|
1
|
2850
|
from logging import getLogger, LoggerAdapter
from concurrent.futures import ThreadPoolExecutor
from time import time
from asyncio import get_event_loop
from aiohttp import ClientSession
from aiopogo import json_dumps
from aiopogo.session import SESSIONS
LOOP = get_event_loop()
class SessionManager:
@classmethod
def get(cls):
try:
return cls._session
except AttributeError:
cls._session = ClientSession(connector=SESSIONS.get_connector(False),
loop=LOOP,
conn_timeout=5.0,
read_timeout=30.0,
connector_owner=False,
raise_for_status=True,
json_serialize=json_dumps)
return cls._session
@classmethod
def close(cls):
try:
cls._session.close()
except Exception:
pass
class Message:
def __init__(self, fmt, args):
self.fmt = fmt
self.args = args
def __str__(self):
return self.fmt.format(*self.args)
class StyleAdapter(LoggerAdapter):
def __init__(self, logger, extra=None):
super(StyleAdapter, self).__init__(logger, extra or {})
def log(self, level, msg, *args, **kwargs):
if self.isEnabledFor(level):
msg, kwargs = self.process(msg, kwargs)
self.logger._log(level, Message(msg, args), (), **kwargs)
def get_logger(name=None):
return StyleAdapter(getLogger(name))
def call_later(delay, cb, *args):
"""Thread-safe wrapper for call_later"""
try:
return LOOP.call_soon_threadsafe(LOOP.call_later, delay, cb, *args)
except RuntimeError:
if not LOOP.is_closed():
raise
def call_at(when, cb, *args):
"""Run call back at the unix time given"""
delay = when - time()
return call_later(delay, cb, *args)
async def run_threaded(cb, *args):
with ThreadPoolExecutor(max_workers=1) as x:
return await LOOP.run_in_executor(x, cb, *args)
class TtlCache:
"""Simple cache for storing Pokemon with unknown expiration times
It's used in order not to make as many queries to the database.
It schedules sightings to be removed an hour after being seen.
"""
def __init__(self,ttl=300):
self.store = {}
self.ttl = ttl
def __len__(self):
return len(self.store)
def add(self, key):
now = time()
self.store[key] = True
call_at(now + self.ttl, self.remove, key)
def __contains__(self, key):
return key in self.store
def remove(self, key):
if key in self.store:
del self.store[key]
def items(self):
return self.store.items()
|
mit
| -696,705,582,295,584,500 | 25.886792 | 81 | 0.573333 | false |
gdebure/cream
|
projects/urls.py
|
1
|
3069
|
from django.conf.urls import url
from django.contrib.auth.decorators import permission_required, login_required
from django.views.generic import DetailView, ListView, UpdateView, CreateView, DeleteView
from projects.models import Project, Deliverable, DeliverableVolume
from projects.views import ProjectListView, FilteredProjectListView, ProjectDetailView, ProjectUpdateView, ProjectCreateView, ProjectDeleteView
from projects.views import DeliverableListView, DeliverableDetailView, DeliverableUpdateView, DeliverableCreateView, DeliverableDeleteView, AddDeliverableFromProjectView
from projects.views import AddDeliverableVolumeView, DeliverableVolumeUpdateView
from qualifications.views import AddPositionFromProjectView
urlpatterns = [
##################################
# Projects
url(r'^projects/$', ProjectListView.as_view(), name='projects_list'),
url(r'^projects/(?P<pk>\d+)/$', ProjectDetailView.as_view(), name='project_detail'),
url(r'^projects/create/$', ProjectCreateView.as_view(), name='create_project'),
url(r'^projects/(?P<pk>\d+)/update/$', ProjectUpdateView.as_view(), name='update_project'),
url(r'^projects/(?P<pk>\d+)/delete/$', ProjectDeleteView.as_view(), name='delete_project'),
# Add deliverable to a project
url(r'^projects/(?P<pk>\d+)/add_deliverable/$', AddDeliverableFromProjectView.as_view(), name='add_deliverable'),
# Add position to a project
url(r'^projects/(?P<pk>\d+)/add_position/$', AddPositionFromProjectView.as_view(), name='add_projectposition'),
url(r'^projects/(?P<filter>.+)/$', FilteredProjectListView.as_view(),name='projects_list_status'),
##################################
##################################
# Deliverables
url(r'^deliverables/$', DeliverableListView.as_view(), name='deliverables_list' ),
url(r'^deliverables/(?P<pk>\d+)/$', DeliverableDetailView.as_view(), name='deliverable_detail'),
url(r'^deliverables/create/$', DeliverableCreateView.as_view(), name='create_deliverable'),
url(r'^deliverables/(?P<pk>\d+)/update/$', DeliverableUpdateView.as_view(), name='update_deliverable'),
url(r'^deliverables/(?P<pk>\d+)/delete/$', DeliverableDeleteView.as_view(), name='delete_deliverable'),
# Add volume to a deliverable
url(r'^deliverables/(?P<pk>\d+)/add_volume/$', AddDeliverableVolumeView.as_view(), name='add_deliverablevolume'),
##################################
##################################
# Deliverable Volumes
url(r'^deliverablevolumes/(?P<pk>\d+)/$', login_required()(DetailView.as_view( model=DeliverableVolume, template_name='deliverablevolume_detail.html', )), name='deliverablevolume_detail'),
url(r'^deliverablevolumes/(?P<pk>\d+)/update/$', DeliverableVolumeUpdateView.as_view(), name='update_deliverablevolume'),
#url(r'^deliverablevolumes/(?P<pk>\d+)/delete/$', permission_required('projects.delete_deliverablevolume')(DeleteDeliverableVolumeView.as_view()), name='delete_deliverablevolume'),
##################################
]
|
gpl-3.0
| 377,118,828,903,047,900 | 61.653061 | 192 | 0.6797 | false |
sgordon007/jcvi_062915
|
graphics/tree.py
|
1
|
11153
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import sys
import os.path as op
import logging
from ete2 import Tree
from jcvi.formats.sizes import Sizes
from jcvi.formats.base import DictFile
from jcvi.graphics.base import Rectangle, plt, savefig
from jcvi.graphics.glyph import ExonGlyph, get_setups
from jcvi.apps.base import OptionParser, glob
def truncate_name(name, rule=None):
"""
shorten taxa names for tree display
Options of rule. This only affects tree display.
- headn (eg. head3 truncates first 3 chars)
- oheadn (eg. ohead3 retains only the first 3 chars)
- tailn (eg. tail3 truncates last 3 chars)
- otailn (eg. otail3 retains only the last 3 chars)
n = 1 ~ 99
"""
import re
if rule is None:
return name
k = re.search("(?<=^head)[0-9]{1,2}$", rule)
if k:
k = k.group(0)
tname = name[int(k):]
else:
k = re.search("(?<=^ohead)[0-9]{1,2}$", rule)
if k:
k = k.group(0)
tname = name[:int(k)]
else:
k = re.search("(?<=^tail)[0-9]{1,2}$", rule)
if k:
k = k.group(0)
tname = name[:-int(k)]
else:
k = re.search("(?<=^otail)[0-9]{1,2}$", rule)
if k:
k = k.group(0)
tname = name[-int(k):]
else:
print >>sys.stderr, truncate_name.__doc__
raise ValueError('Wrong rule for truncation!')
return tname
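# A few illustrative truncations (the input name is made up), matching the
# rules documented in the docstring above:
#   truncate_name("Arabidopsis", rule="head3")   # -> "bidopsis" (drop first 3 chars)
#   truncate_name("Arabidopsis", rule="ohead3")  # -> "Ara"      (keep only first 3)
#   truncate_name("Arabidopsis", rule="tail3")   # -> "Arabidop" (drop last 3 chars)
#   truncate_name("Arabidopsis", rule="otail3")  # -> "sis"      (keep only last 3)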
def decode_name(name, barcodemap):
"""
Rename a seq/taxon name, typically for tree display,
according to a barcode map given in a dictionary.
By definition, barcodes should be distinctive.
"""
for barcode in barcodemap:
if barcode in name:
return barcodemap[barcode]
return name
def draw_tree(ax, tx, rmargin=.3, leafcolor="k", supportcolor="k",
outgroup=None, reroot=True, gffdir=None, sizes=None,
trunc_name=None, SH=None, scutoff=0, barcodefile=None,
leafcolorfile=None, leaffont=12):
"""
main function for drawing phylogenetic tree
"""
t = Tree(tx)
if reroot:
if outgroup:
R = t.get_common_ancestor(*outgroup)
else:
# Calculate the midpoint node
R = t.get_midpoint_outgroup()
if R != t:
t.set_outgroup(R)
farthest, max_dist = t.get_farthest_leaf()
margin = .05
xstart = margin
ystart = 1 - margin
canvas = 1 - rmargin - 2 * margin
tip = .005
# scale the tree
scale = canvas / max_dist
num_leaves = len(t.get_leaf_names())
yinterval = canvas / (num_leaves + 1)
# get exons structures, if any
structures = {}
if gffdir:
gffiles = glob("{0}/*.gff*".format(gffdir))
setups, ratio = get_setups(gffiles, canvas=rmargin / 2, noUTR=True)
structures = dict((a, (b, c)) for a, b, c in setups)
if sizes:
sizes = Sizes(sizes).mapping
if barcodefile:
barcodemap = DictFile(barcodefile, delimiter="\t")
if leafcolorfile:
leafcolors = DictFile(leafcolorfile, delimiter="\t")
coords = {}
i = 0
for n in t.traverse("postorder"):
dist = n.get_distance(t)
xx = xstart + scale * dist
if n.is_leaf():
yy = ystart - i * yinterval
i += 1
if trunc_name:
name = truncate_name(n.name, rule=trunc_name)
else:
name = n.name
if barcodefile:
name = decode_name(name, barcodemap)
sname = name.replace("_", "-")
try:
lc = leafcolors[n.name]
except Exception:
lc = leafcolor
else:
# if color is given as "R,G,B"
if "," in lc:
lc = map(float, lc.split(","))
ax.text(xx + tip, yy, sname, va="center",
fontstyle="italic", size=leaffont, color=lc)
gname = n.name.split("_")[0]
if gname in structures:
mrnabed, cdsbeds = structures[gname]
ExonGlyph(ax, 1 - rmargin / 2, yy, mrnabed, cdsbeds,
align="right", ratio=ratio)
if sizes and gname in sizes:
size = sizes[gname]
size = size / 3 - 1 # base pair converted to amino acid
size = "{0}aa".format(size)
ax.text(1 - rmargin / 2 + tip, yy, size, size=leaffont)
else:
children = [coords[x] for x in n.get_children()]
children_x, children_y = zip(*children)
min_y, max_y = min(children_y), max(children_y)
# plot the vertical bar
ax.plot((xx, xx), (min_y, max_y), "k-")
# plot the horizontal bar
for cx, cy in children:
ax.plot((xx, cx), (cy, cy), "k-")
yy = sum(children_y) * 1. / len(children_y)
support = n.support
if support > 1:
support = support / 100.
if not n.is_root():
if support > scutoff / 100.:
ax.text(xx, yy+.005, "{0:d}".format(int(abs(support * 100))),
ha="right", size=leaffont, color=supportcolor)
coords[n] = (xx, yy)
# scale bar
br = .1
x1 = xstart + .1
x2 = x1 + br * scale
yy = ystart - i * yinterval
ax.plot([x1, x1], [yy - tip, yy + tip], "k-")
ax.plot([x2, x2], [yy - tip, yy + tip], "k-")
ax.plot([x1, x2], [yy, yy], "k-")
ax.text((x1 + x2) / 2, yy - tip, "{0:g}".format(br),
va="top", ha="center", size=leaffont)
if SH is not None:
xs = x1
ys = (margin + yy) / 2.
ax.text(xs, ys, "SH test against ref tree: {0}"\
.format(SH), ha="left", size=leaffont, color="g")
def read_trees(tree):
from urlparse import parse_qs
from jcvi.formats.base import read_block
trees = []
fp = open(tree)
for header, tx in read_block(fp, "#"):
header = parse_qs(header[1:])
label = header["label"][0].strip("\"")
outgroup = header["outgroup"]
trees.append((label, outgroup, "".join(tx)))
return trees
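# The block format read_trees expects (the content below is invented for
# illustration): each tree starts with a query-string style header line and is
# followed by its newick text, e.g.
#   #label="rice clade"&outgroup=Os01g0100100&outgroup=Sb01g000200
#   ((Os01g0100100:0.1,Sb01g000200:0.2):0.05,Os02g0100200:0.3);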
def draw_geoscale(ax, minx=0, maxx=175):
"""
Draw geological epoch on million year ago (mya) scale.
"""
a, b = .1, .6 # Correspond to 200mya and 0mya
cv = lambda x: b - (x - b) / (maxx - minx) * (b - a)
ax.plot((a, b), (.5, .5), "k-")
tick = .015
for mya in xrange(maxx - 25, 0, -25):
p = cv(mya)
ax.plot((p, p), (.5, .5 - tick), "k-")
ax.text(p, .5 - 2.5 * tick, str(mya), ha="center", va="center")
ax.text((a + b) / 2, .5 - 5 * tick, "Time before present (million years)",
ha="center", va="center")
# Source:
# http://www.weston.org/schools/ms/biologyweb/evolution/handouts/GSAchron09.jpg
Geo = (("Neogene", 2.6, 23.0, "#fee400"),
("Paleogene", 23.0, 65.5, "#ff9a65"),
("Cretaceous", 65.5, 145.5, "#80ff40"),
("Jurassic", 145.5, 201.6, "#33fff3"))
h = .05
for era, start, end, color in Geo:
start, end = cv(start), cv(end)
end = max(a, end)
p = Rectangle((end, .5 + tick / 2), abs(start - end), h, lw=1, ec="w", fc=color)
ax.text((start + end) / 2, .5 + (tick + h) / 2, era,
ha="center", va="center", size=9)
ax.add_patch(p)
def main(args):
"""
%prog newicktree
Plot Newick formatted tree. The gene structure can be plotted along if
--gffdir is given. The gff file needs to be `genename.gff`. If --sizes is
on, also show the number of amino acids.
With --barcode a mapping file can be provided to convert seq names to
eg. species names, useful in unified tree display. This file should have
distinctive barcodes in column1 and new names in column2, tab delimited.
"""
p = OptionParser(main.__doc__)
p.add_option("--outgroup", help="Outgroup for rerooting the tree. " + \
"Use comma to separate multiple taxa.")
p.add_option("--noreroot", default=False, action="store_true", \
help="Don't reroot the input tree [default: %default]")
p.add_option("--rmargin", default=.3, type="float",
help="Set blank rmargin to the right [default: %default]")
p.add_option("--gffdir", default=None,
help="The directory that contain GFF files [default: %default]")
p.add_option("--sizes", default=None,
help="The FASTA file or the sizes file [default: %default]")
p.add_option("--SH", default=None, type="string",
help="SH test p-value [default: %default]")
p.add_option("--scutoff", default=0, type="int",
help="cutoff for displaying node support, 0-100 [default: %default]")
p.add_option("--barcode", default=None,
help="path to seq names barcode mapping file: " \
"barcode<tab>new_name [default: %default]")
p.add_option("--leafcolor", default="k",
help="Font color for the OTUs, or path to a file " \
"containing color mappings: leafname<tab>color [default: %default]")
p.add_option("--leaffont", default=12, help="Font size for the OTUs")
p.add_option("--geoscale", default=False, action="store_true",
help="Plot geological scale")
opts, args, iopts = p.set_image_options(args, figsize="8x6")
if len(args) != 1:
sys.exit(not p.print_help())
datafile, = args
outgroup = None
reroot = not opts.noreroot
if opts.outgroup:
outgroup = opts.outgroup.split(",")
if datafile == "demo":
tx = """(((Os02g0681100:0.1151,Sb04g031800:0.11220)1.0:0.0537,
(Os04g0578800:0.04318,Sb06g026210:0.04798)-1.0:0.08870)1.0:0.06985,
((Os03g0124100:0.08845,Sb01g048930:0.09055)1.0:0.05332,
(Os10g0534700:0.06592,Sb01g030630:0.04824)-1.0:0.07886):0.09389);"""
else:
logging.debug("Load tree file `{0}`.".format(datafile))
tx = open(datafile).read()
pf = datafile.rsplit(".", 1)[0]
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])
if opts.geoscale:
draw_geoscale(root)
else:
if op.isfile(opts.leafcolor):
leafcolor = "k"
leafcolorfile = opts.leafcolor
else:
leafcolor = opts.leafcolor
leafcolorfile = None
draw_tree(root, tx, rmargin=opts.rmargin, leafcolor=leafcolor, \
outgroup=outgroup, reroot=reroot, gffdir=opts.gffdir, \
sizes=opts.sizes, SH=opts.SH, scutoff=opts.scutoff, \
barcodefile=opts.barcode, leafcolorfile=leafcolorfile,
leaffont=opts.leaffont)
root.set_xlim(0, 1)
root.set_ylim(0, 1)
root.set_axis_off()
image_name = pf + "." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts)
if __name__ == '__main__':
main(sys.argv[1:])
|
bsd-2-clause
| 2,709,290,243,902,222,300 | 32.193452 | 88 | 0.538689 | false |
mgrouchy/django-stronghold
|
stronghold/conf.py
|
1
|
2590
|
import re
try:
from django.urls import reverse, NoReverseMatch
except ImportError:
from django.core.urlresolvers import reverse, NoReverseMatch
from django.conf import settings
from django.contrib.auth.decorators import login_required
STRONGHOLD_PUBLIC_URLS = getattr(settings, "STRONGHOLD_PUBLIC_URLS", ())
STRONGHOLD_DEFAULTS = getattr(settings, "STRONGHOLD_DEFAULTS", True)
STRONGHOLD_PUBLIC_NAMED_URLS = getattr(settings, "STRONGHOLD_PUBLIC_NAMED_URLS", ())
def is_authenticated(user):
""" make compatible with django 1 and 2 """
try:
return user.is_authenticated()
except TypeError:
return user.is_authenticated
def test_request(request, view_func, view_args, view_kwargs):
"""
Default test against request in middleware.
Set STRONGHOLD_REQUEST_TEST_FUNC in your Django settings to a function with
this signature if you want to use the request details to deny permission.
"""
return True
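# A hedged sketch of a custom request test (the function below is an example,
# not part of stronghold itself): deny non-staff users a hypothetical
# /reports/ prefix, then point STRONGHOLD_REQUEST_TEST_FUNC at it in settings.py.
#   def staff_only_reports(request, view_func, view_args, view_kwargs):
#       if request.path.startswith('/reports/'):
#           return request.user.is_staff
#       return True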
STRONGHOLD_USER_TEST_FUNC = getattr(settings, "STRONGHOLD_USER_TEST_FUNC", is_authenticated)
STRONGHOLD_REQUEST_TEST_FUNC = getattr(settings, "STRONGHOLD_REQUEST_TEST_FUNC", test_request)
if STRONGHOLD_DEFAULTS:
if "django.contrib.auth" in settings.INSTALLED_APPS:
STRONGHOLD_PUBLIC_NAMED_URLS += ("login", "logout")
# Do not login protect the logout url, causes an infinite loop
logout_url = getattr(settings, "LOGOUT_URL", None)
if logout_url:
STRONGHOLD_PUBLIC_URLS += (r"^%s.+$" % logout_url,)
if settings.DEBUG:
# In Debug mode we serve the media urls as public by default as a
# convenience. We make no other assumptions
static_url = getattr(settings, "STATIC_URL", None)
media_url = getattr(settings, "MEDIA_URL", None)
if static_url:
STRONGHOLD_PUBLIC_URLS += (r"^%s.+$" % static_url,)
if media_url:
STRONGHOLD_PUBLIC_URLS += (r"^%s.+$" % media_url,)
# Named URLs can be unsafe if a user puts the wrong URL in. Right now URLs that
# don't reverse are silently ignored (the warning below is commented out).
# Maybe in the future make this fail loudly?
named_urls = []
for named_url in STRONGHOLD_PUBLIC_NAMED_URLS:
try:
url = reverse(named_url)
named_urls.append(url)
except NoReverseMatch:
# print "Stronghold: Could not reverse Named URL: '%s'. Is it in your `urlpatterns`? Ignoring." % named_url
# ignore non-matches
pass
STRONGHOLD_PUBLIC_URLS += tuple(["^%s$" % url for url in named_urls])
if STRONGHOLD_PUBLIC_URLS:
STRONGHOLD_PUBLIC_URLS = [re.compile(v) for v in STRONGHOLD_PUBLIC_URLS]
|
mit
| -3,129,621,575,450,528,000 | 34 | 115 | 0.6861 | false |
frederick623/HTI
|
fa_util_py/HTI_FeedTrade_EDD.py
|
1
|
21953
|
import ael
import acm
import os
import stat
import time
import shutil
import string
import HTI_functions
#import HTI_DB_Functions
import datetime
from datetime import date
import HTI_FeedTrade_EDD_Util
import HTI_Util
import HTI_Email_Util
#row details
COL_TRADE_ID = 0
COL_WAY = 1
COL_QTY = 2
COL_PRICE = 3
COL_INS_LOCAL_CODE = 4
COL_TRD_CCY = 5
COL_INS_MIC = 6
COL_INS_CCY = 7
COL_INS_DESC = 8
COL_INS_TYPE = 9
COL_INS_EXP_DAY = 10
COL_INS_POINT_VAL = 11
COL_INS_STRIKE = 12
COL_INS_OPTION_TYPE = 13
COL_INS_EXERCISE_TYPE = 14
COL_INS_BULL_BEAR = 15
COL_INS_BARRIER = 16
COL_INS_OTC = 17
COL_INS_DELIVERY_TYPE = 18
COL_UL_LOCAL_CODE = 19
COL_UL_TYPE = 20
COL_UL_MIC = 21
COL_UL_CCY = 22
COL_UL_NAME = 23
COL_CHANNEL = 24
COL_INS_ISSUE_SIZE = 25
COL_INS_ISSUE_DATE = 26
COL_INS_BARRIER_TYPE = 27
COL_TRD_ACQUIRER = 28
COL_TRD_MSS_ACC_ID = 29
COL_TRD_AE_ACES_GRP_CDE = 30
COL_TRD_TD = 31
COL_TRD_VD = 32
COL_INS_NAME = 33
COL_INS_UL_NAME = 34
COL_INS_UNDERLYING_MKTID = 35
COL_USER_ID = 36
COL_TIMESTAMP = 37
COL_WARRANTPARITY = 38
# the following columns are for MSSD
COL_MSSD_UNDERLYING = 39
COL_MSSD_CALLPUT = 40
COL_MSSD_EXP_MONTH = 41
COL_MSSD_STRIKE = 42
COL_MSSD_POINTVAL = 43
ael_variables = [['asofdate', 'Date', 'string', HTI_Util.get_dates(), "TODAY", 1, 0, 'Date', None, 1], \
['fileName', 'File Name', 'string', None, 'C:\\temp\\test001YYYYMMDD.csv', 1, 0, 'File Name', None, 1], \
['own_issuer', 'Own Issuer', 'string', HTI_Util.getAllIssuers(), '', 1, 0, 'Mark this issuer to the warrant issued by us', None, 1], \
['success_email_subj', 'Success Email Subject', 'string', None, 'FA (PROD) : EDD MSS Trade File Upload - SUCCESS', 1, 0, 'Sucess Email Subject', None, 1], \
['success_emaillist', 'Success Email List', 'string', None, '[email protected]', 1, 0, 'Success Email List', None, 1], \
['successEmail', 'Send Success Email', 'string', HTI_Util.get_yesno(), 'N', 0, 0, 'Send Success Email', None, 1], \
['failure_email_subj', 'Failure Email Subject', 'string', None, 'FA (PROD) : EDD MSS Trade File Upload - FAILED', 1, 0, 'Failure Email Subject', None, 1], \
['failure_emaillist', 'Failure Email List', 'string', None, '[email protected]', 1, 0, 'Failure Email List', None, 1], \
['failureEmail', 'Send Failure Email', 'string', HTI_Util.get_yesno(), 'N', 0, 0, 'Send Failure Email', None, 1], \
['default_pf', 'Default Portfolio', 'string', HTI_Util.getAllPortfolios(), 'Trading Book 1', 1, 0, 'Default Portfolio', None, 1], \
['default_bkr', 'Default Broker', 'string', HTI_Util.getAllBrokers(), 'Haitong Intl Securities Company Ltd.', 1, 0, 'Default Broker', None, 1], \
['default_cpty', 'Default Counterparty', 'string', HTI_Util.getAllParties(), 'Access Asia Investment Holdings', 1, 0, 'Default Counterparty', None, 1], \
['default_acq', 'Default Acquirer', 'string', HTI_Util.getAllAcquirers(), 'HTISEC - FICC', 1, 0, 'Default Acquirer', None, 1], \
['default_trader', 'Default Trader', 'string', None, 'ARENASYS', 1, 0, 'Default Trader', None, 1], \
['default_status', 'Default Trade Status', 'string', HTI_Util.getAllStatus(), 'Simulated', 1, 0, 'Default Trade Status', None, 1], \
['create_ins_only', 'Create Instrument Only', 'string', HTI_Util.get_yesno(), 'N', 0, 0, 'Create Instrument Only', None, 1], \
['fo_system', 'FO System', 'string', None, 'Horizon', 1, 0, 'FO System', None, 1]]
def ValidateAndFeedTrade(dict, FeedTrade):
ret = True
invalidCptyArray = [[],[]]
invalidInsArray = [[],[]]
invalidPfArray = [[],[]]
invalidTrdCcyArray = [[],[]]
invalidAcqArray = [[],[]]
invalidBrokerArray = [[],[]]
invalidBuySellArray = [[],[]]
invalidTraderArray = [[],[]]
new_arr_ins = []
errdict = {"invalidIns":invalidInsArray, "invalidParty":invalidCptyArray, "invalidPf":invalidPfArray, \
"invalidTrdCcy":invalidTrdCcyArray, "invalidAcquirer":invalidAcqArray, \
"invalidBroker":invalidBrokerArray, "invalidBuySell":invalidBuySellArray, \
"invalidTrader":invalidTraderArray}
tdt = ael.date_today()
asofdate = dict['asofdate']
if asofdate == 'TODAY':
asofdate = tdt.to_string('%Y%m%d')
trd_create_dt = tdt.to_string('%Y-%m-%d')
trd_create_dt_plus1 = tdt.add_days(1).to_string('%Y-%m-%d')
else:
asofdate = ael.date(asofdate).to_string('%Y%m%d')
trd_create_dt = ael.date(asofdate).to_string('%Y-%m-%d')
trd_create_dt_plus1 = ael.date(asofdate).add_days(1).to_string('%Y-%m-%d')
DEFAULT_PF = dict['default_pf']
DEFAULT_BKR = dict['default_bkr']
DEFAULT_CPTY = dict['default_cpty']
DEFAULT_ACQ = dict['default_acq']
DEFAULT_STATUS = dict['default_status']
DEFAULT_TRADER = dict['default_trader']
OWN_ISSUER = dict['own_issuer']
FAILURE_EMAILLIST = dict['failure_emaillist']
print 'Failure Email List:', FAILURE_EMAILLIST
FAILURE_RECIPIENTS = FAILURE_EMAILLIST.split(',')
SUCCESS_EMAILLIST = dict['success_emaillist']
print 'Success Email List:', SUCCESS_EMAILLIST
SUCCESS_RECIPIENTS = SUCCESS_EMAILLIST.split(',')
successSubject = dict['success_email_subj']
errSubject = dict['failure_email_subj']
send_failureEmail = dict['failureEmail']
send_successEmail = dict['successEmail']
fo_system = dict['fo_system']
create_ins_only = dict['create_ins_only']
fileName = dict['fileName']
fileName = fileName.replace("YYYYMMDD", tdt.to_string('%Y%m%d'))
print 'File Name:', fileName
try:
fileStats = os.stat(fileName)
fileInfo = {
'Size': fileStats[stat.ST_SIZE],
'LastModified': time.ctime(fileStats[stat.ST_MTIME]),
'LastAccessed': time.ctime(fileStats[stat.ST_ATIME]),
'CreationTime': time.ctime(fileStats[stat.ST_CTIME]),
'Mode': fileStats[stat.ST_MODE]
}
print 'Reading trade file ' + fileName
print 'File date: ' + fileInfo['LastModified']
print 'Process run date: ' + tdt.to_string('%Y-%m-%d')
errmsg = ''
ins_creation_cnt = 0
success_trd_cnt = 0
fail_trd_cnt = 0
already_upload_cnt = 0
skip_trd_cnt = 0
ttl_trd_cnt = 0
sep = ','
try:
# check file corruption
fi = open(fileName)
line = fi.readline()
linecnt = 0
pcecnt = 0
hasTrailer = False
while line.strip() <> "":
data = line.split(sep)
print data, linecnt
tradeId = data[COL_TRADE_ID].strip()
way = data[COL_WAY].strip()
qty = data[COL_QTY].strip()
price = data[COL_PRICE].strip()
insLocalCode = data[COL_INS_LOCAL_CODE].strip()
trdCcy = data[COL_TRD_CCY].strip()
insMic = data[COL_INS_MIC].strip()
insCcy = data[COL_INS_CCY].strip()
if insCcy == '':
insCcy = trdCcy
insDesc = data[COL_INS_DESC].strip()
insProdType = data[COL_INS_TYPE].strip()
insMaturity = data[COL_INS_EXP_DAY].strip()
if insMaturity != '':
#insMaturity = insMaturity[6:8] + '/' + insMaturity[4:6] + '/' + insMaturity[0:4]
insMaturity = insMaturity[0:4] + '-' + insMaturity[4:6] + '-' + insMaturity[6:8]
insPointValue = data[COL_INS_POINT_VAL].strip()
insStrike = data[COL_INS_STRIKE].strip()
insOptionType = data[COL_INS_OPTION_TYPE].strip()
insExecType = data[COL_INS_EXERCISE_TYPE].strip()
insBullBear = data[COL_INS_BULL_BEAR].strip()
insBarrier = data[COL_INS_BARRIER].strip()
insOtc = data[COL_INS_OTC].strip()
insDeliveryType = data[COL_INS_DELIVERY_TYPE].strip()
insUlLocalCode = data[COL_UL_LOCAL_CODE].strip()
insULProdType = data[COL_UL_TYPE].strip()
insULMic = data[COL_UL_MIC].strip()
insULCcy = data[COL_UL_CCY].strip()
insULDesc = data[COL_UL_NAME].strip()
#trdChannel = data[COL_CHANNEL].strip()
trdChannel = fo_system
insIssueSize = data[COL_INS_ISSUE_SIZE].strip()
insIssueDate = data[COL_INS_ISSUE_DATE].strip()
if insIssueDate != '':
insIssueDate = insIssueDate[6:8] + '/' + insIssueDate[4:6] + '/' + insIssueDate[0:4]
insBarrierType = data[COL_INS_BARRIER_TYPE].strip()
trdMssAcquirer = data[COL_TRD_ACQUIRER].strip()
trd_mss_acc = data[COL_TRD_MSS_ACC_ID].strip()
trd_ae_aces_grp_cde = data[COL_TRD_AE_ACES_GRP_CDE].strip()
trd_td = data[COL_TRD_TD].strip()
trd_vd = data[COL_TRD_VD].strip()
insName = data[COL_INS_NAME].strip()
insULName = data[COL_INS_UL_NAME].strip()
insUnderlying_mktId = data[COL_INS_UNDERLYING_MKTID].strip()
fo_userid = data[COL_USER_ID].strip()
timestamp = data[COL_TIMESTAMP].strip()
warrantParity = data[COL_WARRANTPARITY].strip()
#print 'warrantParity', warrantParity
if fo_system == 'MSSD':
mssd_underlying = data[COL_MSSD_UNDERLYING].strip()
mssd_callput = data[COL_MSSD_CALLPUT].strip()
mssd_exp_month = data[COL_MSSD_EXP_MONTH].strip()
mssd_strike = data[COL_MSSD_STRIKE].strip()
mssd_pointval = data[COL_MSSD_POINTVAL].strip()
else:
mssd_underlying = ''
mssd_callput = ''
mssd_exp_month = ''
mssd_strike = ''
mssd_pointval = ''
print tradeId, way, qty, price, insLocalCode, trdCcy, insMic, insCcy, insDesc, insProdType, insMaturity, insPointValue, insStrike, insOptionType, \
insExecType, insBullBear, insBarrier, insOtc, insDeliveryType, insUlLocalCode, insULProdType, insULMic, insULCcy, insULDesc, trdChannel, \
insIssueSize, insIssueDate, insBarrierType, trdMssAcquirer, trd_mss_acc, trd_ae_aces_grp_cde, trd_td, trd_vd, insName, insULName, insUnderlying_mktId, \
fo_userid, timestamp, warrantParity, mssd_underlying, mssd_callput, mssd_exp_month, mssd_strike, mssd_pointval
trdAcquirer = ''
trdCounterparty = ''
trdPortfolio = ''
if trd_td != '':
#trd_td = trd_td[6:8] + '/' + trd_td[4:6] + '/' + trd_td[0:4]
trd_td = trd_td[0:4] + '-' + trd_td[4:6] + '-' + trd_td[6:8]
if trd_vd != '':
#trd_vd = trd_vd[6:8] + '/' + trd_vd[4:6] + '/' + trd_vd[0:4]
trd_vd = trd_vd[0:4] + '-' + trd_vd[4:6] + '-' + trd_vd[6:8]
if trd_mss_acc != '':
#trdCounterparty = HTI_FeedTrade_EDD_Util.mapCounterpartyByMSSAcc(trd_mss_acc)
if trdChannel == 'Horizon':
trdCounterparty = '' # use default counterparty
else:
trdCounterparty = '' # use default counterparty
#trdCounterparty = HTI_FeedTrade_EDD_Util.mapCounterparty(trd_mss_acc, 'MSS_ACC_CODE')
print 'trdCounterparty', trdCounterparty
'''
if trd_ae_aces_grp_cde != '':
#trdPortfolio = HTI_FeedTrade_EDD_Util.mapPfByAE_ACES_GRP_CODE(trd_ae_aces_grp_cde)
if trdChannel == 'Horizon':
trdPortfolio = HTI_FeedTrade_EDD_Util.mapPf(trd_ae_aces_grp_cde, 'HORIZON_PORTFOLIO')
else:
#trdPortfolio = HTI_FeedTrade_EDD_Util.mapPf(trd_ae_aces_grp_cde, 'MSS_AE_ACES_GRP_CDE')
#trdPortfolio = HTI_FeedTrade_EDD_Util.mapPf(trd_ae_aces_grp_cde, 'PF_MSS_ACC_CODE')
trdPortfolio = HTI_FeedTrade_EDD_Util.mapPf(trd_mss_acc, 'PF_MSS_ACC_CODE')
print 'trdPortfolio', trdPortfolio
'''
if create_ins_only == 'Y':
trdnbr = -1
else:
trdnbr = HTI_FeedTrade_EDD_Util.getTrade(tradeId, trdChannel, trd_create_dt, trd_create_dt_plus1)
print 'FO Trade No: %s' % (str(trdnbr))
if trdnbr == -1: #trade is not in FA
newIns = False
trd_insid = ''
issuer_ptyid = ''
if insProdType in ('CBBC', 'Warrant', 'CBBC', 'WARRANT'):
if HTI_FeedTrade_EDD_Util.getIssuerByWarrantName(insName, 'WARRANT_ISSUER_NAME') == OWN_ISSUER:
issuer_ptyid = OWN_ISSUER
#print 'D'
trd_insid, newIns = HTI_FeedTrade_EDD_Util.getInstrument(tradeId, way, qty, price, insLocalCode,
trdCcy, insMic, insCcy, insDesc, insProdType,
insMaturity, insPointValue, insStrike, insOptionType,
insExecType, insBullBear, insBarrier, insOtc, insDeliveryType,
insUlLocalCode, insULProdType, insULMic, insULCcy, insULDesc,
trdChannel, insIssueSize, insIssueDate, insBarrierType,
issuer_ptyid, insName, insULName, insUnderlying_mktId, warrantParity,
mssd_underlying, mssd_callput, mssd_exp_month, mssd_strike, mssd_pointval, fo_system,
new_arr_ins, errdict)
#if newIns and trd_insid == '':
# print 'Created new instrument %s' % (trd_insid)
# ins_creation_cnt = ins_creation_cnt + 1
if create_ins_only != 'Y':
print 'louis', trd_insid, newIns
if trd_ae_aces_grp_cde != '':
if trdChannel == 'Horizon':
trdPortfolio = HTI_FeedTrade_EDD_Util.mapPf(trd_ae_aces_grp_cde, 'HORIZON_PORTFOLIO')
else:
#trdPortfolio = HTI_FeedTrade_EDD_Util.mapPf(trd_mss_acc, 'PF_MSS_ACC_CODE')
#print 'A'
#trdPortfolio = HTI_FeedTrade_EDD_Util.mapPfByMssAcc(trd_mss_acc, trd_insid)
trdPortfolio = HTI_FeedTrade_EDD_Util.mapPf(trd_ae_aces_grp_cde, 'HORIZON_PORTFOLIO')
if trdPortfolio != "":
trdAcquirer = HTI_FeedTrade_EDD_Util.mapAcquirerByPf(trdPortfolio, trd_insid)
print 'trd_mss_acc', trd_mss_acc, tradeId
print 'trdPortfolio', trdPortfolio
trdnbr = -1
tradeSituation = ''
print 'H', trd_insid
print trd_td, trd_vd
trdnbr, tradeSituation = HTI_FeedTrade_EDD_Util.createTrade(asofdate, trd_insid, tradeId, way, qty, price, trdCcy, DEFAULT_PF, DEFAULT_CPTY,
DEFAULT_BKR, DEFAULT_ACQ, DEFAULT_STATUS, DEFAULT_TRADER, trdChannel, trdAcquirer, trdCounterparty,
trdPortfolio, trd_td, trd_vd, trd_mss_acc, trd_ae_aces_grp_cde, fo_userid, timestamp, errdict)
print 'I'
if tradeSituation == 'Success' and trdnbr != -1:
print 'Created new trade FO:%s (%s), FA:%s' % (tradeId, trdChannel, str(trdnbr))
success_trd_cnt = success_trd_cnt + 1
#elif tradeSituation == 'Exist' and trdnbr != -1:
# print 'Trade %s is already exist FO:%s, FA:%s' % (tradeId, str(trdnbr))
# already_upload_cnt = already_upload_cnt + 1
elif tradeSituation == 'Fail':
print 'FO Trade %s (%s) is failed to upload' % (tradeId, trdChannel)
fail_trd_cnt = fail_trd_cnt + 1
else:
print 'FO Trade %s (%s) is skipped' % (tradeId, trdChannel)
skip_trd_cnt = skip_trd_cnt + 1
else:
print 'Trade is already exist FO:%s (%s), FA:%s' % (tradeId, trdChannel, str(trdnbr))
already_upload_cnt = already_upload_cnt + 1
ttl_trd_cnt = ttl_trd_cnt + 1
linecnt = linecnt + 1
line = fi.readline()
fi.close()
print errdict
validationErr = HTI_FeedTrade_EDD_Util.LoopValidationErrMsg(errdict)
errmsg = errmsg + '\n' + validationErr
ins_creation_cnt = len(new_arr_ins)
new_ins_msg = ''
if len(new_arr_ins) > 0:
new_ins_msg = 'Auto-created the following Instrument in FA: -'
new_ins_msg = new_ins_msg + '\n' + 'Instrument Id '
new_ins_msg = new_ins_msg + '\n' + '--------------------------'
for new_insid in new_arr_ins:
new_ins_msg = new_ins_msg + '\n' + new_insid
msg = 'Date: %s' % tdt.to_string('%Y-%m-%d') + '\n'
if create_ins_only != 'Y':
msg = msg + '%s trades from %s' % (str(ttl_trd_cnt), fo_system) + '\n'
msg = msg + '%s trades Already uploaded in previous batches' % str(already_upload_cnt) + '\n'
msg = msg + '%s trades were skipped in this batch' % str(skip_trd_cnt) + '\n'
msg = msg + '%s trades successfully uploaded in this batch' % str(success_trd_cnt) + '\n'
msg = msg + '%s trades failed to upload in this batch' % str(fail_trd_cnt) + '\n'
else:
msg = msg + '%s instruments from %s' % (str(ttl_trd_cnt), fo_system) + '\n'
msg = msg + '%s instruments created in this batch' % str(ins_creation_cnt) + '\n'
if errmsg.strip() != '':
msg = msg + 'Error:' + '\t' + errmsg
msg = msg + '\n\n' + new_ins_msg
subject = errSubject
if send_failureEmail == 'Y':
HTI_Email_Util.EmailNotify(subject, msg, FAILURE_RECIPIENTS)
else:
print subject
print str(FAILURE_RECIPIENTS)
print msg
ret = False
else:
msg = msg + '\n' + 'No Error'
msg = msg + '\n\n' + new_ins_msg
subject = successSubject
if send_successEmail == 'Y':
HTI_Email_Util.EmailNotify(subject, msg, SUCCESS_RECIPIENTS)
else:
print subject
print str(SUCCESS_RECIPIENTS)
print msg
finally:
if fi.closed == False:
fi.close()
else:
fi.close()
return ret
except IOError:
errmsg = 'Date: %s' % tdt.to_string('%Y-%m-%d') + '\n'
errmsg = errmsg + 'Error:' + '\n\t' + fileName + ' does not exist.'
if send_failureEmail == 'Y':
HTI_Email_Util.EmailNotify(errSubject, errmsg, FAILURE_RECIPIENTS)
else:
print errSubject
print errmsg
print FAILURE_RECIPIENTS
except OSError:
errmsg = 'Date: %s' % tdt.to_string('%Y-%m-%d') + '\n'
errmsg = errmsg + 'Error:' + '\n\t' + fileName + ' does not exist.'
if send_failureEmail == 'Y':
HTI_Email_Util.EmailNotify(errSubject, errmsg, FAILURE_RECIPIENTS)
else:
print errSubject
print errmsg
print FAILURE_RECIPIENTS
def ael_main(dict):
'''
# test email connection
subject = "Currency price capture"
body = "Process is run successfully"
HTI_Email_Util.EmailNotify(subject, body, '[email protected]')
return
'''
ret = ValidateAndFeedTrade(dict, True)
|
apache-2.0
| 5,571,595,386,392,807,000 | 48.893182 | 172 | 0.499749 | false |
tsuru/varnishapi
|
tests/test_api.py
|
1
|
12846
|
# Copyright 2014 varnishapi authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import base64
import inspect
import json
import os
import unittest
from feaas import api, plugin, storage
from feaas.managers import ec2
from . import managers
class APITestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.manager = managers.FakeManager()
api.get_manager = lambda: cls.manager
cls.api = api.api.test_client()
def setUp(self):
self.manager.reset()
def test_start_instance(self):
resp = self.api.post("/resources", data={"name": "someapp"})
self.assertEqual(201, resp.status_code)
self.assertEqual("someapp", self.manager.instances[0].name)
def test_start_instance_without_name(self):
resp = self.api.post("/resources", data={"names": "someapp"})
self.assertEqual(400, resp.status_code)
self.assertEqual("name is required", resp.data)
self.assertEqual([], self.manager.instances)
def test_start_instance_unauthorized(self):
self.set_auth_env("varnishapi", "varnish123")
self.addCleanup(self.delete_auth_env)
resp = self.open_with_auth("/resources", method="POST",
data={"names": "someapp"},
user="varnishapi", password="wat")
self.assertEqual(401, resp.status_code)
self.assertEqual("you do not have access to this resource", resp.data)
def test_remove_instance(self):
self.manager.new_instance("someapp")
resp = self.api.delete("/resources/someapp")
self.assertEqual(200, resp.status_code)
self.assertEqual("", resp.data)
self.assertEqual([], self.manager.instances)
def test_remove_instance_not_found(self):
resp = self.api.delete("/resources/someapp")
self.assertEqual(404, resp.status_code)
self.assertEqual("Instance not found", resp.data)
self.assertEqual([], self.manager.instances)
def test_remove_instance_unauthorized(self):
self.set_auth_env("varnishapi", "varnish123")
self.addCleanup(self.delete_auth_env)
resp = self.open_with_auth("/resources/someapp", method="DELETE",
user="varnishapi", password="wat")
self.assertEqual(401, resp.status_code)
self.assertEqual("you do not have access to this resource", resp.data)
def test_bind_app(self):
self.manager.new_instance("someapp")
resp = self.api.post("/resources/someapp/bind-app",
data={"app-host": "someapp.cloud.tsuru.io"})
self.assertEqual(201, resp.status_code)
self.assertEqual("null", resp.data)
self.assertEqual("application/json", resp.mimetype)
bind = self.manager.instances[0].bound[0]
self.assertEqual("someapp.cloud.tsuru.io", bind)
def test_bind_without_app_host(self):
resp = self.api.post("/resources/someapp/bind-app",
data={"app_hooost": "someapp.cloud.tsuru.io"})
self.assertEqual(400, resp.status_code)
self.assertEqual("app-host is required", resp.data)
def test_bind_instance_not_found(self):
resp = self.api.post("/resources/someapp/bind-app",
data={"app-host": "someapp.cloud.tsuru.io"})
self.assertEqual(404, resp.status_code)
self.assertEqual("Instance not found", resp.data)
def test_bind_unauthorized(self):
self.set_auth_env("varnishapi", "varnish123")
self.addCleanup(self.delete_auth_env)
resp = self.open_with_auth("/resources/someapp/bind-app", method="POST",
data={"app-host": "someapp.cloud.tsuru.io"},
user="varnishapi", password="wat")
self.assertEqual(401, resp.status_code)
self.assertEqual("you do not have access to this resource", resp.data)
def test_unbind(self):
self.manager.new_instance("someapp")
self.manager.bind("someapp", "someapp.cloud.tsuru.io")
resp = self.api.delete("/resources/someapp/bind-app",
data={"app-host": "someapp.cloud.tsuru.io"},
headers={"Content-Type": "application/x-www-form-urlencoded"})
self.assertEqual(200, resp.status_code)
self.assertEqual("", resp.data)
self.assertEqual([], self.manager.instances[0].bound)
def test_unbind_instance_not_found(self):
resp = self.api.delete("/resources/someapp/bind-app",
data={"app-host": "someapp.cloud.tsuru.io"})
self.assertEqual(404, resp.status_code)
self.assertEqual("Instance not found", resp.data)
def test_unbind_unauthorized(self):
self.set_auth_env("varnishapi", "varnish123")
self.addCleanup(self.delete_auth_env)
resp = self.open_with_auth("/resources/someapp/bind-app",
method="DELETE",
data={"app-host": "someapp.cloud.tsuru.io"},
user="varnishapi", password="wat")
self.assertEqual(401, resp.status_code)
self.assertEqual("you do not have access to this resource", resp.data)
def test_info(self):
self.manager.new_instance("someapp")
resp = self.api.get("/resources/someapp")
self.assertEqual(200, resp.status_code)
self.assertEqual("application/json", resp.mimetype)
data = json.loads(resp.data)
self.assertEqual({"name": "someapp"}, data)
def test_info_instance_not_found(self):
resp = self.api.get("/resources/someapp")
self.assertEqual(404, resp.status_code)
self.assertEqual("Instance not found", resp.data)
def test_info_unauthorized(self):
self.set_auth_env("varnishapi", "varnish123")
self.addCleanup(self.delete_auth_env)
resp = self.open_with_auth("/resources/someapp", method="GET",
user="varnishapi", password="wat")
self.assertEqual(401, resp.status_code)
self.assertEqual("you do not have access to this resource", resp.data)
def test_status_started(self):
self.manager.new_instance("someapp", state="started")
resp = self.api.get("/resources/someapp/status")
self.assertEqual(204, resp.status_code)
def test_status_pending(self):
self.manager.new_instance("someapp", state="pending")
resp = self.api.get("/resources/someapp/status")
self.assertEqual(202, resp.status_code)
def test_status_error(self):
self.manager.new_instance("someapp", state="error")
resp = self.api.get("/resources/someapp/status")
self.assertEqual(500, resp.status_code)
def test_status_scaling(self):
self.manager.new_instance("someapp", state="scaling")
resp = self.api.get("/resources/someapp/status")
self.assertEqual(204, resp.status_code)
def test_status_not_found(self):
resp = self.api.get("/resources/someapp/status")
self.assertEqual(404, resp.status_code)
self.assertEqual("Instance not found", resp.data)
def test_status_unauthorized(self):
self.set_auth_env("varnishapi", "varnish123")
self.addCleanup(self.delete_auth_env)
resp = self.open_with_auth("/resources/someapp/status", method="GET",
user="varnishapi", password="wat")
self.assertEqual(401, resp.status_code)
self.assertEqual("you do not have access to this resource", resp.data)
def test_scale_instance(self):
self.manager.new_instance("someapp")
resp = self.api.post("/resources/someapp/scale",
data={"quantity": "3"})
self.assertEqual(201, resp.status_code)
_, instance = self.manager.find_instance("someapp")
self.assertEqual(3, instance.units)
def test_scale_instance_invalid_quantity(self):
self.manager.new_instance("someapp")
resp = self.api.post("/resources/someapp/scale",
data={"quantity": "chico"})
self.assertEqual(400, resp.status_code)
self.assertEqual("invalid quantity: chico", resp.data)
def test_scale_instance_negative_quantity(self):
self.manager.new_instance("someapp")
resp = self.api.post("/resources/someapp/scale",
data={"quantity": "-2"})
self.assertEqual(400, resp.status_code)
self.assertEqual("invalid quantity: -2", resp.data)
def test_scale_instance_missing_quantity(self):
self.manager.new_instance("someapp")
resp = self.api.post("/resources/someapp/scale",
data={"quality": "-2"})
self.assertEqual(400, resp.status_code)
self.assertEqual("missing quantity", resp.data)
def test_scale_instance_not_found(self):
resp = self.api.post("/resources/someapp/scale",
data={"quantity": "2"})
self.assertEqual(404, resp.status_code)
self.assertEqual("Instance not found", resp.data)
def test_scale_instance_unauthorized(self):
self.set_auth_env("varnishapi", "varnish123")
self.addCleanup(self.delete_auth_env)
resp = self.open_with_auth("/resources/someapp/scale", method="POST",
data={"quantity": "2"},
user="varnishapi", password="wat")
self.assertEqual(401, resp.status_code)
self.assertEqual("you do not have access to this resource", resp.data)
def test_plugin(self):
expected = inspect.getsource(plugin)
resp = self.api.get("/plugin")
self.assertEqual(200, resp.status_code)
self.assertEqual(expected, resp.data)
def test_plugin_does_not_require_authentication(self):
expected = inspect.getsource(plugin)
resp = self.api.get("/plugin")
self.assertEqual(200, resp.status_code)
self.assertEqual(expected, resp.data)
def open_with_auth(self, url, method, user, password, data=None, headers=None):
encoded = base64.b64encode(user + ":" + password)
if not headers:
headers = {}
headers["Authorization"] = "Basic " + encoded
return self.api.open(url, method=method, headers=headers, data=data)
def set_auth_env(self, user, password):
os.environ["API_USERNAME"] = user
os.environ["API_PASSWORD"] = password
def delete_auth_env(self):
del os.environ["API_USERNAME"], os.environ["API_PASSWORD"]
class ManagerTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
reload(api)
def setUp(self):
if "waaat" in api.managers:
del api.managers["waaat"]
def tearDown(self):
if "API_MANAGER" in os.environ:
del os.environ["API_MANAGER"]
def test_register_manager(self):
manager = lambda x: x
api.register_manager("waaat", manager)
self.assertEqual(manager, api.managers["waaat"])
def test_register_manager_override(self):
first_manager = lambda x: x
second_manager = lambda x, y: x + y
api.register_manager("waaat", first_manager)
api.register_manager("waaat", second_manager, override=True)
self.assertEqual(second_manager, api.managers["waaat"])
def test_register_manager_without_override(self):
first_manager = lambda x: x
second_manager = lambda x, y: x + y
api.register_manager("waaat", first_manager)
with self.assertRaises(ValueError) as cm:
api.register_manager("waaat", second_manager, override=False)
exc = cm.exception
self.assertEqual(("Manager already registered",), exc.args)
def test_get_manager(self):
os.environ["API_MANAGER"] = "ec2"
os.environ["API_MONGODB_URI"] = "mongodb://localhost:27017"
manager = api.get_manager()
self.assertIsInstance(manager, ec2.EC2Manager)
self.assertIsInstance(manager.storage, storage.MongoDBStorage)
def test_get_manager_unknown(self):
os.environ["API_MANAGER"] = "ec3"
with self.assertRaises(ValueError) as cm:
api.get_manager()
exc = cm.exception
self.assertEqual(("ec3 is not a valid manager",),
exc.args)
def test_get_manager_default(self):
os.environ["API_MONGODB_URI"] = "mongodb://localhost:27017"
manager = api.get_manager()
self.assertIsInstance(manager, ec2.EC2Manager)
self.assertIsInstance(manager.storage, storage.MongoDBStorage)
|
bsd-3-clause
| 5,952,788,250,173,263,000 | 41.256579 | 93 | 0.617157 | false |
LEX2016WoKaGru/pyClamster
|
scripts/oper_clouds.py
|
1
|
6068
|
# -*- coding: utf-8 -*-
"""
Created on 14.08.16
Created for pyclamster
Copyright (C) {2016}
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# System modules
import os
import warnings
import pickle
import glob
# External modules
import numpy as np
import scipy.misc
import matplotlib.pyplot as plt
# Internal modules
import pyclamster
from pyclamster.clustering.preprocess import LCN
from pyclamster.functions import rbDetection
import pyclamster.matching
warnings.catch_warnings()
warnings.filterwarnings('ignore')
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
image_directory = os.path.join(BASE_DIR, "examples", "images", "lex")
trained_models = os.path.join(BASE_DIR, "data")
plot_dir = os.path.join(BASE_DIR, 'plots')
predictor = pickle.load(open(os.path.join(trained_models, "kmeans.pk"), "rb"))
cams = []
cams.append(pickle.load(open(os.path.join(trained_models, 'sessions', 'FE3_session.pk'), mode='rb')))
cams.append(pickle.load(open(os.path.join(trained_models, 'sessions', 'FE4_session.pk'), mode='rb')))
cams[0].image_series = []
cams[1].image_series = []
cams[0].add_images(os.path.join(image_directory, 'cam3'))
cams[1].add_images(os.path.join(image_directory, 'cam4'))
matching = pyclamster.matching.Matching()
times = {'3': [], '4': []}
# load times
for img3 in cams[0]:
img3.loadTimefromfilename('FE3_Image_%Y%m%d_%H%M%S_UTCp1.jpg')
times['3'].append(img3.time)
for img4 in cams[1]:
img4.loadTimefromfilename('FE4_Image_%Y%m%d_%H%M%S_UTCp1.jpg')
times['4'].append(img4.time)
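# Pair up indices of simultaneously captured images: for every cam3 image whose
# timestamp also occurs in the cam4 series, keep (cam3_index, cam4_index).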
key_pair = [(k, times['4'].index(t)) for k, t in enumerate(times['3']) if t in times['4']]
t = 0
for keys in key_pair:
i = 0
clouds = []
for k in keys:
img = cams[i][k]
scipy.misc.imsave(
os.path.join(plot_dir, "rectified_{0:d}_{1:d}.png".format(i, k)),
img.image)
image_lcn = pyclamster.Image(img)
image_lcn.data = LCN(size=(50, 50, 3), scale=False).fit_transform(
image_lcn.data / 256)
w, h, _ = original_shape = image_lcn.data.shape
raw_image_lcn = rbDetection(image_lcn.data).reshape((w * h, -1))
label = predictor.predict(raw_image_lcn)
label.reshape((w, h), replace=True)
scipy.misc.imsave(
os.path.join(plot_dir, "lables_kmean_{0:d}_{1:d}.png".format(i, k)),
label.labels)
masks = label.getMaskStore()
cloud_mask_num = [1] # cloud vs. sky: pick the label number (0 or 1) that marks clouds
masks.denoise(cloud_mask_num,
5000)
cloud_labels_object, numLabels = masks.labelMask(cloud_mask_num)
scipy.misc.imsave(
os.path.join(plot_dir, "labl"
"es_used_{0:d}_{1:d}.png".format(i, k)),
cloud_labels_object.labels)
cloud_store = cloud_labels_object.getMaskStore()
cloud_lables = [l + 1 for l in range(numLabels)]
clouds.append([cloud_store.getCloud(img, [k, ]) for k in cloud_lables])
j = 0
#print(clouds[i])
for cloud in clouds[i]:
scipy.misc.imsave(
os.path.join(plot_dir, 'template_cloud_{0:d}_{1:d}_{2:d}.png'.format(i, k, j)),
cloud.image.data)
j += 1
print('finished image {0:d} of camera {1:d}'.format(k, i))
i += 1
if clouds[0] and clouds[1]:
matching_result, _ = matching.matching(clouds[0], clouds[1], min_match_prob=0.5)
t = 0
for result in matching_result:
fig = plt.figure()
ax = plt.subplot(1,3,1)
ax.axis('off')
ax.imshow(result[1].clouds[0].image.data)
ax = plt.subplot(1,3,2)
ax.axis('off')
ax.imshow(result[0].prop_map, interpolation='nearest')
ax = plt.subplot(1,3,3)
ax.axis('off')
ax.imshow(result[1].clouds[1].image.data)
plt.tight_layout()
plt.savefig(os.path.join(plot_dir, 'matching_{0:s}_{1:d}.png'.format(str(keys), t)))
spatial_cloud = result[1]
spatial_cloud.calc_overlapping()
spatial_cloud.calc_position(240)
t+=1
# i = 0
# for c1 in clouds[0]:
# j = 0
# for c2 in clouds[1]:
# result = match_template(c2.image.data, c1.image.data, pad_input=True,
# mode='reflect', constant_values=0)
# scipy.misc.imsave(os.path.join(plot_dir, 'cloud_matching_{0:d}_{1:d}_{2:d}.png'.format(keys[0], i, j)), result)
# j += 1
# i += 1
print('finished image pair {0:s}'.format(str(keys)))
#
#
# for image_path in all_images:
# img = pyclamster.image.Image(image_path)
# img.image = img.resize((512, 512))
#
# image.data = LCN(size=(50,50,3), scale=False).fit_transform(image.data/256)
# raw_image = rbDetection(image.data)
# w, h = original_shape = tuple(raw_image[:, :].shape)
# raw_image = np.reshape(raw_image, (w * h, 1))
# label = predictor.predict(raw_image)
# label.reshape((960, 960), replace=True)
# scipy.misc.imsave("cloud.png", label.labels)
# masks = label.getMaskStore()
# masks.denoise([0], 960)
# cloud_labels, _ = masks.labelMask([0,])
# scipy.misc.imsave("labels.png", cloud_labels.labels)
# scipy.misc.imshow(cloud_labels.labels)
# cloud_store = cloud_labels.getMaskStore()
# # TODO: Here is the matching algorithm missing
|
gpl-3.0
| -4,124,422,244,932,323,000 | 36 | 125 | 0.606625 | false |
openSUSE/vdsm
|
vdsm_reg/define.py
|
1
|
3003
|
#
# Copyright 2011 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
errCode = { 'noVM': {'status': {'code': 1, 'message': 'Desktop does not exist'}},
'nfsErr': {'status': {'code': 3, 'message': 'Image repository access timeout'}},
'exist': {'status': {'code': 4, 'message': 'Desktop already exists'}},
'noVmType': {'status': {'code': 5, 'message': 'Unsupported virtual machine type'}},
'down': {'status': {'code': 6, 'message': 'Desktop is down'}},
'copyerr': {'status': {'code': 7, 'message': 'Copy failed'}},
'sparse': {'status': {'code': 8, 'message': 'Sparse creation failed'}},
'createErr': {'status': {'code': 9, 'message': 'Error creating the requested Desktop'}},
'noConPeer': {'status': {'code':10, 'message': 'Could not connect to peer VDS'}},
'MissParam': {'status': {'code':11, 'message': 'Missing required parameter'}},
'migrateErr': {'status': {'code':12, 'message': 'Fatal error during migration'}},
'imageErr': {'status': {'code':13, 'message': 'Drive image file could not be found'}},
'outOfMem': {'status': {'code':14, 'message': 'Not enough free memory to create Desktop'}},
'unexpected': {'status': {'code':16, 'message': 'Unexpected exception'}},
'unsupFormat': {'status': {'code':17, 'message': 'Unsupported image format'}},
'ticketErr': {'status': {'code':18, 'message': 'Error while setting spice ticket'}},
'recovery': {'status': {'code':100, 'message': 'Recovering from crash or still initializing'}},
'installErr': {'status': {'code':101, 'message': 'Vds not operational. Check logs, repair it, and restart'}},
'tmp': {}
}
doneCode = {'code': 0, 'message': 'Done'}
nullCode = {'code': 0, 'message': ''}
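# Hedged usage sketch (caller convention assumed, not defined in this module):
# a verb that cannot find the requested VM would typically return
# errCode['noVM'], while a successful call reports {'status': doneCode}.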
#confFile = 'vdsm.conf'
loggerConf = 'logger.conf'
installPath = '/usr/share/vdsm/'
relPath = './'
Kbytes = 1024
Mbytes = 1024 * Kbytes
drives = ['hda', 'hdb', 'hdc', 'hdd', 'cdrom']
requiredParams = ['vmId', 'hda', 'memSize', 'macAddr', 'display']
class myException(Exception): pass
#exitCodes
ERROR = 1
NORMAL = 0
|
gpl-2.0
| 7,629,235,200,827,631,000 | 49.898305 | 123 | 0.605728 | false |
rastersoft/multipackager
|
src/multipackager_module/debian.py
|
1
|
14430
|
#!/usr/bin/env python3
# Copyright 2015 (C) Raster Software Vigo (Sergio Costas)
#
# This file is part of Multipackager
#
# Multipackager is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Multipackager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
import os
import shutil
import multipackager_module.package_base
class debian (multipackager_module.package_base.package_base):
def __init__(self, configuration, distro_type, distro_name, architecture, cache_name = None):
multipackager_module.package_base.package_base.__init__(self, configuration, distro_type, distro_name, architecture, cache_name)
def check_path_in_builds(self,project_path):
if self.distro_type == "ubuntu":
# Try the "ubuntu" folder, and if it doesn't exists, try with "debian" one
if self.architecture == "i386":
path_list = ["ubuntu32","UBUNTU32","Ubuntu32","ubuntu","UBUNTU","Ubuntu","debian32","DEBIAN32","Debian32","debian","DEBIAN","Debian"]
else:
path_list = ["ubuntu64","UBUNTU64","Ubuntu64","ubuntu","UBUNTU","Ubuntu","debian64","DEBIAN64","Debian64","debian","DEBIAN","Debian"]
else:
if self.architecture == "i386":
path_list = ["debian32","DEBIAN32","Debian32","debian","DEBIAN","Debian"]
else:
path_list = ["debian64","DEBIAN64","Debian64","debian","DEBIAN","Debian"]
for element in path_list:
path = os.path.join(project_path,element)
if os.path.exists(path):
return path
return None
def set_project_version(self,text):
pos = text.rfind("-")
if (pos != -1):
text = text[:pos]
self.project_version = text
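        # Hedged example (illustrative input): a Debian-style version string
        # such as "1.2.3-4" leaves "1.2.3" in self.project_version, while a
        # string without a dash, e.g. "2.0", is stored unchanged.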
def get_package_name(self,project_path):
""" Returns the final package name for the project specified, or None if can't be determined yet """
if (os.path.exists(os.path.join(project_path,"setup.py"))):
self.read_python_setup(project_path)
package_name = "{:s}-{:s}-{:s}_{:s}-{:s}{:d}_all.deb".format("python2" if self.python2 else "python3",self.project_name,self.distro_name,self.project_version,self.distro_type,self.configuration.revision)
else:
debian_path = self.check_path_in_builds(project_path)
if (debian_path == None):
return True
control_path = os.path.join(debian_path,"control")
if (not os.path.exists(control_path)):
return True
f = open (control_path,"r")
for line in f:
if line[:7] == "Source:":
self.project_name = line[7:].strip()
continue
if line[:8] == "Package:":
self.project_name = line[8:].strip()
continue
if line[:8] == "Version:":
self.set_project_version(line[8:].strip())
continue
f.close()
package_name = "{:s}-{:s}_{:s}-{:s}{:d}_{:s}.deb".format(self.project_name,self.distro_name,self.project_version,self.distro_type,self.configuration.revision,self.architecture)
return package_name
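        # Hedged example (values assumed): a non-Python project named "foo"
        # with version "1.0", built for Debian "stretch" on amd64 at
        # revision 1, would yield "foo-stretch_1.0-debian1_amd64.deb".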
def generate(self,path):
""" Ensures that the base system, to create a CHROOT environment, exists """
# Create all, first, in a temporal folder
tmp_path = path+".tmp"
shutil.rmtree(tmp_path, ignore_errors=True)
os.makedirs(tmp_path)
if self.distro_type == "debian":
server = "http://http.debian.net/debian/"
else:
server = "http://archive.ubuntu.com/ubuntu/"
command = "debootstrap --variant=buildd --arch {:s} {:s} {:s} {:s}".format(self.architecture,self.distro_name,tmp_path,server)
if (0 != self.run_external_program(command)):
return True # error!!!
f = open(os.path.join(tmp_path,"etc","apt","sources.list"),"w")
if (self.distro_type == "debian"):
# Add contrib and non-free to the list of packages sources if DEBIAN
f.write("deb http://ftp.debian.org/debian/ {:s} main contrib non-free\n".format(self.distro_name))
else:
# Add restricted, universe and multiverse if UBUNTU
f.write("deb http://archive.ubuntu.com/ubuntu/ {:s} main restricted universe multiverse\n".format(self.distro_name))
f.close()
command = 'apt clean'
if (0 != self.run_chroot(tmp_path,command)):
return True # error!!!
command = 'apt update'
if (0 != self.run_chroot(tmp_path,command)):
return True # error!!!
command = 'apt install meson ninja-build -y'
if (0 != self.run_chroot(tmp_path,command)):
return True # error!!!
os.sync()
os.rename(tmp_path,path) # rename the folder to the definitive name
os.sync()
shutil.rmtree(tmp_path, ignore_errors=True)
return False # no error
@multipackager_module.package_base.call_with_cache
def update(self,path):
""" Ensures that the chroot environment is updated with the lastest packages """
# Here, we have for sure the CHROOT environment, but maybe it must be updated
retval = self.run_chroot(path,"apt clean")
if (retval != 0):
return True # error!!!!
retval = self.run_chroot(path,"apt update")
if (retval != 0):
return True # error!!!!
retval = self.run_chroot(path,"apt dist-upgrade -y")
if (retval != 0):
return True # error!!!!
return False
@multipackager_module.package_base.call_with_cache
def install_dependencies_full(self,path,dependencies):
retval = self.run_chroot(path,"apt clean")
if (retval != 0):
return retval
retval = self.run_chroot(path,"apt update")
if (retval != 0):
return retval
command = "apt install -y"
for dep in dependencies:
command += " "+dep
return self.run_chroot(path, command)
    def install_local_package_internal(self, file_name):
        # operate inside the build chroot at self.working_path (the same path
        # used for the dpkg call below)
        retval = self.run_chroot(self.working_path, "apt clean")
        if (retval != 0):
            return retval
        retval = self.run_chroot(self.working_path, "apt update")
        if (retval != 0):
            return True # error!!!!
if 0 != self.run_chroot(self.working_path, "dpkg -i {:s}".format(file_name)):
if 0 != self.run_chroot(self.working_path, "apt install -f -y"):
return True
return False
def install_dependencies(self,project_path,avoid_packages,preinstall):
""" Install the dependencies needed for building this package """
run_update = False
dependencies = []
if (os.path.exists(os.path.join(project_path,"setup.py"))): # it is a python package
control_path = os.path.join(project_path,"stdeb.cfg")
dependencies.append("python3")
dependencies.append("python3-stdeb")
dependencies.append("python3-all")
dependencies.append("python-all")
dependencies.append("fakeroot")
else:
dependencies.append("meson")
dependencies.append("ninja-build")
debian_path = self.check_path_in_builds(project_path)
if debian_path == None:
print (_("There is no DEBIAN/UBUNTU folder with the package specific data"))
return True
control_path = os.path.join(debian_path,"control")
if (not os.path.exists(control_path)):
print (_("There is no CONTROL file with the package specific data"))
return True
f = open (control_path,"r")
for line in f:
if (line[:7] == "Depends") or (line[:13] == "Build-Depends"):
if (line[:8] == "Depends3"):
tmp = line[8:].strip()
elif (line[:7] == "Depends"):
tmp = line[7:].strip()
else:
tmp = line[13:].strip()
if (tmp[0] == ':') or (tmp[0] == '='):
tmp = tmp[1:].strip()
tmp = tmp.split(",")
for element2 in tmp:
tmp2 = element2.split("|")
# if it is a single package, just add it as-is
if (len(tmp2) == 1):
pos = element2.find("(") # remove version info
if (pos != -1):
element2 = element2[:pos]
dependencies.append(element2.strip())
continue
# but if there are several optional packages, check each one and install the first found
list_p = ""
found = False
for element in tmp2:
pos = element.find("(") # remove version info
if (pos != -1):
element = element[:pos]
list_p += " "+element
if not run_update:
self.run_chroot(self.base_path, "apt clean")
self.run_chroot(self.base_path, "apt update")
run_update = True
command = "apt install -y {:s}".format(element)
if (0 == self.run_chroot(self.base_path, command)):
found = True
break
if not found:
print (_("Cant find any of these packages in the guest system:{:s}").format(list_p))
return True
continue
if line[:7] == "Source:":
self.project_name = line[7:].strip()
continue
if line[:8] == "Package:":
self.project_name = line[8:].strip()
continue
if line[:8] == "Version:":
self.set_project_version(line[8:].strip())
continue
f.close()
if (len(dependencies) != 0):
deps2 = []
for d in dependencies:
if avoid_packages.count(d) == 0:
deps2.append(d)
return self.install_dependencies_full(self.base_path,deps2)
return False
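    # Hedged illustration of the control-file parsing above (input assumed,
    # not taken from any real project): a line like
    #     Build-Depends: debhelper (>= 9), libgtk-3-dev | libgtk2.0-dev
    # adds "debhelper" directly to the dependency list, while the alternatives
    # group is resolved by trying "apt install -y libgtk-3-dev" first and
    # falling back to the next candidate only if that install fails.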
def build_python(self):
""" Builds a package for a python project """
destination_dir = os.path.join(self.build_path,"deb_dist")
shutil.rmtree(destination_dir, ignore_errors = True)
if (self.run_chroot(self.working_path, 'bash -c "cd /project && python3 setup.py --command-packages=stdeb.command bdist_deb"')):
return True
return False
def copy_pacs(self,destination_dir,package_name):
files = os.listdir(destination_dir)
for f in files:
if f[-4:] == ".deb":
origin_name = os.path.join(destination_dir,f)
final_name = os.path.join(os.getcwd(),package_name)
if (os.path.exists(final_name)):
os.remove(final_name)
if os.path.isdir(origin_name):
if not self.copy_pacs(origin_name,package_name):
return False
shutil.move(origin_name, final_name)
return False
return True
def build_package(self,project_path):
""" Takes the binaries located at /install_root and build a package """
setup_python = os.path.join(self.build_path,"setup.py")
if (os.path.exists(setup_python)):
destination_dir = os.path.join(self.build_path,"deb_dist")
package_name = self.get_package_name(self.build_path)
return self.copy_pacs(destination_dir,package_name)
debian_path = self.check_path_in_builds(project_path)
package_path = os.path.join(self.working_path,"install_root","DEBIAN")
os.makedirs(package_path)
command = "cp -a {:s} {:s}".format(os.path.join(debian_path,"*"),package_path)
if 0 != self.run_external_program(command):
return True
self.set_perms(os.path.join(package_path,"preinst"))
self.set_perms(os.path.join(package_path,"postinst"))
self.set_perms(os.path.join(package_path,"prerm"))
self.set_perms(os.path.join(package_path,"postrm"))
control_path = os.path.join(package_path,"control")
f1 = open (control_path,"r")
f2 = open (control_path+".tmp","w")
for line in f1:
line = line.replace("\n","").replace("\r","")
if (line == ""): # remove blank lines, just in case
continue
elif (line[:13] == "Architecture:"):
arch = line[13:].strip()
if (arch == "any"):
line = "Architecture: {:s}".format(self.architecture)
elif (line[:7] == "Source:"):
continue
elif (line[:14] == "Build-Depends:"):
continue
elif (line[:8] == "Version:"):
line = "Version: {:s}".format(self.project_version)
f2.write("Installed-Size: {:d}\n".format(int(self.program_size/1024)))
elif (line[:15] == "Installed-Size:"):
continue
f2.write(line+"\n")
f1.close()
f2.close()
os.remove(control_path)
os.rename(control_path+".tmp",control_path)
package_name = self.get_package_name(self.build_path)
command = 'bash -c "cd / && dpkg -b /install_root {:s}"'.format(package_name)
if (self.run_chroot(self.working_path, command)):
return True
shutil.move(os.path.join(self.working_path,package_name), os.getcwd())
return False
|
gpl-3.0
| 988,044,248,390,199,900 | 38.534247 | 215 | 0.545669 | false |
Thortoise/Super-Snake
|
Blender/animation_nodes-master/nodes/geometry/intersect_polyline_plane.py
|
1
|
5665
|
import bpy
from bpy.props import *
from mathutils import Vector, geometry
from ... events import executionCodeChanged
from ... base_types.node import AnimationNode
edgesTypeItems = [ ("POINTS", "Points in order", ""),
("EDGES", "Points by edges", "") ]
class IntersectPolylinePlaneNode(bpy.types.Node, AnimationNode):
bl_idname = "an_IntersectPolylinePlaneNode"
bl_label = "Intersect Polyline Plane"
bl_width_default = 160
edgesType = EnumProperty(name = "Plane Type", default = "POINTS",
items = edgesTypeItems, update = executionCodeChanged)
cyclic = BoolProperty(name = "Cyclic Points", default = False,
description = "Consider last point to first point also, for closed polygon or cyclic spline",
update = executionCodeChanged)
message = StringProperty(name = "Message", default = "Expecting Points")
def create(self):
self.newInput("Vector List", "Positions", "positions")
socket = self.newInput("Edge Indices List", "Edge Indices", "edges")
socket.useIsUsedProperty = True
socket.isUsed = False
self.newInput("Vector", "Plane Point", "planePoint")
self.newInput("Vector", "Plane Normal", "planeNormal", value = (0, 0, 1))
self.newOutput("Vector List", "Intersections List", "intersections")
self.newOutput("Integer List", "Intersected Edge Index", "cutEdges", hide = True)
self.newOutput("Integer List", "Intersected Edge Plane Side", "cutEdgesDir", hide = True)
self.newOutput("Boolean", "Is Valid", "isValid", hide = True)
def draw(self, layout):
if self.edgesType == 'POINTS': layout.prop(self, "cyclic")
def getExecutionCode(self):
if self.inputs["Edge Indices"].isUsed: self.edgesType = 'EDGES'
else: self.edgesType = "POINTS"
isLinked = self.getLinkedInputsDict()
isLinkedOut = self.getLinkedOutputsDict()
if not any(isLinkedOut.values()): return ""
int = "intersections" if isLinkedOut["intersections"] else ""
edge = "cutEdges" if isLinkedOut["cutEdges"] else ""
dir = "cutEdgesDir" if isLinkedOut["cutEdgesDir"] else ""
valid= "isValid" if isLinkedOut["isValid"] else ""
yield from intersectPolylinePlane( "positions", "edges",
"planePoint", "planeNormal",
self.edgesType, self.cyclic,
intersections = int,
cutEdges = edge,
cutEdgesDir = dir,
isValid = valid,
message = "self.message" )
def getUsedModules(self):
return ["mathutils"]
def intersectPolylinePlane( positions, edges, planeCo, planeNo,
type, cyclic,
intersections = "",
cutEdges = "",
cutEdgesDir = "",
isValid = "",
message = ""):
getInt = True if intersections != "" else False
getEdge = True if cutEdges != "" else False
getDir = True if cutEdgesDir != "" else False
getOk = True if isValid != "" else False
getMsg = True if message != "" else False
if not any([getInt, getEdge, getDir, getOk]): return
if getInt : yield intersections + " = []"
if getEdge: yield cutEdges + " = []"
if getDir : yield cutEdgesDir + " = []"
if getOk : yield isValid + " = False"
yield "lenP = len({})".format(positions)
yield "if lenP > 1:"
i = " " * 8
if type == "POINTS":
yield " for i, pos1 in enumerate({}):".format(positions)
if cyclic: i = " " * 4
else: yield " if i != 0:"
yield i + " pos0 = {}[i-1]".format(positions)
elif type == "EDGES":
yield " for i, edge in enumerate({}):".format(edges)
yield " if max(edge) < lenP:"
yield i + " pos0, pos1 = {}[edge[0]], {}[edge[1]]".format(positions, positions)
yield i + " dot0, dot1 = (pos0-{}).dot({}), (pos1-{}).dot({})".format(planeCo, planeNo, planeCo, planeNo,)
yield i + " if dot1 == 0:"
if getInt : yield i + " {}.append(pos1)".format(intersections)
if getEdge: yield i + " {}.append(i)".format(cutEdges)
if getDir : yield i + " {}.append(0)".format(cutEdgesDir)
if getOk : yield i + " {} = True".format(isValid)
yield i + " elif (dot0 > 0 and dot1 < 0):"
if getInt : yield i + (" {}.append(mathutils.geometry.intersect_line_plane(pos0, pos1, {}, {}))"
.format(intersections, planeCo, planeNo))
if getEdge: yield i + " {}.append(i)".format(cutEdges)
if getDir : yield i + " {}.append(-1)".format(cutEdgesDir)
if getOk : yield i + " {} = True".format(isValid)
yield i + " elif (dot0 < 0 and dot1 > 0):"
if getInt : yield i + (" {}.append(mathutils.geometry.intersect_line_plane(pos0, pos1, {}, {}))"
.format(intersections, planeCo, planeNo))
if getEdge: yield i + " {}.append(i)".format(cutEdges)
if getDir : yield i + " {}.append( 1)".format(cutEdgesDir)
if getOk : yield i + " {} = True".format(isValid)
if getMsg :
yield " if not edges: {} = 'No Edges !'".format(message)
yield "else: {} = 'Not enough points'".format(message)
|
gpl-3.0
| 6,015,455,845,871,464,000 | 42.914729 | 113 | 0.542101 | false |
heromod/migrid
|
mig/shared/arcwrapper.py
|
1
|
37100
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# arcwrapper: main ARC middleware wrapper module
#
# Original:
# Copyright (C) 2006-2009 Jonas Lindemann
#
# this version:
# (C) 2009 Jost Berthold, grid.dk
# adapted to usage inside a MiG framework
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
"""ARC middleware interface module."""
import os
import sys
import string
import commands
import threading
import tempfile
import subprocess
# MiG utilities:
from shared.conf import get_configuration_object
config = get_configuration_object()
logger = config.logger
# Avoid massive log spam when unconditionally importing arcwrapper in other
# modules like jobstatus and jobscriptgenerator
if not config.arc_clusters:
raise Exception('ignoring arcwrapper import without ARC enabled!')
# to make this succeed:
# install nordugrid-arc-client and nordugrid-arc-python
# set LD_LIBRARY_PATH="$NORDUGRID_LOCATION/lib:$GLOBUS_LOCATION/lib
# PYTHONPATH="$NORDUGRID_LOCATION/lib/python2.4/site-packages"
try:
import arclib
except:
logger.error('problems importing arclib... trying workaround')
try:
logger.debug('Current sys.path is %s' % sys.path )
sys.path.append(os.environ['NORDUGRID_LOCATION']
+ '/lib/python2.4/site-packages')
import arclib
except:
raise Exception('arclib not found - no problem unless using ARC')
# (trivially inheriting) exception class of our own
class ARCWrapperError(arclib.ARCLibError):
def __init__(self,msg):
arclib.ARCLibError.__init__(self, msg)
class NoProxyError(arclib.ARCLibError):
""" A special error which can occur in this setting:
The user did not provide a valid proxy certificate, or the one
she provided is expired. We need to treat this error case
specially (advise to update, or create a new proxy)."""
    def __init__(self, msg):
arclib.ARCLibError.__init__(self,('No proxy available: %s' % msg))
class Proxy(arclib.Certificate):
"""Proxy management class.
This class handles a X509 proxy certificate."""
def __init__(self, filename):
"""Class constructor.
@type filename: string
@param filename: Proxy filename"""
self.__filename = os.path.abspath(filename)
if not os.path.isfile(self.__filename):
raise NoProxyError('Proxy file ' + filename + ' does not exist.')
try:
arclib.Certificate.__init__(self,arclib.PROXY,self.__filename)
except arclib.CertificateError, err:
raise NoProxyError(err.what())
# just testing...
logger.debug('Proxy Certificate %s from %s' \
% (self.GetSN(), self.getFilename()))
logger.debug('time left in seconds: %d' % self.getTimeleft())
def getFilename(self):
"""Return the proxy filename."""
return self.__filename
def getTimeleft(self):
"""Return the amount of time left on the proxy certificate (int)."""
timeleft = 0
if not self.IsExpired():
timeLeftStr = self.ValidFor()
factor = {'days':24*60*60,'day':24*60*60
,'hours':60*60, 'hour':60*60
,'minutes':60, 'minute':60
,'seconds':1, 'second':1}
timeLeftParts = timeLeftStr.split(',')
for part in timeLeftParts:
[val,item] = part.split()
f = factor[item]
if f:
timeleft = timeleft + int(val)*f
return timeleft
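    # Hedged worked example (ValidFor() output format assumed): a string such
    # as "1 days, 2 hours, 30 minutes" yields
    # 1*86400 + 2*3600 + 30*60 = 95400 seconds left on the proxy.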
# small helpers:
# splitting up an ARC job ID
def splitJobId(jobId):
""" Splits off the last part of the path from an ARC Job ID.
Reason: The job ID is a valid URL to the job directory on the
ARC resource, and all jobs have a common URL prefix. In addition,
job information on the ARC resource is usually obtained by
inspecting files at URL <JobID-prefix>/info/<JobID-last-part>
(see ARC/arclib/jobinfo.cpp).
This function can trigger an arclib.URLError exception.
"""
if not jobId.endswith('/'):
jobId = jobId + '/'
jobURL = arclib.URL(jobId)
path = os.path.split(jobURL.Path())[0]
return (jobURL.Protocol() + '://' + jobURL.Host() + ':'
+ str(jobURL.Port()) + os.path.dirname(path) + '/'
, os.path.basename(path))
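# Hedged example (URL layout assumed):
#   splitJobId('gsiftp://benedict.grid.aau.dk:2811/jobs/123456789')
# would return ('gsiftp://benedict.grid.aau.dk:2811/jobs/', '123456789'),
# so the job's info files live under .../jobs/info/123456789.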
# hack: issue a command line, return output and exit code
def getstatusoutput(cmd, env=None, startDir=""):
variableDefs = ""
if env:
for variableName in env.keys():
variableDefs = variableDefs + "%s=%s " % \
(variableName, env[variableName])
execCmd = variableDefs + cmd
if startDir == "":
resultVal, result = commands.getstatusoutput(execCmd)
else:
resultVal, result = commands.getstatusoutput('cd "%s";set;%s' % (startDir, execCmd))
resultLines = result.split('\n')
logger.debug("Executing: %s, result = %d" % (execCmd, resultVal))
if logger.getLogLevel() == 'DEBUG':
if len(resultLines)<200:
i = 0
for line in resultLines:
logger.debug("\t"+str(i)+": "+line.strip())
i = i + 1
return resultVal, resultLines
def popen(cmd, env=None):
variableDefs = ""
if env!=None:
for variableName in env.keys():
variableDefs = variableDefs + "%s=%s " \
% (variableName, env[variableName])
execCmd = variableDefs + cmd
logger.debug("popen: Starting %s" % (execCmd))
f = os.popen(execCmd)
return f
# asking the user for a proxy. This will be called from many places,
# thus centralised here (though too specific ).
def askProxy():
output_objects = []
output_objects.append({'object_type':'sectionheader',
'text':'Proxy upload'})
output_objects.append({'object_type':'html_form',
'text':"""
<form action="upload.py"
enctype="multipart/form-data" method="post">
<p>
Please specify a proxy file to upload:<br>
Such a proxy file can be created using the command-line tool
voms-proxy-init, and can be found in /tmp/x509up_u<your UID>.<br>
<input type="file" name="fileupload" size="40">
<input type="hidden" name="path" value=""" + \
'"' + Ui.proxy_name + '"' + \
""">
<input type="hidden" name="restrict" value="true">
<input type="submit" value="Send file">
</form>
"""})
return output_objects
def create_grid_proxy(cert_path, key_path, proxy_path):
"""
Create a default proxy cert. Uses grid-proxy-init.
In this way no additional voms information is added.
Returns the absolute path of the generated proxy. By standard placed in the /tmp/ folder.
"""
try:
shell_cmd = "../java-bin/generate_proxy %s %s %s" % (cert_path, key_path, proxy_path)
proc = subprocess.Popen(shell_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out,_) = proc.communicate()
logger.info(out.replace("\n", "."))
except Exception, e:
logger.error("Could not generate a proxy certificate: \n"+str(e))
raise
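# Hedged usage sketch (paths are assumptions, not from this repository):
#   create_grid_proxy('/etc/grid/usercert.pem', '/etc/grid/userkey.pem',
#                     '/tmp/x509up_u500')
# shells out to the bundled generate_proxy helper and leaves the proxy
# certificate at the path given as proxy_path.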
class Ui:
"""ARC middleware user interface class."""
# static information:
# service URL (Danish resources)
giis=arclib.URL('ldap://gridsrv4.nbi.dk:2135/O=Grid/Mds-Vo-Name=Denmark')
# and benedict cluster URL... for first tests
benedict =\
arclib.URL('ldap://benedict.grid.aau.dk:2135/o=grid/mds-vo-name=local')
fyrgrid =\
arclib.URL('ldap://fyrgrid.grid.aau.dk:2135/o=grid/mds-vo-name=local')
# hard-wired: expected proxy name
proxy_name = '.proxy.pem'
def __init__(self, userdir, require_user_proxy=False):
"""Class constructor"""
# would be nice to hold the Ui instance and have the resources
# set up on instantiation. but several problems arise:
# - A stateless web interface cannot carry over the Ui object
# between several calls. We cannot pickle this information if
# it contains SWIG proxy objects. userdir, proxy and lock can
# be pickled, but _clusters and _queues are the interesting ones.
# - Different users should not share the same Ui! So running the
# whole server with just one Ui will not work either.
# Allowed _clusters and _queues might depend on the user's
# permissions, but we can work with a superset and rely on
# ConstructTargets to filter out the allowed ones.
self._clusters = None # SWIG
self._queues = None # SWIG
self._userdir = None # holds user config, job cache, and proxy file
self._proxy = None # determines user permissions
self._arclibLock = threading.Lock()
proxy_path = os.path.join(userdir, self.proxy_name)
try:
if not os.path.isdir(userdir):
raise ARCWrapperError('Given user directory ' + userdir
+ ' does not exist.')
self._userdir = userdir
# if a proxy is not explicitly required and the user does not have a valid one
# then use the shared default proxy cert
if not require_user_proxy and \
( not os.path.exists(proxy_path) or Proxy(proxy_path).IsExpired() ):
logger.info("Using default proxy certificate.")
# Check if there is already a default proxy certificate and get its location
proxy_path = config.nordugrid_proxy
# it there is no default proxy or it is expired
if not os.path.exists(proxy_path) or Proxy(proxy_path).IsExpired():
cert_path = config.nordugrid_cert
key_path = config.nordugrid_key
# generate a new one
create_grid_proxy(cert_path, key_path, proxy_path)
else:
logger.info("Using personal proxy certificate.")
# proxy constructor might raise an exception as well
self._proxy = Proxy(proxy_path)
if self._proxy.IsExpired(): # should not happen
raise NoProxyError('Expired.')
except NoProxyError, err:
logger.error('Proxy error: %s' % err.what())
raise err
except arclib.ARCLibError, err:
logger.error('Cannot initialise: %s' % err.what())
raise ARCWrapperError(err.what())
except Exception, other:
logger.error('Unexpected error during initialisation.\n %s' % other)
raise ARCWrapperError(other.__str__())
def __initQueues(self):
""" Initialises possible queues for a job submission."""
logger.debug('init queues (for job submission/resource display)')
try:
# init data: cluster information (obtained per user)
self.__lockArclib()
# this takes ages:
# self._clusters = arclib.GetResources(Ui.giis)
self._clusters = []
logger.debug(config.arc_clusters)
for url_str in config.arc_clusters:
if url_str.startswith('ldap://'):
self._clusters.append(arclib.URL(url_str))
elif url_str in ['benedict','fyrgrid']:
self._clusters.append(eval('Ui.' + url_str))
logger.debug('clusters: ')
for c in self._clusters:
logger.debug('\t %s' % c)
self._queues = []
for cl in self._clusters:
qs = arclib.GetQueueInfo(cl)
self._queues = self._queues + list(qs)
self.__unlockArclib()
logger.debug('ARC Init, discovered queues are')
for q in self._queues:
logger.debug('\t %s' % q)
except NoProxyError, err:
self.__unlockArclib()
logger.error('Proxy error during queue initialisation: %s' % err )
raise err
except Exception, err:
self.__unlockArclib()
logger.error('ARC queue initialisation error: %s' % err )
self._clusters = []
self._queues = []
raise ARCWrapperError(err.__str__())
def __lockArclib(self):
""" ensures exclusive access to the interface and sets the environment
so that the user's proxy and home are used.
Locking is perhaps not needed in our setup, where anyway users
cannot share the same Ui (needed if _arclib.so not thread-safe,
though)."""
self._arclibLock.acquire()
self.__setupEnviron()
return
def __unlockArclib(self):
""" Releases the mutex lock of the interface.
Perhaps not needed."""
self._arclibLock.release()
return
def __setupEnviron(self):
"""Make sure the API acts on behalf of the calling user.
Called by __lockArclib.
"""
os.environ['X509_USER_PROXY'] = self._proxy.getFilename()
os.environ['HOME'] = self._userdir
return
def getProxy(self):
""" returns the proxy interface used"""
return self._proxy
def getQueues(self):
""" returns the queues we discovered for the clusters.
TODO: should only return _allowed_ queues
(__initQueues to change)."""
self.__initQueues()
return self._queues
def submitFile(self, xrslFilename, jobName=''):
"""Submit xrsl file as job to available ARC resources.
@type xrslFilename: string
@param xrslFilename: Filename containing a job description in XRSL.
@rtype list:
@return: list containing ARC jobIds (strings).
Throws an ARCWrapperError if unsuccessful."""
logger.debug( 'Submitting a job from file %s...' % xrslFilename )
currDir = os.getcwd()
try:
# Convert XRSL file into a string
f = file(xrslFilename, 'r')
xrslString = f.read()
f.close()
xrslAll = arclib.Xrsl(xrslString)
[jobDir, filename] = os.path.split(xrslFilename)
os.chdir(jobDir)
result = (self.submit(xrslAll, jobName))
os.chdir(currDir)
return result
except arclib.XrslError, err:
logger.error('Ui: XrslError: ' + err.what())
os.chdir(currDir)
raise ARCWrapperError('XrslError: ' + err.what())
def submit(self, xrslAll, jobName=''):
"""Submit xrsl object as job to available ARC resources.
The method expects an arclib.Xrsl object and its current
working directory to contain the referenced files (rel. paths).
@type xrslAll: arclib.Xrsl
@param xrslAll: job description in XRSL (arclib object).
@rtype list:
@return: list of jobIds(strings).
Any error is raised as an exception to the caller, as
ARCWrapperError or NoProxyError."""
try:
# Check for multiple xrsl
xrslSplit = xrslAll.SplitMulti()
# retrieve clusters and their queues
# might throw a NoProxyError, leading us to the end
self.__initQueues()
# Construct submission targets
logger.debug('Ui: Constructing targets:')
allTargets = arclib.ConstructTargets(self._queues, xrslAll)
targets = arclib.PerformStandardBrokering(allTargets)
for t in targets:
logger.debug('\t %s' % t)
# Submit job
jobIds = []
logger.debug('Ui: Submitting job .')
if len(targets) > 0:
self.__lockArclib()
for xrsl in xrslSplit:
jobId = arclib.SubmitJob(xrsl, targets)
jobIds.append(jobId)
logger.debug('Ui:' + jobId + 'submitted.')
jobName = xrsl.GetRelation('jobName'
).GetSingleValue()
arclib.AddJobID(jobId, jobName)
self.__unlockArclib()
return jobIds
else:
# len(targets) == 0, thus:
raise ARCWrapperError("No matching resource for submission.")
except NoProxyError, err:
logger.error('Proxy error during job submission: ' + err.what())
if self._arclibLock.locked():
# should not happen!
# we come here from initQueues
logger.error('submit: still locked???')
self.__unlockArclib()
raise err
except arclib.XrslError, message:
logger.error('Ui,XRSL' + message.what())
if self._arclibLock.locked(): # should not happen!
self.__unlockArclib()
raise ARCWrapperError('XrslError: ' + message.what())
except arclib.JobSubmissionError, message:
logger.error('Ui,Submit: ' + message.what())
self.__unlockArclib()
raise ARCWrapperError('JobSubmissionError: ' + message.what())
except arclib.TargetError, message:
logger.error('Ui,Target: ' + str(message))
if self._arclibLock.locked(): # should not be...
self.__unlockArclib()
raise ARCWrapperError('TargetError: ' + str(message))
except Exception, err:
if self._arclibLock.locked(): # ...
self.__unlockArclib()
logger.error('Unexpected error: %s' % err )
raise ARCWrapperError(err.__str__())
def AllJobStatus(self):
"""Query status of jobs in joblist.
The command returns a dictionary of jobIDs. Each item
in the dictionary consists of an additional dictionary with the
attributes:
name = Job name
status = ARC job states, ACCPTED, SUBMIT, INLRMS etc
error = Error status
sub_time = string(submission_time)
completion = string(completion_time)
cpu_time = string(used_cpu_time)
wall_time = string(used_wall_time)
If there was an error, an empty dictionary is returned.
Example:
jobList = ui.jobStatus()
print jobList['gsiftp://...3217']['name']
print jobList['gsiftp://...3217']['status']
@rtype: dict
@return: job status dictionary."""
logger.debug('Requesting job status for all jobs.')
jobList = {}
# GetJobIDs returns a multimap, mapping job names to JobIDs...
self.__lockArclib()
try:
# ATTENTION: GetJobIDs does not throw an exception
# if the .ngjobs file is not found. Instead, it
# only complains on stderr and returns {}.
if not os.path.isfile( \
os.path.join(self._userdir, '.ngjobs')):
logger.debug('No Job file found, skipping')
return jobList
else:
jobIds = arclib.GetJobIDs()
except Exception, err:
logger.error('could not get job IDs: %s', err)
self.__unlockArclib()
return jobList
self.__unlockArclib()
# use an iterator over the multimap elements
# do not call iter.next() at the end (segfaults!)
iter = jobIds.begin()
i = 0
while i < jobIds.size():
i = i + 1
(jobName,jobId) = iter.next()
# this is what GetJobIDs really does when called with no arguments
# jobListFile = file(os.path.join(self._userdir,
# '.ngjobs'), 'r')
# lines = jobListFile.readlines()
# jobListFile.close()
# for line in lines:
# (jobId, jobName) = line.strip().split('#')
logger.debug('Querying job %s (%s)' % (jobId, jobName))
jobList[jobId] = {}
jobList[jobId]['name'] = jobName
status = None
exitCode = None
sub_time = None
self.__lockArclib()
try:
# jobInfo = arclib.GetJobInfoDirect(jobId)
jobInfo = arclib.GetJobInfo(jobId)
status = jobInfo.status
exitCode = jobInfo.exitcode
sub_time = jobInfo.submission_time.__str__()
completed= jobInfo.completion_time.__str__()
# cpu_time = jobInfo.used_cpu_time.__str__()
# wall_time= jobInfo.used_wall_time.__str__()
except arclib.FTPControlError:
logger.error('Failed to query job %s' % jobName)
status = 'REMOVED'
exitCode = -1
completed = None
cpu_time = None
wall_time = None
self.__unlockArclib()
jobList[jobId]['status'] = status
jobList[jobId]['error' ] = exitCode
jobList[jobId]['submitted'] = sub_time
jobList[jobId]['completed'] = completed
# jobList[jobId]['cpu_time' ] = sub_time
# jobList[jobId]['wall_time'] = sub_time
logger.debug(' %s: %s' % (jobId, jobList[jobId]))
return jobList
def jobStatus(self, jobId):
"""Retrieve status of a particular job.
returns: dictionary containing keys name, status, error...
(see allJobStatus)."""
logger.debug('Requesting job status for %s.' % jobId)
jobInfo = { 'name':'UNKNOWN','status':'NOT FOUND','error':-1 }
# check if we know this job at all:
self.__lockArclib()
job_ = arclib.GetJobIDs([jobId])
self.__unlockArclib()
# ugly! GetJobIDs return some crap if not found...
jobName = [ j for j in job_ ][0]
if jobName == '': # job not found
logger.debug('Job %s was not found.' % jobId)
else:
jobInfo['name'] =jobName
# ASSERT(jobId = jobs[jobName])
self.__lockArclib()
try:
logger.debug('Querying job %s (%s)' % (jobId,jobName))
info = arclib.GetJobInfo(jobId)
jobInfo['status'] = info.status
jobInfo['error'] = info.exitcode
jobInfo['submitted'] = info.submission_time.__str__()
jobInfo['completed'] = info.completion_time.__str__()
# jobInfo['cpu_time' ] = info.used_cpu_time.__str__()
# jobInfo['wall_time'] = info.used_wall_time.__str__()
except arclib.ARCLibError, err:
logger.error('Could not query: %s' % err.what())
                jobInfo['status'] = 'UNABLE TO RETRIEVE: ' + err.what()
jobInfo['error'] = 255
jobInfo['submitted'] = 'unknown'
self.__unlockArclib()
logger.debug(' Returned %s' % jobInfo)
return jobInfo
def cancel(self, jobID):
"""Kill a (running?) job.
If this fails, complain, and retrieve the job status.
@type jobID: string
@param jobID: jobId URL identifier."""
logger.debug('Trying to stop job %s' % jobID )
success = False
self.__lockArclib()
try:
arclib.CancelJob(jobID)
success = True
except arclib.FTPControlError, err:
logger.error('Error canceling job %s: %s' % (jobID, err.what()))
            if logger.getLogLevel() == 'DEBUG':
try:
info = arclib.GetJobInfoDirect(jobID)
logger.debug('Job status: %s' % info.status)
except arclib.ARCLibError, err:
logger.debug('No job status known')
self.__unlockArclib()
return success
def clean(self, jobId):
"""Removes a (finished?) job from a remote cluster.
If this fails, just remove it from our list (forget it).
@type jobID: string
@param jobID: jobId URL identifier."""
logger.debug('Cleaning up job %s' % jobId )
self.__lockArclib()
try:
arclib.CleanJob(jobId)
except arclib.FTPControlError, err:
logger.error('Failed to clean job %s: %s' % (jobId, err.what()))
arclib.RemoveJobID(jobId)
self.__unlockArclib()
def getResults(self, jobId, downloadDir=''):
"""Download results from grid job.
@type jobId: string
@param jobID: jobId URL identifier.
@type downloadDir: string
@param downloadDir: Download results to specified directory.
@rtype: list
@return: list of downloaded files (strings)"""
logger.debug('Downloading files from job %s' % jobId )
complete = []
currDir = os.getcwd()
# jobID is a valid URL for the job directory.
# we chop off the final number (should be unique enough)
# and use it as a directory name to download (emulates behaviour
# of ngget: downloaddir _prefixes_ the dir to which we download).
try:
(jobPath,jobBasename) = splitJobId(jobId)
jobInfoDir= jobPath + '/info/' + jobBasename
jobDir = jobPath + '/' + jobBasename
os.chdir(self._userdir)
if not downloadDir == '':
if not os.path.exists(downloadDir):
os.mkdir(downloadDir)
elif not os.path.isdir(downloadDir):
raise ARCWrapperError(downloadDir
+ ' exists, not a directory.')
os.chdir(downloadDir)
if not os.path.exists(jobBasename):
os.mkdir(jobBasename)
else:
if not os.path.isdir(jobBasename):
raise ARCWrapperError('Cannot create job directory,'
+' existing file %s in the way.'\
% jobBasename)
os.chdir(jobBasename)
except Exception, err:
logger.error('Error creating job directory: %s' % err)
os.chdir(currDir)
raise ARCWrapperError(err.__str__())
logger.debug('downloading output summary file')
self.__lockArclib()
try:
ftp = arclib.FTPControl()
# We could just download the whole directory.
# But better use the contents of "output" in
# the info-directory... (specified by user)
# to avoid downloading large input files.
# ftp.DownloadDirectory(jobURL, jobBasename)
#
# We use a temp file to get this information first
(tmp,tmpname) = tempfile.mkstemp(prefix='output', text=True)
os.close(tmp)
ftp.Download(arclib.URL(jobInfoDir + '/output'), tmpname)
lines = file(tmpname).readlines()
os.remove(tmpname)
files = [ l.strip().strip('/') for l in lines ]
# also get the entire directory listing from the server
dir = ftp.ListDir(arclib.URL(jobDir),True)
            # map each basename to its FileInfo so per-file attributes (isdir)
            # can be looked up inside the download loop below
            fileinfos = dict((os.path.basename(x.filename), x) for x in dir)
            basenames = list(fileinfos.keys())
if '' in files:
logger.debug('downloading _all_ files')
# TODO for files which are already there?
ftp.DownloadDirectory(arclib.URL(jobDir),'.')
complete = basenames
else:
for f in files:
if f in basenames:
# we should download this one
try:
                            if fileinfos[f].isdir:
logger.debug('DownloadDir %s' % f )
ftp.DownloadDirectory(\
arclib.URL(jobDir + '/' + f), f)
# ... which operates recursively
complete.append( f + '/ (dir)')
else:
logger.debug('Download %s' % f )
ftp.Download(arclib.URL(jobDir + '/' + f), f)
complete.append( f )
except arclib.ARCLibError, err:
logger.error('Error downloading %s: %s' \
% (f,err.what()))
except arclib.ARCLibError, err:
logger.error('ARCLib error while downloading: %s' % err.what())
self.__unlockArclib()
os.chdir(currDir)
raise ARCWrapperError(err.what())
except Exception, err:
logger.error('Error while downloading.\n %s' % err)
self.__unlockArclib()
os.chdir(currDir)
raise ARCWrapperError(err.__str__())
# return
logger.debug(string.join(['downloaded:'] + complete, ' '))
os.chdir(currDir)
return complete
def lsJobDir(self, jobId):
"""List files at a specific URL.
@type jobId: string
@param jobId: jobId, which is URL location of job dir.
@rtype: list
@return: list of FileInfo
"""
# the jobID is a valid URL to the job directory. We can use it to
# inspect its contents.
#
# For other directories (gmlog or other), using FTPControl, we do
# not get accurate file sizes, only for the real output
# and for scripts/files in the proper job directory.
logger.debug('ls in JobDir for job %s' % jobId )
ftp = arclib.FTPControl()
url = arclib.URL(jobId)
self.__lockArclib()
try:
files = ftp.ListDir(url)
except arclib.ARCLibError, err:
logger.debug('Error during file listing: %s' % err.what())
errmsg = arclib.FileInfo()
            errmsg.filename = err.what()
errmsg.size = 0
errmsg.isDir = False
files = [ errmsg ]
self.__unlockArclib()
# filter out the gmlog if present
def notGmlog(file):
return ((not file.isDir) or (file.filename != 'gmlog'))
return (filter(notGmlog, files))
# stdout of a job can be found directly in its job directory, but might have
# a different name (user can give the name). For a "live output request",
# we download the xrsl description from the info directory and look for
# the respective names.
# For jobs with "joined" stdout and stderr, we get an error when retrieving
# the latter, and fall back to retrieving stdout instead.
def recoverXrsl(self, jobId):
""" retrieves the xrsl for a job (from the server), if possible"""
logger.debug('Trying to obtain xRSL for job %s' % jobId)
xrsl = arclib.Xrsl('')
self.__lockArclib()
try:
(jobPath,jobBasename) = splitJobId(jobId)
xrslURL = arclib.URL(jobPath + '/info/'
+ jobBasename + '/description')
ftp = arclib.FTPControl()
ftp.Download(xrslURL, 'tmp')
str = file('tmp').read()
xrsl = arclib.Xrsl(str)
os.remove('tmp')
except arclib.ARCLibError, err:
logger.error('Failed to get Xrsl: %s' % err.what())
self.__unlockArclib()
logger.debug('Obtained %s' % xrsl)
return xrsl
def getStandardOutput(self, jobId):
"""Get the standard output of a running job.
@type jobID: string
@param jobID: jobId URL identifier.
@rtype: string
@return: output from the job"""
logger.debug('get std. output for %s' % jobId)
try:
xrsl = self.recoverXrsl(jobId)
try:
outname = xrsl.GetRelation('stdout').GetSingleValue()
except arclib.XrslError, err:
outname = 'stdout' # try default if name not found
logger.debug('output file name: %s' % outname)
try:
self.__lockArclib()
ftp = arclib.FTPControl()
ftp.Download(arclib.URL(jobId + '/' + outname))
except Exception, err:
self.__unlockArclib()
raise ARCWrapperError(err.__str__())
self.__unlockArclib()
logger.debug('output downloaded')
result = file(outname).read()
os.remove(outname)
except arclib.ARCLibError, err:
result = 'failed to retrieve job output stdout: %s' % err.what()
logger.error('%s' % result)
logger.debug('output retrieved')
return result
# (resultVal, result) = utils.getstatusoutput('ngcat -o %s'
# % jobId, self._env)
#
# return result
def getStandardError(self, jobId):
"""Get the standard error of a running job.
@type jobID: string
@param jobID: jobId URL identifier.
@rtype: list
@return: list of return value from ARC and output from job."""
logger.debug('get stderr output for %s' % jobId)
try:
xrsl = self.recoverXrsl(jobId)
try:
outname = xrsl.GetRelation('stderr').GetSingleValue()
except arclib.XrslError, err:
outname = 'stderr' # try default if name not found
logger.debug('output file name: %s' % outname)
try:
self.__lockArclib()
ftp = arclib.FTPControl()
ftp.Download(arclib.URL(jobId + '/' + outname))
except Exception, err:
self.__unlockArclib()
raise ARCWrapperError(err.__str__())
self.__unlockArclib()
logger.debug('output downloaded')
result = file(outname).read()
os.remove(outname)
except arclib.ARCLibError, err:
result = 'failed to retrieve job output stderr: %s' % err.what()
logger.error('%s' % result)
logger.debug('output retrieved')
return result
# (resultVal, result) = utils.getstatusoutput('ngcat -e %s'
# % jobId, self._env)
#
# return result
######################### old code:
def getGridLog(self, jobId):
"""Get the grid log of a running job.
@type jobID: string
@param jobID: jobId URL identifier.
@rtype: list
@return: list of return value from ARC and output from job."""
(resultVal, result) = getstatusoutput('ngcat -l %s'
% jobId, self._env)
return result
def copy(self, source, dest=''):
"""Copy file from source URL to dest URL.
@type source: string
@param source: URL of file to copy from.
@type dest: string
@param dest: destination file name on server."""
(resultVal, result) = getstatusoutput('ngcp %s %s'
% (source, dest), self._env)
return resultVal
def pcopy(self, source):
"""Open the ngcp command as a popen process, redirecting output
to stdout and return process file handle.
@type source: string
@param source: URL to open"""
f = popen('ngcp %s /dev/stdout' % source, self._env)
return f
def sync(self):
"""Query grid for jobs and update job list.
@rtype: list
@return: list of [resultVal, result], where resultVal is the return value
from the ARC command and result is a list of command output."""
(resultVal, result) = \
getstatusoutput('ngsync -f -d %d'
% self._debugLevel, self._env)
|
gpl-2.0
| -5,226,272,863,273,399,000 | 36.436932 | 102 | 0.550943 | false |
yaukwankiu/armor
|
pattern_.py
|
1
|
62433
|
# -*- coding: utf-8 -*-
# defining the basic object we will be working with
# adapted from :
# /media/KINGSTON/ARMOR/ARMOR/python/weatherPattern.py ,
# /media/KINGSTON/ARMOR/ARMOR/python/clustering.py ,
# /media/KINGSTON/ARMOR/2013/pythonJan2013/basics.py
# Yau Kwan Kiu, Room 801, 23-1-2013
##############################################################################################
#
#==== imports ================================================================================
# some of the stuff are to be moved to a submodule
import copy
import time
import os
import re
import numpy
import numpy as np
import numpy.ma as ma
#import matplotlib
import matplotlib.pyplot as plt
#import scipy.misc.pilutil as smp
#import numpy.fft as fft
#import shutil
#import sys
import pickle
from copy import deepcopy
try:
from scipy import signal
from scipy import interpolate
except ImportError:
#print "Scipy not installed"
pass
#==== setting up the global parameters========================================================
from defaultParameters import * #bad habits but all these variables are prefixed with "default"
# or at least i try to make them to
import colourbarQPESUMS # the colourbars for the Central Weather Bureau
import colourbarQPESUMSwhiteBackground # the same as above, with white backgrounds
#==== defining the classes ===================================================================
class DBZ(object): #python 2.7 (?) new style class, subclassing object
"""module predecessors: basics.py; weatherPattern.py
NOTE: a DBZ object can be loaded from data or generated in run time (e.g. by translation, or
other operations.) There is flexibility in this. In particular, the object is instantiated before
its values loaded (either from file or from other computations).
attributes (some to be defined at __init__, some afterwards):
DBZ.name - a string, the name of the instance, default = something like "DBZ20120612.0200"
DBZ.matrix - a numpy.ma.core.MaskedArray object
DBZ.datatime - a string like "20120612.0200"
DBZ.dataPath - a string like "../data/dbz20120612.0200.dat"
can be relative (preferred) or absolute
DBZ.outputPath - a string like "../data/dbz20120612.0200.dat"
can be relative (preferred) or absolute
DBZ.inputType - a string to record the type of input file, most common is "txt",
which should be 2-dim arrays in text, separated by " " and "\n",
readable by numpy or matlab
- convention: first row of data = bottom row of pixels
DBZ.image - I am not going to define this yet, since matplotlib.pyplot is pretty fast
DBZ.imagePath - a string like "../data/dbz20120612.0200.png"
can be relative (preferred) or absolute
default = "" (undefined)
DBZ.dt - time interval from the previous image (default=1; how about 10mins = 1/6 hour??)
DBZ.dy - grid size, latitudinal, in km (default =1; how about 0.0125 degree = how many kms?)
DBZ.dx - grid size, longitudinal, in km (same as above)
DBZ.timeStamp - time stamp when the object was created
DBZ.verbose - whether print out a lot of stuff when we work with this object
#################################################################
# DBZ.inputFolder - a string, self evident # <-- not used yet,
# DBZ.outputFolder - ditto # perhaps not here
# DBZ.outputFolderForImages - ditto #
#################################################################
DBZ.database - a string, pointing to the database, somehow, for future
methods:
DBZ.load - load into DBZ.matrix
DBZ.save
DBZ.saveImage
DBZ.printToScreen
use:
>>> from armor import pattern
>>> a = pattern.DBZ(dataTime="20120612.0200",name="", dt=1, dx=1, dy=1, dataPath="", imagePath="")
>>> a.load()
>>> a.printToScreen()
>>> import numpy as np
>>> import armor
>>> import armor.pattern as pattern
>>> dbz=pattern.DBZ
>>> a = dbz('20120612.0300')
DBZ20120612.0300initialised. Use the command '___.load()' to load your data, and '__.printToScreen()' to print it to screen.
>>> b = dbz('20120612.0330')
DBZ20120612.0330initialised. Use the command '___.load()' to load your data, and '__.printToScreen()' to print it to screen.
>>> a.load()
    >>> b.load()
>>> c=a-b
DBZ20120612.0300_minus_DBZ20120612.0330initialised. Use the command '___.load()' to load your data, and '__.printToScreen()' to print it to screen.
>>> c.show()
>>> d=a*b
DBZ20120612.0300_times_DBZ20120612.0330initialised. Use the command '___.load()' to load your data, and '__.printToScreen()' to print it to screen.
>>> d.show()
>>>
"""
def __init__(self, dataTime="NoneGiven", matrix=-999, name="", dt=1, dx=1, dy=1,\
dataPath="",outputPath ="",imagePath="",\
cmap='hsv', vmin=-20, vmax=100, coordinateOrigin="default",\
coastDataPath="", relief100DataPath='', relief1000DataPath='',\
relief2000DataPath='', relief3000DataPath='',\
lowerLeftCornerLatitudeLongitude ='',\
upperRightCornerLatitudeLongitude ='',\
database="", verbose=False):
self.timeStamp = str(int(time.time()))
"""
Notes:
1. cmap = colourbar of the dbz plot, need to find out how to plot it with
CWB's colour scheme as specified in the modules colourbarQPESUMS
and colourbarQPESUMSwhiteBackground
2. coordinateOrigin: normally either place at the centre of the picture
or at Taichung Park
(24.145056°N 120.683329°E)
which translates to
(492, 455) in our 881x921 grid
reference:
http://zh.wikipedia.org/wiki/%E8%87%BA%E4%B8%AD%E5%85%AC%E5%9C%92
/media/KINGSTON/ARMOR/2013/python/testing/test104/test104.py
"""
########
#
if name == "":
name = "DBZ" + dataTime
if type(matrix)==type(-999): # if matrix not given,
matrix = ma.zeros((defaultHeight, defaultWidth)) # initialise with zeros
matrix.fill_value = -999 # -999 for missing values always
if isinstance(matrix, ma.MaskedArray):
matrix.fill_value = -999
if isinstance(matrix, np.ndarray) and not isinstance(matrix, ma.MaskedArray):
matrix = matrix.view(ma.MaskedArray)
matrix.mask = None
matrix.fill_value = -999
if dataPath =="":
dataPath = defaultInputFolder + "COMPREF." + dataTime +".dat"
if outputPath =="":
outputPath = defaultOutputFolder + name + '_'+ self.timeStamp + ".dat"
if imagePath =="":
imagePath = defaultOutputFolderForImages + name + '_'+self.timeStamp + ".png"
if coastDataPath == "":
coastDataPath = defaultInputFolder + "taiwanCoast.dat"
if relief100DataPath == "":
relief100DataPath = defaultInputFolder + "relief100.dat"
if relief1000DataPath == "":
relief1000DataPath = defaultInputFolder + "relief1000Extended.dat"
if relief2000DataPath == "":
relief2000DataPath = defaultInputFolder + "relief2000Extended.dat"
if relief3000DataPath == "":
relief3000DataPath = defaultInputFolder + "relief3000Extended.dat"
if lowerLeftCornerLatitudeLongitude =="":
lowerLeftCornerLatitudeLongitude = defaultLowerLeftCornerLatitudeLongitude
if upperRightCornerLatitudeLongitude=="":
upperRightCornerLatitudeLongitude = defaultUpperRightCornerLatitudeLongitude
if database =="":
database = defaultDatabase
###############################################################################
# if matrix shape = (881, 921) then by default the origin at Taichung Park
# (24.145056°N 120.683329°E)
# or (492, 455) in our grid
# else the centre is the origin by default
###############################################################################
if coordinateOrigin == "default": #default
if matrix.shape == (881, 921):
coordinateOrigin = (492, 455)
else:
coordinateOrigin = (matrix.shape[0]//2, matrix.shape[1]//2)
elif coordinateOrigin == "centre" or coordinateOrigin=="center":
coordinateOrigin = (matrix.shape[0]//2, matrix.shape[1]//2)
elif (coordinateOrigin == 'Taichung' or \
coordinateOrigin == 'Taichung Park' or\
coordinateOrigin == 'taichungpark') and matrix.shape==(881,921):
coordinateOrigin = (492,455)
#coordinateOrigin = (0,0) # switch it off - will implement coordinate Origin later
if verbose:
print "------------------------------------------------------------------"
print "armor.pattern.DBZ:\nname, dt, dx, dy, dataPath, imagePath ="
print name, dt, dx, dy, dataPath, imagePath
#
########
self.matrix = matrix
self.dataTime = dataTime
self.name = name
self.dt = dt #retrospective
self.dx = dx #grid size
self.dy = dy
self.outputFolder= defaultOutputFolder
self.dataPath = dataPath
self.outputPath = outputPath
self.imagePath = imagePath
self.coastDataPath = coastDataPath
self.relief100DataPath = relief100DataPath
self.relief1000DataPath = relief1000DataPath
self.relief2000DataPath = relief2000DataPath
self.relief3000DataPath = relief3000DataPath
self.lowerLeftCornerLatitudeLongitude = lowerLeftCornerLatitudeLongitude
self.upperRightCornerLatitudeLongitude = upperRightCornerLatitudeLongitude
self.database = database
self.cmap = cmap
self.vmin = vmin # min and max for makeImage()
self.vmax = vmax
self.coordinateOrigin = coordinateOrigin
        self.O = self.coordinateOrigin    # alias, no guarantee
self.verbose = verbose
#self.matrix_backups = [] # for storage
#if verbose:
# print(self.name + "initialised. Use the command '___.load()' to load your data, " +\
# "and '__.printToScreen()' to print it to screen.")
#################################################################################
# basic operator overloads
def __call__(self, i=-999, j=-999, display=False):
if i ==-999 and j ==-999:
height, width = self.matrix.shape
h = int(height**.5 /2)
w = int(width**.5 /2)
print self.matrix.filled().astype(int)[height//2-h:height//2+h,\
width//2-w: width//2+w]
return self.matrix.filled().astype(int)
else:
"""
returns the value at fractional indices (i, j), interpolated from the surrounding grid points
"""
arr= self.matrix
i0 = int(i)
j0 = int(j)
i1 = i0 + 1
j1 = j0 + 1
i_frac = i % 1
j_frac = j % 1
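# bilinear interpolation: fetch the four surrounding grid points and weight them by the fractional offsets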
f00 = arr[i0,j0]
f01 = arr[i0,j1]
f10 = arr[i1,j0]
f11 = arr[i1,j1]
interpolated_value = (1-i_frac)*(1-j_frac) * f00 + \
(1-i_frac)*( j_frac) * f01 + \
( i_frac)*(1-j_frac) * f10 + \
( i_frac)*( j_frac) * f11
if display:
print i_frac, j_frac, f00, f01, f10, f11
return interpolated_value
def __add__(self, DBZ2):
"""defining the addition of two pattern.DBZ objects
c.f. http://docs.python.org/release/2.5.2/ref/numeric-types.html
can move to CUDA in the future
"""
return DBZ(dataTime=self.dataTime, matrix=self.matrix+DBZ2.matrix,\
name=self.name+"_plus_"+DBZ2.name, \
dt=self.dt, dx=self.dx, dy=self.dy,\
dataPath =self.outputPath+"_plus_"+DBZ2.name+".dat",\
outputPath=self.outputPath+"_plus_"+DBZ2.name+".dat",\
imagePath =self.imagePath +"_plus_"+DBZ2.name+".png",\
database =self.database,\
cmap=self.cmap, verbose=self.verbose)
def __sub__(self, DBZ2):
"""defining the subtraction of two pattern.DBZ objects
c.f. http://docs.python.org/release/2.5.2/ref/numeric-types.html
can move to CUDA in the future
"""
return DBZ(dataTime=self.dataTime, matrix=self.matrix-DBZ2.matrix,\
name=self.name+"_minus_"+DBZ2.name, \
dt=self.dt, dx=self.dx, dy=self.dy,\
dataPath =self.outputPath+"_minus_"+DBZ2.name+".dat",\
outputPath=self.outputPath+"_minus_"+DBZ2.name+".dat",\
imagePath =self.imagePath +"_minus_"+DBZ2.name+".png",\
database =self.database,\
cmap=self.cmap, verbose=self.verbose)
def __mul__(self, M):
""" defining multiplication
c.f. http://docs.python.org/release/2.5.2/ref/numeric-types.html
can move to CUDA in the future
"""
if type(M)==type(1) or type(M)==type(1.1) or type(M)==type(self.matrix) :
matrix = self.matrix * M
name=self.name+"_times_"+ str(M)
if type(M)==type(self):
matrix = self.matrix * M.matrix
name=self.name+"_times_"+ M.name
return DBZ(dataTime=self.dataTime, matrix=matrix,\
dt=self.dt, dx=self.dx, dy=self.dy,\
name =name,
dataPath =self.outputPath+"_times_"+str(M)+".dat",\
outputPath=self.outputPath+"_times_"+str(M)+".dat",\
imagePath =self.imagePath +"_times_"+str(M)+".png",\
database =self.database,\
cmap=self.cmap, verbose=self.verbose)
def __rmul__(self, M):
""" defining multiplication on the right
c.f. http://docs.python.org/release/2.5.2/ref/numeric-types.html
can move to CUDA in the future
"""
if type(M)==type(1) or type(M)==type(1.1) or type(M)==type(self.matrix) :
matrix = self.matrix * M
name=self.name+"_times_"+ str(M)
if type(M)==type(self):
matrix = self.matrix * M.matrix
name=self.name+"_times_"+ M.name
return DBZ(dataTime=self.dataTime, matrix=matrix,\
dt=self.dt, dx=self.dx, dy=self.dy,\
name =name,
dataPath =self.outputPath+"_times_"+str(M)+".dat",\
outputPath=self.outputPath+"_times_"+str(M)+".dat",\
imagePath =self.imagePath +"_times_"+str(M)+".png",\
database =self.database,\
cmap=self.cmap, verbose=self.verbose)
# end basic operator overloads
##################################
############################################################
# basic i/o's
def load(self):
"""
DBZ.load - load into DBZ.matrix
adapted from basics.readToArray(path)
"""
m = np.loadtxt(self.dataPath)
self.matrix = ma.array(m)
# setting the mask
self.matrix.fill_value = -999 # -999 for missing values
# self.matrix.fill_value = -20.1 # -20 for missing values
self.matrix.mask = (m < -20) # smaller than -20 considered no echo
# 1 March 2013
##
# THE FOLLOWING IS SKIPPED TO SAVE MEMORY
# loading coastal data
#try:
# self.coastData = np.loadtxt(self.coastDataPath)
#except:
# print "Cannot load coast data from the path: ", self.coastDataPath
def loadCoast(self):
self.coastData = np.loadtxt(self.coastDataPath)
def load100(self):
self.coastData = np.loadtxt(self.relief100DataPath)
def load1000(self):
self.coastData = np.loadtxt(self.relief1000DataPath)
def load2000(self):
self.coastData = np.loadtxt(self.relief2000DataPath)
def load3000(self):
self.coastData = np.loadtxt(self.relief3000DataPath)
def toArray(self):
"""convert return a normal array filled with -999 for missing values for other uses
"""
return ma.filled(self.matrix)
def save(self):
"""
* We convert the masked array into a standard array with masked data filled by -999
* adapted from basics.writeArrayToTxtFile(arr, path, as_integer=False):
if as_integer:
np.savetxt(path, arr, fmt='%.0f') # 0 decimal place
else:
np.savetxt(path, arr, fmt='%.2f') # two decimal places as default
"""
np.savetxt(self.outputPath, self.toArray())
def saveMatrix(self):
""" alias for self.save()
"""
self.save()
def makeImage(self, matrix="", vmin=99999, vmax=-99999, cmap="", title="",\
showColourbar=True, closeAll=True):
"""
requires: matplotlib
to make the plot before you save/print it to screen
*adapted from basics.printToScreen(m,cmap='gray'):
which was in turn adapted from stackoverflow:
http://stackoverflow.com/questions/7875688/how-can-i-create-a-standard-colorbar-for-a-series-of-plots-in-python
def printToScreen(m,cmap='gray'):
fig, axes = plt.subplots(nrows=1, ncols=1)
# The vmin and vmax arguments specify the color limits
im = axes.imshow(m, vmin=-20, vmax=100, cmap=cmap)
cax = fig.add_axes([0.9, 0.1, 0.03, 0.8])
fig.colorbar(im, cax=cax)
plt.show()
!!! TO DO: FIX THE AXES !!!
"""
if isinstance(matrix, str):
matrix = self.matrix
if title =="":
title = self.name
if cmap == "":
cmap = self.cmap
if vmin == 99999:
vmin = self.vmin
if vmax == -99999:
vmax = self.vmax
# clear the canvass
if closeAll:
#plt.clf()
plt.close()
# make the image
fig, axes = plt.subplots(nrows=1, ncols=1)
im = axes.imshow(matrix, # or np.flipud(self.matrix)?
vmin=vmin, vmax=vmax, cmap=cmap) # The vmin and vmax arguments
# specify the color limits
plt.title(title)
if showColourbar :
cax = fig.add_axes([0.9, 0.1, 0.01, 0.8])
fig.colorbar(im,cax=cax)
#plt.show() # wait, don't show!
def saveImage(self):
self.makeImage()
plt.savefig(self.imagePath, dpi=200)
def printToScreen(self, matrix="", cmap=""):
self.makeImage(matrix=matrix, cmap=cmap)
plt.show()
def show(self, matrix="", cmap=""):
"""alias to printToScreen()
"""
self.printToScreen(matrix=matrix, cmap=cmap)
def showWithFlip(self, cmap=""):
"""flip it upside down and show it
"""
self.matrix = np.flipud(self.matrix)
self.printToScreen(cmap=cmap)
def showWithCoast(self, matrix="", cmap='', intensity=9999):
if matrix=="":
matrix=self.matrix
try:
if self.showingWithCoast: # if already showing coast: do nothing
self.show(matrix=matrix)
return None # just show and go
except AttributeError: # if it didn't happen before: default = False
self.showingWithCoast = False # just do something
self.showingWithCoast = True
self.matrix_backup = self.matrix.copy()
if cmap != '':
self.cmap_backup = self.cmap
self.cmap = cmap
else:
self.cmap_backup = self.cmap
try:
if self.coastData == "" : print "haha" #test for existence
except AttributeError:
self.loadCoast()
print "\n... coast data loaded from ", self.coastDataPath, "for ", self.name
for v in self.coastData:
self.matrix[v[0], v[1]] += intensity
self.show(matrix=matrix)
def show2(self, cmap='', intensity=99999):
""" adding the coastline and then flip it
"""
try:
if self.showingWithCoast: # if already showing coast: do nothing
self.show()
return None # just show and go
except AttributeError: # if it didn't happen before: default = False
self.showingWithCoast = False # just do something
self.showingWithCoast = True
self.matrix_backup = self.matrix.copy()
if cmap != '':
self.cmap_backup = self.cmap # cmap is a colormap name string; keep a plain reference (cf. showWithCoast)
self.cmap = cmap
else:
self.cmap_backup = self.cmap
try:
if self.coastData == "" : print "haha" #test for existence
except AttributeError:
self.loadCoast()
print "\n... coast data loaded from ", self.coastDataPath, "for ", self.name
for v in self.coastData:
self.matrix[v[0], v[1]] = intensity
self.matrix = np.flipud(self.matrix)
self.printToScreen(cmap=cmap)
def showWithoutCoast(self):
"""resetting
"""
self.showingWithCoast = False
self.cmap = self.cmap_backup
self.matrix = self.matrix_backup
self.show()
def show3(self):
"""alias
"""
self.showWithoutCoast()
def showInverted(self):
self.matrix = np.flipud(self.matrix)
self.printToScreen()
self.matrix = np.flipud(self.matrix)
def show0(self):
"""alias
"""
self.showInverted()
def show4(self):
"""alias
"""
self.showInverted()
def backupMatrix(self, name=""):
"""backing up self.matrix for analysis
paired with self.restoreMatrix()
"""
try:
self.backupCount += 1
if name =="":
name = self.backupCount
self.matrix_backups[name] = self.matrix.copy()
except AttributeError:
self.backupCount = 0
self.matrix_backups = {}
if name =="":
name = self.backupCount
self.matrix_backups[name] = self.matrix.copy()
def restoreMatrix(self, name =""):
"""see self.backupMatrix() for comments
"""
if name =="":
name = self.backupCount
self.matrix = self.matrix_backups[name].copy()
# end basic i/o's
############################################################
#############################################################
# new objects from old
def copy(self):
"""returning a copy of itself
9 March 2013
"""
return DBZ(dataTime =self.dataTime,
matrix =self.matrix.copy(),
name =self.name,
dt =self.dt,
dx =self.dx,
dy =self.dy,
dataPath =self.dataPath,
outputPath=self.outputPath,
imagePath=self.imagePath,
coastDataPath=self.coastDataPath,
database =self.database,
cmap =self.cmap,
vmin =self.vmin,
vmax =self.vmax,
coordinateOrigin= self.coordinateOrigin,
lowerLeftCornerLatitudeLongitude = self.lowerLeftCornerLatitudeLongitude,
upperRightCornerLatitudeLongitude =self.upperRightCornerLatitudeLongitude,
verbose =self.verbose)
def drawCross(self, i="", j="", radius=5, intensity=9999):
"""to draw a cross (+) at the marked point
"""
if i=="" or j=="":
i=self.coordinateOrigin[0]
j=self.coordinateOrigin[1]
matrix=self.matrix.copy()
matrix[i-radius:i+radius+1, j ] = intensity
matrix[i , j-radius:j+radius+1] = intensity
return DBZ(dataTime =self.dataTime,
matrix = matrix,
name =self.name + \
", cross at x,y=(%d,%d), radius=%d" %\
(j, i, radius),
dt =self.dt,
dx =self.dx,
dy =self.dy,
dataPath =self.dataPath,
outputPath=self.outputPath,
imagePath=self.imagePath,
coastDataPath=self.coastDataPath,
database =self.database,
cmap =self.cmap,
vmin =self.vmin,
vmax =self.vmax,
coordinateOrigin= self.coordinateOrigin,
lowerLeftCornerLatitudeLongitude = self.lowerLeftCornerLatitudeLongitude,
upperRightCornerLatitudeLongitude =self.upperRightCornerLatitudeLongitude,
verbose =self.verbose)
def drawCoast(self, intensity=9999, newCopy=False):
"""
adapted from DBZ.show2()
"""
if newCopy:
a = self.copy() # no need for this i guess!!!
else:
a = self
try:
if a.coastData == "" : print "haha" #test for existence
except AttributeError:
a.loadCoast()
print "\n... coast data loaded from ", a.coastDataPath, "for ", a.name
for v in a.coastData:
a.matrix[v[0], v[1]] = intensity
return a
def recentreTaichungPark(self):
"""
2013-08-27
use:
a = pattern.a
a.showTaichungPark()
takes as input
attributes:
lowerLeftCornerLatitudeLongitude
upperRightCornerLatitudeLongitude
constants:
taichung park coordinates (24.145056°N 120.683329°E)
changes:
self.coordinateOrigin
self.O
returns:
grid square for taichung park
"""
#global taichungParkLatitude, taichungParkLongitude
height, width = self.matrix.shape
i0 = taichungParkLatitude #defined in defaultParameters.py
j0 = taichungParkLongitude
# the above two lines dont work, here's a hack fix
#import defaultParameters
#j0 = defaultParameters.taichungParkLongitude
#i0 = defaultParameters.taichungParkLatitude
i1, j1 = self.lowerLeftCornerLatitudeLongitude
i2, j2 = self.upperRightCornerLatitudeLongitude
i3 = 1.*(i0-i1)*height/(i2-i1) # (latitudeTCP-latLowerleft) * grid per latitude
j3 = 1.*(j0-j1)*width/(j2-j1) # ditto for longitude
self.coordinateOrigin = (i3,j3)
self.O = (i3,j3)
return i3, j3
def recentre(self):
"""alias for recentreTaichungPark(self)
"""
return self.recentreTaichungPark()
def recenter(self):
"""alias for recentreTaichungPark(self)
"""
return self.recentreTaichungPark()
def drawRectangle(self, bottom=0, left=0, height=100, width=100, intensity=9999):
""" return a copy with a rectangle on the image
"""
vmax = self.vmax
matrix = self.matrix.copy()
for i in range(bottom, bottom+height):
matrix[i , left:left+2] = intensity
matrix[i , left+width] = intensity
for j in range(left, left+width):
matrix[bottom:bottom+2, j] = intensity
matrix[bottom+height, j] = intensity
return DBZ(dataTime =self.dataTime,
matrix = matrix,
name =self.name + \
", rectangle at x,y=(%d,%d), width=%d, height=%d" %\
(left, bottom, width, height),
dt =self.dt,
dx =self.dx,
dy =self.dy,
dataPath =self.dataPath,
outputPath=self.outputPath,
imagePath=self.imagePath,
coastDataPath=self.coastDataPath,
database =self.database,
cmap =self.cmap,
vmin =self.vmin,
vmax =self.vmax,
coordinateOrigin= self.coordinateOrigin,
verbose =self.verbose)
def getWindow(self, bottom=0, left=0, height=100, width=100):
"""return a dbz object, a window view of itself
"""
name = self.name +'_windowed' + '_bottom' + str(bottom) +\
'_left' + str(left) + '_height' + str(height) + '_width' + str(width)
matrix = self.matrix.copy()
matrix = matrix[bottom:bottom+height, left:left+width]
return DBZ(dataTime =self.dataTime,
matrix = matrix,
name = name,
dt =self.dt,
dx =self.dx,
dy =self.dy,
dataPath =self.dataPath,
outputPath=self.outputPath,
imagePath=self.imagePath,
coastDataPath=self.coastDataPath,
database =self.database,
cmap =self.cmap,
vmin =self.vmin,
vmax =self.vmax,
coordinateOrigin = (height//2, width//2) , #hack
verbose =self.verbose)
def shiftMatrix(self,i,j):
"""shifting the array/dbz pattern; masking the edge
codes migrated from shiiba.py (now armor.shiiba.regression) to here
i = shift in axis-0 = going up
j = shift in axis-1 = going right
"""
#1. copy the matrix
matrix = self.matrix.copy()
#2. shift the matrix
matrix = np.roll(matrix, i,axis=0)
matrix = np.roll(matrix, j,axis=1)
#3. mask the edges
if i>0: # up
matrix.mask[ :i, : ] = 1 #mask the first (=bottom) i rows
if i<0: # down
matrix.mask[i: , : ] = 1 #mask the last (=top) rows; i<0
if j>0: # right
matrix.mask[ : , :j] = 1 #mask the first (=left) columns
if j<0: # left
matrix.mask[ : ,j: ] = 1 #mask the last (=right) columns
#4. return an armor.pattern.DBZ object
self_shifted_by_ij =DBZ(dataTime=self.dataTime, matrix=matrix,\
name=self.name+"shifted"+str((i,j)),\
dt=self.dt, dx=self.dx, dy=self.dy, \
dataPath =self.outputPath+"shifted"+str((i,j))+".dat",\
outputPath =self.outputPath+"shifted"+str((i,j))+".dat",\
imagePath =self.imagePath +"shifted"+str((i,j))+".png",\
database =self.database,\
cmap=self.cmap,
coordinateOrigin = self.coordinateOrigin,
verbose=self.verbose)
return self_shifted_by_ij
def shift(self, i, j):
"""alias for shiftMatrix()
"""
return self.shiftMatrix(i,j)
def smooth(self, ker=""):
"""
################################
# smoothing the image by convolution with a kernal
# uses SciPY
# return : a DBZ object, smoothed
# 8 March 2013
#################################
"""
if ker=="":
ker = 1./273. * np.array( [[1, 4, 7, 4, 1], # default 5x5 Gaussian-like kernel; the weights sum to 273
[4,16,26,16, 4],
[7,26,41,26, 7],
[4,16,26,16, 4],
[1, 4, 7, 4, 1]])
phi0 = self.matrix.copy()
phi0.fill_value = -999999999
phi0 = signal.convolve(phi0.filled(),ker)
phi0 = ma.array(phi0, fill_value=-999, mask=(phi0<-80))
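# signal.convolve defaults to 'full' mode, which grows each dimension by 4 for a 5x5 kernel;
# the [2:-2, 2:-2] slice below trims the result back to the original shape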
# cutting it down to size (881,921)
return DBZ(name=self.name+'smoothed', matrix =phi0[2:-2, 2:-2],
dt=self.dt, dx=self.dx, dy=self.dy,
dataPath =self.dataPath +'smoothed.dat',
outputPath=self.outputPath+'smoothed.dat',
imagePath =self.imagePath +'smoothed.dat',
coastDataPath=self.coastDataPath,
database=self.database,
cmap=self.cmap, vmin=self.vmin, vmax=self.vmax,
coordinateOrigin = self.coordinateOrigin,
verbose=self.verbose)
def coarser(self, scale=2):
"""
################################
# returning a coarser image by averaging 4 nearby points
#
# return : a DBZ object
# parameter "scale" not used yet
# 8 March 2013
# parameter "scale" implementation started on 12 march 2013
#################################
"""
phi = self.matrix.copy()
# trim if dimensions not even
height, width = phi.shape
horizontal = width//scale
vertical = height//scale
phi = phi[0:vertical*scale, 0:horizontal*scale] # trimming
# getting the shifted copies
# 0 1
# 2 3
phi.fill_value = -999999999
phiList = [] #work to be continued here (parameter "scale" implementation)
phi0 = phi[ ::2, ::2].flatten()
phi1 = phi[ ::2,1::2].flatten()
phi2 = phi[1::2, ::2].flatten()
phi3 = phi[1::2,1::2].flatten()
phi_mean= ma.vstack([phi0, phi1, phi2, phi3])
phi_mean= ma.mean(phi_mean, axis=0)
phi_mean= phi_mean.reshape(vertical, horizontal)
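# each entry of phi_mean is the mean of a 2x2 block of the original grid, so the output is half-sized in each direction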
# cutting it down to size (881,921)
return DBZ(name=self.name+'coarser', matrix =phi_mean,
dt=self.dt, dx=self.dx, dy=self.dy,
dataPath =self.dataPath +'coarser.dat',
outputPath=self.outputPath+'coarser.dat',
imagePath =self.imagePath +'coarser.dat',
coastDataPath=self.coastDataPath,
database=self.database,
cmap=self.cmap, vmin=self.vmin, vmax=self.vmax,
coordinateOrigin = (self.coordinateOrigin[0] //scale,\
self.coordinateOrigin[1] //scale ) ,
verbose=self.verbose)
def coarser2(self):
""" like coarser() but returning a matrix of the same size, not smaller
do it later when i have time
algorithm:
to multiply self.matrix with a "diagonal" of matrix [[.5, .5],[.5,.5]]
on both left and right.
"""
height, width = self.matrix.shape
pass
def getPrediction(self, C):
"""wrapping armor.shiiba.regression2.getPrediction
"""
from armor.shiiba import regression2
return regression2.getPrediction(C, self)
def predict(self, *args, **kwargs):
"""wrapping self.getPrediction for the moment
"""
return self.getPrediction(*args, **kwargs)
def advect(self, *args, **kwargs):
"""wrapping advection.semiLagrangian.interploate2 for the moment
"""
from armor.advection import semiLagrangian as sl
return sl.interpolate2(self, *args, **kwargs)
def flipud(self):
"""wrapping the function np.flipud
"""
a_flipud = self.copy()
a_flipud.matrix = np.flipud(a_flipud.matrix)
return a_flipud
def fliplr(self):
a_fliplr = self.copy()
a_fliplr.matrix = np.fliplr(a_fliplr.matrix)
return a_fliplr
def threshold(self, threshold=0):
"""getting a threshold image of itself with mask
"""
matrix= self.matrix.copy()
name = self.name + " thresholded at " + str(threshold)
oldMask = matrix.mask.copy()
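# adding a boolean array to a boolean mask acts as a logical OR: points below the threshold get masked on top of the existing mask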
matrix.mask += (matrix < threshold)
a_thres = DBZ(dataTime =self.dataTime,
matrix =matrix,
name =name,
dt =self.dt,
dx =self.dx,
dy =self.dy,
dataPath =self.dataPath,
outputPath=self.outputPath + "_thresholded_" + str(threshold),
imagePath=self.imagePath + "_thresholded_" + str(threshold),
coastDataPath=self.coastDataPath,
database =self.database,
cmap =self.cmap,
vmin =self.vmin,
vmax =self.vmax,
coordinateOrigin= self.coordinateOrigin,
verbose =self.verbose)
a_thres.oldMask = oldMask
return a_thres
# end new objects from old
#############################################################
############################################################
# functions on object
def cov(self, dbz2):
"""wrapping the ma.cov function: covariance between two images
"""
phi0 = self.matrix.flatten()
phi1 = dbz2.matrix.flatten()
cov = ma.cov(phi0, phi1)
return cov
def corr(self, dbz2):
"""wrappig the ma.corrcoef function: correlation between two images
"""
phi0 = self.matrix.flatten()
phi1 = dbz2.matrix.flatten()
corr = ma.corrcoef(phi0, phi1)
if (not isinstance(corr, float)) and (not isinstance(corr,int)):
corr = corr[0,1] # return a number
return corr
def localCov(self, dbz2, windowSize=7):
"""plotting the local covariance of two dbz patterns
a slow version of the function
>>> test.tic() ; x=a.localCov(b) ; test.toc()
*************************
time spent: 4091.93978906
>>> x
>>> xm=x.matrix
>>> xm.min()
-1.0000000000000002
>>> xm.max()
1.0000000000000002
>>> xm.mean()
0.21721107449067339
>>> x.name = 'local correlation: dbz20120612.0200 - 0210'
>>> x.outputPath='testing/test112/localCorrelationMatrix.dat'
>>> x.save()
>>> x.matrix=np.flipud(x.matrix)
>>> x.imagePath='testing/test112/localCorrelationMatrix.png'
>>> x.saveImage()
>>>
"""
height, width = self.matrix.shape
E = (windowSize-1)/2 #shorthand
# initialise
localcovar = ma.zeros((height,width))
localcovar.mask = True
for i in range(height):
for j in range(width):
window1 = self.matrix[max(0,i-E):min(i+E+1, height),max(0,j-E):min(j+E+1,width)]
window2 = dbz2.matrix[max(0,i-E):min(i+E+1, height),max(0,j-E):min(j+E+1,width)]
localcovar[i,j] = ma.corrcoef(window1.flatten(), window2.flatten())[0,1]
return localcovar
def shiiba(self,b, *args, **kwargs):
"""wrapping armor.analysis.shiiba
"""
from armor import analysis
self.shiibaResult = analysis.shiiba(self, b, *args, **kwargs)
return self.shiibaResult
def shiibaLocal(self, b, *args, **kwargs):
"""wrapping armor.analyais.shiibaLocal
"""
from armor import analysis
self.shiibaLocalResult = analysis.shiibaLocal(self,b, *args, **kwargs)
return self.shiibaLocalResult
def shiibaFree(self,b, *args, **kwargs):
"""wrapping armor.shiiba.regressionCFLfree
"""
from armor.shiiba import regressionCFLfree as cflfree
self.shiibaFreeResult = cflfree.regressGlobal(self,b, *args, **kwargs)
return self.shiibaFreeResult
def getVect(self, C):
"""wrapping armor.shiiba.regression2.convert
"""
from armor.shiiba import regression2
return regression2.convert(C, self)
def getKmeans(self, *args, **kwargs):
"""wrapping armor.kmeans.clustering.getKmeans()
8 April 2013
"""
import armor.kmeans.clustering as clust
x = clust.getKmeans(self, *args, **kwargs)
return x
def invariantMoments(self,**kwargs):
"""wrappng armor.geometry.moments.HuMoments
normalise with respect to the degree
"""
from armor.geometry import moments
x = moments.HuMoments(self.matrix, **kwargs)
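# the seven Hu moments differ wildly in magnitude; the sign-preserving roots below
# (matched to each moment's order) bring them onto comparable scales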
x[0] = np.sign(x[0])*abs(x[0])**(.5)
x[1] = np.sign(x[1])*abs(x[1])**(.25)
x[2] = np.sign(x[2])*abs(x[2])**(1./6)
x[3] = np.sign(x[3])*abs(x[3])**(1./6)
x[4] = np.sign(x[4])*abs(x[4])**(1./12)
x[5] = np.sign(x[5])*abs(x[5])**(1./8)
x[6] = np.sign(x[6])*abs(x[6])**(1./12)
self.invMom = x
return x
def spline(self):
"""
wrapping the scipy interpolate module
http://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.RectBivariateSpline.html#scipy.interpolate.RectBivariateSpline
"""
height, width = self.matrix.shape
return interpolate.RectBivariateSpline(range(height), range(width), self.matrix)
# end function on object
############################################################
############################################################
# functions altering (attributes) of object
def findEdges(self, threshold=-9999):
from armor.geometry import edges
m = self.matrix.copy()
if threshold !=-9999:
m.mask += (m<threshold)
m_edges = edges.find(DBZ(matrix=m))
else:
m_edges = edges.find(DBZ(matrix=m))
self.edges = m_edges
return m_edges
# end functions altering (attributes) of object
############################################################
#####################################################
class VectorField(object):
"""wraps two masked arrays sharing the same mask (how can i make them share a mask?)
example:
>>> from armor import pattern
>>> a = pattern.DBZ(dataTime="20120612.0200")
>>> a.load()
>>> a.show()
>>> b = pattern.VectorField(a.matrix, -a.matrix)
>>> b.plot()
>>> b.show()
"""
def __init__(self, U, V, mask=False, name='vectorfield', dataPath="", outputPath="", imagePath="", \
key='vector field', title='title', gridSize=25):
""" U = first = i-component; V=second=j-component
"""
U = U.view(ma.MaskedArray)
V = V.view(ma.MaskedArray)
mask = U.mask + V.mask + mask
U.mask = mask.copy()
V.mask = mask.copy()
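# U and V now carry identical copies of the combined mask, so missing values line up in both components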
self.U = U
self.V = V
self.mask=mask
#################################################
# i don't know how to make this work; comment out
#if not isinstance(mask, type(False)): # if mask explicitly given, initialise with it
# self.U.mask = mask
# self.V.mask = mask
#################################################
self.name = name
self.dataPath = dataPath
self.outputPath = outputPath # for the future
self.imagePath = imagePath
self.key = key
self.title = title
self.gridSize= gridSize
def __sub__(self, vect2):
"""defining the subtraction of two vector fields
"""
if isinstance(vect2, tuple) or isinstance(vect2,list):
name = self.name + "_minus_" + str(vect2)
U = self.U - vect2[0] # we use (x,y) for external interface, not i,j
V = self.V - vect2[1]
mask = self.mask.copy()
key = self.key + " minus " + str(vect2)
title = self.title+" minus " + str(vect2)
gridSize = self.gridSize
else:
name = self.name + "_minus_" + vect2.name
U = self.U - vect2.U
V = self.V - vect2.V
mask = self.mask + vect2.mask.copy()
key = self.key + " minus " + vect2.key
title = self.title+" minus " + vect2.title
gridSize = min(self.gridSize, vect2.gridSize)
outputPath = self.outputPath + name + ".dat"
dataPath = outputPath
imagePath = self.imagePath + name + ".png"
return VectorField(U, V, mask=mask, name=name, dataPath=dataPath, outputPath=outputPath,\
imagePath=imagePath, key=key, title=title, gridSize=gridSize)
def __add__(self, vect2):
"""defining the addition of two vector fields
"""
if isinstance(vect2, tuple) or isinstance(vect2,list):
name = self.name + "_plus_" + str(vect2)
U = self.U + vect2[0] # we use (x,y) for external interface, not i,j
V = self.V + vect2[1]
mask = self.mask.copy()
key = self.key + " plus " + str(vect2)
title = self.title+" plus " + str(vect2)
gridSize = self.gridSize
else:
name = self.name + "_plus_" + vect2.name
U = self.U + vect2.U
V = self.V + vect2.V
mask = self.mask + vect2.mask.copy()
key = self.key + " plus " + vect2.key
title = self.title+" plus " + vect2.title
gridSize = min(self.gridSize, vect2.gridSize)
outputPath = self.outputPath + name + ".dat"
dataPath = outputPath
imagePath = self.imagePath + name + ".png"
return VectorField(U, V, mask=mask, name=name, dataPath=dataPath, outputPath=outputPath,\
imagePath=imagePath, key=key, title=title, gridSize=gridSize)
def __mul__(self, s):
"""scalar for now, will extend later
"""
if isinstance(s, tuple) or isinstance(s,list):
U = self.U * s[0]
V = self.V * s[1]
else:
U = self.U * s
V = self.V * s
mask=self.mask.copy()
name=self.name + "__times__" + str(s)
dataPath=''
outputPath=self.outputPath + "__times__" + str(s)
imagePath =self.imagePath + "__times__" + str(s)
key=self.key + "__times__" + str(s)
title=self.title + "__times__" + str(s)
gridSize = self.gridSize
return VectorField(U=U, V=V, mask=mask, name=name, dataPath=dataPath, \
outputPath=outputPath, imagePath=imagePath, \
key=key, title=title, gridSize=gridSize)
def plot(self, key="", title="", gridSize=0, X=-1, Y=-1, closeAll=True, lowerLeftKey=False):
"""
make the plot without showing it
adapted from
basics.plotVectorField(U, V, X=-1, Y=-1, gridSize=25, key="vector field",\
title="title", saveFileName="", outputToScreen=False):
"""
# clear the canvass
#plt.clf()
if closeAll:
plt.close()
U = self.U.copy()
V = self.V.copy()
if key =="":
key = self.key
if title =="":
title = self.title
if gridSize == 0:
gridSize = self.gridSize
width = U.shape[1]
height = U.shape[0]
if type(X)==type(-1) or type(Y)==type(-1):
X, Y = np.meshgrid(np.arange(0,width), np.arange(0,height))
left = X[ 0, 0]
bottom = Y[ 0, 0]
#computing the length of the vector field at centre for reference
r_centre = (U[height//2, width//2]**2 + V[height//2, width//2]**2) **(0.5)
print "==computing the length of the vector field at centre for reference:==\nr_centre=",\
"r_centre"
if lowerLeftKey:
# making a grid of standardardised vector in the lower-left corner
# for scale reference
U[1:gridSize+1, 1:gridSize+1] = 1
V[1:gridSize+1, 1:gridSize+1] = 0
Q = plt.quiver( X[::gridSize, ::gridSize], Y[::gridSize, ::gridSize],\
U[::gridSize, ::gridSize], V[::gridSize, ::gridSize],\
color='r', units='x', linewidths=(2,), edgecolors=('k'),\
headaxislength=5 )
qk = plt.quiverkey(Q, 0.7, 0.0, 1, 'length='+str(round(r_centre,5))+' at centre',\
fontproperties={'weight': 'bold'})
if lowerLeftKey:
qk = plt.quiverkey(Q, 0.3, 0.0, 1,\
key+',\nlength of the standard arrow in the lower-left corner=1',\
fontproperties={'weight': 'bold'})
plt.axis([left, left+width-1, bottom, bottom+height-1])
plt.title(title)
def showPlot(self,**kwargs):
self.plot(**kwargs)
plt.show()
def show(self,**kwargs): #alias
self.showPlot(**kwargs)
def savePlot(self):
self.plot()
if self.imagePath =="":
self.imagePath = raw_input("Please enter imagePath:")
plt.savefig(self.imagePath, dpi=200)
def saveImage(self):
"""alias for savePlot
"""
self.savePlot()
def toArray(self):
"""return normal arrays filled with -999 for missing values for other uses
"""
return ma.filled(self.U), ma.filled(self.V)
def saveMatrix(self):
"""
* We convert and save the masked arrays into standard arrays with masked data filled by -999
"""
U, V = self.toArray()
np.savetxt(self.outputPath+"U.dat", U, '%.4f')
np.savetxt(self.outputPath+"V.dat", V, '%.4f')
def pickle(self):
# minimal sketch: pickle.dump needs a file object, so write next to the output data
# (assumes the module-level pickle import that the original call already relies on)
pickle.dump(self, open(self.outputPath + '.pkl', 'wb'))
#####################################################
# functions from vector fields to values
def corr(self, vect2, region1="", region2=""):
"""adapted from DBZ.corr():
"""
height, width = self.U.shape
if region1=="":
region1 = (0, 0, height, width)
if region2=="":
region2 = region1
u1 = self.U[region1[0]:region1[0]+region1[2], \
region1[1]:region1[1]+region1[3]].flatten()
u2 = vect2.U[region2[0]:region2[0]+region2[2], \
region2[1]:region2[1]+region2[3]].flatten()
ucorr = ma.corrcoef(u1, u2)
v1 = self.V[region1[0]:region1[0]+region1[2], \
region1[1]:region1[1]+region1[3]].flatten()
v2 = vect2.V[region2[0]:region2[0]+region2[2], \
region2[1]:region2[1]+region2[3]].flatten()
vcorr = ma.corrcoef(v1, v2)
return {'ucorr': ucorr, 'vcorr': vcorr}
##############################################################################
# streams of DBZ objects, with basic operations, comparisons, etc
class DBZstream:
"""
a stream of DBZ objects, with basic i/o facilities
migrating some codes from armor.basicio.dataStream
WE DO ASSUME THAT there are no two sets of data with the same dataTime
or else we would need some extra logic to check for redundancies.
"""
###########################################################
#
# basic construction
def __init__(self, dataFolder='../data_temp/', name="COMPREF.DBZ",
lowerLeftCornerLatitudeLongitude=defaultLowerLeftCornerLatitudeLongitude,
upperRightCornerLatitudeLongitude=defaultUpperRightCornerLatitudeLongitude,
outputFolder="",
imageFolder="",
preload=False):
"""
construct the objects without loading them
input: path of folder "/../../"
process: parse the folder for files
output: sequence of armor.pattern.DBZ objects
DBZ(name, dataPath, dataTime)
# parse the filename and look for clues
"""
if outputFolder =="":
outputFolder = defaultOutputFolder
if imageFolder =="":
imageFolder = defaultImageFolder
self.dataFolder = dataFolder
self.lowerLeftCornerLatitudeLongitude = lowerLeftCornerLatitudeLongitude
self.upperRightCornerLatitudeLongitude = upperRightCornerLatitudeLongitude
self.outputFolder = outputFolder
self.imageFolder = imageFolder
dbzList = []
dataFolder = re.sub(r'\\', '/' , dataFolder) # standardise: g:\\ARMOR .. --> g:/ARMOR
dataSource = '-'.join(dataFolder.split('/')[-2:]) + '-'
if name != "":
self.name = name
else:
self.name = dataSource
L = os.listdir(dataFolder)
L = [v for v in L if v.lower().endswith('.txt') or v.lower().endswith('.dat')] # fetch the data files
for fileName in L:
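# file names are assumed to embed the timestamp as 4-digit groups,
# e.g. 'COMPREF.20120612.0200.dat' -> dataTime '20120612.0200'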
dataTime = re.findall(r'\d{4}', fileName)
if len(dataTime)<3: # NOT DATED DBZ FILE, REJECT
continue
dataTime = dataTime[0] + dataTime[1] + '.' + dataTime[2]
name = dataSource + fileName
dataPath = dataFolder + fileName
a = DBZ(dataTime=dataTime,
name=name,
dataPath=dataPath,
lowerLeftCornerLatitudeLongitude=lowerLeftCornerLatitudeLongitude,
upperRightCornerLatitudeLongitude=upperRightCornerLatitudeLongitude,
)
if preload:
a.load()
dbzList.append(a)
## there you go! ######
#
self.list = dbzList
#
#######################
def __call__(self, N=-999):
"""
if N is an integer then return the N-th DBZ pattern in the stream
else if N is a string then return those whose names or dataTimes contains N
"""
if N == -999:
return self.list
elif isinstance(N, int):
return self.list[N]
elif isinstance(N, str):
return [v for v in self.list if N in v.name or N in v.dataTime]
def __getitem__(self, N=-999):
"""alias for self.list[] """
return self.list[N]
def __len__(self, dataTime=""):
return len([v for v in self.list if dataTime in v.dataTime])
###########################################################
#
# stream operations
def append(self, filePath):
"""
to append a new member to the DBZstream list
"""
pass
def regrid(self, b):
"""
wrapping armor.geometry.regrid.regrid()
b is another DBZ object representing the grid pattern to be transformed to
"""
from armor.geometry import regrid
for i in range(len(self.list)):
self.list[i] = regrid.regrid(self.list[i], b)
def cutUnloaded(self):
"""
cut the unloaded objects
"""
i=0
while i < len(self.list):
dbzObject = self.list[i]
if (dbzObject.matrix**2).sum()==0:
del(self.list[i])
else:
i+=1
return i # length of the stream in the end
###########################################################
#
# basic I/O
def load(self, N=-999, name="", verbose=False):
"""
N - index of object to be loaded, if N==-999 : load all
if N is a string, look through the list of dbz objects
and load those whose dataTime string contain N
and whose name contains name
"""
if N==-999:
for img in self.list:
img.load()
elif isinstance(N, int):
self.list[N].load()
elif isinstance(N, str):
for img in self.list:
if N in img.dataTime or N in img.name:
img.load()
if verbose:
print img.name, '|',
def setImageFolder(self, folder):
for dbzPattern in self.list:
dbzPattern.imageFolder = folder
#dbzPattern.imagePath = folder + dbzPattern.name + '_'+dbzPattern.dataTime + ".png"
dbzPattern.imagePath = folder + dbzPattern.dataTime + ".png"
def setOutputFolder(self, folder):
for dbzPattern in self.list:
dbzPattern.outputFolder = folder
#dbzPattern.outputPath = folder + dbzPattern.name + '_'+dbzPattern.dataTime + ".dat"
dbzPattern.outputPath = folder + dbzPattern.dataTime + ".dat"
###########################################################
#
# functions on streams, comparisons, etc
def countLoaded(self):
"""
return the number of loaded DBZ objects in the stream
essentially computing those with matrix!=0
"""
return len([v for v in self if (v.matrix**2).sum()!=0])
def corr(self, ds2, verbose=False):
"""
returns a list of correlation of the streams
[(dataTime <str>, corr <float>),...]
"""
ds1 = self # alias
# 1. get the list of common dataTimes
dataTimeList1 = [v.dataTime for v in ds1.list]
dataTimeList2 = [v.dataTime for v in ds2.list]
dataTimeList = sorted(list(set(dataTimeList1).intersection(set(dataTimeList2))))
if verbose:
print dataTimeList
# 2. compute the correlations with the built in DBZ.corr() method
L = []
for T in dataTimeList:
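# calling a stream with a dataTime string returns every pattern whose dataTime (or name) contains it; take the first match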
a = ds1(T)[0]
b = ds2(T)[0]
L.append((T, a.corr(b)))
return L
########################
# demo
a = DBZ('20120612.0200')
b = DBZ('20120612.0210')
ds1 = DBZstream()
"""
exit()
python
from armor import pattern
"""
try:
print externalHardDriveRoot
ds2 = DBZstream(dataFolder='%sdata/SOULIK/wrf_shue/' %externalHardDriveRoot,
lowerLeftCornerLatitudeLongitude=(17.7094,113.3272),
upperRightCornerLatitudeLongitude=(28.62909, 127.6353),
)
except:
print 'EXTERNAL HARD DRIVE %sdata/SOULIK/wrf_shue/' %externalHardDriveRoot, "NOT FOUND"
try:
ds2 = DBZstream(dataFolder='%sdata/SOULIK/wrf_shue/' %hardDriveRoot,
lowerLeftCornerLatitudeLongitude=(17.7094,113.3272),
upperRightCornerLatitudeLongitude=(28.62909, 127.6353),
)
print 'HARD DRIVE %sdata/SOULIK/wrf_shue/' %hardDriveRoot, "\nFOUND!!"
except:
print 'HARD DRIVE %sdata/SOULIK/wrf_shue/' %hardDriveRoot, "NOT FOUND"
try:
print externalHardDriveRoot2
ds2 = DBZstream(dataFolder='%sdata/SOULIK/wrf_shue/' %externalHardDriveRoot2,
lowerLeftCornerLatitudeLongitude=(17.7094,113.3272),
upperRightCornerLatitudeLongitude=(28.62909, 127.6353),
)
print 'EXTERNAL HARD DRIVE %sdata/SOULIK/wrf_shue/' %externalHardDriveRoot2, "\nFOUND!!"
except:
print 'EXTERNAL HARD DRIVE %sdata/SOULIK/wrf_shue/' %externalHardDriveRoot2, "NOT FOUND"
try:
ds3 = DBZstream(dataFolder='../data_simulation/20120611_12/', name="WRFoutput",
lowerLeftCornerLatitudeLongitude=(17.7094,113.3272),
upperRightCornerLatitudeLongitude=(28.62909, 127.6353),
preload=False)
except:
print '../data_simulation/20120611_12/ - NOT FOUND'
#a.load()
#b.load()
"""
The following are constructed from data from mr. shue : https://mail.google.com/mail/u/0/?shva=1#search/azteque%40manysplendid.com/14070bb7d7aef48c
wd3
282x342
MaxLatF = 28.62909
MinLatF = 17.7094
MaxLonF = 127.6353
MinLonF = 113.3272
"""
c = DBZ(name='WRF20120612.0200', dataTime='20120612.0200',
dataPath= usbRoot + '/data_simulation/20120611_12/out_201206120200.txt',
lowerLeftCornerLatitudeLongitude= (17.7094, 113.3272),
upperRightCornerLatitudeLongitude= (28.62909,127.6353) ,
)
d = DBZ(name='WRF20120612.0210', dataTime='20120612.0210',
dataPath= usbRoot + '/data_simulation/20120611_12/out_201206120210.txt',
lowerLeftCornerLatitudeLongitude= (17.7094, 113.3272),
upperRightCornerLatitudeLongitude= (28.62909,127.6353) ,
)
|
cc0-1.0
| 3,075,585,754,165,044,000 | 39.301485 | 148 | 0.514681 | false |
emilybache/texttest-runner
|
src/main/python/storytext/lib/storytext/scriptengine.py
|
1
|
4868
|
""" Basic engine class, inherited in guishared.py and implemented by each GUI toolkit code """
import recorder, replayer
import os, sys, imp
try:
# In Py 2.x, the builtins were in __builtin__
BUILTINS = sys.modules['__builtin__']
except KeyError: # pragma: no cover - not worried about Python 3 yet...
# In Py 3.x, they're in builtins
BUILTINS = sys.modules['builtins']
# Behaves as a singleton...
class ScriptEngine:
# Read USECASE_HOME also, the legacy name from PyUseCase
storytextHome = os.path.abspath(os.getenv("STORYTEXT_HOME",
os.getenv("USECASE_HOME",
os.path.expanduser("~/.storytext"))))
def __init__(self, enableShortcuts=False, **kwargs):
os.environ["STORYTEXT_HOME"] = self.storytextHome
self.enableShortcuts = enableShortcuts
self.recorder = recorder.UseCaseRecorder(self.getShortcuts())
self.replayer = self.createReplayer(**kwargs)
self.registerShortcuts()
self.replayer.tryRunScript()
def recorderActive(self):
return self.enableShortcuts or self.recorder.isActive()
def replayerActive(self):
return self.enableShortcuts or self.replayer.isActive()
def active(self):
return self.replayerActive() or self.recorderActive()
def registerShortcuts(self):
for shortcut in self.getShortcuts():
if self.replayerActive():
self.replayer.registerShortcut(shortcut)
@classmethod
def getShortcuts(cls, storyTextHome=None):
home = storyTextHome if storyTextHome else cls.storytextHome
shortcuts = []
if not os.path.isdir(home):
return shortcuts
for fileName in sorted(os.listdir(home)):
if fileName.endswith(".shortcut"):
fullPath = os.path.join(home, fileName)
shortcuts.append(replayer.ReplayScript(fullPath, ignoreComments=True))
return shortcuts
def createReplayer(self, **kw):
return replayer.UseCaseReplayer(self.recorder, **kw)
def applicationEvent(self, name, category=None, supercedeCategories=[], timeDelay=0.001, delayLevel=0):
# Small time delay to avoid race conditions: see replayer
if self.recorderActive():
self.recorder.registerApplicationEvent(name, category, supercedeCategories, delayLevel)
if self.replayerActive():
self.replayer.registerApplicationEvent(name, timeDelay)
def applicationEventRename(self, oldName, newName, oldCategory=None, newCategory=None):
# May need to recategorise in the recorder
if self.recorderActive() and oldCategory != newCategory:
self.recorder.applicationEventRename(oldName, newName, oldCategory, newCategory)
if self.replayerActive():
self.replayer.applicationEventRename(oldName, newName)
def applicationEventDelay(self, name, **kw):
if self.recorderActive():
self.recorder.applicationEventDelay(name, **kw)
def applicationEventRemove(self, *args, **kw):
if self.recorderActive():
self.recorder.unregisterApplicationEvent(*args, **kw)
def run(self, options, args):
if len(args) == 0:
return False
else:
self.handleAdditionalOptions(options)
self.runSystemUnderTest(args)
return True
def handleAdditionalOptions(self, options):
pass
def runSystemUnderTest(self, args):
# By default, just assume it's a python program. Allow this to be overridden
self.run_python_file(args)
def run_python_file(self, args):
"""Run a python file as if it were the main program on the command line.
`args` is the argument array to present as sys.argv, including the first
element representing the file being executed.
Lifted straight from coverage.py by Ned Batchelder
"""
filename = args[0]
# Create a module to serve as __main__
old_main_mod = sys.modules['__main__']
main_mod = imp.new_module('__main__')
sys.modules['__main__'] = main_mod
main_mod.__file__ = filename
main_mod.__builtins__ = BUILTINS
# Set sys.argv and the first path element properly.
old_argv = sys.argv
old_path0 = sys.path[0]
sys.argv = args
sys.path[0] = os.path.dirname(filename)
try:
source = open(filename, 'rU').read()
exec compile(source, filename, "exec") in main_mod.__dict__
finally:
# Restore the old __main__
sys.modules['__main__'] = old_main_mod
# Restore the old argv and path
sys.argv = old_argv
sys.path[0] = old_path0
|
mit
| -3,351,305,828,627,500,500 | 36.736434 | 107 | 0.625103 | false |
SanaMobile/sana.protocol_builder
|
src-django/api/migrations/0026_auto_20180313_2240.py
|
1
|
1573
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.3 on 2018-03-13 22:40
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
dependencies = [
('api', '0025_merge'),
]
operations = [
migrations.CreateModel(
name='Subroutine',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False)),
('created', models.DateTimeField(auto_now_add=True)),
('last_modified', models.DateTimeField(auto_now=True)),
('name', models.CharField(max_length=255)),
('display_name', models.CharField(max_length=255)),
('description', models.TextField(blank=True, null=True)),
],
options={
'ordering': ['last_modified'],
},
),
migrations.AlterField(
model_name='abstractelement',
name='concept',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='abstractelement', to='api.Concept'),
),
migrations.AddField(
model_name='abstractelement',
name='subroutine',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='abstractelements', to='api.Subroutine'),
),
]
|
bsd-3-clause
| -8,603,903,679,947,479,000 | 36.452381 | 146 | 0.58487 | false |
hep-cce/ml_classification_studies
|
cosmoDNN/AutoEncoder/AutoencodeLensGenerate.py
|
1
|
4913
|
"""
Author: Nesar Ramachandra
Only train over lensed images now -- so o/p also seems lensed
Have to train over everything!!!
check again if noiseless and noisy images are matching!
Check 1d/2d issue
Convolutional encoding
increase number of features per layer
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import keras
from keras.models import Sequential, Model
from keras.layers import Input, Dense, Dropout
from keras import backend as K
import numpy as np
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Conv2D, MaxPooling2D, UpSampling2D, ZeroPadding2D
from keras.optimizers import SGD, RMSprop, Adadelta, Adam
from keras.utils import np_utils
import time
import glob
time_i = time.time()
K.set_image_dim_ordering('tf')
from keras.preprocessing.image import ImageDataGenerator
data_augmentation = True
batch_size = 32
num_classes = 2
num_epoch = 200
learning_rate = 0.00005 # Warning: lr and decay vary across optimizers
decay_rate = 0.0
opti_id = 1 # [SGD, Adam, Adadelta, RMSprop]
loss_id = 0 # [mse, mae] # mse is always better
Dir0 = '../../../'
Dir1 = Dir0 + 'AllTrainTestSets/Encoder/'
Dir2 = ['single/', 'stack/'][1]
data_path = Dir1 + Dir2
DirOutType = ['noisy0', 'noisy1', 'noiseless'] # check above too
image_size = img_rows = img_cols = 45
num_channel = 1
num_files = 9000
train_split = 0.8 # 80 percent
num_train = int(train_split*num_files)
def load_train(fnames):
img_data_list = []
filelist = sorted(glob.glob(fnames + '/*npy'))
for fileIn in filelist: # restricting #files now [:num_files]
# print(fileIn)
img_data = np.load(fileIn)
# print(fileIn)
ravelTrue = False
if ravelTrue: img_data = np.ravel(np.array(img_data))
img_data = img_data.astype('float32')
img_data /= 255.
expandTrue = True
if expandTrue: img_data = np.expand_dims(img_data, axis=4)
# print (img_data.shape)
img_data_list.append(img_data)
# print(np.array(img_data_list).shape)
X_train = np.array(img_data_list)
# labels = np.load(fnames +'_5para.npy')[:num_files]
print (X_train.shape)
labels = np.ones([X_train.shape[0], ])
y_train = np_utils.to_categorical(labels, num_classes)
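# every sample from this folder gets the same label (all ones); the autoencoder below only uses the images themselves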
np.random.seed(12345)
shuffleOrder = np.arange(X_train.shape[0])
# np.random.shuffle(shuffleOrder)
# print(shuffleOrder)
X_train = X_train[shuffleOrder]
y_train = y_train[shuffleOrder]
return X_train, y_train
fnames = data_path + DirOutType[2] #'noiseless'
noiseless_data, noiseless_target = load_train(fnames)
x_train = noiseless_data[0:num_train]
y_train = noiseless_target[0:num_train]
x_val = noiseless_data[num_train:num_files]
y_val = noiseless_target[num_train:num_files]
print('x_train shape:', x_train.shape)
print('y_train shape:', y_train.shape)
# this is the size of our encoded representations
encoding_dim = 64 # 64 floats -> compression of factor ~31.6, assuming the input is 2025 floats
def AutoModel_deep():
# this is our input placeholder
input_img = Input(shape=(image_size*image_size,))
# "encoded" is the encoded representation of the input
# encoded = Dense(encoding_dim, activation='relu')(input_img)
# # "decoded" is the lossy reconstruction of the input
# decoded = Dense(image_size*image_size, activation='sigmoid')(encoded)
encoded1 = Dense(128, activation='relu')(input_img)
encoded2 = Dense(64, activation='relu')(encoded1)
encoded3 = Dense(32, activation='relu')(encoded2)
decoded1 = Dense(64, activation='relu')(encoded3)
decoded2 = Dense(128, activation='relu')(decoded1)
decoded3 = Dense(image_size*image_size, activation='sigmoid')(decoded2)
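# symmetric dense stack for 45x45 inputs: 2025 -> 128 -> 64 -> 32 (bottleneck) -> 64 -> 128 -> 2025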
# this model maps an input to its reconstruction
autoencoder = Model(inputs=input_img, outputs=decoded3)
print("autoencoder model created")
# this model maps an input to its encoded representation
encoder = Model(inputs=input_img, outputs=encoded3)
# create a placeholder for an encoded (32-dimensional) input
encoded_input = Input(shape=(encoding_dim,))
# retrieve the last layer of the autoencoder model
decoder_layer = autoencoder.layers[-1]
# create the decoder model
decoder = Model(inputs=encoded_input, outputs=decoder_layer(encoded_input))
adam = Adam(lr=learning_rate, decay=decay_rate)
autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
return encoder, decoder, autoencoder
encoder, decoder, autoencoder = AutoModel_deep()
# autoencoder.summary()
# Denoising autoencoder
# autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
ModelFit = autoencoder.fit(x_train, x_train ,
batch_size=batch_size, epochs= num_epoch,
verbose=2, validation_data=(x_val, x_val))
|
gpl-3.0
| 8,052,092,181,301,125,000 | 29.515528 | 95 | 0.694484 | false |
TranscribersOfReddit/TranscribersOfReddit
|
tor/core/posts.py
|
1
|
6853
|
import logging
from typing import Dict, Union
from praw.models import Submission # type: ignore
from tor.core.config import Config
from tor.core.helpers import _
from tor.helpers.flair import flair, flair_post
from tor.helpers.reddit_ids import add_complete_post_id, has_been_posted
from tor.helpers.youtube import (has_youtube_transcript, get_yt_video_id,
is_transcribable_youtube_video, is_youtube_url)
from tor.strings import translation
i18n = translation()
log = logging.getLogger(__name__)
PostSummary = Dict[str, Union[str, int, bool, None]]
def process_post(new_post: PostSummary, cfg: Config) -> None:
"""
After a valid post has been discovered, this handles the formatting
and posting of those calls as workable jobs to ToR.
:param new_post: Submission object that needs to be posted.
:param cfg: the config object.
:return: None.
"""
if not should_process_post(new_post, cfg):
return
log.info(f'Posting call for transcription on ID {new_post["name"]} posted by {new_post["author"]}')
if new_post['domain'] in cfg.image_domains:
content_type = 'image'
content_format = cfg.image_formatting
elif new_post['domain'] in cfg.audio_domains:
content_type = 'audio'
content_format = cfg.audio_formatting
elif new_post['domain'] in cfg.video_domains:
content_type = 'video'
content_format = cfg.video_formatting
else:
# This means we pulled from a subreddit bypassing the filters.
content_type = 'Other'
content_format = cfg.other_formatting
if is_youtube_url(str(new_post['url'])):
if not is_transcribable_youtube_video(str(new_post['url'])):
# Not transcribable, so let's add it to the completed posts and skip over it forever
add_complete_post_id(str(new_post['url']), cfg)
return
request_transcription(new_post, content_type, content_format, cfg)
def has_enough_upvotes(post: PostSummary, cfg: Config) -> bool:
"""
Check if the post meets the minimum threshold for karma
"""
subreddit = str(post['subreddit'])
upvotes = int(str(post['ups']))
if subreddit not in cfg.upvote_filter_subs:
# Must not be a sub which has a minimum threshold
return True
if upvotes >= cfg.upvote_filter_subs[subreddit]:
return True
return False
def should_process_post(post: PostSummary, cfg: Config) -> bool:
if not has_enough_upvotes(post, cfg):
return False
if has_been_posted(str(post['name']), cfg):
return False
if post['archived']:
return False
if not post['author']:
return False
return True
def handle_youtube(post: PostSummary, cfg: Config) -> bool:
"""
Handle if there are youtube transcripts
"""
yt_already_has_transcripts = i18n['posts']['yt_already_has_transcripts']
if not is_youtube_url(str(post['url'])):
return False
if not is_transcribable_youtube_video(str(post['url'])):
# Not something we can transcribe, so skip it... FOREVER
add_complete_post_id(str(post['url']), cfg)
return True
if has_youtube_transcript(str(post['url'])):
# NOTE: This has /u/transcribersofreddit post to the original
# subreddit where the video was posted saying it already has
# closed captioning
submission = cfg.r.submission(id=post['name'])
submission.reply(_(yt_already_has_transcripts))
add_complete_post_id(str(post['url']), cfg)
video_id = get_yt_video_id(str(post['url']))
log.info(f'Found YouTube video, {video_id}, with good transcripts.')
return True
return False
def truncate_title(title: str) -> str:
max_length = 250 # This is probably the longest we ever want it
if len(title) <= max_length:
return title
return title[:(max_length - 3)] + '...'
def request_transcription(post: PostSummary, content_type: str, content_format: str, cfg: Config):
# Truncate a post title if it exceeds 250 characters, so the added
# formatting still fits in Reddit's 300 char limit for post titles
title = i18n['posts']['discovered_submit_title'].format(
sub=str(post['subreddit']),
type=content_type.title(),
title=truncate_title(str(post['title'])),
)
url = i18n['urls']['reddit_url'].format(str(post['permalink']))
intro = i18n['posts']['rules_comment'].format(
post_type=content_type,
formatting=content_format,
header=cfg.header,
)
submission: Submission
try:
if is_youtube_url(str(post['url'])) and has_youtube_transcript(str(post['url'])):
# NOTE: This has /u/transcribersofreddit post to the original
# subreddit where the video was posted saying it already has
# closed captioning
video_id = get_yt_video_id(str(post['url']))
submission = cfg.r.submission(id=post['name'])
submission.reply(_(i18n['posts']['yt_already_has_transcripts']))
add_complete_post_id(str(post['name']), cfg)
log.info(f'Found YouTube video, https://youtu.be/{video_id}, with good transcripts.')
return
# The only errors that happen here are on Reddit's side -- pretty much
# exclusively 503s and 403s that arbitrarily resolve themselves. A missed
# post or two is not the end of the world.
except Exception as e:
log.error(
f'{e} - unable to post content.\n'
f'ID: {post["name"]}\n'
f'Title: {post["title"]}\n'
f'Subreddit: {post["subreddit"]}'
)
return
try:
submission = cfg.tor.submit(title=title, url=url)
submission.reply(_(intro))
flair_post(submission, flair.unclaimed)
add_complete_post_id(str(post['name']), cfg)
cfg.redis.incr('total_posted', amount=1)
queue_ocr_bot(post, submission, cfg)
cfg.redis.incr('total_new', amount=1)
# The only errors that happen here are on Reddit's side -- pretty much
# exclusively 503s and 403s that arbitrarily resolve themselves. A missed
# post or two is not the end of the world.
except Exception as e:
log.error(
f'{e} - unable to post content.\n'
f'ID: {post["name"]}\n'
f'Title: {post["title"]}\n'
f'Subreddit: {post["subreddit"]}'
)
def queue_ocr_bot(post: PostSummary, submission: Submission, cfg: Config) -> None:
if post['domain'] not in cfg.image_domains:
# We only OCR images at this time
return
# Set the payload for the job
cfg.redis.set(str(post['name']), submission.fullname)
# Queue up the job reference
cfg.redis.rpush('ocr_ids', str(post['name']))
|
mit
| 5,592,897,859,720,665,000 | 34.14359 | 103 | 0.633591 | false |
btrzcinski/netchat
|
py-client/netclient/command_plugins/chat.py
|
1
|
3397
|
from netclient.cmanager import cmanager
from netclient.mmanager import mmanager
from netclient.decorators import command
from netclient.extensibles import Plugin
from netclient.settings import NO_ARG, GCHAT_TAB, CHAT_TAB
name = 'Chat Commands'
p_class = 'Chat'
depends = ()
class Chat(Plugin):
"""
Chat/Instant messaging-related commands.
"""
aliases = {
'im': 'instantmessage',
'msg': 'instantmessage',
'groupchat': 'subscribe',
'gc': 'subscribe',
'fl': 'getfriends',
'rl': 'getrooms',
'bl': 'getblocked',
'qr': 'queryroom',
'friend': 'addfriend',
'unfriend': 'remfriend',
}
@command('Must specify an addressee and a message (optional).')
def instantmessage(self, context, event):
"""
Sends an NCChat message to the addressee.
"""
args = event.args.strip().split(' ', 1)
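# with a message given, send it straight away; with only an addressee, open (or switch to) the one-to-one chat tab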
if len(args) == 2:
mmanager['chat'].write(args[0], args[1])
else:
n = CHAT_TAB % args[0]
if cmanager['screen'].has_tab(n):
cmanager['screen'].tab_to(n)
else:
cmanager['screen'].new_tab(n, args[0], 'onetoone', True)
@command('Must specify a room to subscribe to.', type='group')
def subscribe(self, context, event):
n = GCHAT_TAB % event.args
if cmanager['screen'].has_tab(n):
cmanager['screen'].tab_to(n)
else:
cmanager['screen'].new_tab(n, event.args, 'group', True)
mmanager['chat'].subscribe(event.args)
@command(NO_ARG)
def getfriends(self, context, event):
"""
Sends a request to retrieve your friends list from the server.
"""
mmanager['chat'].request_friends(True)
@command(NO_ARG)
def getblocked(self, context, event):
"""
Sends a request to retrieve your blocked list from the server.
"""
mmanager['chat'].request_blocked()
@command(NO_ARG)
def getrooms(self, context, event):
"""
Sends a request to retrieve the rooms list from the server.
"""
mmanager['chat'].request_rooms()
@command(type='group')
def queryroom(self, context, event):
"""
Sends a query to the server for information regarding a room.
"""
if event.args:
n = event.args
else:
t = cmanager['screen'].tabs[cmanager['screen'].top_tab]
if t.type == 'group':
n = t.shortname
else:
cmanager['screen'].sendLine('You must specify a room to query.')
return
mmanager['chat'].query_room(n)
@command('Must specify a username to add.')
def addfriend(self, context, event):
mmanager['chat'].add_friend(event.args.strip())
@command('Must specify a friend to remove.')
def remfriend(self, context, event):
mmanager['chat'].rem_friend(event.args.strip())
@command('Must specify a user to block.')
def block(self, context, event):
mmanager['chat'].block(event.args.strip())
@command('Must specify a user to unblock.')
def unblock(self, context, event):
mmanager['chat'].unblock(event.args.strip())
@command()
def setstatus(self, context, event):
mmanager['chat'].set_status(event.args.strip())
|
gpl-2.0
| -5,956,442,884,801,495,000 | 29.881818 | 80 | 0.575213 | false |
smellydog521/classicPlayParsing
|
featureSelection.py
|
1
|
2814
|
from sklearn import preprocessing
import readFile as file;
import random
import operator
import sys
import chardet
reload(sys)
sys.setdefaultencoding("utf-8")
from sklearn.datasets import load_boston
from sklearn.ensemble import RandomForestRegressor
import numpy as np
import matplotlib.pyplot as plt
sampleNumber = 10
# dataPath = "../data/traindata.txt";
dataPath = "../data/training3.txt"
# full.txt: with all training and testing data
# training3.txt: with (0, 10)+(23, 80)
header,datas = file.readfiles(dataPath, True, "utf-8","\t")
print header
# to tune the features: [0,9] for statistical indices;
# [10, 22] for unweighted motifs; [23, 79] for weighted motifs
# and [80, 81] are main-category and sub-category
# featureIndex = range(23, 80)
featureIndex = range(0,len(header)-2)
ommitFeatureIndex= [] #[1,3,4,6,7,11]
# ommitFeatureIndex= range(10,23)
# len(header)-1: 5 categories; len(header)-2: len(header)-2
targetIndex = len(header)-1
trainData=[]
trainResult=[]
categories=[]
le = preprocessing.LabelEncoder()
for p in datas:
row = []
for f in featureIndex:
if f not in ommitFeatureIndex:
row.append(float(p[f]))
trainData.append(row)
trainResult.append(p[targetIndex])
if p[targetIndex] not in categories:
categories.append(p[targetIndex])
tmpHeader=[]
for h in header:
if header.index(h) not in ommitFeatureIndex:
tmpHeader.append(h)
header=tmpHeader
trainData = np.array(trainData)
le.fit(categories)
trainResult = le.transform(trainResult)
print trainResult
rf = RandomForestRegressor()
rf.fit(trainData, trainResult)
importances = rf.feature_importances_
print importances
std = np.std([tree.feature_importances_ for tree in rf.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
print indices
# Print the feature ranking
print("Feature ranking:")
xticks = []
xindex = []
ximport = []
for f in range(trainData.shape[1]):
if importances[f]>0:
xindex.append(f)
ximport.append(importances[indices[f]])
xticks.append(header[indices[f]])
print("%d. feature %d %s (%f)" % (f + 1, indices[f] ,header[indices[f]], importances[indices[f]]))
xticks = np.array(xticks)
# Plot the feature importances of the forest
plt.figure()
font = {'size' : 40}
plt.rc('font', **font)
print xindex,ximport
plt.title("Feature importances")
plt.bar(range(len(xindex)), ximport,
color="r", align="center")
plt.xticks(range(len(xindex)), xticks, rotation='vertical',fontsize=15) #, rotation='vertical'
plt.show()
print "Features sorted by their score:"
print sorted(zip(map(lambda x: round(x, 4), rf.feature_importances_), np.array(featureIndex)),
reverse=True)
|
apache-2.0
| 394,683,876,240,928,260 | 26.14 | 107 | 0.677683 | false |
WikipediaLibrary/TWLight
|
TWLight/resources/migrations/0077_auto_20210610_1739.py
|
1
|
29851
|
# Generated by Django 3.1.8 on 2021-06-10 17:39
import TWLight.resources.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("resources", "0076_auto_20210607_1312"),
]
operations = [
migrations.AlterField(
model_name="language",
name="language",
field=models.CharField(
choices=[
("aa", "Qafár af"),
("ab", "Аҧсшәа"),
("abe", "Wôbanakiôdwawôgan"),
("abs", "Bahasa Ambon"),
("ace", "Acèh"),
("acf", "kwéyòl"),
("ady", "Адыгабзэ"),
("ady-cyrl", "ady"),
("ady-latn", "Adygabze"),
("aeb", "aeb-arab"),
("aeb-arab", "تونسي"),
("aeb-latn", "Tûnsî"),
("af", "Afrikaans"),
("ahr", "अहिराणी"),
("ak", "Akan"),
("akz", "Albaamo innaaɬiilka"),
("aln", "Gegë"),
("alt", "алтай тил"),
("am", "አማርኛ"),
("ami", "Pangcah"),
("an", "aragonés"),
("ang", "Ænglisc"),
("ann", "Obolo"),
("anp", "अङ्गिका"),
("ar", "العربية"),
("arc", "ܐܪܡܝܐ"),
("arn", "mapudungun"),
("aro", "Araona"),
("arq", "جازايرية"),
("ary", "الدارجة"),
("ary-arab", "ary"),
("ary-latn", "ed-dārija"),
("arz", "مصرى"),
("as", "অসমীয়া"),
("ase", "American sign language"),
("ast", "asturianu"),
("atj", "atikamekw"),
("atv", "тÿндÿк алтай тил"),
("av", "авар"),
("avk", "Kotava"),
("awa", "अवधी"),
("ay", "Aymar aru"),
("az", "az-latn"),
("az-arab", "تۆرکجه"),
("az-latn", "azərbaycanca"),
("az-cyrl", "азәрбајҹанҹа"),
("azb", "az-arab"),
("azj", "az-latn"),
("ba", "башҡортса"),
("ban", "Bali"),
("ban-bali", "ᬩᬮᬶ"),
("bar", "Boarisch"),
("bas", "ɓasaá"),
("bat-smg", "sgs"),
("bbc-latn", "Batak Toba"),
("bbc-batk", "ᯅᯖᯂ᯲ ᯖᯬᯅ"),
("bbc", "bbc-latn"),
("bcc", "جهلسری بلوچی"),
("bci", "wawle"),
("bcl", "Bikol Central"),
("bdr", "Bajau Sama"),
("be-tarask", "беларуская (тарашкевіца)"),
("be-x-old", "be-tarask"),
("be", "беларуская"),
("bew", "Bahasa Betawi"),
("bfa", "Bari"),
("bft", "بلتی"),
("bfq", "படகா"),
("bg", "български"),
("bgn", "روچ کپتین بلوچی"),
("bh", "bho"),
("bho", "भोजपुरी"),
("bi", "Bislama"),
("bjn", "Banjar"),
("bkm", "Itaŋikom"),
("blc", "ItNuxalkmc"),
("bm", "bamanankan"),
("bn", "বাংলা"),
("bnn", "Bunun"),
("bo", "བོད་ཡིག"),
("bpy", "বিষ্ণুপ্রিয়া মণিপুরী"),
("bqi", "بختیاری"),
("br", "brezhoneg"),
("brh", "Bráhuí"),
("brx", "बर'"),
("bs", "bosanski"),
("btm", "Mandailing"),
("bto", "Iriga Bicolano"),
("bug", "ᨅᨔ ᨕᨘᨁᨗ"),
("bxr", "буряад"),
("byn", "ብሊን"),
("bzj", "Bileez Kriol"),
("ca", "català"),
("cak", "Kaqchikel"),
("cbk", "Chavacano de Zamboanga"),
("cbk-zam", "cbk"),
("ccp", "𑄌𑄋𑄴𑄟𑄳𑄦"),
("cdo", "Mìng-dĕ̤ng-ngṳ̄"),
("cdo-latn", "Mìng-dĕ̤ng-ngṳ̄ Bàng-uâ-cê"),
("cdo-hani", "閩東語(漢字)"),
("ce", "нохчийн"),
("ceb", "Cebuano"),
("ch", "Chamoru"),
("chm", "mhr"),
("chn", "chinuk wawa"),
("cho", "Choctaw"),
("chr", "ᏣᎳᎩ"),
("chy", "Tsetsêhestâhese"),
("ciw", "Anishinaabemowin"),
("cjy", "cjy-hant"),
("cjy-hans", "晋语(简化字)"),
("cjy-hant", "晉語"),
("ckb", "کوردی"),
("ckt", "ԓыгъоравэтԓьэн"),
("cnh", "Lai holh"),
("cnr", "cnr-latn"),
("cnr-cyrl", "црногорски"),
("cnr-latn", "crnogorski"),
("co", "corsu"),
("cop", "ϯⲙⲉⲧⲣⲉⲙⲛ̀ⲭⲏⲙⲓ"),
("cps", "Capiceño"),
("cr", "ᓀᐦᐃᔭᐍᐏᐣ"),
("cr-cans", "cr"),
("cr-latn", "Nēhiyawēwin"),
("crh", "qırımtatarca"),
("crh-cyrl", "къырымтатарджа"),
("crh-latn", "crh"),
("cs", "čeština"),
("csb", "kaszëbsczi"),
("cu", "словѣньскъ / ⰔⰎⰑⰂⰡⰐⰠⰔⰍⰟ"),
("cv", "Чӑвашла"),
("cy", "Cymraeg"),
("da", "dansk"),
("dag", "dagbanli"),
("dar", "дарган"),
("de-at", "Österreichisches Deutsch"),
("de-ch", "Schweizer Hochdeutsch"),
("de-formal", "Deutsch (Sie-Form)"),
("de", "Deutsch"),
("din", "Thuɔŋjäŋ"),
("diq", "Zazaki"),
("doi", "डोगरी"),
("dsb", "dolnoserbski"),
("dtp", "Dusun Bundu-liwan"),
("dty", "डोटेली"),
("dv", "ދިވެހިބަސް"),
("dz", "ཇོང་ཁ"),
("ee", "eʋegbe"),
("egl", "Emiliàn"),
("el", "Ελληνικά"),
("elm", "Eleme"),
("eml", "emiliàn e rumagnòl"),
("en-ca", "Canadian English"),
("en-gb", "British English"),
("en-simple", "Simple English"),
("en", "English"),
("eo", "Esperanto"),
("es-419", "español de América Latina"),
("es-formal", "español (formal)"),
("es", "español"),
("es-ni", "español nicaragüense"),
("esu", "Yup'ik"),
("et", "eesti"),
("eu", "euskara"),
("ext", "estremeñu"),
("eya", "I·ya·q"),
("fa", "فارسی"),
("fan", "Faŋ"),
("fax", "Fala"),
("ff", "Fulfulde"),
("fi", "suomi"),
("fil", "tl"),
("fit", "meänkieli"),
("fiu-vro", "vro"),
("fj", "Na Vosa Vakaviti"),
("fkv", "kvääni"),
("fo", "føroyskt"),
("fon", "fɔ̀ngbè"),
("fr", "français"),
("frc", "français cadien"),
("frp", "arpetan"),
("frr", "Nordfriisk"),
("fuf", "Fuuta Jalon"),
("fur", "furlan"),
("fy", "Frysk"),
("ga", "Gaeilge"),
("gaa", "Ga"),
("gag", "Gagauz"),
("gah", "Alekano"),
("gan-hans", "赣语(简体)"),
("gan-hant", "gan"),
("gan", "贛語"),
("gbm", "गढ़वळि"),
("gbz", "Dari-e Mazdeyasnā"),
("gcf", "Guadeloupean Creole French"),
("gcr", "kriyòl gwiyannen"),
("gd", "Gàidhlig"),
("gez", "ግዕዝ"),
("gl", "galego"),
("gld", "на̄ни"),
("glk", "گیلکی"),
("gn", "Avañe'ẽ"),
("gom", "gom-deva"),
("gom-deva", "गोंयची कोंकणी"),
("gom-latn", "Gõychi Konknni"),
("gor", "Bahasa Hulontalo"),
("got", "𐌲𐌿𐍄𐌹𐍃𐌺"),
("grc", "Ἀρχαία ἑλληνικὴ"),
("gsw", "Alemannisch"),
("gu", "ગુજરાતી"),
("guc", "wayuunaiki"),
("gum", "Namtrik"),
("gur", "Gurenɛ"),
("guw", "gungbe"),
("gv", "Gaelg"),
("ha", "Hausa"),
("ha-arab", "هَوُسَ"),
("ha-latn", "ha"),
("hai", "X̱aat Kíl"),
("hak", "Hak-kâ-fa"),
("haw", "Hawai`i"),
("he", "עברית"),
("hak-hans", "客家语(简体)"),
("hak-hant", "客家語(繁體)"),
("hi", "हिन्दी"),
("hif", "Fiji Hindi"),
("hif-deva", "फ़ीजी हिन्दी"),
("hif-latn", "hif"),
("hil", "Ilonggo"),
("hne", "छत्तीसगढ़ी"),
("ho", "Hiri Motu"),
("hoc", "𑢹𑣉𑣉"),
("hr", "hrvatski"),
("hrx", "Hunsrik"),
("hsb", "hornjoserbsce"),
("hsn", "湘语"),
("ht", "Kreyòl ayisyen"),
("hu-formal", "Magyar (magázó)"),
("hu", "magyar"),
("hy", "հայերեն"),
("hyw", "Արեւմտահայերէն"),
("hz", "Otsiherero"),
("ia", "interlingua"),
("id", "Bahasa Indonesia"),
("ie", "Interlingue"),
("ig", "Igbo"),
("ii", "ꆇꉙ"),
("ik", "Iñupiak"),
("ike-cans", "ᐃᓄᒃᑎᑐᑦ"),
("ike-latn", "inuktitut"),
("ilo", "Ilokano"),
("inh", "ГӀалгӀай"),
("io", "Ido"),
("is", "íslenska"),
("it", "italiano"),
("iu", "ike-cans"),
("izh", "ižoran keel"),
("ja", "日本語"),
("jam", "Patois"),
("jbo", "lojban"),
("jdt", "jdt-cyrl"),
("jdt-cyrl", "жугьури"),
("jje", "제주말"),
("jut", "jysk"),
("jv", "Jawa"),
("jv-java", "ꦗꦮ"),
("ka", "ქართული"),
("kaa", "Qaraqalpaqsha"),
("kab", "Taqbaylit"),
("kac", "Jinghpaw"),
("kbd-cyrl", "kbd"),
("kbd-latn", "Qabardjajəbza"),
("kbd", "Адыгэбзэ"),
("kbp", "Kabɩyɛ"),
("kcg", "Tyap"),
("kea", "Kabuverdianu"),
("kg", "Kongo"),
("kgp", "Kaingáng"),
("khw", "کھوار"),
("ki", "Gĩkũyũ"),
("kiu", "Kırmancki"),
("kj", "Kwanyama"),
("kjh", "хакас"),
("kjp", "ဖၠုံလိက်"),
("kk", "kk-cyrl"),
("kk-arab", "قازاقشا (تٶتە)"),
("kk-cn", "kk-arab"),
("kk-cyrl", "қазақша"),
("kk-kz", "kk-cyrl"),
("kk-latn", "qazaqşa"),
("kk-tr", "kk-latn"),
("kl", "kalaallisut"),
("km", "ភាសាខ្មែរ"),
("kn", "ಕನ್ನಡ"),
("knn", "महाराष्ट्रीय कोंकणी"),
("ko-kp", "조선말"),
("ko", "한국어"),
("koi", "перем коми"),
("koy", "Denaakkenaageʼ"),
("kr", "Kanuri"),
("krc", "къарачай-малкъар"),
("kri", "Krio"),
("krj", "Kinaray-a"),
("krl", "Karjala"),
("ks-arab", "کٲشُر"),
("ks-deva", "कॉशुर"),
("ks", "ks-arab"),
("ksf", "Bafia"),
("ksh", "Ripoarisch"),
("ksw", "စှီၤ ကညီကျိာ်"),
("ku", "ku-latn"),
("ku-arab", "كوردي"),
("ku-latn", "kurdî"),
("kum", "къумукъ"),
("kv", "коми"),
("kw", "kernowek"),
("ky", "Кыргызча"),
("la", "Latina"),
("lad", "lad-latn"),
("lad-latn", "Ladino"),
("lad-hebr", "לאדינו"),
("lag", "Kilaangi"),
("lb", "Lëtzebuergesch"),
("lbe", "лакку"),
("lez", "лезги"),
("lfn", "Lingua Franca Nova"),
("lg", "Luganda"),
("li", "Limburgs"),
("lij", "Ligure"),
("liv", "Līvõ kēļ"),
("lki", "لەکی"),
("lkt", "Lakȟótiyapi"),
("lld", "Ladin"),
("lmo", "lombard"),
("ln", "lingála"),
("lo", "ລາວ"),
("loz", "Silozi"),
("lt", "lietuvių"),
("lrc", "لۊری شومالی"),
("ltg", "latgaļu"),
("lud", "lüüdi"),
("lus", "Mizo ţawng"),
("lut", "dxʷləšucid"),
("luz", "لئری دوٙمینی"),
("lv", "latviešu"),
("lzh", "文言"),
("lzz", "Lazuri"),
("mad", "Madhurâ"),
("mai", "मैथिली"),
("map-bms", "Basa Banyumasan"),
("mdf", "мокшень"),
("mfe", "Morisyen"),
("mg", "Malagasy"),
("mh", "Ebon"),
("mhr", "олык марий"),
("mi", "Māori"),
("mic", "Mi'kmaq"),
("min", "Minangkabau"),
("miq", "Mískitu"),
("mk", "македонски"),
("ml", "മലയാളം"),
("mn", "монгол"),
("mn-cyrl", "mn"),
("mn-mong", "mvf"),
("mnc", "ᠮᠠᠨᠵᡠ ᡤᡳᠰᡠᠨ"),
("mni", "ꯃꯤꯇꯩ ꯂꯣꯟ"),
("mni-beng", "মেইতেই লোন্"),
("mnw", "ဘာသာ မန်"),
("mo", "молдовеняскэ"),
("moe", "innu-aimun"),
("mr", "मराठी"),
("mrh", "Mara"),
("mrj", "кырык мары"),
("mrv", "Magareva"),
("ms", "Bahasa Melayu"),
("ms-arab", "بهاس ملايو"),
("mt", "Malti"),
("mui", "Musi"),
("mus", "Mvskoke"),
("mvf", "ᠮᠣᠩᠭᠣᠯ"),
("mwl", "Mirandés"),
("mwv", "Behase Mentawei"),
("mww", "mww-latn"),
("mww-latn", "Hmoob Dawb"),
("my", "မြန်မာဘာသာ"),
("myv", "эрзянь"),
("mzn", "مازِرونی"),
("na", "Dorerin Naoero"),
("nah", "Nāhuatl"),
("nan", "Bân-lâm-gú"),
("nan-hani", "閩南語(漢字)"),
("nap", "Napulitano"),
("nb", "norsk (bokmål)"),
("nd", "siNdebele saseNyakatho"),
("nds-nl", "Nedersaksisch"),
("nds", "Plattdüütsch"),
("ne", "नेपाली"),
("new", "नेपाल भाषा"),
("ng", "Oshiwambo"),
("nia", "Li Niha"),
("niu", "ko e vagahau Niuē"),
("njo", "Ao"),
("nl-informal", "Nederlands (informeel)"),
("nl", "Nederlands"),
("nn", "norsk (nynorsk)"),
("no", "norsk"),
("nod", "คำเมือง"),
("nog", "ногайша"),
("nov", "Novial"),
("nqo", "ߒߞߏ"),
("nr", "isiNdebele seSewula"),
("nrm", "Nouormand"),
("nso", "Sesotho sa Leboa"),
("nus", "Thok Naath"),
("nv", "Diné bizaad"),
("ny", "Chi-Chewa"),
("nys", "Nyungar"),
("oc", "occitan"),
("ojb", "Ojibwemowin"),
("oka", "n̓səl̓xcin̓"),
("olo", "livvinkarjala"),
("om", "Oromoo"),
("ood", "ʼOʼodham ha-ñeʼokĭ"),
("or", "ଓଡ଼ିଆ"),
("os", "Ирон"),
("osi", "Using"),
("ota", "لسان عثمانى"),
("ovd", "övdalsk"),
("pa", "pa-guru"),
("pa-guru", "ਪੰਜਾਬੀ"),
("pag", "Pangasinan"),
("pam", "Kapampangan"),
("pap", "Papiamentu"),
("pap-aw", "Papiamento"),
("pbb", "Nasa Yuwe"),
("pcd", "Picard"),
("pdc", "Deitsch"),
("pdt", "Plautdietsch"),
("pfl", "Pälzisch"),
("pi", "पालि"),
("pih", "Norfuk / Pitkern"),
("pis", "Pijin"),
("pjt", "Pitjantjatjara"),
("pko", "Pökoot"),
("pl", "polski"),
("pms", "Piemontèis"),
("pnb", "پنجابی"),
("pnt", "Ποντιακά"),
("pov", "guinensi"),
("ppl", "Nawat"),
("prg", "Prūsiskan"),
("prs", "دری"),
("ps", "پښتو"),
("pt-br", "português do Brasil"),
("pt", "português"),
("pwn", "pinayuanan"),
("qu", "Runa Simi"),
("quc", "K'iche'"),
("qug", "Runa shimi"),
("qwh", "anqash qichwa"),
("rap", "arero rapa nui"),
("rcf", "Kreol Réyoné"),
("rej", "Jang"),
("rgn", "Rumagnôl"),
("rhg", "𐴌𐴟𐴇𐴥𐴝𐴚𐴒𐴙𐴝"),
("rif", "Tarifit"),
("rki", "ရခိုင်"),
("rm", "rumantsch"),
("rmc", "romaňi čhib"),
("rmf", "kaalengo tšimb"),
("rmy", "Romani"),
("rn", "Kirundi"),
("ro", "română"),
("roa-rup", "rup"),
("roa-tara", "tarandíne"),
("rtm", "Faeag Rotuma"),
("ru", "русский"),
("rue", "русиньскый"),
("rup", "armãneashti"),
("ruq", "Влахесте"),
("ruq-cyrl", "ruq"),
("ruq-grek", "Megleno-Romanian (Greek script)"),
("ruq-latn", "Vlăheşte"),
("rut", "мыхаӀбишды"),
("rw", "Kinyarwanda"),
("rwr", "मारवाड़ी"),
("ryu", "ʔucināguci"),
("sa", "संस्कृतम्"),
("sah", "саха тыла"),
("sat", "ᱥᱟᱱᱛᱟᱲᱤ"),
("saz", "ꢱꣃꢬꢵꢯ꣄ꢡ꣄ꢬꢵ"),
("sc", "sardu"),
("scn", "sicilianu"),
("sco", "Scots"),
("sd", "سنڌي"),
("sdc", "Sassaresu"),
("sdh", "کوردی خوارگ"),
("se", "davvisámegiella"),
("ses", "Koyraboro Senni"),
("sei", "Cmique Itom"),
("sg", "Sängö"),
("sgs", "žemaitėška"),
("sh", "srpskohrvatski"),
("shi-latn", "Taclḥit"),
("shi-tfng", "ⵜⴰⵛⵍⵃⵉⵜ"),
("shi", "shi-latn"),
("shn", "လိၵ်ႈတႆး"),
("shy-latn", "tacawit"),
("si", "සිංහල"),
("simple", "en-simple"),
("sjd", "кӣллт са̄мь кӣлл"),
("sje", "bidumsámegiella"),
("sjo", "ᠰᡞᠪᡝ ᡤᡞᠰᡠᠨ"),
("sju", "ubmejesámiengiälla"),
("sk", "slovenčina"),
("sl", "slovenščina"),
("sli", "Schläsch"),
("slr", "Salırça"),
("sly", "Bahasa Selayar"),
("skr-arab", "سرائیکی"),
("skr", "skr-arab"),
("syc", "ܣܘܪܝܝܐ"),
("syl", "ꠍꠤꠟꠐꠤ"),
("syl-beng", "সিলেটি"),
("syl-sylo", "syl"),
("sm", "Gagana Samoa"),
("sma", "åarjelsaemien"),
("smj", "julevsámegiella"),
("smn", "anarâškielâ"),
("sms", "nuõrttsääʹmǩiõll"),
("sn", "chiShona"),
("so", "Soomaaliga"),
("son", "soŋay"),
("sq", "shqip"),
("sr", "sr-cyrl"),
("sr-ec", "sr-cyrl"),
("sr-cyrl", "српски"),
("sr-el", "sr-latn"),
("sr-latn", "srpski"),
("srn", "Sranantongo"),
("ss", "SiSwati"),
("st", "Sesotho"),
("stq", "Seeltersk"),
("sty", "себертатар"),
("su", "Sunda"),
("sv", "svenska"),
("sw", "Kiswahili"),
("swb", "Shikomoro"),
("sxu", "Säggssch"),
("szl", "ślůnski"),
("szy", "Sakizaya"),
("ta", "தமிழ்"),
("tay", "Tayal"),
("tcy", "ತುಳು"),
("te", "తెలుగు"),
("tet", "tetun"),
("tg-cyrl", "тоҷикӣ"),
("tg-latn", "tojikī"),
("tg", "tg-cyrl"),
("th", "ไทย"),
("ti", "ትግርኛ"),
("tig", "ትግረ"),
("tk", "Türkmençe"),
("tkr", "ЦӀаӀхна миз"),
("tl", "Tagalog"),
("tly", "tolışi"),
("tly-cyrl", "толыши"),
("tmr", "ארמית בבלית"),
("tn", "Setswana"),
("to", "lea faka-Tonga"),
("tokipona", "Toki Pona"),
("tpi", "Tok Pisin"),
("tr", "Türkçe"),
("trp", "Kokborok (Tripuri)"),
("tru", "Ṫuroyo"),
("trv", "Sediq Taroko"),
("ts", "Xitsonga"),
("tsd", "Τσακωνικά"),
("tt", "татарча"),
("tt-cyrl", "tt"),
("tt-latn", "tatarça"),
("ttt", "Tati"),
("tum", "chiTumbuka"),
("tw", "Twi"),
("twd", "Tweants"),
("ty", "reo tahiti"),
("tyv", "тыва дыл"),
("tzl", "Talossan"),
("tzm", "ⵜⴰⵎⴰⵣⵉⵖⵜ"),
("udm", "удмурт"),
("ug", "ug-arab"),
("ug-arab", "ئۇيغۇرچە"),
("ug-latn", "uyghurche"),
("ug-cyrl", "уйғурчә"),
("uk", "українська"),
("umu", "Huluníixsuwaakan"),
("ur", "اردو"),
("uz", "oʻzbekcha"),
("ve", "Tshivenda"),
("vai", "ꕙꔤ"),
("vec", "vèneto"),
("vep", "vepsän kel’"),
("vi", "Tiếng Việt"),
("vls", "West-Vlams"),
("vmf", "Mainfränkisch"),
("vo", "Volapük"),
("vot", "Vaďďa"),
("vro", "võro"),
("wa", "walon"),
("war", "Winaray"),
("wls", "Faka'uvea"),
("wo", "Wolof"),
("wuu", "吴语"),
("xal", "хальмг"),
("xh", "isiXhosa"),
("xmf", "მარგალური"),
("xsy", "SaiSiyat"),
("ydd", "Eastern Yiddish"),
("yi", "ייִדיש"),
("yo", "Yorùbá"),
("yrk", "Ненэцяʼ вада"),
("yrl", "ñe'engatú"),
("yua", "Maaya T'aan"),
("yue", "粵語"),
("za", "Vahcuengh"),
("zea", "Zeêuws"),
("zgh", "ⵜⴰⵎⴰⵣⵉⵖⵜ ⵜⴰⵏⴰⵡⴰⵢⵜ"),
("zh", "中文"),
("zh-classical", "lzh"),
("zh-cn", "中文(中国大陆)"),
("zh-hans", "中文(简体)"),
("zh-hant", "中文(繁體)"),
("zh-hk", "中文(香港)"),
("zh-min-nan", "nan"),
("zh-mo", "中文(澳門)"),
("zh-my", "中文(马来西亚)"),
("zh-sg", "中文(新加坡)"),
("zh-tw", "中文(台灣)"),
("zh-yue", "yue"),
("zh-cdo", "cdo"),
("zu", "isiZulu"),
("zun", "Shiwi'ma"),
],
max_length=12,
unique=True,
validators=[TWLight.resources.models.validate_language_code],
),
),
]
|
mit
| -8,616,352,393,385,634,000 | 40.173913 | 77 | 0.26807 | false |
jr-minnaar/bitrader
|
bitrader/api_tools.py
|
1
|
6414
|
import json
import os
from functools import partial
from itertools import chain
from logging import getLogger
from typing import Callable
import redis as redis
import requests_cache
from requests import session
from requests_futures.sessions import FuturesSession
logger = getLogger()
# TODO: Allow cache backend to be configurable
# if True:
# requests_cache.install_cache(os.path.join('./', 'api2'), backend='sqlite', expire_after=2592000)
def flatten_dict(response: dict):
# http://feldboris.alwaysdata.net/blog/python-trick-how-to-flatten-dictionaries-values-composed-of-iterables.html
return chain.from_iterable(response.values())
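# Illustrative note (not from the original source): flatten_dict chains the
# dict's values together, so a hypothetical input like {'a': [1, 2], 'b': [3]}
# would yield the elements 1, 2, 3 when the returned iterator is consumed.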
def get_currency_map(response):
if response:
d = dict()
for s in response:
if s:
d.update({'%s:%s' % (s['exchange'], s['symbol']): s['currency']})
return d
class ExternalAPIException(Exception):
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
class BaseAPI:
token = ''
url_template = 'https://httpbin.org/{resource}'
resource_templates = {
'ip': 'ip',
'get': 'get?{args}',
'html': 'html',
'delay': 'delay/{n}',
'status': 'status/{code}',
}
endpoint_options = {}
def __init__(self, cache: bool = False, future: bool = True):
if cache:
redis_conn = redis.StrictRedis(host='redis')
self.session = requests_cache.core.CachedSession(
cache_name='api_cache',
backend='redis', expire_after=60 * 60 * 24 * 30,
allowable_codes=(200,),
allowable_methods=('GET',),
old_data_on_error=False,
connection=redis_conn,
)
else:
self.session = session()
if future:
self.future_session = FuturesSession(max_workers=10, session=self.session)
self.url = self.url_template.format(resource='', token=self.token)
def get_resource(self, resource: str, processor: Callable[[dict], dict] = None,
data_format: str = 'raw', future: bool = False, **kwargs):
"""Method doing the actual heavy lifting
Args:
resource:
processor:
data_format: Default = raw. Options: raw, json, dataframe.
Determines the format the data will be returned in.
future: Default = False. Runs requests in parallel if True.
**kwargs:
Returns:
"""
if resource not in self.resource_templates.keys():
raise KeyError('%s is not a valid resource. Options are: %s' % (
resource, self.resource_templates.keys()))
try:
if not getattr(self, 'token', None):
self.token = ''
self.url = self.url_template.format(
token=self.token,
resource=self.resource_templates[resource].format(**kwargs))
except KeyError as e:
cause = e.args[0]
error_message = 'Resource requires extra key: %s. Valid options are: %s' % (
cause, self.endpoint_options.get(cause, 'no options found...'))
print(error_message)
logger.exception(error_message)
raise
else:
callback = partial(request_hook, data_format=data_format, processor=processor, **kwargs)
hooks = dict(response=callback)
request_session = self.future_session if future else self.session
response = {
'url': self.url_template.format(
token=self.token, resource=self.resource_templates[resource].format(**kwargs)),
'response': request_session.get(self.url, hooks=hooks),
'resource': resource,
'kwargs': kwargs,
}
return response
def request_hook(response, data_format: str = 'raw', processor=None, *args, **kwargs):
"""
Args:
response: The response object.
        data_format: Default = raw. Options: raw, json, dataframe.
Determines the format the data will be returned in.
processor:
*args:
**kwargs:
Returns:
"""
logger.debug(response.url)
if not response.ok:
logger.error('%s %s' % (response.status_code, response.content[:20]))
if kwargs.get('raise_on_error', True):
raise ExternalAPIException('Got non-ok response: {}'.format(response.url))
else:
response.data = None
elif data_format == 'json' or data_format == 'dataframe':
try:
json_response = response.json()
except json.JSONDecodeError:
logger.exception(response.content)
if kwargs.get('raise_on_error', True):
raise ExternalAPIException('Returned invalid json')
else:
response.data = None
else:
if processor:
response.data = processor(json_response)
else:
response.data = json_response
if data_format == 'dataframe':
import pandas as pd
response.data = pd.DataFrame(response.data)
elif data_format == 'raw' and kwargs.get('encoding', ''):
response.data = response.content.decode(kwargs['encoding'])
else:
response.data = response.content
class OpenExchangeAPI(BaseAPI):
token = os.environ.get('OPEN_EXCHANGE_APP_ID', '')
url_template = 'https://openexchangerates.org/api/{resource}?app_id={token}'
resource_templates = {
'historical': 'historical/{date}.json',
'currencies': 'currencies.json',
'latest': 'latest.json',
}
class HTTPBinAPI(BaseAPI):
pass
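# Hedged usage sketch (added for illustration, not part of the original module;
# the httpbin.org response payload is an assumption about that service):
#
#   api = HTTPBinAPI(cache=False, future=False)
#   res = api.get_resource('delay', data_format='json', n=1)
#   # res['url'] -> 'https://httpbin.org/delay/1'
#   # res['response'].data holds the decoded JSON body once the request returns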
class Ice3xAPI(BaseAPI):
# token = os.environ.get('OPEN_EXCHANGE_APP_ID', '')
url_template = 'https://www.ICE3X.com/api/v1/{resource}'
resource_templates = {
'generic': '{api_method}/{api_action}?{api_params}',
'stats': 'stats/marketdepthfull',
'orderbook': 'orderbook/info?nonce={nonce}&type=bid&pair_id=6',
}
    pair_ids = [
        {'pair_id': '3', 'pair_name': 'btc/zar'},
        {'pair_id': '4', 'pair_name': 'btc/ngn'},
        {'pair_id': '6', 'pair_name': 'ltc/zar'},
        {'pair_id': '7', 'pair_name': 'ltc/ngn'},
        {'pair_id': '11', 'pair_name': 'eth/zar'},
    ]
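# Hedged usage sketch (illustration only, not part of the original module; it
# assumes the ICE3X endpoint above still answers this request):
#
#   ice = Ice3xAPI(cache=False, future=False)
#   depth = ice.get_resource('stats', data_format='json')
#   # depth['url'] -> 'https://www.ICE3X.com/api/v1/stats/marketdepthfull'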
|
mit
| -6,142,272,176,330,282,000 | 32.061856 | 117 | 0.575148 | false |
tomix86/hpcpm
|
api/tests/test_main.py
|
1
|
5941
|
import configparser
import unittest
import mock
from hpcpm.api.main import main, prepare_app_configuration, parse_args, create_parser, add_arguments_to_parser, \
try_to_configure_logging, configure_logging, try_to_parse_config_file, parse_config_file, handle_parsing_error, \
run_api
class TestMain(unittest.TestCase):
# pylint: disable=invalid-name
def setUp(self):
pass
def tearDown(self):
pass
@mock.patch('hpcpm.api.main.prepare_app_configuration', mock.Mock(return_value=['a', 'b']))
@mock.patch('hpcpm.api.main.run_api')
def test_main(self, m_run_api):
val = main()
self.assertEqual(val, 0)
m_run_api.assert_called_once_with('a', 'b')
@mock.patch('hpcpm.api.main.try_to_configure_logging')
@mock.patch('hpcpm.api.main.parse_args')
@mock.patch('hpcpm.api.main.try_to_parse_config_file')
def test_prepare_app_configuration(self, m_try_to_parse_config_file, m_parse_args, m_try_to_configure_logging, ):
args = mock.Mock()
args.port = 1234
args.config = 'conf.conf'
m_parse_args.return_value = args
m_try_to_parse_config_file.return_value = {'a': 'b'}
val1, val2 = prepare_app_configuration(['qwe'])
self.assertEqual(val1, {'a': 'b'})
self.assertEqual(val2, 1234)
m_parse_args.assert_called_with(['qwe'])
m_try_to_configure_logging.assert_called_once_with('conf.conf')
m_try_to_parse_config_file.assert_called_once_with('conf.conf')
@mock.patch('hpcpm.api.main.create_parser')
@mock.patch('hpcpm.api.main.add_arguments_to_parser')
def test_parse_args(self, m_add_arguments_to_parser, m_create_parser):
parser = mock.MagicMock()
parser.parse_args.return_value = 'args'
m_create_parser.return_value = parser
val = parse_args('a')
self.assertEqual(val, 'args')
m_add_arguments_to_parser.assert_called_once_with(parser)
parser.parse_args.assert_called_once_with('a')
@mock.patch('argparse.ArgumentParser')
def test_create_parser(self, m_ArgumentParser):
m_ArgumentParser.return_value = 'parser'
val = create_parser()
self.assertEqual(val, 'parser')
m_ArgumentParser.assert_called_once_with(description='HPC Power Management - API')
def test_add_arguments_to_parser(self):
parser = mock.MagicMock()
add_arguments_to_parser(parser)
self.assertEqual(parser.add_argument.call_count, 2)
parser.add_argument.assert_any_call('-c', '--config',
action='store',
default='api.conf.ini',
help='path to a config file')
parser.add_argument.assert_called_with('-p', '--port',
action='store',
type=int,
default=8081,
help='port on which API will be exposed')
@mock.patch('hpcpm.api.main.configure_logging')
def test_try_to_configure_logging_no_exception(self, m_configure_logging):
try_to_configure_logging('conf.conf')
m_configure_logging.assert_called_once_with('conf.conf')
@mock.patch('hpcpm.api.main.configure_logging')
@mock.patch('hpcpm.api.main.handle_parsing_error')
def test_try_to_configure_logging_ParsingError(self, m_handle_parsing_error, m_configure_logging):
m_configure_logging.side_effect = configparser.ParsingError(source='abcd')
try_to_configure_logging('conf.conf')
        m_handle_parsing_error.assert_called_once_with()
@mock.patch('hpcpm.api.main.configure_logging')
def test_try_to_configure_logging_exception(self, m_configure_logging):
m_configure_logging.side_effect = Exception()
self.assertRaises(Exception, try_to_configure_logging, 'conf.conf')
@mock.patch('logging.config.fileConfig')
def test_configure_logging(self, m_fileConfig):
configure_logging('conf.conf')
m_fileConfig.assert_called_once_with('conf.conf')
@mock.patch('hpcpm.api.main.parse_config_file')
def test_try_to_parse_config_file(self, m_parse_config_file):
m_parse_config_file.return_value = 'config'
val = try_to_parse_config_file('conf.conf')
self.assertEqual(val, 'config')
m_parse_config_file.assert_called_once_with('conf.conf')
@mock.patch('hpcpm.api.main.parse_config_file')
@mock.patch('hpcpm.api.main.handle_parsing_error')
def test_try_to_parse_config_file_ParsingError(self, m_handle_parsing_error, m_parse_config_file):
m_parse_config_file.side_effect = configparser.ParsingError(source='abcd')
try_to_parse_config_file('conf.conf')
        m_handle_parsing_error.assert_called_once_with()
@mock.patch('hpcpm.api.main.parse_config_file')
def test_try_to_parse_config_file_exception(self, m_parse_config_file):
m_parse_config_file.side_effect = Exception()
self.assertRaises(Exception, try_to_parse_config_file, 'conf.conf')
@mock.patch('configparser.ConfigParser')
def test_parse_config_file(self, m_ConfigParser):
config = mock.MagicMock(return_value='config')
m_ConfigParser.return_value = config
parse_config_file('conf.conf')
m_ConfigParser.assert_called_once_with()
config.read.assert_called_once_with('conf.conf')
@mock.patch('sys.exit')
def test_handle_parsing_error(self, m_exit):
handle_parsing_error()
m_exit.assert_called_once_with(-1)
@mock.patch('hpcpm.api.app.initialize')
@mock.patch('hpcpm.api.app.run')
def test_run_api(self, m_run, m_initialize):
run_api('conf', 1234)
m_initialize.assert_called_once_with('conf')
m_run.assert_called_once_with(1234)
|
mit
| 2,037,818,618,855,758,600 | 37.083333 | 117 | 0.631375 | false |
inmcm/micropyGPS
|
micropyGPS.py
|
1
|
29443
|
"""
# MicropyGPS - a GPS NMEA sentence parser for Micropython/Python 3.X
# Copyright (c) 2017 Michael Calvin McCoy ([email protected])
# The MIT License (MIT) - see LICENSE file
"""
# TODO:
# Time Since First Fix
# Distance/Time to Target
# More Helper Functions
# Dynamically limit sentences types to parse
from math import floor, modf
# Import utime or time for fix time handling
try:
# Assume running on MicroPython
import utime
except ImportError:
# Otherwise default to time module for non-embedded implementations
# Should still support millisecond resolution.
import time
class MicropyGPS(object):
"""GPS NMEA Sentence Parser. Creates object that stores all relevant GPS data and statistics.
Parses sentences one character at a time using update(). """
# Max Number of Characters a valid sentence can be (based on GGA sentence)
SENTENCE_LIMIT = 90
__HEMISPHERES = ('N', 'S', 'E', 'W')
__NO_FIX = 1
__FIX_2D = 2
__FIX_3D = 3
__DIRECTIONS = ('N', 'NNE', 'NE', 'ENE', 'E', 'ESE', 'SE', 'SSE', 'S', 'SSW', 'SW', 'WSW', 'W',
'WNW', 'NW', 'NNW')
__MONTHS = ('January', 'February', 'March', 'April', 'May',
'June', 'July', 'August', 'September', 'October',
'November', 'December')
def __init__(self, local_offset=0, location_formatting='ddm'):
"""
Setup GPS Object Status Flags, Internal Data Registers, etc
        local_offset (int): Timezone Difference to UTC
location_formatting (str): Style For Presenting Longitude/Latitude:
Decimal Degree Minute (ddm) - 40° 26.767′ N
Degrees Minutes Seconds (dms) - 40° 26′ 46″ N
Decimal Degrees (dd) - 40.446° N
"""
#####################
# Object Status Flags
self.sentence_active = False
self.active_segment = 0
self.process_crc = False
self.gps_segments = []
self.crc_xor = 0
self.char_count = 0
self.fix_time = 0
#####################
# Sentence Statistics
self.crc_fails = 0
self.clean_sentences = 0
self.parsed_sentences = 0
#####################
# Logging Related
self.log_handle = None
self.log_en = False
#####################
# Data From Sentences
# Time
self.timestamp = [0, 0, 0]
self.date = [0, 0, 0]
self.local_offset = local_offset
# Position/Motion
self._latitude = [0, 0.0, 'N']
self._longitude = [0, 0.0, 'W']
self.coord_format = location_formatting
self.speed = [0.0, 0.0, 0.0]
self.course = 0.0
self.altitude = 0.0
self.geoid_height = 0.0
# GPS Info
self.satellites_in_view = 0
self.satellites_in_use = 0
self.satellites_used = []
self.last_sv_sentence = 0
self.total_sv_sentences = 0
self.satellite_data = dict()
self.hdop = 0.0
self.pdop = 0.0
self.vdop = 0.0
self.valid = False
self.fix_stat = 0
self.fix_type = 1
########################################
# Coordinates Translation Functions
########################################
@property
def latitude(self):
"""Format Latitude Data Correctly"""
if self.coord_format == 'dd':
decimal_degrees = self._latitude[0] + (self._latitude[1] / 60)
return [decimal_degrees, self._latitude[2]]
elif self.coord_format == 'dms':
minute_parts = modf(self._latitude[1])
seconds = round(minute_parts[0] * 60)
return [self._latitude[0], int(minute_parts[1]), seconds, self._latitude[2]]
else:
return self._latitude
@property
def longitude(self):
"""Format Longitude Data Correctly"""
if self.coord_format == 'dd':
decimal_degrees = self._longitude[0] + (self._longitude[1] / 60)
return [decimal_degrees, self._longitude[2]]
elif self.coord_format == 'dms':
minute_parts = modf(self._longitude[1])
seconds = round(minute_parts[0] * 60)
return [self._longitude[0], int(minute_parts[1]), seconds, self._longitude[2]]
else:
return self._longitude
########################################
# Logging Related Functions
########################################
def start_logging(self, target_file, mode="append"):
"""
Create GPS data log object
"""
# Set Write Mode Overwrite or Append
mode_code = 'w' if mode == 'new' else 'a'
try:
self.log_handle = open(target_file, mode_code)
except AttributeError:
print("Invalid FileName")
return False
self.log_en = True
return True
def stop_logging(self):
"""
Closes the log file handler and disables further logging
"""
try:
self.log_handle.close()
except AttributeError:
print("Invalid Handle")
return False
self.log_en = False
return True
def write_log(self, log_string):
"""Attempts to write the last valid NMEA sentence character to the active file handler
"""
try:
self.log_handle.write(log_string)
except TypeError:
return False
return True
########################################
# Sentence Parsers
########################################
def gprmc(self):
"""Parse Recommended Minimum Specific GPS/Transit data (RMC)Sentence.
Updates UTC timestamp, latitude, longitude, Course, Speed, Date, and fix status
"""
# UTC Timestamp
try:
utc_string = self.gps_segments[1]
if utc_string: # Possible timestamp found
hours = (int(utc_string[0:2]) + self.local_offset) % 24
minutes = int(utc_string[2:4])
seconds = float(utc_string[4:])
self.timestamp = (hours, minutes, seconds)
else: # No Time stamp yet
self.timestamp = (0, 0, 0)
except ValueError: # Bad Timestamp value present
return False
# Date stamp
try:
date_string = self.gps_segments[9]
            # Date string printer function assumes the year is >= 2000;
# date_string() must be supplied with the correct century argument to display correctly
if date_string: # Possible date stamp found
day = int(date_string[0:2])
month = int(date_string[2:4])
year = int(date_string[4:6])
self.date = (day, month, year)
else: # No Date stamp yet
self.date = (0, 0, 0)
except ValueError: # Bad Date stamp value present
return False
# Check Receiver Data Valid Flag
if self.gps_segments[2] == 'A': # Data from Receiver is Valid/Has Fix
# Longitude / Latitude
try:
# Latitude
l_string = self.gps_segments[3]
lat_degs = int(l_string[0:2])
lat_mins = float(l_string[2:])
lat_hemi = self.gps_segments[4]
# Longitude
l_string = self.gps_segments[5]
lon_degs = int(l_string[0:3])
lon_mins = float(l_string[3:])
lon_hemi = self.gps_segments[6]
except ValueError:
return False
if lat_hemi not in self.__HEMISPHERES:
return False
if lon_hemi not in self.__HEMISPHERES:
return False
# Speed
try:
spd_knt = float(self.gps_segments[7])
except ValueError:
return False
# Course
try:
if self.gps_segments[8]:
course = float(self.gps_segments[8])
else:
course = 0.0
except ValueError:
return False
# TODO - Add Magnetic Variation
# Update Object Data
self._latitude = [lat_degs, lat_mins, lat_hemi]
self._longitude = [lon_degs, lon_mins, lon_hemi]
# Include mph and hm/h
self.speed = [spd_knt, spd_knt * 1.151, spd_knt * 1.852]
self.course = course
self.valid = True
# Update Last Fix Time
self.new_fix_time()
else: # Clear Position Data if Sentence is 'Invalid'
self._latitude = [0, 0.0, 'N']
self._longitude = [0, 0.0, 'W']
self.speed = [0.0, 0.0, 0.0]
self.course = 0.0
self.valid = False
return True
def gpgll(self):
"""Parse Geographic Latitude and Longitude (GLL)Sentence. Updates UTC timestamp, latitude,
longitude, and fix status"""
# UTC Timestamp
try:
utc_string = self.gps_segments[5]
if utc_string: # Possible timestamp found
hours = (int(utc_string[0:2]) + self.local_offset) % 24
minutes = int(utc_string[2:4])
seconds = float(utc_string[4:])
self.timestamp = (hours, minutes, seconds)
else: # No Time stamp yet
self.timestamp = (0, 0, 0)
except ValueError: # Bad Timestamp value present
return False
# Check Receiver Data Valid Flag
if self.gps_segments[6] == 'A': # Data from Receiver is Valid/Has Fix
# Longitude / Latitude
try:
# Latitude
l_string = self.gps_segments[1]
lat_degs = int(l_string[0:2])
lat_mins = float(l_string[2:])
lat_hemi = self.gps_segments[2]
# Longitude
l_string = self.gps_segments[3]
lon_degs = int(l_string[0:3])
lon_mins = float(l_string[3:])
lon_hemi = self.gps_segments[4]
except ValueError:
return False
if lat_hemi not in self.__HEMISPHERES:
return False
if lon_hemi not in self.__HEMISPHERES:
return False
# Update Object Data
self._latitude = [lat_degs, lat_mins, lat_hemi]
self._longitude = [lon_degs, lon_mins, lon_hemi]
self.valid = True
# Update Last Fix Time
self.new_fix_time()
else: # Clear Position Data if Sentence is 'Invalid'
self._latitude = [0, 0.0, 'N']
self._longitude = [0, 0.0, 'W']
self.valid = False
return True
def gpvtg(self):
"""Parse Track Made Good and Ground Speed (VTG) Sentence. Updates speed and course"""
try:
course = float(self.gps_segments[1])
spd_knt = float(self.gps_segments[5])
except ValueError:
return False
# Include mph and km/h
self.speed = (spd_knt, spd_knt * 1.151, spd_knt * 1.852)
self.course = course
return True
def gpgga(self):
"""Parse Global Positioning System Fix Data (GGA) Sentence. Updates UTC timestamp, latitude, longitude,
fix status, satellites in use, Horizontal Dilution of Precision (HDOP), altitude, geoid height and fix status"""
try:
# UTC Timestamp
utc_string = self.gps_segments[1]
# Skip timestamp if receiver doesn't have on yet
if utc_string:
hours = (int(utc_string[0:2]) + self.local_offset) % 24
minutes = int(utc_string[2:4])
seconds = float(utc_string[4:])
else:
hours = 0
minutes = 0
seconds = 0.0
# Number of Satellites in Use
satellites_in_use = int(self.gps_segments[7])
# Get Fix Status
fix_stat = int(self.gps_segments[6])
except (ValueError, IndexError):
return False
try:
# Horizontal Dilution of Precision
hdop = float(self.gps_segments[8])
except (ValueError, IndexError):
hdop = 0.0
# Process Location and Speed Data if Fix is GOOD
if fix_stat:
# Longitude / Latitude
try:
# Latitude
l_string = self.gps_segments[2]
lat_degs = int(l_string[0:2])
lat_mins = float(l_string[2:])
lat_hemi = self.gps_segments[3]
# Longitude
l_string = self.gps_segments[4]
lon_degs = int(l_string[0:3])
lon_mins = float(l_string[3:])
lon_hemi = self.gps_segments[5]
except ValueError:
return False
if lat_hemi not in self.__HEMISPHERES:
return False
if lon_hemi not in self.__HEMISPHERES:
return False
# Altitude / Height Above Geoid
try:
altitude = float(self.gps_segments[9])
geoid_height = float(self.gps_segments[11])
except ValueError:
altitude = 0
geoid_height = 0
# Update Object Data
self._latitude = [lat_degs, lat_mins, lat_hemi]
self._longitude = [lon_degs, lon_mins, lon_hemi]
self.altitude = altitude
self.geoid_height = geoid_height
# Update Object Data
self.timestamp = [hours, minutes, seconds]
self.satellites_in_use = satellites_in_use
self.hdop = hdop
self.fix_stat = fix_stat
# If Fix is GOOD, update fix timestamp
if fix_stat:
self.new_fix_time()
return True
def gpgsa(self):
"""Parse GNSS DOP and Active Satellites (GSA) sentence. Updates GPS fix type, list of satellites used in
fix calculation, Position Dilution of Precision (PDOP), Horizontal Dilution of Precision (HDOP), Vertical
Dilution of Precision, and fix status"""
# Fix Type (None,2D or 3D)
try:
fix_type = int(self.gps_segments[2])
except ValueError:
return False
# Read All (up to 12) Available PRN Satellite Numbers
sats_used = []
for sats in range(12):
sat_number_str = self.gps_segments[3 + sats]
if sat_number_str:
try:
sat_number = int(sat_number_str)
sats_used.append(sat_number)
except ValueError:
return False
else:
break
# PDOP,HDOP,VDOP
try:
pdop = float(self.gps_segments[15])
hdop = float(self.gps_segments[16])
vdop = float(self.gps_segments[17])
except ValueError:
return False
# Update Object Data
self.fix_type = fix_type
# If Fix is GOOD, update fix timestamp
if fix_type > self.__NO_FIX:
self.new_fix_time()
self.satellites_used = sats_used
self.hdop = hdop
self.vdop = vdop
self.pdop = pdop
return True
def gpgsv(self):
"""Parse Satellites in View (GSV) sentence. Updates number of SV Sentences,the number of the last SV sentence
parsed, and data on each satellite present in the sentence"""
try:
num_sv_sentences = int(self.gps_segments[1])
current_sv_sentence = int(self.gps_segments[2])
sats_in_view = int(self.gps_segments[3])
except ValueError:
return False
# Create a blank dict to store all the satellite data from this sentence in:
# satellite PRN is key, tuple containing telemetry is value
satellite_dict = dict()
        # Calculate Number of Satellites to pull data for and thus how many segment positions to read
if num_sv_sentences == current_sv_sentence:
# Last sentence may have 1-4 satellites; 5 - 20 positions
sat_segment_limit = (sats_in_view - ((num_sv_sentences - 1) * 4)) * 5
else:
sat_segment_limit = 20 # Non-last sentences have 4 satellites and thus read up to position 20
# Try to recover data for up to 4 satellites in sentence
for sats in range(4, sat_segment_limit, 4):
# If a PRN is present, grab satellite data
if self.gps_segments[sats]:
try:
sat_id = int(self.gps_segments[sats])
except (ValueError,IndexError):
return False
try: # elevation can be null (no value) when not tracking
elevation = int(self.gps_segments[sats+1])
except (ValueError,IndexError):
elevation = None
try: # azimuth can be null (no value) when not tracking
azimuth = int(self.gps_segments[sats+2])
except (ValueError,IndexError):
azimuth = None
try: # SNR can be null (no value) when not tracking
snr = int(self.gps_segments[sats+3])
except (ValueError,IndexError):
snr = None
# If no PRN is found, then the sentence has no more satellites to read
else:
break
# Add Satellite Data to Sentence Dict
satellite_dict[sat_id] = (elevation, azimuth, snr)
# Update Object Data
self.total_sv_sentences = num_sv_sentences
self.last_sv_sentence = current_sv_sentence
self.satellites_in_view = sats_in_view
# For a new set of sentences, we either clear out the existing sat data or
# update it as additional SV sentences are parsed
if current_sv_sentence == 1:
self.satellite_data = satellite_dict
else:
self.satellite_data.update(satellite_dict)
return True
##########################################
# Data Stream Handler Functions
##########################################
def new_sentence(self):
"""Adjust Object Flags in Preparation for a New Sentence"""
self.gps_segments = ['']
self.active_segment = 0
self.crc_xor = 0
self.sentence_active = True
self.process_crc = True
self.char_count = 0
def update(self, new_char):
"""Process a new input char and updates GPS object if necessary based on special characters ('$', ',', '*')
Function builds a list of received string that are validate by CRC prior to parsing by the appropriate
sentence function. Returns sentence type on successful parse, None otherwise"""
valid_sentence = False
# Validate new_char is a printable char
ascii_char = ord(new_char)
if 10 <= ascii_char <= 126:
self.char_count += 1
# Write Character to log file if enabled
if self.log_en:
self.write_log(new_char)
# Check if a new string is starting ($)
if new_char == '$':
self.new_sentence()
return None
elif self.sentence_active:
# Check if sentence is ending (*)
if new_char == '*':
self.process_crc = False
self.active_segment += 1
self.gps_segments.append('')
return None
# Check if a section is ended (,), Create a new substring to feed
# characters to
elif new_char == ',':
self.active_segment += 1
self.gps_segments.append('')
# Store All Other printable character and check CRC when ready
else:
self.gps_segments[self.active_segment] += new_char
# When CRC input is disabled, sentence is nearly complete
if not self.process_crc:
if len(self.gps_segments[self.active_segment]) == 2:
try:
final_crc = int(self.gps_segments[self.active_segment], 16)
if self.crc_xor == final_crc:
valid_sentence = True
else:
self.crc_fails += 1
except ValueError:
pass # CRC Value was deformed and could not have been correct
# Update CRC
if self.process_crc:
self.crc_xor ^= ascii_char
# If a Valid Sentence Was received and it's a supported sentence, then parse it!!
if valid_sentence:
self.clean_sentences += 1 # Increment clean sentences received
self.sentence_active = False # Clear Active Processing Flag
if self.gps_segments[0] in self.supported_sentences:
# parse the Sentence Based on the message type, return True if parse is clean
if self.supported_sentences[self.gps_segments[0]](self):
# Let host know that the GPS object was updated by returning parsed sentence type
self.parsed_sentences += 1
return self.gps_segments[0]
                # Check that the sentence buffer isn't filling up with Garbage waiting for the sentence to complete
if self.char_count > self.SENTENCE_LIMIT:
self.sentence_active = False
# Tell Host no new sentence was parsed
return None
def new_fix_time(self):
"""Updates a high resolution counter with current time when fix is updated. Currently only triggered from
GGA, GSA and RMC sentences"""
try:
self.fix_time = utime.ticks_ms()
except NameError:
self.fix_time = time.time()
#########################################
# User Helper Functions
# These functions make working with the GPS object data easier
#########################################
def satellite_data_updated(self):
"""
Checks if the all the GSV sentences in a group have been read, making satellite data complete
:return: boolean
"""
if self.total_sv_sentences > 0 and self.total_sv_sentences == self.last_sv_sentence:
return True
else:
return False
def unset_satellite_data_updated(self):
"""
Mark GSV sentences as read indicating the data has been used and future updates are fresh
"""
self.last_sv_sentence = 0
def satellites_visible(self):
"""
Returns a list of of the satellite PRNs currently visible to the receiver
:return: list
"""
return list(self.satellite_data.keys())
def time_since_fix(self):
"""Returns number of millisecond since the last sentence with a valid fix was parsed. Returns 0 if
no fix has been found"""
# Test if a Fix has been found
if self.fix_time == 0:
return -1
# Try calculating fix time using utime; if not running MicroPython
# time.time() returns a floating point value in secs
try:
current = utime.ticks_diff(utime.ticks_ms(), self.fix_time)
except NameError:
current = (time.time() - self.fix_time) * 1000 # ms
return current
def compass_direction(self):
"""
Determine a cardinal or inter-cardinal direction based on current course.
:return: string
"""
# Calculate the offset for a rotated compass
if self.course >= 348.75:
offset_course = 360 - self.course
else:
offset_course = self.course + 11.25
# Each compass point is separated by 22.5 degrees, divide to find lookup value
dir_index = floor(offset_course / 22.5)
final_dir = self.__DIRECTIONS[dir_index]
return final_dir
def latitude_string(self):
"""
Create a readable string of the current latitude data
:return: string
"""
if self.coord_format == 'dd':
formatted_latitude = self.latitude
lat_string = str(formatted_latitude[0]) + '° ' + str(self._latitude[2])
elif self.coord_format == 'dms':
formatted_latitude = self.latitude
lat_string = str(formatted_latitude[0]) + '° ' + str(formatted_latitude[1]) + "' " + str(formatted_latitude[2]) + '" ' + str(formatted_latitude[3])
else:
lat_string = str(self._latitude[0]) + '° ' + str(self._latitude[1]) + "' " + str(self._latitude[2])
return lat_string
def longitude_string(self):
"""
Create a readable string of the current longitude data
:return: string
"""
if self.coord_format == 'dd':
formatted_longitude = self.longitude
lon_string = str(formatted_longitude[0]) + '° ' + str(self._longitude[2])
elif self.coord_format == 'dms':
formatted_longitude = self.longitude
lon_string = str(formatted_longitude[0]) + '° ' + str(formatted_longitude[1]) + "' " + str(formatted_longitude[2]) + '" ' + str(formatted_longitude[3])
else:
lon_string = str(self._longitude[0]) + '° ' + str(self._longitude[1]) + "' " + str(self._longitude[2])
return lon_string
def speed_string(self, unit='kph'):
"""
Creates a readable string of the current speed data in one of three units
:param unit: string of 'kph','mph, or 'knot'
:return:
"""
if unit == 'mph':
speed_string = str(self.speed[1]) + ' mph'
elif unit == 'knot':
if self.speed[0] == 1:
unit_str = ' knot'
else:
unit_str = ' knots'
speed_string = str(self.speed[0]) + unit_str
else:
speed_string = str(self.speed[2]) + ' km/h'
return speed_string
def date_string(self, formatting='s_mdy', century='20'):
"""
Creates a readable string of the current date.
        Can select between long format: January 1st, 2014
or two short formats:
11/01/2014 (MM/DD/YYYY)
01/11/2014 (DD/MM/YYYY)
:param formatting: string 's_mdy', 's_dmy', or 'long'
:param century: int delineating the century the GPS data is from (19 for 19XX, 20 for 20XX)
:return: date_string string with long or short format date
"""
        # Long Format January 1st, 2014
if formatting == 'long':
# Retrieve Month string from private set
month = self.__MONTHS[self.date[1] - 1]
# Determine Date Suffix
if self.date[0] in (1, 21, 31):
suffix = 'st'
elif self.date[0] in (2, 22):
suffix = 'nd'
            elif self.date[0] in (3, 23):
suffix = 'rd'
else:
suffix = 'th'
day = str(self.date[0]) + suffix # Create Day String
year = century + str(self.date[2]) # Create Year String
date_string = month + ' ' + day + ', ' + year # Put it all together
else:
# Add leading zeros to day string if necessary
if self.date[0] < 10:
day = '0' + str(self.date[0])
else:
day = str(self.date[0])
# Add leading zeros to month string if necessary
if self.date[1] < 10:
month = '0' + str(self.date[1])
else:
month = str(self.date[1])
# Add leading zeros to year string if necessary
if self.date[2] < 10:
year = '0' + str(self.date[2])
else:
year = str(self.date[2])
# Build final string based on desired formatting
if formatting == 's_dmy':
date_string = day + '/' + month + '/' + year
else: # Default date format
date_string = month + '/' + day + '/' + year
return date_string
# All the currently supported NMEA sentences
supported_sentences = {'GPRMC': gprmc, 'GLRMC': gprmc,
'GPGGA': gpgga, 'GLGGA': gpgga,
'GPVTG': gpvtg, 'GLVTG': gpvtg,
'GPGSA': gpgsa, 'GLGSA': gpgsa,
'GPGSV': gpgsv, 'GLGSV': gpgsv,
'GPGLL': gpgll, 'GLGLL': gpgll,
'GNGGA': gpgga, 'GNRMC': gprmc,
'GNVTG': gpvtg, 'GNGLL': gpgll,
'GNGSA': gpgsa,
}
if __name__ == "__main__":
pass
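# Hedged usage sketch (added for illustration, not part of the original module).
# The NMEA sentence below is the widely cited GGA example string; a real
# receiver would supply its own character stream instead.
#
#   my_gps = MicropyGPS()
#   for ch in '$GPGGA,123519,4807.038,N,01131.000,E,1,08,0.9,545.4,M,46.9,M,,*47\r\n':
#       if my_gps.update(ch):
#           print(my_gps.latitude_string(), my_gps.longitude_string())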
|
mit
| 6,360,984,072,224,029,000 | 34.455422 | 163 | 0.518792 | false |
codedsk/hubcheck
|
hubcheck/pageobjects/widgets/tools_status_remaining_steps.py
|
1
|
5784
|
from hubcheck.pageobjects.basepagewidget import BasePageWidget
from hubcheck.pageobjects.basepageelement import Link
from hubcheck.pageobjects.basepageelement import TextReadOnly
import re
class ToolsStatusRemainingSteps(BasePageWidget):
def __init__(self, owner, locatordict={}):
super(ToolsStatusRemainingSteps,self).__init__(owner,locatordict)
# load hub's classes
ToolsStatusRemainingSteps_Locators = \
self.load_class('ToolsStatusRemainingSteps_Locators')
# update this object's locator
self.locators.update(ToolsStatusRemainingSteps_Locators.locators)
# update the locators with those from the owner
self.update_locators_from_owner()
# setup page object's components
self.register = TextReadOnly(self,{'base':'register'})
self.upload = TextReadOnly(self,{'base':'upload'})
self.upload_done = Link(self,{'base':'upload_done'})
self.upload_howto = Link(self,{'base':'upload_howto'})
self.toolpage = TextReadOnly(self,{'base':'toolpage'})
self.toolpage_create = Link(self,{'base':'toolpage_create'})
self.toolpage_preview = Link(self,{'base':'toolpage_preview'})
self.toolpage_edit = Link(self,{'base':'toolpage_edit'})
self.test_approve = TextReadOnly(self,{'base':'test_approve'})
self.approve_it = Link(self,{'base':'approve_it'})
self.updated_approve = Link(self,{'base':'updated_approve'})
self.publish = TextReadOnly(self,{'base':'publish'})
self.updated_publish = Link(self,{'base':'updated_publish'})
# update the component's locators with this objects overrides
self._updateLocators()
def _checkLocatorsRegistered(self,widgets=None,cltype='Registered'):
widgets = [self.register, self.upload, self.toolpage,
self.test_approve, self.publish]
return self._checkLocators(widgets,cltype)
def _checkLocatorsCreated(self,widgets=None,cltype='Created'):
widgets = [self.register, self.upload, self.toolpage,
self.test_approve, self.publish]
return self._checkLocators(widgets,cltype)
def _checkLocatorsUploaded(self,widgets=None,cltype='Uploaded'):
widgets = [self.register, self.upload, self.toolpage,
self.test_approve, self.publish]
return self._checkLocators(widgets,cltype)
def _checkLocatorsInstalled(self,widgets=None,cltype='Installed'):
widgets = [self.register, self.upload, self.toolpage,
self.test_approve, self.updated_approve, self.publish]
return self._checkLocators(widgets,cltype)
def _checkLocatorsApproved(self,widgets=None,cltype='Approved'):
widgets = [self.register, self.upload, self.toolpage,
self.test_approve, self.publish, self.updated_publish]
return self._checkLocators(widgets,cltype)
def _checkLocatorsToolPageCreate(self,widgets=None,cltype='ToolPageCreate'):
widgets = [self.toolpage_create]
return self._checkLocators(widgets,cltype)
def _checkLocatorsToolPageCreated(self,widgets=None,cltype='ToolPageCreated'):
widgets = [self.toolpage_preview, self.toolpage_edit]
return self._checkLocators(widgets,cltype)
def _get_status(self,w):
obj = getattr(self,w)
class_attrs = obj.get_attribute('class').split()
for status in ['incomplete','complete','todo']:
if status in class_attrs:
break
else:
status = None
return status
def get_register_status(self):
return self._get_status('register')
def get_upload_status(self):
return self._get_status('upload')
def get_toolpage_status(self):
return self._get_status('toolpage')
def get_test_approve_status(self):
return self._get_status('test_approve')
def get_publish_status(self):
return self._get_status('publish')
def goto_toolpage_create(self):
return self.toolpage_create.click()
def goto_toolpage_preview(self):
return self.toolpage_preview.click()
def goto_toolpage_edit(self):
return self.toolpage_edit.click()
def goto_upload_done(self):
return self.upload_done.click()
def goto_upload_howto(self):
return self.upload_howto.click()
def goto_approve_tool(self):
return self.approve_it.click()
def goto_installed_update_tool(self):
return self.updated_approve.click()
def goto_approved_update_tool(self):
return self.updated_publish.click()
class ToolsStatusRemainingSteps_Locators_Base(object):
"""locators for ToolsStatusRemainingSteps object"""
locators = {
'base' : "css=#whatsnext",
'register' : "css=#whatsnext ul li:nth-of-type(1)",
'upload' : "css=#whatsnext ul li:nth-of-type(2)",
'upload_done' : "css=#Uploaded_ .flip",
'upload_howto' : "css=#whatsnext ul li:nth-of-type(2) > a.developer-wiki",
'toolpage' : "css=#whatsnext ul li:nth-of-type(3)",
'toolpage_create' : "css=#whatsnext .create-resource",
'toolpage_preview' : "css=#whatsnext .preview-resource",
'toolpage_edit' : "css=#whatsnext .edit-resource",
'test_approve' : "css=#whatsnext ul li:nth-of-type(4)",
'approve_it' : "css=#Approved_ .flip",
'updated_approve' : "css=#Updated_ .flip",
'publish' : "css=#whatsnext ul li:nth-of-type(5)",
'updated_publish' : "css=#Updated .flip",
}
|
mit
| -8,004,347,333,840,673,000 | 30.606557 | 87 | 0.62742 | false |
jheddings/indigo-netdev
|
test/test_arp.py
|
1
|
10584
|
#!/usr/bin/env python2.7
import logging
import unittest
import time
import arp
from uuid import getnode
# keep logging output to a minimum for testing
logging.basicConfig(level=logging.ERROR)
################################################################################
class ArpCacheTestBase(unittest.TestCase):
#---------------------------------------------------------------------------
def _getLocalMacAddress(self):
# TODO convert this to octets split by colons
return hex(getnode())
#---------------------------------------------------------------------------
def _buildTableFromCommand(self, cmd):
cache = arp.ArpCache(cmd=cmd)
cache.rebuildArpCache()
return cache
#---------------------------------------------------------------------------
def _buildTableFromLine(self, line):
cache = arp.ArpCache(cmd=None)
cache._updateCacheLine(line)
return cache
#---------------------------------------------------------------------------
def _buildTableFromLines(self, lines):
cache = arp.ArpCache(cmd=None)
cache._updateCacheLines(lines)
return cache
################################################################################
class ArpCacheContainerTest(unittest.TestCase):
#---------------------------------------------------------------------------
def test_GetNone(self):
cache = arp.ArpCache()
self.assertIsNone(cache['key'])
#---------------------------------------------------------------------------
def test_SetGetNone(self):
cache = arp.ArpCache()
cache['none'] = None
self.assertIsNone(cache['none'])
#---------------------------------------------------------------------------
def test_BasicSetGetString(self):
cache = arp.ArpCache()
cache['key'] = 'value'
self.assertEqual(cache['key'], 'value')
#---------------------------------------------------------------------------
def test_BasicSetGetInt(self):
cache = arp.ArpCache()
cache[1] = 42
self.assertEqual(cache[1], 42)
#---------------------------------------------------------------------------
def test_BasicSetGetMixed(self):
cache = arp.ArpCache()
cache['key'] = 42
cache[1] = 'value'
self.assertEqual(cache['key'], 42)
self.assertEqual(cache[1], 'value')
################################################################################
class BasicArpCommandUnitTest(ArpCacheTestBase):
#---------------------------------------------------------------------------
def test_NullTableUnitTest(self):
cache = self._buildTableFromCommand(None)
self.assertEqual(cache.getActiveDeviceCount(), 0)
#---------------------------------------------------------------------------
def test_DefaultTableUnitTest(self):
cache = arp.ArpCache()
cache.rebuildArpCache()
self.assertNotEqual(cache.getActiveDeviceCount(), 0)
#---------------------------------------------------------------------------
def test_BadReturnValue(self):
cache = self._buildTableFromCommand('/bin/false')
self.assertEqual(cache.getActiveDeviceCount(), 0)
#---------------------------------------------------------------------------
def test_EmptyTable(self):
cache = self._buildTableFromCommand('/bin/true')
self.assertEqual(cache.getActiveDeviceCount(), 0)
#---------------------------------------------------------------------------
def test_GarbageOutput(self):
self._buildTableFromCommand('dd if=/dev/urandom bs=1 count=1024')
################################################################################
class ArpTableParsingUnitTest(ArpCacheTestBase):
#---------------------------------------------------------------------------
def test_SimpleArpTableLowerCase(self):
mac = '20:c4:df:a0:54:28'
cache = self._buildTableFromLine(
'localhost (127.0.0.1) at %s on en0 ifscope [ethernet]' % mac
)
self.assertTrue(cache.isActive(mac))
self.assertTrue(cache.isActive(mac.upper()))
#---------------------------------------------------------------------------
def test_SimpleArpTableUpperCase(self):
mac = '20:C4:D7:A0:54:28'
cache = self._buildTableFromLine(
'localhost (127.0.0.1) at %s on en0 ifscope [ethernet]' % mac
)
self.assertTrue(cache.isActive(mac))
self.assertTrue(cache.isActive(mac.lower()))
#---------------------------------------------------------------------------
# seen using DD-WRT routers - slightly different than macOS
def test_RouterLineFormat(self):
mac = '30:2A:43:B2:01:2F'
cache = self._buildTableFromLines([
'DD-WRT v3.0-r29264 std (c) 2016 NewMedia-NET GmbH',
'? (10.0.0.1) at %s [ether] on br0' % mac
])
self.assertTrue(cache.isActive(mac))
#---------------------------------------------------------------------------
def test_MultilineBasicTable(self):
cache = self._buildTableFromLines([
'? (0.0.0.0) at 11:22:33:44:55:66 on en0 ifscope [ethernet]',
'? (0.0.0.0) at AA:BB:CC:DD:EE:FF on en0 ifscope [ethernet]',
'? (0.0.0.0) at 12:34:56:78:9A:BC on en0 ifscope [ethernet]'
])
self.assertTrue(cache.isActive('11:22:33:44:55:66'))
self.assertTrue(cache.isActive('AA:BB:CC:DD:EE:FF'))
self.assertTrue(cache.isActive('12:34:56:78:9A:BC'))
#---------------------------------------------------------------------------
def test_LeadingZerosInAddressOctects(self):
cache = self._buildTableFromLines([
'node (127.0.0.1) at 0:2a:43:4:b:51 on en0 ifscope [ethernet]',
'node (127.0.0.1) at 20:a2:04:b3:0c:ed on en0 ifscope [ethernet]'
])
self.assertTrue(cache.isActive('0:2a:43:4:b:51'))
self.assertTrue(cache.isActive('00:2a:43:04:0b:51'))
self.assertTrue(cache.isActive('20:a2:04:b3:0c:ed'))
self.assertTrue(cache.isActive('20:a2:4:b3:c:ed'))
################################################################################
class ArpTablePurgeUnitTest(ArpCacheTestBase):
#---------------------------------------------------------------------------
def test_BasicPurgeTest(self):
cache = arp.ArpCache(timeout=1, cmd=None)
now = time.time()
cache['current'] = now
cache['recent'] = now - 30
cache['expired'] = now - 61
cache.purgeExpiredDevices()
self.assertIn('current', cache.cache);
self.assertIn('recent', cache.cache);
self.assertNotIn('expired', cache.cache);
#---------------------------------------------------------------------------
def test_KeepFutureItems(self):
cache = arp.ArpCache(timeout=1, cmd=None)
now = time.time()
cache['soon'] = now + 10
cache['future'] = now + 300
cache.purgeExpiredDevices()
self.assertIn('soon', cache.cache);
self.assertIn('future', cache.cache);
################################################################################
class ArpTableUpdateTest(ArpCacheTestBase):
# this test relies on internals of the ArpCache, such as directly modifying
# the contents of the cache and the _update methods
arp_data = [
'localhost (127.0.0.1) at 01:23:45:67:89:ab on en0 ifscope [ethernet]',
'pc (192.168.0.1) at ef:cd:ab:12:34:56 on en0 ifscope [ethernet]',
'laptop (192.168.0.2) at ab:12:cd:34:ef:56 on en0 ifscope [ethernet]'
]
#---------------------------------------------------------------------------
def test_BasicUpdateTest(self):
cache = self._buildTableFromLines(self.arp_data)
self.assertTrue(cache.isActive('01:23:45:67:89:ab'));
self.assertTrue(cache.isActive('ef:cd:ab:12:34:56'));
self.assertTrue(cache.isActive('ab:12:cd:34:ef:56'));
#---------------------------------------------------------------------------
def test_BasicLineUpdateTest(self):
cache = arp.ArpCache(timeout=1, cmd=None)
cache._updateCacheLine(self.arp_data[0])
self.assertTrue(cache.isActive('01:23:45:67:89:ab'));
self.assertFalse(cache.isActive('ef:cd:ab:12:34:56'));
self.assertFalse(cache.isActive('ab:12:cd:34:ef:56'));
#---------------------------------------------------------------------------
def test_LineUpdateTimeShift(self):
cache = arp.ArpCache(timeout=1, cmd=None)
cache._updateCacheLine(self.arp_data[0])
first_time = cache['01:23:45:67:89:ab']
time.sleep(1)
cache._updateCacheLine(self.arp_data[0])
second_time = cache['01:23:45:67:89:ab']
self.assertGreater(second_time, first_time)
################################################################################
class ArpTableActiveExpiredUnitTest(ArpCacheTestBase):
# this test relies on internals of the ArpCache, such as directly modifying
# the contents of the cache and the _isExpired method
#---------------------------------------------------------------------------
def test_BasicExpirationTests(self):
cache = arp.ArpCache(timeout=1, cmd=None)
now = time.time()
self.assertFalse(cache._isExpired(now))
self.assertFalse(cache._isExpired(now - 30))
self.assertFalse(cache._isExpired(now - 59))
self.assertTrue(cache._isExpired(now - 60))
self.assertTrue(cache._isExpired(now - 500))
#---------------------------------------------------------------------------
def test_SimpleCurrentItemCheck(self):
cache = arp.ArpCache(timeout=1, cmd=None)
now = time.time()
cache['current'] = now
cache['recent'] = now - 30
self.assertTrue(cache.isActive('current'))
self.assertTrue(cache.isActive('recent'))
#---------------------------------------------------------------------------
def test_SimpleExpiredItemCheck(self):
cache = arp.ArpCache(timeout=1, cmd=None)
now = time.time()
# since we created the table with a 1-minute timeout...
cache['expired'] = now - 60
cache['inactive'] = now - 61
cache['ancient'] = now - 300
self.assertFalse(cache.isActive('expired'))
self.assertFalse(cache.isActive('inactive'))
self.assertFalse(cache.isActive('ancient'))
|
mit
| 3,271,104,529,743,581,000 | 35.496552 | 80 | 0.464286 | false |
SCECcode/BBP
|
bbp/comps/irikura_hf_cfg.py
|
1
|
4026
|
#!/usr/bin/env python
"""
Copyright 2010-2018 University Of Southern California
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import division, print_function
# Import Python modules
import sys
# Import Broadband modules
import bband_utils
class IrikuraHFCfg(object):
"""
Define the configuration parameters for the Irikura Receipe
Method 2 HF codes
"""
cfgdict = {}
vmodel = {}
def getval(self, attr):
try:
val = self.cfgdict[attr]
except KeyError:
print("Invalid Source File - Missing attribute: %s" % (attr))
print("Exiting")
sys.exit(1)
return val
def parse_src(self, a_srcfile):
"""
This function calls bband_utils' parse property file function
to get a dictionary of key, value pairs and then looks for the
parameters needed.
"""
self.cfgdict = bband_utils.parse_properties(a_srcfile)
val = self.getval("magnitude")
self.MAGNITUDE = float(val)
val = self.getval("fault_length")
self.LENGTH = float(val)
val = self.getval("fault_width")
self.WIDTH = float(val)
val = self.getval("depth_to_top")
self.DEPTH_TO_TOP = float(val)
val = self.getval("strike")
self.STRIKE = float(val)
val = self.getval("rake")
self.RAKE = float(val)
val = self.getval("dip")
self.DIP = float(val)
val = self.getval("lat_top_center")
self.LAT_TOP_CENTER = float(val)
val = self.getval("lon_top_center")
self.LON_TOP_CENTER = float(val)
val = self.getval("hypo_along_stk")
self.HYPO_ALONG_STK = float(val)
val = self.getval("hypo_down_dip")
self.HYPO_DOWN_DIP = float(val)
val = self.getval("seed")
self.SEED = int(val)
def parse_velmodel(self, a_velmodel):
"""
This function parses the velocity model file and stores the
data in the vmodel dictionary.
"""
# Initialize velocity model structure
self.vmodel = {'h': [],
'vp': [],
'vs': [],
'rho': [],
'qp': [],
'qs': []}
vel_file = open(a_velmodel, 'r')
for line in vel_file:
line = line.strip()
pieces = line.split()
if len(pieces) == 3:
self.nlay = float(pieces[0])
continue
# Skip lines without the 6 values
if len(pieces) != 6:
continue
pieces = [float(piece) for piece in pieces]
self.vmodel['h'].append(pieces[0])
self.vmodel['vp'].append(pieces[1])
self.vmodel['vs'].append(pieces[2])
self.vmodel['rho'].append(pieces[3])
self.vmodel['qp'].append(pieces[4])
self.vmodel['qs'].append(pieces[5])
vel_file.close()
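    # Editor's note (not part of the original BBP source): parse_velmodel above
    # assumes a velocity model file whose header line carries three fields (the
    # first one being the number of layers), followed by one line per layer with
    # six columns: thickness, vp, vs, rho, qp, qs. A purely hypothetical example:
    #   3  0.0  0.0
    #   0.5  1.8  0.5  2.0  50.0   25.0
    #   1.0  3.0  1.5  2.3  200.0  100.0
    #   0.0  6.0  3.5  2.7  800.0  400.0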
def __init__(self, a_srcname=None, a_velmodel=None):
"""
Set up parameters for the Irikura recipe
"""
if a_srcname and a_velmodel:
self.parse_src(a_srcname)
self.parse_velmodel(a_velmodel)
# Filter parameters
self.filter_order = 3
self.filter_flo = 1.0e+10
self.filter_fhi = 0.2
self.filter_phase = 0
if __name__ == "__main__":
IRIKURA_HF_CFG = IrikuraHFCfg(sys.argv[1])
print("Created Test Config Class: %s" % (sys.argv[0]))
|
apache-2.0
| -7,633,021,810,526,376,000 | 29.044776 | 73 | 0.568058 | false |
jimsrc/seatos
|
etc/etc/mix_indivs.py
|
1
|
7787
|
#!/usr/bin/env ipython
# -*- coding: utf-8 -*-
import shared.shared_funcs as sf
import shared.console_colors as ccl
import numpy as np
import argparse, os
from glob import glob
#--- figures
from pylab import figure, close
from matplotlib.gridspec import GridSpec
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.transforms as transforms
#--- funcs
def get_units(obs):
if any([_nm in obs for _nm in ('B')]) and \
('rmsBoB' not in obs) and \
('ratio' not in obs):
return '[nT]'
elif any([_nm in obs for _nm in ('V')]):
return '[km/s]'
elif any([_nm in obs for _nm in ('beta','rmsBoB','ratio')]):
return '[1]'
else:
return '[?]'
def get_meta(fnm):
meta = {} # meta info
for line in open(fnm,'r').readlines():
if not line.startswith('#'):
continue
if line.split()[1]=='dt':
meta['dt'] = float(line.split()[-1])
if line.split()[1]=='ini':
# grab all after the first ':', and clear the '\n'
meta['ini'] = line[-line[::-1].rfind(':')+1:-1]
return meta
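# Editor's note (not part of the original script): get_meta above expects header
# lines of the form "# dt : <number>" and "# ini : <timestamp>", for example
# (values purely illustrative):
#   # dt : 12.5
#   # ini : 2005-05-15 02:39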
#--- retrieve args
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="""
Script to mix together time profiles of different structures
one next to the other. For ex., put the profiles of MCs next to the profiles of sheaths.
Finally, we produce figures of this mixed profiles.
""",
)
parser.add_argument(
'-left', '--left',
type=str,
default='./test',
help='input directory for left part',
)
parser.add_argument(
'-right', '--right',
type=str,
default='./test',
help='input directory for right part',
)
parser.add_argument(
'-plot', '--plot',
type=str,
default='./figs'
)
parser.add_argument(
'-obs', '--obs',
type=str,
nargs='+',
default=['Bmag.ACE1sec','rmsB.ACE1sec'],
help="""
keynames of the variables to extract.
For ACE, use:
B, rmsB, rmsBoB, V, beta, Pcc, Temp, AlphaRatio.
For Auger_..., use:
CRs.
""",
)
parser.add_argument(
'-ba', '--BefAft',
type=int,
nargs=2,
default=[0,0],
help="""
Fractions of the extraction time-span in units of the time-width
of the structure. These fractions refer to before and
    after the leading and trailing border respectively.
    Must be integers.
""",
)
pa = parser.parse_args()
print "----------------------------"
#--- check
#for idir in (pa.left, pa.right):
for obs in pa.obs:
print " > checking: "+obs
# observable name must be between '_' characters
fnm_ls_left = glob(pa.left +'/*_%s_*.txt'%obs)
fnm_ls_right = glob(pa.right+'/*_%s_*.txt'%obs)
n_left, n_right = len(fnm_ls_left), len(fnm_ls_right)
assert n_left>0 and n_right>0,\
"""
we have no files in left-dir (N:{n_left}) or
right-dir (N:{n_right}), with the pattern '*_{pattern}_*.txt'
""".format(n_left=n_left, n_right=n_right, pattern=obs)
#assert len(fnm_ls)==1,\
#"""
#There must be ONLY ONE file in '{dir}' for
#each observable.
#This wasn't satisfied for '{obs}', as we have:\n {flist}
#""".format(
# dir=idir,
# obs=obs,
# flist=reduce(lambda x, y: x+y, [' '+_f+'\n' for _f in fnm_ls]),
#)
#--- all IDs of events w/ some data
id_events = []
#--- grab data from left & right :-)
buff = sf.dummy2()
for obs in pa.obs:
# list of files on the right
fnm_ls_right = glob(pa.right+'/*_%s_*.txt'%obs)
# buffers for left and right
le, ri = buff[obs].le, buff[obs].ri = sf.dummy2(), sf.dummy2()
buff[obs].units = get_units(obs)
# iterate over files, for each observable
for fnm_left in glob(pa.left+'/*_%s_*.txt'%obs):
# list of base-filenames on the right
fnms_right_base = [_f.split('/')[-1] for _f in fnm_ls_right]
# find match on the right
if fnm_left.split('/')[-1] in fnms_right_base:
fnm_right = pa.right +'/'+ fnm_left.split('/')[-1] # right match for this left
print fnm_right.split('/')[-1]
assert os.path.isfile(fnm_right)
# event id
ind_id = fnm_left.split('/')[-1].rfind('id.')+3
id = int(fnm_left.split('/')[-1][ind_id:ind_id+3]) # [int]
if id not in id_events: id_events.append(id) # collect
# get data
le[id].t, le[id].data = np.loadtxt(fnm_left, unpack=True)
ri[id].t, ri[id].data = np.loadtxt(fnm_right, unpack=True)
# get widths
meta = get_meta(fnm_left)
le[id].dt = meta['dt']
le[id].ini = meta['ini']
meta = get_meta(fnm_right)
ri[id].dt = meta['dt']
ri[id].ini = meta['ini']
print "--> finished i/o"
nr = 1 # scale for row size
tfac = 24. # '1' to show in days
nobs = len(pa.obs)
#--- ratio of (right-width)/(left-ratio)
opt = {
'ms' : 3,
'mec' : 'none',
}
for id in id_events:
ok = True
# this event-id must have data in ALL observables
for obs in pa.obs:
ok &= id in buff[obs].le.keys() and id in buff[obs].ri.keys()
ok &= ~np.isnan(np.nanmean(buff[obs].le[id].data)) # any valid data?
ok &= ~np.isnan(np.nanmean(buff[obs].ri[id].data)) # any valid data?
if not ok:
continue # next event-id
dt_left = buff[obs].le[id].dt
dt_right = buff[obs].ri[id].dt
# x-limits for plot
xlim = -dt_left, dt_left+2.*dt_right
# new fig
#fig = figure(1, figsize=(6,4))
fig = plt.figure(1, figsize=(9, 10))
gs = GridSpec(nrows=6*nr, ncols=2)
gs.update(left=0.1, right=0.98, hspace=0.13, wspace=0.15)
print id
for obs, io in zip(pa.obs, range(nobs)):
le, ri = buff[obs].le[id], buff[obs].ri[id]
#ax = fig.add_subplot(nobs, 1, io+1)
ax = plt.subplot(gs[io*nr:(io+1)*nr, 0:2])
trans = transforms.blended_transform_factory(
ax.transData, ax.transAxes)
#--- left
t, data = le.t, le.data
# only plot what is BEFORE 'left' structure ends
cc = t<=dt_left
_max_le = np.nanmax(data[(cc)&(t>xlim[0])])
_min_le = np.nanmin(data[(cc)&(t>xlim[0])])
ax.plot(tfac*t[cc], data[cc], '-ok', **opt)
#--- right
t, data = ri.t, ri.data
# only plot what is AFTER 'right' structure begins
cc = t>=dt_left
_max_ri = np.nanmax(data[(cc)&(t<xlim[1])])
_min_ri = np.nanmin(data[(cc)&(t<xlim[1])])
ax.plot(tfac*t[cc], data[cc], '-ok', **opt)
#--- shade for left
rect1 = patches.Rectangle((0., 0.), width=tfac*dt_left, height=1,
transform=trans, color='orange',
alpha=0.3)
ax.add_patch(rect1)
#--- shade for right
rect1 = patches.Rectangle((tfac*dt_left, 0.), width=tfac*dt_right, height=1,
transform=trans, color='blue',
alpha=0.3)
ax.add_patch(rect1)
#--- deco
ax.set_xlim(np.min([tfac*xlim[0],-tfac*1.]), tfac*xlim[1])
ax.set_ylim(top=np.max([_max_le,_max_ri]))
ax.grid(True)
ax.set_ylabel(obs.split('.')[0]+' '+buff[obs].units)
# ticks & labels x
if io+1==nobs: #n==nvars-1:
#ax.set_xlabel('time normalized to\nsheath/MC passage [1]', fontsize=11)
ax.set_xlabel('days since shock\n(%s)'%le.ini)
else:
ax.set_xlabel('')
#ax.get_xaxis().set_ticks([])
ax.xaxis.set_ticklabels([])
if obs.split('.')[0] in ('rmsB','rmsB_ratio','beta'):
#import pdb; pdb.set_trace()
ax.set_ylim(bottom=np.max([1e-3,np.min([_min_le,_min_ri])]))
ax.set_yscale('log')
#--- save
fname_fig = pa.plot+'/test_%03d.png'%id
fig.savefig(fname_fig, dpi=100, bbox_inches='tight')
close(fig)
#EOF
|
mit
| 5,169,129,289,202,146,000 | 30.526316 | 90 | 0.562476 | false |
alexmoratalla/yambopy
|
yambopy/bse/bse_absorption.py
|
2
|
9973
|
# Copyright (c) 2018, Henrique Pereira Coutada Miranda
#
# All rights reserved.
#
# This file is part of the yambopy project
#
"""
This file contains a class to analyse the BSE absorption spectra
The features to be provided include:
1. Read the BSE diago file and plot chi including excitonic effects
2. Obtain the excitonic states energy, degeneracies and optical intensity
3. Create flows to run ypp and obtain the excitonic wavefunctions
4. Produce a json file that can be used in the excitonwebsite
Long term:
1. For the same exciton store multiple WFs changing the hole position
To initialize this structure we require instances of:
- YamboExcitonDB (which already requires YamboSaveDB)
The json format produced by this class for the excitonwebsite is:
structure stuff
-lattice
-atoms
-atypes
bse stuff
-eps - Absorption spectra
-eel - Electron energy loss spectra
-nx,ny,nz - the dimensions of the datagrid
-datagrid
-excitons - list of excitons to show
each exciton entry contains:
-datagrid
-intensity
-index
-energy
"""
from __future__ import print_function, division
import os
from yambopy.tools.string import marquee
from yambopy import *
class YamboBSEAbsorptionSpectra():
"""
Create a file with information about the excitons from Yambo files
"""
def __init__(self,excitondb):
"""
Parameters:
excitondb - an instance of the excitonDB class
"""
self.excitondb = excitondb
@classmethod
def from_folder(cls,folder):
raise NotImplementedError('from_folder not implemented yet')
#try to find a excitonDB class
#initialize this class
return cls
def get_excitons(self,min_intensity=0.1,max_energy=4,eps=1e-4):
"""
Obtain the excitons using ypp
Parameters:
min_intensity - Only plot excitons with intensity larger than this value (default: 0.1)
max_energy - Only plot excitons with energy below this value (default: 4 eV)
            Degen_Step - Only plot excitons whose energy is different by more than this value (default: 0.0)
"""
excitons = self.excitondb.get_nondegenerate(eps=eps)
#filter with energy
excitons = excitons[excitons[:,0]<max_energy]
#filter with intensity
excitons = excitons[excitons[:,1]>min_intensity]
#filter with degen
if Degen_Step:
#create a list with differences in energy
new_excitons = []
prev_exc = 0
for exc in self.excitons:
e,i,index = exc
                #if the energy of this exciton is too different then we add it to the list
if abs(e-prev_exc)<Degen_Step:
new_excitons[-1][1] += i
continue
new_excitons.append([e,i,index])
intensity = 0
prev_exc = e
self.excitons = np.array(new_excitons)
#create dictionary with excitons
excitons = self.data["excitons"]
for e,intensity,i in self.excitons:
exciton = {"energy": e,
"intensity": intensity,
"index": i}
excitons.append(exciton)
return self.excitons
def get_wavefunctions(self, FFTGvecs=30,
Cells=[1,1,1], Hole=[0,0,0],
Direction="123", Format="x",
Degen_Step=0.0100,
MinWeight=1e-8,
repx=list(range(-1,2)), repy=list(range(-1,2)), repz=list(range(-1,2)),
wf=False):
"""
        Collect all the wavefunctions with an intensity larger than self.threshold
Parameters:
FFTGvecs - Number of FFTGvecs. Related to how accurate the representation is
Cells - Number of cells to plot in real space
Hole - Define the hole position in cartesian coordinates
Direction - Choose the directions to plot along
Format - Choose the format to plot in. Can be: x for xcrysden or g for gnuplot (default: 'x' for xcrysden)
Degen_Step - Threshold to merge degenerate states. If the difference in energy between the states is smaller than
this value their wavefunctions will be plotted together
            repx - Number of repetitions along the x direction
            repy - Number of repetitions along the y direction
            repz - Number of repetitions along the z direction
            wf - Get the wavefunctions in real space or not (default: False)
"""
if self.excitons is None:
raise ValueError( "Excitons not present. Run YamboBSEAbsorptionSpectra.get_excitons() first" )
self.data["excitons"] = []
#create a ypp file using YamboIn for reading the wavefunction
yppwf = YamboIn('ypp -e w -V all',filename='ypp.in',folder=self.path)
yppwf['Format'] = Format
yppwf['Direction'] = Direction
yppwf['FFTGvecs'] = [FFTGvecs,'Ry']
yppwf['Degen_Step'] = [Degen_Step,'eV']
yppwf['Hole'] = [Hole,'']
yppwf['Cells'] = [Cells,'']
#create a ypp file using YamboIn for reading the excitonic weights
yppew = YamboIn('ypp -e a',filename='ypp.in',folder=self.path)
yppew['MinWeight'] = MinWeight
yppew['Degen_Step'] = Degen_Step
keywords = ["lattice", "atoms", "atypes", "nx", "ny", "nz"]
for exciton in self.excitons:
#get info
e,intensity,i = exciton
if wf:
##############################################################
# Excitonic Wavefunction
##############################################################
#create ypp input for the wavefunction file and run
yppwf["States"] = "%d - %d"%(i,i)
yppwf.write("%s/yppwf_%d.in"%(self.path,i))
filename = "o-%s.exc_%dd_%d%s"%(self.job_string,len(Direction),i,{"g":"","x":".xsf"}[Format] )
print(filename)
if not os.path.isfile(filename):
os.system("cd %s; ypp -F yppwf_%d.in -J %s"%(self.path,i,self.job_string))
#read the excitonic wavefunction
if Format == 'x':
ewf = YamboExcitonWaveFunctionXSF()
else:
ewf = YamboExcitonWaveFunctionGnuplot()
ewf.read_file("%s/%s"%(self.path,filename))
data = ewf.get_data()
for word in keywords:
if word in data:
self.data[word] = data[word]
#calculate center of mass of atoms
lat = np.array(data["lattice"])
center_atom = np.zeros([3])
for atype,x,y,z in data["atoms"]:
center_atom += np.array([x,y,z])
center_atom /= len(data["atoms"])
center_atom_red = car_red([center_atom],lat)[0]
#shift wavefunctions grid to center of mass
nx = data['nx']
ny = data['ny']
nz = data['nz']
#make center_atom_red commensurate with fft
center_atom_red = center_atom_red * np.array([nx,ny,nz])
center_atom_red_int = [int(x) for x in center_atom_red]
displacement = np.array([nx,ny,nz])/2-center_atom_red_int
dx,dy,dz = displacement
# shift grid
# http://www.xcrysden.org/doc/XSF.html
dg = np.array(data["datagrid"]).reshape([nz,ny,nx])
dg = np.roll(dg,dx,axis=2)
dg = np.roll(dg,dy,axis=1)
dg = np.roll(dg,dz,axis=0)
data["datagrid"] = dg.flatten()
#shift atoms
atoms = []
dx,dy,dz = red_car([displacement/np.array([nx,ny,nz],dtype=float)],lat)[0]
for atype,x,y,z in data["atoms"]:
atoms.append([atype,x+dx,y+dy,z+dz])
self.data["atoms"] = atoms
##############################################################
# Excitonic Amplitudes
##############################################################
#create ypp input for the amplitudes file and run
yppew["States"] = "%d - %d"%(i,i)
yppew.write("%s/yppew_%d.in"%(self.path,i))
filename = "%s/o-%s.exc_weights_at_%d"%(self.path,self.job_string,i)
if not os.path.isfile(filename):
os.system("cd %s; ypp -F yppew_%d.in -J %s"%(self.path,i,self.job_string))
#read the excitonic weigths
ew = YamboExcitonWeight(filename,save=self.save,path=self.path)
qpts, weights = ew.calc_kpts_weights(repx=repx,repy=repy,repz=repz)
############
# Save data
############
exciton = {"energy": e,
"intensity": intensity,
"weights": weights,
"qpts": qpts,
"index": i}
if wf:
exciton["hole"] = Hole
exciton["datagrid"] = np.array(data["datagrid"])
self.data["excitons"].append(exciton)
def plot(self):
pass
def write_json(self,filename="absorptionspectra",verbose=0):
"""
Write a jsonfile with the absorption spectra
and the wavefunctions of certain excitons
"""
JsonDumper(self.data,"%s.json"%filename)
def __str__(self):
lines = []; app = lines.append
app(marquee(self.__class__.__name__))
app(str(self.excitondb.get_string(mark='-')))
return "\n".join(lines)
|
bsd-3-clause
| -2,864,162,447,705,877,000 | 38.418972 | 123 | 0.534343 | false |
Ealdor/pypbp
|
src/init.py
|
1
|
3068
|
# -*- coding: utf-8 -*-
###############################################################################
## Copyright (C) 2014 Jorge Zilbermann [email protected]
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
import json
import sys
import string
import pygame
from cells import *
import constants
def init_pygame():
""" Función inicializar los modulos de pygame necesarios """
pygame.font.init()
def init_puzzle(fname, ones = 0):
""" Inicializa el tablero desde un archivo pasado
Args:
fname(string): nombre del fichero
Returns:
ncolumns(string): numero de columnas del puzzle
nrows(string): numero de filas del puzzle
table(list): tabla del puzzle
"""
	# OPEN THE FILE
try:
f = open(fname, 'r')
except IOError:
print "File not found"
		sys.exit()
typef = fname.rsplit('.')[-1]
	# COUNT COLUMNS AND ROWS
if typef == 'csv':
contador = 0
ncolumns = len(string.split(string.strip(f.readline()), ','))
f.seek(0)
for linea in f.xreadlines( ): contador+= 1
nrows = contador
elif typef == 'json':
contador = 0
data = json.load(f)
for row in xrange(len(data)):
for col in xrange(len(data[row])):
contador += 1
break
nrows = len(data)
ncolumns = contador
f.seek(0)
	# TABLE INITIALIZATION
table = [[Cell(x*CELL_WIDTH, y*CELL_WIDTH, 0) for y in xrange(0, int(nrows))] for x in xrange(0, int(ncolumns))]
	# PARSE THE FILES
if typef == 'csv': # CSV
for x in xrange(0, int(nrows)):
num = string.split(string.strip(f.readline()), ',')
for y in xrange(0, int(ncolumns)):
tn = int(string.split(num.pop(0), ',')[0])
if tn != 0:
table[y][x] = Cell(y*CELL_WIDTH, x*CELL_WIDTH, tn, BLACK)
if ones == 1 and tn == 1:
table[y][x].background_color = BLACK
table[y][x].number_color = WHITE
table[y][x].connections.append(table[y][x])
elif typef == 'json': # JSON
data = json.load(f)
for row in xrange(len(data)):
for col in xrange(len(data[row])):
value = data[row][col]["number"]
c = data[row][col]["color"]
colour = [c["r"], c["g"], c["b"]]
table[col][row] = Cell(col*CELL_WIDTH, row*CELL_WIDTH, value, colour)
if ones == 1 and value == 1:
table[col][row].background_color = colour
table[col][row].number_color = WHITE
table[col][row].connections.append(table[col][row])
f.close()
return ncolumns, nrows, table
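# Editor's note (not part of the original file): for the CSV branch above, the
# expected layout is one comma-separated row of integers per puzzle row, with 0
# marking an empty cell. A hypothetical 3x3 puzzle:
#   0,2,0
#   3,0,0
#   0,0,2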
|
gpl-3.0
| 3,273,599,677,606,871,000 | 30.628866 | 113 | 0.622432 | false |
ocr-doacao/ocr
|
ocrDoacao/models/imagem.py
|
1
|
1747
|
#!/usr/bin/python
# -*- coding: UTF8 -*-
import os
from django.db import models
import shutil
from hashlib import md5
from uuid import uuid4
from datetime import datetime
from .ong import Ong
from django.core.files.storage import default_storage
from django.core.files.base import ContentFile
from django.conf import settings
from django.db import IntegrityError
class Imagem(models.Model):
PREPROCESSAMENTO = 1
PROCESSAMENTO = 2
PROCESSADO = 3
ESTADO_CHOICES = (
(PREPROCESSAMENTO, 'Preprocessamento'),
(PROCESSAMENTO, 'Processamento'),
(PROCESSADO, 'Processado'),
)
ong = models.ForeignKey(Ong)
path = models.CharField(max_length=255)
md5 = models.CharField(max_length=32, unique=True)
hora_envio = models.DateTimeField(auto_created=True)
estado = models.PositiveSmallIntegerField(choices=ESTADO_CHOICES, default=PREPROCESSAMENTO)
def save(self, fd, ong):
print "\n\n"
mname = __name__.split(".")[0]
fname, fext = os.path.splitext(fd.name)
content = fd.read()
self.md5 = md5(content).hexdigest()
self.ong = ong
self.hora_envio = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
self.path = os.path.join(str(uuid4()))
complete_path = os.path.join(mname, "static", "ongs", ong.nome, self.path)
os.makedirs(complete_path, 0744)
self.path = os.path.join(self.path, "img" + fext)
path = default_storage.save(os.path.join(complete_path, "img" + fext), ContentFile(content))
os.path.join(settings.MEDIA_ROOT, path)
try:
return super(Imagem, self).save()
except IntegrityError as e:
shutil.rmtree(complete_path)
raise e
|
apache-2.0
| 6,614,184,241,457,169,000 | 33.27451 | 100 | 0.654264 | false |
Jitsusama/lets-do-dns
|
lets_do_dns/arguments.py
|
1
|
1282
|
"""Validates CLI Arguments."""
import argparse
class Arguments(object):
"""Parses Passed Arguments."""
def __init__(self, arguments):
parser = argparse.ArgumentParser(
description=self._description, epilog=self._epilog)
parser.parse_args(arguments[1:])
@property
def _description(self):
return '''\
Perform ACME DNS01 authentication for the EFF's certbot program.
The DNS01 authentication record will be created via DigitalOcean's
REST API.'''
@property
def _epilog(self):
return '''\
This program requires the presence of the CERTBOT_DOMAIN and
CERTBOT_VALIDATION environment variables. These should be supplied by
the certbot program when this program is called via its
--manual-auth-hook or --manual-cleanup-hook arguments.
This program also requires the presence of the DO_APIKEY and
DO_DOMAIN environment variables. These have to be provided via the
environment that certbot is executed from.
DO_APIKEY refers to a DigitalOcean API key generated through its API
control panel. This key should have read and write access to your
DigitalOcean account.
DO_DOMAIN refers to which domain under your DigitalOcean account will
function as the root of the certbot SSL certificate authentication
request.'''
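# Editor's illustrative sketch (not part of the original module): the class is
# normally handed sys.argv, and everything after the program name is parsed.
if __name__ == '__main__':
    import sys
    Arguments(sys.argv)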
|
apache-2.0
| 6,260,953,681,800,066,000 | 31.05 | 69 | 0.74883 | false |
Seraf/LISA
|
lisa/server/web/manageplugins/models.py
|
1
|
1901
|
from mongoengine import *
from lisa.server.web.weblisa.settings import DBNAME
connect(DBNAME)
class Description(EmbeddedDocument):
lang = StringField(max_length=2)
description = StringField()
class Plugin(DynamicDocument):
name = StringField(max_length=120, required=True, help_text='Name of the plugin')
lang = ListField(StringField(max_length=2), help_text="List of supported languages : ['all','WebSocket']")
enabled = BooleanField(help_text="Boolean to know if the plugin is enabled or not")
version = StringField(help_text="The version number of the plugin")
description = ListField(EmbeddedDocumentField(Description), help_text="Contains a description of the plugin")
configuration = DictField(help_text="Configuration dictionnary of the plugin")
meta = {
'collection': 'plugins',
'allow_inheritance': False
}
class Intent(DynamicDocument):
plugin = ReferenceField(Plugin, reverse_delete_rule=CASCADE)
name = StringField(required=True, help_text="Name of the intent (whitespaces are _ ). Ex: core_intents_list")
module = StringField(required=True, help_text="The path to the module including the class name. Ex: core.intents.Intents")
function = StringField(required=True, help_text="The function name. Ex: list")
enabled = BooleanField(default=False, help_text="Boolean to know if the intent is enabled or not")
meta = {
'collection': 'intents',
'allow_inheritance': False
}
class Rule(DynamicDocument):
plugin = ReferenceField(Plugin, reverse_delete_rule=CASCADE)
enabled = BooleanField()
meta = {
'collection': 'rules',
'allow_inheritance': False
}
class Cron(DynamicDocument):
plugin = ReferenceField(Plugin, reverse_delete_rule=CASCADE)
enabled = BooleanField()
meta = {
'collection': 'crons',
'allow_inheritance': False
}
|
mit
| -7,481,705,972,571,776,000 | 41.266667 | 126 | 0.703314 | false |
h3/django-unsocial
|
setup.py
|
1
|
3057
|
#!/usr/bin/env python
from distutils.core import setup
from distutils.command.install_data import install_data
from distutils.command.install import INSTALL_SCHEMES
import os, sys, setuptools
class osx_install_data(install_data):
# On MacOS, the platform-specific lib dir is /System/Library/Framework/Python/.../
# which is wrong. Python 2.5 supplied with MacOS 10.5 has an Apple-specific fix
# for this in distutils.command.install_data#306. It fixes install_lib but not
# install_data, which is why we roll our own install_data class.
def finalize_options(self):
# By the time finalize_options is called, install.install_lib is set to the
# fixed directory, so we set the installdir to install_lib. The
# install_data class uses ('install_data', 'install_dir') instead.
self.set_undefined_options('install', ('install_lib', 'install_dir'))
install_data.finalize_options(self)
if sys.platform == "darwin":
cmdclasses = {'install_data': osx_install_data}
else:
cmdclasses = {'install_data': install_data}
def fullsplit(path, result=None):
"""
Split a pathname into components (the opposite of os.path.join) in a
platform-neutral way.
"""
if result is None:
result = []
head, tail = os.path.split(path)
if head == '':
return [tail] + result
if head == path:
return result
return fullsplit(head, [tail] + result)
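# Editor's illustrative example (not part of the original setup.py):
#   fullsplit('unsocial/templates/base.html') == ['unsocial', 'templates', 'base.html']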
# Tell distutils to put the data_files in platform-specific installation
# locations. See here for an explanation:
# http://groups.google.com/group/comp.lang.python/browse_thread/thread/35ec7b2fed36eaec/2105ee4d9e8042cb
for scheme in INSTALL_SCHEMES.values():
scheme['data'] = scheme['purelib']
# Compile the list of packages available, because distutils doesn't have
# an easy way to do this.
packages, data_files = [], []
root_dir = os.path.dirname(__file__)
if root_dir != '':
os.chdir(root_dir)
for dirpath, dirnames, filenames in os.walk('unsocial'):
# Ignore dirnames that start with '.'
for i, dirname in enumerate(dirnames):
if dirname.startswith('.'): del dirnames[i]
if '__init__.py' in filenames:
packages.append('.'.join(fullsplit(dirpath)))
elif filenames:
data_files.append([dirpath, [os.path.join(dirpath, f) for f in filenames]])
# Small hack for working with bdist_wininst.
# See http://mail.python.org/pipermail/distutils-sig/2004-August/004134.html
if len(sys.argv) > 1 and sys.argv[1] == 'bdist_wininst':
for file_info in data_files:
file_info[0] = '\\PURELIB\\%s' % file_info[0]
version = __import__('unsocial').VERSION
setup(
name = 'django-unsocial',
version = version.replace(' ', '-'),
description = 'Django-unsocial, Middleware to remove social site widgets while developing',
author = 'Maxime Haineault',
author_email = '[email protected]',
cmdclass = cmdclasses,
data_files = data_files,
url = 'http://www.python.org/sigs/distutils-sig/',
packages = packages,
scripts = [],
)
|
bsd-3-clause
| -2,120,861,813,325,233,000 | 36.280488 | 104 | 0.679097 | false |
triagemd/tensorflow-serving-cluster
|
tensorflow_serving_cluster/k8s/service.py
|
1
|
5252
|
import os
import json
import kubernetes as k8s
from ..utils import flatten
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError
class KubernetesService(object):
CONFIG_KEYS = [
'namespace',
'deployment_name',
'service_name',
'app_name',
'replicas',
'container_name',
'container_image',
'container_port',
'service_port',
'service_type',
'environment',
]
def __init__(self, config):
if os.environ.get('LOCAL_SERVING') is None:
try:
k8s.config.load_kube_config()
except FileNotFoundError:
k8s.config.load_incluster_config()
self.k8s_api = k8s.client.CoreV1Api()
self.k8s_api_beta = k8s.client.ExtensionsV1beta1Api()
missing_keys = []
for key in self.CONFIG_KEYS:
if key not in config:
missing_keys.append(key)
if len(missing_keys) > 0:
raise ValueError('missing %s from deployment/service config' % (', '.join(missing_keys), ))
for key, value in config.items():
if key in self.CONFIG_KEYS:
self.__dict__[key] = value
def up(self):
environment = self.environment
# Filter the environment specs by available secrets
environment_by_name = {}
env_secrets = {}
for e in environment:
key = e.get('valueFrom', {}).get('secretKeyRef', {}).get('key')
if key:
env_secrets[key] = e
environment_by_name[e['name']] = e
secrets = self.k8s_api.list_namespaced_secret(self.namespace)
secrets = flatten([secret.data.keys() for secret in secrets.items])
secrets = set.intersection(set(env_secrets.keys()), set(secrets))
non_secrets = set(environment_by_name.keys()) - set(env_secrets.keys())
env_keys = list(secrets) + list(non_secrets)
environment = [environment_by_name[key] for key in env_keys]
# Create namespace if necessary
try:
namespace = k8s.client.V1Namespace()
namespace.metadata = k8s.client.V1ObjectMeta(name=self.namespace)
self.k8s_api.create_namespace(body=namespace)
except k8s.client.rest.ApiException as e:
body = json.loads(e.body)
if body.get('reason') != 'AlreadyExists':
raise
# Build the deployment specs
deployment = k8s.client.ExtensionsV1beta1Deployment()
deployment.api_version = 'extensions/v1beta1'
deployment.kind = 'Deployment'
deployment.metadata = k8s.client.V1ObjectMeta(name=self.deployment_name)
spec = k8s.client.ExtensionsV1beta1DeploymentSpec()
spec.replicas = self.replicas
spec.template = k8s.client.V1PodTemplateSpec()
spec.template.metadata = k8s.client.V1ObjectMeta(labels={'app': self.app_name})
spec.template.spec = k8s.client.V1PodSpec()
# Build the container specs
container = k8s.client.V1Container()
container.name = self.container_name
container.image = self.container_image
container.ports = [k8s.client.V1ContainerPort(container_port=self.container_port)]
container.env = environment
spec.template.spec.containers = [container]
deployment.spec = spec
# Build the service specs
service = k8s.client.V1Service()
service.api_version = 'v1'
service.kind = 'Service'
service.metadata = k8s.client.V1ObjectMeta(name=self.service_name)
spec = k8s.client.V1ServiceSpec()
spec.selector = {'app': self.app_name}
spec.type = self.service_type
spec.ports = [
k8s.client.V1ServicePort(protocol='TCP', port=self.service_port, target_port=self.container_port),
]
service.spec = spec
try:
self.k8s_api_beta.create_namespaced_deployment(namespace=self.namespace, body=deployment)
self.k8s_api.create_namespaced_service(namespace=self.namespace, body=service)
except k8s.client.rest.ApiException as e:
body = json.loads(e.body)
if body.get('reason') != 'AlreadyExists':
raise
self.k8s_api_beta.patch_namespaced_deployment(self.deployment_name, namespace=self.namespace, body=deployment)
self.k8s_api.patch_namespaced_service(self.service_name, namespace=self.namespace, body=service)
def down(self):
try:
self.k8s_api.delete_namespaced_service(name=self.service_name, namespace=self.namespace)
except k8s.client.rest.ApiException as e:
body = json.loads(e.body)
if body.get('reason') != 'NotFound':
raise
try:
self.k8s_api_beta.delete_namespaced_deployment(
name=self.deployment_name,
namespace=self.namespace,
body=k8s.client.V1DeleteOptions(propagation_policy='Foreground', grace_period_seconds=5)
)
except k8s.client.rest.ApiException as e:
body = json.loads(e.body)
if body.get('reason') != 'NotFound':
raise
|
mit
| 3,461,963,857,653,558,300 | 36.514286 | 122 | 0.605674 | false |
zapier/django-pipeline
|
pipeline/storage.py
|
1
|
4914
|
from __future__ import unicode_literals
import os
from django.contrib.staticfiles import finders
from django.contrib.staticfiles.storage import CachedFilesMixin, StaticFilesStorage
from django.core.exceptions import ImproperlyConfigured
from django.core.files.storage import get_storage_class
from django.utils.functional import LazyObject
from pipeline.conf import settings
class PipelineMixin(object):
packing = True
def post_process(self, paths, dry_run=False, **options):
if dry_run:
return []
from pipeline.packager import Packager
packager = Packager(storage=self)
for package_name in packager.packages['css']:
package = packager.package_for('css', package_name)
if self.packing:
paths_written = packager.pack_stylesheets(package)
for path in paths_written:
paths[path] = (self, path)
else:
# TODO: bcooksey 5/15/13. Not sure why we pretend we packed if packing is false...will this mess up source maps
output_file = package.output_filename
paths[output_file] = (self, output_file)
for package_name in packager.packages['js']:
package = packager.package_for('js', package_name)
if self.packing:
paths_written = packager.pack_javascripts(package)
for path in paths_written:
paths[path] = (self, path)
else:
# TODO: bcooksey 5/15/13. Not sure why we pretend we packed if packing is false...will this mess up source maps
output_file = package.output_filename
paths[output_file] = (self, output_file)
super_class = super(PipelineMixin, self)
if hasattr(super_class, 'post_process'):
return super_class.post_process(paths, dry_run, **options)
return [
(path, path, True)
for path in paths
]
def get_available_name(self, name):
if self.exists(name):
self.delete(name)
return name
class NonPackagingMixin(object):
packing = False
class PipelineStorage(PipelineMixin, StaticFilesStorage):
pass
class NonPackagingPipelineStorage(NonPackagingMixin, PipelineStorage):
pass
class PipelineCachedStorage(PipelineMixin, CachedFilesMixin, StaticFilesStorage):
pass
class NonPackagingPipelineCachedStorage(NonPackagingMixin, PipelineCachedStorage):
pass
class BaseFinderStorage(PipelineStorage):
finders = None
def __init__(self, finders=None, *args, **kwargs):
if finders is not None:
self.finders = finders
if self.finders is None:
raise ImproperlyConfigured("The storage %r doesn't have a finders class assigned." % self.__class__)
super(BaseFinderStorage, self).__init__(*args, **kwargs)
def path(self, name):
path = self.finders.find(name)
if not path:
path = super(BaseFinderStorage, self).path(name)
return path
def exists(self, name):
exists = self.finders.find(name) is not None
if not exists:
return super(BaseFinderStorage, self).exists(name)
return exists
def listdir(self, path):
for finder in finders.get_finders():
for storage in finder.storages.values():
try:
return storage.listdir(path)
except OSError:
pass
def match_location(self, name, path, prefix=None):
if prefix:
prefix = "%s%s" % (prefix, os.sep)
name = name[len(prefix):]
if path == name:
return name
if os.path.splitext(path)[0] == os.path.splitext(name)[0]:
return name
return None
def find_storage(self, name):
for finder in finders.get_finders():
for path, storage in finder.list([]):
prefix = getattr(storage, 'prefix', None)
matched_path = self.match_location(name, path, prefix)
if matched_path:
return matched_path, storage
raise ValueError("The file '%s' could not be found with %r." % (name, self))
def _open(self, name, mode="rb"):
name, storage = self.find_storage(name)
return storage._open(name, mode)
def _save(self, name, content):
name, storage = self.find_storage(name)
# Ensure we overwrite file, since we have no control on external storage
if storage.exists(name):
storage.delete(name)
return storage._save(name, content)
class PipelineFinderStorage(BaseFinderStorage):
finders = finders
class DefaultStorage(LazyObject):
def _setup(self):
self._wrapped = get_storage_class(settings.PIPELINE_STORAGE)()
default_storage = DefaultStorage()
|
mit
| -3,469,726,839,481,209,000 | 31.76 | 127 | 0.617623 | false |
pytroll/satpy
|
satpy/writers/utils.py
|
1
|
1191
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2019 Satpy developers
#
# This file is part of satpy.
#
# satpy is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# satpy is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# satpy. If not, see <http://www.gnu.org/licenses/>.
"""Writer utilities."""
def flatten_dict(d, parent_key='', sep='_'):
"""Flatten a nested dictionary.
Based on https://stackoverflow.com/a/6027615/5703449
"""
items = []
for k, v in d.items():
new_key = parent_key + sep + k if parent_key else k
if isinstance(v, dict):
items.extend(flatten_dict(v, parent_key=new_key, sep=sep).items())
else:
items.append((new_key, v))
return dict(items)
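# Editor's illustrative sketch (not part of the original satpy module); the
# attribute names below are hypothetical.
if __name__ == '__main__':
    nested = {'platform': 'EOS-Aqua', 'calibration': {'gain': 0.5, 'offset': -1.0}}
    # Prints {'platform': 'EOS-Aqua', 'calibration_gain': 0.5, 'calibration_offset': -1.0}
    print(flatten_dict(nested))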
|
gpl-3.0
| -4,987,203,511,559,517,000 | 35.090909 | 79 | 0.678421 | false |
owtf/owtf
|
owtf/plugins/auxiliary/selenium/[email protected]
|
1
|
1165
|
import logging
from owtf.config import config_handler
from owtf.plugin.params import plugin_params
DESCRIPTION = "Sends a bunch of URLs through selenium"
CATEGORIES = ["RCE", "SQLI", "XSS", "CHARSET"]
def run(PluginInfo):
Content = []
logging.info(
"WARNING: This plugin requires a small selenium installation, please run '%s' if you have issues"
        % config_handler.get_val("INSTALL_SCRIPT")
)
args = {
"Description": DESCRIPTION,
"Mandatory": {
"BASE_URL": "The URL to be pre-pended to the tests",
"CATEGORY": "Category to use (i.e. " + ", ".join(sorted(CATEGORIES)) + ")",
},
"Optional": {"REPEAT_DELIM": config_handler.get_val("REPEAT_DELIM_DESCRIP")},
}
for args in plugin_params.get_args(args, PluginInfo):
plugin_params.set_config(args)
InputFile = config_handler.get_val("SELENIUM_URL_VECTORS_" + args["CATEGORY"])
URLLauncher = ServiceLocator.get_component(
"selenium_handler"
).CreateURLLauncher(
{"BASE_URL": args["BASE_URL"], "INPUT_FILE": InputFile}
)
URLLauncher.run()
return Content
|
bsd-3-clause
| 4,028,115,766,826,848,000 | 32.285714 | 105 | 0.614592 | false |
VictorLoren/pyRecipeBook
|
FoodGroups.py
|
1
|
3301
|
# For XML Element Tree manipulation
import xml.etree.ElementTree as ET #place within somewhere?
# FoodGroups object inspired by P90X
class FoodGroups:
# Other Properties shared for all objects (constant)
groups = ['Fats','Carbs','Dairy','Fruit','Snacks','Proteins',
'Condiments','Vegetables']
# Snack object
class Snack:
# String representation of object
def __repr__(self):
return ET.tostring(self.xml)
# Converting to string
def __str__(self):
return ET.tostring(self.xml)
# Initiate Snack object with number of servings (default to 0)
def __init__(self,SGL=0,DBL=0,BAR=0,DRINK=0):
self.SGL = SGL
self.DBL = DBL
self.BAR = BAR
self.DRINK = DRINK
self.xml = self.__createXML() #XML representation of object
# Create XML instance of the object
def __createXML(self):
snack = ET.Element('snack') #create root element
# Create subelements
sgl = ET.SubElement(snack,'sgl')
dbl = ET.SubElement(snack,'dbl')
bar = ET.SubElement(snack,'bar')
drink = ET.SubElement(snack,'drink')
# Text within tag
sgl.text='%0.1f' %self.SGL
dbl.text='%0.1f' %self.DBL
bar.text='%0.1f' %self.BAR
drink.text='%0.1f' %self.DRINK
# Give XML instance
return snack
# String representation of object
def __repr__(self):
return ET.tostring(self.xml)
# Converting to string
def __str__(self):
return ET.tostring(self.xml)
# Initiate object with number (decimal or int)
def __init__(self,fats=0,carbs=0,proteins=0,dairy=0,fruit=0,vegetables=0,
condiments=0,snacks=Snack()):
self.fats = fats
self.carbs = carbs
self.dairy = dairy
self.fruit = fruit
self.snacks = snacks #object with four types (SGL,DBL,BAR,DRINK)
self.proteins = proteins
self.condiments = condiments
self.vegetables = vegetables
self.xml = self.__createXML() #XML representation of object
# Create XML instance of the object
def __createXML(self):
foodGroups = ET.Element('foodGroups') #create root element
# Create subelements
fats = ET.SubElement(foodGroups,'fats')
carbs = ET.SubElement(foodGroups,'carbs')
dairy = ET.SubElement(foodGroups,'dairy')
fruit = ET.SubElement(foodGroups,'fruit')
snacks = foodGroups.append(self.snacks.xml) #XML from Snack instance
proteins = ET.SubElement(foodGroups,'proteins')
condiments = ET.SubElement(foodGroups,'condiments')
vegetables = ET.SubElement(foodGroups,'vegetables')
# Text within tag
fats.text = '%0.1f' %self.fats
carbs.text = '%0.1f' %self.carbs
dairy.text = '%0.1f' %self.dairy
fruit.text = '%0.1f' %self.fruit
proteins.text = '%0.1f' %self.proteins
condiments.text = '%0.1f' %self.condiments
vegetables.text = '%0.1f' %self.vegetables
# Give XML instance
|
mit
| -7,288,414,778,304,980,000 | 38.771084 | 80 | 0.569221 | false |
Azure/azure-sdk-for-python
|
sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2019_11_01_preview/models/_monitor_management_client_enums.py
|
1
|
6090
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum, EnumMeta
from six import with_metaclass
class _CaseInsensitiveEnumMeta(EnumMeta):
def __getitem__(self, name):
return super().__getitem__(name.upper())
def __getattr__(cls, name):
"""Return the enum member matching `name`
We use __getattr__ instead of descriptors or inserting into the enum
class' __dict__ in order to support `name` and `value` being both
properties for enum members (which live in the class' __dict__) and
enum members themselves.
"""
try:
return cls._member_map_[name.upper()]
except KeyError:
raise AttributeError(name)
class KnownDataCollectionRuleAssociationProvisioningState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The resource provisioning state.
"""
CREATING = "Creating"
UPDATING = "Updating"
DELETING = "Deleting"
SUCCEEDED = "Succeeded"
FAILED = "Failed"
class KnownDataCollectionRuleProvisioningState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The resource provisioning state.
"""
CREATING = "Creating"
UPDATING = "Updating"
DELETING = "Deleting"
SUCCEEDED = "Succeeded"
FAILED = "Failed"
class KnownDataFlowStreams(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
MICROSOFT_ANTI_MALWARE_STATUS = "Microsoft-AntiMalwareStatus"
MICROSOFT_AUDITD = "Microsoft-Auditd"
MICROSOFT_CISCOASA = "Microsoft-CISCOASA"
MICROSOFT_COMMON_SECURITY_LOG = "Microsoft-CommonSecurityLog"
MICROSOFT_COMPUTER_GROUP = "Microsoft-ComputerGroup"
MICROSOFT_EVENT = "Microsoft-Event"
MICROSOFT_FIREWALL_LOG = "Microsoft-FirewallLog"
MICROSOFT_HEALTH_STATE_CHANGE = "Microsoft-HealthStateChange"
MICROSOFT_HEARTBEAT = "Microsoft-Heartbeat"
MICROSOFT_INSIGHTS_METRICS = "Microsoft-InsightsMetrics"
MICROSOFT_OPERATION_LOG = "Microsoft-OperationLog"
MICROSOFT_PERF = "Microsoft-Perf"
MICROSOFT_PROCESS_INVESTIGATOR = "Microsoft-ProcessInvestigator"
MICROSOFT_PROTECTION_STATUS = "Microsoft-ProtectionStatus"
MICROSOFT_ROME_DETECTION_EVENT = "Microsoft-RomeDetectionEvent"
MICROSOFT_SECURITY_BASELINE = "Microsoft-SecurityBaseline"
MICROSOFT_SECURITY_BASELINE_SUMMARY = "Microsoft-SecurityBaselineSummary"
MICROSOFT_SECURITY_EVENT = "Microsoft-SecurityEvent"
MICROSOFT_SYSLOG = "Microsoft-Syslog"
MICROSOFT_WINDOWS_EVENT = "Microsoft-WindowsEvent"
class KnownExtensionDataSourceStreams(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
MICROSOFT_ANTI_MALWARE_STATUS = "Microsoft-AntiMalwareStatus"
MICROSOFT_AUDITD = "Microsoft-Auditd"
MICROSOFT_CISCOASA = "Microsoft-CISCOASA"
MICROSOFT_COMMON_SECURITY_LOG = "Microsoft-CommonSecurityLog"
MICROSOFT_COMPUTER_GROUP = "Microsoft-ComputerGroup"
MICROSOFT_EVENT = "Microsoft-Event"
MICROSOFT_FIREWALL_LOG = "Microsoft-FirewallLog"
MICROSOFT_HEALTH_STATE_CHANGE = "Microsoft-HealthStateChange"
MICROSOFT_HEARTBEAT = "Microsoft-Heartbeat"
MICROSOFT_INSIGHTS_METRICS = "Microsoft-InsightsMetrics"
MICROSOFT_OPERATION_LOG = "Microsoft-OperationLog"
MICROSOFT_PERF = "Microsoft-Perf"
MICROSOFT_PROCESS_INVESTIGATOR = "Microsoft-ProcessInvestigator"
MICROSOFT_PROTECTION_STATUS = "Microsoft-ProtectionStatus"
MICROSOFT_ROME_DETECTION_EVENT = "Microsoft-RomeDetectionEvent"
MICROSOFT_SECURITY_BASELINE = "Microsoft-SecurityBaseline"
MICROSOFT_SECURITY_BASELINE_SUMMARY = "Microsoft-SecurityBaselineSummary"
MICROSOFT_SECURITY_EVENT = "Microsoft-SecurityEvent"
MICROSOFT_SYSLOG = "Microsoft-Syslog"
MICROSOFT_WINDOWS_EVENT = "Microsoft-WindowsEvent"
class KnownPerfCounterDataSourceScheduledTransferPeriod(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The interval between data uploads (scheduled transfers), rounded up to the nearest minute.
"""
PT1_M = "PT1M"
PT5_M = "PT5M"
PT15_M = "PT15M"
PT30_M = "PT30M"
PT60_M = "PT60M"
class KnownPerfCounterDataSourceStreams(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
MICROSOFT_PERF = "Microsoft-Perf"
MICROSOFT_INSIGHTS_METRICS = "Microsoft-InsightsMetrics"
class KnownSyslogDataSourceFacilityNames(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
AUTH = "auth"
AUTHPRIV = "authpriv"
CRON = "cron"
DAEMON = "daemon"
KERN = "kern"
LPR = "lpr"
MAIL = "mail"
MARK = "mark"
NEWS = "news"
SYSLOG = "syslog"
USER = "user"
UUCP = "UUCP"
LOCAL0 = "local0"
LOCAL1 = "local1"
LOCAL2 = "local2"
LOCAL3 = "local3"
LOCAL4 = "local4"
LOCAL5 = "local5"
LOCAL6 = "local6"
LOCAL7 = "local7"
class KnownSyslogDataSourceLogLevels(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
DEBUG = "Debug"
INFO = "Info"
NOTICE = "Notice"
WARNING = "Warning"
ERROR = "Error"
CRITICAL = "Critical"
ALERT = "Alert"
EMERGENCY = "Emergency"
class KnownSyslogDataSourceStreams(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
MICROSOFT_SYSLOG = "Microsoft-Syslog"
class KnownWindowsEventLogDataSourceScheduledTransferPeriod(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The interval between data uploads (scheduled transfers), rounded up to the nearest minute.
"""
PT1_M = "PT1M"
PT5_M = "PT5M"
PT15_M = "PT15M"
PT30_M = "PT30M"
PT60_M = "PT60M"
class KnownWindowsEventLogDataSourceStreams(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
MICROSOFT_WINDOWS_EVENT = "Microsoft-WindowsEvent"
MICROSOFT_EVENT = "Microsoft-Event"
|
mit
| 6,882,301,238,570,826,000 | 36.826087 | 113 | 0.702299 | false |
budnyjj/bsuir_magistracy
|
dissertation/synopsis/lst/nonlinear_lse.py
|
1
|
2883
|
import sympy as sp
import numpy as np
def search_lse(expression, parameters,
values, result_values,
init_estimates, num_iter=1):
"""
Computes estimates of ditribution parameters with
LSE method.
Parameters:
expression --- sympy object, which represents target
expression, for example, 'a * exp(-alpha*x)'
parameters --- tuple of sympy objects, whose estimates
should be found, for example, (a, alpha)
values --- dict of values, keyed by sympy objects,
for example, {x: [x1, x2, ...]}
result_values --- dict of result values, keyed by
sympy objects, for example, {y: [y1, y2, ...]}
init_estimates --- dict of init values of estimates,
used in iterational search of estimates,
keyed by sympy objects, for example: {x: 0, y: 0}
num_iter --- number of method iterations
Yield:
number of iteration,
computed estimates of symbolic parameters
"""
# get list of symbolic values
sym_vals = tuple(values.keys())
# get array of real values
vals = []
for sym_val in sym_vals:
vals.append(values[sym_val])
vals = np.array(vals).T
# get result values as first value of dict
res_vals = [next(iter(result_values.values()))]
res_vals = np.array(res_vals).T
# init effective estimates with basic values
cur_estimates = []
for parameter in parameters:
cur_estimates.append(init_estimates[parameter])
cur_estimates = np.array([cur_estimates]).T
# get matrix of symbolic derivatives of expression
sym_diff_funcs = []
for parameter in parameters:
sym_diff_funcs.append(sp.diff(expression, parameter))
for i in range(num_iter):
# substitute current parameter values into sym_expr
subs = {}
for i_param, sym_param in enumerate(parameters):
subs[sym_param] = cur_estimates[i_param]
cur_f = sp.lambdify(
sym_vals, expression.subs(subs), 'numpy')
cur_appr = np.vectorize(cur_f)(vals)
# compute derivates of sym_expr by sym_params
diff_funcs = []
for param_i, parameter in enumerate(parameters):
diff_func = sym_diff_funcs[param_i].subs(subs)
diff_funcs.append(
sp.lambdify(sym_vals, diff_func, 'numpy'))
# construct Q from rows
q_rows = []
for diff_func in diff_funcs:
q_rows.append(np.vectorize(diff_func)(vals))
Q = np.hstack(q_rows)
Q_t = Q.T
# calculate addition =
# ((Q_t*Q)^-1)*Q_t*(res_vals - cur_appr)
add = np.linalg.inv(np.dot(Q_t, Q))
add = np.dot(add, Q_t)
add = np.dot(add, res_vals - cur_appr)
cur_estimates += add
# yield first row
yield i + 1, cur_estimates.T[0]
|
gpl-3.0
| 341,764,966,453,909,900 | 37.44 | 62 | 0.592785 | false |
lzamparo/SdA_reduce
|
theano_models/SdA/test_max_norm.py
|
1
|
7924
|
import numpy
import cPickle
import theano
import theano.tensor as T
from mlp.logistic_sgd import LogisticRegression
from AutoEncoder import AutoEncoder
from SdA import SdA
from numpy.linalg import norm
from theano.tensor.shared_randomstreams import RandomStreams
from extract_datasets import extract_unlabeled_chunkrange
from load_shared import load_data_unlabeled
from tables import openFile
import os
import sys
import time
from datetime import datetime
from optparse import OptionParser
def test_restrict_norm_SdA(num_epochs=10, pretrain_lr=0.00001, lr_decay = 0.98, batch_size=20):
"""
Pretrain an SdA model for the given number of training epochs, applying norm restrictions on the W matrices. Try ReLU units, since their weights seem to blow up
on this data set.
:type num_epochs: int
:param num_epochs: number of epoch to do pretraining
:type pretrain_lr: float
:param pretrain_lr: learning rate to be used during pre-training
:type batch_size: int
:param batch_size: train in mini-batches of this size
"""
layer_types=['ReLU','ReLU']
current_dir = os.getcwd()
os.chdir(options.dir)
today = datetime.today()
day = str(today.date())
hour = str(today.time())
output_filename = "test_max_norm_sda_." + '_'.join([elem for elem in layer_types]) + day + "." + hour
output_file = open(output_filename,'w')
os.chdir(current_dir)
print >> output_file, "Run on " + str(datetime.now())
# Get the training data sample from the input file
data_set_file = openFile(str(options.inputfile), mode = 'r')
datafiles = extract_unlabeled_chunkrange(data_set_file, num_files = 10)
train_set_x = load_data_unlabeled(datafiles, features = (5,20))
data_set_file.close()
# compute number of minibatches for training, validation and testing
n_train_batches, n_features = train_set_x.get_value(borrow=True).shape
n_train_batches /= batch_size
# numpy random generator
numpy_rng = numpy.random.RandomState(89677)
print '... building the model'
# Set the initial value of the learning rate
learning_rate = theano.shared(numpy.asarray(pretrain_lr,
dtype=theano.config.floatX))
# Function to decrease the learning rate
decay_learning_rate = theano.function(inputs=[], outputs=learning_rate,
updates={learning_rate: learning_rate * lr_decay})
sda_model = SdA(numpy_rng=numpy_rng, n_ins=n_features,
hidden_layers_sizes=[5, 5],
corruption_levels = [0.25, 0.25],
layer_types=layer_types)
#########################
# PRETRAINING THE MODEL #
#########################
print '... getting the pretraining functions'
pretraining_fns = sda_model.pretraining_functions(train_set_x=train_set_x,
batch_size=batch_size,
learning_rate=learning_rate)
#print '... dumping pretraining functions to output file pre pickling'
#print >> output_file, 'Pretraining functions, pre pickling'
#for i in xrange(sda.n_layers):
#theano.printing.debugprint(pretraining_fns[i], file = output_file, print_type=True)
print '... getting the max-norm regularization functions'
max_norm_regularization_fns = sda_model.max_norm_regularization()
print '... pre-training the model'
start_time = time.clock()
## Pre-train layer-wise
corruption_levels = [float(options.corruption), float(options.corruption)]
for i in xrange(sda_model.n_layers):
for epoch in xrange(num_epochs):
# go through the training set
c = []
for batch_index in xrange(n_train_batches):
c.append(pretraining_fns[i](index=batch_index,
corruption=corruption_levels[i]))
# regularize weights here
scale = max_norm_regularization_fns[i](norm_limit=options.norm_limit)
print >> output_file, 'Pre-training layer %i, epoch %d, cost ' % (i, epoch),
print >> output_file, numpy.mean(c)
print >> output_file, 'Learning rate '
print >> output_file, learning_rate.get_value(borrow=True)
print >> output_file, 'Scale ', scale
decay_learning_rate()
end_time = time.clock()
print >> output_file, ('Pretraining time for file ' +
os.path.split(__file__)[1] +
' was %.2fm to go through %i epochs' % (((end_time - start_time) / 60.), (num_epochs / 2)))
# Pickle the SdA
print >> output_file, 'Pickling the model...'
f = file(options.savefile, 'wb')
cPickle.dump(sda_model, f, protocol=cPickle.HIGHEST_PROTOCOL)
f.close()
# Unpickle the SdA
print >> output_file, 'Unpickling the model...'
f = file(options.savefile, 'rb')
pickled_sda = cPickle.load(f)
f.close()
# Test that the W-matrices and biases for the dA layers in sda are all close to the W-matrices
# and biases freshly unpickled
for i in xrange(pickled_sda.n_layers):
pickled_dA_params = pickled_sda.dA_layers[i].get_params()
fresh_dA_params = sda_model.dA_layers[i].get_params()
if not numpy.allclose(pickled_dA_params[0].get_value(), fresh_dA_params[0].get_value()):
print >> output_file, ("numpy says that Ws in layer %i are not close" % (i))
print >> output_file, "Norm for pickled dA " + pickled_dA_params[0].name + ": "
print >> output_file, norm(pickled_dA_params[0].get_value())
print >> output_file, "Values for pickled dA " + pickled_dA_params[0].name + ": "
print >> output_file, numpy.array_repr(pickled_dA_params[0].get_value())
print >> output_file, "Norm for fresh dA " + fresh_dA_params[0].name + ": "
print >> output_file, norm(fresh_dA_params[0].get_value())
print >> output_file, "Values for fresh dA " + fresh_dA_params[0].name + ": "
print >> output_file, numpy.array_repr(fresh_dA_params[0].get_value())
if not numpy.allclose(pickled_dA_params[1].get_value(), fresh_dA_params[1].get_value()):
print >> output_file, ("numpy says that the biases in layer %i are not close" % (i))
print >> output_file, "Norm for pickled dA " + pickled_dA_params[1].name + ": "
print >> output_file, norm(pickled_dA_params[1].get_value())
print >> output_file, "Values for pickled dA " + pickled_dA_params[1].name + ": "
print >> output_file, numpy.array_repr(pickled_dA_params[1].get_value())
print >> output_file, "Norm for fresh dA " + fresh_dA_params[1].name + ": "
print >> output_file, norm(fresh_dA_params[1].get_value())
print >> output_file, "Values for fresh dA " + pickled_dA_params[1].name + ": "
print >> output_file, numpy.array_repr(pickled_dA_params[1].get_value())
output_file.close()
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-d", "--dir", dest="dir", help="test output directory")
parser.add_option("-s","--savefile",dest = "savefile", help = "Save the model to this pickle file")
parser.add_option("-n","--normlimit",dest = "norm_limit", type=float, help = "limit the norm of each vector in each W matrix to norm_limit")
parser.add_option("-i", "--inputfile", dest="inputfile", help="the data (hdf5 file) prepended with an absolute path")
parser.add_option("-c", "--corruption", dest="corruption", help="use this amount of corruption for the dA s")
(options, args) = parser.parse_args()
test_restrict_norm_SdA()
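# Hypothetical command-line invocation (the paths and option values below are
# assumptions, not part of the original script):
#     python test_max_norm.py -d ./results -s sda_model.pkl -n 3.0 \
#         -i /path/to/data.h5 -c 0.25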
|
bsd-3-clause
| 1,504,079,744,226,445,600 | 44.028409 | 166 | 0.610172 | false |
bjmorgan/lattice_mc
|
lattice_mc/lookup_table.py
|
1
|
3897
|
import math
import sys
import itertools as it
from lattice_mc.global_vars import kT, rate_prefactor
def metropolis( delta_E ):
"""
Boltzmann probability factor for an event with an energy change `delta_E`, following the Metropolis algorithm.
Args:
delta_E (Float): The change in energy.
Returns:
(Float): Metropolis relative probability for this event.
"""
if delta_E <= 0.0:
return 1.0
else:
return math.exp( -delta_E / ( kT ) )
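# Hypothetical sanity check (not part of the original module): with kT taken
# from global_vars, an energy drop is always accepted while an energy rise is
# exponentially suppressed, e.g.
#     metropolis(-0.1) == 1.0
#     metropolis(0.1) == math.exp(-0.1 / kT)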
class LookupTable: #TODO if nearest-neighbour and coordination number dependent look-up tables have different data structures, they should each subclass this general class: the different implementations for setting these up and accessing the jump probabilities can then be self-contained
"""
LookupTable class
"""
def __init__( self, lattice, hamiltonian ):
"""
Initialise a LookupTable object instance.
Args:
lattice (lattice_mc.Lattice): The lattice object, used to define the allowed jumps.
hamiltonian (Str): The model Hamiltonian used to define the jump energies.
                Allowed values = `nearest-neighbour`
Returns:
None
"""
expected_hamiltonian_values = [ 'nearest-neighbour' ]
if hamiltonian not in expected_hamiltonian_values:
raise ValueError( hamiltonian )
self.site_energies = lattice.site_energies
self.nn_energy = lattice.nn_energy
self.cn_energy = lattice.cn_energies
self.connected_site_pairs = lattice.connected_site_pairs()
self.max_coordination_per_site = lattice.max_site_coordination_numbers()
self.site_specific_coordination_per_site = lattice.site_specific_coordination_numbers()
if hamiltonian == 'nearest-neighbour':
self.generate_nearest_neighbour_lookup_table()
def relative_probability( self, l1, l2, c1, c2 ):
"""
The relative probability for a jump between two sites with specific site types and coordination numbers.
Args:
l1 (Str): Site label for the initial site.
l2 (Str): Site label for the final site.
c1 (Int): Coordination number for the initial site.
c2 (Int): Coordination number for the final site.
Returns:
(Float): The relative probability of this jump occurring.
"""
if self.site_energies:
site_delta_E = self.site_energies[ l2 ] - self.site_energies[ l1 ]
else:
site_delta_E = 0.0
if self.nn_energy:
delta_nn = c2 - c1 - 1 # -1 because the hopping ion is not counted in the final site occupation number
site_delta_E += delta_nn * self.nn_energy
return metropolis( site_delta_E )
def generate_nearest_neighbour_lookup_table( self ):
"""
Construct a look-up table of relative jump probabilities for a nearest-neighbour interaction Hamiltonian.
Args:
None.
Returns:
None.
"""
self.jump_probability = {}
for site_label_1 in self.connected_site_pairs:
self.jump_probability[ site_label_1 ] = {}
for site_label_2 in self.connected_site_pairs[ site_label_1 ]:
self.jump_probability[ site_label_1 ][ site_label_2 ] = {}
for coordination_1 in range( self.max_coordination_per_site[ site_label_1 ] ):
self.jump_probability[ site_label_1 ][ site_label_2 ][ coordination_1 ] = {}
for coordination_2 in range( 1, self.max_coordination_per_site[ site_label_2 ] + 1 ):
self.jump_probability[ site_label_1 ][ site_label_2 ][ coordination_1 ][ coordination_2 ] = self.relative_probability( site_label_1, site_label_2, coordination_1, coordination_2 )
|
mit
| 4,607,539,403,801,278,000 | 41.824176 | 287 | 0.623557 | false |
JanusWind/FC
|
janus_widget_opt.py
|
1
|
2266
|
################################################################################
##
## Janus -- GUI Software for Processing Thermal-Ion Measurements from the
## Wind Spacecraft's Faraday Cups
##
## Copyright (C) 2016 Bennett A. Maruca ([email protected])
##
## This program is free software: you can redistribute it and/or modify it under
## the terms of the GNU General Public License as published by the Free Software
## Foundation, either version 3 of the License, or (at your option) any later
## version.
##
## This program is distributed in the hope that it will be useful, but WITHOUT
## ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
## FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
## details.
##
## You should have received a copy of the GNU General Public License along with
## this program. If not, see http://www.gnu.org/licenses/.
##
################################################################################
################################################################################
## LOAD THE NECESSARY MODULES.
################################################################################
# Load the modules necessary for the graphical interface.
from PyQt4.QtGui import QTabWidget
from janus_dialog_opt import dialog_opt
#from janus_widget_nln_fls import widget_opt_fls
################################################################################
## DEFINE THE "widget_opt" CLASS TO CUSTOMIZE "QTabWidget" FOR OPTIONS MENU.
################################################################################
class widget_opt( QTabWidget ) :
#-----------------------------------------------------------------------
# DEFINE THE INITIALIZATION FUNCTION.
#-----------------------------------------------------------------------
    def __init__( self, core ) :
        # Inherit all attributes of an instance of "QTabWidget".
        super( widget_opt, self ).__init__( )
        # Store the Janus core.
        self.core = core
        # Initialize this widget's sub-widgets and add them as tabs.
        self.wdg_par = dialog_opt( self.core )
        # self.wdg_fls = widget_opt_fls( self.core )
        self.addTab( self.wdg_par, 'Display Options' )
        # self.addTab( self.wdg_fls, 'Files Options' )
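# Hypothetical usage sketch (not part of the original file): assumes a running
# QApplication and an existing Janus "core" object.
#     wdg = widget_opt( core )
#     wdg.show( )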
|
gpl-3.0
| -5,210,201,473,605,904,000 | 36.147541 | 80 | 0.518976 | false |
LethusTI/supportcenter
|
supportcenter/forum/signals.py
|
1
|
2563
|
# -*- coding: utf-8 -*-
__all__ = ('forum_post_save', 'reply_post_save')
from django.template.loader import render_to_string
from django.core.mail import EmailMultiAlternatives
from supportcenter.accounts.models import User
from supportcenter.settings import DEPLOY_URL
def forum_post_save(sender, document, created):
if created:
staffers = User.objects.filter(is_superuser=True)
out_dict = dict([[user.email, user] for user in staffers])
for user in staffers:
context = {
'name': user.get_full_name(),
'email': user.email,
'topic': document,
'site': {
'domain': DEPLOY_URL,
'name': "Lethus support center"
}
}
subject = render_to_string(
'forum/emails/new_topic_subject.txt', context)
message = render_to_string(
'forum/emails/new_topic_message.txt', context)
message_html = render_to_string(
'forum/emails/new_topic_message.html', context)
subject = u' '.join(line.strip() for line in subject.splitlines()).strip()
msg = EmailMultiAlternatives(subject, message, to=[user.email])
msg.attach_alternative(message_html, 'text/html')
msg.send()
def reply_post_save(sender, document, created):
if created:
staffers = User.objects.filter(is_superuser=True)
out_dict = dict([[user.email, user.get_full_name()] for user in staffers])
for reply in document.forum.replies:
out_dict[reply.email] = reply.name
for email, name in out_dict.iteritems():
context = {
'name': name,
'email': email,
'reply': document,
'site': {
'domain': DEPLOY_URL,
'name': "Lethus support center"
}
}
subject = render_to_string(
'forum/emails/new_reply_subject.txt', context)
message = render_to_string(
'forum/emails/new_reply_message.txt', context)
message_html = render_to_string(
'forum/emails/new_reply_message.html', context)
subject = u' '.join(line.strip() for line in subject.splitlines()).strip()
msg = EmailMultiAlternatives(subject, message, to=[email])
msg.attach_alternative(message_html, 'text/html')
msg.send()
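# Hypothetical wiring (not part of the original module): handlers with this
# (sender, document, created) signature are typically connected to mongoengine
# post_save signals elsewhere; the Forum and Reply model names are assumptions.
#     from mongoengine import signals
#     signals.post_save.connect(forum_post_save, sender=Forum)
#     signals.post_save.connect(reply_post_save, sender=Reply)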
|
gpl-3.0
| 3,620,539,312,881,742,000 | 34.597222 | 86 | 0.545064 | false |
blrm/openshift-tools
|
scripts/monitoring/cron-send-miner-check.py
|
1
|
3104
|
#!/usr/bin/env python
'''
Send OpenShift Pro Online miner program checks to Zagg
'''
# Adding the ignore because it does not like the naming of the script
# to be different than the class name
# pylint: disable=invalid-name
# pylint: disable=wrong-import-position
# pylint: disable=broad-except
# pylint: disable=line-too-long
import argparse
import logging
import time
import StringIO
import re
# pylint: disable=import-error
from openshift_tools.monitoring.ocutil import OCUtil
from openshift_tools.monitoring.metric_sender import MetricSender
logging.basicConfig(
format='%(asctime)s - %(relativeCreated)6d - %(levelname)-8s - %(message)s',
)
logger = logging.getLogger()
logger.setLevel(logging.WARN)
ocutil = OCUtil()
def runOCcmd(cmd, base_cmd='oc'):
""" log commands through ocutil """
logger.info(base_cmd + " " + cmd)
oc_time = time.time()
oc_result = ocutil.run_user_cmd(cmd, base_cmd=base_cmd, )
logger.debug("oc command took %s seconds", str(time.time() - oc_time))
return oc_result
def parse_args():
""" parse the args from the cli """
parser = argparse.ArgumentParser(description='OpenShift pro online miner check tool')
parser.add_argument('-v', '--verbose', action='store_true', default=None, help='increase output verbosity')
parser.add_argument('-l', '--list', nargs='+', help='A list of pod name for the miner program', required=True)
args = parser.parse_args()
if args.verbose:
logger.setLevel(logging.DEBUG)
return args
def send_metrics(key, result):
""" send data to MetricSender """
logger.debug("send_metrics()")
ms_time = time.time()
ms = MetricSender()
logger.info("Send data to MetricSender")
ms.add_metric({key : result})
logger.debug({key : result})
ms.send_metrics()
logger.info("Data sent to Zagg in %s seconds", str(time.time() - ms_time))
def check_miner_programs(pods):
""" check if the miner pods running on the cluster """
logger.info('Check the miner pods with name: %s', pods)
miner_list = pods
pod_list = StringIO.StringIO(runOCcmd("get pod --all-namespaces -o custom-columns=NAME:.metadata.name"))
miner_count = 0
miner_pod = []
for line in pod_list.readlines():
for name in miner_list:
if re.search(name, line):
miner_count += 1
miner_pod.append(line.rstrip())
logger.info("Number of miner pods are running on the cluster: %s", miner_count)
if miner_count != 0:
logger.debug("A list of miner pods: %s", miner_pod)
        # tolerate it if the pod count is less than 20
if miner_count > 20:
logger.debug("There are more than 20 miner pods running on the cluster")
return 1
return 0
def main():
""" run the monitoring check """
args = parse_args()
miner_status = check_miner_programs(args.list)
miner_program_key = 'openshift.pro.online.miner.abuse'
miner_program_result = miner_status
# send metrics to Zabbix
send_metrics(miner_program_key, miner_program_result)
if __name__ == '__main__':
main()
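# Hypothetical invocation (the pod-name patterns are assumptions, not part of
# the original script):
#     ./cron-send-miner-check.py -v -l minerd xmrig cryptonight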
|
apache-2.0
| -4,260,598,473,763,307,500 | 29.732673 | 114 | 0.666559 | false |
aaronprunty/starfish
|
setup.py
|
1
|
2282
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from setuptools import setup, find_packages
if sys.version_info < (3, 0):
sys.exit('''
Sorry, Python <= 2.7 is not supported. Vezda requires Python >= 3.
(Try obtaining the Anaconda Python distribution at
https://www.anaconda.com/download/
for example.)
''')
setup(name = 'vezda',
version = '0.1.29',
description = 'A set of command-line tools for imaging with the linear sampling method',
python_requires = '>=3',
classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Physics'
],
keywords = 'imaging shape reconstruction linear sampling method',
url = 'https://github.com/aaronprunty/vezda',
author = 'Aaron C. Prunty',
license = 'Apache License, Version 2.0',
packages = find_packages(),
package_data = {'vezda': ['examples/*', 'docs/*']},
include_package_data = True,
install_requires = ['argparse',
'matplotlib',
'numpy',
'pathlib',
'scipy',
'scikit-image',
'tqdm'],
entry_points = {
'console_scripts': [
'vzdata = vezda.setDataPath:cli',
'vzgrid = vezda.setSamplingGrid:cli',
'vzhome = vezda.home:cli',
'vzimage = vezda.plotImage:cli',
'vzsolve = vezda.LSMSolver:cli',
'vzsvd = vezda.SVD:cli',
'vzwiggles = vezda.plotWiggles:cli',
'vzwindow = vezda.setWindow:cli'
]
},
zip_safe = False)
|
apache-2.0
| 6,325,990,463,517,400,000 | 39.75 | 94 | 0.485539 | false |
JrGoodle/clowder
|
clowder/cli/forall.py
|
1
|
3270
|
"""Clowder command line forall controller
.. codeauthor:: Joe DeCapo <[email protected]>
"""
import argparse
import os
from typing import List, Optional
import clowder.util.formatting as fmt
import clowder.util.parallel as parallel
from clowder.clowder_controller import CLOWDER_CONTROLLER, print_clowder_name, valid_clowder_yaml_required
from clowder.config import Config
from clowder.git.clowder_repo import print_clowder_repo_status
from clowder.util.console import CONSOLE
from clowder.util.error import CommandArgumentError
from .util import add_parser_arguments
def add_forall_parser(subparsers: argparse._SubParsersAction) -> None: # noqa
"""Add clowder forall parser
:param argparse._SubParsersAction subparsers: Subparsers action to add parser to
"""
parser = subparsers.add_parser('forall', help='Run command or script in project directories')
parser.formatter_class = argparse.RawTextHelpFormatter
parser.set_defaults(func=forall)
add_parser_arguments(parser, [
(['command'], dict(metavar='<command>', nargs=1, default=None,
help='command to run in project directories')),
(['projects'], dict(metavar='<project|group>', default='default', nargs='*',
choices=CLOWDER_CONTROLLER.project_choices_with_default,
help=fmt.project_options_help_message('projects and groups to run command for'))),
(['--ignore-errors', '-i'], dict(action='store_true', help='ignore errors in command or script')),
(['--jobs', '-j'], dict(metavar='<n>', nargs=1, default=None, type=int,
help='number of jobs to use running commands in parallel')),
])
# TODO: Split out forall_handler() to parse args, then call typed forall() function
@valid_clowder_yaml_required
@print_clowder_name
@print_clowder_repo_status
def forall(args) -> None:
"""Clowder forall command private implementation
:raise CommandArgumentError:
"""
jobs = None
if args.jobs:
jobs = args.jobs[0]
if not args.command:
raise CommandArgumentError('Missing command')
command = args.command[0]
_forall_impl(command, args.ignore_errors, projects=args.projects, jobs=jobs)
def _forall_impl(command: str, ignore_errors: bool, projects: List[str], jobs: Optional[int] = None) -> None:
"""Runs script in project directories specified
:param str command: Command or script and optional arguments
:param bool ignore_errors: Whether to exit if command returns a non-zero exit code
    :param List[str] projects: Project names to run the command for
:param Optional[int] jobs: Number of jobs to use running parallel commands
"""
projects = Config().process_projects_arg(projects)
projects = CLOWDER_CONTROLLER.filter_projects(CLOWDER_CONTROLLER.projects, projects)
jobs_config = Config().jobs
jobs = jobs_config if jobs_config is not None else jobs
if jobs is not None and jobs != 1 and os.name == "posix":
if jobs <= 0:
jobs = 4
parallel.forall(projects, jobs, command, ignore_errors)
return
for project in projects:
CONSOLE.stdout(project.status())
project.run(command, ignore_errors=ignore_errors)
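# Hypothetical CLI usage (the command and project/group names are assumptions,
# not part of the original module):
#     clowder forall -j 4 "git status" my-group
#     clowder forall --ignore-errors "./scripts/build.sh"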
|
mit
| -1,905,861,807,495,073,800 | 36.159091 | 110 | 0.687768 | false |
sparcs-kaist/otlplus
|
apps/timetable/urls.py
|
1
|
1770
|
"""otlplus URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import url
from django.http import HttpResponseRedirect
from . import views
urlpatterns = [
url(r'^users/(?P<user_id>\d+)/timetables$', views.user_instance_timetable_list_view),
url(r'^users/(?P<user_id>\d+)/timetables/(?P<timetable_id>\d+)$', views.user_instance_timetable_instance_view),
url(r'^users/(?P<user_id>\d+)/timetables/(?P<timetable_id>\d+)/add-lecture$', views.user_instance_timetable_instance_add_lecture_view),
url(r'^users/(?P<user_id>\d+)/timetables/(?P<timetable_id>\d+)/remove-lecture$', views.user_instance_timetable_instance_remove_lecture_view),
url(r'^users/(?P<user_id>\d+)/wishlist$', views.user_instance_wishlist_view),
url(r'^users/(?P<user_id>\d+)/wishlist/add-lecture$', views.user_instance_wishlist_add_lecture_view),
url(r'^users/(?P<user_id>\d+)/wishlist/remove-lecture$', views.user_instance_wishlist_remove_lecture_view),
url(r'^share/timetable/image$', views.share_timetable_image_view),
url(r'^share/timetable/calendar$', views.share_timetable_calendar_view),
url(r'^external/google/google_auth_return$', views.external_google_google_auth_return_view),
]
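# Hypothetical inclusion from a project-level urls.py (the URL prefix is an
# assumption, not part of the original file):
#     url(r'^api/timetable/', include('apps.timetable.urls')),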
|
mit
| 5,463,092,638,090,802,000 | 56.096774 | 145 | 0.706215 | false |
RyogaLi/prob_model
|
src/main.py
|
1
|
18444
|
#!/usr/bin/python
# coding=utf-8
# v2.0
from read_files import *
from supplementary import *
from prob_model import *
from conf import *
import os
import fnmatch
import subprocess
import argparse
import logging.config
def load_pickle(filename):
with open(filename, 'rb') as pkl_file:
filecontent = pickle.load(pkl_file)
return filecontent
def load_npy(filename):
filecontent = np.load(filename)
return filecontent
def save_as_matrix(filename):
"""
    save a CSV file as a numpy matrix
    :param filename: path to the CSV file to read
    :return: numpy array of the parsed rows
"""
matrix = []
with open(filename, "r") as mix:
for line in mix:
line = line.strip().split(",")
line = [i.strip("\"\"\n") for i in line]
matrix.append(line)
return np.asarray(matrix)
def find(pattern, path):
"""
Find file in path that with filename matches pattern
:param pattern:
:param path:
:return: file path
"""
result = None
for root, dirs, files in os.walk(path):
for name in files:
if fnmatch.fnmatch(name, pattern):
result = os.path.join(root, name)
return result
def write_output_to_file(filename, data):
# write content in data by line to filename
with open(filename, "w") as output:
for item in data:
output.write("%s\n" % item)
def read_tumour_spreadsheet(input_file):
"""
    read the spreadsheet and save it into a numpy array
    :param input_file: csv file containing tumour names and chromatin file names
:return: matrix
"""
spreadsheet = []
with open(input_file, "r") as spreads:
for line in spreads:
if "sample_name" not in line:
spreadsheet.append(line.split())
return spreadsheet
def get_line(title, matrix):
"""
    get a line by title (first element)
    :param title: value to match against the first column
    :param matrix: numpy array to search
    :return: list containing the title and the rest of the line
"""
return matrix[np.where(matrix[:, 0] == title)]
def three_fold_validation():
# three fold validation
for variants_file in vcf_list[group]:
# select only breast cancer
bc = []
with open("/home/ryogali/dev/prob_model/BRCA_files.txt") as files:
for line in files:
line = line.strip("\"\"\n")
bc.append(line)
if variants_file.endswith(".vcf"):
tumour_name = variants_file.split(".")[0]
else:
main_logger.debug("Only VCF files are accepted.")
continue
if tumour_name not in bc: # not a brca tumor
print("not bc")
continue
# # find mixture directory
# mixture = None
# mixture_path = os.path.join(mixture_dir, tumour_name)
# # check if tumour exist in mixture path
# if os.path.exists(mixture_path):
# mixture = find("mixtures.csv", mixture_path)
# # check if mixture.csv found
# if mixture == None:
# continue
#
# main_logger.info("Tumor name: %s", tumour_name)
# # get signatures and time points
# sigs, mixture = read_mix_file(mixture)
# main_logger.info("%s signatures analyzed", str(len(sigs)))
# to do get new mixture here
sigs = []
mixture = []
# convert file to matrix
mixture_matrix = save_as_matrix(mixture_overall)
# print(mixture_matrix)
# print(tumour_name)
# select tumor name from the matrix
tumor_sig = mixture_matrix[(mixture_matrix[:,0]==tumour_name)|(mixture_matrix[:,0]=="")]
# print(tumor_sig)
if tumor_sig.shape[0]<2:
continue
# select where signatures != 0
for i in range(len(tumor_sig[1])):
# print(i)
if tumor_sig[1][i] != "0":
# print(tumor_sig[1][i])
sigs.append(tumor_sig[0][i])
mixture.append(tumor_sig[1][i])
for i in range(len(sigs)):
sigs[i] = "Signature " + sigs[i]
mixture = [float(i) for i in mixture[1:]]
# print(sigs)
# print(mixture)
##################################################################
##################################################################
vf = os.path.join(input_dir, variants_file)
variants_parser = VariantsFileParser(vf, chromatin_file, mRNA_file, hg19_file, trinuc, mixture, alex_signature_file, sigs)
# get input data to the model
# n = n fold validation, 1/n as train data and 2/n as test data
# low support data are those mutations that has FILTER = LOWSUPPORT
test, train, low_support = variants_parser._get_input_data(3)
# get low support data feature
low_support_data = variants_parser._get_features(low_support)
# get random data feature
if chromatin_file:
random_data = generate_data(mixture, alex_signature_file, sigs, True)
else:
random_data = generate_data(mixture, alex_signature_file, sigs, False)
for i in range(len(train)):
# get features from train data
train_data = variants_parser._get_features(train[i])
# get features from test data
test_data = variants_parser._get_features(test[i])
# train the model
train_matrix = ProbModel(train_data)
train_matrix._fit()
# predict probabilities for train data
if chromatin_file:
train_pa, train_pt, train_ps = \
train_matrix._predict_proba(train_matrix._mut,
train_matrix._tr_X,
train_matrix._strand_X,
train_matrix._strand)
test_matrix = ProbModel(test_data)
test_pa, test_pt, test_ps = \
train_matrix._predict_proba(test_matrix._mut,
test_matrix._tr_X,
test_matrix._strand_X,
test_matrix._strand)
# predict probabilities for low_sup data
low_support_matrix = ProbModel(low_support_data)
lowsup_pa, lowsup_pt, lowsup_ps = train_matrix._predict_proba(low_support_matrix._mut,
low_support_matrix._tr_X,
low_support_matrix._strand_X,
low_support_matrix._strand)
# predict probabilities for random data
random_matrix = ProbModel(random_data)
random_pa, random_pt, random_ps = train_matrix._predict_proba(random_matrix._mut,
random_matrix._tr_X,
random_matrix._strand_X,
random_matrix._strand)
else:
train_pa, train_pt, train_ps = train_matrix._predict_proba(train_matrix._mut,
train_matrix._mut,
train_matrix._strand_X,train_matrix._strand)
# predict probabilities for test data
test_matrix = ProbModel(test_data)
test_pa, test_pt, test_ps = train_matrix._predict_proba(test_matrix._mut,
test_matrix._mut ,
test_matrix._strand_X,
test_matrix._strand)
# predict probabilities for low_sup data
low_support_matrix = ProbModel(low_support_data)
lowsup_pa, lowsup_pt, lowsup_ps = train_matrix._predict_proba(low_support_matrix._mut,
low_support_matrix._mut,
low_support_matrix._strand_X,
low_support_matrix._strand)
# predict probabilities for random data
random_matrix = ProbModel(random_data)
random_pa, random_pt, random_ps = train_matrix._predict_proba(random_matrix._mut,
random_matrix._mut,
random_matrix._strand_X,
random_matrix._strand)
# write the probabilities to file
write_output_to_file(os.path.join(train_prob_dir,
tumour_name)+ "."+str(i)+".train.txt",
train_matrix._calculate_proba(train_pa, train_pt, train_ps))
write_output_to_file(os.path.join(test_prob_dir,
tumour_name)+"."+str(i)+".test.txt",
test_matrix._calculate_proba(test_pa, test_pt, test_ps))
write_output_to_file(os.path.join(lowsup_prob_dir,
tumour_name)+"."+str(i)+".lowsup.txt",
low_support_matrix._calculate_proba(lowsup_pa, lowsup_pt, lowsup_ps))
write_output_to_file(os.path.join(random_prob_dir,
tumour_name)+"."+str(i)+".random.txt",
random_matrix._calculate_proba(random_pa, random_pt, random_ps))
main_logger.info("DONE-%s-%s",tumour_name, str(i))
def single_file_main():
# get mixture matrix (one file for all the tumours)
mixture_matrix = save_as_matrix(mixture_overall)
# select mixture for this tumour from mixture matrix
tumor_sig = mixture_matrix[(mixture_matrix[:, 0] == tumour_id) | (mixture_matrix[:, 0] == "")]
# if tumour not found in the matrix, skip this tumour
if tumor_sig.shape[0] < 2:
main_logger.info("No mixture found for tumour: %s", tumour_id)
return
# select where signatures != 0
sigs = []
mixture = []
for i in range(len(tumor_sig[1])):
# print(i)
if tumor_sig[1][i] != "0":
# print(tumor_sig[1][i])
sigs.append(tumor_sig[0][i])
mixture.append(tumor_sig[1][i])
for i in range(len(sigs)):
sigs[i] = "Signature " + sigs[i]
variants_parser = VariantsFileParser(vcf_file, chromatin_dict, mRNA_file, hg19_file, trinuc, mixture[1:],
alex_signature_file, sigs)
# get input data to the model
# n = n fold validation, 1/n as train data and 2/n as test data
    # low support data are those mutations that have FILTER = LOWSUPPORT
# if n == 0, train and test data = 2:1
test, train, low_support = variants_parser._get_input_data()
# train the model using training data
train_data = variants_parser._get_features(train)
train_matrix = ProbModel(train_data)
train_matrix._fit()
# save the model to disk
filename = "./trained/"+ tumour_id+"_trained.sav"
pickle.dump(train_matrix, open(filename, 'wb'))
# for test data
test_data = variants_parser._get_features(test)
test_matrix = ProbModel(test_data)
# for low support data
low_support_data = variants_parser._get_features(low_support)
low_support_matrix = ProbModel(low_support_data)
# compute probabilities for train, test and low support data
train_pa, train_pt, train_ps = \
train_matrix._predict_proba(train_matrix._mut,
train_matrix._tr_X,
train_matrix._strand_X,
train_matrix._strand)
test_pa, test_pt, test_ps = \
train_matrix._predict_proba(test_matrix._mut,
test_matrix._tr_X,
test_matrix._strand_X,
test_matrix._strand)
low_pa, low_pt, low_ps = \
train_matrix._predict_proba(low_support_matrix._mut,
low_support_matrix._tr_X,
low_support_matrix._strand_X,
low_support_matrix._strand)
# write output to output directory
write_output_to_file(os.path.join(output_dir, tumour_id) + ".train.txt",
train_matrix._calculate_proba(train_pa, train_pt, train_ps))
write_output_to_file(os.path.join(output_dir, tumour_id) + ".test.txt",
test_matrix._calculate_proba(test_pa, test_pt, test_ps))
write_output_to_file(os.path.join(output_dir, tumour_id) + ".test.txt",
low_support_matrix._calculate_proba(low_pa, low_pt, low_ps))
def validated_file_main():
# get tumour name
tumour_name = vcf_file.split(".")[0]
# get mixture matrix (one file for all the tumours)
mixture_matrix = save_as_matrix(mixture_overall)
# select mixture for this tumour from mixture matrix
tumor_sig = mixture_matrix[(mixture_matrix[:, 0] == tumour_id) | (mixture_matrix[:, 0] == "")]
# if tumour not found in the matrix, skip this tumour
if tumor_sig.shape[0] < 2:
main_logger.info("No mixture found for tumour: %s", tumour_id)
return
# select where signatures != 0
sigs = []
mixture = []
for i in range(len(tumor_sig[1])):
# print(i)
if tumor_sig[1][i] != "0":
# print(tumor_sig[1][i])
sigs.append(tumor_sig[0][i])
mixture.append(tumor_sig[1][i])
for i in range(len(sigs)):
sigs[i] = "Signature " + sigs[i]
# here vcf_file_path is the path to validated files
vf = os.path.join(vcf_file_path, vcf_file)
# parse input validated file
validated_parser = ValidatedVCFParser(vf)
validated_variants = validated_parser._parse()
# from list.txt find corresponding vcf file
corresponding_file = ""
with open(file_list, "r") as list:
for line in list:
if tumour_id in line:
corresponding_file = line.strip()
break
if corresponding_file == "":
main_logger.info("Validated tumour: %s not found. Skipping to next tumour...", tumour_id)
return
corresponding_file = consensus_path + corresponding_file
# parse corresponding vcf file
cf_parser = VariantsFileParser(corresponding_file, chromatin_dict, mRNA_file, hg19_file, trinuc, mixture[1:],
alex_signature_file, sigs)
# get input data to the model
train, passed, notseen = cf_parser._get_input_data(validate=validated_variants)
print(train.shape)
print(passed.shape)
    # TODO bug: notseen matrix bug
print(notseen.shape)
# train the model using training data
train_data = cf_parser._get_features(train)
train_matrix = ProbModel(train_data)
train_matrix._fit()
# save the model to disk
filename = "./trained/" + tumour_id + "_trained.sav"
pickle.dump(train_matrix, open(filename, 'wb'))
# for negative data
notseen_data = cf_parser._get_features(notseen)
notseen_matrix = ProbModel(notseen_data)
# for positive data - PASS
pos_data = cf_parser._get_features(passed)
pos_matrix = ProbModel(pos_data)
# compute probabilities for train and test data
train_pa, train_pt, train_ps = \
train_matrix._predict_proba(train_matrix._mut,
train_matrix._tr_X,
train_matrix._strand_X,
train_matrix._strand)
notseen_pa, notseen_pt, notseen_ps = \
train_matrix._predict_proba(notseen_matrix._mut,
notseen_matrix._tr_X,
notseen_matrix._strand_X,
notseen_matrix._strand)
pos_pa, pos_pt, pos_ps = \
train_matrix._predict_proba(pos_matrix._mut,
pos_matrix._tr_X,
pos_matrix._strand_X,
pos_matrix._strand)
# write output to output directory
write_output_to_file(os.path.join(output_dir, tumour_id) + ".train.txt",
train_matrix._calculate_proba(train_pa, train_pt, train_ps))
write_output_to_file(os.path.join(output_dir, tumour_id) + ".pos.txt",
pos_matrix._calculate_proba(pos_pa, pos_pt, pos_ps))
write_output_to_file(os.path.join(output_dir, tumour_id) + ".neg.txt",
notseen_matrix._calculate_proba(notseen_pa, notseen_pt, notseen_ps))
if __name__ == '__main__':
# get logger
logging.config.fileConfig("src/logging.conf")
main_logger = logging.getLogger("main")
# all mixture in one file
mixture_overall = "./data/overall_exposures.sigsBeta2.csv"
    # logging configuration
parser = argparse.ArgumentParser(description='Predict mutation probabilities')
parser.add_argument("--validated", help="To run the model on validated (deep sequenced) tumours", required=False)
##################################################################
# this argument is used for running the pipeline on Boltzman
parser.add_argument('--group', default=-1,type=int, required=False)
args = parser.parse_args()
##################################################################
main_logger.info("output files will be saved into: %s", output_dir)
##################################################################
# Load feature files
feature_data = "./data/"
main_logger.info("Loading required feature files...")
try:
# main_logger.info("Loading required feature files...")
mRNA_file = load_pickle(os.path.join(feature_data, "mRNA.pickle"))
main_logger.info("mRNA loaded")
trinuc = load_pickle(os.path.join(feature_data,"trinucleotide.pickle"))
main_logger.info("trinuc loaded")
alex_signature_file = load_npy(os.path.join(feature_data,"signature.npy"))
main_logger.info("alex_signature loaded")
hg19_file = load_pickle(os.path.join(feature_data,"hg.pickle"))
main_logger.info("hg file loaded")
except Exception as error:
main_logger.exception("Please provide valid compiled feature data files.")
exit()
##################################################################
all_vcf = os.listdir(vcf_file_path)
    # following code is for parallelizing jobs
if args.group != -1:
group = args.group
GROUP_SIZE = 147 #change this based on cores
vcf_list = [all_vcf[i:i + GROUP_SIZE] for i in xrange(0, len(all_vcf), GROUP_SIZE)]
else:
group = 0
vcf_list = [all_vcf]
# end
##################################################################
    # read spreadsheet and run the model
spreadsheet = read_tumour_spreadsheet(tumour_type)
for line in spreadsheet:
# main_logger.info("================================================")
if args.validated == "True": # here we deal with validated tumours with different tumour id
tumour_id = line[3].split(":")[0].split("=")[1]
vcf_file = subprocess.check_output('find ' + vcf_file_path + ' -name ' + tumour_id + "*", shell=True)
tumour_id = line[0]
else:
tumour_id = line[0]
vcf_file = subprocess.check_output('find '+vcf_file_path+' -name '+tumour_id+"*",shell=True)
vcf_file = vcf_file.strip().split("//")[1]
if vcf_file =="":
main_logger.error("error processing spreadsheet...")
continue
main_logger.info("Processing tumour: %s", vcf_file)
main_logger.info("Tumour type: %s", line[1])
if line[2] == "N/A":
chromatin_file = 0
main_logger.info("No chromatin profile specified for this tumour")
else:
chromatin_file = os.path.join(chromatin_path, line[2]+".bed")
main_logger.info("chromatin profile: %s", chromatin_file)
chromatin_dict = read_chromatin(chromatin_file)
# run model on this vcf file
if args.validated == "True":
validated_file_main()
else:
single_file_main()
##################################################################
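# Hypothetical invocations (input/output locations come from conf.py and are
# assumptions, not part of the original script):
#     python src/main.py                    # run on consensus VCF files
#     python src/main.py --validated True   # run on validated (deep-sequenced) tumours
#     python src/main.py --group 0          # process one batch when parallelizing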
|
gpl-3.0
| 3,120,607,121,179,623,000 | 36.036145 | 124 | 0.595858 | false |
shyampurk/tto-bluemix
|
ttoBackground/ne_scikit.py
|
1
|
5386
|
from itertools import chain
from sklearn.neighbors import NearestNeighbors as nn
import pymongo
from pymongo import MongoClient
import datetime
import pytz
import logging
# this is for deprecation warnings,
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
LOG_FILENAME = 'TTOBackgroundLogs.log'
logging.basicConfig(filename=LOG_FILENAME,level=logging.DEBUG,format='%(asctime)s, %(levelname)s, %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
'''****************************************************************************************
Function Name : ne_scikitalgo (Algorithm operation)
Description : Function which does the scikit algorithm operation and gives the result
Parameters : ne_df (test data generated in testprep function)
****************************************************************************************'''
def ne_scikitalgo(ne_df):
# def ne_scikitalgo():
try:
uri ='mongodb://rajeevtto:[email protected]:35315,ds035315-a1.mongolab.com:35315/newttobackground?replicaSet=rs-ds035315'
client = MongoClient(uri)
newttobackground = client.newttobackground
logging.info('connected')
ne_cursor = newttobackground.ttoopvalcoll.find({"route":"NEWARK-EDISON"})
netestData = []
netrainData = []
nerealTime = []
time = []
for doc in ne_cursor:
netrainData.append([float(doc['Zone']),float(doc['Temparature'][0]),float(doc['Temparature'][1]),float(doc['CodedWeather'][0]),float(doc['CodedWeather'][1]),float(doc['CodedDay'])])
nerealTime.append(doc['realTime'])
for z,t1,t2,w1,w2,c,d in zip(ne_df['Zone'],ne_df['Temparature1'],ne_df['Temparature2'],ne_df['CodedWeather1'],ne_df['CodedWeather2'],ne_df['CodedDay'],ne_df['Date']):
netestData.append([float(z),float(t1),float(t2),float(w1),float(w2),c])
time.append(d)
logging.info("netrainData length %d"%(len(netrainData)))
logging.info("netestData length %d"%(len(netestData)))
neigh = nn(n_neighbors = 5)
neigh.fit(netrainData)
nn(algorithm = 'auto',metric = 'euclidean')
distances = []
indexs = []
data = []
for i in netestData:
data.append(neigh.kneighbors(i))
for i in range(len(data)):
distances.append(data[i][0])
indexs.append(data[i][1])
predicted_ind = []
predicted_val = [] # we are considering the realTime in this case
for i in range(len(indexs)):
predicted_ind.append(indexs[i][0])
new_predicted_ind = list(chain.from_iterable(predicted_ind))
for k in new_predicted_ind:
predicted_val.append(nerealTime[k]) # nerealTime is the list where training set realTime values stored
# seperating them as list of five for individual 5 neighbors
listoffive = []
for i in range(0,len(predicted_val),5):
listoffive.append(predicted_val[i:i+5])
prediction = []
for i in range(len(listoffive)):
prediction.append(listoffive[i][0])
predictioninmins = []
for i in prediction:
predictioninmins.append(float(i)/60.0)
docCount = newttobackground.ttoresultcoll.find({"route":"NEWARK-EDISON"}).count()
logging.info('NE -> before adding new results docCount %d'%(docCount))
'''for testing purpose im closing it i will comeback again'''
lowleveldelList = [] # for the below 6hrs range
highleveldelList = [] # for the regular update delete pupose
newarkedison_time = datetime.datetime.now(pytz.timezone('US/Eastern'))
newarkedison_dayname = newarkedison_time.strftime("%A")
newarkedison_hour = int(newarkedison_time.strftime("%H"))
newarkedison_minute = int(newarkedison_time.strftime("%M"))
newarkedison_second = int(newarkedison_time.strftime("%S"))
newarkedison_year = int(newarkedison_time.strftime("%Y"))
newarkedison_month = int(newarkedison_time.strftime("%m"))
newarkedison_day = int(newarkedison_time.strftime("%d"))
presentTime = datetime.datetime(newarkedison_year,newarkedison_month,newarkedison_day,newarkedison_hour,newarkedison_minute,newarkedison_second)
sixhrLimit = presentTime-datetime.timedelta(hours=6)
logging.info("ne six hours back time %s"%(str(sixhrLimit)))
highleveldelCursor = newttobackground.ttoresultcoll.find({"route":"NEWARK-EDISON","time" :{ "$gt":presentTime}})
lowleveldelCursor = newttobackground.ttoresultcoll.find({"route":"NEWARK-EDISON","time" :{ "$lt":sixhrLimit}})
for docid in highleveldelCursor:
highleveldelList.append(docid['_id'])
for docid in lowleveldelCursor:
lowleveldelList.append(docid['_id'])
combinedDelList = []
combinedDelList.extend(lowleveldelList)
combinedDelList.extend(highleveldelList)
logging.info("ne docs before sixhourslimit %d"%(len(lowleveldelList)))
logging.info("ne regular update doc length %d"%(len(highleveldelList)))
newttobackground.ttoresultcoll.remove({'_id':{"$in":combinedDelList}}) # Dangerous line
for i in range(len(time)):
doc = {
"route":"NEWARK-EDISON",
"time":time[i],
"predictioninsecs":prediction[i],
"predictioninmins":predictioninmins[i]
}
docid = newttobackground.ttoresultcoll.insert_one(doc)
del doc
docCount = newttobackground.ttoresultcoll.find({"route":"NEWARK-EDISON"}).count()
logging.info('NE -> after adding new results docCount %d'%(docCount))
return True
except Exception as e:
logging.error("The exception occured in ne_scikit %s,%s"%(e,type(e)))
return False
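# Hypothetical call (column names are inferred from the loop above; the pandas
# DataFrame and sample values are assumptions, and the MongoDB instance
# referenced in the function must be reachable):
#     import pandas as pd
#     ne_df = pd.DataFrame({'Zone': [3], 'Temparature1': [55.0], 'Temparature2': [60.0],
#                           'CodedWeather1': [1], 'CodedWeather2': [2],
#                           'CodedDay': [4], 'Date': [datetime.datetime.utcnow()]})
#     ne_scikitalgo(ne_df)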
|
mit
| 2,577,618,418,320,694,000 | 32.042945 | 184 | 0.688452 | false |
qedsoftware/commcare-hq
|
custom/ilsgateway/tanzania/handlers/zipline.py
|
1
|
2111
|
from corehq.apps.products.models import SQLProduct
from custom.ilsgateway.tanzania.handlers.keyword import KeywordHandler
from custom.ilsgateway.tanzania.reminders import INVALID_PRODUCT_CODE
from custom.zipline.api import ProductQuantity
class ParseError(Exception):
pass
class ZiplineGenericHandler(KeywordHandler):
help_message = None
error_message = None
def _check_quantities(self, quantities):
for quantity in quantities:
try:
int(quantity)
except ValueError:
raise ParseError(self.error_message)
def _check_product_codes(self, product_codes):
for product_code in product_codes:
try:
SQLProduct.objects.get(code=product_code, domain=self.domain)
except SQLProduct.DoesNotExist:
raise ParseError(INVALID_PRODUCT_CODE % {'product_code': product_code})
def parse_message(self, text):
text_split = text.split()
product_codes = text_split[::2]
quantities = text_split[1::2]
product_codes_len = len(product_codes)
if product_codes_len == 0 or product_codes_len != len(quantities):
raise ParseError(self.error_message)
self._check_quantities(quantities)
self._check_product_codes(product_codes)
return zip(product_codes, quantities)
def help(self):
self.respond(self.help_message)
return True
def send_success_message(self):
raise NotImplementedError()
def invoke_api_function(self, quantities_list):
raise NotImplementedError()
def handle(self):
content = self.msg.text.split(' ', 1)[1]
quantities_list = []
try:
parsed_report = self.parse_message(content)
except ParseError as e:
self.respond(e.message)
return True
for product_code, quantity in parsed_report:
quantities_list.append(ProductQuantity(product_code, quantity))
self.invoke_api_function(quantities_list)
self.send_success_message()
return True
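# Hypothetical concrete handler sketch (the class name, messages and API helper
# are assumptions, not part of the original module):
#     class ZiplineStockOnHandHandler(ZiplineGenericHandler):
#         help_message = "Send: soh <product_code> <quantity> ..."
#         error_message = "Invalid stock report format"
#         def send_success_message(self):
#             self.respond("Stock report received")
#         def invoke_api_function(self, quantities_list):
#             send_stock_report(self.domain, quantities_list)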
|
bsd-3-clause
| 8,946,865,580,194,183,000 | 30.984848 | 87 | 0.646139 | false |
thijsdezoete/keepmeposted
|
pypiwatch/pypi/models.py
|
1
|
1834
|
from django.db import models
import requests
from natsort import natsorted
class Package(models.Model):
name = models.CharField(max_length=200)
url = models.CharField(max_length=500, blank=True)
version = models.CharField(max_length=25, blank=True)
watchers = models.ManyToManyField('Watcher', through='PackageWatchers')
def latest_version_known(self, version=None):
_version = self.get_versions(latest=True)
if self.version != _version or _version != version:
return False
return True
def get_version(self, update=False):
version = self.get_versions(latest=True)
if self.version != version and update:
self.version = version
self.save()
return version
def get_versions(self, latest=False):
# TODO: Make compatible with other origins(self hosted pypi's)
url = "http://pypi.python.org/pypi/{package_name}/json".format(
package_name=self.name)
result = requests.get(url).json()
if latest:
return result['info']['version']
return natsorted(result['releases'].keys())
def __unicode__(self):
return self.__str__()
def __str__(self):
return self.name
class Watcher(models.Model):
email = models.CharField(max_length=300)
packages = models.ManyToManyField('Package', through='PackageWatchers')
def __unicode__(self):
return self.__str__()
def __str__(self):
return self.email
class PackageWatchers(models.Model):
watcher = models.ForeignKey(Watcher)
package = models.ForeignKey(Package)
def __unicode__(self):
return self.__str__()
def __str__(self):
return "{watcher}->{package}".format(watcher=self.watcher.email, package=self.package.name)
|
gpl-2.0
| 1,232,742,931,979,227,100 | 28.111111 | 99 | 0.629771 | false |
jlgoldman/writetogov
|
config/constants.py
|
1
|
2618
|
# Create a .env file with secret keys and such.
#
# Suggested default values for .env:
#
# DEBUG = True
# HOST = 'localhost:5000'
# HTTPS = False
# FLASK_SECRET_KEY = <generate once with os.urandom(24)>
# PUBLIC_ID_ENCRYPTION_KEY = <generate once with os.urandom(24)>
# GOOGLE_MAPS_API_KEY = <generate using steps below>
import os
import dotenv
dotenv_filename = dotenv.find_dotenv()
if dotenv_filename:
dotenv.load_dotenv(dotenv_filename)
def parse_bool(env_value):
return env_value is not None and env_value.lower() not in ('0', 'false')
DEBUG = parse_bool(os.environ.get('DEBUG')) # Set to True for development
HOST = os.environ.get('HOST') # Set to e.g. localhost:5000 or a local proxy like ngrok
HTTPS = True # Set to False if using localhost in development
PROJECTPATH = os.environ.get('PROJECTPATH')
FLASK_SECRET_KEY = os.environ.get('FLASK_SECRET_KEY') # Generate a local secret with import os; os.urandom(24)
PUBLIC_ID_ENCRYPTION_KEY = os.environ.get('PUBLIC_ID_ENCRYPTION_KEY') # Generate a local secret with import os; os.urandom(24)
APP_LOG_FILENAME = os.path.join(PROJECTPATH, 'app.log')
LOG_TO_STDOUT = parse_bool(os.environ.get('LOG_TO_STDOUT'))
SQLALCHEMY_DATABASE_URI = os.environ.get('SQLALCHEMY_DATABASE_URI')
TEMPLATE_ROOT = os.path.join(PROJECTPATH, 'templates')
STATIC_ROOT = os.path.join(PROJECTPATH, 'static')
PDF_FONT_FILE = os.path.join(PROJECTPATH, 'data/fonts/cmunrm.ttf')
REP_AUTOCOMPLETE_DATA_FNAME = os.path.join(PROJECTPATH, 'data/rep_autocomplete.20170210.json')
# Register for a Google Cloud Console project, go to the Credentials section,
# generate an API key, and enable Google Maps JavaScript API and
# Google Places API Web Service
GOOGLE_MAPS_API_KEY = os.environ.get('GOOGLE_MAPS_API_KEY')
PROPUBLICA_API_KEY = os.environ.get('PROPUBLICA_API_KEY') # Only needed for one-time imports of data, not needed for running the server.
SENDGRID_API_KEY = os.environ.get('SENDGRID_API_KEY') # Only needed if testing email sending for reminders and subscriptions
STRIPE_SECRET_KEY = os.environ.get('STRIPE_SECRET_KEY') # Only needed if testing billing for mailing letters using Lob
STRIPE_PUBLISHABLE_KEY = os.environ.get('STRIPE_PUBLISHABLE_KEY') # Only needed if testing billing for mailing letters using Lob
LOB_API_KEY = os.environ.get('LOB_API_KEY') # Only needed if testing Lob API calls for mailing letters.
INTERNAL_IPS = os.environ.get('INTERNAL_IPS', '').split(',')
MONITORING_NOTIFICATION_EMAILS = os.environ.get('MONITORING_NOTIFICATION_EMAILS', '').split(',')
def abspath(*path_elements):
return os.path.join(PROJECTPATH, *path_elements)
|
bsd-3-clause
| -1,489,657,184,676,276,200 | 43.372881 | 137 | 0.743316 | false |
skuda/client-python
|
kubernetes/client/models/v1_event_source.py
|
1
|
3611
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1EventSource(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, component=None, host=None):
"""
V1EventSource - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'component': 'str',
'host': 'str'
}
self.attribute_map = {
'component': 'component',
'host': 'host'
}
self._component = component
self._host = host
@property
def component(self):
"""
Gets the component of this V1EventSource.
Component from which the event is generated.
:return: The component of this V1EventSource.
:rtype: str
"""
return self._component
@component.setter
def component(self, component):
"""
Sets the component of this V1EventSource.
Component from which the event is generated.
:param component: The component of this V1EventSource.
:type: str
"""
self._component = component
@property
def host(self):
"""
Gets the host of this V1EventSource.
Node name on which the event is generated.
:return: The host of this V1EventSource.
:rtype: str
"""
return self._host
@host.setter
def host(self, host):
"""
Sets the host of this V1EventSource.
Node name on which the event is generated.
:param host: The host of this V1EventSource.
:type: str
"""
self._host = host
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
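# Hypothetical usage (field values are assumptions, not part of the generated
# module):
#     source = V1EventSource(component='kubelet', host='node-1')
#     source.to_dict()   # {'component': 'kubelet', 'host': 'node-1'}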
|
apache-2.0
| -6,035,318,909,030,842,000 | 24.792857 | 105 | 0.526447 | false |
inkfountain/learn-py-a-little
|
lesson_list/lesson.py
|
1
|
5017
|
# 1. Lists
'''
Lists are very common. For example, with the file-reading method covered earlier,
reading everything at once gives a list of strings, where each string is one line.
Here are some lists:
'''
str_list = ['ab', 'cd', 'e']
int_list = [1, 2, 3, 4, 5]
'''
List items can be data of any type, and the items may even have different types.
Here are some more complex examples:
'''
list_list = [[1,2], [3,4]]
tuple_list = [(5,6), (7,8)]
clutter_list = [1, 'a', [3,4], (5, 6)]
# 2. Getting a value from a list
'''
The data in a list is ordered. Counting from the first item, the positions are 0, 1, 2, ... and so on; this is called the index.
The index range depends on the length of the list. For example, `int_list` has length 5, so the index range is 0~4.
To get a value, append `[index]` after the list variable.
For example:
'''
print u'Value of the first item of int_list', int_list[0]
print u'Value of the second item of int_list', int_list[1]
'''
The indexes described above should be called `positive indexes`; the other kind is called `negative indexes`, which count from the end.
(`positive indexes` and `negative indexes` are both indexes)
For example, `int_list` has length 5, so positive indexes range over 0~4 and negative indexes range over -1~-5.
For example:
'''
print u'Value of the last item of int_list', int_list[-1]
print u'Value of the second-to-last item of int_list', int_list[-2]
'''
Counting forward goes from left to right, starting at 0; counting backward goes from right to left, starting at -1.
Here is a diagram (simple_list in the diagram has length n):
-------------------------------------------------------------------------------------
counting forward: 0 1 2 3 (n-3) (n-2) (n-1)
simple_list = ['a', 'b', 'c', 'd', . . . 'x', 'y', 'z']
counting backward: -(n) -(n-1) -(n-2) -(n-3) -3 -2 -1
---------------------------------after rewriting--------------------------------------
counting forward: 0 1 2 3 (n-3) (n-2) (n-1)
simple_list = ['a', 'b', 'c', 'd', . . . 'x', 'y', 'z']
counting backward: 0-n 1-n 2-n 3-n (n-3)-n (n-2)-n (n-1)-n
-------------------------------------------------------------------------------------
For a list of length n, if an item's positive index is x, then its negative index is x-n.
If you write a negative index in your code, the Python environment automatically adds the list length n to it at run time, turning it into a positive index; we do not need to worry about this.
'''
'''
The two indexes of the same list item refer to the same value, namely that item's value; negative indexes exist only for convenience of notation.
For example:
'''
print u'Get the value of the second-to-last item of `int_list`:'
print u'Using a positive index:', int_list[len(int_list) - 2] # the len function is covered in part 4 below
print u'Using a negative index:', int_list[-2]
'''
What if I want to change the value of one item?
For example:
'''
print 'Before modification: ', int_list[1]
int_list[1] = 100
print 'After modification: ', int_list[1]
# 3. Getting the values of several items in a list
'''
The above explains how to get the value of a single item by its index.
To get several consecutive values at once, append `[x:y+1]` after the list variable:
x: index of the first value
y: index of the last value
Two things to note here:
1. Getting a single item returns the concrete value, while getting several consecutive items returns a list.
2. The expression uses y+1, the index of the item right after the consecutive items you want; that item is not included in the result list.
You can think of the expression as a half-open interval --> the interval [x, y+1), where y+1 is not included.
'''
print u'I want the values of int_list from the second item to the fourth item (that is, from index 1 to index 3)'
print u'The values are:', int_list[1:4] # note that this is 3+1
'''
As mentioned before, at run time a negative index automatically has the list length n added to it and becomes a positive index,
so negative indexes can also be used when getting consecutive values.
'''
print u'I want the values of int_list from the second item to the second-to-last item (that is, from index 1 to index -2)'
print u'Using negative indexes:', int_list[1:-1] # compared with the line below, see how much more concise this is
print u'Using positive indexes:', int_list[1:len(int_list)-1]
# 4. Two useful functions
'''
So how long is a list?
Use the `len` function for this; len returns the length of a list.
'''
print u'The length of str_list is', len(str_list)
print u'The length of clutter_list is', len(clutter_list)
'''
What if I want a long increasing sequence of numbers? That is what the `range` function is for.
For example:
'''
list_a = range(100) # 0, 1, 2, ... 99
list_b = range(10, 100) # 10, 11, 12, ... 98, 99
list_c = range(10, 100, 2) # 10, 12, 14, ... 96, 98
'''
range(x) with one argument means from 0 up to x-1,
range(x, y) with two arguments means from x up to y-1
range(x, y, d) with three arguments means from x up to y-1, with each number d larger than the previous one
'''
# 5. Exercises
'''
1. Create a list whose values are between 1 and 100 and divisible by 3 (no for loop needed).
2. Create a list of the even numbers from 0 to 100, and pick out those divisible by 7 (requires a for loop, an if check, and today's material).
'''
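# One possible set of solutions to the exercises above (a sketch, not part of
# the original lesson):
#     exercise_1 = range(3, 101, 3)       # multiples of 3 between 1 and 100, no for loop
#     exercise_2 = []
#     for number in range(0, 101, 2):     # even numbers from 0 to 100
#         if number % 7 == 0:
#             exercise_2.append(number)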
|
gpl-2.0
| 5,786,600,061,030,252,000 | 20.832117 | 85 | 0.541291 | false |
Dipsingh/mpls-pce
|
pce_controller.py
|
1
|
4286
|
__author__ = 'dipsingh'
import json
import socket
import gevent
import pcep_handler
import te_controller
from gevent import monkey
monkey.patch_socket()
MAXCLIENTS = 10
PCEADDR='0.0.0.0'
PCEPORT=4189
def parse_config(pce_config_file):
SR_TE = False
TunnelName =''
ERO_LIST = list() # ERO List
TUNNEL_SRC_DST = list() #Tunnel Source and Destination
LSPA_PROPERTIES = list() #Setup Priority,Hold Priority,Local Protection Desired(0 means false)
SR_ERO_LIST = list() # ERO List with SR Labels
with open(pce_config_file) as data_file:
data= json.load(data_file)
for key in data:
if key == 'TunnelName':
TunnelName = data[key]
if key == 'SR-TE':
SR_TE = data[key]
if key == 'EndPointObject':
for endpoint in data[key]:
if endpoint == 'Tunnel_Source':
TUNNEL_SRC_DST.insert(0,data[key][endpoint])
if endpoint == 'Tunnel_Destination':
TUNNEL_SRC_DST.insert(1,data[key][endpoint])
if key == 'LSPA_Object':
for lspa_object in data[key]:
if lspa_object == 'Hold_Priority':
LSPA_PROPERTIES.insert(0,data[key][lspa_object])
if lspa_object == 'Setup_Priority':
LSPA_PROPERTIES.insert(1,data[key][lspa_object])
if lspa_object == 'FRR_Desired':
LSPA_PROPERTIES.insert(2,data[key][lspa_object])
if key == 'ERO_LIST':
for ero in data[key]:
for key_ip in ero:
ERO_LIST.append(key_ip)
if key == 'SR_ERO_LIST':
for ero in data[key]:
for sr_ip in ero:
SR_ERO_LIST.append((sr_ip,ero[sr_ip]))
return (SR_TE,str.encode(TunnelName),tuple(TUNNEL_SRC_DST),tuple(LSPA_PROPERTIES),tuple(ERO_LIST),tuple(SR_ERO_LIST))
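# Illustrative sketch of the PCE_Config.json layout that parse_config() above expects, inferred
# from the keys it reads; every address, label value, and the tunnel name below is a made-up example:
#
# {
#     "TunnelName": "PCE-LSP-1",
#     "SR-TE": false,
#     "EndPointObject": {"Tunnel_Source": "10.0.0.1", "Tunnel_Destination": "10.0.0.2"},
#     "LSPA_Object": {"Setup_Priority": 7, "Hold_Priority": 7, "FRR_Desired": 0},
#     "ERO_LIST": [{"10.0.12.2": 0}, {"10.0.23.2": 0}],
#     "SR_ERO_LIST": [{"10.0.12.2": 24001}, {"10.0.23.2": 24002}]
# }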
def send_ka(client_sock,pcep_context):
while True:
client_sock.send(pcep_context.generate_ka_msg())
gevent.sleep(pcep_context._ka_timer)
def pcc_handler(client_sock,sid,controller,parsed_results):
PCE_INIT_FlAG= True
PCE_UPD_FLAG = True
pcep_context = pcep_handler.PCEP(open_sid = sid)
print ("Received Client Request from ",client_sock[1])
msg_received = client_sock[0].recv(1000)
pcep_context.parse_recvd_msg(msg_received)
client_sock[0].send(pcep_context.generate_open_msg(30))
ka_greenlet = gevent.spawn(send_ka,client_sock[0],pcep_context)
while True:
msg= client_sock[0].recv(1000)
parsed_msg = pcep_context.parse_recvd_msg(msg)
result = controller.handle_pce_message(client_sock[1],parsed_msg)
if PCE_UPD_FLAG:
if result:
if result[0]!=None:
for key in result[1]:
pcep_msg_upd = pcep_context.generate_lsp_upd_msg(result[1][key],parsed_results[4])
print ("Sending PCC Update Request")
if pcep_msg_upd:
client_sock[0].send(pcep_msg_upd)
if PCE_INIT_FlAG:
if parsed_results[0]:
pcep_msg_init = pcep_context.generate_sr_lsp_inititate_msg(parsed_results[5],parsed_results[2],parsed_results[3],parsed_results[1])
print ("Creating SR TE Tunnel")
if pcep_msg_init:
client_sock[0].send(pcep_msg_init)
else:
pcep_msg_init = pcep_context.generate_lsp_inititate_msg(parsed_results[4],parsed_results[2],parsed_results[3],parsed_results[1])
print ("Creating TE Tunnel")
if pcep_msg_init:
client_sock[0].send(pcep_msg_init)
PCE_INIT_FlAG=False
client_sock[0].close()
def main ():
CURRENT_SID=0
parsed_results =parse_config('PCE_Config.json')
pce_server_sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
controller = te_controller.TEController()
pce_server_sock.bind((PCEADDR,PCEPORT))
pce_server_sock.listen(MAXCLIENTS)
while True:
client = pce_server_sock.accept()
gevent.spawn(pcc_handler,client,CURRENT_SID,controller,parsed_results)
CURRENT_SID += 1
if __name__ == '__main__':
main()
|
mit
| -5,891,687,146,532,034,000 | 39.056075 | 147 | 0.586328 | false |
Lewuathe/kaggle-repo
|
facebook-recruiting-iii-keyword-extraction/src/tagger.py
|
1
|
1811
|
# -*- coding: utf-8 -*-
import nltk
import sklearn
import csv
import re
import numpy as np
import os
import sys
if __name__ == "__main__":
#
# Training data from Train.csv
# Id, Title, Body, Tag
#
print "Reading start"
train_file = csv.reader(open("Train.csv", "rb"))
train_header = train_file.next()
test_file = csv.reader(open("Test.csv", "rb"))
test_header = test_file.next()
result_file = open("Result.csv", "w")
result_file.write('"Id","Tags"\n')
traindata = []
testdata = []
docs = []
print "Train Start"
i = 0
for data in train_file:
tokens = re.split(r"\W+", nltk.clean_html(data[2]))
#tokens = nltk.word_tokenize(nltk.clean_html(data[2]))
docs.append(tokens)
i += 1
if i > 100000:
break
print "Make collection start"
# Make the collection for calculating TF-IDF
collection = nltk.TextCollection(docs)
print "Testing data start"
for data in test_file:
title_tokens = nltk.word_tokenize(data[1])
tokens = re.split(r"\W+", nltk.clean_html(data[2]))
#tokens = nltk.word_tokenize(nltk.clean_html(data[2]))
for title_token in title_tokens:
for i in range(0, 10):
tokens.append(title_token)
uniqTokens = set(tokens)
tf_idf_scores = {}
for token in uniqTokens:
tf_idf_scores[token] = collection.tf_idf(token, tokens)
sorted_tf_idf_scores = sorted(tf_idf_scores.items(), key=lambda x:x[1])
keywords = [ k for k, v in sorted_tf_idf_scores if v > 0.1]
if len(keywords) <= 0:
keywords = [ sorted_tf_idf_scores[-1][0] ]
result_file.write("%s,\"%s\"\n" % (data[0], " ".join(keywords)))
|
mit
| 5,775,642,661,804,750,000 | 24.871429 | 79 | 0.561016 | false |
Superjom/sematic
|
utils.py
|
1
|
4223
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on Aug 21, 2013
@author: Chunwei Yan @ pkusz
@mail: [email protected]
'''
from __future__ import division
import os
import sys
pjoin = os.path.join
def get_type(w):
if '/' in w:
pos = w.index('/')
return w[pos+1:]
def get_word(w):
if '/' in w:
ws = w.split('/')
return ws[0]
else:
return w
def get_num_lines(path):
shell = os.popen('cat %s | wc -l' % path).read()
num = int(shell)
return num
def sparse_value2list(v, n, _min, _max):
#print _min, _max, v
step = (_max-_min) / n
#print step
v -= _min
nn = int(v/step) - 1
#print nn
_list = [0 for i in range(n)]
_list[nn] = 1
return _list
class Memoize:
def __init__(self, f):
self.f = f
self.memo = {}
def __call__(self, *args):
"""
if len(self.memo) > 50000:
raise Exception, "out of range"
"""
num_of_iter = args[0]
if num_of_iter == 0:
self.memo = {}
key = str(args)
if not key in self.memo:
self.memo[key] = self.f(*args)
return self.memo[key]
memoized = Memoize
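# Illustrative usage of the Memoize helper above (added for clarity, not part of the original
# module; `slow_add` is a made-up function). Results are cached under the string form of the
# arguments, and a call whose first argument is 0 clears the cache before computing:
#
# @memoized
# def slow_add(num_of_iter, a, b):
#     return a + b
#
# slow_add(0, 1, 2)  # cache reset (first argument is 0), then computed and stored
# slow_add(1, 1, 2)  # computed and stored
# slow_add(1, 1, 2)  # returned straight from the cache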
def substCost(x, y):
if x == y: return 0
else: return 2
@memoized
def minEditDistR(target, source):
i = len(target)
j = len(source)
if i == 0: return j
elif j==0: return i
return min(
minEditDistR(target[:i-1], source) + 1,
minEditDistR(target, source[:j-1])+1,
minEditDistR(target[:i-1], source[:j-1]) + substCost(source[j-1], target[i-1]))
@memoized
def minEditDistRe(num_iter, target, source):
if num_iter > 5000:
raise Exception, "out of iter_num"
i = len(target)
j = len(source)
if i == 0: return j
elif j==0: return i
return min(
minEditDistRe(num_iter+1, target[:i-1], source) + 1,
minEditDistRe(num_iter+1, target, source[:j-1])+1,
minEditDistRe(num_iter+1, target[:i-1], source[:j-1]) + substCost(source[j-1], target[i-1]))
#import bsddb
def bdb_open(path):
db = bsddb.btopen(path, 'c')
return db
import json
tojson = json.dumps
fromjson = json.loads
def args_check(num_args, usage_intro):
argv = sys.argv
len_args = len(argv[1:])
if type(num_args) == type( []) :
if not len_args in num_args:
print "=" * 50
print " usage: %s" % usage_intro
print "=" * 50
sys.exit(-1)
else:
if not len_args == num_args:
print "=" * 50
print " usage: %s" % usage_intro
print "=" * 50
sys.exit(-1)
return argv[1:]
class ArgsAction(object):
def __init__(self):
self.action = {}
def add_action(self, num_args, _type, action, info="", is_class=True):
self.action[_type] = (num_args, action, info, is_class)
def start(self):
if len(sys.argv) == 1:
print
print "Error: Unknown action"
print
sys.exit(-1)
_type = sys.argv[1]
if _type not in self.action:
print "Error: Unknown action"
else:
num_args, action, info, is_class = self.action[_type]
args = args_check(num_args+1, info)
if is_class:
c = action(*args[1:])
c()
else:
action(*args[1:])
class Dic(object):
def from_list(self, _list):
self.dic = list(set(_list))
def get(self, word):
return self.dic[word]
def tofile(self, path, encode=None):
"""
encode = (fencode, tencode)
"""
with open(path, 'w') as f:
c = ' '.join(self.dic)
if encode is not None:
c = c.decode(encode[0], 'ignore').encode(encode[1], 'ignore')
f.write(c)
def fromfile(self, path):
self.dic = {}
with open(path) as f:
c = f.read()
_list = c.split()
for i, w in enumerate(_list):
self.dic[w] = i
strip = lambda x: x.strip()
if __name__ == "__main__":
print sparse_value2list(2.5, 6, 0, 6)
|
mit
| 4,757,516,781,984,522,000 | 22.076503 | 104 | 0.505328 | false |
a-kirin/Dockerfiles
|
sample01/web/sample01/memos/views.py
|
1
|
1833
|
from django.utils import timezone
from django.http import HttpResponseRedirect, HttpResponse
from django.http import Http404
from django.shortcuts import get_object_or_404, render
from django.shortcuts import render
from django.template import loader
from django.urls import reverse
from django.views import generic
from .models import Memo
# Note: this __str__ appears to belong on the Memo model rather than at module level in views.py;
# as written here, `self` is never bound to a Memo instance.
def __str__(self):
    return self.memo_text
def index(request):
# return HttpResponse("Hello, world. You're at the memos index.")
# latest_memo_list = Memo.objects.order_by('-pub_date')[:5]
# output = ', '.join([q.memo_text for q in latest_memo_list])
# return HttpResponse(output)
latest_memo_list = Memo.objects.order_by('-pub_date')[:5]
# template = loader.get_template('memos/index.html')
context = {
'latest_memo_list': latest_memo_list,
}
# return HttpResponse(template.render(context, request))
return render(request, 'memos/index.html', context)
class IndexView(generic.ListView):
template_name = 'memos/index.html'
context_object_name = 'latest_memo_list'
def get_queryset(self):
return Memo.objects.order_by('-pub_date')[:5]
# def detail(request, memo_id):
# try:
# memo = Memo.objects.get(pk=memo_id)
# except Memo.DoesNotExist:
# raise Http404("Memo does not exist")
# memo = get_object_or_404(Memo, pk=memo_id)
# return HttpResponse("You're looking at memo %s." % memo_id)a
# return render(request, 'memos/detail.html', {'memo': memo})
class DetailView(generic.DetailView):
model = Memo
template_name = 'memos/detail.html'
def memo(request):
memo = Memo(memo_text=request.POST['text'], pub_date=timezone.now())
memo.save()
# return HttpResponse(request.POST['text'])
return HttpResponseRedirect(reverse('memos:detail', args=(memo.id,)))
|
mit
| -3,979,993,249,563,734,000 | 30.067797 | 73 | 0.688489 | false |
IfengAutomation/uitester
|
uitester/config.py
|
1
|
1548
|
import json
import os
from os.path import pardir
app_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), pardir)
config_file = os.path.join(app_dir, 'config')
class Config:
"""
Config class
Config.read() read config from json file, return a Config instance
Config.save() save settings in json file
"""
def __init__(self):
self.debug = False
self.target_package = "com.ifeng.at.testagent"
self.libs = os.path.abspath(os.path.join(app_dir, 'libs'))
self.port = 11800
self.images = os.path.abspath(os.path.join(app_dir, 'images'))
@classmethod
def read(cls):
if not os.path.exists(config_file):
return Config.make_default_config()
conf_json = json.loads(open(config_file, 'r').read())
conf = cls()
for k in conf_json:
setattr(conf, k, conf_json[k])
return conf
@staticmethod
def make_default_config():
conf = Config()
f = open(config_file, 'w')
conf_json_str = json.dumps(conf.__dict__, indent=4, ensure_ascii=False)
f.write(conf_json_str)
f.close()
return conf
@staticmethod
def clear_config():
if os.path.exists(config_file):
os.remove(config_file)
def save(self):
conf_str = json.dumps(self.__dict__, ensure_ascii=False, indent=4)
if not os.path.exists(config_file):
Config.make_default_config()
f = open(config_file, 'w')
f.write(conf_str)
f.close()
|
apache-2.0
| -8,947,084,162,272,649,000 | 27.145455 | 79 | 0.587209 | false |
mozilla/caravela
|
web/assets.py
|
1
|
1604
|
import os
from flask import Flask
from flask.ext.assets import Environment, Bundle
def init(app):
assets = Environment(app)
if app.config.get('CARAVELA_ENV') == 'production':
assets.debug=False
assets.auto_build=False
#else:
# assets.debug = True
assets.url = app.static_url_path
assets.register('common.js', Bundle(
'lib/jquery-1.9.1.min.js',
'lib/bootstrap.js',
'lib/modernizr-2.6.1.min.js',
'lib/underscore-min.js',
'lib/less-1.3.0.min.js',
'lib/jquery-ui-1.10.1.custom.min.js',
'lib/jquery.mousewheel.js',
'lib/handlebars-1.0.0.js',
'lib/ember-1.0.0.js',
'lib/ember-data.js',
'lib/ember-table.js',
'lib/d3.v3.min.js',
'lib/vega.js',
'lib/d3.geo.projection.min.js',
'lib/codemirror.js',
'lib/mode/javascript/javascript.js',
'js/app.js',
'js/routes/*.js',
'js/controllers/*.js',
'js/models/*.js',
'js/views/*.js',
#filters='rjsmin',
output='assets/common.js'
))
sass = Bundle(
'**/*.sass',
filters='compass',
output='assets/sass.css'
)
assets.register('sass.css', sass)
assets.register('common.css', Bundle(
sass,
'css/bootstrap.min.css',
'lib/codemirror.css',
'css/persona-buttons.css',
'css/style.css',
output='assets/common.css'
))
assets.config.update(dict(
jst_compiler = "Em.Handlebars.compile",
jst_namespace= "Em.TEMPLATES"
))
assets.register('app.handlebars', Bundle(
'templates/*.handlebars',
'templates/**/*.handlebars',
filters='jst',
output='assets/app.handlebars'
))
|
mpl-2.0
| -508,393,695,490,358,300 | 18.802469 | 52 | 0.609102 | false |
cigroup-ol/metaopt
|
metaopt/tests/integration/util/import_function.py
|
1
|
2111
|
# -*- coding: utf-8 -*-
"""
Test for the import function utility.
"""
# Future
from __future__ import absolute_import, division, print_function, \
unicode_literals, with_statement
# Third Party
import nose
# First Party
from metaopt.concurrent.invoker.util.determine_package import determine_package
from metaopt.concurrent.worker.util.import_function import import_function
def f():
return "My name is f."
class LocalClass(object):
"""Stub class as determination target."""
def foo_method(self):
"""Stub method as determination target."""
return "My name is foo_method."
def bar_method(self):
"""Stub method as determination target."""
return "My name is bar_method."
class TestImportFunction(object):
"""Tests for the import function utility."""
def test_import_local_function(self):
"""A function can be imported by its own package."""
import_function(determine_package(f))
assert f() == "My name is f."
def _test_import_local_class(self): # TODO
"""A function can be imported by the package of a class next to it."""
module = determine_package(LocalClass)
name = f.__name__
function = {"module": module, "name": name}
import_function(function)
assert f() == "My name is f."
def test_import_local_class(self):
"""A function can be imported by the package of a class next to it."""
import_function(determine_package(LocalClass))
assert f() == "My name is f."
def test_import_local_method(self):
"""A function can be imported by the package of a method next to it."""
import_function(determine_package(LocalClass().foo_method))
assert f() == "My name is f."
def test_import_local_methods(self):
"""Tests that two methods of the same class are in the same package."""
package_foo = determine_package(LocalClass().foo_method)
package_bar = determine_package(LocalClass().bar_method)
assert package_foo == package_bar
if __name__ == '__main__':
nose.runmodule()
|
bsd-3-clause
| -4,074,311,497,425,656,300 | 29.594203 | 79 | 0.644718 | false |
PinkInk/upylib
|
captive/captive1.py
|
1
|
1989
|
import network, socket
class CaptivePortal:
def __init__(self, essid, auth, filen):
self.essid = essid
        self.authmode = auth
self.filen = filen
self.ap = network.WLAN(network.AP_IF)
self.ip = ''
def start(self):
# setup AP
self.ap.active(True)
self.ap.config(essid=self.essid, authmode=self.authmode)
self.ip = self.ap.ifconfig()[0]
# setup DNS
self.udps = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.udps.setblocking(False)
self.udps.bind((self.ip, 53)) # don't bind to other interfaces
# setup HTTP
self.tcps = socket.socket()
self.tcps.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.tcps.settimeout(2)
        self.tcps.bind((self.ip, 80))
self.tcps.listen(1)
while True:
try:
# DNS
data, addr = self.udps.recvfrom(1024)
                self.udps.sendto(self.dnsResponse(data, self.ip), addr)
# HTTP
                con = self.tcps.accept()
except KeyboardInterrupt:
break
except:
pass
@staticmethod
def dnsResponse(data, ip):
if (data[2]>>3)&15 == 0: # std qry
domain = ''
ptr = 12
len = data[ptr]
while len != 0:
domain += data[ptr+1:ptr+len+1].decode('utf-8') + '.'
ptr += len+1
len = data[ptr]
if domain: # not an empty string
return data[:2] + b"\x81\x80" \
+ data[4:6]*2 \
+ b'\x00\x00\x00\x00' \
+ data[12:] \
+ b'\xc0\x0c\x00\x01\x00\x01\x00\x00\x00\x3c\x00\x04' \
+ bytes(map(int, ip.split('.')))
return b''
else:
return b''
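# Minimal usage sketch (added for illustration, not in the original file; the ESSID, auth mode and
# file name are made-up values, and start() as written above still leaves the HTTP reply unfinished):
if __name__ == '__main__':
    portal = CaptivePortal('open-portal', 1, 'index.html')
    portal.start()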
|
mit
| -8,181,752,270,712,772,000 | 27.014085 | 78 | 0.449975 | false |
tengqm/senlin-container
|
senlin/tests/functional/test_cluster_health.py
|
1
|
2983
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from senlin.tests.functional import api as test_api
from senlin.tests.functional import base
from senlin.tests.functional.utils import test_utils
class TestClusterHealth(base.SenlinFunctionalTest):
def setUp(self):
super(TestClusterHealth, self).setUp()
# Create profile
self.profile = test_api.create_profile(
self.client, test_utils.random_name('profile'),
test_utils.spec_nova_server)
def tearDown(self):
# Delete profile
test_api.delete_profile(self.client, self.profile['id'])
super(TestClusterHealth, self).tearDown()
def test_cluster_check_recover(self):
# Create cluster
desired_capacity = 3
min_size = 2
max_size = 5
cluster = test_api.create_cluster(self.client,
test_utils.random_name('cluster'),
self.profile['id'], desired_capacity,
min_size, max_size)
cluster = test_utils.wait_for_status(test_api.get_cluster, self.client,
cluster['id'], 'ACTIVE')
# Check cluster health status
action_id = test_api.action_cluster(self.client, cluster['id'],
'check')
test_utils.wait_for_status(test_api.get_action, self.client,
action_id, 'SUCCEEDED')
cluster = test_api.get_cluster(self.client, cluster['id'])
self.assertEqual('ACTIVE', cluster['status'])
# Perform cluster recovering operation
action_id = test_api.action_cluster(self.client, cluster['id'],
'recover')
test_utils.wait_for_status(test_api.get_action, self.client,
action_id, 'SUCCEEDED')
action_id = test_api.action_cluster(self.client, cluster['id'],
'recover',
{'operation': 'REBUILD'})
test_utils.wait_for_status(test_api.get_action, self.client,
action_id, 'SUCCEEDED')
# Delete cluster
test_api.delete_cluster(self.client, cluster['id'])
cluster = test_utils.wait_for_delete(test_api.get_cluster, self.client,
cluster['id'])
|
apache-2.0
| -7,870,152,927,161,468,000 | 44.892308 | 79 | 0.572913 | false |
vicky2135/lucious
|
oscar/lib/python2.7/site-packages/django_redis/client/default.py
|
1
|
18075
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import random
import socket
import warnings
import zlib
from collections import OrderedDict
from django.conf import settings
from django.core.cache.backends.base import DEFAULT_TIMEOUT, get_key_func
from django.core.exceptions import ImproperlyConfigured
from django.utils.encoding import smart_text
from redis.exceptions import ConnectionError
from redis.exceptions import ResponseError
# Compatibility with redis-py 2.10.x+
try:
from redis.exceptions import TimeoutError, ResponseError
_main_exceptions = (TimeoutError, ResponseError, ConnectionError, socket.timeout)
except ImportError:
_main_exceptions = (ConnectionError, socket.timeout)
from ..util import CacheKey, load_class, integer_types
from ..exceptions import ConnectionInterrupted, CompressorError
from .. import pool
class DefaultClient(object):
def __init__(self, server, params, backend):
self._backend = backend
self._server = server
self._params = params
self.reverse_key = get_key_func(params.get("REVERSE_KEY_FUNCTION") or
"django_redis.util.default_reverse_key")
if not self._server:
raise ImproperlyConfigured("Missing connections string")
if not isinstance(self._server, (list, tuple, set)):
self._server = self._server.split(",")
self._clients = [None] * len(self._server)
self._options = params.get("OPTIONS", {})
self._slave_read_only = self._options.get('SLAVE_READ_ONLY', True)
serializer_path = self._options.get("SERIALIZER", "django_redis.serializers.pickle.PickleSerializer")
serializer_cls = load_class(serializer_path)
compressor_path = self._options.get("COMPRESSOR", "django_redis.compressors.identity.IdentityCompressor")
compressor_cls = load_class(compressor_path)
self._serializer = serializer_cls(options=self._options)
        self._compressor = compressor_cls(options=self._options)
self.connection_factory = pool.get_connection_factory(options=self._options)
def __contains__(self, key):
return self.has_key(key)
def get_next_client_index(self, write=True, tried=()):
"""
Return a next index for read client.
This function implements a default behavior for
get a next read client for master-slave setup.
Overwrite this function if you want a specific
behavior.
"""
if tried and len(tried) < len(self._server):
not_tried = [i for i in range(0, len(self._server)) if i not in tried]
return random.choice(not_tried)
if write or len(self._server) == 1:
return 0
return random.randint(1, len(self._server) - 1)
def get_client(self, write=True, tried=(), show_index=False):
"""
Method used for obtain a raw redis client.
This function is used by almost all cache backend
operations for obtain a native redis client/connection
instance.
"""
index = self.get_next_client_index(write=write, tried=tried or [])
if self._clients[index] is None:
self._clients[index] = self.connect(index)
if show_index:
return self._clients[index], index
else:
return self._clients[index]
def connect(self, index=0):
"""
Given a connection index, returns a new raw redis client/connection
instance. Index is used for master/slave setups and indicates that
connection string should be used. In normal setups, index is 0.
"""
return self.connection_factory.connect(self._server[index])
def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None, client=None, nx=False, xx=False):
"""
Persist a value to the cache, and set an optional expiration time.
Also supports optional nx parameter. If set to True - will use redis setnx instead of set.
"""
nkey = self.make_key(key, version=version)
nvalue = self.encode(value)
if timeout is True:
warnings.warn("Using True as timeout value, is now deprecated.", DeprecationWarning)
timeout = self._backend.default_timeout
if timeout == DEFAULT_TIMEOUT:
timeout = self._backend.default_timeout
original_client = client
tried = []
while True:
try:
if not client:
client, index = self.get_client(write=True, tried=tried, show_index=True)
if timeout is not None:
if timeout > 0:
# Convert to milliseconds
timeout = int(timeout * 1000)
elif timeout <= 0:
if nx:
# Using negative timeouts when nx is True should
# not expire (in our case delete) the value if it exists.
# Obviously expire not existent value is noop.
timeout = None
else:
# redis doesn't support negative timeouts in ex flags
# so it seems that it's better to just delete the key
# than to set it and than expire in a pipeline
return self.delete(key, client=client, version=version)
return client.set(nkey, nvalue, nx=nx, px=timeout, xx=xx)
except _main_exceptions as e:
if not original_client and not self._slave_read_only and len(tried) < len(self._server):
tried.append(index)
client = None
continue
raise ConnectionInterrupted(connection=client, parent=e)
def incr_version(self, key, delta=1, version=None, client=None):
"""
Adds delta to the cache version for the supplied key. Returns the
new version.
"""
if client is None:
client = self.get_client(write=True)
if version is None:
version = self._backend.version
old_key = self.make_key(key, version)
value = self.get(old_key, version=version, client=client)
try:
ttl = client.ttl(old_key)
except _main_exceptions as e:
raise ConnectionInterrupted(connection=client, parent=e)
if value is None:
raise ValueError("Key '%s' not found" % key)
if isinstance(key, CacheKey):
new_key = self.make_key(key.original_key(), version=version + delta)
else:
new_key = self.make_key(key, version=version + delta)
self.set(new_key, value, timeout=ttl, client=client)
self.delete(old_key, client=client)
return version + delta
def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None, client=None):
"""
Add a value to the cache, failing if the key already exists.
Returns ``True`` if the object was added, ``False`` if not.
"""
return self.set(key, value, timeout, version=version, client=client, nx=True)
def get(self, key, default=None, version=None, client=None):
"""
Retrieve a value from the cache.
Returns decoded value if key is found, the default if not.
"""
if client is None:
client = self.get_client(write=False)
key = self.make_key(key, version=version)
try:
value = client.get(key)
except _main_exceptions as e:
raise ConnectionInterrupted(connection=client, parent=e)
if value is None:
return default
return self.decode(value)
def persist(self, key, version=None, client=None):
if client is None:
client = self.get_client(write=True)
key = self.make_key(key, version=version)
if client.exists(key):
client.persist(key)
def expire(self, key, timeout, version=None, client=None):
if client is None:
client = self.get_client(write=True)
key = self.make_key(key, version=version)
if client.exists(key):
client.expire(key, timeout)
def lock(self, key, version=None, timeout=None, sleep=0.1,
blocking_timeout=None, client=None):
if client is None:
client = self.get_client(write=True)
key = self.make_key(key, version=version)
return client.lock(key, timeout=timeout, sleep=sleep,
blocking_timeout=blocking_timeout)
def delete(self, key, version=None, prefix=None, client=None):
"""
Remove a key from the cache.
"""
if client is None:
client = self.get_client(write=True)
try:
return client.delete(self.make_key(key, version=version,
prefix=prefix))
except _main_exceptions as e:
raise ConnectionInterrupted(connection=client, parent=e)
def delete_pattern(self, pattern, version=None, prefix=None, client=None, itersize=None):
"""
Remove all keys matching pattern.
"""
if client is None:
client = self.get_client(write=True)
pattern = self.make_key(pattern, version=version, prefix=prefix)
kwargs = {'match': pattern, }
if itersize:
kwargs['count'] = itersize
try:
count = 0
for key in client.scan_iter(**kwargs):
client.delete(key)
count += 1
return count
except _main_exceptions as e:
raise ConnectionInterrupted(connection=client, parent=e)
def delete_many(self, keys, version=None, client=None):
"""
Remove multiple keys at once.
"""
if client is None:
client = self.get_client(write=True)
keys = [self.make_key(k, version=version) for k in keys]
if not keys:
return
try:
return client.delete(*keys)
except _main_exceptions as e:
raise ConnectionInterrupted(connection=client, parent=e)
def clear(self, client=None):
"""
Flush all cache keys.
"""
if client is None:
client = self.get_client(write=True)
try:
count = 0
for key in client.scan_iter("*"):
client.delete(key)
count += 1
return count
except _main_exceptions as e:
raise ConnectionInterrupted(connection=client, parent=e)
def decode(self, value):
"""
Decode the given value.
"""
try:
value = int(value)
except (ValueError, TypeError):
try:
value = self._compressor.decompress(value)
except CompressorError:
# Handle little values, chosen to be not compressed
pass
value = self._serializer.loads(value)
return value
def encode(self, value):
"""
Encode the given value.
"""
if isinstance(value, bool) or not isinstance(value, integer_types):
value = self._serializer.dumps(value)
value = self._compressor.compress(value)
return value
return value
def get_many(self, keys, version=None, client=None):
"""
Retrieve many keys.
"""
if client is None:
client = self.get_client(write=False)
if not keys:
return {}
recovered_data = OrderedDict()
new_keys = [self.make_key(k, version=version) for k in keys]
map_keys = dict(zip(new_keys, keys))
try:
results = client.mget(*new_keys)
except _main_exceptions as e:
raise ConnectionInterrupted(connection=client, parent=e)
for key, value in zip(new_keys, results):
if value is None:
continue
recovered_data[map_keys[key]] = self.decode(value)
return recovered_data
def set_many(self, data, timeout=DEFAULT_TIMEOUT, version=None, client=None):
"""
Set a bunch of values in the cache at once from a dict of key/value
pairs. This is much more efficient than calling set() multiple times.
If timeout is given, that timeout will be used for the key; otherwise
the default cache timeout will be used.
"""
if client is None:
client = self.get_client(write=True)
try:
pipeline = client.pipeline()
for key, value in data.items():
self.set(key, value, timeout, version=version, client=pipeline)
pipeline.execute()
except _main_exceptions as e:
raise ConnectionInterrupted(connection=client, parent=e)
def _incr(self, key, delta=1, version=None, client=None):
if client is None:
client = self.get_client(write=True)
key = self.make_key(key, version=version)
try:
try:
# if key expired after exists check, then we get
# key with wrong value and ttl -1.
# use lua script for atomicity
lua = """
local exists = redis.call('EXISTS', KEYS[1])
if (exists == 1) then
return redis.call('INCRBY', KEYS[1], ARGV[1])
else return false end
"""
value = client.eval(lua, 1, key, delta)
if value is None:
raise ValueError("Key '%s' not found" % key)
except ResponseError:
# if cached value or total value is greater than 64 bit signed
# integer.
# elif int is encoded. so redis sees the data as string.
# In this situations redis will throw ResponseError
# try to keep TTL of key
timeout = client.ttl(key)
# returns -2 if the key does not exist
# means, that key have expired
if timeout == -2:
raise ValueError("Key '%s' not found" % key)
value = self.get(key, version=version, client=client) + delta
self.set(key, value, version=version, timeout=timeout,
client=client)
except _main_exceptions as e:
raise ConnectionInterrupted(connection=client, parent=e)
return value
def incr(self, key, delta=1, version=None, client=None):
"""
Add delta to value in the cache. If the key does not exist, raise a
ValueError exception.
"""
return self._incr(key=key, delta=delta, version=version, client=client)
def decr(self, key, delta=1, version=None, client=None):
"""
Decreace delta to value in the cache. If the key does not exist, raise a
ValueError exception.
"""
return self._incr(key=key, delta=-delta, version=version,
client=client)
def ttl(self, key, version=None, client=None):
"""
Executes TTL redis command and return the "time-to-live" of specified key.
If key is a non volatile key, it returns None.
"""
if client is None:
client = self.get_client(write=False)
key = self.make_key(key, version=version)
if not client.exists(key):
return 0
t = client.ttl(key)
if t >= 0:
return t
elif t == -1:
return None
elif t == -2:
return 0
else:
# Should never reach here
return None
def has_key(self, key, version=None, client=None):
"""
Test if key exists.
"""
if client is None:
client = self.get_client(write=False)
key = self.make_key(key, version=version)
try:
return client.exists(key)
except _main_exceptions as e:
raise ConnectionInterrupted(connection=client, parent=e)
def iter_keys(self, search, itersize=None, client=None, version=None):
"""
Same as keys, but uses redis >= 2.8 cursors
for make memory efficient keys iteration.
"""
if client is None:
client = self.get_client(write=False)
pattern = self.make_key(search, version=version)
for item in client.scan_iter(match=pattern, count=itersize):
item = smart_text(item)
yield self.reverse_key(item)
def keys(self, search, version=None, client=None):
"""
Execute KEYS command and return matched results.
Warning: this can return huge number of results, in
this case, it strongly recommended use iter_keys
for it.
"""
if client is None:
client = self.get_client(write=False)
pattern = self.make_key(search, version=version)
try:
encoding_map = [smart_text(k) for k in client.keys(pattern)]
return [self.reverse_key(k) for k in encoding_map]
except _main_exceptions as e:
raise ConnectionInterrupted(connection=client, parent=e)
def make_key(self, key, version=None, prefix=None):
if isinstance(key, CacheKey):
return key
if prefix is None:
prefix = self._backend.key_prefix
if version is None:
version = self._backend.version
return CacheKey(self._backend.key_func(key, prefix, version))
def close(self, **kwargs):
if getattr(settings, "DJANGO_REDIS_CLOSE_CONNECTION", False):
for c in self.client.connection_pool._available_connections:
c.disconnect()
del self._client
|
bsd-3-clause
| 1,254,981,264,407,704,600 | 33.168242 | 113 | 0.575602 | false |
vyzyv/numpp
|
tests/speed/utilities/average_time.py
|
1
|
1246
|
import re
import argparse
pattern = re.compile('INFO:root:(.*)')
def parse_times(log, function):
times = []
with open(log) as text:
for line in text:
times.append(function(line))
return times;
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('numpp')
parser.add_argument('other')
parser.add_argument('--message', '-m', required=True,
help='''What kind of log is being measured, e.g. compilation, runtime''')
args = parser.parse_args()
function = lambda line: float(pattern.findall(line)[0]) if args.message.upper() == 'COMPILATION' else float(line)
numpp = parse_times(args.numpp, function)
other = parse_times(args.other, function)
print("-------------------")
print(" NUMPP ")
print("-------------------\n")
print("{:s} average time in nanoseconds: {:f}".format(args.message, sum(numpp)/len(numpp)))
print("Based on {:d} cases\n".format(len(numpp)))
print("-------------------")
print(" OTHER ")
print("-------------------\n")
print("{:s} average time in nanoseconds: {:f}".format(args.message, sum(other)/len(other)))
print("Based on {:d} cases\n".format(len(other)))
|
mit
| 6,729,755,695,857,442,000 | 34.6 | 117 | 0.572231 | false |
ornlneutronimaging/iBeatles
|
ibeatles/table_dictionary/table_dictionary_handler.py
|
1
|
15710
|
from qtpy.QtWidgets import QFileDialog
import numpy as np
import os
import pyqtgraph as pg
import pandas as pd
import ibeatles.fitting.fitting_handler
from ibeatles.utilities.array_utilities import get_min_max_xy
# from ibeatles.utilities.math_tools import get_random_value
class TableDictionaryHandler(object):
selected_color = {'pen': (0, 0, 0, 30),
'brush': (0, 255, 0, 150)}
lock_color = {'pen': (0, 0, 0, 30),
'brush': (255, 0, 0, 240)}
header = ['x0', 'y0', 'x1', 'y1', 'row_index', 'column_index', 'lock', 'active',
'fitting_confidence', 'd_spacing_value', 'd_spacing_err', 'd_spacing_fixed',
'sigma_value', 'sigma_err', 'sigma_fixed',
'intensity_value', 'intensity_err', 'intensity_fixed',
'alpha_value', 'alpha_err', 'alpha_fixed',
'a1_value', 'a1_err', 'a1_fixed',
'a2_value', 'a2_err', 'a2_fixed',
'a5_value', 'a5_err', 'a5_fixed',
'a6_value', 'a6_err', 'a6_fixed']
def __init__(self, parent=None):
self.parent = parent
def fill_table_with_variable(self, variable_name='d_spacing', value=np.NaN, list_keys=[], all_keys=False):
table_dictionary = self.parent.table_dictionary
if all_keys:
list_keys = table_dictionary.keys()
for _key in list_keys:
table_dictionary[_key][variable_name]['val'] = value
self.parent.table_dictionary = table_dictionary
def populate_table_dictionary_entry(self, index=0, array=[]):
table_dictionary = self.parent.table_dictionary
table_dictionary[str(index)] = {'bin_coordinates': {'x0': array[0],
'x1': array[2],
'y0': array[1],
'y1': array[3]},
'selected_item': None,
'locked_item': None,
'row_index': array[4],
'column_index': array[5],
'selected': False,
'lock': array[6],
'active': array[7],
'fitting_confidence': array[8],
'd_spacing': {'val': array[9],
'err': array[10],
'fixed': array[11]},
'sigma': {'val': array[12],
'err': array[13],
'fixed': array[14]},
'intensity': {'val': array[15],
'err': array[16],
'fixed': array[17]},
'alpha': {'val': array[18],
'err': array[19],
'fixed': array[20]},
'a1': {'val': array[21],
'err': array[22],
'fixed': array[23]},
'a2': {'val': array[24],
'err': array[25],
'fixed': array[26]},
'a5': {'val': array[27],
'err': array[28],
'fixed': array[29]},
'a6': {'val': array[30],
'err': array[31],
'fixed': array[32]},
}
self.parent.table_dictionary = table_dictionary
def create_table_dictionary(self):
'''
this will define the corner position and index of each cell
'''
if len(np.array(self.parent.data_metadata['normalized']['data_live_selection'])) == 0:
return
if not self.parent.table_dictionary == {}:
return
bin_size = self.parent.binning_bin_size
pos = self.parent.binning_line_view['pos']
# calculate outside real edges of bins
min_max_xy = get_min_max_xy(pos)
from_x = min_max_xy['x']['min']
to_x = min_max_xy['x']['max']
from_y = min_max_xy['y']['min']
to_y = min_max_xy['y']['max']
table_dictionary = {}
_index = 0
_index_col = 0
for _x in np.arange(from_x, to_x, bin_size):
_index_row = 0
for _y in np.arange(from_y, to_y, bin_size):
_str_index = str(_index)
table_dictionary[_str_index] = {'bin_coordinates': {'x0': np.NaN,
'x1': np.NaN,
'y0': np.NaN,
'y1': np.NaN},
'selected_item': None,
'locked_item': None,
'row_index': _index_row,
'column_index': _index_col,
'selected': False,
'lock': False,
'active': False,
'fitting_confidence': np.NaN,
'd_spacing': {'val': np.NaN,
'err': np.NaN,
'fixed': False},
'sigma': {'val': np.NaN,
'err': np.NaN,
'fixed': False},
'intensity': {'val': np.NaN,
'err': np.NaN,
'fixed': False},
'alpha': {'val': np.NaN,
'err': np.NaN,
'fixed': False},
'a1': {'val': np.NaN,
'err': np.NaN,
'fixed': False},
'a2': {'val': np.NaN,
'err': np.NaN,
'fixed': False},
'a5': {'val': np.NaN,
'err': np.NaN,
'fixed': False},
'a6': {'val': np.NaN,
'err': np.NaN,
'fixed': False},
}
table_dictionary[_str_index]['bin_coordinates']['x0'] = _x
table_dictionary[_str_index]['bin_coordinates']['x1'] = _x + bin_size
table_dictionary[_str_index]['bin_coordinates']['y0'] = _y
table_dictionary[_str_index]['bin_coordinates']['y1'] = _y + bin_size
# create the box to show when bin is selected
selection_box = pg.QtGui.QGraphicsRectItem(_x, _y,
bin_size,
bin_size)
selection_box.setPen(pg.mkPen(self.selected_color['pen']))
selection_box.setBrush(pg.mkBrush(self.selected_color['brush']))
table_dictionary[_str_index]['selected_item'] = selection_box
# create the box to show when bin is locked
lock_box = pg.QtGui.QGraphicsRectItem(_x, _y,
bin_size,
bin_size)
lock_box.setPen(pg.mkPen(self.lock_color['pen']))
lock_box.setBrush(pg.mkBrush(self.lock_color['brush']))
table_dictionary[_str_index]['locked_item'] = lock_box
_index += 1
_index_row += 1
_index_col += 1
self.parent.table_dictionary = table_dictionary
# self.parent.fitting_ui.table_dictionary = table_dictionary
self.parent.fitting_selection['nbr_row'] = _index_row
self.parent.fitting_selection['nbr_column'] = _index_col
def full_table_selection_tool(self, status=True):
table_dictionary = self.parent.table_dictionary
for _index in table_dictionary:
_item = table_dictionary[_index]
_item['active'] = status
table_dictionary[_index] = _item
self.parent.table_dictionary = table_dictionary
def unselect_full_table(self):
self.full_table_selection_tool(status=False)
def select_full_table(self):
self.full_table_selection_tool(status=True)
def get_average_parameters_activated(self):
table_dictionary = self.parent.table_dictionary
d_spacing = []
alpha = []
sigma = []
a1 = []
a2 = []
a5 = []
a6 = []
for _index in table_dictionary.keys():
_entry = table_dictionary[_index]
if _entry['active']:
_d_spacing = _entry['d_spacing']['val']
_alpha = _entry['alpha']['val']
_sigma = _entry['sigma']['val']
_a1 = _entry['a1']['val']
_a2 = _entry['a2']['val']
_a5 = _entry['a5']['val']
_a6 = _entry['a6']['val']
d_spacing.append(_d_spacing)
alpha.append(_alpha)
sigma.append(_sigma)
a1.append(_a1)
a2.append(_a2)
a5.append(_a5)
a6.append(_a6)
mean_d_spacing = self.get_mean_value(d_spacing)
mean_alpha = self.get_mean_value(alpha)
mean_sigma = self.get_mean_value(sigma)
mean_a1 = self.get_mean_value(a1)
mean_a2 = self.get_mean_value(a2)
mean_a5 = self.get_mean_value(a5)
mean_a6 = self.get_mean_value(a6)
return {'d_spacing': mean_d_spacing,
'alpha': mean_alpha,
'sigma': mean_sigma,
'a1': mean_a1,
'a2': mean_a2,
'a5': mean_a5,
'a6': mean_a6}
def get_mean_value(self, array=[]):
if array == []:
return np.NaN
else:
return np.nanmean(array)
def import_table(self):
default_file_name = str(self.parent.ui.normalized_folder.text()) + '_fitting_table.csv'
table_file = str(QFileDialog.getOpenFileName(self.parent,
'Define Location and File Name Where to Export the Table!',
os.path.join(self.parent.normalized_folder, default_file_name)))
if table_file:
pandas_data_frame = pd.read_csv(table_file)
o_table = TableDictionaryHandler(parent=self.parent)
numpy_table = pandas_data_frame.values
# loop over each row in the pandas data frame
for _index, _row_values in enumerate(numpy_table):
o_table.populate_table_dictionary_entry(index=_index,
array=_row_values)
o_fitting = ibeatles.fitting.fitting_handler.FittingHandler(parent=self.parent)
o_fitting.fill_table()
def export_table(self):
default_file_name = str(self.parent.ui.normalized_folder.text()) + '_fitting_table.csv'
table_file = str(QFileDialog.getSaveFileName(self.parent,
'Select or Define Name of File!',
default_file_name,
"CSV (*.csv)"))
if table_file:
table_dictionary = self.parent.table_dictionary
o_table_formated = FormatTableForExport(table=table_dictionary)
pandas_data_frame = o_table_formated.pandas_data_frame
header = self.header
pandas_data_frame.to_csv(table_file, header=header)
class FormatTableForExport(object):
pandas_data_frame = []
def __init__(self, table={}):
pandas_table = []
for _key in table:
_entry = table[_key]
x0 = _entry['bin_coordinates']['x0']
y0 = _entry['bin_coordinates']['y0']
x1 = _entry['bin_coordinates']['x1']
y1 = _entry['bin_coordinates']['y1']
row_index = _entry['row_index']
column_index = _entry['column_index']
lock = _entry['lock']
active = _entry['active']
fitting_confidence = _entry['fitting_confidence']
[d_spacing_val,
d_spacing_err,
d_spacing_fixed] = self.get_val_err_fixed(_entry['d_spacing'])
[sigma_val,
sigma_err,
sigma_fixed] = self.get_val_err_fixed(_entry['sigma'])
[intensity_val,
intensity_err,
intensity_fixed] = self.get_val_err_fixed(_entry['intensity'])
[alpha_val,
alpha_err,
alpha_fixed] = self.get_val_err_fixed(_entry['alpha'])
[a1_val,
a1_err,
a1_fixed] = self.get_val_err_fixed(_entry['a1'])
[a2_val,
a2_err,
a2_fixed] = self.get_val_err_fixed(_entry['a2'])
[a5_val,
a5_err,
a5_fixed] = self.get_val_err_fixed(_entry['a5'])
[a6_val,
a6_err,
a6_fixed] = self.get_val_err_fixed(_entry['a6'])
_row = [x0, x1, y0, y1,
row_index, column_index,
lock, active,
fitting_confidence,
d_spacing_val, d_spacing_err, d_spacing_fixed,
sigma_val, sigma_err, sigma_fixed,
intensity_val, intensity_err, intensity_fixed,
alpha_val, alpha_err, alpha_fixed,
a1_val, a1_err, a1_fixed,
a2_val, a2_err, a2_fixed,
a5_val, a5_err, a5_fixed,
a6_val, a6_err, a6_fixed,
]
pandas_table.append(_row)
pandas_data_frame = pd.DataFrame.from_dict(pandas_table)
self.pandas_data_frame = pandas_data_frame
def get_val_err_fixed(self, item):
return [item['val'], item['err'], item['fixed']]
|
mit
| 1,044,178,349,400,479,100 | 42.39779 | 117 | 0.400509 | false |
gtagency/roscorobot
|
Applications/corobot_pid/node/corobot_pid_planner.py
|
1
|
2508
|
#!/usr/bin/env python
import roslib; roslib.load_manifest('corobot_pid')
import rospy
from std_msgs.msg import Float64
from std_msgs.msg import Int32
from corobot_msgs.msg import MotorCommand
import cv2
import cv
import numpy as np
import math
import time
from random import randint
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import Image
from math import sin
# Very Very simple planner turns the wheels on when it sees green
# and turns the wheels off when it doesnt
# Hardcoded values for green android folder
LOW_HSV = [60, 50, 50]
HIGH_HSV = [90, 255, 255]
#GLOBAL IMAGES
hsv_img = None
bin_img = None
cvbridge = CvBridge()
target_velocity_pub = None
# Get binary thresholded image
# low_HSV, hi_HSV - low, high range values for threshold as a list [H,S,V]
# debug= True to display the binary image generated
def get_binary(src_img, low_HSV, hi_HSV, debug=False):
global hsv_img, bin_img
#convert to HSV
hsv_img = cv2.cvtColor(src_img, cv.CV_BGR2HSV)
#generate binary image
lower = np.array(low_HSV)
higher = np.array(hi_HSV)
bin_img = cv2.inRange(hsv_img, lower, higher)
if debug:
cv2.namedWindow("Binary")
cv2.imshow("Binary",bin_img)
cv2.waitKey(0)
cv2.destroyWindow("Binary")
return bin_img
def receiveImage(data):
global target_velocity_pub, cvbridge
try:
cv_image = cvbridge.imgmsg_to_cv(data, "bgr8")
except CvBridgeError, e:
print e
arr = np.asarray(cv_image)
bin_img = get_binary(arr, LOW_HSV, HIGH_HSV)
imgSize = np.shape(bin_img)
# NOTE: Assumes an accurate color/marker detection at the very top row of the image
start = -1
end = -1
row = 0
for j in range(imgSize[1]):
if start < 0 and bin_img[row,j] != 0:
start = j
if end < 0 and start >= 0 and bin_img[row,j] == 0:
end = j
if (start >= 0 and end >= 0):
break
target_vel = 0
print start, end
if start >= 0 or end >= 0:
target_vel = 50
else:
print "No line detected, error_rad = 0"
target_velocity_pub.publish(target_vel)
def node():
global target_velocity_pub
rospy.init_node('cplan')
rospy.Subscriber('image_raw', Image, receiveImage)
target_velocity_pub = rospy.Publisher('target_velocity', Int32)
print "Ready to control the robot"
rospy.spin()
if __name__ == '__main__':
try:
node()
except rospy.ROSInterruptException:
pass
|
gpl-2.0
| -4,663,933,212,038,569,000 | 24.591837 | 87 | 0.650718 | false |
djpugh/pyunits
|
units.py
|
1
|
40152
|
import unittest
from re import compile
class UnitError(Exception):
__doc__="""Exception raised for unit errors."""
class Unit(float):
_prefixes={'G':1000000000.0,'M':1000000.0,'K':1000.0,'k':1000.0,'d':0.1,'c':0.01,'m':0.001,'n':0.000000001}
_units={'m':{'SIVAL':1.0,'TYPE':'Length'},'ft':{'SIVAL':0.3048,'TYPE':'Length'},'s':{'SIVAL':1.0,'TYPE':'Time'},'min':{'SIVAL':60.0,'TYPE':'Time'},'kg':{'SIVAL':1.0,'TYPE':'Mass'},
'g':{'SIVAL':0.001,'TYPE':'Mass'},'lb':{'SIVAL':2.2046226,'TYPE':'Mass'},'C':{'SIVAL':1.0,'TYPE':'Charge'},'hr':{'SIVAL':3600.0, 'TYPE':'Time'},'miles':{'SIVAL':1609.344,'TYPE':'Length'}}
_compoundUnits={'Ohm':{'SIVAL':1.0,'UNITS':'kg*m**2/s*C**2'},'A':{'SIVAL':1.0,'UNITS':'C/s'},'J':{'SIVAL':1.0,'UNITS':'kg*m**2/s**2'},'N':{'SIVAL':1.0,'UNITS':'kg*m/s**2'},'V':{'SIVAL':1.0,'UNITS':'kg*m**2/C*s**2'}}
_separators={'MULTIPLY':'\*\*|\^|\*','DIVIDE':'\/'}
def __new__(cls,value,units=False):
self=float.__new__(cls,value)
if units:
self.setUnits(units)
else:
self.__setattr__('units',False)
self.__setattr__('order',False)
return self
def __float__(self):
return super(Unit,self).__float__()
def __format__(self,formatSpec):
import re
r=re.compile("\.?([<>=\^]?)([\d\.]*)([FfgGeEn]?)([ _]?)(.*)")
m=r.match(formatSpec)
formatSpec=m.group(1)+m.group(2)+m.group(3)
if formatSpec=='.':
formatSpec=''
showUnits=True
if len(m.group(5)):
showUnits=m.group(5)[-1]!='A'
newUnits=m.group(5).rstrip('a')
newUnits=newUnits.rstrip('A')
space=''
if len(m.group(4)):
space=' '
newUnits=newUnits.strip()
if newUnits!='':
if not showUnits:
formatSpec+='A'
else:
formatSpec+=space+'a'
return self.convert(newUnits).__format__(formatSpec)
if self.units and showUnits:
return super(Unit,self).__format__(formatSpec)+space+self.units
return super(Unit,self).__format__(formatSpec)
def setUnits(self,units):
multiply=compile(self._separators['MULTIPLY'])
divide=compile(self._separators['DIVIDE'])
actUnit=dict(zip(['Numerator','Denominator'],[multiply.split('*'.join(divide.split(units)[::2])),multiply.split('*'.join(divide.split(units)[1::2]))]))
#Determine values for the order and scaling of the units
order,scaling=self.combine(self.unitParse(actUnit['Numerator']),self.unitParse(actUnit['Denominator']))
self.__setattr__('units',units)
self.__setattr__('order',order)
def invert(self):
invertedUnits=False
if self.units:
multiply=compile(self._separators['MULTIPLY'])
divide=compile(self._separators['DIVIDE'])
actUnit=dict(zip(['Numerator','Denominator'],[multiply.split('*'.join(divide.split(self.units)[::2])),multiply.split('*'.join(divide.split(self.units)[1::2]))]))
if len(divide.split(self.units)[1::2]):
invertedUnits='/'.join(['*'.join(divide.split(self.units)[1::2]),'*'.join(divide.split(self.units)[::2])])
else:
invertedUnits='**-1*'.join(multiply.split('*'.join(divide.split(self.units)[::2])))+'**-1'
return self.__new__(self.__class__,1.0/float(self),invertedUnits)
def __eq__(self,other):
if type(other)==type(self):
#check order and then convert to unit
if not other.order or self.compare(other.order,self.order):
return super(Unit,self).__eq__(other.convert(self.units))
return False
else:
return super(Unit,self).__eq__(other)
def __ne__(self,other):
return not self==other
def __add__(self,other):
if type(other)==type(self):
#check order and then convert to unit
if not self.order:
return self.__new__(self.__class__,float(self)+float(other),other.units)
if not other.order or self.compare(other.order,self.order):
return self.__new__(self.__class__,super(Unit,self).__add__(other.convert(self.units)),self.units)
raise UnitError('Dimensionality of units does not match')
else:
return self.__new__(self.__class__,super(Unit,self).__add__(other),self.units)
def __repr__(self):
if self.units:
return super(Unit,self).__repr__()+' '+self.units
return super(Unit,self).__repr__()
def __sub__(self,other):
if type(other)==type(self):
#check order and then convert to unit
if not self.order:
return self.__new__(self.__class__,float(self)-float(other),other.units)
if not other.order or self.compare(other.order,self.order):
return self.__new__(self.__class__,super(Unit,self).__sub__(other.convert(self.units)),self.units)
raise UnitError('Dimensionality of units does not match')
else:
return Unit(super(Unit,self).__sub__(other),self.units)
def __mul__(self,other):
if type(other)==type(self):
#check order and then convert to unit
if not self.order:
return other*float(self)
if not other.order or self.compare(other.order,self.order) and self.order:
return self.__new__(self.__class__,super(Unit,self).__mul__(other.convert(self.units)),self.units)
elif other.order:
newValue=float(self)*float(other)
units=self.units.split('/')[0]+'*'+other.units.split('/')[0]
if len(self.units.split('/'))>1 and len(other.units.split('/'))>1:
units+='/'+self.units.split('/')[1]+'*'+other.units.split('/')[1]
elif len(self.units.split('/'))>1:
units+='/'+self.units.split('/')[1]
elif len(other.units.split('/'))>1:
units+='/'+other.units.split('/')[1]
return self.__new__(self.__class__,newValue,units)
raise UnitError('Dimensionality of units does not match')
else:
return Unit(super(Unit,self).__mul__(other),self.units)
def __div__(self,other):
if type(other)==type(self):
#check order and then convert to unit
if not self.order:
return self.__new__(self.__class__,self*other.invert())
if not other.order or self.compare(other.order,self.order):
return self.__new__(self.__class__,super(Unit,self).__div__(other.convert(self.units)),self.units)
elif other.order:
newValue=float(self)*float(other)
units=self.units.split('/')[0]
if len(other.units.split('/'))>1:
units+='*'+other.units.split('/')[1]
units+='/'+other.units.split('/')[0]
if len(self.units.split('/'))>1:
units+='*'+self.units.split('/')[1]
return self.__new__(self.__class__,newValue,units)
raise UnitError('Dimensionality of units does not match')
else:
return Unit(super(Unit,self).__div__(other),self.units)
def __truediv__(self,other):
if type(other)==type(self):
#check order and then convert to unit
if not self.order:
return self.__new__(self.__class__,super(Unit,self).__truediv__(other),other.invert().units)
if not other.order or self.compare(other.order,self.order):
return self.__new__(self.__class__,super(Unit,self).__truediv__(other.convert(self.units)),self.units)
elif other.order:
newValue=float(self)*float(other)
units=self.units.split('/')[0]
if len(other.units.split('/'))>1:
units+='*'+other.units.split('/')[1]
units+='/'+other.units.split('/')[0]
if len(self.units.split('/'))>1:
units+='*'+self.units.split('/')[1]
return self.__new__(self.__class__,newValue,units)
raise UnitError('Dimensionality of units does not match')
else:
return self.__new__(self.__class__,super(Unit,self).__truediv__(other),self.units)
def __mod__(self,other):
if type(other)==type(self):
#check order and then convert to unit
if not other.order or self.compare(other.order,self.order):
return super(Unit,self).__mod__(other.convert(self.units))
raise UnitError('Dimensionality of units does not match')
else:
return super(Unit,self).__mod__(other)
def __pow__(self,other):
if type(other)==type(self):
#check order and then convert to unit
if not other.order:
return super(Unit,self).__pow__(other.convert(self.units))
raise UnitError('Cannot raise to the power of a value with units')
else:
return self.__new__(self.__class__,super(Unit,self).__pow__(other),self.units)
def __divmod__(self,other):
if type(other)==type(self):
#check order and then convert to unit
if not other.order or self.compare(other.order,self.order):
return super(Unit,self).__divmod__(other.convert(self.units))
raise UnitError('Dimensionality of units does not match')
else:
return super(Unit,self).__divmod__(other)
def __radd__(self,other):
return self.__new__(self.__class__,super(Unit,self).__radd__(other),self.units)
def __rsub__(self,other):
return self.__new__(self.__class__,super(Unit,self).__rsub__(other),self.units)
def __rmul__(self,other):
return self.__new__(self.__class__,super(Unit,self).__rmul__(other),self.units)
def __rdiv__(self,other):
return self.__new__(self.__class__,super(Unit,self).__rdiv__(other),self.invert().units)
def __rtruediv__(self,other):
return self.__new__(self.__class__,super(Unit,self).__rtruediv__(other),self.invert().units)
def __rmod__(self,other):
if not self.order:
return super(Unit,self).__rmod__(other)
else:
raise UnitError('Dimensionality of units does not match')
def __rpow__(self,other):
if not self.order:
return super(Unit,self).__rpow__(other)
raise UnitError('Cannot raise to the power of a value with units')
def __rdivmod__(self,other):
if not self.order:
return super(Unit,self).__rdivmod__(other)
raise UnitError('Dimensionality of units does not match')
def __ge__(self,other):
if type(other)==type(self):
#check order and then convert to unit
if not other.order or self.compare(other.order,self.order):
return super(Unit,self).__ge__(other.convert(self.units))
raise UnitError('Dimensionality of units does not match')
else:
return super(Unit,self).__ge__(other)
def __gt__(self,other):
if type(other)==type(self):
#check order and then convert to unit
if not other.order or self.compare(other.order,self.order):
return super(Unit,self).__gt__(other.convert(self.units))
raise UnitError('Dimensionality of units does not match')
else:
return super(Unit,self).__gt__(other)
def __le__(self,other):
if type(other)==type(self):
#check order and then convert to unit
if not other.order or self.compare(other.order,self.order):
return super(Unit,self).__le__(other.convert(self.units))
raise UnitError('Dimensionality of units does not match')
else:
return super(Unit,self).__le__(other)
def __lt__(self,other):
if type(other)==type(self):
#check order and then convert to unit
if not other.order or self.compare(other.order,self.order):
return super(Unit,self).__lt__(other.convert(self.units))
raise UnitError('Dimensionality of units does not match')
else:
return super(Unit,self).__lt__(other)
def __abs__(self):
return Unit(super(Unit,self).__abs__(),self.units)
def getUnit(self,unit):
"""getUnit(unit)
Sub routine to extract unit parameters from UNITS dictionary and return the appropriate values.
Also determines prefixes.
"""
unit=unit.strip()
if unit in self._units.keys():
return self._units[unit],1
elif unit[1:] in self._units.keys() and unit[0] in self._prefixes.keys():
return self._units[unit[1:]],self._prefixes[unit[0]]
else:
raise UnitError('Unit '+unit+' not found')
def getCompoundUnit(self,order,scaling,unit):
"""getCompoundUnit(order,scaling,unit)
Get compound unit parameters
"""
multiply=compile(self._separators['MULTIPLY'])
divide=compile(self._separators['DIVIDE'])
if unit[1:] in self._compoundUnits and unit[0] in self._prefixes.keys():
scaling*=self._prefixes[unit[0]]
unit=unit[1:]
if unit not in self._compoundUnits:
raise UnitError('Unit '+unit+' not found')
scaling*=self._compoundUnits[unit]['SIVAL']
units=dict(zip(['Numerator','Denominator'],[multiply.split('*'.join(divide.split(self._compoundUnits[unit]['UNITS'])[::2])),multiply.split('*'.join(divide.split(self._compoundUnits[unit]['UNITS'])[1::2]))]))
newOrderNum,newScaling=self.unitParse(units['Numerator'])
scaling*=newScaling
newOrderDen,newScaling=self.unitParse(units['Denominator'])
scaling/=newScaling
for type in list(set(newOrderNum.keys()+newOrderDen.keys())):
if not order.__contains__(type):
order[type]=0
try:
order[type]+=newOrderNum[type]
except KeyError:
pass
try:
order[type]-=newOrderDen[type]
except KeyError:
pass
return order,scaling
def isCompound(self,unit):
"""isCompound(unit)
Returns True if the unit is a compound unit (not a base SI unit)
"""
if unit in self._compoundUnits:
return True
elif unit[1:] in self._compoundUnits and unit[0] in self._prefixes.keys():
return True
else:
return False
def unitParse(self,unitList):
"""unitParse(unitList)
Parse a list of units and powers into a dimensional order and scaling factor to the SI UNIT combination.
        Expects a list of the form [unit, (power), unit, ...]; the power is optional and, when present, applies to the preceding unit.
"""
#Give order of types and scaling to SI units
scaling=1
order={}
for i in range(len(unitList)):
unit=unitList[i]
if unit=='':
continue
try:
unit=float(unit)
if i==0:
                    raise UnitError('Cannot parse unit, incorrect format: number found before unit')
else:
o,s=self.unitParse([unitList[i-1]])
for key in o.keys():
order[key]+=(unit-1)*o[key]
scaling*=s**(unit-1)
continue
except:
pass
if self.isCompound(unit):
order,scaling=self.getCompoundUnit(order,scaling,unit)
continue
UnitParams=self.getUnit(unit)
try:
order[UnitParams[0]['TYPE']]+=1
except KeyError:
order[UnitParams[0]['TYPE']]=1
scaling*=UnitParams[0]['SIVAL']*UnitParams[1]
return order,scaling
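    # Illustrative examples (values taken from the unit tests further below):
    #   unitParse(['kg','m'])  -> ({'Mass': 1, 'Length': 1}, 1)
    #   unitParse(['kJ','km']) -> ({'Mass': 1, 'Length': 3, 'Time': -2}, 1000000)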
def combine(self,numerator,denominator):
"""combine(numeratorOrder,numeratorScaling,denominatorOrder,denominatorScaling)
Combine numerator and denominator order and scaling into an overall order and scaling.
"""
numeratorOrder=numerator[0]
numeratorScaling=numerator[1]
denominatorOrder=denominator[0]
denominatorScaling=denominator[1]
resultOrder={}
for type in list(set(numeratorOrder.keys()+denominatorOrder.keys())):
order=0
try:
order+=numeratorOrder[type]
except KeyError:
pass
try:
order-=denominatorOrder[type]
except KeyError:
pass
resultOrder[type]=order
scaling=numeratorScaling/denominatorScaling
return resultOrder,scaling
def compare(self,order1,order2):
"""compare(order1,order2)
compare the dimensions of 2 sets of units and check that the result of dividing one by the other is dimensionless
"""
if order1 and order2:
for type in list(set(order1.keys()+order2.keys())):
order=0
try:
order+=order1[type]
except KeyError:
pass
try:
order-=order2[type]
except KeyError:
pass
if order!=0:
return False
return True
elif not order1 and not order2:
return True
return False
def unitCompare(self,desiredUnit):
"""unitCompare(self,desiredUnit)
Function to compare two units and return the scale factor between them.
If the dimensional order of the units is incorrect raises an error.
Expects forms:
kg*m**2
kg*m^2
kg/m^3
kg/m*m*m
Splits for division first and then multiplication and power - no bracket parsing and assumes unit in the form:
        kg/m/s is kg per (m per s), i.e. kg*s/m.
"""
multiply=compile(self._separators['MULTIPLY'])
divide=compile(self._separators['DIVIDE'])
#Unit parsing into numerator and denominator
actUnit=dict(zip(['Numerator','Denominator'],[multiply.split('*'.join(divide.split(self.units)[::2])),multiply.split('*'.join(divide.split(self.units)[1::2]))]))
desUnit=dict(zip(['Numerator','Denominator'],[multiply.split('*'.join(divide.split(desiredUnit)[::2])),multiply.split('*'.join(divide.split(desiredUnit)[1::2]))]))
#Determine values for the order and scaling of the units
actUnitOrder,actUnitScaling=self.combine(self.unitParse(actUnit['Numerator']),self.unitParse(actUnit['Denominator']))
desUnitOrder,desUnitScaling=self.combine(self.unitParse(desUnit['Numerator']),self.unitParse(desUnit['Denominator']))
#If the orders match then return the scaling between them else raise an Error.
#N.B. scaling is the number required to convert one of the unit type into the appropriate SI unit combination:
#
# i.e. 100 ft/s= 30.48 m/s (SF=0.3048) && 1 km/s = 1000 m/s (SF=1000) so 1 ft/s is 0.03048 km/s
# Therefore convert to meters and then to km so multiply by 0.3048 and divide by 1000
if self.compare(actUnitOrder,desUnitOrder):
return float(actUnitScaling)/float(desUnitScaling)
else:
raise UnitError('Order of units: '+self.units+' and '+desiredUnit+' does not match')
def convert(self,unit):
if unit and self.units:
return self.__new__(self.__class__,float(self*self.unitCompare(unit)),unit)
return self
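# Illustrative usage sketch (values mirror the unit tests below):
#   Unit(123, 'miles').convert('m')   # -> Unit(123 * 1609.344, 'm')
#   Unit(1, 'm').unitCompare('km')    # -> 0.001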
class __UnitTestCase(unittest.TestCase):
def setUp(self):
self.__setattr__('unit',Unit(1))
def tearDown(self):
self.__delattr__('unit')
def test_setunits(self):
self.unit.setUnits('kg/m**3')
self.assertEqual(self.unit.units,'kg/m**3')
def test_getUnit(self):
self.assertEqual(self.unit.getUnit('m'),({'SIVAL':1.0,'TYPE':'Length'},1.0),'getUnit error: '+str(self.unit.getUnit('m')))
self.assertEqual(self.unit.getUnit('km'),({'SIVAL':1.0,'TYPE':'Length'},1000.0),'getUnit error: '+str(self.unit.getUnit('km')))
self.assertEqual(self.unit.getUnit('g'),({'SIVAL':0.001,'TYPE':'Mass'},1.0),'getUnit error: '+str(self.unit.getUnit('g')))
self.assertEqual(self.unit.getUnit('kg'),({'SIVAL':1.0,'TYPE':'Mass'},1.0),'getUnit error: '+str(self.unit.getUnit('kg')))
self.assertEqual(self.unit.getUnit('s'),({'SIVAL':1.0,'TYPE':'Time'},1.0),'getUnit error: '+str(self.unit.getUnit('s')))
self.assertEqual(self.unit.getUnit('min'),({'SIVAL':60.0,'TYPE':'Time'},1.0),'getUnit error: '+str(self.unit.getUnit('min')))
self.assertEqual(self.unit.getUnit('hr'),({'SIVAL':3600.0,'TYPE':'Time'},1.0),'getUnit error: '+str(self.unit.getUnit('hr')))
self.assertEqual(self.unit.getUnit('miles'),({'SIVAL':1609.344,'TYPE':'Length'},1.0),'getUnit error: '+str(self.unit.getUnit('miles')))
self.assertEqual(self.unit.getUnit('lb'),({'SIVAL':2.2046226,'TYPE':'Mass'},1.0),'getUnit error: '+str(self.unit.getUnit('lb')))
self.assertEqual(self.unit.getUnit('Gs'),({'SIVAL':1.0,'TYPE':'Time'},1000000000.0),'getUnit error: '+str(self.unit.getUnit('Gs')))
self.assertEqual(self.unit.getUnit('ft'),({'SIVAL':0.3048,'TYPE':'Length'},1.0),'getUnit error: '+str(self.unit.getUnit('ft')))
self.assertEqual(self.unit.getUnit('MC'),({'SIVAL':1.0,'TYPE':'Charge'},1000000.0),'getUnit error: '+str(self.unit.getUnit('MC')))
def test_isCompound(self):
self.assertTrue(self.unit.isCompound('J'),'isCompound error: '+str(self.unit.isCompound('J')))
self.assertFalse(self.unit.isCompound('m'),'isCompound error: '+str(self.unit.isCompound('m')))
self.assertTrue(self.unit.isCompound('N'),'isCompound error: '+str(self.unit.isCompound('N')))
self.assertTrue(self.unit.isCompound('A'),'isCompound error: '+str(self.unit.isCompound('A')))
self.assertTrue(self.unit.isCompound('A'),'isCompound error: '+str(self.unit.isCompound('A')))
self.assertTrue(self.unit.isCompound('V'),'isCompound error: '+str(self.unit.isCompound('V')))
self.assertTrue(self.unit.isCompound('Ohm'),'isCompound error: '+str(self.unit.isCompound('Ohm')))
self.assertFalse(self.unit.isCompound('C'),'isCompound error: '+str(self.unit.isCompound('C')))
self.assertFalse(self.unit.isCompound('g'),'isCompound error: '+str(self.unit.isCompound('g')))
self.assertFalse(self.unit.isCompound('ft'),'isCompound error: '+str(self.unit.isCompound('ft')))
self.assertFalse(self.unit.isCompound('lb'),'isCompound error: '+str(self.unit.isCompound('lb')))
self.assertFalse(self.unit.isCompound('s'),'isCompound error: '+str(self.unit.isCompound('s')))
self.assertFalse(self.unit.isCompound('min'),'isCompound error: '+str(self.unit.isCompound('min')))
self.assertFalse(self.unit.isCompound('hr'),'isCompound error: '+str(self.unit.isCompound('hr')))
self.assertFalse(self.unit.isCompound('miles'),'isCompound error: '+str(self.unit.isCompound('miles')))
def test_getCompoundUnit(self):
self.assertEqual(self.unit.getCompoundUnit({},1,'J'),({'Mass':1,'Length':2,'Time':-2},1),'getCompoundUnit error: '+str(self.unit.getCompoundUnit({},1,'J')))
self.assertEqual(self.unit.getCompoundUnit({},1,'A'),({'Charge':1,'Time':-1},1),'getCompoundUnit error: '+str(self.unit.getCompoundUnit({},1,'A')))
self.assertEqual(self.unit.getCompoundUnit({},1,'V'),({'Length': 2, 'Mass': 1, 'Charge': -1, 'Time': -2},1),'getCompoundUnit error: '+str(self.unit.getCompoundUnit({},1,'V')))
self.assertEqual(self.unit.getCompoundUnit({},1,'Ohm'),({'Length': 2, 'Mass': 1, 'Charge': -2, 'Time': -1},1),'getCompoundUnit error: '+str(self.unit.getCompoundUnit({},1,'Ohm')))
self.assertEqual(self.unit.getCompoundUnit({},1,'N'),({'Mass':1,'Length':1,'Time':-2},1),'getCompoundUnit error: '+str(self.unit.getCompoundUnit({},1,'N')))
self.assertEqual(self.unit.getCompoundUnit({},1,'kN'),({'Mass':1,'Length':1,'Time':-2},1000.0),'getCompoundUnit error: '+str(self.unit.getCompoundUnit({},1,'kN')))
def test_unitParse(self):
self.assertEqual(self.unit.unitParse(['kJ','km']),({'Mass':1,'Length':3,'Time':-2},1000000),'unitParse error: '+str(self.unit.unitParse(['kJ','km'])))
self.assertEqual(self.unit.unitParse(['kg','m']),({'Mass':1,'Length':1},1),'unitParse error: '+str(self.unit.unitParse(['kg','m'])))
self.assertEqual(self.unit.unitParse(['Gs','ft']),({'Length':1,'Time':1},304800000.0),'unitParse error: '+str(self.unit.unitParse(['Gs','ft'])))
def test_combine(self):
self.assertEqual(self.unit.combine([{'Mass':1,'Length':4,'Time':-2},1000],[{'Mass':1,'Length':2},1000]),({'Mass':0,'Length':2,'Time':-2},1),'unitParse error: '+str(self.unit.combine([{'Mass':1,'Length':4,'Time':-2},1000],[{'Mass':1,'Length':2},1000])))
self.assertEqual(self.unit.combine([{'Mass':3,'Length':4,'Time':-2},1000],[{'Mass':1,'Length':2},1000]),({'Mass':2,'Length':2,'Time':-2},1),'unitParse error: '+str(self.unit.combine([{'Mass':3,'Length':4,'Time':-2},1000],[{'Mass':1,'Length':2},1000])))
def test_compare(self):
self.assertFalse(self.unit.compare({'Mass':12,'Length':4,'Time':-2},{'Mass':1,'Length':4,'Time':-2}),'compare error: '+str(self.unit.compare({'Mass':1,'Length':4,'Time':-2},{'Mass':1,'Length':4,'Time':-2})))
self.assertFalse(self.unit.compare({'Mass':12,'Length':4,'Time':-2},{'Mass':1,'Time':-2}),'compare error: '+str(self.unit.compare({'Mass':1,'Length':4,'Time':-2},{'Mass':1,'Length':4,'Time':-2})))
self.assertFalse(self.unit.compare({'Mass':1,'Length':4,'Time':-2},{'Mass':1,'Length':14,'Time':-2}),'compare error: '+str(self.unit.compare({'Mass':1,'Length':4,'Time':-2},{'Mass':1,'Length':4,'Time':-2})))
self.assertFalse(self.unit.compare({'Mass':1,'Length':4,'Time':2},{'Mass':1,'Length':4,'Time':-2}),'compare error: '+str(self.unit.compare({'Mass':1,'Length':4,'Time':-2},{'Mass':1,'Length':4,'Time':-2})))
self.assertTrue(self.unit.compare({'Mass':1,'Length':4,'Time':-2},{'Mass':1,'Length':4,'Time':-2}),'compare error: '+str(self.unit.compare({'Mass':1,'Length':4,'Time':-2},{'Mass':1,'Length':4,'Time':-2})))
def test_unitCompare(self):
self.unit.setUnits('m')
self.assertAlmostEqual(self.unit.unitCompare('ft'),3.28083989501,7,'unitCompare error: '+str(self.unit.unitCompare('ft')))
self.unit.setUnits('miles')
self.assertEqual(self.unit.unitCompare('m'),1609.344,'unitCompare error: '+str(self.unit.unitCompare('m')))
self.unit.setUnits('m')
self.assertEqual(self.unit.unitCompare('km'),0.001,'unitCompare error: '+str(self.unit.unitCompare('km')))
def test_convert(self):
self.unit=Unit(123)
self.unit.setUnits('miles')
self.assertEqual(self.unit.convert('m'),1609.344*123,'unitCompare error: '+str(self.unit.convert('m')))
def test___repr__(self):
self.assertEqual(repr(self.unit),'1.0','repr test error')
self.unit.setUnits('m')
self.assertEqual(repr(self.unit),'1.0 m','repr test error '+repr(self.unit))
def test___eq__(self):
self.assertTrue(self.unit==1,'Equality test error')
self.assertFalse(self.unit==2,'Equality test error')
self.unit.setUnits('m')
self.assertTrue(self.unit==Unit(1,'m'),'Equality test error')
self.assertFalse(self.unit==Unit(1,'km'),'Equality test error')
self.assertFalse(self.unit==Unit(1,'s'),'Equality test error')
self.assertTrue(self.unit==Unit(0.001,'km'),'Equality test error')
def test___ne_(self):
self.assertFalse(self.unit!=1,'Equality test error')
self.assertTrue(self.unit!=2,'Equality test error')
self.unit.setUnits('m')
self.assertFalse(self.unit!=Unit(1,'m'),'Equality test error')
self.assertTrue(self.unit!=Unit(1,'km'),'Equality test error')
self.assertTrue(self.unit!=Unit(1,'s'),'Equality test error')
self.assertFalse(self.unit!=Unit(0.001,'km'),'Equality test error')
def test___add__(self):
self.assertEqual(self.unit+1,2,'Add test error')
self.assertEqual(self.unit+Unit(2,'m'),Unit(3,'m'),'Add test error')
self.unit.setUnits('m')
try:
self.unit+Unit(1,'s')
self.assertTrue(False,'Add test error')
except Exception,e:
self.assertEqual(type(e),UnitError,'Add test error')
self.assertAlmostEqual(self.unit+Unit(1,'km'),1001,12,'Add test error '+str(self.unit+Unit(1,'km')))
self.assertAlmostEqual(self.unit+Unit(1,'km'),Unit(1.001,'km'),12,'Add test error'+ str(self.unit+Unit(1,'km')))
def test___sub__(self):
self.assertEqual(self.unit-2,-1,'Sub test error')
self.assertEqual(self.unit-Unit(2,'m'),Unit(-1,'m'),'Sub test error')
self.unit.setUnits('m')
try:
self.unit-Unit(1,'s')
self.assertTrue(False,'Sub test error')
except Exception,e:
self.assertEqual(type(e),UnitError,'Sub test error')
self.assertAlmostEqual(self.unit-Unit(+1,'km'),-999,12,'Sub test error '+str(self.unit-Unit(1,'km')))
self.assertAlmostEqual(self.unit-Unit(1,'km'),Unit(-0.999,'km'),12,'Sub test error'+ str(self.unit-Unit(1,'km')))
def test___mul__(self):
self.assertEqual(self.unit*2,2,'Mul test error')
self.assertEqual(self.unit*Unit(2,'m'),Unit(2,'m'),'Mul test error')
self.unit.setUnits('m')
self.assertEqual(self.unit*Unit(1,'s'),1,'Mul test error')
self.assertEqual(self.unit*Unit(1,'s'),Unit(1,'m*s'),'Mul test error')
self.assertAlmostEqual(self.unit*Unit(+1,'km'),1000,12,'Mul test error')
self.assertAlmostEqual(self.unit*Unit(1,'km'),Unit(1,'km'),12,'Mul test error')
def test___div__(self):
self.unit+=3
self.assertEqual(self.unit/2,2,'Div test error'+str(self.unit/2))
self.assertEqual(self.unit/Unit(2,'m'),Unit(2),'Div test error')
self.unit.setUnits('m')
self.assertEqual(self.unit/Unit(1,'s'),4,'Div test error')
self.assertEqual(self.unit/Unit(1,'s'),Unit(4,'m/s'),'Div test error')
self.assertAlmostEqual(self.unit/Unit(+4,'km'),0.001,12,'Div test error')
self.assertAlmostEqual(self.unit/Unit(4,'km'),Unit(0.000001,'km'),12,'Div test error')
def test___truediv__(self):
self.unit+=3
self.assertEqual(self.unit.__truediv__(2),2,'truediv test error'+str(self.unit.__truediv__(2)))
self.assertEqual(self.unit.__truediv__(Unit(2,'m')),Unit(2),'truediv test error')
self.unit.setUnits('m')
self.assertEqual(self.unit.__truediv__(Unit(1,'s')),4,'truediv test error')
def test___mod__(self):
self.unit+=4
self.assertEqual(self.unit%2,1,'Mod test error')
try:
self.unit%Unit(2,'m')
self.assertTrue(False)
except Exception,e:
self.assertEqual(type(e),UnitError,'Mod test error')
self.unit.setUnits('m')
self.assertEqual(self.unit%(Unit(2,'m')),1,'Mod test error')
try:
self.unit%Unit(2,'s')
self.assertTrue(False)
except Exception,e:
self.assertEqual(type(e),UnitError,'Mod test error')
def test___pow__(self):
self.unit+=3
self.assertEqual(self.unit**2,16,'Pow test error')
try:
self.unit**Unit(2,'m')
self.assertTrue(False)
except Exception,e:
self.assertEqual(type(e),UnitError,'Pow test error')
self.unit.setUnits('m')
self.assertEqual(self.unit**(Unit(2)),16,'Pow test error')
try:
self.unit%Unit(2,'s')
self.assertTrue(False)
except Exception,e:
self.assertEqual(type(e),UnitError,'Pow test error')
def test___divmod__(self):
self.unit+=4
self.assertEqual(divmod(self.unit,2),(2,1),'divmod test error')
try:
divmod(self.unit,Unit(2,'m'))
self.assertTrue(False)
except Exception,e:
self.assertEqual(type(e),UnitError,'divmod test error')
self.unit.setUnits('m')
self.assertEqual(divmod(self.unit,Unit(2,'m')),(2,1),'divmod test error')
try:
divmod(self.unit,Unit(2,'s'))
self.assertTrue(False)
except Exception,e:
self.assertEqual(type(e),UnitError,'divmod test error')
def test___radd__(self):
self.assertEqual(1+self.unit,2,'radd test error')
self.assertEqual(2+self.unit,Unit(3),'radd test error')
self.unit.setUnits('m')
self.assertEqual(2+self.unit,Unit(3,'m'),'radd test error')
def test___rsub__(self):
self.assertEqual(3-self.unit,2,'rsub test error')
self.assertEqual(4-self.unit,Unit(3),'rsub test error')
self.unit.setUnits('m')
self.assertEqual(4-self.unit,Unit(3,'m'),'rsub test error')
def test___rmul__(self):
self.assertEqual(3*self.unit,3,'rmul test error')
self.assertEqual(4*self.unit,Unit(4),'rmul test error')
self.unit.setUnits('m')
self.assertEqual(4*self.unit,Unit(4,'m'),'rmul test error')
def test___rdiv__(self):
self.assertEqual(3/self.unit,3,'rdiv test error')
self.assertEqual(4/self.unit,Unit(4),'rdiv test error')
self.unit.setUnits('m')
self.assertEqual(4/self.unit,Unit(4,'m**-1'),'rdiv test error')
self.unit+=3
self.assertEqual(4/self.unit,Unit(1,'m**-1'),'rdiv test error')
def test___rtruediv__(self):
self.assertEqual(self.unit.__rtruediv__(3),3,'rtruediv test error')
self.assertEqual(self.unit.__rtruediv__(4),Unit(4),'rtruediv test error')
self.unit.setUnits('m')
self.assertEqual(self.unit.__rtruediv__(4),Unit(4,'m**-1'),'rtruediv test error')
self.unit+=3
self.assertEqual(self.unit.__rtruediv__(4),Unit(1,'m**-1'),'rtruediv test error')
def test___rmod__(self):
self.unit+=4
self.assertEqual(9%self.unit,4,'rmod test error')
self.unit.setUnits('m')
try:
9%self.unit
self.assertTrue(False)
except Exception,e:
self.assertEqual(type(e),UnitError,'rmod test error')
def test___rpow__(self):
self.unit+=1
self.assertEqual(4**self.unit,16,'rpow test error')
self.unit.setUnits('m')
try:
4**self.unit
self.assertTrue(False)
except Exception,e:
self.assertEqual(type(e),UnitError,'rpow test error')
def test___rdivmod__(self):
self.unit+=4
self.assertEqual(divmod(9,self.unit),(1,4),'rdivmod test error')
self.unit.setUnits('m')
try:
divmod(9,self.unit)
self.assertTrue(False)
except Exception,e:
self.assertEqual(type(e),UnitError,'rdivmod test error')
def test___ge__(self):
self.assertTrue(4>=self.unit,'ge error')
self.assertFalse(self.unit>=2,'ge error')
self.unit+=2
self.assertTrue(self.unit>=3,'ge error')
self.assertTrue(3>=self.unit,'ge error')
self.unit.setUnits('m')
self.assertTrue(self.unit>=3,'ge error')
self.assertTrue(3>=self.unit,'ge error')
self.assertTrue(self.unit>=Unit(3,'m'),'ge error')
try:
self.assertTrue(self.unit>=Unit(3,'s'),'ge error')
self.assertTrue(False)
except Exception,e:
self.assertEqual(type(e),UnitError,'ge test error')
def test___gt__(self):
self.assertTrue(4>self.unit,'gt error')
self.assertFalse(self.unit>1,'gt error')
self.unit+=2
self.assertTrue(self.unit>2,'gt error')
self.assertTrue(4>self.unit,'gt error')
self.unit.setUnits('m')
self.assertTrue(self.unit>2,'gt error')
self.assertTrue(4>self.unit,'gt error')
self.assertTrue(self.unit>Unit(2,'m'),'gt error')
try:
self.assertTrue(self.unit>Unit(2,'s'),'gt error')
self.assertTrue(False)
except Exception,e:
self.assertEqual(type(e),UnitError,'gt test error')
def test___le__(self):
self.assertTrue(1<=self.unit,'le error')
self.assertFalse(self.unit<=0,'le error')
self.unit+=2
self.assertTrue(self.unit<=3,'le error')
self.assertTrue(3<=self.unit,'le error')
self.unit.setUnits('m')
self.assertTrue(self.unit<=3,'le error')
self.assertTrue(2<self.unit,'le error')
self.assertTrue(self.unit<=Unit(4,'m'),'le error')
try:
self.assertTrue(self.unit<=Unit(4,'s'),'le error')
self.assertTrue(False)
except Exception,e:
self.assertEqual(type(e),UnitError,'le test error')
def test___lt__(self):
self.assertTrue(0<self.unit,'lt error')
self.assertFalse(self.unit<0,'lt error')
self.unit+=2
self.assertTrue(self.unit<4,'lt error')
self.assertTrue(2<self.unit,'lt error')
self.unit.setUnits('m')
self.assertTrue(self.unit<4,'lt error')
self.assertTrue(2<self.unit,'lt error')
self.assertTrue(self.unit<Unit(4,'m'),'lt error')
try:
self.assertTrue(self.unit<Unit(4,'s'),'lt error')
self.assertTrue(False)
except Exception,e:
self.assertEqual(type(e),UnitError,'lt test error')
def test___float__(self):
self.assertEqual(float(self.unit),1.0,'float error')
self.assertNotEqual(float(self.unit),2.0,'float error')
def test___abs__(self):
self.assertEqual(abs(self.unit),1.0,'abs error')
self.assertEqual(abs(-self.unit),1.0,'abs error')
def test_invert(self):
self.assertEqual(self.unit.invert(),Unit(1),'invert error')
self.unit+=1
self.assertEqual(self.unit.invert(),Unit(0.5),'invert error')
self.unit.setUnits('m')
self.assertEqual(self.unit.invert(),Unit(0.5,'m**-1'),'invert error')
self.unit.setUnits('m/s')
self.assertEqual(self.unit.invert(),Unit(0.5,'s/m'),'invert error')
def test___format__(self):
self.assertEqual('{:>4.2f}'.format(self.unit),'1.00','format error')
self.unit.setUnits('m')
self.assertEqual('{:>4.2f}'.format(self.unit),'1.00m','format error')
self.unit.setUnits('m/s')
self.assertEqual('{:>4.2f}'.format(self.unit),'1.00m/s','format error')
self.assertEqual('{:>5.3f km/sa}'.format(self.unit),'0.001 km/s','format error')
self.assertEqual('{:>5.3f km/sA}'.format(self.unit),'0.001','format error')
def __debugTestSuite():
suite=unittest.TestSuite()
unitSuite = unittest.TestLoader().loadTestsFromTestCase(__UnitTestCase)
suite.addTests(unitSuite._tests)
return suite
def __testSuite():
unitSuite = unittest.TestLoader().loadTestsFromTestCase(__UnitTestCase)
return unittest.TestSuite([unitSuite])
def runTests():
suite=__testSuite()
unittest.TextTestRunner(verbosity=4).run(suite)
def debugTests():
suite=__debugTestSuite()
import ipdb,sys
for test in suite:
try:
test.debug()
except Exception,e:
if type(e)==AssertionError:
ipdb.post_mortem(sys.exc_info()[2])
else:
try:
from IPython.core.ultratb import VerboseTB
vtb=VerboseTB(call_pdb=1)
vtb(*sys.exc_info())
except:
import traceback
print'\n'
traceback.print_exc()
ipdb.post_mortem(sys.exc_info()[2])
if __name__=='__main__':
runTests()
|
apache-2.0
| 1,061,623,227,853,314,000 | 52.394947 | 260 | 0.585998 | false |
baumartig/paperboy
|
paperboy-config.py
|
1
|
11335
|
from settings_handler import settings
from job import EXECUTION_TYPES as job_execution_types
from job import WEEKDAYS as job_weekdays
import recipes_handler
import jobs_handler
import os
import settings_handler
import jobs_executor
import util
import _Getch
def makeMenu(options, directInput=True):
sortedKeys = sorted(options)
for key in sortedKeys:
print "%5s %s" % (key, options[key]["name"])
if directInput:
print "Selection: ",
selection = _Getch.getch()
print
else:
selection = raw_input("Selection (commit with Enter): ")
while not selection in options:
print "Invalid selection"
if directInput:
print "New selection: ",
selection = _Getch.getch()
print
else:
selection = raw_input("New Selection: ")
if "arg" in options[selection]:
options[selection]["function"](*options[selection]["arg"])
else:
options[selection]["function"]()
def mainMenu():
clear()
print "Welcome to the paperboy server"
print "What would you like to do"
makeMenu(mainOptions)
def jobsMenu():
clear()
print "Jobs Menu"
makeMenu(jobsOptions)
def listJobs():
clear()
copyListJobOptions = listJobsOptions.copy()
print "List jobs:"
for (index, job) in enumerate(jobs, 1):
copyListJobOptions[str(index)] = jobOption(job.recipeRef, index - 1)
makeMenu(copyListJobOptions)
def jobProperties(job):
clear()
copyJobPropertiesOptions = jobPropertiesOptions.copy()
index = jobs.index(job)
copyJobPropertiesOptions["d"] = {"name": "delete job",
"function": deleteJob,
"arg": [job]}
optionsList = []
optionsList.append({"name": "execution type: %20s" % job.executionType,
"function": changeJobInterval,
"arg": [job]})
optionsList.append({"name": "execution time: %20s"
% util.formatTime(job.executionTime),
"function": changeJobExecutionTime,
"arg": [job]})
if not job.executionType == "daily":
optionsList.append({"name": "execution day : %20s" % job.executionDay,
"function": changeJobExecutionDay,
"arg": [job]})
# append the options to the menu
for (x, option) in enumerate(optionsList, 1):
copyJobPropertiesOptions[str(x)] = option
print "Properties of \"%s\"" % job.recipeRef
makeMenu(copyJobPropertiesOptions)
def changeJobInterval(job):
intervalOptions = {}
for (x, interval) in enumerate(job_execution_types, 1):
intervalOptions[str(x)] = {"name": interval,
"function": setJobInterval,
"arg": [job,
job.setExecutionType,
interval]}
intervalOptions["x"] = {"name": "exit menu",
"function": jobProperties,
"arg": [job]}
makeMenu(intervalOptions)
def setJobInterval(job, function, interval):
function(interval)
jobs_handler.saveJobs(jobs)
jobProperties(job)
def changeJobExecutionTime(job):
new_time = None
while not new_time:
new_time_str = raw_input("New Execution Time: ")
try:
new_time = util.parseTime(new_time_str)
except:
new_time = None
print "Invalid time formate please write HH:MM"
job.setExecutionTime(new_time)
jobs_handler.saveJobs(jobs)
jobProperties(job)
def changeJobExecutionDay(job):
if job.executionType == "weekly":
dayOptions = {}
for (x, day) in enumerate(job_weekdays, 1):
dayOptions[str(x)] = {"name": day,
"function": setJobExecutionDay,
"arg": [job, job.setExecutionDay, day]}
dayOptions["x"] = {"name": "exit menu",
"function": jobProperties,
"arg": [job]}
makeMenu(dayOptions)
else:
new_day = None
while not new_day:
new_day_str = raw_input("New execution day (1 to 30): ")
try:
new_day = int(new_day_str)
if new_day < 1 or new_day > 30:
new_day = None
print "Invalid day please enter a number between 1 and 30."
except:
new_day = None
print "Invalid day please enter a number between 1 and 30."
job.setExecutionDay(new_day)
jobs_handler.saveJobs(jobs)
jobProperties(job)
def setJobExecutionDay(job, function, day):
function(day)
jobs_handler.saveJobs(jobs)
jobProperties(job)
def jobOption(name, index):
return {"name": "%s" % (name),
"function": jobProperties,
"arg": [jobs[index]]}
def deleteJob(job):
jobs_handler.deleteJob(job)
loadJobs()
listJobs()
def newJob(startIndex=0):
clear()
nextIndex = startIndex + 10
previousIndex = startIndex - 10
recipes = recipes_handler.loadBuiltinRecipes()
copyJobOptions = newJobOptions.copy()
print "New Job (found %d)" % len(recipes)
if startIndex > 0:
copyJobOptions["p"] = {"name": "previous 10 recipes",
"function": newJob,
"arg": [previousIndex]}
if nextIndex < len(recipes):
copyJobOptions["n"] = {"name": "next 10 recipes",
"function": newJob,
"arg": [nextIndex]}
for (recipe, x) in zip(recipes[startIndex:nextIndex],
range(startIndex, nextIndex)):
newJobOption(copyJobOptions, x, recipe)
makeMenu(copyJobOptions, False)
def filterJobsTitle():
newValue = raw_input("New Title Filter: ")
recipes_handler.titleFilter = newValue
newJob()
def filterJobsDescription():
newValue = raw_input("New Description Filter: ")
recipes_handler.descriptionFilter = newValue
newJob()
def filterJobsLanguage():
newValue = raw_input("New Language Filter: ")
recipes_handler.languageFilter = newValue
newJob()
def newJobOption(dict, x, recipe):
dict[str(x + 1)] = {"name": "[%s]%s" % (recipe.language, recipe.title),
"function": createJob,
"arg": [recipe.title]}
def createJob(ref):
newJob = jobs_handler.newJob(ref)
loadJobs()
jobProperties(jobs[newJob.id])
def exitToMainMenu():
mainMenu()
def settingsMenu():
clear()
print "Settings:"
copySettingsOptions = settingsOptions.copy()
    # collect options
optionsList = []
optionsList.append(newOption("Builtin recipes folder",
settings.calibreFolder,
settings.setCalibreFolder))
optionsList.append(newOption("Export format", settings.format,
settings.setFormat))
optionsList.append(newOption("Mail from", settings.mailFrom,
settings.setMailFrom))
optionsList.append(newOption("Mail To", settings.mailTo,
settings.setMailTo))
if settings.useSmtp():
print "Using SMPT"
copySettingsOptions["s"] = {"name": "Use sendmail",
"function": sendmailSettings}
optionsList.append(newOption("Smtp server",
settings.smtpServer["address"],
settings.setSmtpServerAdress))
if "port" in settings.smtpServer:
optionsList.append(newOption("Smtp port",
settings.smtpServer["port"],
settings.setSmtpServerPort))
if "security" in settings.smtpServer:
optionsList.append(newOption("Smtp security",
settings.smtpServer["security"],
settings.setSmtpServerSecurity))
if "login" in settings.smtpServer:
optionsList.append(newOption("Smtp login",
settings.smtpServer["login"],
settings.setSmtpLogin))
if "password" in settings.smtpServer:
encodedPw = "*" * len(settings.smtpServer["password"])
optionsList.append(newOption("Smtp password",
encodedPw,
settings.setSmtpPassword))
else:
print "Using sendmail"
copySettingsOptions["s"] = {"name": "Use smtp server",
"function": createDefaultSmtpSettings}
# append the options to the menu
for (x, option) in enumerate(optionsList, 1):
copySettingsOptions[str(x)] = option
print
print "Options:"
makeMenu(copySettingsOptions)
def quit():
clear()
print "Goodbye"
def editSetting(name, function):
newValue = raw_input("%s: " % name)
function(newValue)
settings_handler.saveSettings()
settingsMenu()
def createDefaultSmtpSettings():
settings.setSmtpServer("localhost", 587, "starttls")
settings.setSmtpLogin("username")
settings.setSmtpPassword("username")
settings_handler.saveSettings()
settingsMenu()
def sendmailSettings():
settings.deleteSmtpSettings()
settings_handler.saveSettings()
settingsMenu()
def newOption(name, option, function):
return {"name": "%22s: %30s" % (name, str(option)),
"function": editSetting,
"arg": [name, function]}
def clear():
os.system('clear')
return
def executeJobs():
clear()
jobs_executor.execJobs(jobs)
makeMenu(executeJobsOptions)
def loadJobs():
global jobs
jobs = jobs_handler.loadJobs()
mainOptions = {"1": {"name": "jobs", "function": jobsMenu},
"2": {"name": "paperboy settings", "function": settingsMenu},
"3": {"name": "execute jobs", "function": executeJobs},
"q": {"name": "quit", "function": quit}}
jobsOptions = {"1": {"name": "list jobs", "function": listJobs},
"2": {"name": "new job", "function": newJob, "arg": [0]},
"x": {"name": "exit menu", "function": exitToMainMenu}}
newJobOptions = {"t": {"name": "filter title", "function": filterJobsTitle},
# "d": {"name": "filter description",
# "function": filterJobsDescription},
"l": {"name": "filter language",
"function": filterJobsLanguage},
"x": {"name": "exit menu", "function": jobsMenu}}
listJobsOptions = {"x": {"name": "exit menu", "function": jobsMenu}}
jobPropertiesOptions = {"x": {"name": "exit menu", "function": listJobs}}
executeJobsOptions = {"x": {"name": "goto main menu",
"function": exitToMainMenu}}
settingsOptions = {"x": {"name": "exit menu", "function": exitToMainMenu}}
jobs = []
loadJobs()
mainMenu()
|
apache-2.0
| 1,463,873,548,460,806,700 | 30.054795 | 79 | 0.556683 | false |
ynop/spych
|
spych/data/features/pipeline/__init__.py
|
1
|
1081
|
from .base import Pipeline
from .base import ExtractionStage
from .base import ProcessingStage
from .extraction import SpectrumExtractionStage
from .extraction import MelFilterbankExtractionStage
from .extraction import MFCCExtractionStage
from .scaling import ExponentialStage
from .scaling import LogStage
from .scaling import RescalingStage
from .splicing import SpliceStage
from .splicing import UnspliceMergeType
from .splicing import UnspliceStage
from .convertion import MelToMFCCStage
def spectrum_extraction_pipeline(win_length=400, win_step=160):
return Pipeline(extract_stage=SpectrumExtractionStage(win_length=win_length, win_step=win_step))
def mel_extraction_pipeline(win_length=400, win_step=160, num_mel=23):
return Pipeline(extract_stage=MelFilterbankExtractionStage(num_mel=num_mel, win_length=win_length, win_step=win_step))
def mfcc_extraction_pipeline(win_length=400, win_step=160, num_mfcc=13, num_mel=23):
return Pipeline(extract_stage=MFCCExtractionStage(num_mfcc=num_mfcc, num_mel=num_mel, win_length=win_length, win_step=win_step))
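# Illustrative usage sketch (construction only, using the defaults defined above):
#   pipeline = mfcc_extraction_pipeline(win_length=400, win_step=160, num_mfcc=13, num_mel=23)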
|
mit
| -7,606,965,782,792,891,000 | 35.033333 | 132 | 0.813136 | false |
SYNHAK/spiff
|
client/bonehead/spiff/frontDoor.py
|
1
|
1705
|
import bonehead
from bonehead.ui import Page
from PyQt4 import QtCore, QtGui, QtWebKit, QtNetwork
import threading
class FrontDoorPlugin(bonehead.Plugin):
def newPage(self, name, args, ui):
return FrontDoorPage(args['sensor-id'], ui)
class FrontDoorPage(Page):
def __init__(self, sensorID, ui):
super(FrontDoorPage, self).__init__('Open/Close Space', ui)
self.__sensor = self.spiff.getOne('sensor', sensorID)
self.setStyleSheet("*{font-size:32pt}")
self.layout = QtGui.QVBoxLayout(self)
self.button = QtGui.QPushButton(self)
self.text = QtGui.QLabel(self)
self.text.setAlignment(QtCore.Qt.AlignHCenter)
self.layout.addWidget(self.text)
self.layout.addWidget(self.button)
self.button.clicked.connect(self.toggle)
self.updateButton()
def _runToggle(self):
if str(self.__sensor.value) == "True":
self.text.setText("Closing space...")
self.__sensor.value = False
else:
self.text.setText("Opening space...")
self.__sensor.value = True
self.__sensor.save()
self.updateButton()
def toggle(self):
t = threading.Thread(target=self._runToggle)
t.start()
def updateButton(self):
self.__sensor.refresh()
if str(self.__sensor.value) == "True":
self.button.setStyleSheet("*{background-color: #f00;}")
self.button.setText("Close Space")
self.text.setText("Space is OPEN")
else:
self.button.setStyleSheet("*{background-color: #0f0;}")
self.button.setText("Open Space")
self.text.setText("Space is CLOSED")
|
agpl-3.0
| 793,890,765,382,257,000 | 33.795918 | 67 | 0.61173 | false |
MichinobuMaeda/jpzipcode
|
src/gae/jpzipcode/controller/task.py
|
1
|
4345
|
# -*- coding: UTF-8 -*-
#
# Copyright 2012 Michinobu Maeda.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import hashlib
import StringIO
import zipfile
from google.appengine.ext import blobstore
from jpzipcode.model import Params, Status, save_blob
class Task():
"""タスク"""
__tsk = None
__cat = None
__stt = None
def __init__(self, task, cat):
self.__tsk = task
self.__cat = cat
self.__stt = Status()
def get_task(self):
"""タスク名を取得する。"""
return self.__tsk
def get_cat(self):
"""分類(住所/事業所)を取得する。"""
return self.__cat
def get_prev(self):
"""データのインプットになるタスク名を取得する。"""
return Params().get('task_prv', self.__tsk)
def __get_key(self, name):
"""ステータスのキーを取得する。"""
return '%(nam)s_%(tsk)s_%(cat)s' % {
'nam':name,
'tsk':self.__tsk,
'cat':self.__cat,
}
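    # e.g. __get_key('cnt') -> 'cnt_<task>_<cat>' (illustrative; the actual task and
    # category names are whatever this Task instance was constructed with)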
def __get_key_prev(self, name):
"""インプットデータのステータスのキーを取得する。"""
return '%(nam)s_%(prv)s_%(cat)s' % {
'nam':name,
'prv':self.get_prev(),
'cat':self.__cat,
}
def get_ts(self):
"""日本郵便配布データのタイムスタンプを取得する。"""
return self.__stt.get('ts_ar_%(cat)s' % {'cat':self.__cat})
def get_ts_short(self):
"""日本郵便配布データのタイムスタンプを短い書式で取得する。"""
return self.get_ts().replace('-', '').replace(':', '').replace(' ', '')[0:8]
def get_stt(self, name):
"""ステータスを取得する。"""
return self.__stt.get(self.__get_key(name))
def get_stt_prev(self, name):
"""インプットデータのステータスを取得する。"""
return self.__stt.get(self.__get_key_prev(name))
def set_stt(self, stt):
"""ステータスを設定する。"""
dic = {}
for key in stt.keys():
dic[self.__get_key(key)] = stt[key]
self.__stt.merge(dic)
def kick(self):
"""処理を実行する。"""
return self.convert()
def convert(self):
"""データ変換処理を実行する。"""
stts = {}
key = self.get_stt_prev('key')
blob_info = blobstore.BlobInfo(blobstore.BlobKey(key))
zr = blob_info.open()
zi = zipfile.ZipFile(zr, 'r')
if len(zi.infolist()) < 1:
zi.close()
zr.close()
return None
zw = StringIO.StringIO()
zo = zipfile.ZipFile(zw, 'w', zipfile.ZIP_DEFLATED)
self.proc_all(zi, zo, stts)
zo.close()
zi.close()
zr.close()
con = zw.getvalue()
stts['csz'] = len(con)
sha1 = hashlib.sha1()
sha1.update(con)
stts['dig'] = sha1.hexdigest()
stts['key'] = save_blob(con, '%(tsk)s_%(cat)s-%(ts)s.zip' % {
'cat':self.get_cat(),
'tsk':self.get_task(),
'ts':self.get_ts_short(),
})
self.set_stt(stts)
return 'ok'
    # can be overridden
def proc_all(self, zi, zo, stts):
"""全てのインプットを処理する。"""
dsz = 0
cnt = 0
for zip_info in zi.infolist():
for data in self.proc(zi, zip_info):
zo.writestr(data[0], data[1], zipfile.ZIP_DEFLATED)
dsz += len(data[1])
cnt += 1
stts['dsz'] = dsz
stts['cnt'] = cnt
    # to be overridden
def proc(self, zi, zip_info):
"""1個のインプットを処理する。"""
pass
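# Subclass sketch (illustrative): a concrete task overrides proc() and yields
# (entry name, converted bytes) pairs, which proc_all() writes into the output zip.
#
#   class PassThroughTask(Task):
#       def proc(self, zi, zip_info):
#           yield zip_info.filename, zi.read(zip_info.filename)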
|
apache-2.0
| 6,164,277,880,098,447,000 | 27.625 | 84 | 0.519394 | false |
RajibHossen/rabbitmq-codes
|
direct/receive_log_direct.py
|
1
|
1026
|
#!/usr/bin/env python
import pika
import sys
credential = pika.PlainCredentials("ipvision","ipvision123")
conn_params = pika.ConnectionParameters('192.168.122.198',5672,'/',credential)
connection = pika.BlockingConnection(conn_params)
channel = connection.channel()
channel.exchange_declare(exchange='direct_logs',
type='direct')
result = channel.queue_declare(exclusive=True)
queue_name = result.method.queue
severities = sys.argv[1:]
if not severities:
sys.stderr.write("Usage: %s [info] [warning] [error]\n" % sys.argv[0])
sys.exit(1)
for severity in severities:
channel.queue_bind(exchange='direct_logs',
queue=queue_name,
routing_key=severity)
print(' [*] Waiting for logs. To exit press CTRL+C')
def callback(ch, method, properties, body):
print(" [x] %r:%r" % (method.routing_key, body))
channel.basic_consume(callback,
queue=queue_name,
no_ack=True)
channel.start_consuming()
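# Usage sketch (the arguments are the routing keys to subscribe to):
#   python receive_log_direct.py info warning error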
|
mit
| -9,674,809,686,369,454 | 26.72973 | 78 | 0.648148 | false |
foxbunny/seagull
|
seagull/cmdline/assets.py
|
1
|
5942
|
#
# Seagull photo gallery app
# Copyright (C) 2016 Hajime Yamasaki Vukelic
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
import os
import sys
import shutil
import signal
import tempfile
import subprocess
from os.path import normpath, join, dirname, exists, isdir, abspath
from . import Command
TMPDIR = tempfile.gettempdir()
COMPASS_PID = join(TMPDIR, 'compass.pid')
COFFEE_PID = join(TMPDIR, 'coffee.pid')
COMPASS = shutil.which('compass')
COFFEE = shutil.which('coffee')
class AssetsCommand:
"""
Base class for assets-related commands
"""
DEFAULT_SKIN = 'seagull'
THISDIR = dirname(__file__)
ROOTDIR = dirname(THISDIR)
SKINDIR = join(ROOTDIR, normpath('seagull/skins'))
def __init__(self, conf):
if conf['runtime.skin_path_override']:
self.skindir = abspath(conf['runtime.skin_path_override'])
else:
self.skindir = abspath(conf['runtime.skin_path'])
print("using skin in '{}'".format(self.skindir))
self.static_url = conf['assets.static_url']
self.srcdir = join(self.skindir, 'src')
self.assetsdir = join(self.skindir, 'assets')
self.scssdir = join(self.srcdir, 'scss')
self.csdir = join(self.srcdir, 'coffee')
self.cssdir = join(self.assetsdir, 'css')
self.jsdir = join(self.assetsdir, 'js')
self.imgdir = join(self.assetsdir, 'img')
self.fontdir = join(self.assetsdir, 'font')
def compass_cmd(self, *cmds):
return [
COMPASS,
*cmds,
'--http-path', self.static_url,
'--app-dir', self.skindir,
'--sass-dir', self.scssdir,
'--css-dir', self.cssdir,
'--images-dir', self.imgdir,
'--fonts-dir', self.fontdir,
'--javascript-dir', self.jsdir,
'--output-style', 'expanded',
'--relative-assets',
]
def coffee_cmd(self, *cmds):
return [
COFFEE,
*cmds,
'--bare',
'--output', self.jsdir,
self.csdir,
]
def write_pid(pid, pidfile):
with open(pidfile, 'w') as f:
f.write(str(pid))
def read_pid(pidfile):
with open(pidfile, 'r') as f:
return int(f.read())
def kill_pid(pid):
if sys.platform == 'win32':
# On win32, a cmd.exe is spawned, which then spawns the process, so
# the pid is for the cmd.exe process and not the main process
# itself. Therefore we need to send an interrupt to the cmd.exe
# process which will then hopefully terminate the children.
os.kill(pid, signal.CTRL_BREAK_EVENT)
os.kill(pid, signal.SIGTERM)
def kill_pidfile(pidfile):
pid = read_pid(pidfile)
kill_pid(pid)
os.unlink(pidfile)
def start_watchers(conf):
print('starting watchers')
cmd = AssetsCommand(conf)
if hasattr(subprocess, 'CREATE_NEW_PROCESS_GROUP'):
# On Windows, commands are run in a subshell regardless of the
# ``shell`` argument unless CREATE_NEW_PROCESS_GROUP flag is used.
# This flag is not supported on *nix platforms, so we test that the
        # flag is supported instead of testing for the platform.
popen_kw = dict(creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)
else:
popen_kw = {}
compass = subprocess.Popen(cmd.compass_cmd('watch'), **popen_kw)
coffee = subprocess.Popen(cmd.coffee_cmd('--watch'), **popen_kw)
write_pid(compass.pid, COMPASS_PID)
write_pid(coffee.pid, COFFEE_PID)
sys.exit(0)
def recompile_assets(conf):
print('recompiling assets')
cmd = AssetsCommand(conf)
try:
subprocess.check_call(cmd.compass_cmd('compile', '--force'))
subprocess.check_call(cmd.coffee_cmd('--compile'))
except subprocess.CalledProcessError:
print('Error compiling assets')
sys.exit(1)
else:
sys.exit(0)
class Watch(Command):
name = 'watch'
help = 'watch a skin directory for changes and recompile assets'
def add_args(self):
self.group.add_argument('--skin-path', '-P', metavar='PATH',
help='use PATH instead of skin specified by '
'the configuration file')
def run(self, args):
self.conf['runtime.skin_path_override'] = args.skin_path
self.conf['runtime.start_hooks'].append(start_watchers)
class StopWatchers(Command):
name = 'stop-watchers'
help = 'stop the assets watchers'
@staticmethod
def kill_process(name, pidfile):
try:
kill_pidfile(pidfile)
except FileNotFoundError:
print('{} PID file not found, nothing to do'.format(name))
except OSError:
print('{} could not be stopped, is it still running?'.format(name))
os.unlink(pidfile)
def run(self, args):
print('stopping watchers')
self.kill_process('compass', COMPASS_PID)
self.kill_process('coffee', COFFEE_PID)
self.quit(0)
class Recompile(Command):
name = 'recompile'
help = 'recompile assets'
def add_args(self):
self.group.add_argument('--skin-path', '-P', metavar='PATH',
help='use PATH instead of skin specified by '
'the configuration file')
def run(self, args):
self.conf['runtime.skin_path_override'] = args.skin_path
self.conf['runtime.start_hooks'].append(recompile_assets)
|
gpl-3.0
| -3,666,141,837,121,631,700 | 30.775401 | 79 | 0.616291 | false |
yugangw-msft/azure-cli
|
src/azure-cli/azure/cli/command_modules/cdn/tests/latest/test_afd_security_policy_scenarios.py
|
2
|
5310
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.testsdk import ResourceGroupPreparer, JMESPathCheck
from azure.cli.testsdk import ScenarioTest, record_only
from .afdx_scenario_mixin import CdnAfdScenarioMixin
class CdnAfdSecurityPolicyScenarioTest(CdnAfdScenarioMixin, ScenarioTest):
@ResourceGroupPreparer()
def test_afd_security_policy_crud(self, resource_group):
profile_name = 'profilesecuritytest'
self.afd_security_policy_list_cmd(resource_group, profile_name, expect_failure=True)
# List get empty
self.afd_profile_create_cmd(resource_group, profile_name)
list_checks = [JMESPathCheck('length(@)', 0)]
self.afd_security_policy_list_cmd(resource_group, profile_name, checks=list_checks)
# Create an endpoint
endpoint1_name = self.create_random_name(prefix='endpoint1', length=24)
endpoint2_name = self.create_random_name(prefix='endpoint2', length=24)
origin_response_timeout_seconds = 100
enabled_state = "Enabled"
endpoint_checks = [JMESPathCheck('originResponseTimeoutSeconds', 100),
JMESPathCheck('enabledState', 'Enabled')]
self.afd_endpoint_create_cmd(resource_group,
profile_name,
endpoint1_name,
origin_response_timeout_seconds,
enabled_state,
checks=endpoint_checks)
self.afd_endpoint_create_cmd(resource_group,
profile_name,
endpoint2_name,
origin_response_timeout_seconds,
enabled_state,
checks=endpoint_checks)
# Create a security policy
security_policy_name = self.create_random_name(prefix='security', length=24)
domain_ids = list()
domain_ids.append(f'/subscriptions/{self.get_subscription_id()}/resourcegroups/{resource_group}/providers/Microsoft.Cdn/profiles/{profile_name}/afdEndpoints/{endpoint1_name}')
domain_ids.append(f'/subscriptions/{self.get_subscription_id()}/resourcegroups/{resource_group}/providers/Microsoft.Cdn/profiles/{profile_name}/afdEndpoints/{endpoint2_name}')
waf_policy_id = f'/subscriptions/{self.get_subscription_id()}/resourcegroups/CliDevReservedGroup/providers/Microsoft.Network/frontdoorwebapplicationfirewallpolicies/SampleStandard'
checks = [JMESPathCheck('provisioningState', 'Succeeded')]
self.afd_security_policy_create_cmd(resource_group,
profile_name,
security_policy_name,
domain_ids,
waf_policy_id,
checks=checks)
show_checks = [JMESPathCheck('name', security_policy_name),
JMESPathCheck('parameters.wafPolicy.id', waf_policy_id),
JMESPathCheck('length(parameters.associations[0].domains)', 2),
JMESPathCheck('parameters.associations[0].domains[0].id', domain_ids[0]),
JMESPathCheck('parameters.associations[0].domains[1].id', domain_ids[1]),
JMESPathCheck('provisioningState', 'Succeeded')]
self.afd_security_policy_show_cmd(resource_group, profile_name, security_policy_name, checks=show_checks)
list_checks = [JMESPathCheck('length(@)', 1),
JMESPathCheck('@[0].name', security_policy_name),
JMESPathCheck('@[0].provisioningState', 'Succeeded')]
self.afd_security_policy_list_cmd(resource_group, profile_name, checks=list_checks)
# Update the security policy
update_checks = [JMESPathCheck('name', security_policy_name),
JMESPathCheck('parameters.wafPolicy.id', waf_policy_id),
JMESPathCheck('length(parameters.associations[0].domains)', 1),
JMESPathCheck('parameters.associations[0].domains[0].id', domain_ids[1]),
JMESPathCheck('provisioningState', 'Succeeded')]
self.afd_security_policy_update_cmd(resource_group,
profile_name,
security_policy_name,
[domain_ids[1]],
waf_policy_id,
checks=update_checks)
# Delete the security policy
self.afd_security_policy_delete_cmd(resource_group, profile_name, security_policy_name)
list_checks = [JMESPathCheck('length(@)', 0)]
self.afd_security_policy_list_cmd(resource_group, profile_name, checks=list_checks)
|
mit
| 2,082,815,849,316,268,500 | 60.744186 | 188 | 0.560075 | false |
jonrf93/genos
|
dbservices/genosdb/models.py
|
1
|
1428
|
"""
Models module, contains classes that serve as data transfer objects and are transformed
from/to JSON to interact with mongo db
"""
class User:
""" User class to contain information for users in an application
Attributes:
username: username on the application
password: user password, usually hashed before creation
email: user email
first_name: first name for this user
last_name: last name for this user
"""
    @classmethod
    def from_json(cls, user_json):
        """ Builds a User instance from a JSON dict with the same fields as to_json(). """
        return cls(user_json['username'], user_json['password'], user_json['email'],
                   user_json['first_name'], user_json['last_name'])
def __init__(self, username, password, email, first_name=None, last_name=None):
self.username = username
self.password = password
self.email = email
self.first_name = first_name
self.last_name = last_name
def to_json(self):
""" Transforms the current object into a JSON object.
Returns:
JSON with user information
"""
return {
'username': self.username,
'password': self.password,
'email': self.email,
'first_name': self.first_name,
'last_name': self.last_name
}
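# Illustrative usage sketch (field values are made up):
#   user = User('jdoe', 'hashed-password', '[email protected]', 'Jane', 'Doe')
#   user.to_json()  # -> {'username': 'jdoe', 'password': 'hashed-password', ...}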
|
mit
| -1,352,826,174,854,707,000 | 31.454545 | 91 | 0.573529 | false |
uptown/django-town
|
django_town/oauth2/grant/codegrant.py
|
1
|
2324
|
#-*- coding: utf-8 -*-
from .grant import Grant
from ..endpoint import AuthorizationEndpoint, TokenEndpoint
class CodeGrant(Grant):
"""
The authorization code grant type is used to obtain both access
tokens and refresh tokens and is optimized for confidential clients.
Since this is a redirection-based flow, the client must be capable of
interacting with the resource owner's user-agent (typically a web
browser) and capable of receiving incoming requests (via redirection)
from the authorization server.
+----------+
| Resource |
| Owner |
| |
+----------+
^
|
(B)
+----|-----+ Client Identifier +---------------+
| -+----(A)-- & Redirection URI ---->| |
| User- | | Authorization |
| Agent -+----(B)-- User authenticates --->| Server |
| | | |
| -+----(C)-- Authorization Code ---<| |
+-|----|---+ +---------------+
| | ^ v
(A) (C) | |
| | | |
^ v | |
+---------+ | |
| |>---(D)-- Authorization Code ---------' |
| Client | & Redirection URI |
| | |
| |<---(E)----- Access Token -------------------'
+---------+ (w/ Optional Refresh Token)
Note: The lines illustrating steps (A), (B), and (C) are broken into
two parts as they pass through the user-agent.
"""
def authorization(self):
self._authorization_endpoint = AuthorizationEndpoint(self._server, self._request, self._client)
redirect_uri = self._authorization_endpoint.authorization()
return redirect_uri
def get_credential(self, expires_in):
self._token_endpoint = TokenEndpoint(self._server, self._request, self._client)
return self._token_endpoint.get_credential(expires_in)
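# Usage sketch (server, request and client stand for whatever the base Grant class
# is constructed with; the names here are purely illustrative):
#   grant = CodeGrant(server, request, client)
#   redirect_uri = grant.authorization()      # steps (A)-(C): obtain the authorization code
#   credential = grant.get_credential(3600)   # steps (D)-(E): exchange the code for tokens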
|
mit
| 7,184,162,702,005,725,000 | 42.055556 | 103 | 0.418675 | false |
0x1001/StockMarket
|
crypto/training_data.py
|
1
|
4252
|
import argparse
import exchange
import random
from tkinter import *
import PIL.Image
import PIL.ImageTk
import pickle
import os
import helper
TEMP_FILE_BEFORE = "_before_temp.png"
TEMP_FILE_AFTER = "_after_temp.png"
TRAINING_DB_FILE = "training_data_db.pkl"
RANGE = 10
RANGE_AFTER = 10
class TrainingData:
def __init__(self):
self.data = []
self.feedback = "" # "buy", "sell", "hold"
self.currency_pair = "" # "USDT_BTC"
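# Illustrative sketch of how a record is filled in (mirrors ask() below):
#   td = TrainingData()
#   td.data = chart_slice
#   td.feedback = "buy"
#   td.currency_pair = "USDT_BTC"
#   _save(td)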
def ask():
currency_pair = "USDT_BTC"
data = exchange.get_chart_data(currency_pair)
while True:
start = random.randint(0, len(data) - (RANGE + RANGE_AFTER) - 1)
slice = data[start : start + RANGE]
slice_after = data[start + RANGE : start + (RANGE + RANGE_AFTER)]
ymin = min([float(d["low"]) for d in slice + slice_after])
ymax = max(([float(d["high"]) for d in slice + slice_after]))
exchange.plot_chart_data(slice, currency_pair, TEMP_FILE_BEFORE, ymin, ymax)
exchange.plot_chart_data(slice_after, currency_pair, TEMP_FILE_AFTER, ymin, ymax)
answer = _gui_question()
if answer:
td = TrainingData()
td.data = slice
td.feedback = answer
td.currency_pair = currency_pair
_save(td)
else:
break
def _gui_question():
root = Tk()
class _Answer:
def __init__(self, root):
self.answer = None
self.root = root
def buy_callback(self):
self.answer = "buy"
self.root.destroy()
def sell_callback(self):
self.answer = "sell"
self.root.destroy()
        def hold_callback(self):
self.answer = "hold"
self.root.destroy()
answer = _Answer(root)
w = Label(root, text="Choose between: Buy, Sell, Hold")
w.pack()
image_frame = Frame(root)
photo_before = PIL.ImageTk.PhotoImage(PIL.Image.open(TEMP_FILE_BEFORE))
chart_before = Label(image_frame, image=photo_before)
chart_before.image = photo_before
chart_before.pack(fill=BOTH, side=LEFT)
photo_after = PIL.ImageTk.PhotoImage(PIL.Image.open(TEMP_FILE_AFTER))
chart_after = Label(image_frame, image=photo_after)
chart_after.image = photo_after
chart_after.pack(fill=BOTH, side=LEFT)
image_frame.pack()
button_frame = Frame(root)
buy = Button(button_frame, text="buy", fg="green", command=answer.buy_callback)
sell = Button(button_frame, text="sell", fg="red", command=answer.sell_callback)
    hold = Button(button_frame, text="hold", fg="black", command=answer.hold_callback)
buy.pack(side=LEFT)
sell.pack(side=LEFT)
hold.pack(side=LEFT)
button_frame.pack()
root.geometry('%dx%d+%d+%d' % (1400, 570, 400, 300))
root.mainloop()
return answer.answer
def generate_automatic():
currency_pairs = ["USDT_BTC", "USDT_ETH", "USDT_LTC", "USDT_ZEC", "USDT_ETC", "USDT_REP", "USDT_XMR", "USDT_STR", "USDT_DASH", "USDT_XRP"]
all_data = []
for currency_pair in currency_pairs:
data = exchange.get_chart_data(currency_pair)
all_data.append(data)
print("Progress {2}: {0} / {1}".format(currency_pairs.index(currency_pair), len(currency_pairs), currency_pair))
_save(all_data)
def _save(data):
if not os.path.isfile(TRAINING_DB_FILE):
all_data = []
else:
with open(TRAINING_DB_FILE, "rb") as fp:
all_data = pickle.load(fp)
all_data.append(data)
with open(TRAINING_DB_FILE, "wb") as fp:
pickle.dump(all_data, fp)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Training data generator.')
parser.add_argument('-r', dest='remove', action='store_true', help='Remove existing training database.')
parser.add_argument('-g', dest='generate', action='store_true', help='Generate training data.')
parser.add_argument('-ag', dest='generate_automatic', action='store_true', help='Generates data automatically')
args = parser.parse_args()
if args.remove:
if os.path.isfile(TRAINING_DB_FILE):
os.unlink(TRAINING_DB_FILE)
if args.generate:
ask()
if args.generate_automatic:
generate_automatic()
|
gpl-2.0
| -6,574,255,750,448,201,000 | 28.324138 | 142 | 0.613829 | false |
benpicco/mate-panflute
|
src/panflute/daemon/pithos.py
|
1
|
4814
|
#! /usr/bin/env python
# Panflute
# Copyright (C) 2010 Paul Kuliniewicz <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301, USA.
"""
Interface translator for Pithos.
"""
from __future__ import absolute_import
import panflute.daemon.connector
import panflute.daemon.mpris
import panflute.mpris
import dbus
class Connector (panflute.daemon.connector.DBusConnector):
"""
Connection manager for Pithos.
"""
def __init__ (self):
panflute.daemon.connector.DBusConnector.__init__ (self, "pithos", "Pithos",
"net.kevinmehall.Pithos")
# Pithos's icon isn't put into a common directory
def root (self, **kwargs):
return panflute.daemon.mpris.Root ("Pithos", **kwargs)
def track_list (self, **kwargs):
return panflute.daemon.mpris.TrackList (**kwargs)
def player (self, **kwargs):
return Player (**kwargs)
class Player (panflute.daemon.mpris.Player):
"""
Player object for Pithos.
"""
from panflute.util import log
def __init__ (self, **kwargs):
panflute.daemon.mpris.Player.__init__ (self, **kwargs)
for feature in ["GetCaps", "GetMetadata", "GetStatus",
"Next", "Pause", "Stop", "Play"]:
self.register_feature (feature)
bus = dbus.SessionBus ()
proxy = bus.get_object ("net.kevinmehall.Pithos", "/net/kevinmehall/Pithos")
self.__pithos = dbus.Interface (proxy, "net.kevinmehall.Pithos")
self.cached_caps.all = panflute.mpris.CAN_GO_NEXT | \
panflute.mpris.CAN_PAUSE | \
panflute.mpris.CAN_PLAY | \
panflute.mpris.CAN_PROVIDE_METADATA
self.__handlers = [
self.__pithos.connect_to_signal ("PlayStateChanged", self.__play_state_changed_cb),
self.__pithos.connect_to_signal ("SongChanged", self.__song_changed_cb)
]
self.__pithos.IsPlaying (reply_handler = self.__play_state_changed_cb,
error_handler = self.log.warn)
self.__pithos.GetCurrentSong (reply_handler = self.__song_changed_cb,
error_handler = self.log.warn)
def remove_from_connection (self):
for handler in self.__handlers:
handler.remove ()
self.__handlers = []
panflute.daemon.mpris.Player.remove_from_connection (self)
def do_Next (self):
self.__pithos.SkipSong (reply_handler = lambda: None,
error_handler = self.log.warn)
def do_Pause (self):
if self.cached_status.state == panflute.mpris.STATE_PLAYING:
self.__pithos.PlayPause (reply_handler = lambda: None,
error_handler = self.log.warn)
def do_Stop (self):
self.do_Pause ()
def do_Play (self):
if self.cached_status.state != panflute.mpris.STATE_PLAYING:
self.__pithos.PlayPause (reply_handler = lambda: None,
error_handler = self.log.warn)
def __play_state_changed_cb (self, playing):
"""
Called when the playback state changes.
"""
if playing:
self.cached_status.state = panflute.mpris.STATE_PLAYING
else:
self.cached_status.state = panflute.mpris.STATE_PAUSED
def __song_changed_cb (self, song):
"""
Called when the current song changes.
"""
self.log.debug ("New song: {0}".format (song))
if song is not None and len (song) > 0:
metadata = {}
if "title" in song:
metadata["title"] = song["title"]
if "artist" in song:
metadata["artist"] = song["artist"]
if "album" in song:
metadata["album"] = song["album"]
if "songDetailURL" in song:
metadata["location"] = song["songDetailURL"]
self.cached_metadata = metadata
else:
self.cached_metadata = {}
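# Illustrative usage sketch (not part of the original module). panflute's daemon
# is expected to obtain these objects through the Connector factory methods above;
# the keyword arguments are omitted because they depend on the daemon's D-Bus
# setup, so treat this purely as an assumption:
#
#     connector = Connector ()        # watches net.kevinmehall.Pithos on the session bus
#     root = connector.root ()        # MPRIS /Root object for Pithos
#     player = connector.player ()    # the Player object defined in this file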
|
gpl-2.0
| 4,155,278,898,648,473,000 | 31.527027 | 95 | 0.58226 | false |
sdeslauriers/streamlines
|
streamlines/io/__init__.py
|
1
|
5342
|
import nibabel as nib
import numpy as np
from nicoord import AffineTransform
from nicoord import CoordinateSystem
from nicoord import CoordinateSystemSpace
from nicoord import CoordinateSystemAxes
from nicoord import VoxelSpace
from nicoord import coord
from nicoord import inverse
import streamlines as sl
# Streamlines in .trk format are always saved in native RAS space.
_ras_mm = CoordinateSystem(
CoordinateSystemSpace.NATIVE, CoordinateSystemAxes.RAS)
def load(filename: str):
"""Loads the streamlines contained in a file
Loads the streamlines contained in a .trk file. The streamlines are
always loaded in a native RAS coordinate system. If the voxel_to_rasmm
affine transform is present in the header, it is also loaded with
the streamlines. This allows the transformation to voxel space using the
transform_to method.
Args:
filename: The file name from which to load the streamlines. Only .trk
files are supported.
"""
# Load the input streamlines.
tractogram_file = nib.streamlines.load(filename)
header = tractogram_file.header
affine_to_rasmm = header['voxel_to_rasmm']
voxel_sizes = header['voxel_sizes']
shape = header['dimensions']
# If there is a transform to RAS, invert it to get the transform to
# voxel space.
if not np.allclose(affine_to_rasmm, np.eye(4)):
affine_to_voxel = np.linalg.inv(affine_to_rasmm)
target = coord('voxel', 'ras', voxel_sizes, shape)
transforms = [AffineTransform(_ras_mm, target, affine_to_voxel)]
else:
transforms = None
tractogram = tractogram_file.tractogram
streamlines = sl.Streamlines(tractogram.streamlines, _ras_mm, transforms)
# Add the streamline point data to each streamline.
for key, values in tractogram.data_per_point.items():
for streamline, value in zip(streamlines, values):
streamline.data[key] = value.T
return streamlines
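# Illustrative usage sketch (not part of the original module; 'bundle.trk' is a
# hypothetical path). The returned object is a sequence of streamlines in native
# RAS coordinates, carrying a RAS-to-voxel transform when the header provides one:
#
#     >>> import streamlines as sl
#     >>> bundle = sl.io.load('bundle.trk')
#     >>> len(bundle), bundle.coordinate_system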
def save(streamlines, filename):
"""Saves streamlines to a trk file
Saves the streamlines and their metadata to a trk file.
Args:
streamlines (streamlines.Streamlines): The streamlines to save.
filename (str): The filename of the output file. If the file
exists, it will be overwritten.
Examples:
>>> import numpy as np
>>> import streamlines as sl
>>> streamlines = sl.Streamlines(np.random.randn(10, 100, 3))
>>> sl.io.save(streamlines, 'test.trk')
"""
# Concatenate all metadata into 2 dicts, one for streamline data and
# the other for point data.
data_per_point = {}
data_per_streamline = {}
# There might be no streamlines.
if len(streamlines) > 0:
for key in streamlines[0].data.keys():
if streamlines[0].data[key].ndim == 2:
data_per_point[key] = [s.data[key].T for s in streamlines]
else:
data_per_streamline[key] = [s.data[key] for s in streamlines]
transforms = streamlines.transforms
if streamlines.coordinate_system != _ras_mm:
# If we are not in RAS, find the affine to native RAS. If it does not
# exist, we have to stop because nibabel always wants to save in native
# RAS.
valid_transforms = [t for t in transforms if t.target == _ras_mm]
if len(valid_transforms) == 0:
raise ValueError(
f'The streamlines are not in native RAS space and no '
f'transforms to RAS are available. Cannot save to .trk '
f'format.')
# Note that we don't change the coordinate system. Nibabel does it on
# save.
transform = valid_transforms[0]
coordinate_system = transform.target
affine_to_rasmm = affine = transform.affine
else:
# The points are already in the right coordinate system.
affine_to_rasmm = np.eye(4)
# If we are in RAS, we can still find the transform to native RAS as
# the inverse of the inverse. It is ok if there is none.
target = coord('voxel', 'ras')
valid_transforms = [t for t in transforms if t.target == target]
if len(valid_transforms) == 0:
affine = np.eye(4)
coordinate_system = coord('voxel', 'ras')
else:
transform = inverse(valid_transforms[0])
coordinate_system = transform.target
affine = transform.affine
# Get the reference image information from the coordinate system if it is
# available.
if isinstance(coordinate_system, VoxelSpace):
shape = coordinate_system.shape
voxel_sizes = coordinate_system.voxel_sizes
else:
# Use default values if voxel space data is not available.
shape = (1, 1, 1)
voxel_sizes = (1, 1, 1)
new_tractogram = nib.streamlines.Tractogram(
[s.points for s in streamlines],
affine_to_rasmm=affine_to_rasmm,
data_per_point=data_per_point,
data_per_streamline=data_per_streamline)
hdr_dict = {'dimensions': shape,
'voxel_sizes': voxel_sizes,
'voxel_to_rasmm': affine,
'voxel_order': "".join(nib.aff2axcodes(affine))}
trk_file = nib.streamlines.TrkFile(new_tractogram, hdr_dict)
trk_file.save(filename)
|
gpl-3.0
| 4,941,398,785,082,114,000 | 33.688312 | 79 | 0.648446 | false |
mlperf/training_results_v0.6
|
Fujitsu/benchmarks/resnet/implementations/mxnet/3rdparty/tvm/python/tvm/autotvm/task/code_hash.py
|
1
|
1259
|
"""
Decorator functions for hashing schedule code
Code hashing is used to check the consistency between schedule code and the parameters loaded from a log
"""
import inspect
import zlib
from tvm import schedule
def attach_code_hash(s):
"""Decorator for attaching a code hash to a schedule
Parameters
----------
s: Schedule
tvm.schedule.Schedule to attach the hash to
"""
def decorator(func):
def wrapper(*args, **kwargs):
func(*args, **kwargs)
raw_hash = zlib.crc32(''.join(inspect.getsourcelines(func)[0]).encode())
s.code_hash = hex(raw_hash)[2:]
return wrapper
return decorator
def attach_code_hash_to_arg(arg_idx=1):
"""Decorator for attaching a code hash to a schedule
Parameters
----------
arg_idx: int
index of the argument (expected to be a Schedule) to attach the code
hash to
"""
def decorator(func):
def wrapper(*args, **kwargs):
func(*args, **kwargs)
assert isinstance(args[arg_idx], schedule.Schedule)
raw_hash = zlib.crc32(''.join(inspect.getsourcelines(func)[0]).encode())
args[arg_idx].code_hash = hex(raw_hash)[2:]
return wrapper
return decorator
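# Illustrative usage sketch (not part of the original module; `s` is assumed to be
# a tvm.schedule.Schedule built elsewhere). Each decorator records a CRC32 of the
# wrapped function's source on the schedule, so a tuning log can later be checked
# against the schedule code:
#
#     @attach_code_hash(s)
#     def _apply_schedule(cfg):
#         ...  # build/mutate the schedule `s`
#
#     @attach_code_hash_to_arg(arg_idx=1)
#     def _apply_schedule_to(cfg, sched):
#         ...  # `sched.code_hash` is set after the wrapped call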
|
apache-2.0
| -4,064,826,580,530,348,000 | 28.27907 | 97 | 0.618745 | false |