max_stars_repo_path stringlengths 4-182 | max_stars_repo_name stringlengths 6-116 | max_stars_count int64 0-191k | id stringlengths 7-7 | content stringlengths 100-10k | size int64 100-10k |
---|---|---|---|---|---|
todoapp/container/toDoApp/tests/tests_views.py
|
Alexzg/Django-projects
| 0 |
2025393
|
from django.test import TestCase
from django.http import HttpRequest
from django.urls import reverse
class StartPageTests(TestCase):
print("- StartPageTests")
def test_start_page_status_code(self):
response = self.client.get('/')
self.assertEquals(response.status_code, 200)
def test_start_page_correct_url_by_name(self):
response = self.client.get(reverse('index'))
self.assertEquals(response.status_code, 200)
def test_start_page_uses_correct_html(self):
response = self.client.get(reverse('index'))
self.assertContains(response, '<h1>Ucair - TodoApp</h1>')
def test_start_page_uses_incorrect_html(self):
response = self.client.get(reverse('index'))
self.assertNotContains(response, '<h1>I have a wrong app</h1>')
def test_form_data_entry_redirection(self):
response = self.client.post('/add', {
'title':'ViewTest-1', 'content':'Some description', 'category':'General', 'startDate':'2018-12-30', 'dueDate':'2018-12-29', })
self.assertEquals(response.status_code, 302) #redirection
class AddTaskPageTests(TestCase):
print("- AddTaskPageTests")
def test_add_page_status_code(self):
response = self.client.get('/add/')
self.assertNotEquals(response.status_code, 200)
def test_add_page_correct_url_by_name(self):
response = self.client.get(reverse('addTask'))
self.assertNotEquals(response.status_code, 200)
| 1,347 |
Strip.py
|
chris3k/dilbertbrowser
| 0 |
2023740
|
import os
class Strip(object):
def __init__(self):
self.url = None
self.title = ""
self.date = None
self.next_strip = None
self.prev_strip = None
self.img_url = None
self._local_img_path = None
self._comic = None
@property
def local_img_path(self):
if self._local_img_path:
return os.path.join("strips", self._local_img_path)
@local_img_path.setter
def local_img_path(self, url):
self._local_img_path = url
@property
def comic(self):
if self.local_img_path and os.path.exists(self.local_img_path):
return self.local_img_path
return self._comic
@comic.setter
def comic(self, x):
self._comic = x
def parse(self):
pass
def download(self):
pass
def __str__(self):
return "<url={}, local_path={}>".format(self.url, self._local_img_path)
def __repr__(self):
return self.__str__()
| 1,040 |
examples/python/osc-wifi.py
|
squidsoup-uk/ArduinoOSC
| 133 |
2024702
|
from osc4py3.as_eventloop import *
from osc4py3 import oscbuildparse
from osc4py3 import oscmethod as osm
import time
osc_startup()
osc_udp_client("192.168.1.201", 54321, "client_send")
osc_udp_client("192.168.1.201", 54345, "client_bind")
osc_udp_server("0.0.0.0", 55555, "server_recv")
osc_udp_server("0.0.0.0", 54445, "server_published")
def handler(address, *args):
print(address, args)
osc_method("/*", handler, argscheme=osm.OSCARG_ADDRESS + osm.OSCARG_DATAUNPACK)
try:
while True:
msg = oscbuildparse.OSCMessage('/lambda/msg', ",ifs", [123, 4.5, "six"])
osc_send(msg, "client_send")
osc_process() # one message, one call
msg = oscbuildparse.OSCMessage('/callback', ",ifs", [1, 2.2, "test"])
osc_send(msg, "client_send")
osc_process() # one message, one call
msg = oscbuildparse.OSCMessage('/wildcard/abc/test', ",i", [1])
osc_send(msg, "client_send")
osc_process() # one message, one call
msg = oscbuildparse.OSCMessage('/need/reply', ",", [])
osc_send(msg, "client_send")
osc_process() # one message, one call
msg = oscbuildparse.OSCMessage('/bind/values', ",ifs", [345, 6.7, "string"])
osc_send(msg, "client_bind")
osc_process() # one message, one call
msg = oscbuildparse.OSCMessage('/lambda/bind/args', ",ifs", [789, 1.23, "bind"])
osc_send(msg, "client_send")
osc_process() # one message, one call
time.sleep(1)
except KeyboardInterrupt:
# Properly close the system.
osc_terminate()
| 1,576 |
backend/api/migrations/0069_remove_documenthistory_user_role.py
|
amichard/tfrs
| 18 |
2022948
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2019-01-21 17:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0068_rename_fuel_supply_records'),
]
operations = [
migrations.RemoveField(
model_name='documenthistory',
name='user_role'
),
]
| 417 |
playerinterface/views.py
|
ixalis/mafiapp
| 0 |
2025572
|
from django.shortcuts import render, redirect
from gamegeneration.models import *
from django.http import HttpResponse
from django.views import generic
from forms import *
from django.contrib.auth.decorators import login_required
from django.contrib.auth import login, authenticate
def is_gm(user):
return True
try:
return user.profile.currentPlayer.attributes.get("GM").value == 'True'
except:
return False
def home(request):
"""
Home page
"""
return render(request, 'home.html', {})
@login_required
def dashboard(request):
"""
View for User Profile
"""
player = request.user.profile.currentPlayer
items = player.iteminstance_set.all()
abilities = player.abilityinstance_set.all()
attributes = []
attributesme = player.attributes.all()
for a in attributesme:
if a.visible():
attributes.append(a)
context = {'user':request.user, 'player':player, 'items':items, 'abilities':abilities, 'attributes':attributes}
return render(request, 'playerinterface/profile.html', context)
@login_required
def itemuse(request, itemid):
"""
View for form for using an item
"""
item = ItemInstance.objects.get(id=itemid)
if item.owner != request.user.profile.currentPlayer and not is_gm(request.user):
return redirect('dashboard')
requests = item.get_requests()
if request.method == 'POST':
form = AutoGenerateForm(request.POST, extra=requests)
if form.is_valid():
parameters = form.get_answers()
parameters['owner'] = request.user.profile.currentPlayer
message = item.use(parameters)
m = Message(addressee=parameters['owner'], content=message, game=request.user.profile.currentPlayer.game)
m.save()
#Display the message you get at the end
context = {"message":message}
return render(request, 'gmmessage.html', context)
else:
form = AutoGenerateForm(extra = requests)
#Render the form
context = {'form':form, 'main':item.itype.name, 'instruction':item.get_usetext()}
return render(request, "form.html", context)
def itemtransfer(request, itemid):
"""
View form for transfering an item
"""
item = ItemInstance.objects.get(id=itemid)
if item.owner != request.user.profile.currentPlayer and not is_gm(request.user):
return redirect('dashboard')
requests = item.get_requests()
if request.method == 'POST':
form = ItemTransferForm(request.POST)
if form.is_valid():
owner = form.get_answer()
message = item.transfer(owner)
item.save()
m = Message(addressee=owner, content=message, game=request.user.profile.currentPlayer.game)
#Display the message you get at the end
context = {"message":message}
return render(request, 'gmmessage.html', context)
else:
form = ItemTransferForm()
#Render the form
context = {'form':form, 'main':item.get_itype().get_name()}
return render(request, "form.html", context)
@login_required
def abilityactivate(request, abilityid):
"""
View for form for using an item
"""
ability = AbilityInstance.objects.get(id=abilityid)
if ability.owner != request.user.profile.currentPlayer and not is_gm(request.user):
return redirect('dashboard')
requests = ability.get_requests()
if request.method == 'POST':
form = AutoGenerateForm(request.POST, extra=requests)
if form.is_valid():
parameters = form.get_answers()
parameters['owner'] = request.user.profile.currentPlayer
message = ability.use(parameters)
m = Message(addressee=parameters['owner'], content=message, game=request.user.profile.currentPlayer.game)
m.save()
#Display the message you get at the end
context = {"message":message}
return render(request, "gmmessage.html", context)
else:
form = AutoGenerateForm(extra = requests)
#Render the form
context = {'form':form, 'main':ability.itype.name, 'instruction':ability.get_usetext()}
return render(request, "form.html", context)
@login_required
def profile(request):
game = request.user.profile.currentPlayer.game
context = {"message":"You are currently playing the game"+str(game)}
return render(request, "gmmessage.html", context)
@login_required
def inbox(request):
player = request.user.profile.currentPlayer
messages = Message.objects.filter(addressee=player)
context = {'messages':messages}
return render(request, "playerinterface/inbox.html", context)
def signup(request):
if request.method == 'POST':
form = SignUpForm(request.POST)
if form.is_valid():
form.save()
username = form.cleaned_data.get('username')
raw_password = form.cleaned_data.get('<PASSWORD>')
user = authenticate(username=username, password=raw_password)
login(request, user)
return redirect('home')
else:
form = SignUpForm()
return render(request, 'playerinterface/signup.html', {'form': form})
| 5,259 |
mapa.py
|
gustavocamalionti/mapa-interativo
| 0 |
2024227
|
import folium
from folium.plugins import Fullscreen
from folium.plugins import MarkerCluster
import pandas as pds
#IMPORTING THE SPREADSHEET WITH DATA
ler = pds.read_excel(r'C:\Users\<NAME>\Documents\Programação\Meus-Projetos\Mapa Interativo\dados\dadosmap.xlsx')
#MAP: GENERAL SETTINGS
mapa = folium.Map(
location = [-13.7048968, -69.6590157],
world_copy_jump = True,
tiles = 'OpenStreetMap',
zoom_start = 4,
)
#FULLSCREEN BUTTON
Fullscreen(
position = 'topright',
title = 'Entrar/Sair da Tela Cheia',
title_cancel = 'Sair da Tela Cheia',
force_separate_button=True).add_to(mapa)
#INTERACTIONS BETWEEN THE MARKERS.
for index, linha in ler.iterrows():
cluster = MarkerCluster().add_to(mapa)
print(ler)
#CREATING THE LOOP
for index, linha in ler.iterrows():
folium.Marker(location=[linha['LAT'], linha['LONG']], popup = linha['UF2'], icon=folium.Icon(color='purple', icon='info-sign')).add_to(cluster)
#folium.CircleMarker(location=[-23.8641866,-46.4303154], radius=25, popup='<b>SANTOS</b>', color='#3186cc', fill=True, fill_color='#3186cc').add_to(cluster)
#"STATES" CONTROL BUTTON
#folium.LayerControl().add_to(mapa)
mapa.save(r'C:\Users\<NAME>\Documents\Programação\Meus-Projetos\Mapa Interativo\mapa.html')
| 1,266 |
servers/mlflowserver/mlflowserver/MLFlowServer.py
|
dtaniwaki/seldon-core
| 0 |
2024651
|
from mlflow import pyfunc
import seldon_core
from seldon_core.user_model import SeldonComponent
from typing import Dict, List, Union, Iterable
import numpy as np
import os
import logging
import requests
import pandas as pd
log = logging.getLogger()
MLFLOW_SERVER = "model"
class MLFlowServer(SeldonComponent):
def __init__(self, model_uri: str):
super().__init__()
log.info(f"Creating MLFLow server with URI: {model_uri}")
self.model_uri = model_uri
self.ready = False
def load(self):
log.info(f"Downloading model from {self.model_uri}")
model_file = seldon_core.Storage.download(self.model_uri)
self._model = pyfunc.load_model(model_file)
self.ready = True
def predict(
self,
X: np.ndarray,
feature_names: Iterable[str] = [],
meta: Dict = None
) -> Union[np.ndarray, List, Dict, str, bytes]:
log.info(f"Requesting prediction with: {X}")
if not self.ready:
self.load()
# TODO: Make sure this doesn't get called from here, but
# from the actual python wrapper. Raise exception instead
#raise requests.HTTPError("Model not loaded yet")
if not feature_names is None and len(feature_names)>0:
df = pd.DataFrame(data=X, columns=feature_names)
else:
df = pd.DataFrame(data=X)
result = self._model.predict(df)  # predict on the DataFrame assembled above
log.info(f"Prediction result: {result}")
return result
| 1,548 |
libs/openwsman/bindings/python/tests/client.py
|
juergh/dash-sdk
| 13 |
2023625
|
import unittest
from pywsman import *
class TestSequenceFunctions(unittest.TestCase):
def test_client_constructor_uri_simple(self):
client = Client("http://localhost")
self.assertEqual(client.scheme() , "http")
self.assertEqual(client.host() , "localhost")
def test_client_constructor_uri(self):
client = Client( "https://wsman:secret@localhost:5985/wsman" )
assert client is not None
self.assertEqual(client.scheme() , "https" )
self.assertEqual(client.user() , "wsman" )
self.assertEqual(client.password() , "<PASSWORD>" )
self.assertEqual(client.host() , "localhost" )
self.assertEqual(client.port() , 5985 )
self.assertEqual(client.path() , "/wsman" )
def test_client_constructor_full(self):
client = Client( "localhost", 5985, "/wsman", "http", "wsman", "secret" )
assert client is not None
self.assertEqual(client.scheme() , "http" )
self.assertEqual(client.user() , "wsman" )
self.assertEqual(client.password() , "<PASSWORD>")
self.assertEqual(client.host() , "localhost" )
self.assertEqual(client.port() ,5985 )
self.assertEqual(client.path() ,"/wsman")
def test_client_options_constructor(self):
options = ClientOptions()
assert options is not None
def test_identify(self):
client = Client( "http://wsman:secret@localhost:5985/wsman" )
assert client is not None
options = ClientOptions()
assert options is not None
doc = client.identify( options )
assert doc is not None
root = doc.root()
assert root is not None
prot_version = root.find( XML_NS_WSMAN_ID, "ProtocolVersion" )
prod_vendor = root.find( XML_NS_WSMAN_ID, "ProductVendor" )
prod_version = root.find(XML_NS_WSMAN_ID, "ProductVersion" )
print "Protocol %s, Vendor %s, Version %s" %( prot_version, prod_vendor, prod_version )
if __name__ == '__main__':
unittest.main()
| 1,821 |
src/mainmodulename/plugins/type_one_plugin/type_one_plugin.py
|
portikCoder/basic_python_plugin_project
| 1 |
2025471
|
# Copyright (c) 2021 portikCoder. All rights reserved.
# See the license text under the root package.
import logging
from mainmodulename.common.plugin_template import Plugin
class TypeOnePlugin(Plugin):
def run(self):
logging.info("Doing a great job.")
| 270 |
src/CNN/plotImage.py
|
RaymondLZhou/deep-neural-networks
| 0 |
2025297
|
import matplotlib.pyplot as plt
from tensorflow.keras.preprocessing.image import ImageDataGenerator
def plotImages(images_arr, sets, crops):
fig, axes = plt.subplots(sets, crops, figsize=(20, 20))
axes = axes.flatten()
for img, ax in zip( images_arr, axes):
ax.imshow(img)
ax.axis('off')
plt.tight_layout()
plt.show()
def plotImageSet(train_data_gen, sets, crops):
augmented_images = [train_data_gen[0][0][i] for i in range (sets) for j in range(crops)]
plotImages(augmented_images, sets, crops)
def createImageSet(train_dir, validation_dir, batch_size, IMG_HEIGHT, IMG_WIDTH):
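# Build an augmented training generator and a rescale-only validation generator, and plot a sample of augmented images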
image_gen_train = ImageDataGenerator(
rescale=1./255,
rotation_range=45,
width_shift_range=.15,
height_shift_range=.15,
horizontal_flip=True,
zoom_range=0.5
)
train_data_gen = image_gen_train.flow_from_directory(batch_size=batch_size,
directory=train_dir,
shuffle=True,
target_size=(IMG_HEIGHT, IMG_WIDTH),
class_mode='binary')
plotImageSet(train_data_gen, 4, 5)
image_gen_val = ImageDataGenerator(rescale=1./255)
val_data_gen = image_gen_val.flow_from_directory(batch_size=batch_size,
directory=validation_dir,
target_size=(IMG_HEIGHT, IMG_WIDTH),
class_mode='binary')
return train_data_gen, val_data_gen
| 1,827 |
main.py
|
t5318019/molly
| 0 |
2024859
|
import requests
import urllib3
import csv
import os.path
import os
import re
from urllib.parse import urlparse
from urllib.parse import urljoin
urllib3.disable_warnings()
DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')
PAGE_DIR = os.path.join(os.path.dirname(__file__), 'page')
HEADERS = {
'ACCEPT-LANGUAGE': 'zh-TW,zh;q=0.9,en-US;q=0.8,en;q=0.7',
'ACCEPT-ENCODING': 'gzip, deflate, br',
'USER-AGENT': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36'
}
def get_html(urlpath):
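# Fetch a page and normalize its encoding, preferring the charset declared in the page content over the HTTP header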
response = requests.get(urlpath, headers=HEADERS, verify=False)
encodings = requests.utils.get_encodings_from_content(response.text)
if encodings:
response.encoding = encodings[0].lower()
if response.encoding.lower() == 'iso-8859-1':
response.encoding = 'utf-8'
if response.encoding.lower() == 'big-5':
response.encoding = 'big5'
return response
if __name__ == "__main__":
if not os.path.exists(DATA_DIR):
os.mkdir(DATA_DIR)
if not os.path.exists(PAGE_DIR):
os.mkdir(PAGE_DIR)
csv_filename = os.path.join(DATA_DIR, 'u1_new.csv')
if not os.path.exists(csv_filename):
response = requests.get('http://stats.moe.gov.tw/files/school/108/u1_new.csv', headers=HEADERS, verify=False)
if response.status_code == 200:
with open(csv_filename, 'w') as f:
text = response.text
text = text.replace('\u3000','')
text = text.replace(' ','')
text = text.replace('www2','www')
f.write(text)
with open(csv_filename, newline='') as csvfile:
rows = csv.DictReader(csvfile)
for row in rows:
urlresult = urlparse(row['網址'].lower())
urlpath = 'http://' + urlresult.netloc
html_filename = os.path.join(PAGE_DIR, urlresult.netloc + '.html')
if os.path.exists(html_filename):
continue
while True:
print(urlpath)
response = get_html(urlpath)
if response.status_code == 403:
response = get_html(urlpath.replace('http:', 'https:'))
m = re.search(r'http-equiv=[\'"]*refresh[\'"]*\s+content=[\'"]\s*\d\s*;\s*url=([\w\/:.\'"\-]+)', response.text, flags=re.IGNORECASE)
if m:
refresh_url = m[1].lower()
refresh_url = refresh_url.replace('"', '')
refresh_url = refresh_url.replace('\'', '')
refresh_url = refresh_url if refresh_url.startswith('http') else urljoin(response.url, refresh_url)
refresh_url = refresh_url.replace('https:', 'http:')
urlpath = refresh_url
else:
break
with open(html_filename, 'w') as f:
f.write(response.text)
print(row['學校名稱'], urlresult.netloc, response.encoding, sep=': ')
| 3,028 |
elit/components/parsers/second_order/treecrf_decoder.py
|
emorynlp/el
| 40 |
2025050
|
# ========================================================================
# Copyright 2020 Emory University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
# -*- coding:utf-8 -*-
# Author: hankcs
from typing import Any, Tuple
import torch
from elit.components.parsers.biaffine.biaffine_model import BiaffineDecoder
from elit.components.parsers.biaffine.mlp import MLP
from elit.components.parsers.constituency.treecrf import CRF2oDependency
from elit.components.parsers.second_order.affine import Triaffine
class TreeCRFDecoder(BiaffineDecoder):
def __init__(self, hidden_size, n_mlp_arc, n_mlp_sib, n_mlp_rel, mlp_dropout, n_rels) -> None:
super().__init__(hidden_size, n_mlp_arc, n_mlp_rel, mlp_dropout, n_rels)
self.mlp_sib_s = MLP(hidden_size, n_mlp_sib, dropout=mlp_dropout)
self.mlp_sib_d = MLP(hidden_size, n_mlp_sib, dropout=mlp_dropout)
self.mlp_sib_h = MLP(hidden_size, n_mlp_sib, dropout=mlp_dropout)
self.sib_attn = Triaffine(n_in=n_mlp_sib, bias_x=True, bias_y=True)
self.crf = CRF2oDependency()
def forward(self, x, mask=None, **kwargs: Any) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
s_arc, s_rel = super(TreeCRFDecoder, self).forward(x, mask)
sib_s = self.mlp_sib_s(x)
sib_d = self.mlp_sib_d(x)
sib_h = self.mlp_sib_h(x)
# [batch_size, seq_len, seq_len, seq_len]
s_sib = self.sib_attn(sib_s, sib_d, sib_h).permute(0, 3, 1, 2)
return s_arc, s_sib, s_rel
| 2,071 |
Algorithms/Implementation/mini_max_sum.py
|
rho2/HackerRank
| 0 |
2025442
|
X = list(map(int, input().split()))
X.sort()
l = sum(X[:4])
h = sum(X[1:])
print(str(l) + ' ' + str(h))
| 107 |
seq_util/make_seq_id_unique.py
|
fandemonium/code
| 2 |
2025617
|
import sys
import re
id = []
seq = []
for line in open(sys.argv[1], 'rU'):
line = line.strip()
if line.startswith(">"):
lexemes = re.split(">| |,", line)
gi = lexemes[1]
loc = re.split("=|[..]", lexemes[2])[1]
newloc = loc.replace("(", "_")
newid = gi + "_"+newloc
id.append(newid)
else:
seq.append(line)
na = dict(zip(id, seq))
fa_out = open(sys.argv[2], 'w')
for item in na:
fa_out.write(">gi%s\n" % item)
fa_out.write("%s\n" % na[item])
| 464 |
bts2/bts2.py
|
WorkVerbDesign/BTS2
| 0 |
2024994
|
#! /usr/bin/python3
# Module Runner
# This is basically burn the subs
import settings
import sys
from threading import Thread
from time import sleep
from dataBaseClass import Sub, db
from placeAndGcode import placeNames, makeGcode
from sendGcode import gSend
from pubSubListener import ws1_start, pingTwitchServersToKeepTheConnectionAliveTask, webSocketInit
from frontPanel import Btn_Red, Btn_Blk, LED_BB_Red, LED_BB_Grn, LED_RB_Red, LED_Grn, LED_Red, LED_RB_Grn
from ohShit import stopit
import consoleClass
from consoleClass import consoleStuff
#for testing
#from dbMaker import makeDb
#from dbUnparser import unParsify
#set flags
threadQuit = False
streamerToggle = False
#grab settings
entered = settings.nameEntered
placed = settings.namePlaced
gcode = settings.nameGcode
burnt = settings.nameBurnt
def runConsole():
while not threadQuit:
consoleStuff()
def runPlace():
while not threadQuit:
noNotPlaced = Sub.select().where(Sub.status==entered).count()
if noNotPlaced > 0:
placeNames()
def deraLict():
noNotGd = Sub.select().where(Sub.status==placed).count()
if noNotGd > 0:
consoleClass.thread1 = "found " + str(noNotGd) + " un-gcoded names, fixing"
gList = Sub.select().where(Sub.status==placed)
for name in gList:
makeGcode(name)
def testies():
while not threadQuit:
#number of names to process for the test
entriesNo = Sub.select().where(Sub.status >= placed).count()
#display number of names to process for the test
consoleClass.thread1 = str(entriesNo) + " of " + str(testicalNum)+ " names processed"
#when all names have been placed end the test
if testicalNum == entriesNo:
endTheDamnTest()
def virtualSub():
#virtual pubsub
consoleClass.thread2 = "disabled for test"
try:
consoleClass.thread1 = "test: initiated. looking for db entries"
chromazomes = Sub.select().where(Sub.status >= placed).count()
except:
consoleClass.thread1 = "test: making new db"
chromazomes = 0
chromazomes += makeDb()
consoleClass.thread1 = "test: done adding names"
def endTheDamnTest():
global threadQuit
#console
consoleClass.thread1 = "trying to exit clean!"
#quit flag
threadQuit = True
#stop serial
stopit()
#put database gcode into text
#unParsify()
#stop the database
db.stop()
#indicate on the front panel
LED_BB_Grn.on()
sleep(2)
LED_BB_Grn.off()
#full exit of the program
sys.exit()
def runBurner():
while not threadQuit:
readyCount = Sub.select().where(Sub.status==gcode).count()
#console will update separately since this gets stuck in gSend.
if readyCount > 0:
if readyCount >= 10:
LED_Grn.blink()
if readyCount < 10:
LED_Grn.on()
if streamerToggle:
gSend()
else:
LED_Grn.off()
def redButton():
global streamerToggle
LED_RB_Red.off()
if streamerToggle == False:
LED_RB_Grn.on()
streamerToggle = True
consoleClass.thread1 = "Sender Active"
else:
LED_RB_Grn.off()
streamerToggle = False
consoleClass.thread1 = "Sender Disabled"
def blkButton():
consoleClass.thread1 = "Shut down button pressed!"
endTheDamnTest()
#these have to be declared here
Btn_Red.when_released = redButton
Btn_Red.when_pressed = LED_RB_Red.on
Btn_Blk.when_pressed = LED_BB_Red.on
Btn_Blk.when_released = blkButton
if __name__ == "__main__":
#console
consoleClass.thread1 = "Burn the Subs Booted"
try:
Thread.daemon = True
#start console thread
Thread(target=runConsole).start()
#check if there are derelict entries
deraLict()
#pubSub, starts two threads
webSocketInit()
#testing virtualpubSub
#virtualSub()
#placer
Thread(target=runPlace).start()
#testing thread trap
#testies(chromazomes)
#gCodeStreamer, traps thread
runBurner()
except KeyboardInterrupt:
endTheDamnTest()
except:
LED_Red.on()
| 4,559 |
sample/script/exporter.py
|
sho7noka/PyDCC
| 0 |
2023677
|
import random
import sys
sys.path.append("/Users/shosumioka/Yurlungur")
from yurlungur.user.Qt import QtWidgets, UIWindow
from vfxwindow.utils.palette import getPaletteList
class YWindow(UIWindow):
WindowID = 'unique_window_id'
WindowName = 'My Window'
WindowDockable = True
def __init__(self, parent=None, **kwargs):
super(YWindow, self).__init__(parent, **kwargs)
# Setup window here
container = QtWidgets.QWidget()
layout = QtWidgets.QVBoxLayout()
container.setLayout(layout)
self.setCentralWidget(container)
messageButton = QtWidgets.QPushButton('Popup Message')
messageButton.clicked.connect(self.message)
layout.addWidget(messageButton)
confirmButton = QtWidgets.QPushButton('Confirmation Box')
confirmButton.clicked.connect(self.confirm)
layout.addWidget(confirmButton)
paletteButton = QtWidgets.QPushButton('Random Palette')
paletteButton.clicked.connect(self.palette)
layout.addWidget(paletteButton)
def message(self):
"""Test message box."""
value = self.displayMessage(
title='Test',
message='This is a test.'
)
print('Chosen value: {}'.format(value))
return value
def confirm(self):
"""Test confirmation box."""
value = self.displayMessage(
title='Test',
message='This is a test.',
buttons=('Yes', 'No'),
defaultButton='Yes',
cancelButton='No',
)
print('Chosen value: {}'.format(value))
return value
def palette(self):
newPalette = random.choice(getPaletteList())
self.setWindowPalette(*newPalette.split('.', 1))
# Setup callbacks, but wait until the program is ready
self.deferred(self.newScene)
def newScene(self, *args):
"""Example: Delete and reapply callbacks after loading a new scene."""
self.removeCallbacks('sceneNewCallbacks')
if self.maya:
self.addCallbackScene('kAfterNew', self.newScene, group='sceneNewCallbacks')
elif self.nuke:
self.addCallbackOnCreate(self.newScene, nodeClass='Root', group='sceneNewCallbacks')
elif self.unity:
print(1)
if __name__ == '__main__':
YWindow.show()
| 2,350 |
Extract_Context_From_Snippets_Full.py
|
touqir14/Cmput_692_project
| 0 |
2024097
|
# import timeit
from sys import getsizeof
import time
import json
from sys import getsizeof
import collections
import pickle
# import timeit
from sys import getsizeof
import time
import csv
import io
output = io.StringIO()
dir = '/Users/sasa/Dropbox/1-Uni/CMPUT 692/Project/Code/google_snippet/Nikon_Snippets_Integrated.json'
import pickle
def create_pickle_from_json():
f = open(dir, 'r')
jsonvalues = json.load(f)
f.close()
with open('/Users/sasa/Dropbox/1-Uni/CMPUT 692/Project/Code/Nikon_Entities.csv', 'rU') as csvfile:
reader = csv.reader(csvfile, dialect=csv.excel_tab)
i=0
ParentIndex = {}
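# For every entity, build a dictionary counting how often each non-stopword term appears in its snippet titles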
for row in reader:
i = i + 1
#if (i > 1):
# break
print(row[0])
k = row[0]
myIndex = {}
for criteria in jsonvalues[row[0]]:
splitted = str(criteria['title']).lower().split()
for term in splitted:
if ((term not in str(k).lower()) and (term not in ["...", "|",":",",","at","from","to","for",'/',"with","and","or", "the","b&h", '_','-', '&',"review:", "reviews", "review", "w/","w/o", "amazon.com","youtube"])):
#print(term)
term = term.replace("(", "")
term = term.replace(")", "")
term = term.replace("{", "")
term = term.replace("}", "")
term = term.replace("!", "")
term = term.replace(":", "")
term = term.replace("|", "")
if term not in myIndex:
myIndex[term] = {}
myIndex[term] = 1
else:
if term in myIndex:
myIndex[term] += 1
#print(myIndex)
ParentIndex[str(k).lower()]=myIndex
print(k)
print(ParentIndex[str(k).lower()])
with open('/Users/sasa/Dropbox/1-Uni/CMPUT 692/Project/Code/dictionary_Nikon_Snippet_Surroundings.pickle', 'wb') as fsurr:
pickle.dump(ParentIndex, fsurr)
if __name__ == "__main__":
create_pickle_from_json()
| 2,272 |
tests/test_rdfprint.py
|
RenskeW/cwltool
| 289 |
2025360
|
import subprocess
import sys
import pytest
from cwltool.main import main
from .util import get_data
def test_rdf_print() -> None:
assert main(["--print-rdf", get_data("tests/wf/hello_single_tool.cwl")]) == 0
def test_rdf_print_unicode(monkeypatch: pytest.MonkeyPatch) -> None:
"""Force ASCII encoding but load UTF file with --print-rdf."""
monkeypatch.setenv("LC_ALL", "C")
params = [
sys.executable,
"-m",
"cwltool",
"--print-rdf",
get_data("tests/utf_doc_example.cwl"),
]
assert subprocess.check_call(params) == 0
| 589 |
2015/Day19/src/Day19.py
|
Ganon11/AdventCode
| 0 |
2025375
|
import argparse
import sys
def parse_input(filename):
rules = dict()
molecule = ''
with open(filename, 'r') as fp:
for line in fp.readlines():
if line.find('=>') != -1:
parts = line.split()
if not parts[0] in rules:
rules[parts[0]] = list()
rules[parts[0]].append(parts[2])
else:
molecule = line
return (rules, molecule)
def parse_reverse_input(filename):
rules = dict()
molecule = ''
with open(filename, 'r') as fp:
for line in fp.readlines():
if line.find('=>') != -1:
parts = line.split()
if not parts[2] in rules:
rules[parts[2]] = list()
rules[parts[2]].append(parts[0])
else:
molecule = line
return (rules, molecule)
def part_1(filename):
(rules, molecule) = parse_input(filename)
values = set()
for key in rules:
for newVal in rules[key]:
length = len(key)
index = molecule.find(key)
while index != -1:
newMol = molecule[:index] + newVal + molecule[index + length:]
values.add(newMol)
index = molecule.find(key, index + length)
print("Found %d unique values" % len(values))
def part_2(filename):
(rules, molecule) = parse_reverse_input(filename)
frontier = dict()
seen = set()
gens_seen = set()
frontier[molecule] = 0
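# Greedy search: always expand the shortest molecule left in the frontier, applying the reversed rules until 'e' is reached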
while 0 < len(frontier):
current = min(frontier, key=len)
gen = frontier[current]
if not gen in gens_seen:
print("Now checking gen %d" % gen)
gens_seen.add(gen)
frontier.pop(current)
if current in seen:
continue  # skip molecules that have already been expanded
seen.add(current)
#print("Checking %s" % current)
if current == 'e':
print("Got target in %d generations" % gen)
break
for key in rules:
for newVal in rules[key]:
length = len(key)
index = current.find(key)
while index != -1:
newMol = current[:index] + newVal + current[index + length:]
frontier[newMol] = gen + 1
index = current.find(key, index + length)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('filename')
args = parser.parse_args()
part_1(args.filename)
part_2(args.filename)
if __name__ == "__main__":
main()
| 2,222 |
dogma/extensions/nonequimolar_codon_optimization/__init__.py
|
griffinclausen/dogma
| 1 |
2024901
|
from .optimizer import (
NonEquimolarOptimizer
)
from .utils import (
get_random_base_profile,
get_random_base_profile_within_scope,
get_random_codon_profile,
calculate_num_base_combinations,
calculate_num_codon_combinations
)
| 252 |
maldi-learn/tests/preprocessing/test_normalization.py
|
sebastianbalzer/maldi_PIKE
| 11 |
2024419
|
"""Test normalizers."""
import unittest
import numpy as np
from maldi_learn.data import MaldiTofSpectrum
from maldi_learn.preprocessing import TotalIonCurrentNormalizer
MOCK_DATA = [
MaldiTofSpectrum(
[[0.0, 5.0],
[10.7, 8.0],
[150.4, 10.],
[1000, 3.0]
]
), # Mean intensity 6.5
MaldiTofSpectrum(
[[0.0, 15.0],
[10.7, 0.0],
[150.4, 10.],
[1000, 3.0]
]
), # Mean intensity 7 or 9.3333 (with ignore zero intensity)
]
# Total mean intensity: 6.75 or 7.7142857143 (with ignore zero intensity)
class TestTotalIonCurrentNormalizer(unittest.TestCase):
def test_dont_ignore_zero_intensity(self):
transf = TotalIonCurrentNormalizer(ignore_zero_intensity=False)
transformed = transf.fit_transform(MOCK_DATA)
# Normalization factor first example: 6.5 / 6.75 = 0.9629
transformed_intesities = transformed[0].intensities
expected_intensities = MOCK_DATA[0].intensities * (6.5 / 6.75)
self.assertTrue(np.allclose(
transformed_intesities,
expected_intensities
))
# Normalization factor second example: 7 / 6.75 = 1.0370
transformed_intesities = transformed[1].intensities
expected_intensities = MOCK_DATA[1].intensities * (7 / 6.75)
self.assertTrue(np.allclose(
transformed_intesities,
expected_intensities
))
def test_ignore_zero_intensity(self):
transf = TotalIonCurrentNormalizer(ignore_zero_intensity=True)
transformed = transf.fit_transform(MOCK_DATA)
# Normalization factor first example: 6.5 / 7.71428 = 0.8426
transformed_intesities = transformed[0].intensities
expected_intensities = MOCK_DATA[0].intensities * (6.5 / 7.71428)
self.assertTrue(np.allclose(
transformed_intesities,
expected_intensities
))
# Normalization factor second example: 9.3333 / 7.71428 = 1.2099
transformed_intesities = transformed[1].intensities
expected_intensities = MOCK_DATA[1].intensities * (9.3333 / 7.71428)
self.assertTrue(np.allclose(
transformed_intesities,
expected_intensities
))
| 2,283 |
testing/server.py
|
grantsrb/planet
| 0 |
2025395
|
import eventlet
import socketio
import sys
import os
from io import BytesIO
from PIL import Image
import base64
import numpy as np
from agents import RandnAgent
import pickle
def get_empty_datas():
d = dict()
d['obs_names'] = []
d['rewards'] = []
d['actions'] = []
d['dones'] = []
d['observations'] = []
d['save_eps'] = False
return d
sio = socketio.Server()
app = socketio.WSGIApp(sio)
# Server and image saving initialization
save_eps = False
save_folder = 'gamedata'
if not os.path.exists(save_folder):
os.mkdir(save_folder)
_, subds, _ = next(os.walk(save_folder))
trial_num = len(subds)
save_folder = os.path.join(save_folder, "trial_"+str(trial_num))
if not os.path.exists(save_folder):
os.mkdir(save_folder)
# Initialize episode collection
_, subds, _ = next(os.walk(save_folder))
global_ep = len(subds)
ep_folder = os.path.join(save_folder, "episode_"+str(global_ep))
if not os.path.exists(ep_folder):
os.mkdir(ep_folder)
_, _, files = next(os.walk(ep_folder))
global_frame = len(files)
datas = get_empty_datas()
agent = RandnAgent()
@sio.event
def connect(sid, environ):
print('connect ', sid)
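# A "step" event carries an optional base64-encoded observation: decode it, ask the agent for an action, optionally record the frame, and emit the action back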
@sio.event
def step(sid, step_data):
if 'done' in step_data and step_data['done'] == "1":
episode = global_ep + 1
frame = 0
else:
episode = global_ep
frame = len(datas['obs_names'])
print('step from', sid, " -- saving data:", step_data['save_data'], " - frame:", frame)
if 'observation' in step_data and step_data['observation'] is not None:
img_string = str(step_data['observation'])
decoded_bytes = base64.b64decode(img_string)
byts = BytesIO(decoded_bytes)
img = Image.open(byts)
npimg = np.asarray(img)
action = agent(npimg)
if step_data['save_data'] == "True":
save_name = "frame_{}.png".format(frame)
obs_name = os.path.join(ep_folder, save_name)
img.save(obs_name)
frame += 1
datas['obs_names'].append(obs_name)
datas['rewards'].append(float(step_data['reward']))
datas['actions'].append([float(x) for x in action])
datas['dones'].append(int(step_data['done']))
datas['observations'].append(npimg)
else:
npimg = np.zeros((100, 75))
action = agent(npimg)
sio.emit('socket_response', {"velocity": action[0], "direction":action[1]}, room=sid)
print("Emitted response")
@sio.event
def disconnect(sid):
if len(datas['observations']) > 0:
save_name = os.path.join(save_folder, "sid_{}.p".format(sid))
with open(save_name, 'wb') as f:
pickle.dump(datas, f)
print('disconnect ', sid)
if __name__ == '__main__':
eventlet.wsgi.server(eventlet.listen(('', 4567)), app)
| 2,806 |
apply_failed_state.py
|
kjhman21/klaytn-tests
| 2 |
2023383
|
#!/usr/local/bin/python
# How to use
# $ ./update_state.sh
import os
import re
import sys
import glob
import fnmatch
import json
delimeter_line=re.compile("^--")
def find_files(directory, pattern):
ret_files = []
for root, dirs, files in os.walk(directory):
for basename in files:
if fnmatch.fnmatch(basename, pattern):
path = os.path.join(root, basename)
ret_files.append(path)
return ret_files
filename=sys.argv[1]
with open(filename) as f:
test_filename=""
test_fork=""
test_idx=0
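# Parse the failed-test log: first the failing test file, fork and index, then the got/want hashes, then patch the expected value in the JSON fixture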
for line in f:
if delimeter_line.match(line):
pass
elif len(test_filename) == 0:
m = re.search("--- FAIL: TestState/.*/(.*\.json)/([0-9a-zA-Z]+)/([0-9]+) \(.*", line)
test_filename = m.group(1)
test_fork = m.group(2)
test_idx = int(m.group(3))
files = find_files(".", test_filename)
if len(files) != 1:
m = re.search("--- FAIL: TestState/.*?([^/]*/.*\.json)/([0-9a-zA-Z]+)/([0-9]+) \(.*", line)
filenameWithPath = m.group(1)
filesSecond = []
for fi in files:
if re.search(".*%s" % (filenameWithPath), fi) is not None:
filesSecond.append(fi)
if len(filesSecond) != 1:
print "Filename %s matched multiple times. %s" % (test_filename, files)
exit(0)
files = filesSecond
test_filename = files[0]
else:
m = re.search("^.*got ([0-9a-zA-Z]+), want ([0-9a-zA-z]+)", line)
got = m.group(1)
want = m.group(2)
print "Processing file %s..." % (test_filename)
content = ""
new_content = ""
with open(test_filename) as jsonf:
matched_hash = 0
matched_logs = 0
for jsonl in jsonf:
if re.match("^\s+\"hash\"\s*:", jsonl):
if matched_hash == test_idx:
jsonl = re.sub(want, got, jsonl)
matched_hash += 1
elif re.match("^\s+\"logs\"\s*:", jsonl):
if matched_logs == test_idx:
jsonl = re.sub(want, got, jsonl)
matched_logs += 1
new_content += jsonl
with open(test_filename, "w") as jsonf:
jsonf.write(new_content)
test_filename=""
| 1,992 |
src/imagen.py
|
ethanial1/Braille_lectors
| 0 |
2025667
|
import cv2 as cv
import numpy as np
# Class containing all of the image-processing methods
class Imagen(object):
def __init__(self, imagen):
# open the image with cv2
self.imagen_original = cv.imread(imagen)
# verify that the image can be opened
if self.imagen_original is None:
raise IOError('No se puede abrir la imagen')
# 1 - Convert to grayscale
gris = cv.cvtColor(self.imagen_original, cv.COLOR_BGR2GRAY)
# 2 - Process the image edges
self.bordes_imagen_binario = self.bordes_imagen(gris)
# 3 - Process the image, binary image
self.imagen_binaria = self.imagen_binaria(gris)
# 4 - Get the height, width and channels
self.alto, self.ancho, self.canal = self.imagen_original.shape
# 5 - Make a copy
self.final = self.imagen_original.copy()
# Use cv2 to draw the rectangle
def caja(self, izquierda, derecha, arriba, abajo, color = (255,0,0), tamano = 1):
# draw the rectangle on the original image
self.final = cv.rectangle(self.final, (izquierda, arriba), (derecha, abajo), color, tamano)
return True
def obtener_imagen_final(self):
return self.final
def obtener_imagen_bordes(self):
return self.bordes_imagen_binario
def obtener_imagen_binario(self):
return self.imagen_binaria
def obtener_imagen_original(self):
return self.imagen_original
def imagen_binaria(self, gray):
# 1 - Reduce noise
blur = cv.GaussianBlur(gray, (3,3), 0)
# 2 - binarize the image
ret, th = cv.threshold(blur, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)
# 3 - reduce the noise
blur2 = cv.GaussianBlur(th, (3,3),0)
# 4 - Binarize the image
ret2, th2 = cv.threshold(blur2, 0,255,cv.THRESH_BINARY+cv.THRESH_OTSU)
return cv.bitwise_not(th2)
def bordes_imagen(self, gris):
# 1 - Reduce noise with a blur
blur = cv.GaussianBlur(gris,(3,3),0)
# 2 - Threshold the image adaptively
thres = cv.adaptiveThreshold(blur,255,cv.ADAPTIVE_THRESH_MEAN_C,cv.THRESH_BINARY,5,4)
# 3 - Reduce more noise
blur2 = cv.medianBlur(thres, 3)
# 4 - Threshold the image
ret3,th3 = cv.threshold(blur2,0,255,cv.THRESH_BINARY+cv.THRESH_OTSU)
# 5 - apply a bitwise NOT with cv2: what was white becomes black, and black becomes white.
return cv.bitwise_not(th3)
| 2,413 |
combine_trios.py
|
jdayton3/ch-pipeline
| 0 |
2025401
|
import gzip
import re
import os
import time
from sys import argv
import concurrent.futures
startTime = time.time()
char = '\n' + ('*' * 70) + '\n'
#Input file or list of files
inputFile = argv[1]
#Create a dictionary of files that need to be combined into one vcf file
fileDict = {}
with open(inputFile) as sampleFile:
header = sampleFile.readline()
headerList = header.rstrip().split("\t")
fileNameIndex = headerList.index("file_name")
familyIdIndex = headerList.index("family_id")
for sample in sampleFile:
sampleData = sample.rstrip("\n").split("\t")
fileName = sampleData[fileNameIndex]
sampleFamilyId = sampleData[familyIdIndex]
shortName = re.findall(r"([\w\-/]+)\.?.*\.?.*\.gz", fileName)[0]
actualFileName = "{}_test/{}_parsed.vcf.gz".format(sampleFamilyId, shortName)
if sampleFamilyId not in fileDict:
fileDict[sampleFamilyId] = [actualFileName]
else:
fileDict[sampleFamilyId].append(actualFileName)
probandDict = {}
parentDict = {}
with open(inputFile) as sampleFile:
header = sampleFile.readline()
headerList = header.rstrip().split("\t")
fileNameIndex = headerList.index("file_name")
familyIdIndex = headerList.index("family_id")
sampleIdIndex = headerList.index("sample_id")
probandIndex = headerList.index("proband")
genderIndex = headerList.index("sex")
for sample in sampleFile:
sampleData = sample.rstrip("\n").split("\t")
fileName = sampleData[fileNameIndex]
sampleFamilyId = sampleData[familyIdIndex]
sampleId = sampleData[sampleIdIndex]
probandStatus = sampleData[probandIndex]
gender = sampleData[genderIndex]
shortName = re.findall(r"([\w\-/]+)\.?.*\.?.*\.gz", fileName)[0]
actualFileName = "{}_test/{}_parsed.vcf.gz".format(sampleFamilyId, shortName)
if probandStatus == "Yes":
probandDict[sampleId] = sampleFamilyId
else:
if sampleFamilyId not in parentDict:
parentDict[sampleFamilyId] = {sampleId: gender}
else:
parentDict[sampleFamilyId][sampleId] = gender
# Create fam files
def createFamFiles(proband):
familyId = probandDict[proband]
familyDict = parentDict[familyId]
paternal = ""
maternal = ""
outputString = ""
sampleDict = {}
for key, value in familyDict.items():
if value == "1":
paternal = key
else:
maternal = key
with open(inputFile) as sampleFile:
header = sampleFile.readline()
headerList = header.rstrip().split("\t")
fileNameIndex = headerList.index("file_name")
familyIdIndex = headerList.index("family_id")
sampleIdIndex = headerList.index("sample_id")
probandIndex = headerList.index("proband")
genderIndex = headerList.index("sex")
for sample in sampleFile:
sampleData = sample.rstrip("\n").split("\t")
fileName = sampleData[fileNameIndex]
sampleFamilyId = sampleData[familyIdIndex]
sampleId = sampleData[sampleIdIndex]
probandStatus = sampleData[probandIndex]
gender = sampleData[genderIndex]
if probandStatus == "Yes" and familyId == sampleFamilyId:
sampleDict[sampleId] = "{}\t{}\t{}\t{}\t{}\t2\n".format(sampleFamilyId, sampleId, paternal, maternal, gender)
elif probandStatus == "No" and familyId == sampleFamilyId:
sampleDict[sampleId] = "{}\t{}\t0\t0\t{}\t1\n".format(sampleFamilyId, sampleId, gender)
with open("{}_test/{}.fam".format(familyId, familyId), "w") as outputFile:
for key, value in sorted(sampleDict.items()):
outputFile.write(value)
with concurrent.futures.ProcessPoolExecutor(max_workers=24) as executor:
executor.map(createFamFiles, probandDict)
filesToGenotype = []
# Use GATK to combine all trios into one vcf
def combineTrios(trio):
files = fileDict[trio]
fileString = ""
outputName = "{}_test/{}.vcf.gz".format(trio, trio)
for file in files:
fileString += "-V {} ".format(file)
os.system("gatk IndexFeatureFile -F {}".format(file))
os.system("gatk CombineGVCFs -R /references/Homo_sapiens_assembly38.fasta {} -O {}".format(fileString, outputName))
return(outputName)
with concurrent.futures.ProcessPoolExecutor(max_workers=24) as executor:
outputName = executor.map(combineTrios, fileDict)
for file in outputName:
filesToGenotype.append(file)
timeElapsedMinutes = round((time.time()-startTime) / 60, 2)
timeElapsedHours = round(timeElapsedMinutes / 60, 2)
print('{}Trios have been combined. Time elapsed: {} minutes ({} hours){}'.format(char, timeElapsedMinutes, timeElapsedHours, char))
| 4,796 |
tasks/task_8.py
|
simensgreen/NumericalMethods
| 4 |
2024814
|
import sys
import os
sys.path.append(os.path.pardir)
from NumericalMethods.interpolation import lagrange_polynomial, canonical_polynomial
# =================================================================================
# Interpolation using the Lagrange and Newton methods and via the canonical polynomial
# =================================================================================
def main():
# Table for the Lagrange method
table_for_lagrange = {
'x': [2, 3, 4],
'y': [20, 22, 19],
'Подставить значения': [3.5]
}
# TODO: Fix the Newton method!
# Table for the Newton method (not working)
# table_for_newton = {
# 'x': [3, 4, 5],
# 'y': [22, 19, 18],
# 'Подставить значения': [3.5]
# }
# Table for the canonical polynomial
table_for_canonical = {
'x': [2, 3, 4, 5],
'y': [20, 22, 19, 18],
'Подставить значения': [3.5],
'Рисовать график?': 'да'
}
# ============================================================
# ATTENTION! Not for timid people! Below is the program code!
# ============================================================
print(' Многочлен Лагранжа '.center(100, '='))
lagrange_result = lagrange_polynomial(table_for_lagrange['x'], table_for_lagrange['y'])
print(f'Полный многочлен: {lagrange_result["Полный многочлен"]}\n'
f'Упрощенный многочлен: {lagrange_result["Упрощенный многочлен"]}')
function = lagrange_result['Функция python']
for val in table_for_lagrange['Подставить значения']:
print(f'y({val}) = {round(function(val), 8)}')
# TODO: Enable the Newton method!
# print(' Многочлен Ньютона '.center(100, '='))
#
# lagrange_result = newton_pol(table_for_newton['x'], table_for_newton['y'])
# print(f'Полный многочлен: {lagrange_result["Полный многочлен"]}\n'
# f'Упрощенный многочлен: {lagrange_result["Упрощенный многочлен"]}')
# function = lagrange_result['Функция python']
# for val in table_for_newton['Подставить значения']:
# print(f'y({val}) = {round(function(val), 8)}')
print(' Канонический многочлен '.center(100, '='))
canonical_result = canonical_polynomial(table_for_canonical['x'], table_for_canonical['y'])
print('Матрица:')
canonical_result['Матрица'].console_display()
print(f'Столбец свободных членов: {canonical_result["Столбец свободных членов"]}\n')
print(f'Решение СЛАУ: {canonical_result["Решение СЛАУ"]}')
print(f'Полином: {canonical_result["Полином"]}\n')
function = canonical_result['Функция python']
for val in table_for_canonical['Подставить значения']:
print(f'y({val}) = {round(function(val), 8)}')
if table_for_canonical['Рисовать график?'].lower() == 'да':
print('\n', " Увы, график рисовать я пока не умею ".center(50, '!'))
if __name__ == '__main__':
# The task_%.py files are made for people for whom installing an interpreter can be a challenge.
# They launch these files with a double click, and if the console closed instantly on an error
# it would be very stressful and I could not even help quickly; this way at least the error gets printed.
try:
main()
except Exception as error:
print(error)
input('Нажмите "Enter" чтобы выйти...')
| 3,397 |
setup.py
|
yihong0618/gaycore
| 108 |
2025070
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
VERSION = '2.0.1'
setup(name='gaycore',
version=VERSION,
description="a tiny and smart cli player of gcore audio, based on Python package curses and mpg123 for Linux or Mac",
long_description='gaycore listen player using cli enjoy it',
keywords='python gcore gaycore cli terminal',
author='yihong0618',
author_email='<EMAIL>',
url='https://github.com/yihong0618/gaycore',
license='MIT',
packages=find_packages(),
include_package_data=True,
zip_safe=True,
install_requires=[
'requests'
],
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Console",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Software Development :: Libraries",
],
entry_points={
'console_scripts': [
'gaycore = gaycore.cli:run'
], }
)
| 1,227 |
Visualiser.py
|
dineenai/visualizing_filters
| 20 |
2024620
|
# Importing libraries
import torch
import matplotlib.pyplot as plt
import torchvision.transforms as t
import cv2 as cv
import torchvision.models as models
# Importing the module
from extractor import Extractor
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Loading the model
resnet = models.resnet50()
extractor = Extractor(list(resnet.children()))
extractor.activate()
# Visualising the filters
plt.figure(figsize=(35, 35))
for index, filter in enumerate(extractor.CNN_weights[0]):
plt.subplot(8, 8, index + 1)
plt.imshow(filter[0, :, :].detach(), cmap='gray')
plt.axis('off')
plt.show()
# Filter Map
img = cv.cvtColor(cv.imread('Featuremaps&Filters/img.png'), cv.COLOR_BGR2RGB)
img = t.Compose([
t.ToPILImage(),
t.Resize((128, 128)),
# t.Grayscale(),
t.ToTensor(),
t.Normalize(0.5, 0.5)])(img).unsqueeze(0)
featuremaps = [extractor.CNN_layers[0](img)]
for x in range(1, len(extractor.CNN_layers)):
featuremaps.append(extractor.CNN_layers[x](featuremaps[-1]))
# Visualising the featuremaps
for x in range(len(featuremaps)):
plt.figure(figsize=(30, 30))
layers = featuremaps[x][0, :, :, :].detach()
for i, filter in enumerate(layers):
if i == 64:
break
plt.subplot(8, 8, i + 1)
plt.imshow(filter, cmap='gray')
plt.axis('off')
# plt.savefig('featuremap%s.png'%(x))
plt.show()
| 1,408 |
relstorage/adapters/_abstract_drivers.py
|
dpedu/relstorage
| 1 |
2025168
|
##############################################################################
#
# Copyright (c) 2016 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""
Helpers for drivers
"""
from __future__ import print_function
from .interfaces import ReplicaClosedException
import sys
import traceback
def _select_driver(options, driver_options):
name = options.driver or 'auto'
if name == 'auto':
name = driver_options.preferred_driver_name
try:
return driver_options.driver_map[name]
except KeyError:
raise ImportError("Unable to use the driver '%s' for the database '%s'."
" Available drivers are: %s."
" Verify the driver name and that the right packages are installed."
% (name, driver_options.database_type,
list(driver_options.driver_map.keys())))
_base_disconnected_exceptions = (ReplicaClosedException,)
def _standard_exceptions(mod):
# Returns disconnected_exceptions, close_exceptions
# and lock_exceptions
# for a standard driver
disconnected_exceptions = (getattr(mod, 'OperationalError'),
getattr(mod, 'InterfaceError'))
disconnected_exceptions += _base_disconnected_exceptions
close_exceptions = disconnected_exceptions + (getattr(mod, 'ProgrammingError'),)
lock_exceptions = (getattr(mod, 'DatabaseError'),)
return disconnected_exceptions, close_exceptions, lock_exceptions
class _ConnWrapper(object): # pragma: no cover
def __init__(self, conn):
self.__conn = conn
self.__type = type(conn)
self.__at = ''.join(traceback.format_stack())
def __getattr__(self, name):
return getattr(self.__conn, name)
def __setattr__(self, name, value):
if name in ('_ConnWrapper__conn', '_ConnWrapper__at', '_ConnWrapper__type'):
object.__setattr__(self, name, value)
return
return setattr(self.__conn, name, value)
def cursor(self, *args, **kwargs):
return _ConnWrapper(self.__conn.cursor(*args, **kwargs))
def execute(self, op, args=None):
#print(op, args)
self.__conn.connection.handle_unread_result()
return self.__conn.execute(op, args)
def __iter__(self):
return self.__conn.__iter__()
def close(self):
if self.__conn is None:
return
try:
self.__conn.close()
finally:
self.__conn = None
def __del__(self):
if self.__conn is not None:
print("Failed to close", self, self.__type, " from:", self.__at, file=sys.stderr)
print("Deleted at", ''.join(traceback.format_stack()))
| 3,218 |
inkpy/_runtime/env.py
|
facelesspanda/inkpy
| 0 |
2025549
|
from ..util.event import Event
from .value import Value, ListValue, VarPtrValue
class LexEnv:
def __init__(self, callstack, ldefsorg):
self.__callstack = callstack
self.__varchanged = Event()
self.__batchObserving = False
self.__changedVars = None
self.__globals = {}
self.__ldef_org = ldefsorg
@property
def variableChanged(self):
return self.__varchanged
@variableChanged.setter
def variableChanged(self, v):
pass
@property
def batchObserving(self):
return self.__batchObserving
@batchObserving.setter
def batchObserving(self, v):
self.__batchObserving = v
if v:
self.__changedVars = set()
else:
if self.__changedVars is not None:
for var in self.__changedVars:
self.variableChanged(var, self.__globals[var])
self.__changedVars = None
@property
def json_token(self):
raise NotImplementedError
@json_token.setter
def json_token(self, v):
raise NotImplementedError
@property
def callstack(self):
return self.__callstack
@callstack.setter
def callstack(self, v):
self.__callstack = v
def copy_from(self, other):
self.__globals = other.__globals.copy()
self.variableChanged = other.variableChanged
if self.batchObserving != other.batchObserving:
if other.batchObserving:
self.__batchObserving = True
self.__changedVars = set(other.__changedVars)
else:
self.__batchObserving = False
self.__changedVars = None
def __get_raw_var(self, name, idx):
v = None
if idx <= 0:
v = self.__globals.get(name)
if v is not None: return v
v = self.__ldef_org.find(name)
if v is not None: return v
v = self.__callstack.get_var(name)
if v is None:
raise ValueError(
"RUNTIME ERROR: Variable '%s' could not be found in context '%d'."
"This shouldn't be possible so is a bug in the ink(py) engine."
"Please try to construct a minimal story that reproduces"
"the problem and report to facelesspanda, thank you!" % name,
idx)
return v
def __index(self, name):
if name in self.__globals: return 0
return self.__callstack.current_element_idx
def __resolve_ptr(self, ptr):
ctx_idx = ptr.context_idx
if ctx_idx == -1:
ctx_idx = self.__index(ptr.value)
v = self.__get_raw_var(ptr.value, ctx_idx)
if isinstance(v, VarPtrValue): return v
else:
return VarPtrValue(ptr.value, ctx_idx)
def __retain_list_orgs(self, old, new):
if not isinstance(old, ListValue): return
if not isinstance(new, ListValue): return
if len(new.value) == 0:
new.value.set_initial_orgnames(old.value.origin_names)
def assign(self, var_ass, val):
name = var_ass.name
ctx_idx = -1
set_global = var_ass.is_global if var_ass.is_new else (
name in self.__globals)
if var_ass.is_new:
if isinstance(val, VarPtrValue):
val = self.__resolve_ptr(val)
else:
ptr = None
while True:
ptr = self.__get_raw_var(name, ctx_idx)
if isinstance(ptr, VarPtrValue):
name = ptr.value
ctx_idx = ptr.context_index
set_global = ctx_idx == 0
else:
break
if set_global:
self.__setglobal(name, val)
else:
self.__callstack.set_var(name, val, var_ass.is_new, ctx_idx)
def ptr_val(self, v):
return self.get_var(v.value, v.context_index)
def get_var(self, name, idx=-1):
v = self.__get_raw_var(name, idx)
if isinstance(v, VarPtrValue):
v = self.ptr_val(v)
return v
def __setglobal(self, var, val):
oldval = self[var]
ListValue.retain_list_origins(oldval, val)
self.__globals[var] = val
if len(self.variableChanged) > 0 and val != oldval:
if self.batchObserving:
self.__changedVars.add(var)
else:
self.variableChanged(var, val)
def __getitem__(self, key):
return self.__globals.get(key)
def __setitem__(self, key, value):
if value is None:
raise TypeError("Variable value cannot be None")
if key not in self.__globals:
raise KeyError("%s is not a global variable" % key)
val = Value.create(value)
if val is None:
raise ValueError("Invalid value passed to variable: %s" % value)
self.__setglobal(key, val)
def __iter__(self):
return self.__globals.keys().__iter__()
| 5,005 |
TI_predictor.py
|
neherlab/HIV_time_of_infection
| 3 |
2024833
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 9 11:31:53 2017
@author: puller
"""
from __future__ import division
import numpy as np
import EDI_functions as EDI
#default arguments
measure = 'diversity'
names = ['polymorphic', 'diversity', 'entropy']
funcnames = ['ambiguous_above', 'hamming_above', 'entropy_above']
func_name = funcnames[names.index(measure)]
fcr = 0.5
Tmin = 0; Tmax = 9
vload_min = None
dilutions_min = None
method = 'LAD'
rframe = 2 #reference frame; set to None to use all sites
fs = 28
H = 8
#loading frequency data
datapath = './Frequency_Data/'
data = EDI.load_patient_data(patient_names = 'all', filepath = datapath)
Npat = len(data['pat_names'])
def region(j0jL):
if type(j0jL) is str:
# The genome annotations
head = ['name', 'x1', 'x2', 'width', 'ri']
annot = []
with open(datapath + 'annotations.txt', 'r') as fhandle:
for line in fhandle:
l = [x if j ==0 else int(x) for j, x in enumerate(line.split())]
annot.append({name: l[j] for j, name in enumerate(head)})
coords = {anno['name']: (anno['x1'], anno['x2']) for anno in annot}
return coords[j0jL]
else:
return j0jL
def TI_from_diversity(DD, j0jL, cutoff, nboot = None, rf = rframe):
'''
Estimate the time of infection (TI) from the specified diversity values
Input arguments:
DD: list/array of diversity values
j0jL: tuple specifying the genetic region to use
cutoff: lower cutoff value, xc
nboot: number of bootstraps over different patients (if None, then no bootstrapping)
Output arguments:
        TTest: estimated times of infection (with rows corresponding to bootstrap realizations)
        dtdx_t0: slope and intercept values (with rows corresponding to bootstrap realizations)
'''
CUT = EDI.window_cutoff(data, func_name, region(j0jL), cutoff, rf = rf)
ttk, xxk, jjk = CUT.realdata(Tmin, Tmax, fcr = fcr, vload_min = vload_min,
dilutions_min = dilutions_min)
if nboot is None:
ttk_data, dtdx_t0 = EDI.fitmeth_byname(ttk, xxk, method = method)
TTest = dtdx_t0[0]*DD + dtdx_t0[1]
return TTest, dtdx_t0
else:
Npat = len(CUT.pat_names)
jjboot = np.random.randint(0, high = Npat, size = (nboot, Npat))
TTest = np.zeros((nboot, len(DD)))
dtdx_t0 = np.zeros((nboot, 2))
for jboot, idx_boot in enumerate(jjboot):
tk = np.ma.concatenate([ttk[np.where(jjk == j)] for j in idx_boot])
xk = np.ma.concatenate([xxk[np.where(jjk == j)] for j in idx_boot])
ttk_est, dtdx_t0[jboot,:] = EDI.fitmeth_byname(tk, xk, method = method)
TTest[jboot,:] = dtdx_t0[jboot, 0]*DD + dtdx_t0[jboot, 1]
return TTest, dtdx_t0
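# Hedged usage sketch (the diversity values and region below are assumed example
# inputs, not values from the original analysis; running it requires the
# ./Frequency_Data/ files loaded above):
#   TTest, dtdx_t0 = TI_from_diversity([0.005, 0.01], 'pol', cutoff=0.0)
#   print(TTest)  # estimated times of infection in years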
def TI_bootstrap_plot(DD, j0jL, cutoff, filename, nboot = 10**3):
'''
Plot bootstrap histograms for inferred times of infection
Input arguments:
DD: list/array of diversity values
j0jL: tuple specifying the genetic region to use
cutoff: lower cutoff value, xc
filename: path to file for saving figure
nboot: number of bootstraps over different patients
'''
import matplotlib.pyplot as plt
plt.ioff()
# plt.close('all')
TTboot, dtdx_boot = TI_from_diversity(DD, j0jL, cutoff, nboot = nboot)
fig, ax = plt.subplots(1, 1, figsize = (2*H, H))
for jD, D in enumerate(DD):
ax.hist(TTboot[:,jD])
ax.set_xlabel('ETI [years]', fontsize = fs)
ax.tick_params(labelsize = .8*fs)
plt.axis('tight')
plt.savefig(filename)
plt.close()
return None
| 3,640 |
custom_app/custom_app/overrides/custom_purchase_invoice.py
|
MohammedAlNazili/custom-app
| 0 |
2025516
|
from erpnext.accounts.doctype.purchase_invoice.purchase_invoice import PurchaseInvoice
# from erpnext.erpnext.projects.doctype.task import Task
import frappe
# from frappe import _, msgprint
from custom_app.utils.qrcode import gen_qrcode
class CustomPurchaseInvoice(PurchaseInvoice):
def onload(self):
super().onload()
# if self.docstatus!=1:
# self.update_costing()
def validate(self):
super().validate()
# self.validate_advance_amount()
def submit(self):
super().submit()
print("***********************************")
print("***********************************")
print("***********************************")
# self.validate_advance_amount()
self.generate_qr_code()
    def generate_qr_code(self):
        # Build the QR code image tag from the QR code generated for this invoice name.
        self.qr_code_image = "<img src='{}' width='100' />".format(gen_qrcode(self.name))
# img=qrcode.make('Hello World')
# print("Hello")
# print(str(img))
# img.save('/home/malnozili/Desktop/hello.png')
# _file = frappe.get_doc({
# "doctype": "File",
# "file_name": "hello.png",
# "attached_to_name": self.name,
# "attached_to_doctype": "Purchase Invoice",
# # "folder": self.get_folder("Test Folder 1", "Home").name,
# "file_url": '/home/malnozili/Desktop/hello.png'
# })
# _file.save()
# self.qrcode_text = str(img)
| 1,502 |
meiduo_mall/celery_tasks/sms/tasks.py
|
Goldx4/meiduo-demo
| 0 |
2025407
|
import logging
from celery_tasks.main import app
from .yuntongxun.sms import CCP
from . import constants
logger = logging.getLogger('django')
@app.task(name='send_sms_code')
def send_sms_code(mobile, sms_code):
"""
    Send the SMS verification code.
    :param mobile: mobile phone number
    :param sms_code: verification code
    :return:
    """
    # Send the SMS
try:
ccp = CCP()
sms_code_expires = str(constants.SMS_CODE_REDIS_EXPIRES // 60)
result = ccp.send_template_sms(mobile, [sms_code, sms_code_expires], constants.SMS_CODE_TEMP_ID)
except Exception as e:
logger.error("发送验证码短信[异常][ mobile: %s, message: %s ]" % (mobile, e))
else:
if result == 0:
logger.info("发送验证码短信[正常][ mobile: %s ]" % mobile)
else:
logger.warning("发送验证码短信[失败][ mobile: %s ]" % mobile)
| 799 |
yubi_goog.py
|
enkaskal/yubi-goog
| 14 |
2024189
|
#!/usr/bin/env python
################################################################################
# yubi_goog.py - google authenticator via yubikey
#
# Use --generate to generate OTPs given a base 32 secret key (from google)
# Use --yubi to send a challenge to the yubikey to generate OTPs
# Use --convert-secret to convert the google secret into hex
#
# author: <NAME> <<EMAIL>>
################################################################################
import base64
import re
import binascii
import time
import sys
import subprocess
import hashlib
import hmac
import struct
ADJACENT_INTERVALS = 3 # generate 3 OTPs
TIME_STEP = 30 # default as per TOTP spec
# Use sudo when invoking ykchalresp
USE_SUDO = True
# supporting py2 and py3 sucks
IS_PY3 = sys.version_info[0] == 3
def mangle_hash(h):
if IS_PY3:
offset = h[-1] & 0x0F
else:
offset = ord(h[-1]) & 0x0F
truncated_hash = h[offset:offset+4]
code = struct.unpack(">L", truncated_hash)[0]
    code &= 0x7FFFFFFF
    code %= 1000000
return '{0:06d}'.format(code)
def totp(secret, tm):
bin_key = binascii.unhexlify(secret)
h = hmac.new(bin_key, tm, hashlib.sha1).digest()
return mangle_hash(h)
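# Hedged example (the base32 key below is a made-up sample, not a real secret):
#   secret = decode_secret("JBSWY3DPEHPK3PXP")                      # base32 -> hex
#   otp = totp(secret, struct.pack('>q', int(time.time()) // TIME_STEP))
#   # otp is a 6-digit string such as '492039'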
def generate_challenges(intervals = ADJACENT_INTERVALS):
"""
intervals: must be odd number
generates intervals-number total challenges. used to
workaround clock skew.
"""
challenges = []
t = int(time.time())
for ix in range(0-int(intervals/2), int(intervals/2)+1):
tm = (t + TIME_STEP*ix)/TIME_STEP
tm = struct.pack('>q', int(tm))
challenges.append(tm)
return challenges
def decode_secret(secret):
"""
    Decodes the base32 string Google provides to hex
"""
# remove spaces and uppercase
secret = re.sub(r'\s', '', secret).upper()
secret = secret.encode('ascii')
secret = base64.b32decode(secret)
return binascii.hexlify(secret)
def get_secret():
"""
Read secret from user
"""
if IS_PY3:
google_key = input("Google key: ")
else:
google_key = raw_input("Google key: ")
return decode_secret(google_key)
def convert_secret():
secret = get_secret()
print(secret.decode())
def generate():
# convert secret to hex
secret = get_secret()
# now, and 30 seconds ahead and behind
for chal in generate_challenges():
print("OTP: %s" %( totp(secret, chal) ))
def yubi():
for chal in generate_challenges():
chal = binascii.hexlify(chal)
cmd = []
if USE_SUDO:
cmd = ['sudo']
cmd.append('ykchalresp')
cmd.append('-2x')
cmd.append(chal)
if hasattr(subprocess, "check_output"):
try:
resp = subprocess.check_output(cmd).strip()
except subprocess.CalledProcessError:
sys.exit(1)
else:
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
out, err = proc.communicate()
if not isinstance(out, basestring):
raise ValueError("Command {0} returned {1!r}."
.format(" ".join(cmd), out))
resp = out.strip()
print("OTP: %s" %(mangle_hash(binascii.unhexlify(resp))))
def error():
print("Valid opts: --generate, --yubi, or --convert-secret")
if __name__ == "__main__":
if len(sys.argv) <= 1:
yubi()
sys.exit(1)
if sys.argv[1] == "--generate":
generate()
elif sys.argv[1] == "--yubi":
yubi()
elif sys.argv[1] == "--convert-secret":
convert_secret()
else:
error()
sys.exit(1)
| 3,656 |
engines/tests/test_google.py
|
Rested/multi-translate
| 1 |
2025151
|
import pytest
from engines.google import bcp_47_to_iso_639
from errors import InvalidISO6391CodeError
def test_bcp_47_to_iso_639_can_handle_iso_639_starting_codes_correctly():
assert bcp_47_to_iso_639("en-GB-oed") == "en"
def test_bcp_47_to_iso_639_errors_for_iso_639_3_letter_codes():
with pytest.raises(InvalidISO6391CodeError) as excinfo:
bcp_47_to_iso_639("cel-gaulish")
assert "3-letter" in str(excinfo)
def test_bcp_47_to_iso_639_errors_for_non_iso_codes():
with pytest.raises(InvalidISO6391CodeError) as excinfo:
bcp_47_to_iso_639("fake-language")
assert "no ISO-639 component" in str(excinfo)
| 646 |
worker.py
|
redjerdai/iseeyourcarm8
| 0 |
2023052
|
#
import os
import numpy as np
import pandas
import sys
import matplotlib.image as mpimg
#
from NomeroffNet import filters, RectDetector, TextDetector, OptionsDetector, Detector, textPostprocessing, textPostprocessingAsync
#
class Worker:
def __init__(self, NOMEROFF_NET_DIR, MASK_RCNN_DIR, MASK_RCNN_LOG_DIR, load_model="latest", options_detector="latest",
text_detector_module="eu", load_text_detector="latest"):
sys.path.append(NOMEROFF_NET_DIR)
# Initialize npdetector with default configuration file.
self.nnet = Detector(MASK_RCNN_DIR, MASK_RCNN_LOG_DIR)
self.nnet.loadModel(load_model)
self.rectDetector = RectDetector()
self.optionsDetector = OptionsDetector()
self.optionsDetector.load(options_detector)
# Initialize text detector.
self.textDetector = TextDetector.get_static_module(text_detector_module)()
self.textDetector.load(load_text_detector)
def detect(self, img_path):
# Detect numberplate
img = mpimg.imread(img_path)
NP = self.nnet.detect([img])
# Generate image mask.
cv_img_masks = filters.cv_img_mask(NP)
# Detect points.
arrPoints = self.rectDetector.detect(cv_img_masks)
zones = self.rectDetector.get_cv_zonesBGR(img, arrPoints)
        # find standard
regionIds, stateIds, countLines = self.optionsDetector.predict(zones)
regionNames = self.optionsDetector.getRegionLabels(regionIds)
        # find text with postprocessing by standard
textArr = self.textDetector.predict(zones)
textArr = textPostprocessing(textArr, regionNames)
return textArr
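# Hedged usage sketch (the directory paths and image file are assumed placeholders,
# not values from the original project):
#   worker = Worker(NOMEROFF_NET_DIR='nomeroff-net',
#                   MASK_RCNN_DIR='nomeroff-net/Mask_RCNN',
#                   MASK_RCNN_LOG_DIR='nomeroff-net/logs')
#   print(worker.detect('example_car.jpg'))  # e.g. ['AB1234CD']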
| 1,695 |
lib/modules/conv_rnn.py
|
yuzhd/Text2Scene
| 109 |
2025160
|
#!/usr/bin/env python
# codes modified from
# https://github.com/ndrplz/ConvLSTM_pytorch/blob/master/convlstm.py
import os, sys, cv2, json
import math, copy, random
import numpy as np
import os.path as osp
import torch
import torch.nn as nn
from modules.separable_convolution import separable_conv2d
USE_SEPARABLE_CONVOLUTION = False
class ConvGRUCell(nn.Module):
def __init__(self, input_size, hidden_size, kernel_size, bias=True):
super(ConvGRUCell, self).__init__()
self.hidden_size = hidden_size
if USE_SEPARABLE_CONVOLUTION:
conv2d = separable_conv2d
else:
conv2d = nn.Conv2d
self.conv1 = conv2d(
in_channels=(input_size + hidden_size),
out_channels=(2 * hidden_size),
kernel_size=kernel_size, stride=1, padding=int((kernel_size - 1) / 2),
bias=bias)
self.conv2 = conv2d(
in_channels=input_size,
out_channels=hidden_size,
kernel_size=kernel_size, stride=1, padding=int((kernel_size - 1) / 2),
bias=bias)
self.conv3 = conv2d(
in_channels=hidden_size,
out_channels=hidden_size,
kernel_size=kernel_size, stride=1, padding=int((kernel_size - 1) / 2),
bias=bias)
def forward(self, x, h):
# x: [batch, input_size, height, width]
# h: [batch, hidden_size, height, width]
combined = torch.cat((x, h), dim=1)
A = self.conv1(combined)
(az, ar) = torch.split(A, self.hidden_size, dim=1)
z = torch.sigmoid(az)
r = torch.sigmoid(ar)
ag = self.conv2(x) + r * self.conv3(h)
g = torch.tanh(ag)
new_h = z * h + (1.0 - z) * g
return new_h
class ConvLSTMCell(nn.Module):
def __init__(self, input_size, hidden_size, kernel_size, bias=True):
super(ConvLSTMCell, self).__init__()
self.hidden_size = hidden_size
if USE_SEPARABLE_CONVOLUTION:
conv2d = separable_conv2d
else:
conv2d = nn.Conv2d
self.conv = conv2d(
in_channels=(input_size + hidden_size),
out_channels=(4 * hidden_size),
kernel_size=kernel_size, stride=1, padding=int((kernel_size - 1) / 2),
bias=bias)
def forward(self, x, h, c):
# x: [batch, input_size, height, width]
# h, c: [batch, hidden_size, height, width]
combined = torch.cat((x, h), dim=1)
A = self.conv(combined)
(ai, af, ao, ag) = torch.split(A, self.hidden_size, dim=1)
i = torch.sigmoid(ai)
f = torch.sigmoid(af)
o = torch.sigmoid(ao)
g = torch.tanh(ag)
new_c = f * c + i * g
new_h = o * torch.tanh(new_c)
return new_h, new_c
class ConvGRU(nn.Module):
def __init__(self, input_size, hidden_size, num_layers, kernel_size, bias=True, dropout=0.0):
super(ConvGRU, self).__init__()
self.input_sizes = [input_size] + [hidden_size] * (num_layers-1)
self.hidden_sizes = [hidden_size] * num_layers
self.num_layers = num_layers
self.dropout_p = dropout
for i in range(num_layers):
cell = ConvGRUCell(self.input_sizes[i], self.hidden_sizes[i], kernel_size, bias)
setattr(self, 'cell%02d'%i, cell)
if self.dropout_p > 0:
self.dropout = nn.Dropout2d(p=self.dropout_p)
self.init_weights()
def init_weights(self):
# for m in self.modules():
# if isinstance(m, nn.Conv2d):
# n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
# m.weight.data.normal_(0, math.sqrt(2. / n))
# m.bias.data.zero_()
# elif isinstance(m, nn.BatchNorm2d):
# m.weight.data.fill_(1)
# m.bias.data.zero_()
for name, param in self.named_parameters():
if 'bias' in name:
nn.init.constant_(param, 0.0)
elif 'weight' in name:
nn.init.xavier_uniform_(param)
def forward(self, input_var, prev_hidden):
# Inputs
# input_var: (#batch, #sequence, #input_size, #height, #width)
# prev_hidden: (#layers, #batch, #hidden_size, #height, #width)
# Outputs
# last_layer_hiddens: (#batch, #sequence, #hidden_size, #height, #width)
# last_step_hiddens: (#layers, #batch, #hidden_size, #height, #width)
# all_hiddens: (#layers, #batch, #sequence, #hidden_size, #height, #width)
all_hiddens_list = []
current_layer_input = input_var
for layer in range(self.num_layers):
layer_output_list = []
h = prev_hidden[layer]
for step in range(current_layer_input.size(1)):
x = current_layer_input[:, step, :, :, :]
h = getattr(self, 'cell%02d'%layer)(x, h)
if self.dropout_p > 0:
h = self.dropout(h)
layer_output_list.append(h)
layer_output = torch.stack(layer_output_list, dim=1)
current_layer_input = layer_output
all_hiddens_list.append(layer_output)
last_layer_hiddens = all_hiddens_list[-1]
all_hiddens = torch.stack(all_hiddens_list, dim=0)
last_step_hiddens = all_hiddens[:, :, -1, :, :, :]
return last_layer_hiddens, last_step_hiddens, all_hiddens
class ConvLSTM(nn.Module):
def __init__(self, input_size, hidden_size, num_layers, kernel_size, bias=True, dropout=0.0):
super(ConvLSTM, self).__init__()
self.input_sizes = [input_size] + [hidden_size] * (num_layers-1)
self.hidden_sizes = [hidden_size] * num_layers
self.num_layers = num_layers
self.dropout_p = dropout
for i in range(num_layers):
cell = ConvLSTMCell(self.input_sizes[i], self.hidden_sizes[i], kernel_size, bias)
setattr(self, 'cell%02d'%i, cell)
if self.dropout_p > 0:
self.dropout = nn.Dropout2d(p=self.dropout_p)
self.init_weights()
def init_weights(self):
# for m in self.modules():
# if isinstance(m, nn.Conv2d):
# n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
# m.weight.data.normal_(0, math.sqrt(2. / n))
# m.bias.data.zero_()
# elif isinstance(m, nn.BatchNorm2d):
# m.weight.data.fill_(1)
# m.bias.data.zero_()
for name, param in self.named_parameters():
if 'bias' in name:
nn.init.constant_(param, 0.0)
elif 'weight' in name:
nn.init.xavier_uniform_(param)
def forward(self, input_var, prev_hidden):
# Inputs
# input_var: (#batch, #sequence, #input_size, #height, #width)
# prev_hidden: tuple of (#layers, #batch, #hidden_size, #height, #width)
# Outputs
# last_layer_hiddens: (#batch, #sequence, #hidden_size, #height, #width)
# last_step_*: tuple (#layers, #batch, #hidden_size, #height, #width)
# all_*: tuple (#layers, #batch, #sequence, #hidden_size, #height, #width)
all_hiddens_list = []
all_cells_list = []
current_layer_input = input_var
(prev_h, prev_c) = prev_hidden
for layer in range(self.num_layers):
layer_hidden_list = []
layer_cell_list = []
h = prev_h[layer]
c = prev_c[layer]
for step in range(current_layer_input.size(1)):
x = current_layer_input[:, step, :, :, :]
h, c = getattr(self, 'cell%02d'%layer)(x, h, c)
if self.dropout_p > 0:
h = self.dropout(h)
c = self.dropout(c)
layer_hidden_list.append(h)
layer_cell_list.append(c)
layer_hidden = torch.stack(layer_hidden_list, dim=1)
layer_cell = torch.stack(layer_cell_list, dim=1)
current_layer_input = layer_hidden
all_hiddens_list.append(layer_hidden)
all_cells_list.append(layer_cell)
last_layer_hiddens = all_hiddens_list[-1]
all_hiddens = torch.stack(all_hiddens_list, dim=0)
last_step_hiddens = all_hiddens[:, :, -1, :, :, :]
all_cells = torch.stack(all_cells_list, dim=0)
last_step_cells = all_cells[:, :, -1, :, :, :]
return last_layer_hiddens, (last_step_hiddens, last_step_cells), (all_hiddens, all_cells)
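# Hedged shape-check sketch; all sizes below are assumed example values, not taken
# from the original project.
if __name__ == '__main__':
    gru = ConvGRU(input_size=3, hidden_size=8, num_layers=2, kernel_size=3)
    x = torch.zeros(4, 5, 3, 16, 16)    # (batch, sequence, input_size, H, W)
    h0 = torch.zeros(2, 4, 8, 16, 16)   # (layers, batch, hidden_size, H, W)
    last_layer, last_step, all_h = gru(x, h0)
    print(last_layer.shape, last_step.shape, all_h.shape)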
| 8,560 |
button.py
|
piotrcierpich/raspberry
| 0 |
2024477
|
from gpiozero import Button
from signal import pause
button = Button(21)
def elo():
print("xdddddd")
button.when_pressed = elo
pause()
| 140 |
datareaders/sqlitereader.py
|
jamesmarlowe/Python-Data-Readers
| 3 |
2025483
|
import sqlite3
class SqliteReader:
def __init__(self, *args, **kwargs):
if 'database' in kwargs:
self.db_sqlite3 = kwargs['database']
else:
            print('missing database argument, using data.sqlite')
self.db_sqlite3 = 'data.sqlite'
if 'table' in kwargs:
self.db_table = kwargs['table']
else:
            print('missing table argument, using DataTable')
self.db_table = 'DataTable'
def read(self, *args, **kwargs):
db = sqlite3.connect(self.db_sqlite3)
db.row_factory = sqlite3.Row
cursor = db.cursor()
SELECT_TABLE = '''SELECT * FROM '''+self.db_table
cursor.execute(SELECT_TABLE)
existing_rows = [{key:row[row.keys().index(key)] for key in row.keys()} for row in cursor.fetchall()]
return existing_rows
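# Hedged usage sketch (the database and table names are assumed example values):
#   reader = SqliteReader(database='data.sqlite', table='DataTable')
#   rows = reader.read()  # list of dicts, one per row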
| 881 |
data/process_data.py
|
HectorMontes10/PropertiesPriceApp
| 0 |
2025602
|
import sys
import os
import math
import pandas as pd
import numpy as np
from sqlalchemy import create_engine
def load_data(prices_filepath, regions_filepath):
'''
    This function loads the dataset co_properties.csv, which contains prices of properties in Colombia
    Params:
        prices_filepath (str): String that contains the path to the co_properties file (csv file with prices of properties in Colombia)
        regions_filepath (str): String that contains the path to regions.csv (csv file with geographic regions in Colombia)
    Returns:
        df_prices, regions (tuple of pandas DataFrames):
            df_prices: This dataframe contains the following columns:
columns:
id-->Id for each property
ad_type-->constant column
                    start_date, end_date, created_on-->start, end and creation dates of the sale offer
lat, long-->Latitude and Longitude (geographic position)
l1-->Country (constant column, all properties are in Colombia)
l2, l3, l4, l5, l6-->Department, City, Zone, Locality, Neighborhood where property is located
rooms, bedrooms, bathrooms, surface_total, surface_covered-->Features of property
price-->Target variable for prediction model
currency-->Almost all prices are in COP currency
price_period-->Constant column
title, description--> ad title and description
property_type-->type possibles are: Otro, Apartamento, Casa, Lote, Oficina, PH etc.
operation_type-->type possibles of operations are: Venta, Arriendo, Arriendo temporal
            regions: This dataframe contains the following columns:
columns:
l2-->Department
Region-->Region where Department is located
l2shp-->Department in other format for easy integration with shape file of department
'''
df_prices = pd.read_csv(prices_filepath)
regions = pd.read_csv(regions_filepath,sep=";", encoding="latin-1")
return (df_prices, regions)
def clean_data(df_prices):
'''
    This function cleans the df_prices dataframe to be used in the model. The main operations are:
        1. Remove cases with a currency different from COP
        2. Remove constant columns
        3. Keep cases with operation_type = Venta and remove the operation_type column
        4. Assign missing values to invalid values of the variables: ('surface_total', 'surface_covered', 'price')
        5. Create dummy variables for missing values in features.
            1--> if the feature has a missing value
            0--> if the value is valid.
        6. Remove string and date variables not used in the model or maps.
'''
#Step 1: Remove cases with currency different to COP
df_prices = df_prices[df_prices['currency']=="COP"]
#Step 2: Remove constants columns:
columns_to_remove = []
for col in df_prices.columns:
distinct_values = df_prices[col].unique()
if len(distinct_values)==1:
columns_to_remove.append(col)
df_prices = df_prices.drop(columns_to_remove, axis=1)
#Step 3: Choose cases with operation type = Venta
df_prices = df_prices[df_prices.operation_type=="Venta"]
df_prices = df_prices.drop(['operation_type'], axis=1)
#Step 4: Assign missing value to invalid values of variables: ('surface_total','price')
surface_total_mod = list(df_prices['surface_total'].apply(lambda x: float('NaN') if x<=0 else x))
price_mod = list(df_prices['price'].apply(lambda x: float('NaN') if x<=0 else x))
df_prices = df_prices.drop(['surface_total','price'], axis=1)
df_prices['surface_total'] = surface_total_mod
df_prices['price'] = price_mod
#Step 5: Create dummies variables for missing values in features
    columns_for_model = ['lat', 'lon', 'rooms', 'l2', 'l3', 'l4', 'l5', 'l6', 'bedrooms',
                         'bathrooms', 'surface_total', 'surface_covered', 'price']
numeric_columns = df_prices.select_dtypes(include=np.number).columns
names_dummies = ['missing_'+col for col in columns_for_model]
for i in range(len(columns_for_model)):
if columns_for_model[i] in numeric_columns:
            df_prices[names_dummies[i]] = df_prices[columns_for_model[i]].isna().apply(lambda x: 1 if x else 0)
#Step 6: Remove string and date variables no-used in model or maps.
non_used = ['id','start_date','end_date','created_on','title','description', "price_period"]
df_prices = df_prices.drop(non_used,axis=1)
print("INFO[] The cleaned table has the following fields: ")
print("\n{}\n".format(df_prices.columns))
return df_prices
def join_data(df_prices, regions):
'''
    This function merges the df_prices table with the regions table, using the key column l2. This is useful to build
    a choropleth map using the l2shp and Region columns present in the regions table.
Params:
df_prices (pandas DataFrame): Contain the cleaned df_prices table
regions(pandas DataFrame): Contain the information in regions.csv file
Returns:
df_prices_full (pandas DataFrame): Contain the cleaned df_prices table with two additional columns: l2shp and Region
'''
df_prices_full = pd.merge(df_prices, regions, on="l2")
return df_prices_full
def save_data(df_prices_full, database_filename):
'''
    This function saves the df_prices_full table in a SQLite database. This table will be used in the modelling
and mapping stages
Params:
df_prices_full (pandas DataFrame): Contain the cleaned df_prices table with two additional columns: l2shp and Region
database_filename (String): Contain the path to location where table will be stored
Returns:
        This function is a procedure; it returns None
'''
engine = create_engine('sqlite:///'+database_filename)
df_prices_full.to_sql('Cleaned_prices', engine, index=False, if_exists = 'replace')
def main():
'''
    This function controls the ETL flow and calls the other functions to load, clean, and save the data
'''
#sys_argv = ['process_data.py', 'co_properties.csv', 'regions.csv', 'PropertiesPrices.db']
if len(sys.argv) == 4:
df_prices_filepath, regions_filepath, database_filepath = sys.argv[1:]
print('Loading data...\n df_prices: {}\n regions: {}'
.format(df_prices_filepath, regions_filepath))
df, regions = load_data(df_prices_filepath, regions_filepath)
print('Cleaning data...')
df = clean_data(df)
print('Joining data...')
df = join_data(df,regions)
print('Saving data...\n DATABASE: {}'.format(database_filepath))
save_data(df, database_filepath)
print('Cleaned and joined data saved to database!')
else:
print('Please provide the filepaths of the df_prices and regions '\
'datasets as the first and second argument respectively, as '\
'well as the filepath of the database to save the joined and cleaned data '\
'to as the third argument. \n\nExample: python process_data.py '\
'co_properties.csv regions.csv '\
'PropertiesPrices.db')
if __name__ == '__main__':
main()
| 8,068 |
baselines/common/policies.py
|
dreamersnme/future
| 11 |
2025180
|
import tensorflow as tf
from baselines.a2c.utils import fc
from baselines.common.distributions import make_pdtype
import gym
class PolicyWithValue(tf.Module):
"""
Encapsulates fields and methods for RL policy and value function estimation with shared parameters
"""
def __init__(self, ac_space, policy_network, value_network=None, estimate_q=False):
"""
Parameters:
----------
ac_space action space
policy_network keras network for policy
value_network keras network for value
estimate_q q value or v value
"""
self.policy_network = policy_network
self.value_network = value_network or policy_network
self.estimate_q = estimate_q
self.initial_state = None
        # Based on the action space, select the appropriate probability distribution type
self.pdtype = make_pdtype(policy_network.output_shape, ac_space, init_scale=0.01)
if estimate_q:
assert isinstance(ac_space, gym.spaces.Discrete)
self.value_fc = fc(self.value_network.output_shape, 'q', ac_space.n)
else:
self.value_fc = fc(self.value_network.output_shape, 'vf', 1)
@tf.function
def step(self, observation):
"""
Compute next action(s) given the observation(s)
Parameters:
----------
observation batched observation data
Returns:
-------
(action, value estimate, next state, negative log likelihood of the action under current policy parameters) tuple
"""
latent = self.policy_network(observation)
pd, pi = self.pdtype.pdfromlatent(latent)
action = pd.sample()
neglogp = pd.neglogp(action)
value_latent = self.value_network(observation)
vf = tf.squeeze(self.value_fc(value_latent), axis=1)
return action, vf, None, neglogp
@tf.function
def value(self, observation):
"""
Compute value estimate(s) given the observation(s)
Parameters:
----------
observation observation data (either single or a batch)
Returns:
-------
value estimate
"""
value_latent = self.value_network(observation)
result = tf.squeeze(self.value_fc(value_latent), axis=1)
return result
| 2,365 |
qcodes/dataset/database_fix_functions.py
|
LGruenhaupt/Qcodes
| 1 |
2024682
|
"""
Sometimes it happens that databases are put into inconsistent/corrupt states.
This module contains functions to remedy known issues.
"""
import json
import logging
from typing import Dict, Sequence, Any
from tqdm import tqdm
import qcodes.dataset.descriptions.versioning.v0 as v0
import qcodes.dataset.descriptions.versioning.serialization as serial
from qcodes.dataset.sqlite.connection import ConnectionPlus, atomic, \
atomic_transaction
from qcodes.dataset.sqlite.db_upgrades import get_user_version
from qcodes.dataset.sqlite.queries import _get_parameters, \
get_run_description, update_run_description, _update_run_description
from qcodes.dataset.sqlite.query_helpers import one, select_one_where
log = logging.getLogger(__name__)
def fix_version_4a_run_description_bug(conn: ConnectionPlus) -> Dict[str, int]:
"""
Fix function to fix a bug where the RunDescriber accidentally wrote itself
to string using the (new) InterDependencies_ object instead of the (old)
InterDependencies object. After the first call, this function should be
idempotent.
Args:
conn: the connection to the database
Returns:
A dict with the fix results ('runs_inspected', 'runs_fixed')
"""
user_version = get_user_version(conn)
if not user_version == 4:
raise RuntimeError('Database of wrong version. Will not apply fix. '
                           f'Expected version 4, found version {user_version}')
no_of_runs_query = "SELECT max(run_id) FROM runs"
no_of_runs = one(atomic_transaction(conn, no_of_runs_query), 'max(run_id)')
no_of_runs = no_of_runs or 0
with atomic(conn) as conn:
pbar = tqdm(range(1, no_of_runs+1))
pbar.set_description("Fixing database")
# collect some metrics
runs_inspected = 0
runs_fixed = 0
old_style_keys = ['paramspecs']
new_style_keys = ['parameters', 'dependencies', 'inferences',
'standalones']
for run_id in pbar:
desc_str = get_run_description(conn, run_id)
desc_ser = json.loads(desc_str)
idps_ser = desc_ser['interdependencies']
if list(idps_ser.keys()) == old_style_keys:
pass
elif list(idps_ser.keys()) == new_style_keys:
old_desc_ser = \
_convert_run_describer_v1_like_dict_to_v0_like_dict(
desc_ser)
json_str = json.dumps(old_desc_ser)
_update_run_description(conn, run_id, json_str)
runs_fixed += 1
else:
raise RuntimeError(f'Invalid runs_description for run_id: '
f'{run_id}')
runs_inspected += 1
return {'runs_inspected': runs_inspected, 'runs_fixed': runs_fixed}
def _convert_run_describer_v1_like_dict_to_v0_like_dict(
new_desc_dict: Dict[str, Any]) -> Dict[str, Any]:
"""
This function takes the given dict which is expected to be
representation of `RunDescriber` with `InterDependencies_` (underscore!)
object and without "version" field, and converts it to a dict that is a
representation of the `RunDescriber` object with `InterDependencies`
(no underscore!) object and without "version" field.
"""
new_desc_dict = new_desc_dict.copy()
# We intend to use conversion methods from `serialization` module,
# but those work only with RunDescriber representations that have
# "version" field. So first, the "version" field with correct value is
# added.
new_desc_dict['version'] = 1
# Out of that dict we create RunDescriber object of the current version
# (regardless of what the current version is).
new_desc = serial.from_dict_to_current(new_desc_dict)
# The RunDescriber of the current version gets converted to a dictionary
# that represents a RunDescriber object of version 0 - this is the one
# that has InterDependencies object in it (not the InterDependencies_ one).
old_desc_dict = serial.to_dict_as_version(new_desc, 0)
# Lastly, the "version" field is removed.
old_desc_dict.pop('version')
return old_desc_dict
def fix_wrong_run_descriptions(conn: ConnectionPlus,
run_ids: Sequence[int]) -> None:
"""
NB: This is a FIX function. Do not use it unless your database has been
diagnosed with the problem that this function fixes.
Overwrite faulty run_descriptions by using information from the layouts and
dependencies tables. If a correct description is found for a run, that
run is left untouched.
Args:
conn: The connection to the database
run_ids: The runs to (potentially) fix
"""
user_version = get_user_version(conn)
if not user_version == 3:
raise RuntimeError('Database of wrong version. Will not apply fix. '
                           f'Expected version 3, found version {user_version}')
log.info('[*] Fixing run descriptions...')
for run_id in run_ids:
trusted_paramspecs = _get_parameters(conn, run_id)
trusted_desc = v0.RunDescriber(
v0.InterDependencies(*trusted_paramspecs))
actual_desc_str = select_one_where(conn, "runs",
"run_description",
"run_id", run_id)
trusted_json = serial.to_json_as_version(trusted_desc, 0)
if actual_desc_str == trusted_json:
log.info(f'[+] Run id: {run_id} had an OK description')
else:
log.info(f'[-] Run id: {run_id} had a broken description. '
f'Description found: {actual_desc_str}')
update_run_description(conn, run_id, trusted_json)
log.info(f' Run id: {run_id} has been updated.')
| 5,864 |
prompy/promise.py
|
T4rk1n/prompy
| 0 |
2024100
|
"""Promise for python"""
import collections
import enum
import uuid
from typing import Callable, Any, List, Union, Deque, TypeVar, Generic, Tuple
from prompy.errors import UnhandledPromiseError, PromiseRejectionError
TPromiseResults = TypeVar('TPromiseResults')
# generics don't work with callbacks, check result prop for type.
CompleteCallback = Callable[[Union[List[TPromiseResults], TPromiseResults], Exception], None]
ThenCallback = Callable[[TPromiseResults], None]
CatchCallback = Callable[[Exception], None]
PromiseStarter = Callable[[Callable, Callable], None]
class PromiseState(enum.Enum):
pending = 1
fulfilled = 2
rejected = 3
class Promise(Generic[TPromiseResults]):
"""
Promise interface
Based on js Promises.
Basic usage:
`p = Promise(lambda resolve, reject: resolve('Hello')).then(print)`
"""
def __init__(self, starter: PromiseStarter,
then: ThenCallback=None,
catch: CatchCallback=None,
complete: CompleteCallback=None,
raise_again: bool=False,
start_now: bool=False,
results_buffer_size: int = 100):
"""
        Promise takes at least a starter method that receives this promise's
        resolve and reject callables. exec() is not called by default, but with
        start_now the execution happens synchronously.
:param starter: otherwise known as executor.
:param then: initial resolve callback
:param catch: initial catch callback
:param complete: initial complete callback
:param raise_again: raise the rejection error again.
:param start_now:
:param results_buffer_size: number of results to keep in the buffer.
"""
self.canceled = False
self.completed_at = None
self._promise_id: uuid.UUID = uuid.uuid4()
self._then: List[ThenCallback] = [then] if then else []
self._catch: List[CatchCallback] = [catch] if catch else []
self._complete: List[CompleteCallback] = [complete] if complete else []
self._raise_again = raise_again
self._starter = starter
self._result: Any = None
self._results: Deque = collections.deque(maxlen=results_buffer_size)
self._error: Exception = None
self._state = PromiseState.pending
if start_now:
self.exec()
def then(self, func: ThenCallback):
"""
Add a callback to resolve
:param func: callback to resolve
:return:
"""
self._then.append(func)
if self.state == PromiseState.fulfilled:
func(self.result)
return self
def catch(self, func: CatchCallback):
"""
Add a callback to rejection
:param func:
:return:
"""
self._catch.append(func)
if self.state == PromiseState.rejected:
func(self.error)
return self
def complete(self, func: CompleteCallback):
"""
Add a callback to finally block
:param func:
:return:
"""
self._complete.append(func)
return self
def resolve(self, result: TPromiseResults):
"""
Resolve the promise, called by executor.
:param result:
:return:
"""
self._result = result # result always the last resolved
self._results.append(result)
for t in self._then:
self.callback_handler(t(result))
self._finish(PromiseState.fulfilled)
def reject(self, error: Exception):
"""
Reject the promise.
:param error:
:return:
"""
self._error = error
if not self._catch:
self._state = PromiseState.rejected
raise UnhandledPromiseError(
f"Unhandled promise exception: {self.id}") from error
for c in self._catch:
self.callback_handler(c(error))
self._finish(PromiseState.rejected)
def _finish(self, state):
for c in self._complete:
self.callback_handler(c(self.result, self._error))
self._state = state
def exec(self):
"""
Execute the starter method.
:return:
"""
try:
started = self._starter(self.resolve, self.reject)
self.callback_handler(started)
except Exception as error:
self.reject(error)
if self._raise_again:
raise PromiseRejectionError(
f"Promise {self.id} was rejected") from error
@property
def id(self) -> uuid.UUID:
return self._promise_id
@property
def result(self) -> Union[Tuple[TPromiseResults], TPromiseResults]:
return tuple(self._results) if len(self._results) > 1 else self._result
@property
def error(self) -> Exception:
return self._error
@property
def state(self) -> PromiseState:
return self._state
def callback_handler(self, obj: Any):
"""
Override to handle the return value of callbacks.
:param obj: The return value of a callback
:return:
"""
self._handle_generator_callback(obj)
# noinspection PyMethodMayBeStatic
def _handle_generator_callback(self, obj):
if hasattr(obj, '__iter__') and not hasattr(obj, '__len__'):
try:
while True:
next(obj)
except StopIteration:
pass
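# Hedged usage sketch (the values below are illustrative only):
if __name__ == '__main__':
    p = Promise(lambda resolve, reject: resolve('Hello'), start_now=True)
    p.then(print).catch(lambda err: print('rejected:', err))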
| 5,517 |
example_test.py
|
koliaok/RNN_javaSourceCodeLearningModel
| 0 |
2025216
|
import os
import csv
import itertools
import nltk
unknown_token = "UNKNOWN_CODE"
sentence_start_token = "CODE_START"
sentence_end_token = "CODE_END"
encoding = 'utf-8'
data_dir = "data/tinyshakespeare"
input_file = os.path.join(data_dir, "allJavaSourceCode.txt")
with open(input_file, 'r', encoding=encoding) as f:
reader = f.readlines()
# Split full comments into sentences
sentences = []
for x in reader:
sentences.append(sentence_start_token + " " + x + " " + sentence_end_token)
tokenized_sentences = [nltk.word_tokenize(sent) for sent in sentences]
word_freq = nltk.FreqDist(itertools.chain(*tokenized_sentences))
print("Found %d unique words tokens." % len(word_freq.items()))
# vocab = word_freq.most_common(vocabulary_size)
# Get the most common words and build index_to_word and word_to_index vectors
index_to_word = [x for x in word_freq]
index_to_word.append(unknown_token)
word_to_index = dict([(w, i) for i, w in enumerate(index_to_word)])
for i, sent in enumerate(tokenized_sentences):
tokenized_sentences[i] = [w if w in word_to_index else unknown_token for w in sent]
chars = index_to_word  ## groups items of the same type together, e.g. count_pairs: 00->('',145563), 01->('a',154343425); running zip would give ['','a',...] and [145563, 154343425]
vocab_size = len(chars)
vocab = word_to_index
| 1,313 |
tests/coverage/web/test_delete_quote.py
|
vyahello/quotes
| 3 |
2025389
|
import pytest
from selenium.webdriver.remote.webdriver import WebDriver
from tests.coverage.web.helper.pages import DeletePage
from tests.markers import web # noqa: I100
pytestmark = web
@pytest.fixture()
def delete_page(browser: WebDriver) -> DeletePage:
page: DeletePage = DeletePage(browser)
if not page.loaded():
page.navigate()
yield page
def test_delete_recent(delete_page: DeletePage) -> None:
assert delete_page.delete().message() == "Quote is deleted"
| 491 |
raster/pyfmask.py
|
EkicierNico/pykic
| 10 |
2023072
|
"""
Function to apply Fmask to Landast 8
Author: <NAME>
Dependencies: python-fmask (install from Conda)
'conda config --add channels conda-forge'
'conda install python-fmask'
Release: V1.01 03/2021
"""
import os, glob, shutil
import logging
import miscellaneous.miscdate as mm # pykic module
def applyfmask(directory):
"""
Apply Fmask on Landsat 8
:param directory: - path of folder image
- path with several folders (unzipped folder from .gz Landsat file)
:return: LC08_PATHROW_DATE_fmask.img in each folder
code 0 = null
code 1 = clear land
code 2 = cloud
code 3 = cloud shadow
code 4 = snow
code 5 = water
"""
imgdir = os.listdir(directory)
    tmpdir = os.path.join(os.getcwd(), 'tmpfmask')  # temporary working directory for Fmask
os.mkdir(tmpdir)
if os.path.isdir(os.path.join(directory, imgdir[0])):
for p in imgdir:
if os.path.isdir(os.path.join(directory, p)):
# Output name
namef = glob.glob(os.path.join(directory, p, '*B2.TIF'))
namef = os.path.basename(namef[0])
pathrow = namef.split('_')
pathrow = pathrow[2]
dato = mm.datefromstr(namef)
oname = 'LC08_{0:s}_{1:d}_fmask.img'.format(pathrow, dato)
# Check if clouds file already exists
if os.path.isfile(os.path.join(directory, p, oname)):
logging.warning(" File '{0:s}' already exists".format(oname))
continue
# Fmask
cmd = 'fmask_usgsLandsatStacked.py -o {0:s} -e {2:s} --scenedir {1:s}'.format(os.path.join(directory, p, oname),
os.path.join(directory, p),
tmpdir)
os.system(cmd)
elif os.path.isfile(os.path.join(directory, imgdir[0])):
# Output name
namef = glob.glob(os.path.join(directory, '*B2.TIF'))
namef = os.path.basename(namef[0])
pathrow = namef.split('_')
pathrow = pathrow[2]
dato = mm.datefromstr(namef)
oname = 'LC08_{0:s}_{1:d}_fmask.img'.format(pathrow, dato)
# Check if clouds file already exists
if os.path.isfile(os.path.join(directory, oname)):
logging.warning(" File '{0:s}' already exists".format(oname))
return None
# Fmask
cmd = 'fmask_usgsLandsatStacked.py -o {0:s} -e {2:s} --scenedir {1:s}'.format(os.path.join(directory, oname),
directory,
tmpdir)
os.system(cmd)
shutil.rmtree(tmpdir)
return None
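# Hedged usage sketch (the path below is an assumed placeholder for an unzipped
# Landsat 8 scene folder, not a path from the original project):
#   applyfmask('/data/landsat8/LC08_L1TP_scene')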
| 3,089 |
pychron/git_archive/test/diff.py
|
ael-noblegas/pychron
| 1 |
2025560
|
from __future__ import absolute_import
import unittest
from pychron.git_archive.diff_util import extract_line_numbers # extract_line_changes, extract_line_changes2
class DiffTestCase(unittest.TestCase):
def _test_modify(self, a, b, els, ers):
ls, rs = extract_line_numbers(a, b)
self.assertListEqual(ls, els)
self.assertListEqual(rs, ers)
def test_modify_add(self):
a = '''a=1
b=1'''
b = '''a=1
b=1
c=2'''
self._test_modify(a, b, [], [2])
def test_modify1(self):
a = '''a=1
b=1'''
b = '''a=2
b=1'''
self._test_modify(a, b, [0], [0])
def test_modify2(self):
a = '''a=1
b=1'''
b = '''a=1
b=2'''
self._test_modify(a, b, [1], [1])
def test_modify3(self):
a = '''a=2
b=1'''
b = '''a=1
b=1'''
self._test_modify(a, b, [0], [0])
def test_diff_sub(self):
a = '''a=1
b=1'''
b = '''a=1
b=1
c=1'''
self._test_modify(b, a, [2], [])
def test_modify_sameline_add(self):
a = '''a=1
b=1'''
b = '''a=12
b=1'''
self._test_modify(a, b, [0], [0])
def test_modify_sameline_sub(self):
a = '''a=1
b=1'''
b = '''a=12
b=1'''
self._test_modify(b, a, [0], [0])
def test_add_line(self):
a = '''a=1
b=1'''
b = '''a=1
c=12
b=1'''
self._test_modify(a, b, [], [1])
if __name__ == '__main__':
unittest.main()
| 1,445 |
nexxT/core/CompositeFilter.py
|
pfrydlewicz/nexxT
| 5 |
2025171
|
# SPDX-License-Identifier: Apache-2.0
# Copyright (C) 2020 ifm electronic gmbh
#
# THE PROGRAM IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND.
#
"""
This module defines the CompositeFilter class
"""
import logging
from nexxT.interface import Filter, OutputPort, InputPort
from nexxT.core.SubConfiguration import SubConfiguration
from nexxT.core.Exceptions import NexTRuntimeError, NexTInternalError
logger = logging.getLogger(__name__)
class CompositeFilter(SubConfiguration):
"""
This class handles a sub-configuration of a nexxT application. Sub configs are either applications or
SubGraphs (which behave like a filter).
"""
class CompositeInputNode(Filter):
"""
This filter acts as a dummy filter inside the composite subgraph; because it represents
the input to the subgraph, it uses dynamic output ports
"""
def __init__(self, env):
Filter.__init__(self, False, True, env)
class CompositeOutputNode(Filter):
"""
This filter acts as a dummy filter inside the composite subgraph; because it represents
the output of the subgraph, it uses dynamic input ports
"""
def __init__(self, env):
Filter.__init__(self, True, False, env)
class CompositeNode(Filter):
"""
This class is used to represent a composite subgraph in a filter graph.
"""
def __init__(self, env, envCInput, envCOutput, parent):
Filter.__init__(self, False, False, env)
self._parent = parent
for src in envCInput.getDynamicOutputPorts():
dest = InputPort(False, src.name(), env)
self.addStaticPort(dest)
for src in envCOutput.getDynamicInputPorts():
dest = OutputPort(False, src.name(), env)
self.addStaticPort(dest)
def getGraph(self):
"""
Returns the filter graph implementing this composite node (child filter graph)
:return: a FilterGraph instance
"""
return self._parent.getGraph()
def getCompositeName(self):
"""
Returns the type name of this composite filter (this is the same for all instances of a composite filter)
:return: a string
"""
return self._parent.getName()
def __init__(self, name, configuration):
super().__init__(name, configuration)
self._configuration = configuration
_compositeInputNode = self._graph.addNode(CompositeFilter, "CompositeInputNode", "CompositeInput")
_compositeOutputNode = self._graph.addNode(CompositeFilter, "CompositeOutputNode", "CompositeOutput")
if _compositeInputNode != "CompositeInput" or _compositeOutputNode != "CompositeOutput":
raise NexTInternalError("unexpected node names.")
# prevent renaming and deletion of these special nodes
self._graph.protect("CompositeInput")
self._graph.protect("CompositeOutput")
configuration.addComposite(self)
def compositeNode(self, env):
"""
Factory function for creating a dummy filter instance (this one will never get active).
:param env: the FilterEnvironment instance
:return: a Filter instance
"""
mockup = env.getMockup()
compIn = self._graph.getMockup("CompositeInput")
compOut = self._graph.getMockup("CompositeOutput")
res = CompositeFilter.CompositeNode(env, compIn, compOut, self)
def renameCompositeInputPort(node, oldPortName, newPortName):
graph = mockup.getGraph()
try:
node = graph.nodeName(mockup)
except NexTRuntimeError:
# node has been already removed from graph
logger.internal("Node '%s' already has been removed.", node, exc_info=True)
return
graph.renameInputPort(node, oldPortName, newPortName)
def renameCompositeOutputPort(node, oldPortName, newPortName):
graph = mockup.getGraph()
try:
node = graph.nodeName(mockup)
except NexTRuntimeError:
# node has been already removed from graph
logger.internal("Node '%s' already has been removed.", node, exc_info=True)
return
graph.renameOutputPort(node, oldPortName, newPortName)
self._graph.dynOutputPortRenamed.connect(renameCompositeInputPort)
self._graph.dynInputPortRenamed.connect(renameCompositeOutputPort)
if mockup is not None:
self._graph.dynOutputPortAdded.connect(mockup.createFilterAndUpdate)
self._graph.dynInputPortAdded.connect(mockup.createFilterAndUpdate)
self._graph.dynOutputPortDeleted.connect(mockup.createFilterAndUpdate)
self._graph.dynInputPortDeleted.connect(mockup.createFilterAndUpdate)
return res
def checkRecursion(self):
"""
Check for composite recursions and raise a CompositeRecursion exception if necessary. Called from FilterGraph
after adding a composite filter.
:return:
"""
self._configuration.checkRecursion()
| 5,241 |
keystoneclient/tests/v2_0/test_endpoints.py
|
alexpilotti/python-keystoneclient
| 5 |
2023881
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from keystoneclient.tests.v2_0 import utils
from keystoneclient.v2_0 import endpoints
class EndpointTests(utils.TestCase):
def setUp(self):
super(EndpointTests, self).setUp()
self.TEST_ENDPOINTS = {
'endpoints': [
{
'adminurl': 'http://host-1:8774/v1.1/$(tenant_id)s',
'id': '8f9531231e044e218824b0e58688d262',
'internalurl': 'http://host-1:8774/v1.1/$(tenant_id)s',
'publicurl': 'http://host-1:8774/v1.1/$(tenant_id)s',
'region': 'RegionOne',
},
{
'adminurl': 'http://host-1:8774/v1.1/$(tenant_id)s',
'id': '8f9531231e044e218824b0e58688d263',
'internalurl': 'http://host-1:8774/v1.1/$(tenant_id)s',
'publicurl': 'http://host-1:8774/v1.1/$(tenant_id)s',
'region': 'RegionOne',
}
]
}
def test_create_with_optional_params(self):
req_body = {
"endpoint": {
"region": "RegionOne",
"publicurl": "http://host-3:8774/v1.1/$(tenant_id)s",
"internalurl": "http://host-3:8774/v1.1/$(tenant_id)s",
"adminurl": "http://host-3:8774/v1.1/$(tenant_id)s",
"service_id": uuid.uuid4().hex,
}
}
resp_body = {
"endpoint": {
"adminurl": "http://host-3:8774/v1.1/$(tenant_id)s",
"region": "RegionOne",
"id": uuid.uuid4().hex,
"internalurl": "http://host-3:8774/v1.1/$(tenant_id)s",
"publicurl": "http://host-3:8774/v1.1/$(tenant_id)s",
}
}
self.stub_url('POST', ['endpoints'], json=resp_body)
endpoint = self.client.endpoints.create(
region=req_body['endpoint']['region'],
publicurl=req_body['endpoint']['publicurl'],
adminurl=req_body['endpoint']['adminurl'],
internalurl=req_body['endpoint']['internalurl'],
service_id=req_body['endpoint']['service_id']
)
self.assertIsInstance(endpoint, endpoints.Endpoint)
self.assertRequestBodyIs(json=req_body)
def test_create_with_optional_params_as_none(self):
req_body_without_defaults = {
"endpoint": {
"region": "RegionOne",
"service_id": uuid.uuid4().hex,
"publicurl": "http://host-3:8774/v1.1/$(tenant_id)s",
"adminurl": None,
"internalurl": None,
}
}
resp_body = {
"endpoint": {
"region": "RegionOne",
"id": uuid.uuid4().hex,
"publicurl": "http://host-3:8774/v1.1/$(tenant_id)s",
"adminurl": None,
"internalurl": None,
}
}
self.stub_url('POST', ['endpoints'], json=resp_body)
endpoint_without_defaults = self.client.endpoints.create(
region=req_body_without_defaults['endpoint']['region'],
publicurl=req_body_without_defaults['endpoint']['publicurl'],
service_id=req_body_without_defaults['endpoint']['service_id'],
adminurl=None,
internalurl=None
)
self.assertIsInstance(endpoint_without_defaults, endpoints.Endpoint)
self.assertRequestBodyIs(json=req_body_without_defaults)
def test_create_without_optional_params(self):
req_body_without_defaults = {
"endpoint": {
"region": "RegionOne",
"service_id": uuid.uuid4().hex,
"publicurl": "http://host-3:8774/v1.1/$(tenant_id)s",
"adminurl": None,
"internalurl": None,
}
}
resp_body = {
"endpoint": {
"region": "RegionOne",
"id": uuid.uuid4().hex,
"publicurl": "http://host-3:8774/v1.1/$(tenant_id)s",
"adminurl": None,
"internalurl": None,
}
}
self.stub_url('POST', ['endpoints'], json=resp_body)
endpoint_without_defaults = self.client.endpoints.create(
region=req_body_without_defaults['endpoint']['region'],
publicurl=req_body_without_defaults['endpoint']['publicurl'],
service_id=req_body_without_defaults['endpoint']['service_id']
)
self.assertIsInstance(endpoint_without_defaults, endpoints.Endpoint)
self.assertRequestBodyIs(json=req_body_without_defaults)
def test_delete(self):
self.stub_url('DELETE', ['endpoints', '8f953'], status_code=204)
self.client.endpoints.delete('8f953')
def test_list(self):
self.stub_url('GET', ['endpoints'], json=self.TEST_ENDPOINTS)
endpoint_list = self.client.endpoints.list()
[self.assertIsInstance(r, endpoints.Endpoint)
for r in endpoint_list]
| 5,644 |
cornice_swagger/converters/schema.py
|
fmigneault/cornice.ext.swagger
| 0 |
2025002
|
"""
This module handles the conversion between colander object schemas and swagger
object schemas by converting types and node validators.
"""
import colander
from cornice_swagger.converters.exceptions import NoSuchConverter
def convert_length_validator_factory(max_key, min_key):
def validator_converter(validator):
converted = None
if isinstance(validator, colander.Length):
converted = {}
if validator.max is not None:
converted[max_key] = validator.max
if validator.min is not None:
converted[min_key] = validator.min
return converted
return validator_converter
def convert_oneof_validator_factory():
def validator_converter(validator):
converted = None
if isinstance(validator, colander.OneOf):
converted = {
'enum': list(validator.choices)
}
return converted
return validator_converter
def convert_range_validator(validator):
converted = None
if isinstance(validator, colander.Range):
converted = {}
if validator.max is not None:
converted['maximum'] = validator.max
if validator.min is not None:
converted['minimum'] = validator.min
return converted
def convert_regex_validator(validator):
converted = None
if isinstance(validator, colander.Regex):
converted = {}
if hasattr(colander, 'url') and validator is colander.url:
converted['format'] = 'url'
elif isinstance(validator, colander.Email):
converted['format'] = 'email'
else:
converted['pattern'] = validator.match_object.pattern
return converted
class ValidatorConversionDispatcher(object):
def __init__(self, *converters):
self.converters = converters
def __call__(self, schema_node, validator=None):
if validator is None:
validator = schema_node.validator
converted = {}
if validator is not None:
for converter in (self.convert_all_validator,) + self.converters:
ret = converter(validator)
if ret is not None:
converted = ret
break
return converted
def convert_all_validator(self, validator):
if isinstance(validator, colander.All):
converted = {}
for v in validator.validators:
ret = self(None, v)
converted.update(ret)
return converted
else:
return None
class TypeConverter(object):
type = ''
def __init__(self, dispatcher):
self.dispatcher = dispatcher
def convert_validator(self, schema_node):
return {}
def convert_type(self, schema_node):
converted = {
'type': self.type
}
if schema_node.title:
converted['title'] = schema_node.title
if schema_node.description:
converted['description'] = schema_node.description
if schema_node.default is not colander.null:
converted['default'] = schema_node.default
return converted
def __call__(self, schema_node):
converted = self.convert_type(schema_node)
converted.update(self.convert_validator(schema_node))
return converted
class BaseStringTypeConverter(TypeConverter):
type = 'string'
format = None
def convert_type(self, schema_node):
converted = super(BaseStringTypeConverter,
self).convert_type(schema_node)
if self.format is not None:
converted['format'] = self.format
return converted
class BooleanTypeConverter(TypeConverter):
type = 'boolean'
class DateTypeConverter(BaseStringTypeConverter):
format = 'date'
class DateTimeTypeConverter(BaseStringTypeConverter):
format = 'date-time'
class NumberTypeConverter(TypeConverter):
type = 'number'
convert_validator = ValidatorConversionDispatcher(
convert_range_validator,
convert_oneof_validator_factory(),
)
class IntegerTypeConverter(NumberTypeConverter):
type = 'integer'
class StringTypeConverter(BaseStringTypeConverter):
convert_validator = ValidatorConversionDispatcher(
convert_length_validator_factory('maxLength', 'minLength'),
convert_regex_validator,
convert_oneof_validator_factory(),
)
class TimeTypeConverter(BaseStringTypeConverter):
format = 'time'
class ObjectTypeConverter(TypeConverter):
type = 'object'
def convert_type(self, schema_node):
converted = super(ObjectTypeConverter,
self).convert_type(schema_node)
properties = {}
required = []
for sub_node in schema_node.children:
properties[sub_node.name] = self.dispatcher(sub_node)
if sub_node.required:
required.append(sub_node.name)
if len(properties) > 0:
converted['properties'] = properties
if len(required) > 0:
converted['required'] = required
if schema_node.typ.unknown == 'preserve':
converted['additionalProperties'] = {}
return converted
class ArrayTypeConverter(TypeConverter):
type = 'array'
convert_validator = ValidatorConversionDispatcher(
convert_length_validator_factory('maxItems', 'minItems'),
)
def convert_type(self, schema_node):
converted = super(ArrayTypeConverter,
self).convert_type(schema_node)
converted['items'] = self.dispatcher(schema_node.children[0])
return converted
class TypeConversionDispatcher(object):
def __init__(self, custom_converters={}, default_converter=None):
self.converters = {
colander.Boolean: BooleanTypeConverter,
colander.Date: DateTypeConverter,
colander.DateTime: DateTimeTypeConverter,
colander.Float: NumberTypeConverter,
colander.Integer: IntegerTypeConverter,
colander.Mapping: ObjectTypeConverter,
colander.Sequence: ArrayTypeConverter,
colander.String: StringTypeConverter,
colander.Time: TimeTypeConverter,
}
self.converters.update(custom_converters)
self.default_converter = default_converter
def __call__(self, schema_node):
schema_type = schema_node.typ
schema_type = type(schema_type)
converter_class = self.converters.get(schema_type)
if converter_class is None:
if self.default_converter:
converter_class = self.default_converter
else:
raise NoSuchConverter
converter = converter_class(self)
converted = converter(schema_node)
return converted
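# Hedged usage sketch; the colander schema below is an assumed example, not part of
# the original module.
if __name__ == '__main__':
    class _Example(colander.MappingSchema):
        name = colander.SchemaNode(colander.String(), description='A name')
        age = colander.SchemaNode(colander.Integer(), missing=colander.drop)

    dispatcher = TypeConversionDispatcher()
    print(dispatcher(_Example()))  # swagger-style dict with 'properties' and 'required'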
| 6,921 |
network/statistics/mainHeatmapComparison.py
|
shamanDevel/AdaptiveSampling
| 10 |
2023892
|
"""
Creates error plots for different heatmap values.
This evaluates the results of mainAdaptiveIsoStats.py
"""
import numpy as np
import matplotlib.pyplot as plt
from statsLoader import StatsConfig, StatsLoader, StatField
if __name__ == "__main__":
"""
Compare convergence with increasing heatmap mean.
"""
SETS = [
('Ejecta', StatsLoader("../result-stats/adaptiveIsoEnhance4Big/Ejecta.hdf5")),
#('RM', StatsLoader("../result-stats/adaptiveIso/RM")),
#('Thorax', StatsLoader("../result-stats/adaptiveIso/Thorax")),
#('Human', StatsLoader("../result-stats/adaptiveIso/Human")),
]
FIELDS = [
(StatField.SSIM_MASK, "DSSIM mask", lambda x: (1-x)/2),
(StatField.SSIM_NORMAL, "DSSIM normal", lambda x: (1-x)/2),
(StatField.SSIM_COLOR_NOAO, "DSSIM color (no AO)", lambda x: (1-x)/2),
(StatField.LPIPS_COLOR_NO_AO, "LPIPS color (no AO)", lambda x: x)
]
TITLE = "Mean stats - different heatmap mean / number of samples"
PATTERN = "plastic"
fig, ax = plt.subplots(
nrows=len(SETS), ncols=len(FIELDS),
sharey='row', sharex=True,
squeeze = False)
for a,(name, loader) in enumerate(SETS):
assert isinstance(loader, StatsLoader)
minHeatmapMin = min(loader.heatmapMin())
heatmapMeans = loader.heatmapMean()
# get model combinations (importance + reconstruction networks)
models = loader.allNetworks()
modelNames = ["%s - %s"%model for model in models]
# draw plots
for b,(field_id, field_name, transform) in enumerate(FIELDS):
field_data = []
handles = []
for model_idx, (importance, reconstruction) in enumerate(models):
results = []
for mean in heatmapMeans:
cfg = StatsConfig(importance, reconstruction, minHeatmapMin, mean, PATTERN)
data = loader.getStatistics(cfg)
l = len(data)
field_data = np.array([data[i][field_id] for i in range(l)])
results.append(transform(np.mean(field_data)))
print(field_name, ";", modelNames[model_idx], "->", results)
handles.append(ax[a,b].plot(heatmapMeans, results))
if a==0:
ax[a,b].set_title(field_name)
if b==0:
ax[a,b].set_ylabel(name)
fig.legend(handles,labels=modelNames,loc='center left')
handles = []
fig.suptitle(TITLE)
#fig.legend(handles,
# labels=modelNames,
# loc="lower center",
# borderaxespad=0.1,
# ncol=max(5,len(models)))
fig.subplots_adjust(
left=0.03, bottom=0.05, right=0.99, top=0.93,
wspace=0.03, hspace=0.09)
plt.show()
| 2,873 |
src/lib/nets/volumetric/cotr/DeTrans/position_encoding.py
|
charzharr/Hierarchical-Contrastive-Pretraining
| 0 |
2024613
|
"""
Positional encodings for the transformer.
"""
import math
import torch
from torch import nn
from typing import Optional
from torch import Tensor
class PositionEmbeddingSine(nn.Module):
"""
This is a more standard version of the position embedding, very similar to the one
used by the Attention is all you need paper, generalized to work on images.
"""
def __init__(self, num_pos_feats=[64, 64, 64], temperature=10000, normalize=False, scale=None):
super().__init__()
self.num_pos_feats = num_pos_feats
self.temperature = temperature
self.normalize = normalize
if scale is not None and normalize is False:
raise ValueError("normalize should be True if scale is passed")
if scale is None:
scale = 2 * math.pi
self.scale = scale
def forward(self, x):
bs, c, d, h, w = x.shape
mask = torch.zeros(bs, d, h, w, dtype=torch.bool).to(x.device)
assert mask is not None
not_mask = ~mask
d_embed = not_mask.cumsum(1, dtype=torch.float32)
y_embed = not_mask.cumsum(2, dtype=torch.float32)
x_embed = not_mask.cumsum(3, dtype=torch.float32)
if self.normalize:
eps = 1e-6
d_embed = (d_embed - 0.5) / (d_embed[:, -1:, :, :] + eps) * self.scale
y_embed = (y_embed - 0.5) / (y_embed[:, :, -1:, :] + eps) * self.scale
x_embed = (x_embed - 0.5) / (x_embed[:, :, :, -1:] + eps) * self.scale
dim_tx = torch.arange(self.num_pos_feats[0], dtype=torch.float32, device=x.device)
dim_tx = self.temperature ** (3 * (dim_tx // 3) / self.num_pos_feats[0])
dim_ty = torch.arange(self.num_pos_feats[1], dtype=torch.float32, device=x.device)
dim_ty = self.temperature ** (3 * (dim_ty // 3) / self.num_pos_feats[1])
dim_td = torch.arange(self.num_pos_feats[2], dtype=torch.float32, device=x.device)
dim_td = self.temperature ** (3 * (dim_td // 3) / self.num_pos_feats[2])
pos_x = x_embed[:, :, :, :, None] / dim_tx
pos_y = y_embed[:, :, :, :, None] / dim_ty
pos_d = d_embed[:, :, :, :, None] / dim_td
pos_x = torch.stack((pos_x[:, :, :, :, 0::2].sin(), pos_x[:, :, :, :, 1::2].cos()), dim=5).flatten(4)
pos_y = torch.stack((pos_y[:, :, :, :, 0::2].sin(), pos_y[:, :, :, :, 1::2].cos()), dim=5).flatten(4)
pos_d = torch.stack((pos_d[:, :, :, :, 0::2].sin(), pos_d[:, :, :, :, 1::2].cos()), dim=5).flatten(4)
pos = torch.cat((pos_d, pos_y, pos_x), dim=4).permute(0, 4, 1, 2, 3)
return pos
def build_position_encoding(mode, hidden_dim):
N_steps = hidden_dim // 3
if (hidden_dim % 3) != 0:
N_steps = [N_steps, N_steps, N_steps + hidden_dim % 3]
else:
N_steps = [N_steps, N_steps, N_steps]
if mode in ('v2', 'sine'):
position_embedding = PositionEmbeddingSine(num_pos_feats=N_steps, normalize=True)
else:
raise ValueError(f"not supported {mode}")
return position_embedding
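if __name__ == "__main__":
    # Minimal usage sketch (added for illustration, not part of the original
    # module): build the sine encoding for a small random 3D feature map.
    # hidden_dim=192 is an arbitrary choice that divides evenly into the
    # (d, y, x) components.
    x = torch.randn(2, 192, 4, 8, 8)  # (batch, channels, depth, height, width)
    pos_enc = build_position_encoding(mode='sine', hidden_dim=192)
    print(pos_enc(x).shape)  # expected: torch.Size([2, 192, 4, 8, 8])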
| 3,038 |
search/bf.py
|
tekjar/fcnd.projects
| 0 |
2025693
|
import numpy as np
from enum import Enum
from queue import Queue
# breadth-first search is to keep track of visited cells and all your partial plans
# and always expand the shortest partial plan first
class Action(Enum):
LEFT = (0, -1)
RIGHT = (0, 1)
UP = (-1, 0)
DOWN = (1, 0)
def __str__(self):
if self == self.LEFT:
return '<'
elif self == self.RIGHT:
return '>'
elif self == self.UP:
return '^'
elif self == self.DOWN:
return 'v'
class Traveller:
def __init__(self, map):
self.map = map
self.start = None
self.goal = None
def map_matrix_shape(self):
'''
returns ROWS_MAX_INDEX X COLUMNS_MAX_INDEX
'''
return (self.map.shape[0] - 1, self.map.shape[1] - 1)
def valid_actions(self, current_position):
current_row, current_column = current_position[0], current_position[1]
up_index = current_row - 1
down_index = current_row + 1
left_index = current_column - 1
right_index = current_column + 1
max_row_index, max_column_index = self.map_matrix_shape()
valid = [Action.UP, Action.DOWN, Action.LEFT, Action.RIGHT]
# print('row = ', current_row, 'column = ', current_column)
        # upward movement blocked: out of the map or into an obstacle
        if up_index < 0 or self.map[up_index][current_column] == 1:
            valid.remove(Action.UP)
        # downward movement blocked: out of the map or into an obstacle
        if down_index > max_row_index or self.map[down_index][current_column] == 1:
            valid.remove(Action.DOWN)
        # leftward movement blocked: out of the map or into an obstacle
        if left_index < 0 or self.map[current_row][left_index] == 1:
            valid.remove(Action.LEFT)
        # rightward movement blocked: out of the map or into an obstacle
        if right_index > max_column_index or self.map[current_row][right_index] == 1:
            valid.remove(Action.RIGHT)
return valid
def travel(self, start, goal):
self.start = start
self.goal = goal
        # {current_position: (parent, action)}
paths = {}
visited = set()
queue = Queue()
found = False
queue.put(start)
# there are still nodes to traverse through
while not queue.empty():
current = queue.get()
if current == goal:
found = True
break
valid_actions = self.valid_actions(current)
for act in valid_actions:
action = act.value
neighbour = current[0] + action[0], current[1] + action[1]
#print('current = ', current, ' action = ', action, ' after action = ', neighbour)
if neighbour not in visited:
visited.add(neighbour)
queue.put(neighbour)
paths[neighbour] = (current, act)
return found, paths
def trace_back(self, paths):
path = []
# trace back from goal
next = self.goal
while next != self.start:
next, action = paths[next]
path.append(action)
path = path[::-1]
return path
# Define a function to visualize the path
def visualize(self, path):
"""
Given a grid, path and start position
return visual of the path to the goal.
'S' -> start
'G' -> goal
'O' -> obstacle
' ' -> empty
"""
# Define a grid of string characters for visualization
        sgrid = np.zeros(np.shape(self.map), dtype=str)  # np.str was removed in newer NumPy; the builtin str behaves the same here
sgrid[:] = ' '
sgrid[self.map[:] == 1] = 'O'
pos = self.start
# Fill in the string grid
for action in path:
#print('pos = ', pos, ' action = ', action)
da = action.value
sgrid[pos[0], pos[1]] = str(action)
pos = (pos[0] + da[0], pos[1] + da[1])
sgrid[pos[0], pos[1]] = 'G'
sgrid[self.start[0], self.start[1]] = 'S'
print(sgrid)
minimap = np.array([
[0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 1, 0, 0],
[0, 0, 0, 1, 1, 0],
[0, 0, 0, 1, 0, 0],
])
traveller = Traveller(minimap)
found, paths = traveller.travel((0,0), (4, 4))
path = traveller.trace_back(paths) if found else exit('No path found')
traveller.visualize(path)
| 4,403 |
World-Happiness-Linear-Regression/main.py
|
michaeljspagna/World-Happiness-Linear-Regression
| 0 |
2024921
|
from datahandler import DataHandler
from linearregression import LinearRegressionModel
if __name__ == "__main__":
dh = DataHandler()
X_train, Y_train, X_test, Y_test = dh.getTrainAndTest()
modelT = LinearRegressionModel(X_train.shape[1])
modelT.train(X_train, Y_train, 0.5, 700)
modelT.test(X_test, Y_test)
| 328 |
Learning/Test31_ClassvsInstanceVariables.py
|
liang1024/Python
| 1 |
2025019
|
'''
Class variables vs. instance variables
'''
class Girl:
gender="female"
def __init__(self,name):
self.name=name
r=Girl("Rachel")
s=Girl("stanky")
print(r.gender)
print(s.gender)
print(r.name)
print(s.name)
| 196 |
tests/test_bianalyzer.py
|
luntos/bianalyzer
| 4 |
2025150
|
# -*- coding: utf-8 -*-
import unittest
from bianalyzer.abstracts import download_abstracts, IEEEKeywordDownloader, DLTermType
from bianalyzer import BianalyzerText
from bianalyzer.keywords import extract_keywords_via_textrank, extract_keywords_via_frequency
from bianalyzer.relevance import construct_similarity_matrix, construct_relevance_matrix
from bianalyzer.biclustering import get_keyword_text_biclusters, get_keyword_biclusters, GreedyBBox, BBox, SpectralGraphCoclustering
springer_api_key = '' # Put your Springer API key here
class AbstractsTestCase(unittest.TestCase):
def test_download_ieee(self):
articles = download_abstracts('IEEE', 'motion recognition', 100)
self.assertEqual(len(articles), 100)
for article in articles:
self.assertEqual(isinstance(article.abstract_text, str), True)
@unittest.skipIf(springer_api_key is None or springer_api_key == '', 'A user should specify their key')
def test_download_springer(self):
articles = download_abstracts('Springer', 'motion recognition', 300, springer_api_key)
for article in articles:
self.assertGreater(len(article.abstract_text), 50)
self.assertEqual(len(articles), 300)
for article in articles:
            self.assertIsInstance(article.abstract_text, str)  # 'unicode' does not exist on Python 3
def test_ieee_keywords_download(self):
articles = download_abstracts('IEEE', 'motion recognition', 10)
downloader = IEEEKeywordDownloader(articles, DLTermType.controlled)
keywords = downloader.download_ieee_keywords()
self.assertEqual((len(keywords) > 0), True)
class KeywordsTestCase(unittest.TestCase):
def setUp(self):
super(KeywordsTestCase, self).setUp()
texts = ['This paper focuses on the development of an effective cluster validity measure with outlier detection and cluster merging algorithms for support vector clustering (SVC)',
'In a multi-cluster tool, there may be both single and dual-arm cluster tools. Such a multi-cluster tool is called hybrid multi-cluster tool',
'Fuel maps are critical tools for spatially explicit fire simulation and analysis. Many diverse techniques have been used to create spatial fuel data products including field assessment, association, remote sensing, and biophysical modeling.']
b_texts = [BianalyzerText(t) for t in texts]
self.texts = b_texts
def test_textrank(self):
keywords = extract_keywords_via_textrank(self.texts, window_size=3, keyword_limit=10, frequency_filter=False,
stemming_filter=False)
self.assertTrue(len(keywords) >= 10)
def test_frequency(self):
keywords = extract_keywords_via_frequency(self.texts, max_freq=50)
self.assertTrue(len(keywords) > 0)
class RelevanceTestCase(unittest.TestCase):
def setUp(self):
super(RelevanceTestCase, self).setUp()
texts = ['This paper focuses on the development of an effective cluster validity measure with outlier detection and cluster merging algorithms for support vector clustering (SVC)',
'In a multi-cluster tool, there may be both single and dual-arm cluster tools. Such a multi-cluster tool is called hybrid multi-cluster tool',
'Fuel maps are critical tools for spatially explicit fire simulation and analysis. Many diverse techniques have been used to create spatial fuel data products including field assessment, association, remote sensing, and biophysical modeling.']
b_texts = [BianalyzerText(t) for t in texts]
self.texts = b_texts
self.keywords = ['development', 'cluster validity', 'measure', 'simulation', 'tools', 'cluster', 'fuel']
def test_relevance_matrix(self):
matrix = construct_relevance_matrix(self.keywords, self.texts, 'bm25').matrix
self.assertEqual(len(matrix), len(self.keywords))
self.assertEqual(len(matrix[0]), len(self.texts))
for row in matrix:
row_max = max(row)
self.assertGreater(row_max, 0.0)
def test_similarity_matrix(self):
rel_matrix = construct_relevance_matrix(self.keywords, self.texts, 'frequency')
sim_matrix = construct_similarity_matrix(rel_matrix, 0.1).matrix
self.assertEqual(len(sim_matrix), len(self.keywords))
self.assertEqual(len(sim_matrix[0]), len(self.keywords))
class BiclusteringTestCase(unittest.TestCase):
def setUp(self):
texts = ['This paper focuses on the development of an effective cluster validity measure with outlier detection and cluster merging algorithms for support vector clustering (SVC)',
'In a multi-cluster tool, there may be both single and dual-arm cluster tools. Such a multi-cluster tool is called hybrid multi-cluster tool',
'Fuel maps are critical tools for spatially explicit fire simulation and analysis. Many diverse techniques have been used to create spatial fuel data products including field assessment, association, remote sensing, and biophysical modeling.']
b_texts = [BianalyzerText(t) for t in texts]
self.texts = b_texts
self.keywords = ['development', 'cluster validity', 'measure', 'simulation', 'tools', 'cluster', 'fuel']
self.relevance_matrix = construct_relevance_matrix(self.keywords, self.texts, 'tf-idf')
self.similarity_matrix = construct_similarity_matrix(self.relevance_matrix, 0.2)
def test_relevance_biclustering(self):
result = get_keyword_text_biclusters(self.relevance_matrix, BBox)
self.assertGreater(len(result.biclusters), 0)
result = get_keyword_text_biclusters(self.relevance_matrix, GreedyBBox)
self.assertGreater(len(result.biclusters), 0)
result = get_keyword_text_biclusters(self.relevance_matrix, SpectralGraphCoclustering, biclusters_number=5)
self.assertGreater(len(result.biclusters), 0)
def test_similarity_biclustering(self):
result = get_keyword_biclusters(self.similarity_matrix, BBox)
self.assertGreater(len(result.biclusters), 0)
result = get_keyword_biclusters(self.similarity_matrix, GreedyBBox)
self.assertGreater(len(result.biclusters), 0)
result = get_keyword_biclusters(self.similarity_matrix, SpectralGraphCoclustering, biclusters_number=5)
self.assertGreater(len(result.biclusters), 0)
| 6,439 |
geodata/admin.py
|
wycemiro/gpstracker
| 1 |
2025676
|
from django.contrib.gis import admin
# Register your models here.
from .models import Location, Gpslocation, Device
# subclass the GeoModelAdmin to use the locally hosted OpenLayers library
class olGeoModelAdmin(admin.GeoModelAdmin):
openlayers_url = 'OpenLayers.js'
# subclass the OSMGeoAdmin to use the locally hosted OpenLayers library
class olOSMGeoAdmin(admin.OSMGeoAdmin):
openlayers_url = 'OpenLayers.js'
# register an admin tool for the Location model
# admin.site.register(Location, olGeoModelAdmin)
# the OSMGeoAdmin tool uses the openstreetmap data for a nicer experience
admin.site.register(Location, olOSMGeoAdmin)
admin.site.register(Gpslocation, olOSMGeoAdmin)
admin.site.register(Device)
| 718 |
orchestrator/processor/l2_pipelines/water_masking_computation.py
|
spacebel/MAJA
| 57 |
2025579
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 Centre National d'Etudes Spatiales (CNES)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
###################################################################################################
#
# o o
# oo oo oo o oo ,-.
# o o o o o o o o o \_/
# o o o o o o o o {|||)<
# o o oooooo o oooooo / \
# o o o o o o o o `-^
# o o o o oooo o o
#
###################################################################################################
orchestrator.processor.base_processor -- shortdesc
orchestrator.processor.base_processor is the base of all processors
###################################################################################################
"""
from orchestrator.common.logger.maja_logging import configure_logger
from orchestrator.cots.otb.otb_app_handler import OtbAppHandler
from orchestrator.modules.maja_module import MajaModule
from orchestrator.common.maja_exceptions import *
import orchestrator.common.constants as constants
import os
LOGGER = configure_logger(__name__)
class MajaWaterMasking(MajaModule):
"""
classdocs
"""
NAME = "WaterMask"
def __init__(self):
"""
Constructor
"""
super(MajaWaterMasking, self).__init__()
self._water_app = None
self.in_keys_to_check = ["Params.Caching","Params.InitMode", "AppHandler", "Plugin", "L1Reader","L2Reader","L2COMM", "DEM"]
self.out_keys_to_check = ["RayleighIPTOCR",constants.CLOUD_MASK_ALL_CLOUDS,"dtm_shd","SunglintFlag"]
self.out_keys_provided = ["WaterMask", "PossibleWaterMask", "TestedWaterMask"]
def run(self, dict_of_input, dict_of_output):
LOGGER.info("Water masking computation start")
caching = dict_of_input.get("Params").get("Caching")
if caching:
water_working = dict_of_input.get("AppHandler").get_directory_manager().get_temporary_directory("WaterMaskingProc_",
do_always_remove=True)
else:
water_working = ""
water_mask = os.path.join(water_working, "water_sub.tif")
possible_water_mask = os.path.join(water_working, "possible_water_sub.tif")
tested_water_mask = os.path.join(water_working, "tested_water_sub.tif")
param_water = {"tocr": dict_of_output.get("RayleighIPTOCR"),
"edg": dict_of_input.get("L1Reader").get_value("IPEDGSubOutput"),
"cldsum": dict_of_output.get(constants.CLOUD_MASK_ALL_CLOUDS),
"dtmshd": dict_of_output["dtm_shd"],
"srtmwat": dict_of_input.get("DEM").MSK,
"demslc": dict_of_input.get("DEM").SLC,
"redbandtocr": dict_of_input.get("Params").get("RedBandIndex_DateD"),
"nirbandtocr": dict_of_input.get("Params").get("NIRBandIndex_DateD"),
"nirbandrcr": dict_of_input.get("Params").get("NIRBandIndex_DateDm1"),
"waterndvithreshold": dict_of_input.get("L2COMM").get_value_f("WaterNDVIThreshold"),
"waterreflectancevariationthreshold": dict_of_input.get("L2COMM").get_value_f(
"WaterReflectanceVariationThreshold"),
"maximumsunglintreflectance": dict_of_input.get("L2COMM").get_value_f(
"MaximumSunglintReflectance"),
"srtmgaussiansigma": dict_of_input.get("L2COMM").get_value_f("SRTMSigmaSmoothing"),
"srtmpercent": dict_of_input.get("L2COMM").get_value_f("SRTMPercent"),
"watermasknumber": dict_of_input.get("L2COMM").get_value_i("WaterMaskNumber"),
"minpercentpossiblewater": dict_of_input.get("L2COMM").get_value_f("MinPercentPossibleWater"),
"waterslopethreshold": dict_of_input.get("L2COMM").get_value_f("WaterSlopeThreshold"),
"waterredreflectancethreshold": dict_of_input.get("L2COMM").get_value_f(
"WaterRedReflectanceThreshold"),
"reall2nodata": dict_of_input.get("Params").get("RealL2NoData"),
"was": water_mask + ":uint8",
"pwa": possible_water_mask + ":uint8",
"twa": tested_water_mask + ":uint8"
}
if not dict_of_input.get("Params").get("InitMode"):
param_water["l2rcr"] = dict_of_input.get("L2Reader").get_value("RCRImage")
param_water["l2pwa"] = dict_of_input.get("L2Reader").get_value("PWAImage")
param_water["l2twa"] = dict_of_input.get("L2Reader").get_value("TWAImage")
if dict_of_input.get("Params").get("InitMode"):
param_water["initmode"] = dict_of_input.get("Params").get("InitMode")
if dict_of_output["SunglintFlag"]:
param_water["sunglintflag"] = dict_of_output["SunglintFlag"]
self._water_app = OtbAppHandler("WaterMask", param_water, write_output=True)
# Update output dictionary
dict_of_output["WaterMask"] = self._water_app.getoutput()["was"]
dict_of_output["PossibleWaterMask"] = self._water_app.getoutput()["pwa"]
dict_of_output["TestedWaterMask"] = self._water_app.getoutput()["twa"]
| 6,086 |
Tarea4/bitmap.py
|
ciborg245/bmp_render
| 0 |
2024721
|
import struct
def char(c):
return struct.pack("=c", c.encode('ascii'))
def word(w):
return struct.pack("=h", w)
def dword(d):
return struct.pack("=l", d)
def color(r, g, b):
return bytes([b, g, r])
class Bitmap(object):
def __init__(self, width, height):
self.width = width
self.height = height
self.pixels = []
self.clear()
def clear(self):
self.pixels = [
[color(10, 10, 10) for x in range(self.width)]
for y in range (self.height)
]
def write(self, filename):
f = open(filename, 'bw')
#file header (14)
f.write(char('B'))
f.write(char('M'))
f.write(dword(14 + 40 + self.width * self.height * 3))
f.write(dword(0))
f.write(dword(14 + 40))
#image header (40)
f.write(dword(40))
f.write(dword(self.width))
f.write(dword(self.height))
f.write(word(1))
f.write(word(24))
f.write(dword(0))
f.write(dword(self.width * self.height * 3))
f.write(dword(0))
f.write(dword(0))
f.write(dword(0))
f.write(dword(0))
for x in range(self.height):
for y in range(self.width):
f.write(self.pixels[x][y])
f.close()
def point(self, x, y, color):
self.pixels[x][y] = color
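if __name__ == "__main__":
    # Minimal usage sketch (added for illustration, not part of the original
    # module): render a 100x100 bitmap with a short red diagonal and write it
    # to disk; the output file name is arbitrary.
    bmp = Bitmap(100, 100)
    for i in range(50):
        bmp.point(i, i, color(255, 0, 0))
    bmp.write('out.bmp')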
| 1,375 |
barcode_widget/__manifest__.py
|
trojikman/misc-addons
| 0 |
2023520
|
{
"name": "Barcode Widget",
"version": "13.0.1.0.1",
"category": "Widget",
"author": "The Gok Team, IT-Projects LLC",
# Original module didn't have license information.
# But App store pages says that it's LGPL-3
# https://www.odoo.com/apps/modules/9.0/barcode_widget/
#
# -- <NAME>
"license": "Other OSI approved licence", # MIT
"depends": ["web"],
"data": ["views/assets.xml"],
"qweb": ["static/src/xml/barcode_widget.xml"],
"js": [],
"test": [],
"demo": [],
"installable": False,
"application": True,
}
| 579 |
problem/rayleigh_benard.py
|
wence-/composable-solvers
| 1 |
2025633
|
# flake8: noqa: F403
from __future__ import absolute_import
from argparse import ArgumentParser
from firedrake import *
from firedrake.utils import cached_property
from . import baseproblem
class Problem(baseproblem.Problem):
name = "Rayleigh-Benard"
parameter_names = ("mumps", "pcd_mg")
@property
def mumps(self):
return {"snes_type": "newtonls",
"snes_monitor": True,
"snes_converged_reason": True,
"ksp_converged_reason": True,
"snes_rtol": 1e-8,
"snes_linesearch_type": "basic",
"ksp_type": "preonly",
"mat_type": "aij",
"pc_type": "lu",
"pc_factor_mat_solver_package": "mumps"}
@property
def hypre(self):
return {"pc_hypre_type": "boomeramg",
"pc_hypre_boomeramg_no_CF": True,
"pc_hypre_boomeramg_coarsen_type": "HMIS",
"pc_hypre_boomeramg_interp_type": "ext+i",
"pc_hypre_boomeramg_P_max": 4,
"pc_hypre_boomeramg_agg_nl": 1,
"pc_hypre_boomeramg_agg_num_paths": 2}
@property
def pcd_mg(self):
pcd_mg = {"snes_type": "newtonls",
"snes_view": True,
"snes_monitor": True,
"snes_rtol": 1e-8,
"snes_converged_reason": True,
"ksp_converged_reason": True,
"snes_linesearch_type": "basic",
"mat_type": "matfree",
"ksp_type": "fgmres",
"ksp_monitor": True,
"ksp_gmres_modifiedgramschmidt": True,
"pc_type": "fieldsplit",
"pc_fieldsplit_type": "multiplicative",
"pc_fieldsplit_0_fields": "0,1",
"pc_fieldsplit_1_fields": "2",
# GMRES on Navier-stokes, with fieldsplit PC.
"fieldsplit_0_ksp_type": "gmres",
"fieldsplit_0_ksp_converged_reason": True,
"fieldsplit_0_ksp_gmres_modifiedgramschmidt": True,
"fieldsplit_0_ksp_rtol": 1e-2,
"fieldsplit_0_pc_type": "fieldsplit",
"fieldsplit_0_pc_fieldsplit_type": "schur",
"fieldsplit_0_pc_fieldsplit_schur_fact_type": "lower",
# HYPRE on velocity block
"fieldsplit_0_fieldsplit_0_ksp_type": "preonly",
"fieldsplit_0_fieldsplit_0_pc_type": "python",
"fieldsplit_0_fieldsplit_0_pc_python_type": "firedrake.AssembledPC",
"fieldsplit_0_fieldsplit_0_assembled_mat_type": "aij",
"fieldsplit_0_fieldsplit_0_assembled_pc_type": "hypre",
# PCD on the pressure block
"fieldsplit_0_fieldsplit_1_ksp_type": "preonly",
"fieldsplit_0_fieldsplit_1_pc_type": "python",
"fieldsplit_0_fieldsplit_1_pc_python_type": "firedrake.PCDPC",
# Matrix-free Fp application
"fieldsplit_0_fieldsplit_1_pcd_Fp_mat_type": "matfree",
# sor on assembled mass matrix
"fieldsplit_0_fieldsplit_1_pcd_Mp_mat_type": "aij",
"fieldsplit_0_fieldsplit_1_pcd_Mp_ksp_type": "richardson",
"fieldsplit_0_fieldsplit_1_pcd_Mp_ksp_max_it": 2,
"fieldsplit_0_fieldsplit_1_pcd_Mp_pc_type": "sor",
# hypre on assembled stiffness matrix
"fieldsplit_0_fieldsplit_1_pcd_Kp_ksp_type": "preonly",
"fieldsplit_0_fieldsplit_1_pcd_Kp_mat_type": "aij",
"fieldsplit_0_fieldsplit_1_pcd_Kp_pc_type": "telescope",
"fieldsplit_0_fieldsplit_1_pcd_Kp_pc_telescope_reduction_factor": 6,
"fieldsplit_0_fieldsplit_1_pcd_Kp_telescope_pc_type": "hypre",
# hypre on temperature block
"fieldsplit_1_ksp_type": "gmres",
"fieldsplit_1_ksp_converged_reason": True,
"fieldsplit_1_ksp_rtol": 1e-4,
"fieldsplit_1_pc_type": "python",
"fieldsplit_1_pc_python_type": "firedrake.AssembledPC",
"fieldsplit_1_assembled_mat_type": "aij",
"fieldsplit_1_assembled_pc_type": "telescope",
"fieldsplit_1_assembled_pc_telescope_reduction_factor": 6,
"fieldsplit_1_assembled_telescope_pc_type": "hypre"}
for k, v in self.hypre.items():
if k.startswith("pc_hypre_boomeramg"):
pcd_mg["fieldsplit_1_assembled_telescope_%s" % k] = v
pcd_mg["fieldsplit_0_fieldsplit_1_pcd_Kp_telescope_%s" % k] = v
pcd_mg["fieldsplit_0_fieldsplit_0_assembled_%s" % k] = v
return pcd_mg
@cached_property
def Ra(self):
return Constant(self.args.Ra)
@cached_property
def Pr(self):
return Constant(self.args.Pr)
@property
def vertical_temperature(self):
return self.args.vertical_temperature
@staticmethod
def argparser():
parser = ArgumentParser(description="""Set options for driven-cavity Navier-Stokes. Uses Taylor-Hood elements""",
add_help=False)
parser.add_argument("--degree", action="store", default=1,
help="Polynomial degree of the pressure space",
type=int)
parser.add_argument("--size", action="store", default=10,
help="Number of cells in each spatial direction",
type=int)
parser.add_argument("--dimension", action="store", default=2, choices=[2, 3],
help="Spatial dimension of problem",
type=int)
parser.add_argument("--Ra", action="store", default=200,
help="Rayleigh number",
type=float)
parser.add_argument("--Pr", action="store", default=6.8,
help="Prandtl number",
type=float)
parser.add_argument("--vertical-temperature", action="store_true",
default=False,
help="Apply a vertical temperature gradient?")
parser.add_argument("--help", action="store_true",
help="Show help")
return parser
@cached_property
def function_space(self):
V = VectorFunctionSpace(self.mesh, "CG", self.degree+1)
P = FunctionSpace(self.mesh, "CG", self.degree)
T = FunctionSpace(self.mesh, "CG", self.degree)
return V*P*T
@cached_property
def F(self):
global dx
W = self.function_space
u, p, T = split(self.u)
v, q, S = TestFunctions(W)
if self.dimension == 2:
g = Constant((0, -1))
else:
g = Constant((0, 0, -1))
dx = dx(degree=2*(self.degree))
F = (
inner(grad(u), grad(v))*dx
+ inner(dot(grad(u), u), v)*dx
- inner(p, div(v))*dx
+ (self.Ra/self.Pr)*inner(T*g, v)*dx
+ inner(div(u), q)*dx
+ inner(dot(grad(T), u), S)*dx
+ 1/self.Pr * inner(grad(T), grad(S))*dx
)
return F
@cached_property
def bcs(self):
if self.dimension == 2:
if self.vertical_temperature:
high_T = 3 # bottom
low_T = 4 # top
else:
high_T = 1 # left
low_T = 2 # right
else:
if self.vertical_temperature:
high_T = 5 # bottom
low_T = 6 # top
else:
high_T = 1 # left
low_T = 2 # right
return (DirichletBC(self.function_space.sub(0), zero(self.dimension), "on_boundary"),
DirichletBC(self.function_space.sub(2), Constant(1.0), high_T),
DirichletBC(self.function_space.sub(2), Constant(0.0), low_T))
@cached_property
def nullspace(self):
return MixedVectorSpaceBasis(self.function_space,
[self.function_space.sub(0),
VectorSpaceBasis(constant=True),
self.function_space.sub(2)])
@cached_property
def appctx(self):
return {"velocity_space": 0}
@cached_property
def output_fields(self):
u, p, T = self.u.split()
u.rename("Velocity")
p.rename("Pressure")
T.rename("Temperature")
return (u, p, T)
| 8,802 |
setup.py
|
Ouranosinc/malleefowl
| 0 |
2025032
|
# -*- coding: utf-8 -*-
from setuptools import find_packages
from setuptools import setup
version = __import__('malleefowl').__version__
description = 'Malleefowl has WPS processes for climate data access and workflows.'
long_description = (
open('README.rst').read() + '\n' +
open('AUTHORS.rst').read() + '\n' +
open('CHANGES.rst').read()
)
reqs = [line.strip() for line in open('requirements/deploy.txt')]
classifiers = [
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python',
'Topic :: Scientific/Engineering :: Atmospheric Science',
]
setup(name='malleefowl',
version=version,
description=description,
long_description=long_description,
classifiers=classifiers,
keywords='wps pywps python malleefowl netcdf esgf',
author='Birdhouse',
url='https://github.com/bird-house/malleefowl',
license="Apache License v2.0",
packages=find_packages(),
include_package_data=True,
zip_safe=False,
test_suite='malleefowl',
install_requires=reqs,
entry_points={
'console_scripts': [
'malleefowl=malleefowl:main',
]},
)
| 1,342 |
Programacion Funcional/MIs Conseptos/Sample2.py
|
Chente31209/SeptimoISC
| 0 |
2025023
|
def Elevar(Numero):
return Numero * Numero
def SumarElevados(Num1 ,Num2):
return Elevar(Num1)+Elevar(Num2)
print(SumarElevados(1,2))
#
#
# .
| 154 |
dumpres/directory_save_strategy.py
|
xDiaym/vkparse
| 1 |
2024959
|
from pathlib import Path
from typing import Any
from converters.abstract_converter import AbstractConverter
from dumpres.abstract_save_strategy import AbstractSaveStrategy
from dumpres.buffered_file_dumper import BufferedFileDumper
from models.message import Message
class DirectorySaveStrategy(AbstractSaveStrategy):
def __init__(
self,
path: Path,
file_ext: str,
converter: AbstractConverter,
*args: Any,
**kwargs: Any
) -> None:
self._dumper = BufferedFileDumper(
path, file_ext, converter, *args, **kwargs
)
def on_file(
self, directory_name: str, file_name: str, messages: list[Message]
) -> None:
self._dumper.add(messages)
def on_directory_end(self, directory_name: str) -> None:
# Does not create new dirs, save file in out directory as is.
self._dumper.write("", directory_name)
def on_end(self) -> None:
# Do nothing.
pass
| 987 |
seq2seq/s2s_test.py
|
sorokine/NeuralTripleTranslation
| 47 |
2025327
|
import pickle
import numpy as np
from utils.seq2seq.learning_core_20180312 import Seq2seqCore
def feeder_generator(xy_tuple):
feed_dict = dict()
token_store_data, ontology_store_data = xy_tuple
assert len(token_store_data) == len(ontology_store_data)
x_data = list()
x_length = list()
y_data = list()
y_length = list()
for one_idx in range(len(token_store_data)):
one_target = token_store_data[one_idx]
x_length.append(len(one_target))
one_target += [0] * (64 - len(one_target))
x_data.append(one_target)
one_target = ontology_store_data[one_idx]
y_length.append(5)
y_data.append(one_target)
feed_dict["encoder_input"] = np.array(x_data, dtype=np.int32).T
feed_dict["decoder_target"] = np.array(y_data, dtype=np.int32).T
feed_dict["encoder_length"] = np.array(x_length, dtype=np.int32)
feed_dict["decoder_length"] = np.array(y_length, dtype=np.int32)
return feed_dict
def s2s_test():
token_idx_dict, idx_token_dict, ontology_idx_dict, idx_ontology_dict, train_token, test_token, train_ontology, test_ontology = pickle.load(
open("data/split/20180316.pkl", "rb"))
s2s_core = Seq2seqCore(encoder_vocab_size=len(token_idx_dict),
decoder_vocab_size=len(ontology_idx_dict))
for epoch_number in range(1, 200):
print(epoch_number)
s2s_core.load("models/%05d.tfmodel"%epoch_number)
input_feed_dict = feeder_generator((test_token.copy(), test_ontology.copy()))
print(s2s_core.evaluate(input_feed_dict))
if __name__ == "__main__":
s2s_test()
| 1,632 |
modin/data_management/functions/mapfunction.py
|
xrmx/modin
| 0 |
2025201
|
from .function import Function
class MapFunction(Function):
@classmethod
def call(cls, function, *call_args, **call_kwds):
def caller(query_compiler, *args, **kwargs):
return query_compiler.__constructor__(
query_compiler._modin_frame._map(
lambda x: function(x, *args, **kwargs), *call_args, **call_kwds
)
)
return caller
@classmethod
def register(cls, function, *args, **kwargs):
return cls.call(function, *args, **kwargs)
| 563 |
src/widgets/verse_list_widget/verse_widget/verse_text_label.py
|
devs-7/bible-projector-python
| 0 |
2024492
|
from PyQt5 import QtWidgets, QtCore
class VerseTextLabel(QtWidgets.QLabel):
def __init__(self, parent=None):
super().__init__(parent)
self.setWordWrap(True)
self.setAlignment(QtCore.Qt.AlignTop)
self.setStyleSheet('''
font-size: 12px;
''')
| 299 |
rlpyt/agents/dqn/dsr/dsr_agent.py
|
2016choang/sfl
| 2 |
2024005
|
import torch
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.nn.parallel import DistributedDataParallelCPU as DDPC
from rlpyt.agents.base import BaseAgent, AgentStep
from rlpyt.agents.dqn.epsilon_greedy import EpsilonGreedyAgentMixin
from rlpyt.distributions.epsilon_greedy import EpsilonGreedy
from rlpyt.models.utils import strip_ddp_state_dict
from rlpyt.utils.buffer import buffer_to
from rlpyt.utils.logging import logger
from rlpyt.utils.collections import namedarraytuple
AgentInfo = namedarraytuple("AgentInfo", "a")
class DsrAgent(EpsilonGreedyAgentMixin, BaseAgent):
def __call__(self, observation):
model_inputs = buffer_to(observation,
device=self.device)
dsr = self.model(model_inputs, mode='dsr')
return dsr.cpu()
def encode(self, observation):
model_inputs = buffer_to(observation,
device=self.device)
features = self.model(model_inputs, mode='encode')
return features.cpu()
def q_estimate(self, observation):
model_inputs = buffer_to(observation,
device=self.device)
q = self.model(model_inputs, mode='q')
return q.cpu()
def initialize(self, env_spaces, share_memory=False,
global_B=1, env_ranks=None):
super().initialize(env_spaces, share_memory,
global_B=global_B, env_ranks=env_ranks)
self.target_model = self.ModelCls(**self.env_model_kwargs,
**self.model_kwargs)
self.target_model.load_state_dict(self.model.state_dict())
self.distribution = EpsilonGreedy(dim=env_spaces.action.n)
if env_ranks is not None:
self.make_vec_eps(global_B, env_ranks)
def to_device(self, cuda_idx=None):
super().to_device(cuda_idx)
self.target_model.to(self.device)
def state_dict(self):
return dict(model=self.model.state_dict(),
target=self.target_model.state_dict())
@torch.no_grad()
def step(self, observation, prev_action, prev_reward):
if self.distribution.epsilon >= 1.0:
if prev_action.shape:
q = torch.zeros(prev_action.shape[0], self.distribution.dim)
else:
q = torch.zeros(self.distribution.dim)
else:
model_inputs = buffer_to(observation,
device=self.device)
features = self.model(model_inputs, mode='encode')
model_inputs = buffer_to(features,
device=self.device)
dsr = self.model(model_inputs, mode='dsr')
model_inputs = buffer_to(dsr,
device=self.device)
q = self.model(model_inputs, mode='q')
q = q.cpu()
action = self.distribution.sample(q)
agent_info = AgentInfo(a=action)
# action, agent_info = buffer_to((action, agent_info), device="cpu")
return AgentStep(action=action, agent_info=agent_info)
def target(self, observation):
model_inputs = buffer_to(observation,
device=self.device)
target_dsr = self.model(model_inputs, mode='dsr')
return target_dsr.cpu()
def update_target(self):
self.target_model.load_state_dict(
strip_ddp_state_dict(self.model.state_dict()))
def dsr_parameters(self):
return [param for name, param in self.model.named_parameters() if 'dsr' in name]
def parameters(self):
return [param for name, param in self.model.named_parameters()]
| 3,505 |
hax/hax/queue/__init__.py
|
DmitryKuzmenko/cortx-hare
| 0 |
2024817
|
import json
import logging
from queue import Queue
from typing import Any, Callable, Dict, List, Optional, Tuple
from hax.message import BroadcastHAStates
from hax.motr.delivery import DeliveryHerald
from hax.motr.planner import WorkPlanner
from hax.queue.confobjutil import ConfObjUtil
from hax.types import (Fid, HaLinkMessagePromise, HAState, MessageId,
ObjHealth)
LOG = logging.getLogger('hax')
class BQProcessor:
"""
Broadcast Queue Processor.
This is the place where a real processing logic should be located.
"""
def __init__(self, planner: WorkPlanner, delivery_herald: DeliveryHerald,
conf_obj_util: ConfObjUtil):
self.planner = planner
self.confobjutil = conf_obj_util
self.herald = delivery_herald
def process(self, message: Tuple[int, Any]) -> None:
(i, msg) = message
LOG.debug('Message #%d received: %s (type: %s)', i, msg,
type(msg).__name__)
try:
self.payload_process(msg)
except Exception:
LOG.exception(
'Failed to process a message #%d.'
' The message is skipped.', i)
LOG.debug('Message #%d processed', i)
def payload_process(self, msg: str) -> None:
data = None
try:
data = json.loads(msg)
except json.JSONDecodeError:
LOG.error('Cannot parse payload, invalid json')
return
payload = data['payload']
msg_type = data['message_type']
handlers: Dict[str, Callable[[Dict[str, Any]], None]] = {
'M0_HA_MSG_NVEC': self.handle_device_state_set,
'STOB_IOQ_ERROR': self.handle_ioq_stob_error,
}
if msg_type not in handlers:
            LOG.warning('Unsupported message type given: %s. Message skipped.',
                        msg_type)
return
handlers[msg_type](payload)
def handle_device_state_set(self, payload: Dict[str, Any]) -> None:
# To add check for multiple object entries in a payload.
# for objinfo in payload:
hastate: Optional[HAState] = self.to_ha_state(payload)
if not hastate:
LOG.debug('No ha states to broadcast.')
return
q: Queue = Queue(1)
LOG.debug('HA broadcast, node: %s device: %s state: %s',
payload['node'], payload['device'], payload['state'])
self.planner.add_command(
BroadcastHAStates(states=[hastate], reply_to=q))
ids: List[MessageId] = q.get()
self.herald.wait_for_any(HaLinkMessagePromise(ids))
def handle_ioq_stob_error(self, payload: Dict[str, Any]) -> None:
fid = Fid.parse(payload['conf_sdev'])
if fid.is_null():
LOG.debug('Fid is 0:0. Skipping the message.')
return
q: Queue = Queue(1)
self.planner.add_command(
BroadcastHAStates(
states=[HAState(fid,
status=ObjHealth.FAILED)], reply_to=q))
ids: List[MessageId] = q.get()
self.herald.wait_for_any(HaLinkMessagePromise(ids))
def to_ha_state(self, objinfo: Dict[str, str]) -> Optional[HAState]:
hastate_to_objstate = {
'online': ObjHealth.OK,
'failed': ObjHealth.FAILED,
'offline': ObjHealth.OFFLINE,
'repair': ObjHealth.REPAIR,
'repaired': ObjHealth.REPAIRED,
'rebalance': ObjHealth.REBALANCE
}
try:
sdev_fid = self.confobjutil.drive_to_sdev_fid(
objinfo['node'], objinfo['device'])
state = hastate_to_objstate[objinfo['state']]
except KeyError as error:
LOG.error('Invalid json payload, no key (%s) present', error)
return None
return HAState(sdev_fid, status=state)
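# Illustration (added, not part of the original module): payload_process()
# expects a JSON document of roughly this shape, with the keys read by the
# handlers above:
#   {"message_type": "M0_HA_MSG_NVEC",
#    "payload": {"node": "<node name>", "device": "<drive path>", "state": "failed"}}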
| 3,875 |
build/lib/databasemanager-master/databasemanager/settings/_iusersettings.py
|
jowanpittevils/Databasemanager_Signalplotter
| 0 |
2025176
|
#==================================================#
# Authors: <NAME> <<EMAIL>> #
# License: BSD (3-clause) #
#==================================================#
import configparser
import ast
from collections import namedtuple
class _iUserSettings(object):
conf_file = 'config.ini'
user_section = 'User'
default_settings = {
'loading_data_frequency_type': 'similar',
'loading_data_channels': 'None',
'loading_data_missing_channel_type': 'None',
}
default_settings_type = {
'loading_data_frequency_type': str,
'loading_data_channels': list,
'loading_data_missing_channel_type': str,
}
def __init__(self):
self.reload()
def reload(self):
config = configparser.ConfigParser()
config.read(self.conf_file)
if(len(config.sections())==0):
self.__SaveDefault()
config.read(self.conf_file)
assert(len(config.sections())>0)
self.__settings = self.__cast_types(config._sections[self.user_section])
# assing the loaded settings dictionary to the fields of this current settings singleton object
self.__dict__.update(self.__settings)
@staticmethod
def __replaceNone(val):
if(val.lower() == 'none'):
return None
return val
@classmethod
def __cast_types(cls, settings):
for k in settings:
settings[k] = cls.__replaceNone(settings[k])
if(settings[k] is not None):
caster = cls.default_settings_type[k]
if(caster in [list, dict, tuple, set]):
settings[k] = ast.literal_eval(settings[k])
else:
settings[k] = caster(settings[k])
return settings
def __SaveDefault(self):
config = configparser.ConfigParser()
config[self.user_section] = self.default_settings
with open(self.conf_file, 'w') as configfile:
config.write(configfile)
def __repr__(self):
return str(self)
def __str__(self):
return str(self.__settings)
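if __name__ == "__main__":
    # Minimal usage sketch (added for illustration, not part of the original
    # module): the first instantiation writes config.ini with the defaults
    # above, then exposes each setting as an attribute.
    settings = _iUserSettings()
    print(settings.loading_data_frequency_type)  # 'similar' unless overridden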
| 2,358 |
ripcord/tests/api/test_app.py
|
kickstandproject/ripcord
| 1 |
2025370
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2014 IBM Corp.
# Copyright (C) 2013-2014 PolyBeacon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
from oslo.config import cfg
from ripcord.api import app
from ripcord.openstack.common.fixture import config
from ripcord import test
class TestCase(test.TestCase):
def setUp(self):
super(TestCase, self).setUp()
self.CONF = self.useFixture(config.Config()).conf
def test_WSGI_address_family(self):
self.CONF.set_override('host', '::', group='api')
server_cls = app.get_server_cls(cfg.CONF.api.host)
self.assertEqual(server_cls.address_family, socket.AF_INET6)
self.CONF.set_override('host', '127.0.0.1', group='api')
server_cls = app.get_server_cls(cfg.CONF.api.host)
self.assertEqual(server_cls.address_family, socket.AF_INET)
self.CONF.set_override('host', 'ddddd', group='api')
server_cls = app.get_server_cls(cfg.CONF.api.host)
self.assertEqual(server_cls.address_family, socket.AF_INET)
| 1,569 |
mnd_utils/custom_typing.py
|
mnicolas94/python_utils
| 0 |
2025204
|
from abc import abstractmethod
from typing_extensions import Protocol
class Sliceable(Protocol):
@abstractmethod
def __getitem__(self, item): ...
| 157 |
apkkit/base/package.py
|
pombredanne/code.foxkit.us-adelie-apkkit
| 0 |
2023083
|
"""Contains the Package class and related helper classes and functions."""
from jinja2 import Template
import logging
import os
import time
PACKAGE_LOGGER = logging.getLogger(__name__)
PKGINFO_TEMPLATE = Template("""
# Generated by APK Kit for Adélie Linux
# {{ builduser }}@{{ buildhost }} {{ builddate }}
pkgname = {{ package.name }}
pkgver = {{ package.version }}
pkgdesc = {{ package.description }}
arch = {{ package.arch }}
size = {{ package.size }}
{%- if package.license %}
license = {{ package.license }}
{%- endif %}
{%- if package.url %}
url = {{ package.url }}
{%- endif %}
{%- if package.origin %}
origin = {{ package.origin }}
{%- endif %}
{%- if package.provides %}{%- for provided in package.provides %}
provides = {{ provided }}
{%- endfor %}{%- endif %}
{%- if package.depends %}{%- for depend in package.depends %}
depend = {{ depend }}
{%- endfor %}{%- endif %}
{%- if package.replaces %}{%- for replace in package.replaces %}
replaces = {{ replace }}
{%- endfor %}{%- endif %}
{%- if package.install_if %}{%- for iif in package.install_if %}
install_if = {{ iif }}
{%- endfor %}{%- endif %}
builddate = {{ builddate }}
{%- if package.commit %}
commit = {{ package.commit }}
{%- endif %}
{%- if package.data_hash %}
datahash = {{ package.data_hash }}
{%- endif %}
{%- if package.maintainer %}
maintainer = {{ package.maintainer }}
{%- endif %}
""")
"""The template used for generating .PKGINFO"""
class Package:
"""The base package class."""
def __init__(self, name, version, arch, description=None, url=None, size=0,
provides=None, depends=None, license=None, origin=None,
replaces=None, commit=None, maintainer=None, builddate=0,
install_if=None, **kwargs):
"""Initialise a package object.
:param str name:
The name of the package.
:param str version:
The version of the package.
:param str arch:
The architecture of the package.
:param str description:
(Recommended) The description of the package. Defaults to the name
if not set.
:param int size:
(Recommended) The installed size of the package. You almost always
want to set this to something other than 0 if you don't want
unhappy users. :)
:param str url:
(Optional) The URL of the homepage for the package.
:param list provides:
(Optional) One or more virtuals that this package provides.
:param list depends:
(Optional) One or more packages that are required to be installed
to use this package.
:param str license:
(Recommended) The license this package is under.
:param str origin:
(Optional) The origin package, if this package is a subpackage.
Defaults to `name`.
:param list replaces:
(Optional) One or more packages that this package replaces.
:param str commit:
(Recommended) The hash of the git commit the repository was on when
this package was built.
:param str maintainer:
(Recommended) The maintainer of the package.
:param int builddate:
(Optional) The date the package was built, in UNIX timestamp.
Defaults to right now.
:param list install_if:
(Optional) Read the APKBUILD.5 manpage.
"""
self._pkgname = name
self._pkgver = str(version)
self._pkgdesc = description or name
self._url = url
self._size = int(size)
self._arch = arch
self._provides = provides or list()
self._depends = depends or list()
self._replaces = replaces or list()
self._iif = install_if or list()
self._license = license
self._origin = origin or name
self._commit = commit
self._maintainer = maintainer
self._builddate = builddate or time.time()
if '_datahash' in kwargs:
self._datahash = kwargs.pop('_datahash')
if len(kwargs) > 0:
PACKAGE_LOGGER.warning("unknown kwargs in Package: %r", kwargs)
@property
def name(self):
"""The name of the package."""
return self._pkgname
@property
def version(self):
"""The version of the package."""
return self._pkgver
@property
def description(self):
"""The description of the package."""
return self._pkgdesc
@property
def url(self):
"""The URL of the homepage of the package."""
return self._url
@property
def size(self):
"""The installed size of the package in bytes."""
return self._size
@size.setter
def size(self, new_size):
"""Change the installed size of the package in bytes.
.. warning: Internal use only!
:param int new_size:
The new installed size of the package.
"""
self._size = new_size
@property
def arch(self):
"""The architecture of the package."""
return self._arch
@property
def provides(self):
"""The libraries and/or virtuals provided by this package."""
return self._provides
@property
def depends(self):
"""The dependencies of the package."""
return self._depends
@property
def replaces(self):
"""The packages this package replaces."""
return self._replaces
@property
def install_if(self):
"""The packages that pull in this package."""
return self._iif
@property
def license(self):
"""The license of the package."""
return self._license
@property
def origin(self):
"""The origin package of this package."""
return self._origin
@property
def commit(self):
"""The hash of the git commit the build repository was on."""
return self._commit
@property
def maintainer(self):
"""The maintainer of the package."""
return self._maintainer
@property
def data_hash(self):
"""The hash of the package's data, or None if not available."""
return getattr(self, '_datahash', None)
@data_hash.setter
def data_hash(self, hash_):
"""Set the hash of the package's data."""
self._datahash = hash_
def __repr__(self):
return 'Package(name="{name}", version="{ver}", arch="{arch}", '\
'description="{desc}", url="{url}", size={size}, '\
'provides={prov}, depends={dep}, license={lic}, '\
'origin="{origin}", replaces={rep}, commit="{git}", '\
'maintainer="{m}", builddate={ts}, install_if={iif})'.format(
name=self._pkgname, ver=self._pkgver, arch=self._arch,
desc=self._pkgdesc, prov=self._provides, dep=self._depends,
url=self._url, size=self._size, lic=self._license,
origin=self._origin, rep=self._replaces, git=self._commit,
m=self._maintainer, ts=self._builddate, iif=self._iif)
def to_pkginfo(self):
"""Serialises the package's information into the PKGINFO format.
:returns str: The PKGINFO for this package. Unicode str, ready to be
written to a file.
.. note:: To write a file, see the :py:meth:`.write_pkginfo` helper
method.
"""
        return PKGINFO_TEMPLATE.render(builduser=os.getenv('USER', '?'),
                                       buildhost=os.uname().nodename,
                                       builddate=self._builddate,
                                       package=self)
@classmethod
def from_pkginfo(cls, buf):
"""Create a new :py:class:`Package` object from an existing PKGINFO.
:param buf:
The buffer to read from (whether file, StringIO, etc).
:returns:
A :py:class:`Package` object with the details from the PKGINFO.
:throws ValueError:
If a required field is missing from the PKGINFO.
"""
params = {}
param_map = {'pkgname': 'name', 'pkgver': 'version', 'arch': 'arch',
'pkgdesc': 'description', 'provides': 'provides',
'depend': 'depends', 'url': 'url', 'size': 'size',
'replaces': 'replaces', 'builddate': 'builddate',
'license': 'license', 'datahash': '_datahash',
'maintainer': 'maintainer', 'commit': 'commit',
'install_if': 'install_if'}
list_keys = {'provides', 'depend', 'replaces', 'install_if'}
        params['provides'] = list()
        params['depends'] = list()
        params['replaces'] = list()
        params['install_if'] = list()
for line in buf.readlines():
if not isinstance(line, str):
line = line.decode('utf-8')
# Skip comments.
if len(line) == 0 or line[0] == '#':
continue
if line.find('=') == -1:
PACKAGE_LOGGER.warning('!!! malformed line? "%s" !!!', line)
continue
(key, value) = line.split('=', 1)
key = key.strip()
value = value.strip()
if key in param_map:
if key in list_keys:
params[param_map[key]].append(value)
else:
params[param_map[key]] = value
else:
PACKAGE_LOGGER.info('!!! unrecognised PKGINFO key %s !!!', key)
return cls(**params)
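if __name__ == "__main__":
    # Minimal usage sketch (added for illustration, not part of the original
    # module): build a package record with made-up field values and render its
    # PKGINFO.
    pkg = Package(name="hello", version="1.0-r0", arch="x86_64",
                  description="example package", size=4096,
                  depends=["musl"], license="MIT")
    print(pkg.to_pkginfo())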
| 9,603 |
temper/src/aggregator_ui/__init__.py
|
isandlaTech/cohorte-demos
| 1 |
2023869
|
#!/usr/bin/env python
#-- Content-Encoding: UTF-8 --
"""
Created on 9 July 2012
:author: <NAME>
"""
| 103 |
src/sheet_reader.py
|
thhuang/SheetMusicAI
| 0 |
2025422
|
import argparse
import cv2
from midiutil.MidiFile import MIDIFile
from utils import open_file, load_imgs, draw_boxes
from detector import Detector
from note import Note, NoteLength
class SheetReader:
def __init__(self, input: str) -> None:
self._sheet_img_path = input
self._notes = []
self._start_size = 0.5 # TODO: DRY
self._stop_size = 0.9 # TODO: DRY
self._threshold_staff = 0.87 # TODO: DRY
self._threshold_sharp_sign = 0.73 # TODO: DRY
self._threshold_flat_sign = 0.73 # TODO: DRY
self._threshold_natural_sign = 0.73 # TODO: DRY
self._threshold_quarter_note = 0.73 # TODO: DRY
self._threshold_half_note = 0.73 # TODO: DRY
self._threshold_whole_note = 0.73 # TODO: DRY
self._threshold_half_rest = 0.9 # TODO: DRY
self._width_resized = 2048
self._search_step = 0.02
self._decode_threshold = 5 / 8 # TODO: DRY
# Read data
self._img = cv2.imread(self._sheet_img_path, 0) # grey scale
self._img_height, self._img_width = self._img.shape
# Preprocess
self._preprocess()
def _preprocess(self) -> None:
# Color filter
_, self._img = cv2.threshold(self._img, 177, 255, cv2.THRESH_BINARY)
# Resize
scale = self._width_resized / self._img_width
self._img = cv2.resize(self._img, None, fx = scale, fy = scale, interpolation = cv2.INTER_CUBIC)
self._img_height, self._img_width = self._img.shape # update the size of the image
# RGB image
self._img_rgb = cv2.cvtColor(self._img, cv2.COLOR_GRAY2RGB)
# Save image
cv2.imwrite('sheet_proprocessed.png', self._img)
open_file('sheet_proprocessed.png')
@property
def img_height(self) -> float:
return self._img_height
@property
def img_width(self) -> float:
return self._img_width
def detect_staff(self, staff_dir: str) -> None:
staff_imgs = load_imgs(staff_dir)
detector = Detector(self, staff_imgs, self._threshold_staff, is_staff=True)
self._staff_boxes = detector.detect()
self._staff_boxes.sort(key=lambda box: box.y)
draw_boxes('staff_boxes_img.png', self._img_rgb, self._staff_boxes)
def detect_sharp_sign(self, sharp_sign_dir: str) -> None:
sharp_sign_imgs = load_imgs(sharp_sign_dir)
detector = Detector(self, sharp_sign_imgs, self._threshold_sharp_sign)
self._sharp_sign_boxes = detector.detect()
draw_boxes('sharp_sign_boxes_img.png', self._img_rgb, self._sharp_sign_boxes)
def detect_flat_sign(self, flat_sign_dir: str) -> None:
flat_sign_imgs = load_imgs(flat_sign_dir)
detector = Detector(self, flat_sign_imgs, self._threshold_flat_sign)
self._flat_sign_boxes = detector.detect()
draw_boxes('flat_sign_boxes_img.png', self._img_rgb, self._flat_sign_boxes)
def detect_natural_sign(self, natural_sign_dir: str) -> None:
natural_sign_imgs = load_imgs(natural_sign_dir)
detector = Detector(self, natural_sign_imgs, self._threshold_natural_sign)
self._natural_sign_boxes = detector.detect()
draw_boxes('natural_sign_boxes_img.png', self._img_rgb, self._natural_sign_boxes)
def detect_quarter_note(self, quarter_note_dir: str) -> None:
quarter_note_imgs = load_imgs(quarter_note_dir)
detector = Detector(self, quarter_note_imgs, self._threshold_quarter_note)
self._quarter_note_boxes = detector.detect()
draw_boxes('quarter_note_boxes_img.png', self._img_rgb, self._quarter_note_boxes)
def detect_half_note(self, half_note_dir: str) -> None:
half_note_imgs = load_imgs(half_note_dir)
detector = Detector(self, half_note_imgs, self._threshold_half_note)
self._half_note_boxes = detector.detect()
draw_boxes('half_note_boxes_img.png', self._img_rgb, self._half_note_boxes)
def detect_whole_note(self, whole_note_dir: str) -> None:
whole_note_imgs = load_imgs(whole_note_dir)
detector = Detector(self, whole_note_imgs, self._threshold_whole_note)
self._whole_note_boxes = detector.detect()
draw_boxes('whole_note_boxes_img.png', self._img_rgb, self._whole_note_boxes)
def detect_half_rest(self, half_rest_dir: str) -> None:
half_rest_imgs = load_imgs(half_rest_dir)
detector = Detector(self, half_rest_imgs, self._threshold_half_rest)
self._half_rest_boxes = detector.detect()
draw_boxes('half_rest_boxes_img.png', self._img_rgb, self._half_rest_boxes)
def decode(self):
# TODO: refactor
for staff_box in self._staff_boxes:
quarter_notes = [Note(NoteLength.QUARTER, box, staff_box)
for box in self._quarter_note_boxes
if abs(box.center[1] - staff_box.center[1]) < staff_box.h * self._decode_threshold]
half_notes = [Note(NoteLength.HALF, box, staff_box)
for box in self._half_note_boxes
if abs(box.center[1] - staff_box.center[1]) < staff_box.h * self._decode_threshold]
staff_notes = quarter_notes + half_notes
staff_notes.sort(key=lambda note: note.x)
self._notes += staff_notes
for note in staff_notes:
print('{} {}'.format(note.pitch, note.length))
def output_midi(self) -> None:
midi = MIDIFile(1)
track = 0
time = 0
channel = 0
volume = 100
midi.addTrackName(track, time, "Track")
midi.addTempo(track, time, 240)
for note in self._notes:
duration = note.length.value * 4 # TODO: DRY
midi.addNote(track, channel, note.pitch.value, time, duration, volume)
time += duration
midi_file = open('output.mid', 'wb')
midi.writeFile(midi_file)
midi_file.close()
open_file('output.mid')
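if __name__ == "__main__":
    # Minimal usage sketch (added for illustration, not part of the original
    # module): the image path and template directories below are hypothetical;
    # decode() only needs the staff, quarter-note and half-note detections.
    reader = SheetReader('sheet.png')
    reader.detect_staff('templates/staff')
    reader.detect_quarter_note('templates/quarter_note')
    reader.detect_half_note('templates/half_note')
    reader.decode()
    reader.output_midi()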
| 5,982 |
train.py
|
evrimozmermer/ObjectDetectionSilverBullet
| 0 |
2025417
|
import collections
import numpy as np
import os
import torch
import tqdm
import config
import datasets
import optimizers
from models import retinanet
from utils import *
assert torch.__version__.split('.')[0] == '1'
print('CUDA available: {}'.format(torch.cuda.is_available()))
cfg = config.load("./config/config.json")
start_epoch = 0
cfg.resume = "./checkpoints/{0}-{1}-{2}-W{3}H{4}-InpSize{5}-{6}-parallel{7}/ckpt".format(cfg.dataset,
"RetinaNet",
cfg.backbone,
cfg.width,
cfg.height,
cfg.input_size,
cfg.optimizer,
cfg.data_parallel) # cfg.__dict__["dataset"]
try:
    os.mkdir("/".join(cfg.resume.split("/")[:3]))
except FileExistsError:
    # the checkpoint directory already exists from a previous run
    pass
# import dataset
os.chdir("datasets")
cfg.data_root = os.getcwd()
dl_tr, dl_ev = datasets.load(cfg, val=True)
os.chdir("..")
model = retinanet.load(cfg)
# resume
checkpoint = None
if os.path.isfile("{}.pth".format(cfg.resume)):
print('=> loading checkpoint:\n{}.pth'.format(cfg.resume))
checkpoint = torch.load("{}.pth".format(cfg.resume),torch.device(cfg.device))
if checkpoint['parallel_flag']:
model = torch.nn.DataParallel(model)
model.load_state_dict(checkpoint['model_state_dict'], strict = True)
else:
model.load_state_dict(checkpoint['model_state_dict'], strict = True)
start_epoch = checkpoint['epoch']
if cfg.data_parallel and not (checkpoint and checkpoint['parallel_flag']):
    # wrap in DataParallel unless the loaded checkpoint was already parallel
    model = torch.nn.DataParallel(model)
model.to(cfg.device)
model.training = True
param_groups = model.parameters()
opt = optimizers.load(cfg, param_groups)
if checkpoint:
opt.load_state_dict(checkpoint['optimizer'])
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(opt, patience=3, verbose=True)
loss_hist = collections.deque(maxlen=20)
for epoch in range(start_epoch, cfg.epochs):
model.train()
if cfg.data_parallel:
model.module.freeze_bn()
else:
model.freeze_bn()
epoch_loss = []
pbar = tqdm.tqdm(enumerate(dl_tr, start = 1))
for iter_num, data in pbar:
if torch.cuda.is_available():
classification_loss, regression_loss = model([data['image'].cuda().float(), data['annot'].cuda().float()])
else:
classification_loss, regression_loss = model([data['image'].float(), data['annot']])
classification_loss = classification_loss.mean()
regression_loss = regression_loss.mean()
loss = classification_loss + regression_loss
opt.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.1)
opt.step()
loss_hist.append(float(loss))
epoch_loss.append(float(loss))
print_text = 'Epoch: {} | Iteration: {} | Classification loss: {:1.5f} | Regression loss: {:1.5f} | Running loss: {:1.5f}'.format(
epoch, iter_num, float(classification_loss), float(regression_loss), np.mean(loss_hist))
pbar.set_description(print_text)
del classification_loss
del regression_loss
# scheduler.step(np.mean(epoch_loss))
if (epoch+1)%cfg.visualization_interval==0:
visualize(cfg, model, dl_ev)
torch.save({'model_state_dict': model.state_dict(),
'optimizer': opt.state_dict(),
'epoch': epoch,
'parallel_flag': cfg.data_parallel},
'{}.pth'.format(cfg.resume))
model.eval()
torch.save(model, 'model_final.pt')
| 3,818 |
cours7.py
|
fallou-vip/Ti104
| 0 |
2025390
|
entree_utilisateur = input("Please enter a string: ")
# The user enters the sentence: 'Bonjour le monde'
# Data structure in Python: the dictionary
# A set of keys and values (a map in JavaScript?)
# We could eventually build a database out of dictionaries
# Student:
#     Last name
#     First name
#     Student_number
#
# d = {'nom': 'Huberdeau', 'prenom': 'Alex', 'Numero_etudiant': 0}
# e = {'nom': 'Fowang', 'prenom': 'John', 'Numero_etudiant': 1}
#
# print(d['nom'])    => 'Huberdeau'
# print(d['prenom']) => 'Alex'
# d['prenom'] = 'alex'
# for lettre in entree_utilisateur:
#     if the letter is already in the dictionary
#         increment its counter
#     else
#         create a new key in the dictionary
d = {}  # Assign an empty dictionary to the variable d ({} is used for dictionaries and for sets)
for lettre in entree_utilisateur:
if not lettre in d:
d[lettre] = 1
elif lettre in d:
        # l[2] -> accesses the element at index 2 of the list (or, here, of the string)
d[lettre] += 1
print(d)
for cle in d.keys():
if d[cle] > 1:
print("Vous avez affiche la lettre ", cle, d[cle], " fois")
| 1,176 |
authome/settings_local.py
|
xncbf/authome
| 10 |
2025710
|
from dotenv import load_dotenv
from unipath import Path
load_dotenv()
load_dotenv(verbose=True)
BASE_DIR = Path(__file__).ancestor(2)
env_path = BASE_DIR.child('.env')
load_dotenv(dotenv_path=env_path)
from authome.settings import *
print('LOCAL!!!!')
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '<KEY>'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
SITE_ID = 3
ALLOWED_HOSTS = ['*']
SESSION_COOKIE_SECURE = False
CSRF_COOKIE_SECURE = False
SECURE_SSL_REDIRECT = False
SESSION_COOKIE_DOMAIN = '.example.com'
INTERNAL_IPS = ['127.0.0.1']
BROKER_URL = 'django://'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': BASE_DIR.child('db.sqlite3'),
# }
# }
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.postgresql',
# 'NAME': os.environ['AUTHOME_DATABASE_NAME'],
# 'USER': os.environ['AUTHOME_DATABASE_USER'],
# 'PASSWORD': os.environ['AUTHOME_DATABASE_PASSWORD'],
# 'HOST': 'fortest.cqzciqkjmzjq.ap-northeast-2.rds.amazonaws.com',
# 'PORT': os.environ['AUTHOME_DATABASE_PORT'],
# }
# }
# STATIC_ROOT = BASE_DIR.child("static")
# MEDIA_ROOT = BASE_DIR.child('images')
| 1,344 |
benchmarks/latency_overhead.py
|
pyronia-sys/libpyronia
| 0 |
2023772
|
from statistics import mean, median
def calc_percent(new, orig):
return ((float(new)-float(orig))/float(orig)*100.0)
f = open('hello-latency', 'r')
latencies = f.readlines()
f.close()
no_pyr = [l.split(',')[0].strip() for l in latencies]
no_pyr = list(map(float, no_pyr))
with_pyr = [l.split(',')[1].strip() for l in latencies]
with_pyr = list(map(float, with_pyr))
print("hello.py latency:")
print("min: %.2f us" % min(no_pyr))
print("median: %.2f us" % median(no_pyr))
print("mean: %.2f us" % mean(no_pyr))
print("max: %.2f us" % max(no_pyr))
print("")
print("hello.py latency with Pyronia:")
print("min: %.2f us" % min(with_pyr))
print("median: %.2f us" % median(with_pyr))
print("mean: %.2f us" % mean(with_pyr))
print("max: %.2f us" % max(with_pyr))
print("")
print("avg overhead: %.2f %%" % calc_percent(mean(with_pyr), mean(no_pyr)))
| 849 |
server/integration-tests/yang/install.py
|
michalvasko/Netopeer2
| 0 |
2024535
|
import json
import subprocess
import sys
def run(cmd):
print(' + {}'.format(' '.join(cmd)))
sys.stdout.flush()
subprocess.check_call(cmd)
with open('manifest.json', 'r') as f:
manifest = json.load(f)
for model in manifest['models']:
run(['sysrepoctl', '--install', '--yang', model])
for feature in manifest['features']:
parts = feature.split(':')
run(['sysrepoctl', '--feature-enable', parts[1], '--module', parts[0]])
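# A hypothetical manifest.json consumed by this script (its structure is inferred
# from the two loops above; the module and feature names below are only examples):
#
# {
#   "models": ["ietf-interfaces.yang", "ietf-ip.yang"],
#   "features": ["ietf-interfaces:arbitrary-names"]
# }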
| 453 |
secrets.py
|
sondr3/replieswithtime
| 1 |
2024591
|
"""Twitter keys so you can access their API."""
CONSUMER_KEY = ''
CONSUMER_SECRET = ''
ACCESS_TOKEN = ''
ACCESS_SECRET = ''
| 124 |
PyObjCTest/test_nsopenglview.py
|
Khan/pyobjc-framework-Cocoa
| 132 |
2025774
|
from PyObjCTools.TestSupport import *
import AppKit
class TestNSOpenGLView (TestCase):
@min_os_level('10.7')
def testMethods10_7(self):
self.assertResultIsBOOL(AppKit.NSOpenGLView.wantsBestResolutionOpenGLSurface)
self.assertArgIsBOOL(AppKit.NSOpenGLView.setWantsBestResolutionOpenGLSurface_, 0)
if __name__ == "__main__":
main()
| 361 |
wsg.py
|
denesb/window-specification-generator
| 0 |
2025465
|
#!/usr/bin/env python3
import collections
import json
import argparse
from reportlab.pdfgen import canvas
from reportlab.lib.units import cm
from reportlab.lib.pagesizes import A4
BOX_PADDING = (2*cm, 5*cm, 5*cm, 2*cm)
TOP = 0
RIGHT = 1
BOTTOM = 2
LEFT = 3
BOX = (A4[0], A4[1] / 2)
INDICATOR_HEIGHT = 0.4 * cm
WINDOW_COLOR=(0, 0, 0)
class Rect():
def __init__(self, bottom_left, size):
self.bottom_left = bottom_left
self.size = size
self.top_right = (self.bottom_left[0] + self.size[0],
self.bottom_left[1] + self.size[1])
@property
def width(self):
return self.size[0]
@property
def height(self):
return self.size[1]
def to_drawable(self, sf):
return (self.bottom_left[0] * cm * sf, #x1
self.bottom_left[1] * cm * sf, #x2
self.size[0] * cm * sf, #width
self.size[1] * cm * sf) #height
def bottom_line(self, sf):
y = self.bottom_left[1] * cm * sf
return (self.bottom_left[0] * cm * sf,
y,
self.top_right[0] * cm * sf,
y)
def right_line(self, sf):
x = self.top_right[0] * cm * sf
return (x,
self.bottom_left[1] * cm * sf,
x,
self.top_right[1] * cm * sf)
def __repr__(self):
return str([self.bottom_left, self.size])
def scale(spec):
box_w, box_h = BOX
box_w = box_w - BOX_PADDING[LEFT] - BOX_PADDING[RIGHT]
box_h = box_h - BOX_PADDING[TOP] - BOX_PADDING[BOTTOM]
w = spec["width"] * cm
h = spec["height"] * cm
return min(box_h / h, box_w / w)
def translate(spec, sf, index_on_page):
box_w, box_h = BOX
w = spec["width"] * cm * sf
h = spec["height"] * cm * sf
t_x = (box_w - w) / 2
t_y = box_h * index_on_page + (box_h - h) / 2
return (t_x, t_y)
def any_size_defined(pieces):
for p in pieces:
if "size" in p:
return True
return False
def calculate_division_rects(spec):
r = spec["rect"]
t = spec["type"]
pieces = spec["pieces"]
w, h = r.size
n = len(pieces)
if t == "vertical":
offset = 0
remaining = w
calculate_size = lambda s: Rect((r.bottom_left[0] + offset, r.bottom_left[1]), (s, h))
move_offset = lambda s: offset + s
elif t == "horizontal":
offset = h
remaining = h
calculate_size = lambda s: Rect((r.bottom_left[0], r.bottom_left[1] + offset - s), (w, s))
move_offset = lambda s: offset - s
else:
raise Exception("Invalid division type: `{}'".format(t))
for i, d in enumerate(pieces):
s = d.get("size", remaining / n)
n = n - 1
remaining = remaining - s
d["rect"] = calculate_size(s)
offset = move_offset(s)
if "type" in d:
calculate_division_rects(d)
def draw_opening(c, d, opening, sf):
r = d["rect"]
x = r.bottom_left[0] * cm * sf
y = r.top_right[1] * cm * sf
w = r.width * cm * sf
h = r.height * cm * sf
xm = x + w / 2
ym = y - h / 2
pad = 0.2 * cm
if opening is None:
s = 0.5 * cm
c.line(xm - s, ym, xm + s, ym)
c.line(xm, ym - s, xm, ym + s)
else:
if opening == "top":
x_tip = xm
y_tip = y - pad
x_left = x + pad
y_left = y - h + pad
x_right = x + w - pad
y_right = y - h + pad
elif opening == "right":
x_tip = x + pad
y_tip = ym
x_left = x + w - pad
y_left = y - h + pad
x_right = x + w - pad
y_right = y - pad
elif opening == "left":
x_tip = x + w - pad
y_tip = ym
x_left = x + pad
y_left = y - h + pad
x_right = x + pad
y_right = y - pad
else:
raise Exception("Invalid opening value: `{}'".format(opening))
lines = [(x_tip, y_tip, x_left, y_left),
(x_left, y_left, x_right, y_right),
(x_right, y_right, x_tip, y_tip)]
c.lines(lines)
def draw_openings(c, spec, sf):
openings = spec.get("opens", None)
if (type(openings) is list):
for opening in openings:
draw_opening(c, spec, opening, sf)
else:
draw_opening(c, spec, openings, sf)
def draw_division(c, spec, sf):
t = spec["type"]
pieces = spec["pieces"]
for i, d in enumerate(pieces):
r = d["rect"]
if "type" in d:
draw_division(c, d, sf)
else:
draw_openings(c, d, sf)
if i == len(pieces) - 1:
continue
if t == "vertical":
c.line(*r.right_line(sf))
else:
c.line(*r.bottom_line(sf))
def calculate_division_sizes(spec, v, h, l):
for i, p in enumerate(spec["pieces"]):
r = p["rect"]
if spec["type"] == "vertical":
v[l].append(r.width)
else:
h[l].append(r.height)
if "type" in p:
calculate_division_sizes(p, v, h, l + 1)
def calculate_sizes(spec):
h = collections.defaultdict(list)
v = collections.defaultdict(list)
h[0] = [spec["height"]]
v[0] = [spec["width"]]
if "division" in spec:
calculate_division_sizes(spec["division"], v, h, 1)
return h, v
def draw_size(c, size, sf):
text = "{:.2f}".format(size)
text_width = c.stringWidth(text)
s = size * cm * sf
x = s / 2 - text_width / 2
c.drawString(x, 0, text)
c.line(0, 0.05 * cm, 0, INDICATOR_HEIGHT - 0.05 * cm)
c.line(s, 0.05 * cm, s, INDICATOR_HEIGHT - 0.05 * cm)
c.line(0, INDICATOR_HEIGHT / 2, x - 0.05 * cm, INDICATOR_HEIGHT / 2)
c.line(x + text_width + 0.05 * cm, INDICATOR_HEIGHT / 2, s, INDICATOR_HEIGHT / 2)
def draw_horizontal_sizes(c, spec, sizes, sf):
w, h = spec["width"], spec["height"]
c.saveState()
c.rotate(90)
c.translate(h * cm * sf, -w * cm * sf -1 * cm)
i = 0
for l in reversed(sorted(sizes.keys())):
c.translate(0, -i * 0.4 * cm)
if len(sizes[l]) == 0:
continue
i = i + 1
c.saveState()
for size in sizes[l]:
c.translate(-size * cm * sf, 0)
draw_size(c, size, sf)
c.restoreState()
c.restoreState()
def draw_vertical_sizes(c, sizes, sf):
c.saveState()
c.translate(0, -1 * cm)
i = 0
for l in reversed(sorted(sizes.keys())):
c.translate(0, -i * 0.4 * cm)
if len(sizes[l]) == 0:
continue
i = i + 1
c.saveState()
for size in sizes[l]:
draw_size(c, size, sf)
c.translate(size * cm * sf, 0)
c.restoreState()
c.restoreState()
def draw_sizes(c, spec, sf):
h, v = calculate_sizes(spec)
draw_horizontal_sizes(c, spec, h, sf)
draw_vertical_sizes(c, v, sf)
def draw_window(c, spec, index_on_page):
c.saveState()
sf = scale(spec)
c.translate(*translate(spec, sf, index_on_page))
c.setStrokeColorRGB(*WINDOW_COLOR)
r = Rect((0, 0), (spec["width"], spec["height"]))
text = spec["name"]
text_width = c.stringWidth(text)
c.drawString(r.width * cm * sf / 2 - text_width / 2, r.height * cm * sf + 1 * cm, text)
c.bookmarkPage(text)
c.addOutlineEntry(text, text)
# Outer rectangle
c.rect(*r.to_drawable(sf))
if "division" in spec:
spec["division"]["rect"] = r
calculate_division_rects(spec["division"])
draw_division(c, spec["division"], sf)
else:
spec["rect"] = r
draw_openings(c, spec, sf)
draw_sizes(c, spec, sf)
c.restoreState()
def draw_windows(spec):
c = canvas.Canvas(spec["name"] + ".pdf", pagesize=A4)
c.setTitle(spec["title"])
c.showOutline()
for i, window in enumerate(spec["windows"]):
if i > 1 and i % 2 == 0:
c.showPage()
draw_window(c, window, 1 - (i % 2))
c.save()
parser = argparse.ArgumentParser(
description="Draw window size specifications")
parser.add_argument("specification", help="The window specification file", type=str)
args = parser.parse_args()
with open(args.specification, "r") as f:
draw_windows(json.load(f))
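# A hypothetical specification file (the field names are inferred from the code
# above: windows have "name", "width"/"height" in cm, an optional "division" with
# "type" ("vertical" or "horizontal") and "pieces", and per-piece "opens" values
# of "top", "right", "left" or null; the names and sizes below are made up):
#
# {
#   "name": "house-windows",
#   "title": "Window specifications",
#   "windows": [
#     {
#       "name": "W1",
#       "width": 120,
#       "height": 150,
#       "division": {
#         "type": "vertical",
#         "pieces": [
#           {"opens": "left"},
#           {"size": 40, "opens": null}
#         ]
#       }
#     }
#   ]
# }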
| 8,322 |
LeetCodeSolutions/python/38_Count_and_Say.py
|
ChuanleiGuo/AlgorithmsPlayground
| 1 |
2025109
|
class Solution(object):
def countAndSay(self, n):
"""
:type n: int
:rtype: str
"""
string = '1'
for i in range(2, n + 1):
j = 0
temp = ''
while j < len(string):
k = j + 1
while k in range(j + 1, len(string)):
if string[k] != string[j]:
break
k += 1
temp += str(k - j)
temp += str(string[j])
j = k
string = temp
return string
n = 2
print Solution().countAndSay(n)
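# For reference, this routine produces the count-and-say sequence:
# n=1 -> "1", n=2 -> "11", n=3 -> "21", n=4 -> "1211", n=5 -> "111221".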
| 616 |
chrome/updater/test/service/win/updater_test_service.py
|
zealoussnow/chromium
| 14,668 |
2024279
|
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import sys
import xmlrpc.server
import pywintypes
import servicemanager
import win32api
import win32service
import win32serviceutil
import rpc_handler
# TODO(crbug.com/1233612): Use portpick to choose an available port, and
# propagate the port to clients (for example, via a pre-defined registry key).
_XML_RPC_SERVER_PORT = 9090
class UpdaterTestRequestHandler(xmlrpc.server.SimpleXMLRPCRequestHandler):
def log_message(self, format, *args):
# Overrides base class's implementation which writes the messages to
# sys.stderr.
# When XML RPC server runs within the service, sys.stderr is None. This
# crashes the server and aborts connection. Workaround this issue by using
# python logging.
logging.error(format, *args)
class UpdaterTestXmlRpcServer(xmlrpc.server.SimpleXMLRPCServer):
"""Customized XML-RPC server for updater tests."""
def __init__(self):
super().__init__(('localhost', _XML_RPC_SERVER_PORT),
requestHandler=UpdaterTestRequestHandler,
allow_none=True)
def run(self):
"""xml-rpc server main loop."""
self.register_introspection_functions()
self.register_instance(rpc_handler.UpdaterTestRPCHandler())
self.serve_forever()
class UpdaterTestService(win32serviceutil.ServiceFramework):
"""Customizes updater tests behavior."""
# Do not change these class variables names, these are required by the base
# class.
_svc_name_ = 'UpdaterTestService'
_svc_display_name_ = 'Updater Test Service'
_svc_description_ = 'Service for browser updater tests'
def SvcStop(self):
"""Called by service framework to stop this service."""
logging.info('Updater test service stopping...')
self._xmlrpc_server.shutdown()
self.ReportServiceStatus(win32service.SERVICE_STOPPED)
def SvcDoRun(self):
"""Called by service framework to start this service."""
try:
logging.info('%s starting...', self._svc_name_)
servicemanager.LogMsg(servicemanager.EVENTLOG_INFORMATION_TYPE,
servicemanager.PYS_SERVICE_STARTED,
(self._svc_name_, ''))
self.ReportServiceStatus(win32service.SERVICE_RUNNING)
self._xmlrpc_server = UpdaterTestXmlRpcServer()
self._xmlrpc_server.run()
servicemanager.LogInfoMsg(self._svc_name_ + ' - Ended')
except pywintypes.error as err:
logging.exception(err)
servicemanager.LogErrorMsg(err)
self.ReportServiceStatus(win32service.SERVICE_ERROR_SEVERE)
if __name__ == "__main__":
logging.info('Command: %s', sys.argv)
# Prefer the pythonservice.exe in the same directory as the interpreter.
# This is mainly for the vpython case.
destination = os.path.join(
os.path.dirname(os.path.abspath(sys.executable)), 'pythonservice.exe')
if os.path.exists(destination):
os.environ['PYTHON_SERVICE_EXE'] = destination
try:
win32api.SetConsoleCtrlHandler(lambda _: True, True)
win32serviceutil.HandleCommandLine(UpdaterTestService)
except Exception as err:
servicemanager.LogErrorMsg(err)
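# A minimal client sketch (assumed usage, not part of the service itself): the
# XML-RPC server registers introspection functions, so once the service is running
# the available RPC methods can be listed from the same machine like this:
#
#   import xmlrpc.client
#   proxy = xmlrpc.client.ServerProxy('http://localhost:9090')
#   print(proxy.system.listMethods())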
| 3,272 |
spug_api/apps/configuration/service.py
|
showsmall/spug
| 2 |
2025585
|
from flask import Blueprint, request
from libs.tools import json_response, JsonParser
from apps.configuration.models import Service, ConfigKey, AppConfigRel
from apps.deploy.models import App
from libs.decorators import require_permission
from public import db
blueprint = Blueprint(__name__, __name__)
@blueprint.route('/', methods=['GET'])
@require_permission('config_service_view')
def get():
group = request.args.get('group')
server_query = Service.query
if group:
services = server_query.filter_by(group=group).all()
else:
services = server_query.all()
return json_response(services)
@blueprint.route('/', methods=['POST'])
@require_permission('config_service_add')
def post():
form, error = JsonParser('name', 'identify', 'desc', 'group').parse()
if error is None:
Service(**form).save()
return json_response()
return json_response(message=error)
@blueprint.route('/<int:ser_id>', methods=['PUT'])
@require_permission('config_service_edit')
def put(ser_id):
form, error = JsonParser('name', 'identify', 'desc', 'group').parse()
if error is None:
Service.query.get_or_404(ser_id).update(**form)
return json_response()
return json_response(message=error)
@blueprint.route('/<int:ser_id>', methods=['DELETE'])
@require_permission('config_service_del')
def delete(ser_id):
service = Service.query.get_or_404(ser_id)
rel = AppConfigRel.query.filter_by(d_id=ser_id, d_type='ser').first()
if rel:
rel_app = App.query.get_or_404(rel.s_id)
        return json_response(message='Application <%s> references this service; remove the association before trying to delete it!' % rel_app.name)
if ConfigKey.query.filter_by(owner_id=ser_id, owner_type='ser').count():
        return json_response(message='For safety, delete all configuration keys under this service before trying to delete it!')
service.delete()
return json_response()
@blueprint.route('/groups/', methods=['GET'])
@require_permission('config_service_view | config_service_add | config_service_edit')
def fetch_groups():
service_group = db.session.query(Service.group.distinct().label('group')).all()
return json_response([x.group for x in service_group])
| 2,134 |
lib/config.py
|
Lapland-UAS-Tequ/tequ-xbee-ble-sensor
| 0 |
2025498
|
import ujson
from functions import log
from sys import print_exception
from machine import Pin
import utime
class config:
settings = {}
def __init__(self):
log("Initializing configuration file")
self.loadConfig()
self.LED_PIN_ID = self.getUserLED_ID()
self.led_pin = Pin(self.LED_PIN_ID, Pin.OUT, value=0)
def loadConfig(self):
try:
log("Loading config file...")
file = open('/flash/lib/config.json')
self.settings = ujson.loads(file.read())
file.close()
except Exception as e:
log("Loading config file... FAILED.. Creating default config..")
print_exception(e)
self.createDefaultConfig()
finally:
log(self.settings)
def updateConfig(self):
try:
log("Updating config file...")
file = open('/flash/lib/config.json', mode='w')
file.write(ujson.dumps(self.settings))
file.close()
except Exception as e:
log("Updating config file... FAILED..")
print_exception(e)
self.createDefaultConfig()
finally:
log(self.settings)
def createDefaultConfig(self):
# original values
log("Falling back to default config...")
value = {"SEND_INTERVAL": 10}
self.settings = value
file = open('/flash/lib/config.json', mode='w')
file.write(ujson.dumps(value))
file.close()
def updateConfigValue(self, parameter, value):
log("Parameter %s and value: %s => Updating parameter..." % (parameter, value))
self.settings[parameter] = value
def getSleepTime(self):
return self.settings["sleep_time"]
def getSendInterval(self):
return self.settings["send_interval"]
def getCurrentConfigAsJSON(self):
return ujson.dumps(self.settings)
def getWDTimeout(self):
return self.settings["wd_timeout"]
def getUserLED_ID(self):
return self.settings["user_led_id"]
def getVersion(self):
return self.settings["version"]
def blinkLed(self):
self.led_pin.value(1)
utime.sleep_ms(5)
self.led_pin.value(0)
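# A hypothetical /flash/lib/config.json matching the getters above (the key names
# are taken from getSleepTime/getSendInterval/getWDTimeout/getUserLED_ID/getVersion;
# the values are illustrative only):
#
# {
#   "sleep_time": 60,
#   "send_interval": 10,
#   "wd_timeout": 30000,
#   "user_led_id": "P9",
#   "version": "1.0.0"
# }
#
# Note that createDefaultConfig() writes the key "SEND_INTERVAL", while
# getSendInterval() reads "send_interval", so the fallback config does not
# satisfy that getter as written.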
| 2,238 |
scanned_image_beautifier/library/image_handler.py
|
LordAmit/automating-boring-staffs-using-python
| 3 |
2023060
|
from PIL import Image
def check_is_image_good_for_thumbnail(img: Image.Image):
if img.width < 256:
return True
return False
def change_to_grayscale(img: Image.Image):
return img.convert("L")
def save_thumbnail(img: Image.Image, file_save_address: str) -> Image:
size = 700, 700
img.thumbnail(size)
img.save(file_save_address)
# img = Image.open("/home/amit/git/automating-boring-tasks-using-python/scanned_image_beautifier/test.jpg")
# save_thumbnail(img, "/home/amit/git/automating-boring-tasks-using-python/scanned_image_beautifier/test_thumbnail.jpg")
| 595 |
code/warum.py
|
adrianimboden/cppusergroup-domain-driven-datatypes
| 0 |
2024453
|
#!/usr/bin/env python3
secret_global = None
def do_something(value):
global secret_global
value.append('new fancy value') # no const
secret_global = value # everything is shared
my_list = [1, 2, 3]
do_something(my_list)
assert (my_list == [1, 2, 3])
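# Note: the assertion above fails -- do_something() mutated the caller's list
# (it is now [1, 2, 3, 'new fancy value']) and leaked it into secret_global,
# which is presumably the pitfall this example is meant to demonstrate.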
| 269 |
lang/Python/equilibrium-index-5.py
|
ethansaxenian/RosettaDecode
| 0 |
2024944
|
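# The driver below calls eqindex2Pass, eqindexMultiPass and eqindex1Pass, whose
# definitions are not included in this snippet. The generators below are hedged
# reconstructions (assumed, not the originals): each yields every index i where
# the sum of the elements before i equals the sum of the elements after i.
def eqindex2Pass(data):
    # First pass computes the total; second pass keeps a running left-hand sum.
    total = sum(data)
    left = 0
    for i, value in enumerate(data):
        if left == total - left - value:
            yield i
        left += value

def eqindexMultiPass(data):
    # Recomputes both sides for every index; simple but quadratic.
    for i in range(len(data)):
        if sum(data[:i]) == sum(data[i + 1:]):
            yield i

def eqindex1Pass(data):
    # Walks the list once, shrinking the right-hand sum and growing the left.
    left, right = 0, sum(data)
    for i, value in enumerate(data):
        right -= value
        if left == right:
            yield i
        left += value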
f = (eqindex2Pass, eqindexMultiPass, eqindex1Pass)
d = ([-7, 1, 5, 2, -4, 3, 0],
[2, 4, 6],
[2, 9, 2],
[1, -1, 1, -1, 1, -1, 1])
for data in d:
print(("d = %r" % data))
for func in f:
print((" %16s(d) -> %r" % (func.__name__, list(func(data)))))
| 279 |
Introduction/PythonTuples/Tuples.py
|
nayanrajani/PYTHON
| 0 |
2025747
|
tuple1 = ("Nayan", "Mihir", "Ishwar")
print(tuple1)
duplicatetuple = ("Nayan", "Mihir", "Ishwar", "Nayan", "Mihir", "Ishwar")
print(duplicatetuple)
print(len(duplicatetuple))
tupleitem = ("Nayan",) #For tuple we have to use comma
print(tupleitem)
print(type(tupleitem)) #tuple
tupleitem1 = ("Nayan")
print(type(tupleitem1)) #string
#can be of different types
tuple1 = ("Nayan", "Mihir","Ishwar")
tuple2 = (1,8,85,45,645)
tuple3 = (True, False, True)
#can contains different data types
tuple4 = ("Nayan", 23, True,"Male")
# tuple constructor
tuple4 = tuple(("Nayan","Mihir","Ishwar"))
print(tuple4)
"""
Python Tuples
mytuple = ("apple", "banana", "cherry")
Tuple
Tuples are used to store multiple items in a single variable.
Tuple is one of 4 built-in data types in Python used to store collections of data, the other 3 are List, Set, and Dictionary, all with different qualities and usage.
A tuple is a collection which is ordered and unchangeable.
Tuples are written with round brackets.
Example
Create a Tuple:
thistuple = ("apple", "banana", "cherry")
print(thistuple)
Tuple Items
Tuple items are ordered, unchangeable, and allow duplicate values.
Tuple items are indexed, the first item has index [0], the second item has index [1] etc.
Ordered
When we say that tuples are ordered, it means that the items have a defined order, and that order will not change.
Unchangeable
Tuples are unchangeable, meaning that we cannot change, add or remove items after the tuple has been created.
Allow Duplicates
Since tuples are indexed, they can have items with the same value:
Example
Tuples allow duplicate values:
thistuple = ("apple", "banana", "cherry", "apple", "cherry")
print(thistuple)
Tuple Length
To determine how many items a tuple has, use the len() function:
Example
Print the number of items in the tuple:
thistuple = ("apple", "banana", "cherry")
print(len(thistuple))
Create Tuple With One Item
To create a tuple with only one item, you have to add a comma after the item, otherwise Python will not recognize it as a tuple.
Example
One item tuple, remember the comma:
thistuple = ("apple",)
print(type(thistuple))
#NOT a tuple
thistuple = ("apple")
print(type(thistuple))
Tuple Items - Data Types
Tuple items can be of any data type:
Example
String, int and boolean data types:
tuple1 = ("apple", "banana", "cherry")
tuple2 = (1, 5, 7, 9, 3)
tuple3 = (True, False, False)
A tuple can contain different data types:
Example
A tuple with strings, integers and boolean values:
tuple1 = ("abc", 34, True, 40, "male")
type()
From Python's perspective, tuples are defined as objects with the data type 'tuple':
<class 'tuple'>
Example
What is the data type of a tuple?
mytuple = ("apple", "banana", "cherry")
print(type(mytuple))
The tuple() Constructor
It is also possible to use the tuple() constructor to make a tuple.
Example
Using the tuple() method to make a tuple:
thistuple = tuple(("apple", "banana", "cherry")) # note the double round-brackets
print(thistuple)
Python Collections (Arrays)
There are four collection data types in the Python programming language:
List is a collection which is ordered and changeable. Allows duplicate members.
Tuple is a collection which is ordered and unchangeable. Allows duplicate members.
Set is a collection which is unordered and unindexed. No duplicate members.
Dictionary is a collection which is ordered* and changeable. No duplicate members.
*As of Python version 3.7, dictionaries are ordered. In Python 3.6 and earlier, dictionaries are unordered.
When choosing a collection type, it is useful to understand the properties of that type. Choosing the right type for a particular data set could mean retention of meaning, and, it could mean an increase in efficiency or security.
"""
| 3,778 |
about/views.py
|
Dikutal/Dikutal
| 0 |
2024663
|
from django.shortcuts import render_to_response, get_object_or_404, redirect
from django.template import RequestContext, Template
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_protect
from django.http import HttpResponse
from datetime import datetime
from markdown import markdown
from settings import *
from util.modtemplate import *
about_template = mod_template('about/index.md', markdown)
def index(request):
return HttpResponse(about_template.render(RequestContext(
request, {
'active_tab': 'about',
'subtitle': 'About'
})))
| 707 |
pandahouse/tests/test_http.py
|
Intelecy/pandahouse
| 3 |
2025740
|
import pytest
from requests.exceptions import RequestException, ConnectionError
from pandahouse.http import execute, ClickhouseException
def test_execute(connection):
query = "DESC system.parts FORMAT CSV;"
response = execute(query, connection=connection)
assert isinstance(response, bytes)
def test_execute_stream(connection):
query = "DESC system.parts FORMAT CSV;"
response = execute(query, stream=True, connection=connection)
result = response.read()
assert result
def test_wrong_host():
query = "DESC system.parts FORMAT CSV;"
with pytest.raises(ConnectionError):
execute(query, connection={"host": "http://local"})
def test_wrong_query(connection):
query = "SELECT * FROM default.nonexisting"
with pytest.raises((ClickhouseException, RequestException)):
execute(query, connection=connection)
| 867 |
SendPicture.py
|
IamVikas-bot/WhatsappBot
| 0 |
2025553
|
import os
import openpyxl
from whatsapp import WhatsApp
whatsapp = WhatsApp(100, session="mysession")
wb = openpyxl.load_workbook("UserInput/ListOfGroups.xlsx")
sheet = wb.active
for row in range(2, sheet.max_row + 1):
    whatsapp.send_picture(str(sheet.cell(row, 1).value), os.path.join(os.getcwd(), 'ClaimsData.png'))
whatsapp.quit()
| 346 |
Economy/User.py
|
ntuecon/2018groupPE
| 0 |
2024334
|
"""
Created on Mon Jun 18 23:21:10 2018
@author: WeiJin
"""
# Intro our tool to the user
print "Welcome! You're now a governer in the economy."
print ""
print "The economy is located near a lake and water is a crucial part of production. But if more water is used, water will be less efficient as a factor. Now, you get to decide the allocation of water to three different firms. Their efficiency of using water differs a lot."
print ""
print "The first coordinate of the following choices means the unit of water you give to the first firm, which is the firm with highest efficiency of using water."
print ""
print "Similarly, the second coordinate is the unit of water you give to the second firm, which is the firm with the second highest efficiency of using water."
print ""
print "Finally, the third coordiante of the choices means the unit of water you give to the last firm, who connot produce anything out of water."
print ""
print ""
print "There are four possible allocations of the last factor to the three firms:"
print ""
print "Option A: [Firm 1: 1; Firm 2: 1; Firm 3: 28]"
print ""
print "Option B: [Firm 1: 28; Firm 2: 1; Firm 3: 1]"
print ""
print "Option C: [Firm 1: 15; Firm 2: 10; Firm 3: 5]"
print ""
print "Option D: [Firm 1: 20; Firm 2: 7; Firm 3: 3]"
print ""
choice = raw_input("Please enter your choice (A/B/C/D):")
print ""
# Reveal user's choice
if choice == "A" or choice == "a":
print "You have chosen to allocate:"
print "1 unit to firm 1"
print "1 unit to firm 2"
print "28 units to firm 3"
print ""
print "You have chosen the worst allocation."
elif choice == "B" or choice == "b":
print "You have chosen to allocate:"
print "28 units to firm 1"
print "1 unit to firm 2"
print "1 unit to firm 3"
print ""
print "You have chosen the second best allocation!"
elif choice == "C" or choice == "c":
print "You have chosen to allocate:"
print "15 units to firm 1"
print "10 units to firm 2"
print "5 units to firm 3"
print ""
print "You have chosen the second worst allocation."
elif choice == "D" or choice == "d":
print "You have chosen to allocate:"
print "20 units to firm 1"
print "7 units to firm 2"
print "3 units to firm 3"
print ""
print "You have chosen the best allocation!"
else:
print "Invalid input. Please input either A/B/C/D"
# Explanation of the different allocations
print "At first glance, it might be straightforward that the government should allocate most of the water to the firm with highest efficiency."
print ""
print "But based on our simluations, the allocating nearly all of the quota to the most efficient firm did not have highest social welfare."
print ""
print "This might be due to the fact that marginal production is decreasing."
print ""
print "That is, the optimal allocation should be somewhere in between."
| 2,960 |
tools/generate_latlon_ugrid.py
|
bjlittle/mint
| 0 |
2024626
|
import netCDF4
import numpy
import sys
import argparse
from numpy import sin, cos, pi, heaviside
"""
Generate grid and edge data on uniform grid and save result in UGRID file
"""
parser = argparse.ArgumentParser(description='Generate the output file storing the lat-lon grid and edge data in UGRID format')
parser.add_argument('-o', dest='grid_file', default='',
help='Specify the netcdf file containing the grid and the mesh name in the format "FILENAME:MESHNAME"')
parser.add_argument('-nx', default=1, type=int,
help='Number of longitude cells')
parser.add_argument('-ny', default=1, type=int,
help='Number of latitude cells')
parser.add_argument('-s', type=str, dest='stream_funct', default='sin(pi*x/180.)*cos(pi*y/180.)',
help='Stream function of x (longitude in deg) and y (latitude in deg) used for setting the edge integrals')
args = parser.parse_args()
# check
if not args.grid_file:
print("ERROR: must specify grid file (-o)")
sys.exit(1)
try:
grid_file, mesh_name = args.grid_file.split(':')
except:
print("ERROR: grid file must be in the form 'FILENAME:MESHNAME'")
sys.exit(2)
nx, ny = args.nx, args.ny
nc = netCDF4.Dataset(grid_file, 'w')
nnodes = (nx + 1) * (ny + 1)
nedges = nx * (ny + 1) + (nx + 1) * ny
nfaces = nx * ny
nnodesId = nc.createDimension('nnodes', nnodes)
nedgesId = nc.createDimension('nedges', nedges)
nfacesId = nc.createDimension('nfaces', nfaces)
fourId = nc.createDimension('four', 4)
twoId = nc.createDimension('two', 2)
mesh = nc.createVariable(mesh_name, "int", [])
mesh.cf_role = 'mesh_topology'
mesh.topology_dimension = 2
mesh.node_coordinates = 'lon lat'
mesh.face_node_connectivity = 'face_node'
mesh.face_edge_connectivity = 'face_edge'
mesh.edge_node_connectivity = 'edge_node'
faceNodeConn = nc.createVariable("face_node", "int64", ("nfaces", "four"))
faceNodeConn.cf_role = "face_node_connectivity"
faceNodeConn.start_index = 0
faceEdgeConn = nc.createVariable("face_edge", "int64", ("nfaces", "four"))
faceEdgeConn.cf_role = "face_edge_connectivity"
faceEdgeConn.start_index = 0
edgeNodeConn = nc.createVariable("edge_node", "int64", ("nedges", "two"))
edgeNodeConn.cf_role = "edge_node_connectivity"
edgeNodeConn.start_index = 0
xvar = nc.createVariable("lon", "float64", ("nnodes",))
xvar.standard_name = "longitude"
xvar.units = "degrees_east"
yvar = nc.createVariable("lat", "float64", ("nnodes",))
yvar.standard_name = "latitude"
yvar.units = "degrees_north"
edge_integrated_velocity = nc.createVariable('edge_integrated_velocity', 'float64', ('nedges',))
edge_integrated_velocity.mesh = mesh_name
edge_integrated_velocity.location = 'edge'
streamfunction = nc.createVariable('streamfunction', 'float64', ('nnodes',))
streamfunction.mesh = mesh_name
streamfunction.location = 'node'
# set the lats/lons and the stream function
lats = numpy.zeros((nnodes,), numpy.float64)
lons = numpy.zeros((nnodes,), numpy.float64)
dlat, dlon = 180./float(ny), 360.0/float(nx)
point_data = numpy.zeros((nnodes,), numpy.float64)
for j in range(ny + 1):
for i in range(nx + 1):
index = i + (nx + 1)*j
lons[index] = 0.0 + i*dlon
lats[index] = -90.0 + j*dlat
x, y = lons[index], lats[index]
point_data[index] = eval(args.stream_funct)
xvar[:] = lons
yvar[:] = lats
streamfunction[:] = point_data
# face-node connectivity
fn = numpy.zeros((nfaces, 4), numpy.int64)
count = 0
for j in range(ny):
for i in range(nx):
i00 = i + 0 + (nx + 1)*(j + 0)
i10 = i + 1 + (nx + 1)*(j + 0)
i11 = i + 1 + (nx + 1)*(j + 1)
i01 = i + 0 + (nx + 1)*(j + 1)
fn[count, :] = i00, i10, i11, i01
count += 1
faceNodeConn[...] = fn
# edge-node connectivity
en = numpy.zeros((nedges, 2), numpy.int64)
edge_data = numpy.zeros((nedges,), numpy.float64)
# x edges
count = 0
for j in range(ny + 1):
for i in range(nx):
i00 = i + 0 + (nx + 1)*(j + 0)
i10 = i + 1 + (nx + 1)*(j + 0)
en[count, :] = i00, i10
x, y = lons[i00], lats[i00]
s00 = eval(args.stream_funct)
x, y = lons[i10], lats[i10]
s10 = eval(args.stream_funct)
edge_data[count] = s10 - s00
count += 1
# y edges
for j in range(ny):
for i in range(nx + 1):
i00 = i + 0 + (nx + 1)*(j + 0)
i01 = i + 0 + (nx + 1)*(j + 1)
en[count, :] = i00, i01
x, y = lons[i00], lats[i00]
s00 = eval(args.stream_funct)
x, y = lons[i01], lats[i01]
s01 = eval(args.stream_funct)
edge_data[count] = s01 - s00
count += 1
edgeNodeConn[...] = en
edge_integrated_velocity[:] = edge_data
# face-edge connectivity
fe = numpy.zeros((nfaces, 4), numpy.int64)
count = 0
for j in range(ny):
for i in range(nx):
is0 = 0 + i + 0 + (nx + 0)*(j + 0)
is1 = 0 + i + 0 + (nx + 0)*(j + 1)
i0s = nx*(ny + 1) + i + 0 + (nx + 1)*(j + 0)
i1s = nx*(ny + 1) + i + 1 + (nx + 1)*(j + 0)
fe[count, :] = is0, i1s, is1, i0s
count += 1
faceEdgeConn[...] = fe
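# Example invocation (the options are those defined by the argparse parser above;
# the output file and mesh names are arbitrary):
#
#   python generate_latlon_ugrid.py -o latlon.nc:physics -nx 36 -ny 18 \
#       -s 'sin(pi*x/180.)*cos(pi*y/180.)'
#
# Each edge value stored in edge_integrated_velocity is the difference of the
# stream function between the edge's end points, as computed in the loops above.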
| 5,104 |
splunk-kafka-connector.py
|
CptOfEvilMinions/KSQL-Osquery-Zeek
| 2 |
2025095
|
"""
Author: <NAME>
"""
import splunklib.client as client
from datetime import datetime
import argparse
import requests
import json
import sys
import yaml
import urllib3
urllib3.disable_warnings()
class App:
def __init__(self, config):
# Splunk host info
self.splunk_external_url = config['splunk']['external_url']
self.splunk_docker_hec_url = config['splunk']['docker_hec_url']
# Splunk credentials
self.splunk_username = config['splunk']['username']
self.splunk_password = config['splunk']['password']
# Splunk index and connectors
self.splunk_index_name = config['splunk']['index_name']
self.splunk_connector_name = config['splunk']['index_name'] + "-" + config['splunk']['hec_base_name']
self.splunk_hec_token = None
# Kafka
self.kafka_connect_url = config['kafka']['connect_extenral_url']
# self.kafka_connect_hostname = config['kafka']['connect_external_hostname']
# self.kafka_connect_port = config['kafka']['connect_external_port']
self.kafak_topics_list = config['kafka']['topics']
# Verify
self.verify = config['ssl']['verify']
        # Splunk service
self.service = client.connect(
host=self.splunk_external_url.split('//')[1].split(':')[0],
port=self.splunk_external_url.split('//')[1].split(':')[1],
username=self.splunk_username,
password=<PASSWORD>,
scheme=self.splunk_external_url.split('//')[0][:-1]
)
def list_kafka_splunk_connectors(self):
headers = {
"Content-Type": "application/json"
}
print (self.kafka_connect_url)
r = requests.get(self.kafka_connect_url, headers=headers, verify=self.verify)
if r.status_code == 200:
print (r.json())
else:
print ( f"[-] - {datetime.now()} - Unable to get list of Kafka connectors" )
print (r.text)
def delete_kafka_splunk_connector(self, splunk_connector_name):
headers = {
"Content-Type": "application/json"
}
kafka_connect_delete_url = self.kafka_connect_url + "/" + splunk_connector_name
r = requests.delete(kafka_connect_delete_url, headers=headers, verify=self.verify)
print (r.status_code)
if r.status_code == 204:
print ( f"[+] - {datetime.now()} - Deleted connector between Splunk and Kafka for {splunk_connector_name}" )
else:
print ( f"[-] - {datetime.now()} - DID NOT DELETE connector between Splunk and Kafka for {splunk_connector_name}" )
print (r.text)
def create_kafka_splunk_connector(self):
headers = {
"Content-Type": "application/json"
}
print (self.kafak_topics_list)
json_data = {
"name": f"{self.splunk_connector_name}",
"config": {
"connector.class": "com.splunk.kafka.connect.SplunkSinkConnector",
"tasks.max": "10",
"topics": f"{self.kafak_topics_list}",
"splunk.hec.uri": f"{self.splunk_docker_hec_url}",
"splunk.hec.token": f"{self.splunk_hec_token}",
"splunk.hec.ack.enabled": "true",
"splunk.hec.raw": "false",
"splunk.hec.track.data": "true",
"splunk.hec.ssl.validate.certs": f"{str(self.verify).lower()}"
}
}
r = requests.post(self.kafka_connect_url, headers=headers, data=json.dumps(json_data), verify=self.verify)
if r.status_code == 201:
print ( f"[+] - {datetime.now()} - Instantiated connector between Splunk and Kafka for {self.kafak_topics_list}" )
elif r.status_code == 409:
print ( f"[+] - {datetime.now()} - Connector between Splunk and Kafka already exists for {self.kafak_topics_list}" )
else:
print ( f"[+] - {datetime.now()} - Did NOT create connector between Splunk and Kafka for {self.kafak_topics_list}" )
print (r.text)
def check_hec_collector(self):
"""
Since the Splunk SDK for Python doesn't support HEC I had to use raw queries
Input: Splunk connection info, Splunk credentials
Output: If Splunk HEC event collector exists set the variable - Return nothing
"""
# Set output mode to json
params = (('output_mode', 'json'),)
splunkl_url_create_hec_token = f"{self.splunk_external_url}/servicesNS/nobody/system/data/inputs/http/"
r = requests.get(url=splunkl_url_create_hec_token, params=params, auth=(self.splunk_username, self.splunk_password), verify=self.verify)
for hec_event_collector in r.json()["entry"]:
if hec_event_collector["name"].split("//")[1] == self.splunk_connector_name:
print ( f"[*] - {datetime.now()} - Did NOT created Splunk HEC token for Kafka-splunk-connector cause it already exists" )
self.splunk_hec_token = hec_event_collector["content"]["token"]
def create_splunk_hec_token(self):
"""
Since the Splunk SDK for Python doesn't support HEC I had to use raw queries
Input: Splunk connection info, Splunk credentials, index name, and connector name
        Output: Nothing
"""
        # Check whether the Splunk HEC event collector already exists
self.check_hec_collector()
if self.splunk_hec_token is None:
# Set output mode to json
params = (('output_mode', 'json'),)
data = {
"name": f"{self.splunk_connector_name}",
"index": f"{self.splunk_index_name}",
"useACK": 1
}
params = (('output_mode', 'json'),)
splunkl_url_create_hec_token = f"{self.splunk_external_url}/servicesNS/nobody/system/data/inputs/http/"
r = requests.post(url=splunkl_url_create_hec_token, params=params, data=data, auth=(self.splunk_username, self.splunk_password), verify=False)
if r.status_code == 201:
print ( "[+] - {0} - Created Splunk HEC token for Kafka-splunk-connector: {1}".format( datetime.now(), r.json()["entry"][0]["content"]["token"] ))
self.splunk_hec_token = r.json()["entry"][0]["content"]["token"]
else:
print ( f"[-] - {datetime.now()} - Did NOT created Splunk HEC token for Kafka-splunk-connector" )
print (r.text)
def get_splunk_index_list(self):
"""
https://github.com/splunk/splunk-sdk-python/blob/master/examples/index.py
https://docs.splunk.com/DocumentationStatic/PythonSDK/1.6.5/client.html#splunklib.client.Indexes.delete
https://docs.splunk.com/Documentation/Splunk/8.0.3/Search/ExportdatausingSDKs
https://www.tutorialspoint.com/python/string_startswith.htm
Input: Splunk service connector
Output: List of Splunk indexes
"""
indexes = self.service.indexes
index_list = [ index.name for index in indexes if not index.name.startswith("_") ]
return index_list
def create_splunk_index(self, index_name=None):
"""
https://dev.splunk.com/enterprise/docs/python/sdk-python/howtousesplunkpython/howtogetdatapython/#To-create-a-new-index
Input: Splunk service connector, Splunk index list, new Splunk index name
        Output: None - creates the index if it does not already exist
"""
# Override the index name in config
if index_name is not None:
self.splunk_index_name = index_name
splunk_index_list = self.get_splunk_index_list()
if self.splunk_index_name not in splunk_index_list:
mynewindex = self.service.indexes.create(self.splunk_index_name)
print (f"[+] - {datetime.now()} - Created {self.splunk_index_name} index")
else:
print (f"[*] - {datetime.now()} - Index {self.splunk_index_name} already exists, skipping")
if __name__ == "__main__":
# Read variables from config
config = None
with open('conf/python/config.yml') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
# Argparser
my_parser = argparse.ArgumentParser()
my_parser.add_argument('--create_splunk_index', action='store', type=str, help='Create splunk index')
my_parser.add_argument('--create_splunk_hec_token', action='store_true', help='Create splunk HEC input')
my_parser.add_argument('--create_kafka_splunk_connector', action='store_true', help='Create splunk index')
my_parser.add_argument('--delete_kafka_splunk_connector', action='store', type=str, help='Create splunk index')
my_parser.add_argument('--list_kafka_splunk_connectors', action='store_true', help='List Kafka Connectors')
my_parser.add_argument('--all', action='store_true', help='Create Splunk index, Create Splunk HEC token, Create Kafka Splunk connector')
args = my_parser.parse_args()
    # Init the class with config values
app = App(config)
if args.all:
app.create_splunk_index() # Create splunk index
        app.create_splunk_hec_token() # Create HEC token
app.create_kafka_splunk_connector() # Create Kafka Splunk connector
# Create Splunk index
if args.create_splunk_index:
app.create_splunk_index(args.create_splunk_index)
# Create Splunk HEC token
if args.create_splunk_hec_token:
app.create_splunk_hec_token()
# Create Kafka Splunk connector
if args.create_kafka_splunk_connector:
app.create_kafka_splunk_connector()
# List of Kafka connectors
if args.list_kafka_splunk_connectors:
app.list_kafka_splunk_connectors()
# Delete Kafka Splunk connector
if args.delete_kafka_splunk_connector:
app.delete_kafka_splunk_connector(args.delete_kafka_splunk_connector)
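# A hypothetical conf/python/config.yml matching the keys read in App.__init__
# (host names, credentials, index and topic names below are placeholders only):
#
# splunk:
#   external_url: https://splunk.example.com:8089
#   docker_hec_url: https://splunk:8088
#   username: admin
#   password: changeme
#   index_name: osquery
#   hec_base_name: kafka-connector
# kafka:
#   connect_extenral_url: http://kafka-connect.example.com:8083/connectors
#   topics: osquery,zeek
# ssl:
#   verify: false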
| 9,911 |
napari/_qt/widgets/qt_action_context_menu.py
|
marlene09/napari
| 0 |
2025349
|
from __future__ import annotations
from typing import TYPE_CHECKING, Dict
from qtpy.QtWidgets import QMenu
if TYPE_CHECKING:
from qtpy.QtWidgets import QAction
from ...layers._layer_actions import ActionOrSeparator
class QtActionContextMenu(QMenu):
"""Makes a QMenu for a dict of `ContextActions`.
`ContextActions` are just dicts with the following keys:
description: str - the text for the menu item
action: Callable - a callback when the item is selected
enable_when: str - an expression that will be evaluated with the
namespace of some context. If True, the menu item is enabled.
show_when: str|None - an expression that will be evaluated with the
namespace of some context. If True, the menu item is visible.
If no show_when key is provided, the menu item is visible.
Parameters
----------
actions : Dict[str, ContextAction]
An (ordered) mapping of name -> `ContextActions`. Menu items will be
added in order of the keys in the mapping. To add a separator to the
        menu, add any key with an empty dict (or other falsy value). The key
itself doesn't matter.
parent : QWidget, optional
Parent widget, by default None
Examples
--------
Start with an actions dict to populate the menu:
>>> ACTIONS = {
... 'add_one': {
... 'description': 'Add one',
... 'action': lambda x: x.append(1),
... 'enable_when': 'count == 0 and is_ready',
... },
... }
>>> menu = QtActionContextMenu(ACTIONS)
call menu.update_from_context to update the menu state:
>>> menu.update_from_context({'count': 0, 'is_ready': True})
>>> menu._menu_actions['add_one'].isEnabled()
True
We directly created the dict above, but a mapping of
{key -> callable(obj)} is a good way to (re)create context
dicts for an object that changes over time, like `my_list`:
>>> my_list = [42]
>>> CONTEXT_KEYS = {
... 'count': lambda x: len(x),
... 'is_ready': lambda x: True,
... }
>>> ctx = {k: v(my_list) for k, v in CONTEXT_KEYS.items()}
>>> ctx
{'count': 1, 'is_ready': True}
Use the context dict to update the menu. Here, because count != 0,
`add_one` becomes disabled
>>> menu.update_from_context(ctx)
>>> menu._menu_actions['add_one'].isEnabled()
False
"""
def __init__(self, actions: Dict[str, ActionOrSeparator], parent=None):
super().__init__(parent)
self._actions = actions
self._menu_actions: Dict[str, QAction] = {}
for name, d in actions.items():
if not d:
self.addSeparator()
else:
self._menu_actions[name] = self.addAction(d['description'])
self._menu_actions[name].setData(d['action'])
def update_from_context(self, ctx: dict) -> None:
"""Update the enabled/visible state of each menu item with `ctx`.
        `ctx` is a namespace dict that will be used to `eval()` the
`'enable_when'` and `'show_when'` expressions provided for each action
in the menu. *ALL variables used in these expressions must either be
present in the `ctx` dict, or be builtins*.
"""
for name, menu_item in self._menu_actions.items():
d = self._actions[name]
enabled = eval(d['enable_when'], {}, ctx)
menu_item.setEnabled(enabled)
visible = d.get("show_when")
if visible:
menu_item.setVisible(eval(visible, {}, ctx))
| 3,629 |
cipher/asymmetric/__init__.py
|
yanjingtui/Kasumi-cypher
| 0 |
2024752
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# __init__.py
from . import elGamal as elG
from . import diffieHellman as dH
from . import certificate as ca
| 158 |
examples/yubiauth_server.py
|
frankcash/python-u2flib-server
| 1 |
2025082
|
#!/usr/bin/env python
# Copyright (c) 2013 Yubico AB
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Example web server providing U2F enrollment and authentication. It can be run
standalone, or by a WSGI container such as Apache with mod_wsgi.
A YubiAuth installation is required to store users and their enrollment data.
Enrollment will overwrite existing users. All users will have a u2f_ prefix
added to their usernames.
Any error will be returned as a stacktrace with a 400 response code.
Note that this is intended for test/demo purposes, not production use!
"""
from yubiauth import YubiAuth
from u2flib_server.u2f_v2 import (start_register, complete_register,
start_authenticate, verify_authenticate)
from webob.dec import wsgify
from webob import exc
import json
import traceback
def get_origin(environ):
if environ.get('HTTP_HOST'):
host = environ['HTTP_HOST']
else:
host = environ['SERVER_NAME']
if environ['wsgi.url_scheme'] == 'https':
if environ['SERVER_PORT'] != '443':
host += ':' + environ['SERVER_PORT']
else:
if environ['SERVER_PORT'] != '80':
host += ':' + environ['SERVER_PORT']
return '%s://%s' % (environ['wsgi.url_scheme'], host)
class U2FServer(object):
"""
Very basic server providing a REST API to enroll a U2F device with
a YubiAuth user, and to perform a sign with the enrolled device.
Only one device per uses is supported, and only one challenge is valid
at a time.
Four calls are provided: enroll, bind, sign and verify. Each of these
expects username and password parameters, and bind and verify expect a
third parameter, data, containing the JSON formatted data which is output
by the U2F browser API upon calling the ENROLL or SIGN commands.
"""
@wsgify
def __call__(self, request):
self.origin = get_origin(request.environ)
self.app_id = self.origin
page = request.path_info_pop()
# To be able to see what the server considers its origin to be:
if page == 'origin':
return self.origin
elif page is None:
return json.dumps([self.origin])
with YubiAuth() as auth:
try:
username = 'u2f_' + request.params['username']
password = request.params['password']
data = request.params.get('data', None)
self.auth = auth
if page == 'enroll':
return self.enroll(username, password)
elif page == 'bind':
return self.bind(username, password, data)
elif page == 'sign':
return self.sign(username, password)
elif page == 'verify':
return self.verify(username, password, data)
else:
raise exc.HTTPNotFound()
except Exception:
return exc.HTTPBadRequest(comment=traceback.format_exc())
def enroll(self, username, password):
try:
user = self.auth.get_user(username)
user.set_password(password)
except:
user = self.auth.create_user(username, password)
enroll = start_register(self.app_id)
user.attributes['_u2f_enroll_'] = enroll.json
return enroll.json
def bind(self, username, password, data):
user = self._get_user(username, password)
enroll = user.attributes['_u2f_enroll_']
binding, cert = complete_register(enroll, data, [self.origin])
user.attributes['_u2f_binding_'] = binding.json
user.attributes['_u2f_cert_'] = cert.as_pem()
return json.dumps({
'username': username[4:],
'origin': self.origin,
'attest_cert': cert.as_pem()
})
def sign(self, username, password):
user = self._get_user(username, password)
binding = user.attributes['_u2f_binding_']
challenge = start_authenticate(binding)
user.attributes['_u2f_challenge_'] = challenge.json
return challenge.json
def verify(self, username, password, data):
user = self._get_user(username, password)
binding = user.attributes['_u2f_binding_']
challenge = user.attributes['_u2f_challenge_']
c, t = verify_authenticate(binding, challenge, data, [self.origin])
return json.dumps({
'touch': t,
'counter': c
})
def _get_user(self, username, password):
user = self.auth.get_user(username)
if not user.validate_password(password):
raise ValueError('Invalid password!')
return user
application = U2FServer()
if __name__ == '__main__':
from wsgiref.simple_server import make_server
httpd = make_server('0.0.0.0', 8081, application)
httpd.serve_forever()
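# Example enrollment call against a locally running instance (the parameter names
# are those read from request.params above; the values are placeholders):
#
#   curl -d 'username=alice' -d 'password=secret' http://localhost:8081/enroll
#
# The response is the registration request JSON to pass to the U2F browser API;
# the browser's output is then POSTed back to /bind as the 'data' parameter.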
| 6,230 |