max_stars_repo_path (stringlengths 4-182) | max_stars_repo_name (stringlengths 6-116) | max_stars_count (int64 0-191k) | id (stringlengths 7-7) | content (stringlengths 100-10k) | size (int64 100-10k) |
---|---|---|---|---|---|
grype/db/test-fixtures/tls/serve.py
|
6un9-h0-Dan/grype
| 2 |
2169331
|
from http.server import HTTPServer, SimpleHTTPRequestHandler
import ssl
import logging
port = 443
directory = "www"
class Handler(SimpleHTTPRequestHandler):
def __init__(self, *args, **kwargs):
super().__init__(*args, directory=directory, **kwargs)
def do_GET(self):
logging.error(self.headers)
SimpleHTTPRequestHandler.do_GET(self)
httpd = HTTPServer(('0.0.0.0', port), Handler)
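# Wrap the HTTP server's socket in TLS: disable TLS 1.0/1.1 and load the local server.crt/server.key pair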
sslctx = ssl.SSLContext()
sslctx.options |= ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1
sslctx.load_cert_chain(certfile='server.crt', keyfile="server.key")
httpd.socket = sslctx.wrap_socket(httpd.socket, server_side=True)
print(f"Server running on https://0.0.0.0:{port}")
httpd.serve_forever()
| 705 |
tests/parsing/flow.py
|
felix-hilden/pyfactor
| 17 |
2169049
|
from ._util import refs_equal, refs_in
class TestFlow:
@refs_equal
def test_if_test_comprehension_shadows(self):
source = 'a = 1\nif {a for a in range(2)}:\n b = 2'
refs = [('a', set()), ('b', set())]
return source, refs
@refs_equal
def test_if_test_comprehension_uses(self):
source = 'a = 1\nif {a for i in range(2)}:\n b = 2'
refs = [('a', set()), ('b', {'a'})]
return source, refs
@refs_equal
def test_if_test_propagated_to_body(self):
source = 'a = 1\nif a:\n b = 2'
refs = [('a', set()), ('b', {'a'})]
return source, refs
@refs_equal
def test_if_test_propagated_to_else(self):
source = 'a = 1\nif a:\n pass\nelse:\n b = 2'
refs = [('a', set()), ('b', {'a'})]
return source, refs
@refs_equal
def test_elif_test_propagated_to_body(self):
source = """
a = 1
b = 2
if a:
pass
elif b:
c = 3
"""
refs = [('a', set()), ('b', set()), ('c', {'a', 'b'})]
return source, refs
@refs_equal
def test_elif_test_propagated_to_else(self):
source = """
a = 1
b = 2
if a:
pass
elif b:
pass
else:
c = 3
"""
refs = [('a', set()), ('b', set()), ('c', {'a', 'b'})]
return source, refs
@refs_equal
def test_with_const(self):
source = 'with 1:\n pass'
refs = []
return source, refs
@refs_equal
def test_with_assigns_name(self):
source = 'with 1 as a:\n pass'
refs = [('a', set())]
return source, refs
@refs_equal
def test_with_assigns_names(self):
source = 'with 1 as a, 2 as b:\n pass'
refs = [('a', set()), ('b', set())]
return source, refs
@refs_equal
def test_with_assigns_name_using_var(self):
source = 'a = 1\nwith a as b:\n pass'
refs = [('a', set()), ('b', {'a'})]
return source, refs
@refs_equal
def test_with_assigns_nested_names(self):
source = 'with 1 as (a, (b, c)):\n pass'
refs = [('a', set()), ('b', set()), ('c', set())]
return source, refs
@refs_equal
def test_with_name_not_propagated_forward(self):
source = 'with 1 as a:\n b = 1'
refs = [('a', set()), ('b', set())]
return source, refs
@refs_equal
def test_try(self):
source = """
try:
a = 1
except:
b = 2
else:
c = 3
finally:
d = 4
"""
refs = [('a', set()), ('b', set()), ('c', set()), ('d', set())]
return source, refs
@refs_equal
def test_try_handler_propagated_forward(self):
source = 'a = 1\ntry:\n pass\nexcept a:\n b = 1'
refs = [('a', set()), ('b', {'a'})]
return source, refs
@refs_equal
def test_try_handler_as_tuple(self):
source = 'a = 1\ntry:\n pass\nexcept (a, ValueError):\n b = 1'
refs = [('a', set()), ('b', {'a'})]
return source, refs
@refs_equal
def test_while(self):
source = 'while True:\n a = 1'
refs = [('a', set())]
return source, refs
@refs_equal
def test_while_test_propagated_forward(self):
source = 'a = 1\nwhile a:\n b = 2\nelse:\n c = 3'
refs = [('a', set()), ('b', {'a'}), ('c', {'a'})]
return source, refs
@refs_equal
def test_for_assigns(self):
source = 'for a in range(3):\n pass'
refs = [('a', set())]
return source, refs
@refs_equal
def test_for_iter_uses_var(self):
source = 'a = 1\nfor b in range(a):\n pass'
refs = [('a', set()), ('b', {'a'})]
return source, refs
@refs_in
def test_for_nested_assign(self):
source = 'for a, (b, c) in range(3):\n pass'
refs = [('a', set()), ('b', set()), ('c', set())]
return source, refs
@refs_equal
def test_for_iter_propagated_forward(self):
source = 'a = 1\nfor b in range(a):\n c = 3\nelse: d = 4'
refs = [('a', set()), ('b', {'a'}), ('c', {'a'}), ('d', {'a'})]
return source, refs
| 4,053 |
adsocket/core/permissions.py
|
AwesomeDevelopersUG/adsocket
| 0 |
2171085
|
import abc
class Permission(abc.ABC):
"""
Base permission class. All other permissions must be instances of this class.
"""
async def can_join(self, channel, client, message):
pass
async def can_write(self, channel, client, message):
pass
class DummyPermission(Permission):
"""
Dummy permission that simply answers True to all calls.
"""
async def can_join(self, channel, client, message):
return True
async def can_write(self, channel, client, message):
return True
class IsAuthenticatedPermission(Permission):
"""
Check whether the client is authenticated, nothing else.
"""
async def can_join(self, channel, client, message):
return client.is_authenticated()
| 751 |
Code/25.Notsharp.py
|
Olvi73/Python
| 1 |
2170826
|
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 14 19:04:17 2020
@author: Administrator
"""
import re
myfile=open('sharp.txt',encoding='utf-8')
txt=myfile.read()
print('Content before modification:')
print(txt,'\n')
n=txt.count('#')
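# find() strips one '#' comment (through the end of its line) per call and recurses n times,
# where n is the number of '#' characters counted above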
def find(str,n):
out=str
if(re.search('#',str)):
rs=re.search('#.*(\n|.)',str).group()
out=str.replace(rs,'')
if(n!=0):
return find(out,n-1)
return out
txt=find(txt,n)
print('Content after modification:')
print(txt)
| 469 |
output_parsers/tabdelim_csv_caselist.py
|
gcampuzano14/PathISTabs
| 1 |
2169997
|
import re
import os
import csv
from nltk import *
from nltk.tag import *
from nltk.chunk import *
from nltk.corpus import treebank
# input: TABDELIMITED FILE WITH ROWS BY CASE NUMBER
# output: CSV FILE WITH LIST OF CASES
def main():
dxlist = []
data = csv.DictReader(open('full_mds_tab.txt', 'r'), delimiter="\t")
output_file = 'out.csv'
with open(output_file, 'wb') as csvfile:
result_writer = csv.writer(csvfile)
for element in data:
dxstr = element['DIAGNOSIS'].lower()
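# Collect every clause mentioning "myelodysplastic syndrome" (delimited by sentence punctuation),
# join the matches, then strip punctuation before appending to the case list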
all_instances = re.findall('[^\.!?:;]*myelodysplastic\s+syndrome[^\.!?:;]*[\.!?:;]', dxstr, re.S)
outdxstr = "__________".join(all_instances)
punc = re.compile("[,\.\/;'!\?&\-_]")
strp = punc.sub(" ", outdxstr)
dxlist.append(strp)
outdxlist = [element['SURGINAL_NUMBER'],element['ACCESS_DATE'],outdxstr]
result_writer.writerow(outdxlist)
return dxlist
dxlist = main()
for e in dxlist:
tokens = word_tokenize(str(e))
print(tokens)
tagged = pos_tag(tokens)
entities = chunk.ne_chunk(tagged)
print(entities)
t = treebank.parsed_sents(entities)[0]
t.draw()
| 1,193 |
aws/video-rekognition.py
|
escofresco/tinydoor
| 4 |
2170091
|
# import os
# import boto3
# import json
# import sys
# import time
#
# AWS_ACCESS_KEY_ID = os.getenv("AWS_ACCESS_KEY_ID")
# AWS_SECRET_ACCESS_KEY = os.getenv("AWS_SECRET_ACCESS_KEY")
# region_name = "us-west-1"
#
#
# class VideoDetect:
# """Analyze videos using Rekognition Video API."""
#
# rek = boto3.client("rekognition", region_name)
# sqs = boto3.client("sqs", region_name)
# sns = boto3.client("sns", region_name)
# startJobId = ""
# queueUrl = ""
# snsTopicArn = ""
# processType = ""
#
# def __init__(self, role, bucket, video):
# self.roleArn = role
# self.bucket = bucket
# self.video = video
#
# def GetResultsFaces(self, jobId):
# """
# Return an array of detected faces (Faces) sorted by the time the faces were detected.
# Get the results of face detection by calling get_face_detection().
#
# Expected output:
# Emotions: [
# {'Type': string, 'Confidence': number},
# ]
# """
# maxResults = 30
# paginationToken = ""
# finished = False
#
# while finished == False:
# response = self.rek.get_face_detection(
# JobId=jobId, MaxResults=maxResults, NextToken=paginationToken
# )
#
# for faceDetection in response["Faces"]:
# max = faceDetection["Face"]["Emotions"][0]
# for emotion in faceDetection["Face"]["Emotions"]:
# if emotion["Confidence"] > max["Confidence"]:
# max = emotion
# print(max)
# print()
#
# if "NextToken" in response:
# paginationToken = response["NextToken"]
# else:
# finished = True
#
# def GetResultsPersons(self, jobId):
# """Get person tracking information by calling get_person_tracking()."""
# maxResults = 30
# paginationToken = ""
# finished = False
#
# while finished is False:
# response = self.rek.get_person_tracking(
# JobId=jobId, MaxResults=maxResults, NextToken=paginationToken
# )
#
# print(response["VideoMetadata"]["Codec"])
# print(str(response["VideoMetadata"]["DurationMillis"]))
# print(response["VideoMetadata"]["Format"])
# print(response["VideoMetadata"]["FrameRate"])
#
# for personDetection in response["Persons"]:
# print("Index: " + str(personDetection["Person"]["Index"]))
# print("Timestamp: " + str(personDetection["Timestamp"]))
# print()
#
# if "NextToken" in response:
# paginationToken = response["NextToken"]
# else:
# finished = True
#
# def CreateTopicandQueue(self):
# """Create a topic to which notifications can be published."""
# millis = str(int(round(time.time() * 1000)))
#
# # Create SNS topic
# snsTopicName = "AmazonRekognition-TinyDoor" + millis
#
# topicResponse = self.sns.create_topic(Name=snsTopicName)
# self.snsTopicArn = topicResponse["TopicArn"]
#
# # create SQS queue
# sqsQueueName = "AmazonRekognitionQueue" + millis
# self.sqs.create_queue(QueueName=sqsQueueName)
# self.queueUrl = self.sqs.get_queue_url(QueueName=sqsQueueName)["QueueUrl"]
#
# attribs = self.sqs.get_queue_attributes(
# QueueUrl=self.queueUrl, AttributeNames=["QueueArn"]
# )["Attributes"]
#
# sqsQueueArn = attribs["QueueArn"]
#
# # Subscribe SQS queue to SNS topic
# self.sns.subscribe(
# TopicArn=self.snsTopicArn, Protocol="sqs", Endpoint=sqsQueueArn
# )
#
# # Authorize SNS to write SQS queue
# policy = """{{
# "Version":"2012-10-17",
# "Statement":[
# {{
# "Sid":"MyPolicy",
# "Effect":"Allow",
# "Principal" : {{"AWS" : "*"}},
# "Action":"SQS:SendMessage",
# "Resource": "{}",
# "Condition":{{
# "ArnEquals":{{
# "aws:SourceArn": "{}"
# }}
# }}
# }}
# ]
# }}""".format(
# sqsQueueArn, self.snsTopicArn
# )
#
# response = self.sqs.set_queue_attributes(
# QueueUrl=self.queueUrl, Attributes={"Policy": policy}
# )
#
# def DeleteTopicandQueue(self):
# """Deletes a topic and all its subscriptions."""
# self.sqs.delete_queue(QueueUrl=self.queueUrl)
# self.sns.delete_topic(TopicArn=self.snsTopicArn)
#
# def main(self):
# """
# Start analysis of video in specified bucket.
# Face detection is started by a call to start_face_detection.
# """
# jobFound = False
# response = self.rek.start_face_detection(
# Video={"S3Object": {"Bucket": self.bucket, "Name": self.video}},
# NotificationChannel={
# "RoleArn": self.roleArn,
# "SNSTopicArn": self.snsTopicArn,
# },
# FaceAttributes="ALL",
# )
#
# # response = self.rek.start_person_tracking(Video={'S3Object':{'Bucket':self.bucket,'Name':self.video}},
# # NotificationChannel={'RoleArn':self.roleArn, 'SNSTopicArn':self.snsTopicArn})
#
# print("Start Job Id: " + response["JobId"])
# dotLine = 0
# while jobFound is False:
# sqsResponse = self.sqs.receive_message(
# QueueUrl=self.queueUrl,
# MessageAttributeNames=["ALL"],
# MaxNumberOfMessages=10,
# )
#
# if sqsResponse:
# if "Messages" not in sqsResponse:
# if dotLine < 20:
# print(".", end="")
# dotLine = dotLine + 1
# else:
# print()
# dotLine = 0
# sys.stdout.flush()
# continue
#
# for message in sqsResponse["Messages"]:
# notification = json.loads(message["Body"])
# rekMessage = json.loads(notification["Message"])
# print(rekMessage["JobId"])
# print(rekMessage["Status"])
# if str(rekMessage["JobId"]) == response["JobId"]:
# print("Matching Job Found:" + rekMessage["JobId"])
# jobFound = True
# self.GetResultsFaces(rekMessage["JobId"])
# self.sqs.delete_message(
# QueueUrl=self.queueUrl,
# ReceiptHandle=message["ReceiptHandle"],
# )
# else:
# print(
# "Job didn't match:"
# + str(rekMessage["JobId"])
# + " : "
# + str(response["JobId"])
# )
# # Delete the unknown message. Consider sending to dead letter queue
# self.sqs.delete_message(
# QueueUrl=self.queueUrl, ReceiptHandle=message["ReceiptHandle"]
# )
#
# print("done")
#
#
# if __name__ == "__main__":
# roleArn = "arn:aws:iam::623782584215:role/tinydoor-rekognition"
# bucket = "tinydoor-client-uploads"
# video = "emotion-test/Screen Recording 2020-06-28 at 12.52.49 PM.mov"
#
# analyzer = VideoDetect(roleArn, bucket, video)
# analyzer.CreateTopicandQueue()
# analyzer.main()
# analyzer.DeleteTopicandQueue()
| 7,905 |
16-Django_Level_Three/ProTwo_Practice/AppTwo/urls.py
|
andy2167565/Django-Bootcamp-Practice
| 0 |
2170348
|
from django.urls import path
from AppTwo import views
urlpatterns = [
path('', views.users, name='users')
]
| 119 |
2016/day_12.py
|
nabiirah/advent-of-code
| 24 |
2170796
|
""" Advent of Code Day 12 - <NAME>"""
from collections import defaultdict
with open('inputs/day_12.txt') as f:
instructions = [line.strip() for line in f.readlines()]
registers = defaultdict(int)
registers['c'] = 1 # Comment Out for Part One
i = 0
while i < len(instructions):
parse = instructions[i].split(' ')
if parse[0] == 'cpy':
if parse[1].isnumeric():
registers[parse[2]] = int(parse[1])
else:
registers[parse[2]] = registers[parse[1]]
elif parse[0] == 'inc':
registers[parse[1]] += 1
elif parse[0] == 'dec':
registers[parse[1]] -= 1
elif parse[0] == 'jnz':
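# jnz jumps by the given offset when its operand (literal or register) is non-zero;
# the -1 compensates for the unconditional i += 1 below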
if parse[1].isnumeric():
if int(parse[1]) != 0:  # compare the literal as an integer, not a string
i += int(parse[2]) - 1
elif registers[parse[1]] != 0:
i += int(parse[2]) - 1
i += 1
# Answer One / Answer Two
print("Register A:", registers['a'])
| 908 |
build/start/start/views.py
|
ItzProxy/CS207_Project
| 1 |
2170575
|
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def index(request):
return HttpResponse("Hello world, polls index here.")
| 178 |
app.py
|
SergeyMalyshevsky/Detection
| 0 |
2170881
|
import os
from flask import Flask, render_template, request
from werkzeug.utils import secure_filename
from detection import detect_people
app = Flask(__name__)
UPLOAD_FOLDER = './static/uploads'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
@app.route('/')
def main():
return render_template('./main.html')
@app.route('/uploader', methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST':
file = request.files['file']
if file:
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
return render_template('./show_image.html', image_file=filename)
@app.route('/detection', methods=['GET', 'POST'])
def detection():
if request.method == 'POST':
image_file = request.form['image_file']
if image_file:
filename = image_file
try:
detect_people(filename)
except Exception:
pass
return render_template('./result.html', image_file=filename)
if __name__ == '__main__':
app.run(debug=True)
| 1,121 |
JM_code/Step1Code.py
|
JM-Maynard/12stepsCFD
| 1 |
2170943
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 14 20:54:57 2018
@author: Joshua
This is Step 1 in the 12 steps to CFD code
"""
import numpy #here we load numpy
from matplotlib import pyplot #here we load matplotlib
import time, sys #and load some utilities
nx = 41 # try changing this number from 41 to 81 and Run All ... what happens?
dx = 2.0 / (nx-1)
nt = 50 #nt is the number of timesteps we want to calculate
dt = .025 #dt is the amount of time each timestep covers (delta t)
c = 1 #assume wave speed =1
#Creation of the initial condition
u = numpy.ones(nx) #numpy function ones()
u[int(.5 / dx):int(1 / dx + 1)] = 2 #setting u = 2 between 0.5 and 1 as per our I.C.s
print(u)
#Plotting initial conditions
pyplot.plot(numpy.linspace(0, 2, nx), u);
un = numpy.ones(nx) #initialize a temporary array
#Solution procedure
for n in range(nt): #loop for values of n from 0 to nt, so it will run nt times
un = u.copy() ##copy the existing values of u into un
for i in range(1, nx): ## you can try commenting this line and...
#for i in range(nx): ## ... uncommenting this line and see what happens!
u[i] = un[i] - c * dt / dx * (un[i] - un[i-1])
pyplot.plot(numpy.linspace(0, 2, nx), u);
| 1,355 |
evaluation/graphical_two_choice/create_statistics.py
|
varikakasandor/dissertation-balls-into-bins
| 0 |
2170666
|
from os.path import exists
import numpy as np
import pandas as pd
import scipy.stats as st
from helper.helper import flatten
from k_choice.graphical.two_choice.graphs.complete_graph import CompleteGraph
from k_choice.graphical.two_choice.graphs.cycle import Cycle
from k_choice.graphical.two_choice.graphs.hypercube import HyperCube
from k_choice.graphical.two_choice.strategies.full_knowledge_DQN_strategy import FullKnowledgeDQNStrategy
GMS = ((Cycle(4), 25), (HyperCube(4), 25), (CompleteGraph(4), 25),
(Cycle(16), 50), (HyperCube(16), 50), (CompleteGraph(16), 50),
(Cycle(32), 32), (HyperCube(32), 32), (CompleteGraph(32), 32))
STRATEGIES = ("greedy", "random", "local_reward_optimiser", "dp", "dqn")
def calculate_statistics(graph, m, strategy, alpha=0.95):
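# Returns (mean score, half-width of the alpha confidence interval) over the recorded runs,
# or (-1, -1) when no data file exists for this (graph, m, strategy) combination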
read_path = f"data/{graph.name}_{graph.n}_{m}_{strategy}.csv"
if exists(read_path):
df = pd.read_csv(read_path)
scores = df["score"].to_list()
scores = -np.array(scores[:100] * 5 if strategy == "blabla" else scores[-500:])
mean = np.mean(scores)
sem = st.sem(scores)
if sem > 0:
lower, upper = st.norm.interval(alpha=alpha, loc=mean, scale=sem)
return mean, (upper - lower) / 2
else:
return mean, 0
else:
return -1, -1
def create_csv(gms=GMS, strategies=STRATEGIES):
cols = flatten([[f"mean_{graph.name}_{graph.n}_{m}", f"confidence_{graph.name}_{graph.n}_{m}"] for graph, m in gms])
vals = []
for strategy in strategies:
row = []
for graph, m in gms:
mean, confidence = calculate_statistics(graph=graph, m=m, strategy=strategy)
row.extend([mean, confidence])
vals.append(row)
df = pd.DataFrame(data=vals, columns=cols, index=strategies)
output_path = f"data/comparison.csv"
df.to_csv(output_path)
return df
if __name__ == "__main__":
create_csv()
| 1,929 |
deployment/s3_folder_create.py
|
aws-samples/amazon-translate-json-document-translation
| 1 |
2170681
|
## Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
## SPDX-License-Identifier: MIT-0
import boto3
import logging
import json
import cfnresponse
s3Client = boto3.client('s3')
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
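# CloudFormation custom resource handler: create() seeds the bucket with the
# input/, output/, xmlin/ and xmlout/ key prefixes; update/delete are no-ops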
def create(properties, physical_id):
bucketName = properties['S3Bucket']
s3Client.put_object(Bucket=bucketName, Key=('input/'))
s3Client.put_object(Bucket=bucketName, Key=('output/'))
s3Client.put_object(Bucket=bucketName, Key=('xmlin/'))
s3Client.put_object(Bucket=bucketName, Key=('xmlout/'))
return cfnresponse.SUCCESS, physical_id
def update(properties, physical_id):
return cfnresponse.SUCCESS, None
def delete(properties, physical_id):
return cfnresponse.SUCCESS, None
def handler(event, context):
logger.info('Received event: %s' % json.dumps(event))
status = cfnresponse.FAILED
new_physical_id = None
try:
properties = event.get('ResourceProperties')
physical_id = event.get('PhysicalResourceId')
status, new_physical_id = {'Create': create, 'Update': update, 'Delete':
delete}.get(event['RequestType'], lambda x, y: (cfnresponse.FAILED,
None))(properties, physical_id)
except Exception as e:
logger.error('Exception:%s' % e)
status = cfnresponse.FAILED
finally:
cfnresponse.send(event, context, status, {}, new_physical_id)
| 1,519 |
workers/zip/__init__.py
|
ove/ove-asset-manager
| 0 |
2169244
|
import glob
import logging
import os
from tempfile import TemporaryDirectory, NamedTemporaryFile
from typing import Dict, List, Union, Set
from zipfile import ZipFile
from common.entities import OveAssetMeta
from common.util import append_slash
from workers.base import BaseWorker
class ZipWorker(BaseWorker):
def worker_type(self) -> str:
return "extract"
def extensions(self) -> List:
return [".zip"]
def description(self) -> str:
return "Extracts zip archives"
def docs(self) -> str:
return "ZipWorker.md"
def parameters(self) -> Dict:
return {
"schema": {
"type": "object",
"properties": {
"index_file": {
"type": "string",
"title": "Index File",
}
}
}
}
def process(self, project_id: str, filename: str, meta: OveAssetMeta, options: Dict):
logging.info("Copying %s/%s/%s into the temp place ...", project_id, meta.id, filename)
index_files = set()
if options.get("index_file", None):
index_files.add(options.get("index_file").strip().lower())
else:
index_files.update({"index.html", "index.htm", "index.js"})
with TemporaryDirectory() as folder:
with NamedTemporaryFile() as zip_file:
self._file_controller.download_asset(project_id=project_id, asset_id=meta.id, filename=filename, down_filename=zip_file.name)
ZipFile(zip_file.name).extractall(path=folder)
self._file_controller.upload_asset_folder(project_id=project_id, meta=meta, upload_folder=folder, worker_name=self.name)
meta_filename_name = os.path.splitext(os.path.basename(meta.filename))[0]
meta.index_file = meta.worker_root + self.name + "/" + meta_filename_name + "/" + _guess_index_file(folder, index_files=index_files)
self._file_controller.set_asset_meta(project_id, meta.id, meta)
logging.info("Finished unzipping %s/%s into the storage ...", project_id, meta.id)
def _guess_index_file(folder: str, index_files: Set[str]) -> Union[str, None]:
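# Default to the first regular (non-link, non-mount) file found; any later file whose name
# ends with one of the index_files suffixes takes precedence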
result = None
for filename in glob.iglob(append_slash(folder) + '**/*', recursive=True):
if not os.path.islink(filename) and not os.path.ismount(filename) and os.path.isfile(filename):
if not result:
result = filename[len(folder) + 1:]
elif any(filename.lower().endswith(suffix) for suffix in index_files):
result = filename[len(folder) + 1:]
return result or ""
| 2,672 |
metagraph/tests/translators/test_vector.py
|
eriknw/metagraph-1
| 0 |
2170769
|
from metagraph.tests.util import default_plugin_resolver
from metagraph.plugins.numpy.types import NumpyVector
from metagraph.plugins.graphblas.types import GrblasVectorType
import grblas
import numpy as np
def test_numpy_2_graphblas(default_plugin_resolver):
dpr = default_plugin_resolver
dense_array = np.array([0, 1.1, 0, 0, 4.4, 5.5, 6.6, 0])
missing_mask = dense_array == 0
x = NumpyVector(dense_array, mask=~missing_mask)
assert len(x) == 8
# Convert numpy -> grblas vector
intermediate = grblas.Vector.from_values([1, 4, 5, 6], [1.1, 4.4, 5.5, 6.6], size=8)
y = dpr.translate(x, GrblasVectorType)
dpr.assert_equal(y, intermediate)
# Convert numpy <- grblas vector
x2 = dpr.translate(y, NumpyVector)
dpr.assert_equal(x, x2)
| 780 |
Pyrado/scripts/deployment/run_experiment_wam.py
|
jacarvalho/SimuRLacra
| 0 |
2170929
|
""" Execute a trajectory on the real WAM using robcom's GoTo command
Dependencies:
https://git.ias.informatik.tu-darmstadt.de/robcom-2/robcom-2.0
Additional reading:
Ball-in-a-cup demo:
https://git.ias.informatik.tu-darmstadt.de/klink/ball-in-a-cup-demo/-/blob/master/bic-new.py
"""
import os.path as osp
import numpy as np
import robcom_python as r
from pyrado.logger.experiment import ask_for_experiment
from pyrado.utils.argparser import get_argparser
def run_direct_control(ex_dir, qpos_des, qvel_des):
def callback(jg, eg, data_provider):
nonlocal n
nonlocal time_step
nonlocal qpos
nonlocal qvel
if time_step >= n:
return True
dpos = qpos_des[time_step].tolist()
dvel = qvel_des[time_step].tolist()
pos = np.array(jg.get(r.JointState.POS))
vel = np.array(jg.get(r.JointState.VEL))
qpos.append(pos)
qvel.append(vel)
jg.set(r.JointDesState.POS, dpos)
jg.set(r.JointDesState.VEL, dvel)
time_step += 1
return False
# Connect to client
c = r.Client()
c.start('192.168.2.2', 2013) # IP address and port
print("Connected to client.")
# Reset the robot to the initial position
gt = c.create(r.Goto, "RIGHT_ARM", "")
gt.add_step(5.0, start_pos)
print("Moving to initial position")
gt.start()
gt.wait_for_completion()
print("Reached initial position")
# Read out some states
group = c.robot.get_group(["RIGHT_ARM"])
home_qpos = np.array(group.get(r.JointState.POS))
p_gains = np.array(group.get(r.JointState.P_GAIN))
d_gains = np.array(group.get(r.JointState.D_GAIN))
print("Initial (actual) qpos:", home_qpos)
print("P gain:", p_gains)
print("D gain:", d_gains)
input('Hit enter to continue.')
# Global callback attributes
n = qpos_des.shape[0]
time_step = 0
qpos = []
qvel = []
# Start the direct control
dc = c.create(r.ClosedLoopDirectControl, "RIGHT_ARM", "")
print("Executing trajectory")
dc.start(False, 1, callback, ['POS', 'VEL'], [], [])
dc.wait_for_completion()
print("Finished execution.")
print('Measured positions:', np.array(qpos).shape)
print('Measured velocities:', np.array(qvel).shape)
np.save(osp.join(ex_dir, 'qpos_real.npy'), qpos)
np.save(osp.join(ex_dir, 'qvel_real.npy'), qvel)
c.stop()
print('Connection closed.')
def run_goto(qpos_des, start_pos, dt):
# Connect to client
c = r.Client()
c.start('192.168.2.2', 2013) # IP address and port
print("Connected to client.")
# Reset the robot to the initial position
gt = c.create(r.Goto, "RIGHT_ARM", "")
gt.add_step(5.0, start_pos)
print("Moving to initial position")
gt.start()
gt.wait_for_completion()
print("Reached initial position")
group = c.robot.get_group(["RIGHT_ARM"])
home_qpos = np.array(group.get(r.JointState.POS))
print("Initial (actual) qpos:", home_qpos)
input('Hit enter to continue.')
gt = c.create(r.Goto, "RIGHT_ARM", "")
for i in range(0, qpos_des.shape[0]):
gt.add_step(dt, qpos_des[i, :])
print("Executing trajectory")
gt.start()
gt.wait_for_completion()
print("Finished execution.")
c.stop()
print('Connection closed.')
if __name__ == '__main__':
# Parse command line arguments
args = get_argparser().parse_args()
# Get the experiment's directory to load from if not given as command line argument
ex_dir = ask_for_experiment() if args.ex_dir is None else args.ex_dir
# Get desired positions and velocities
qpos_des = np.load(osp.join(ex_dir, 'qpos_des.npy'))
qvel_des = np.load(osp.join(ex_dir, 'qvel_des.npy'))
start_pos = np.array([0.0, 0.5876, 0.0, 1.36, 0.0, -0.321, -1.57]) # starting position
dt = 0.002 # step size
#run_goto(qpos_des, start_pos, dt)
#input('Hit enter to continue.')
run_direct_control(ex_dir, qpos_des, qvel_des)
| 4,012 |
VL-T5/src/prompt/prompt_modeling.py
|
ylsung/VL_adapter
| 41 |
2169985
|
import torch
import torch.nn as nn
class InputPrompts(nn.Module):
def __init__(self, config):
super().__init__()
self.prompt_len = config.prompt_len
self.input_dim = config.input_dim
self.mid_dim = config.mid_dim
self.prefix_tokens = torch.arange(self.prompt_len).long()
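# Re-parameterise the prompt: token embedding -> Linear -> Tanh -> Linear projecting back to input_dim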
self.prefix_embedding = nn.Sequential(
nn.Embedding(self.prompt_len, self.input_dim),
nn.Linear(self.input_dim, self.mid_dim),
nn.Tanh(),
nn.Linear(self.mid_dim, self.input_dim),
)
def get_prompt(self, bsz, device):
input_tokens = self.prefix_tokens.unsqueeze(0).expand(bsz, -1).to(device) # (B, L)
prefix_prompt = self.prefix_embedding(input_tokens) # (B, L, input_dim)
return prefix_prompt
| 837 |
prices.py
|
georgem3/NanoWalletBot
| 0 |
2169527
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Nano Telegram bot
# @NanoWalletBot https://t.me/NanoWalletBot
#
# Source code:
# https://github.com/SergiySW/NanoWalletBot
#
# Released under the BSD 3-Clause License
#
#
# Run by cron every minute
#
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
from telegram import Bot, ParseMode
import logging
import urllib3, certifi, socket, json
import time, math
# Parse config
import ConfigParser
config = ConfigParser.ConfigParser()
config.read('bot.cfg')
api_key = config.get('main', 'api_key')
bitgrail_price = config.get('monitoring', 'bitgrail_price')
header = {'user-agent': 'RaiWalletBot/1.0'}
# MySQL requests
from common_mysql import *
# Common functions
from common import push, mrai_text
# Translation
with open('language.json') as lang_file:
language = json.load(lang_file)
def lang_text(text_id, lang_id):
try:
return language[lang_id][text_id]
except KeyError:
return language['en'][text_id]
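# Each exchange helper below fetches a public ticker, converts BTC prices to integer satoshis (x 1e8)
# and stores them via mysql_set_price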
def mercatox():
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED',ca_certs=certifi.where())
url = 'https://mercatox.com/public/json24'
#response = http.request('GET', url, headers=header, timeout=20.0)
response = http.request('GET', url, timeout=20.0)
json_mercatox = json.loads(response.data)
json_array = json_mercatox['pairs']['XRB_BTC']
try:
last_price = int(float(json_array['last']) * (10 ** 8))
except KeyError:
last_price = 0
high_price = int(float(json_array['high24hr']) * (10 ** 8))
low_price = int(float(json_array['low24hr']) * (10 ** 8))
ask_price = int(float(json_array['lowestAsk']) * (10 ** 8))
bid_price = int(float(json_array['highestBid']) * (10 ** 8))
volume = int(float(json_array['baseVolume']))
btc_volume = int(float(json_array['quoteVolume']) * (10 ** 8))
mysql_set_price(1, last_price, high_price, low_price, ask_price, bid_price, volume, btc_volume)
def bitgrail():
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED',ca_certs=certifi.where())
#response = http.request('GET', bitgrail_price, headers=header, timeout=20.0)
response = http.request('GET', bitgrail_price, timeout=20.0)
json_bitgrail = json.loads(response.data)
json_array = json_bitgrail['response']
last_price = int(float(json_array['last']) * (10 ** 8))
high_price = int(float(json_array['high']) * (10 ** 8))
low_price = int(float(json_array['low']) * (10 ** 8))
ask_price = int(float(json_array['ask']) * (10 ** 8))
bid_price = int(float(json_array['bid']) * (10 ** 8))
volume = int(float(json_array['coinVolume']))
btc_volume = int(float(json_array['volume']) * (10 ** 8))
mysql_set_price(2, last_price, high_price, low_price, ask_price, bid_price, volume, btc_volume)
def bitflip():
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED',ca_certs=certifi.where())
url = 'https://api.bitflip.cc/method/market.getOHLC'
json_data = json.dumps({"version": "1.0", "pair": "XRB:BTC"})
response = http.request('POST', url, body=json_data, headers={'Content-Type': 'application/json'}, timeout=20.0)
json_bitfilp = json.loads(response.data)
json_array = json_bitfilp[1]
last_price = int(float(json_array['close']) * (10 ** 8))
high_price = int(float(json_array['high']) * (10 ** 8))
low_price = int(float(json_array['low']) * (10 ** 8))
volume = int(float(json_array['volume']))
btc_volume = 0
if (last_price == 0):
price = mysql_select_price()
last_price = int(price[2][0])
url = 'https://api.bitflip.cc/method/market.getRates'
json_data = json.dumps({"version": "1.0", "pair": "XRB:BTC"})
response = http.request('POST', url, body=json_data, headers={'Content-Type': 'application/json'}, timeout=20.0)
json_bitfilp = json.loads(response.data)
json_array = json_bitfilp[1]
for pair in json_array:
if (pair['pair'] in 'XRB:BTC'):
ask_price = int(float(pair['sell']) * (10 ** 8))
bid_price = int(float(pair['buy']) * (10 ** 8))
mysql_set_price(3, last_price, high_price, low_price, ask_price, bid_price, volume, btc_volume)
def kucoin():
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED',ca_certs=certifi.where())
url = 'https://api.kucoin.com/v1/open/tick'
response = http.request('GET', url, timeout=20.0)
json_kucoin = json.loads(response.data)
for pair in json_kucoin['data']:
if (pair['symbol'] in 'XRB-BTC'):
last_price = int(float(pair['lastDealPrice']) * (10 ** 8))
ask_price = int(float(pair['sell']) * (10 ** 8))
bid_price = int(float(pair['buy']) * (10 ** 8))
volume = int(float(pair['vol']))
btc_volume = int(float(pair['volValue']) * (10 ** 8))
high_price = int(float(pair['high']) * (10 ** 8))
low_price = int(float(pair['low']) * (10 ** 8))
mysql_set_price(4, last_price, high_price, low_price, ask_price, bid_price, volume, btc_volume)
def bitz():
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED',ca_certs=certifi.where())
url = 'https://www.bit-z.com/api_v1/ticker?coin=xrb_btc'
response = http.request('GET', url, timeout=20.0)
json_bitz = json.loads(response.data)
json_array = json_bitz['data']
last_price = int(float(json_array['last']) * (10 ** 8))
high_price = int(float(json_array['high']) * (10 ** 8))
low_price = int(float(json_array['low']) * (10 ** 8))
ask_price = int(float(json_array['sell']) * (10 ** 8))
bid_price = int(float(json_array['buy']) * (10 ** 8))
volume = int(float(json_array['vol']))
btc_volume = 0
mysql_set_price(5, last_price, high_price, low_price, ask_price, bid_price, volume, btc_volume)
def binance():
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED',ca_certs=certifi.where())
url = 'https://api.binance.com/api/v1/ticker/24hr?symbol=NANOBTC'
response = http.request('GET', url, timeout=20.0)
json_binance = json.loads(response.data)
last_price = int(float(json_binance['lastPrice']) * (10 ** 8))
high_price = int(float(json_binance['highPrice']) * (10 ** 8))
low_price = int(float(json_binance['lowPrice']) * (10 ** 8))
ask_price = int(float(json_binance['askPrice']) * (10 ** 8))
bid_price = int(float(json_binance['bidPrice']) * (10 ** 8))
volume = int(float(json_binance['volume']))
btc_volume = int(float(json_binance['quoteVolume']) * (10 ** 8))
mysql_set_price(6, last_price, high_price, low_price, ask_price, bid_price, volume, btc_volume)
def prices_above_below(bot, user_id, price, exchange, above):
lang_id = mysql_select_language(user_id)
btc_price = ('%.8f' % (float(price) / (10 ** 8)))
if (above == 1):
text = lang_text('prices_above', lang_id).format(exchange, btc_price).encode("utf-8")
else:
text = lang_text('prices_below', lang_id).format(exchange, btc_price).encode("utf-8")
try:
push(bot, user_id, text)
except Exception as e:
print('Exception user_id {0}'.format(user_id))
print(text)
if (above == 1):
mysql_delete_price_high(user_id)
else:
mysql_delete_price_low(user_id)
time.sleep(0.5)
def price_check():
bot = Bot(api_key)
price = mysql_select_price()
# check if higher
users_high = mysql_select_price_high()
#price_high_bitgrail = max(int(price[1][0]), int(price[1][4]))
price_high_bitz = max(int(price[4][0]), int(price[4][4]))
price_high_kucoin = max(int(price[3][0]), int(price[3][4]))
price_high_binance = max(int(price[5][0]), int(price[5][4]))
for user in users_high:
#if ((price_high_bitgrail >= int(user[1])) and ((int(user[2]) == 0) or (int(user[2]) == 1))):
# prices_above_below(bot, user[0], price_high_bitgrail, "BitGrail.com", 1)
if ((price_high_bitz >= int(user[1])) and ((int(user[2]) == 0) or (int(user[2]) == 2))):
prices_above_below(bot, user[0], price_high_bitz, "Bit-Z.com", 1)
elif ((price_high_kucoin >= int(user[1])) and ((int(user[2]) == 0) or (int(user[2]) == 3))):
prices_above_below(bot, user[0], price_high_kucoin, "Kucoin.com", 1)
elif ((price_high_binance >= int(user[1])) and ((int(user[2]) == 0) or (int(user[2]) == 4))):
prices_above_below(bot, user[0], price_high_binance, "Binance.com", 1)
# check if lower
users_low = mysql_select_price_low()
#price_low_bitgrail = min(int(price[1][0]), int(price[1][3]))
price_low_bitz = min(int(price[4][0]), int(price[4][3]))
price_low_kucoin = min(int(price[3][0]), int(price[3][3]))
price_low_binance = min(int(price[5][0]), int(price[5][3]))
for user in users_low:
#if ((price_low_bitgrail <= int(user[1])) and ((int(user[2]) == 0) or (int(user[2]) == 1))):
# prices_above_below(bot, user[0], price_low_bitgrail, "BitGrail.com", 0)
if ((price_low_bitz <= int(user[1])) and ((int(user[2]) == 0) or (int(user[2]) == 2))):
prices_above_below(bot, user[0], price_low_bitz, "Bit-Z.com", 0)
elif ((price_low_kucoin <= int(user[1])) and ((int(user[2]) == 0) or (int(user[2]) == 3))):
prices_above_below(bot, user[0], price_low_kucoin, "Kucoin.com", 0)
elif ((price_low_binance <= int(user[1])) and ((int(user[2]) == 0) or (int(user[2]) == 4))):
prices_above_below(bot, user[0], price_low_binance, "Binance.com", 0)
def prices_usual():
try:
binance()
except:
time.sleep(5)
try:
binance()
except:
time.sleep(1)
try:
mercatox()
except:
time.sleep(1) # too many errors from Mercatox API
#try:
# bitgrail()
#except:
# time.sleep(5)
# try:
# bitgrail()
# except:
# time.sleep(1) # even BitGrail can fail
try:
kucoin()
except:
time.sleep(5)
try:
kucoin()
except:
time.sleep(1)
try:
bitz()
except:
time.sleep(5)
try:
bitz()
except:
time.sleep(1)
try:
bitflip()
except:
time.sleep(5)
try:
bitflip()
except:
time.sleep(1)
price_check()
time.sleep(10)
prices_usual()
| 9,519 |
lesson-pygame/tictactoe.py
|
vinaymayar/python-game-workshop
| 1 |
2170957
|
import pygame
import sys
from pygame.locals import *
# Define constants
width = 480
height = 480
white = (255, 255, 255)
black = (0, 0, 0)
# Initialize pygame
pygame.init()
# Create a screen
screen = pygame.display.set_mode((width, height))
# Create a clock
clock = pygame.time.Clock()
# Load images
x_img = pygame.image.load('x.png').convert()
o_img = pygame.image.load('o.png').convert()
# Draw background
pygame.draw.rect(screen, white, (0, 0, width, height))
pygame.draw.line(screen, black, (width/3, 0), (width/3, height), 5)
pygame.draw.line(screen, black, (2*width/3, 0), (2*width/3, height), 5)
pygame.draw.line(screen, black, (0, height/3), (width, height/3), 5)
pygame.draw.line(screen, black, (0, 2*height/3), (width, 2*height/3), 5)
# Update screen with background
pygame.display.flip()
turn = 1
board = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
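# board holds 0 for an empty square, 1 for X and 2 for O; turn tracks the current player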
def change_player():
global turn
if turn == 1:
turn = 2
else:
turn = 1
def add_image_to_screen(v_idx, h_idx):
global turn
if turn == 1:
img = x_img
else:
img = o_img
position = (h_idx * width / 3 + 5, v_idx * height / 3 + 5)
screen.blit(img, position)
def check_for_victory():
for row in board:
if row[0] == row[1] and row[1] == row[2] and row[0] != 0:
return row[0]
for i in range(3):
if board[0][i] == board[1][i] and board[1][i] == board[2][i] and board[0][i] != 0:
return board[0][i]
if board[0][0] == board[1][1] and board[1][1] == board[2][2] and board[0][0] != 0:
return board[0][0]
if board[0][2] == board[1][1] and board[1][1] == board[2][0] and board[0][2] != 0:
return board[0][2]
return 0
def check_for_draw():
for row in board:
for square in row:
if square == 0:
return False
return True
def print_text(text):
font = pygame.font.SysFont('Arial', 20)
rendered_text = font.render(text, True, black, white)
screen.blit(rendered_text, (200, 200))
def print_winner(winner):
print_text("Player {} won!".format(winner))
def print_draw():
print_text("Draw!")
def click(x, y):
print(y)
print(x)
vertical_idx = y // (height // 3)  # integer division so the result can index the board
horizontal_idx = x // (width // 3)
print(vertical_idx)
print(horizontal_idx)
if board[vertical_idx][horizontal_idx] > 0:
return
board[vertical_idx][horizontal_idx] = turn
add_image_to_screen(vertical_idx, horizontal_idx)
winner = check_for_victory()
if winner > 0:
print_winner(winner)
return
draw = check_for_draw()
if draw:
print_draw()
return
change_player()
return
while True:
# Process events that happened since the last iteration
for event in pygame.event.get():
# Process quitting
if event.type == QUIT:
pygame.quit()
sys.exit()
# Process a mouse click
elif event.type == MOUSEBUTTONUP:
x, y = event.pos
click(x, y)
pygame.display.flip()
clock.tick(30)
| 3,104 |
pyspark/clustering/ClusteringHack.py
|
AlphaSunny/MachineLearning
| 0 |
2170001
|
from pyspark.sql import SparkSession
from pyspark.ml.clustering import KMeans
from pyspark.ml.feature import StandardScaler
from pyspark.ml.linalg import Vectors
from pyspark.ml.feature import VectorAssembler
spark = SparkSession.builder.appName('hack_find').getOrCreate()
# Loads data
dataset = spark.read.csv("hdfs:///user/maria_dev/MachineLearning/hack_data.csv",header=True,inferSchema=True)
feat_cols = ['Session_Connection_Time', 'Bytes Transferred', 'Kali_Trace_Used',
'Servers_Corrupted', 'Pages_Corrupted','WPM_Typing_Speed']
vec_assembler = VectorAssembler(inputCols = feat_cols, outputCol='features')
final_data = vec_assembler.transform(dataset)
scaler = StandardScaler(inputCol="features", outputCol="scaledFeatures", withStd=True, withMean=False)
# Compute summary statistics by fitting the StandardScaler
scalerModel = scaler.fit(final_data)
# Normalize each feature to have unit standard deviation.
cluster_final_data = scalerModel.transform(final_data)
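# Fit k-means for k=2 and k=3 and compare the within-set sum of squared errors (WSSSE)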
kmeans3 = KMeans(featuresCol='scaledFeatures',k=3)
kmeans2 = KMeans(featuresCol='scaledFeatures',k=2)
model_k3 = kmeans3.fit(cluster_final_data)
model_k2 = kmeans2.fit(cluster_final_data)
wssse_k3 = model_k3.computeCost(cluster_final_data)
wssse_k2 = model_k2.computeCost(cluster_final_data)
print("With K=3")
print("Within Set Sum of Squared Errors = " + str(wssse_k3))
print('--'*30)
print("With K=2")
print("Within Set Sum of Squared Errors = " + str(wssse_k2))
for k in range(2,9):
kmeans = KMeans(featuresCol='scaledFeatures',k=k)
model = kmeans.fit(cluster_final_data)
wssse = model.computeCost(cluster_final_data)
print("With K = " + str(k))
print("Within Set Sum of Squared Errors = " + str(wssse))
print('--'*30)
model_k3.transform(cluster_final_data).groupBy('prediction').count().show()
model_k2.transform(cluster_final_data).groupBy('prediction').count().show()
| 1,938 |
scripts/climodat/check_database.py
|
trentford/iem
| 1 |
2169920
|
"""Rectify climodat database entries."""
from __future__ import print_function
from io import StringIO
import subprocess
import sys
import pandas as pd
from pandas.io.sql import read_sql
from pyiem.network import Table as NetworkTable
from pyiem.util import get_dbconn
def delete_data(pgconn, station, state):
"""Remove whatever data we have for this station."""
cursor = pgconn.cursor()
cursor.execute("""
DELETE from alldata_""" + state + """ WHERE station = %s
""", (station, ))
print("Removed %s database entries" % (cursor.rowcount, ))
cursor.close()
pgconn.commit()
def main(argv):
"""Go Main"""
state = argv[1]
nt = NetworkTable("%sCLIMATE" % (state,))
pgconn = get_dbconn('coop')
df = read_sql("""
SELECT station, year, day from alldata_""" + state + """
ORDER by station, day
""", pgconn, index_col=None, parse_dates=['day'])
for station, gdf in df.groupby('station'):
if station not in nt.sts:
print("station: %s is unknown to %sCLIMATE network" % (station,
state))
delete_data(pgconn, station, state)
continue
# Make sure that our data archive starts on the first of a month
minday = gdf['day'].min().replace(day=1)
days = pd.date_range(minday, gdf['day'].max())
missing = [x for x in days.values if x not in gdf['day'].values]
print(("station: %s has %s rows between: %s and %s, missing %s/%s days"
) % (station, len(gdf.index), gdf['day'].min(),
gdf['day'].max(), len(missing), len(days.values)))
coverage = len(missing) / float(len(days.values))
if coverage > 0.33:
cmd = ("python ../dbutil/delete_station.py %sCLIMATE %s"
) % (state, station)
print(cmd)
subprocess.call(cmd, shell=True)
delete_data(pgconn, station, state)
continue
sio = StringIO()
for day in missing:
now = pd.Timestamp(day).to_pydatetime()
sio.write(("%s,%s,%s,%s,%s\n"
) % (station, now, "%02i%02i" % (now.month, now.day),
now.year, now.month))
sio.seek(0)
cursor = pgconn.cursor()
cursor.copy_from(
sio, "alldata_%s" % (state, ),
columns=('station', 'day', 'sday', 'year', 'month'), sep=','
)
del sio
cursor.close()
pgconn.commit()
if __name__ == '__main__':
main(sys.argv)
| 2,600 |
src/find_good_sample.py
|
furgerf/GAN-for-dermatologic-imaging
| 0 |
2169085
|
#!/usr/bin/env python
# pylint: disable=wrong-import-position,too-many-statements
import os
import time
import traceback
from argparse import ArgumentParser
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from evaluation import Evaluation
from utils import load_checkpoint, load_model, logistic
def parse_arguments():
parser = ArgumentParser()
parser.add_argument("--eval-dir", type=str, required=True,
help="Directory of the evaluation to test (output)")
parser.add_argument("--model-name", type=str, required=True,
help="Name of the model to instantiate")
parser.add_argument("--epoch", type=int, required=True,
help="The epoch of the model to load")
parser.add_argument("--description", type=str, default=None,
help="An optional description of the images")
parser.add_argument("--image-count", type=int, default=1,
help="The number of images to generate")
parser.add_argument("--rows", type=int, default=8,
help="The number of rows to generate")
parser.add_argument("--columns", type=int, default=8,
help="The number of columns to generate")
parser.add_argument("--noise-dimensions", type=int, default=100,
help="The number of dimensions of the noise vector")
parser.add_argument("--search-samples", type=int, default=4,
help="The number of samples to generate at each search step")
parser.add_argument("--step-size", type=float,
help="The distance to move in the various directions")
parser.add_argument("--size-factor", type=float, default=0.9,
help="The factor by which the step size is multiplied after each iteration")
parser.add_argument("--colored", action="store_true",
help="Specify if the model generates colored output")
parser.add_argument("--discriminator-classes", type=int, default=1,
help="Specify the number of classes the discriminator is predicting")
return parser.parse_args()
def main(start_time):
tf.enable_eager_execution()
# handle arguments and config
args = parse_arguments()
args.start_time = start_time
tf.logging.info("Args: {}".format(args))
args.has_colored_target = args.colored
args.checkpoint_dir = os.path.join("output", args.eval_dir, "checkpoints")
model = load_model(args)
generator = model.get_generator()
discriminator = model.get_discriminator()
load_checkpoint(args, checkpoint_number=args.epoch//25, generator=generator, discriminator=discriminator)
gen_training = True
disc_training = False
for image_number in range(args.image_count):
tf.logging.info("Generating image {}/{}".format(image_number+1, args.image_count))
plt.figure(figsize=(32, 32))
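# Hill-climb in the generator's latent space: sample candidate noise vectors, keep the one the
# discriminator scores highest, then keep perturbing it in random directions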
inputs = tf.random_normal([args.search_samples, args.noise_dimensions])
samples = generator(inputs, training=gen_training)
predictions = logistic(discriminator(samples, training=disc_training))
best_index = tf.argmax(predictions)
best_index = best_index.numpy() if best_index.shape else best_index
previous_prediction = predictions[best_index]
plt.subplot(args.rows, args.columns, 1)
Evaluation.plot_image(samples[best_index], np.round(predictions[best_index].numpy(), 5))
previous_direction = None
improvements = 0
best_input = inputs[best_index]
if args.step_size is not None:
current_step_size = args.step_size
for i in range(1, args.rows*args.columns):
tf.logging.info("Looking for image {}/{}, previous prediction: {}{}".format(
i+1, args.rows*args.columns, previous_prediction,
"" if args.step_size is None else ", step: {:.3f}".format(current_step_size)))
# get new possible directions to move
directions = tf.random_normal([args.search_samples, args.noise_dimensions], stddev=0.1)
if previous_direction is not None:
directions = tf.concat([[previous_direction], directions[1:, :]], axis=0)
# obtain new inputs by moving previous input into the various directions
lengths = [tf.norm(direction).numpy() for direction in directions]
tf.logging.debug("Direction lengths: {}".format(",".join([str(l) for l in lengths])))
inputs = tf.reshape(tf.tile(best_input, [args.search_samples]), (-1, args.noise_dimensions))
if args.step_size is None:
inputs = inputs + directions
else:
directions = [direction * current_step_size / tf.norm(direction) for direction in directions]
inputs = inputs + directions
# get new sampels and predictions
samples = generator(inputs, training=gen_training)
predictions = logistic(discriminator(samples, training=disc_training))
best_index = tf.argmax(predictions)
best_index = best_index.numpy() if best_index.shape else best_index
tf.logging.debug("Best previous input: {}, input at best position: {}, direction: {}".format(
best_input[0], inputs[best_index, 0], directions[best_index][0]))
if previous_direction is not None and best_index == 0:
tf.logging.info("Going into the same direction again!")
if predictions[best_index].numpy() > previous_prediction.numpy():
previous_prediction = predictions[best_index]
previous_direction = directions[best_index]
best_input = inputs[best_index]
plt.subplot(args.rows, args.columns, i+1)
Evaluation.plot_image(samples[best_index], np.round(predictions[best_index].numpy(), 5))
improvements += 1
else:
previous_direction = None
tf.logging.info("No improvement found")
if args.step_size is not None:
current_step_size *= args.size_factor
tf.logging.info("Improved the original image {} times ({:.1f}%)".format(
improvements, 100. * improvements / (args.rows*args.columns-1)))
plt.tight_layout()
figure_file = os.path.join("output", args.eval_dir, "samples{}_{:03d}.png".format(
"_{}".format(args.description) if args.description else "", image_number+1))
plt.savefig(figure_file)
plt.close()
tf.logging.info("Finished generating {} images".format(args.image_count))
if __name__ == "__main__":
START_TIME = time.time()
# np.random.seed(42)
tf.logging.set_verbosity(tf.logging.INFO)
try:
main(START_TIME)
except Exception as ex:
tf.logging.fatal("Exception occurred: {}".format(traceback.format_exc()))
finally:
tf.logging.info("Finished eval after {:.1f}m".format((time.time() - START_TIME) / 60))
| 6,481 |
models/__init__.py
|
xytmhy/DED-Net-Defocus-Estimation-and-Deblurring
| 15 |
2169647
|
# from .FlowNetS import *
# from .FlowNetC import *
#
# from .InfoNetC1 import *
# from .InfoNetC2 import *
# from .InfoNetC3 import *
# from .InfoNetC4 import *
# from .InfoNetS4 import *
#
# from .FusionNetS0 import *
# from .DefocusNet1 import *
# from .DefocusNet2 import *
# from .ResSPPNet1 import *
# from .ResSPPNet2 import *
#
# from .DeBlurNetB import *
# from .DeBlurNetI import *
from .DeBlur import *
| 416 |
guillermo/forms.py
|
GBrachetta/guillermo
| 0 |
2171041
|
from django import forms
class ContactForm(forms.Form):
"""
Form for the contact view and template
"""
name = forms.CharField(label="")
email = forms.EmailField(label="")
message = forms.CharField(
label="",
widget=forms.Textarea(
attrs={
"rows": 8,
}
),
)
class Meta:
fields = ["name", "email", "message"]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for field_name, field in self.fields.items():
field.widget.attrs["class"] = "border-black rounded-0"
placeholders = {
"name": "Name *",
"email": "Email *",
"message": "Message *",
}
self.fields["name"].widget.attrs["autofocus"] = True
for field in self.fields:
placeholder = placeholders[field]
self.fields[field].widget.attrs["placeholder"] = placeholder
self.fields[field].label = False
| 1,010 |
patientMatcher/utils/stats.py
|
john1711/patientMatcher
| 11 |
2170909
|
# -*- coding: utf-8 -*-
import logging
from datetime import date
LOG = logging.getLogger(__name__)
def general_metrics(db):
"""Create an object with database metrics
Args:
db(pymongo.database.Database)
Returns:
metrics(dict): According to the MME API it should be a dictionary like this:
{
"metrics": {
"numberOfCases": 0,
"numberOfSubmitters": 0,
"numberOfGenes": 0,
"numberOfUniqueGenes": 0,
"numberOfVariants": 0,
"numberOfUniqueVariants": 0,
"numberOfFeatures": 0,
"numberOfUniqueFeatures": 0,
"numberOfFeatureSets": 0, # endpoint is not returning this, at the moment
"numberOfUniqueGenesMatched": 0,
"numberOfCasesWithDiagnosis": 0,
"numberOfRequestsReceived": 0,
"numberOfPotentialMatchesSent": 0,
"dateGenerated": "2017-08-24",
},
"disclaimer": "Disclaimer text...",
"terms": "Terms text..."
}
"""
# get gene/occurrence for all genes in db
n_genes = 0
gene_occurrs = item_occurrence(
db, "genomicFeatures", "genomicFeatures.gene", "genomicFeatures.gene.id"
)
for gene_count in gene_occurrs:
n_genes += gene_count["count"]
# get numberOfUniqueVariants/occurrence for all variants in db
variant_occurr = item_occurrence(
db, "genomicFeatures", "genomicFeatures.variant", "genomicFeatures.variant"
)
n_vars = 0
for var in variant_occurr:
n_vars += var.get("count")
# get feature/occurrence for all features in db
n_feat = 0
feat_occurr = item_occurrence(db, "features", "features.id")
for feat in feat_occurr:
n_feat += feat.get("count")
# include in unique_gene_matches only matches actively returned by the server (internal)
match_type = {"match_type": "internal"}
unique_gene_matches = db.matches.distinct(
"results.patients.patient.genomicFeatures.gene", match_type
)
n_cases = sum(1 for i in db.patients.find())
n_cases_diagnosis = sum(
1 for i in db.patients.find({"disorders": {"$exists": True, "$ne": []}})
)
n_requests = sum(1 for i in db.matches.find({"match_type": "internal"}))
n_positive_matches = sum(
1 for i in db.matches.find({"match_type": "internal", "has_matches": True})
)
metrics = {
"numberOfCases": n_cases,
"numberOfSubmitters": len(db.patients.distinct("contact.href")),
"numberOfGenes": n_genes,
"numberOfUniqueGenes": len(db.patients.distinct("genomicFeatures.gene")),
"numberOfVariants": n_vars,
"numberOfUniqueVariants": len(db.patients.distinct("genomicFeatures.variant")),
"numberOfFeatures": n_feat,
"numberOfUniqueFeatures": len(db.patients.distinct("features.id")),
"numberOfUniqueGenesMatched": len(unique_gene_matches),
"numberOfCasesWithDiagnosis": n_cases_diagnosis,
"numberOfRequestsReceived": n_requests,
"numberOfPotentialMatchesSent": n_positive_matches,
"dateGenerated": str(date.today()),
}
return metrics
def item_occurrence(db, unw1, group, unw2=None):
"""Get a list of item/occurrence in patient collection
Args:
db(pymongo.database.Database)
unw1(string): first nested unwind item
group(string): item to group results by
unw2(string): second nested unwind item # none if nested level is missing
Returns:
item_occurr(list) example: [{'id':'item_obj', 'count': item_occurrence}, ..]
"""
# create query pipeline
pipeline = [{"$unwind": "".join(["$", unw1])}]
if unw2:
pipeline.append({"$unwind": "".join(["$", unw2])})
pipeline.append({"$group": {"_id": "".join(["$", group]), "count": {"$sum": 1}}})
item_occurr = list(db.patients.aggregate(pipeline))
return item_occurr
| 4,100 |
cartomancy/games/core/events.py
|
joedaws/card-player
| 0 |
2171050
|
from dataclasses import dataclass
from cartomancy.players.base import Player
@dataclass
class SuccessEvent:
"""Records when a player was successful in an action."""
player: Player
@dataclass
class FailEvent:
"""Records when a player has failed to do something."""
player: Player
@dataclass
class DrawEvent:
"""Stored data from a draw."""
player: Player
number: int = 1
@dataclass
class AskEvent:
"""Stores data of an ask."""
player: Player
opponent: Player
rank: str
@dataclass
class ExchangeEvent:
"""Stores data for an exchange.
Fields:
source (Player): Giving players index.
destination (Player): Receiving players index.
rank (str): Ranks of card(s) being exchanged.
number (int): Number of cards with specific rank being exchanged.
"""
source: Player
destination: Player
rank: str
number: int
@dataclass
class BookEvent:
"""Event for when a players makes a book."""
player: Player
rank: str
number: int = 4
@dataclass
class RemovePlayerEvent:
"""Event for when a player is removed from the game."""
player_to_remove: Player
| 1,171 |
src/sast/tokens.py
|
sota/old-lang
| 1 |
2169932
|
from rpython.rtyper.lltypesystem import rffi, lltype
class Token(object): #pylint: disable=too-few-public-methods
def __init__(self, name, value, kind, line, pos, skip):
self.name = name
self.value = value
self.kind = kind
self.line = line
self.pos = pos
self.skip = skip
def to_str(self):
return '[name=%s value=%s kind=%d line=%d pos=%d skip=%s]' % (
self.name,
self.value,
self.kind,
self.line,
self.pos,
self.skip)
def is_name(self, *names):
for name in list(names):
if name == self.name:
return True
return False
| 707 |
pyRaster/rasterToAscii.py
|
mjsauvinen/P4US
| 4 |
2169446
|
#!/usr/bin/env python3
import sys
import argparse
import numpy as np
from mapTools import *
from utilities import filesFromList, writeLog
from plotTools import addImagePlot
import matplotlib.pyplot as plt
'''
Description:
Author: <NAME>
<EMAIL>
University of Helsinki &
Finnish Meteorological Institute
'''
#==========================================================#
parser = argparse.ArgumentParser(prog='rasterToAscii.py')
parser.add_argument("-f", "--filename",type=str, help="Name of the comp domain data file.")
parser.add_argument("-fo", "--fileout",type=str, help="Name of output ASCII file.")
parser.add_argument("-i", "--round2Int", help="Round the output data to nearest integers.",\
action="store_true", default=False)
parser.add_argument("-p", "--printOn", help="Print also the raster data.",\
action="store_true", default=False)
parser.add_argument("-pp", "--printOnly", help="Only print raster data. Don't write.",\
action="store_true", default=False)
args = parser.parse_args()
writeLog( parser, args, args.printOnly )
#==========================================================#
filename = args.filename
fileout = args.fileout
round2Int = args.round2Int
printOn = args.printOn
printOnly = args.printOnly
# Read the raster tile to be processed.
Rdict = readNumpyZTile(filename)
R = Rdict['R']
Rdims = np.array(np.shape(R))
ROrig = Rdict['GlobOrig']
print(' Rdims = {} '.format(Rdims))
print(' ROrig = {} '.format(ROrig))
if( not printOnly ):
#fx = open( fileout , 'w' )
if( round2Int ): np.savetxt(fileout,np.round(R),fmt='%g')
else: np.savetxt(fileout,R,fmt='%g')
#fx.close()
if( args.printOn or args.printOnly ):
figDims = 13.*(Rdims[::-1].astype(float)/np.max(Rdims))
#print('Sum = {}'.format(np.sum(R)))
fig = plt.figure(num=1, figsize=figDims)
fig = addImagePlot( fig, R, fileout )
plt.show()
R = Rf = None
| 1,909 |
project/delibere/migrations/0015_settore_ss_id.py
|
guglielmo/mosic2-db-delibere
| 0 |
2170659
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-05-09 18:46
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('delibere', '0014_settore_parent'),
]
operations = [
migrations.AddField(
model_name='settore',
name='ss_id',
field=models.IntegerField(null=True, unique=True),
),
]
| 458 |
api/momo_devc_app/views/transaction_views.py
|
tranminhduc4796/devc_backend
| 0 |
2168070
|
from rest_framework.generics import ListCreateAPIView, RetrieveUpdateDestroyAPIView
from rest_framework.permissions import IsAuthenticated
from ..serializers import TransactionSerializer
from ..models import Transaction, Profile
from rest_framework.response import Response
from rest_framework import status
from django.shortcuts import get_object_or_404
class ListCreate(ListCreateAPIView):
serializer_class = TransactionSerializer
permission_classes = [IsAuthenticated]
def get_queryset(self):
user = get_object_or_404(Profile, user=self.request.user)
return Transaction.objects.filter(user=user)
def create(self, request, *args, **kwargs):
user = get_object_or_404(Profile, user=self.request.user)
transaction = Transaction(user=user)
serializer = self.serializer_class(transaction, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class RetrieveUpdateDestroy(RetrieveUpdateDestroyAPIView):
queryset = Transaction.objects.all()
serializer_class = TransactionSerializer
permission_classes = [IsAuthenticated]
| 1,292 |
unit_tests/test_pg_dir_utils.py
|
junaid-ali/charm-plumgrid-director
| 0 |
2170938
|
from mock import MagicMock
from collections import OrderedDict
import charmhelpers.contrib.openstack.templating as templating
templating.OSConfigRenderer = MagicMock()
import pg_dir_utils as nutils
from test_utils import (
CharmTestCase,
)
import charmhelpers.core.hookenv as hookenv
TO_PATCH = [
'os_release',
'neutron_plugin_attribute',
]
class DummyContext():
def __init__(self, return_value):
self.return_value = return_value
def __call__(self):
return self.return_value
class TestPGDirUtils(CharmTestCase):
def setUp(self):
super(TestPGDirUtils, self).setUp(nutils, TO_PATCH)
# self.config.side_effect = self.test_config.get
def tearDown(self):
        # Reset the cached hookenv values between tests
hookenv.cache = {}
def test_register_configs(self):
class _mock_OSConfigRenderer():
def __init__(self, templates_dir=None, openstack_release=None):
self.configs = []
self.ctxts = []
def register(self, config, ctxt):
self.configs.append(config)
self.ctxts.append(ctxt)
self.os_release.return_value = 'trusty'
templating.OSConfigRenderer.side_effect = _mock_OSConfigRenderer
_regconfs = nutils.register_configs()
confs = [nutils.PG_KA_CONF,
nutils.PG_CONF,
nutils.PG_DEF_CONF,
nutils.PG_HN_CONF,
nutils.PG_HS_CONF,
nutils.PG_IFCS_CONF,
nutils.OPS_CONF]
self.assertItemsEqual(_regconfs.configs, confs)
def test_resource_map(self):
_map = nutils.resource_map()
svcs = ['plumgrid']
confs = [nutils.PG_KA_CONF]
[self.assertIn(q_conf, _map.keys()) for q_conf in confs]
self.assertEqual(_map[nutils.PG_KA_CONF]['services'], svcs)
def test_restart_map(self):
_restart_map = nutils.restart_map()
expect = OrderedDict([
(nutils.PG_CONF, ['plumgrid']),
(nutils.PG_KA_CONF, ['plumgrid']),
(nutils.PG_DEF_CONF, ['plumgrid']),
(nutils.PG_HN_CONF, ['plumgrid']),
(nutils.PG_HS_CONF, ['plumgrid']),
(nutils.OPS_CONF, ['plumgrid']),
(nutils.PG_IFCS_CONF, []),
])
self.assertEqual(expect, _restart_map)
for item in _restart_map:
            self.assertTrue(item in expect)
self.assertTrue(expect[item] == _restart_map[item])
| 2,503 |
unitracer/lib/windows/i386/shell32.py
|
icchy/tracecorn
| 67 |
2170394
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2014, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice,this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Wrapper for shell32.dll in ctypes.
"""
# TODO
# * Add a class wrapper to SHELLEXECUTEINFO
# * More logic into ShellExecuteEx
__revision__ = "$Id: shell32.py 1299 2013-12-20 09:30:55Z qvasimodo $"
from defines import *
#==============================================================================
# This is used later on to calculate the list of exported symbols.
_all = None
_all = set(vars().keys())
#==============================================================================
#--- Constants ----------------------------------------------------------------
SEE_MASK_DEFAULT = 0x00000000
SEE_MASK_CLASSNAME = 0x00000001
SEE_MASK_CLASSKEY = 0x00000003
SEE_MASK_IDLIST = 0x00000004
SEE_MASK_INVOKEIDLIST = 0x0000000C
SEE_MASK_ICON = 0x00000010
SEE_MASK_HOTKEY = 0x00000020
SEE_MASK_NOCLOSEPROCESS = 0x00000040
SEE_MASK_CONNECTNETDRV = 0x00000080
SEE_MASK_NOASYNC = 0x00000100
SEE_MASK_DOENVSUBST = 0x00000200
SEE_MASK_FLAG_NO_UI = 0x00000400
SEE_MASK_UNICODE = 0x00004000
SEE_MASK_NO_CONSOLE = 0x00008000
SEE_MASK_ASYNCOK = 0x00100000
SEE_MASK_HMONITOR = 0x00200000
SEE_MASK_NOZONECHECKS = 0x00800000
SEE_MASK_WAITFORINPUTIDLE = 0x02000000
SEE_MASK_FLAG_LOG_USAGE = 0x04000000
SE_ERR_FNF = 2
SE_ERR_PNF = 3
SE_ERR_ACCESSDENIED = 5
SE_ERR_OOM = 8
SE_ERR_DLLNOTFOUND = 32
SE_ERR_SHARE = 26
SE_ERR_ASSOCINCOMPLETE = 27
SE_ERR_DDETIMEOUT = 28
SE_ERR_DDEFAIL = 29
SE_ERR_DDEBUSY = 30
SE_ERR_NOASSOC = 31
SHGFP_TYPE_CURRENT = 0
SHGFP_TYPE_DEFAULT = 1
CSIDL_DESKTOP = 0x0000
CSIDL_INTERNET = 0x0001
CSIDL_PROGRAMS = 0x0002
CSIDL_CONTROLS = 0x0003
CSIDL_PRINTERS = 0x0004
CSIDL_PERSONAL = 0x0005
CSIDL_FAVORITES = 0x0006
CSIDL_STARTUP = 0x0007
CSIDL_RECENT = 0x0008
CSIDL_SENDTO = 0x0009
CSIDL_BITBUCKET = 0x000a
CSIDL_STARTMENU = 0x000b
CSIDL_MYDOCUMENTS = CSIDL_PERSONAL
CSIDL_MYMUSIC = 0x000d
CSIDL_MYVIDEO = 0x000e
CSIDL_DESKTOPDIRECTORY = 0x0010
CSIDL_DRIVES = 0x0011
CSIDL_NETWORK = 0x0012
CSIDL_NETHOOD = 0x0013
CSIDL_FONTS = 0x0014
CSIDL_TEMPLATES = 0x0015
CSIDL_COMMON_STARTMENU = 0x0016
CSIDL_COMMON_PROGRAMS = 0x0017
CSIDL_COMMON_STARTUP = 0x0018
CSIDL_COMMON_DESKTOPDIRECTORY = 0x0019
CSIDL_APPDATA = 0x001a
CSIDL_PRINTHOOD = 0x001b
CSIDL_LOCAL_APPDATA = 0x001c
CSIDL_ALTSTARTUP = 0x001d
CSIDL_COMMON_ALTSTARTUP = 0x001e
CSIDL_COMMON_FAVORITES = 0x001f
CSIDL_INTERNET_CACHE = 0x0020
CSIDL_COOKIES = 0x0021
CSIDL_HISTORY = 0x0022
CSIDL_COMMON_APPDATA = 0x0023
CSIDL_WINDOWS = 0x0024
CSIDL_SYSTEM = 0x0025
CSIDL_PROGRAM_FILES = 0x0026
CSIDL_MYPICTURES = 0x0027
CSIDL_PROFILE = 0x0028
CSIDL_SYSTEMX86 = 0x0029
CSIDL_PROGRAM_FILESX86 = 0x002a
CSIDL_PROGRAM_FILES_COMMON = 0x002b
CSIDL_PROGRAM_FILES_COMMONX86 = 0x002c
CSIDL_COMMON_TEMPLATES = 0x002d
CSIDL_COMMON_DOCUMENTS = 0x002e
CSIDL_COMMON_ADMINTOOLS = 0x002f
CSIDL_ADMINTOOLS = 0x0030
CSIDL_CONNECTIONS = 0x0031
CSIDL_COMMON_MUSIC = 0x0035
CSIDL_COMMON_PICTURES = 0x0036
CSIDL_COMMON_VIDEO = 0x0037
CSIDL_RESOURCES = 0x0038
CSIDL_RESOURCES_LOCALIZED = 0x0039
CSIDL_COMMON_OEM_LINKS = 0x003a
CSIDL_CDBURN_AREA = 0x003b
CSIDL_COMPUTERSNEARME = 0x003d
CSIDL_PROFILES = 0x003e
CSIDL_FOLDER_MASK = 0x00ff
CSIDL_FLAG_PER_USER_INIT = 0x0800
CSIDL_FLAG_NO_ALIAS = 0x1000
CSIDL_FLAG_DONT_VERIFY = 0x4000
CSIDL_FLAG_CREATE = 0x8000
CSIDL_FLAG_MASK = 0xff00
#--- Structures ---------------------------------------------------------------
# typedef struct _SHELLEXECUTEINFO {
# DWORD cbSize;
# ULONG fMask;
# HWND hwnd;
# LPCTSTR lpVerb;
# LPCTSTR lpFile;
# LPCTSTR lpParameters;
# LPCTSTR lpDirectory;
# int nShow;
# HINSTANCE hInstApp;
# LPVOID lpIDList;
# LPCTSTR lpClass;
# HKEY hkeyClass;
# DWORD dwHotKey;
# union {
# HANDLE hIcon;
# HANDLE hMonitor;
# } DUMMYUNIONNAME;
# HANDLE hProcess;
# } SHELLEXECUTEINFO, *LPSHELLEXECUTEINFO;
class SHELLEXECUTEINFO(Structure):
_fields_ = [
("cbSize", DWORD),
("fMask", ULONG),
("hwnd", HWND),
("lpVerb", LPSTR),
("lpFile", LPSTR),
("lpParameters", LPSTR),
("lpDirectory", LPSTR),
("nShow", ctypes.c_int),
("hInstApp", HINSTANCE),
("lpIDList", LPVOID),
("lpClass", LPSTR),
("hkeyClass", HKEY),
("dwHotKey", DWORD),
("hIcon", HANDLE),
("hProcess", HANDLE),
]
def __get_hMonitor(self):
return self.hIcon
def __set_hMonitor(self, hMonitor):
self.hIcon = hMonitor
hMonitor = property(__get_hMonitor, __set_hMonitor)
LPSHELLEXECUTEINFO = POINTER(SHELLEXECUTEINFO)
#==============================================================================
# This calculates the list of exported symbols.
_all = set(vars().keys()).difference(_all)
__all__ = [_x for _x in _all if not _x.startswith('_')]
__all__.sort()
#==============================================================================
| 7,862 |
WebMirror/management/rss_parser_funcs/feed_parse_extractLittlefairyaliceWordpressCom.py
|
fake-name/ReadableWebProxy
| 193 |
2170737
|
def extractLittlefairyaliceWordpressCom(item):
'''
DISABLED
Parser for 'littlefairyalice.wordpress.com'
'''
return None
| 127 |
silver_waffle/credentials.py
|
miguelagustin/silver-waffle-trading-bot
| 2 |
2170697
|
class Credential:
"""
Class to store exchange credentials
"""
all_credentials = []
def __init__(self, *, secret_key, public_key, exchange_name):
self.secret_key = secret_key
self.public_key = public_key
self.exchange_name = exchange_name
self.all_credentials.append(self)
def to_ccxt_credential(self):
return {
'apiKey': self.public_key,
'secret': self.secret_key
}
def __repr__(self):
return f'Credential(exchange_name={self.exchange_name})'
def find_credentials_by_exchange_name(exchange_name):
results = []
for credential in Credential.all_credentials:
if credential.exchange_name == exchange_name:
results.append(credential)
return results
# Add your credentials to this file if you want them automatically recognized and for tests to work properly
Credential(public_key='your public key', secret_key='your secret key', exchange_name='your exchange name')
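# --- Minimal usage sketch (not part of the original module) ---
# The keys and exchange name below are placeholders; they only illustrate how a
# registered credential can be looked up again and converted for ccxt.
if __name__ == "__main__":
    example = Credential(public_key='pk-example', secret_key='sk-example', exchange_name='example-exchange')
    assert example in find_credentials_by_exchange_name('example-exchange')
    print(example.to_ccxt_credential())  # {'apiKey': 'pk-example', 'secret': 'sk-example'}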
| 1,008 |
preacher/core/request/__init__.py
|
lasta/preacher
| 0 |
2170494
|
"""Request compilation."""
from .request import Request, Method, PreparedRequest, ExecutionReport
from .request_body import RequestBody, UrlencodedRequestBody, JsonRequestBody
from .response import Response, ResponseBody
from .url_param import UrlParams, UrlParam, UrlParamValue
__all__ = [
'Request',
'Method',
'PreparedRequest',
'ExecutionReport',
'RequestBody',
'UrlencodedRequestBody',
'JsonRequestBody',
'Response',
'ResponseBody',
'UrlParams',
'UrlParam',
'UrlParamValue',
]
| 531 |
App/stores.py
|
MoSaadiSalem/forums-flask
| 0 |
2170623
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
class BaseStore(object):
def __init__(self, data_provider, last_id):
self._data_provider = data_provider
self._last_id = last_id
def add(self, item_instance):
item_instance.id = self._last_id
self._data_provider.append(item_instance)
self._last_id += 1
def get_all(self):
return (item_instance for item_instance in self._data_provider)
def get_by_id(self, id):
instances = self.get_all()
obj = None
for item_instance in instances:
if item_instance.id == id:
obj = item_instance
break
return obj
def entity_exists(self, item_instance):
exist = True
if self.get_by_id(item_instance.id) is None:
exist = False
return exist
def update(self, item_instance):
all_instances = self.get_all()
for index, instance in enumerate(all_instances):
if instance.id == item_instance.id:
self._data_provider[index] = item_instance
break
def delete(self, id):
item_instance = self.get_by_id(id)
self._data_provider.remove(item_instance)
class MemberStore(BaseStore):
"""Manipulate the principle operation on members.
Attributes:
members (list): Store members objects.
last_id (int): A counter that holds last added member object id.
"""
members = []
last_id = 1
def __init__(self):
super(MemberStore, self).__init__(MemberStore.members, MemberStore.last_id)
def get_by_name(self, name):
all_members = self.get_all()
return (member for member in all_members if member.name == name)
def get_members_with_posts(self, posts):
"""Assign each member to his/her posts
Args:
posts (Post): An instance of post class.
Returns:
all_members (generator): Updated members generator associated their posts objects.
"""
all_members = self.get_all()
for member, post in itertools.product(all_members, posts):
if post.member_id == member.id and post not in member.posts:
member.posts.append(post)
return(member for member in self.get_all())
def get_top(self):
"""A list top members wrote posts
Returns:
all_members (list): Descending sorted ordered list contains top members.
"""
number_of_top = 2
all_members = list(self.get_all())
all_members.sort(key=lambda member: len(member.posts), reverse=True)
for i in range(number_of_top):
yield all_members[i]
class PostStore(BaseStore):
"""Manipulate the principle operation on members.
Attributes:
posts (list): Store posts objects.
last_id (int): A counter that holds last added post object id.
"""
posts = []
last_id = 1
def __init__(self):
super(PostStore, self).__init__(PostStore.posts, PostStore.last_id)
def get_by_title(self, title):
all_posts = self.get_all()
return(post.title for post in all_posts if title in post.title)
def get_post_by_date(self):
all_posts = list(self.get_all())
all_posts.sort(key=lambda post: post.date, reverse=True)
return (post for post in all_posts)
def edit_post(self, id, title, body):
post = self.get_by_id(id)
post.title = title
post.body = body
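# --- Minimal usage sketch (not part of the original module) ---
# Member and Post below are hypothetical stand-ins exposing only the attributes
# the stores actually touch; the real model classes live elsewhere in the app.
if __name__ == "__main__":
    class Member(object):
        def __init__(self, name):
            self.id = None
            self.name = name
            self.posts = []
    class Post(object):
        def __init__(self, member_id, title, body, date):
            self.id = None
            self.member_id = member_id
            self.title = title
            self.body = body
            self.date = date
    member_store = MemberStore()
    post_store = PostStore()
    member_store.add(Member("alice"))  # assigned id 1
    post_store.add(Post(1, "hello", "first post", "2020-01-01"))
    for member in member_store.get_members_with_posts(post_store.get_all()):
        print("%s -> %s" % (member.name, [post.title for post in member.posts]))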
| 3,539 |
pacfish/visualize_device.py
|
IPASC/DataConversionTool
| 2 |
2170965
|
# SPDX-FileCopyrightText: 2021 International Photoacoustics Standardisation Consortium (IPASC)
# SPDX-FileCopyrightText: 2021 <NAME>
# SPDX-License-Identifier: BSD 3-Clause License
import matplotlib.pylab as plt
from matplotlib.patches import Rectangle, Circle, Polygon
import numpy as np
from pacfish import MetadataDeviceTags
def visualize_device(device_dictionary: dict, save_path: str = None, title: str = None, only_show_xz: bool = False):
"""
Visualises a given device from the device_dictionary.
Parameters
----------
device_dictionary: dict
The dictionary containing the device description.
save_path: str
Optional save_path to save a PNG file of the visualisation to.
title: str
Optional custom title for the plot.
only_show_xz: bool
Optional bool parameter specifying if only the first window should be shown instead of all
"""
def define_boundary_values(_device_dictionary: dict):
mins = np.ones(3) * 100000
maxs = np.ones(3) * -100000
if "illuminators" in _device_dictionary:
for illuminator in _device_dictionary["illuminators"]:
position = _device_dictionary["illuminators"][illuminator][MetadataDeviceTags.ILLUMINATOR_POSITION.tag]
for i in range(3):
if position[i] < mins[i]:
mins[i] = position[i]
if position[i] > maxs[i]:
maxs[i] = position[i]
for detector in _device_dictionary["detectors"]:
position = _device_dictionary["detectors"][detector][MetadataDeviceTags.DETECTOR_POSITION.tag]
for i in range(3):
if position[i] < mins[i]:
mins[i] = position[i]
if position[i] > maxs[i]:
maxs[i] = position[i]
fov = _device_dictionary["general"][MetadataDeviceTags.FIELD_OF_VIEW.tag]
for i in range(3):
if fov[2 * i] < mins[i]:
mins[i] = fov[2 * i]
if fov[2 * i + 1] < mins[i]:
mins[i] = fov[2 * i + 1]
if fov[2 * i] > maxs[i]:
maxs[i] = fov[2 * i]
if fov[2 * i + 1] > maxs[i]:
maxs[i] = fov[2 * i + 1]
MARGIN = 0.001
maxs += MARGIN
mins -= MARGIN
return mins, maxs
def add_arbitrary_plane(_device_dictionary: dict, _mins, _maxs, _axes, _draw_axis):
_draw_axis.set_xlim(_mins[_axes[0]], _maxs[_axes[0]])
_draw_axis.set_ylim(_maxs[_axes[1]], _mins[_axes[1]])
_draw_axis.set_title(f"axes {_axes[0]}/{_axes[1]} projection view")
_draw_axis.set_xlabel(f"{_axes[0]}-axis [m]")
_draw_axis.set_ylabel(f"{_axes[1]}-axis [m]")
fov = _device_dictionary["general"][MetadataDeviceTags.FIELD_OF_VIEW.tag]
for detector in _device_dictionary["detectors"]:
if not (MetadataDeviceTags.DETECTOR_POSITION.tag in _device_dictionary["detectors"][detector] and
MetadataDeviceTags.DETECTOR_GEOMETRY.tag in _device_dictionary["detectors"][detector]):
return
detector_geometry_type = _device_dictionary["detectors"][detector][
MetadataDeviceTags.DETECTOR_GEOMETRY_TYPE.tag]
detector_position = _device_dictionary["detectors"][detector][MetadataDeviceTags.DETECTOR_POSITION.tag]
detector_geometry = np.asarray(
_device_dictionary["detectors"][detector][MetadataDeviceTags.DETECTOR_GEOMETRY.tag])
if detector_geometry_type == "CUBOID":
if detector_geometry[_axes[0]] == 0:
detector_geometry[_axes[0]] = 0.0001
if detector_geometry[_axes[1]] == 0:
detector_geometry[_axes[1]] = 0.0001
_draw_axis.add_patch(Rectangle((detector_position[_axes[0]] - detector_geometry[_axes[0]] / 2,
detector_position[_axes[1]] - detector_geometry[_axes[1]] / 2),
detector_geometry[_axes[0]], detector_geometry[_axes[1]], color="blue"))
elif detector_geometry_type == "SPHERE" or detector_geometry_type == "CIRCLE":
_draw_axis.add_patch(Circle((detector_position[_axes[0]], detector_position[_axes[1]]), detector_geometry,
color="blue"))
else:
print("UNSUPPORTED GEOMETRY TYPE FOR VISUALISATION. WILL DEFAULT TO 'x' visualisation.")
_draw_axis.plot(detector_position[_axes[0]], detector_position[_axes[1]], "x", color="blue")
if "illuminators" in _device_dictionary:
for illuminator in _device_dictionary["illuminators"]:
if not (MetadataDeviceTags.ILLUMINATOR_POSITION.tag in _device_dictionary["illuminators"][illuminator] and
MetadataDeviceTags.ILLUMINATOR_GEOMETRY.tag in _device_dictionary["illuminators"][illuminator]):
return
illuminator_position = _device_dictionary["illuminators"][illuminator][
MetadataDeviceTags.ILLUMINATOR_POSITION.tag]
illuminator_orientation = np.asarray(
_device_dictionary["illuminators"][illuminator][MetadataDeviceTags.ILLUMINATOR_ORIENTATION.tag])
illuminator_divergence = _device_dictionary["illuminators"][illuminator][
MetadataDeviceTags.BEAM_DIVERGENCE_ANGLES.tag]
illuminator_geometry = np.asarray(
_device_dictionary["illuminators"][illuminator][MetadataDeviceTags.ILLUMINATOR_GEOMETRY.tag])
diameter = np.sqrt(np.sum(np.asarray([a ** 2 for a in illuminator_geometry]))) / 2
illuminator_geometry_type = _device_dictionary["illuminators"][illuminator][
MetadataDeviceTags.ILLUMINATOR_GEOMETRY_TYPE.tag]
_draw_axis.scatter(illuminator_position[_axes[0]], illuminator_position[_axes[1]],
marker="+", color="red")
x = [illuminator_position[_axes[0]],
illuminator_position[_axes[0]] +
illuminator_orientation[_axes[0]] / 25]
y = [illuminator_position[_axes[1]],
illuminator_position[_axes[1]] +
illuminator_orientation[_axes[1]] / 25]
plt.plot(x, y, color="yellow", alpha=1, linewidth=25, zorder=-10)
start_indexes = np.asarray(_axes) * 2
end_indexes = start_indexes + 1
_draw_axis.add_patch(
Rectangle((fov[start_indexes[0]], fov[start_indexes[1]]),
-fov[start_indexes[0]] + fov[end_indexes[0]],
-fov[start_indexes[1]] + fov[end_indexes[1]],
color="green", fill=False, label="Field of View"))
if title is None:
title = "Device Visualisation based on IPASC data format specifications"
mins, maxs = define_boundary_values(device_dictionary)
num_subplots = 3
if only_show_xz:
num_subplots = 1
if only_show_xz:
plt.figure(figsize=(3.33, 4))
else:
plt.figure(figsize=(10, 4))
plt.suptitle(title)
ax = plt.subplot(1, num_subplots, 1)
ax.axes.xaxis.set_visible(False)
ax.axes.yaxis.set_visible(False)
add_arbitrary_plane(device_dictionary, mins, maxs, _axes=(0, 2), _draw_axis=ax)
if not only_show_xz:
ax = plt.subplot(1, num_subplots, 2)
ax.axes.xaxis.set_visible(False)
ax.axes.yaxis.set_visible(False)
add_arbitrary_plane(device_dictionary, mins, maxs, _axes=(0, 1), _draw_axis=ax)
ax = plt.subplot(1, num_subplots, 3)
ax.axes.xaxis.set_visible(False)
ax.axes.yaxis.set_visible(False)
add_arbitrary_plane(device_dictionary, mins, maxs, _axes=(1, 2), _draw_axis=ax)
plt.scatter(None, None, color="blue", marker="o", label="Detector Element")
plt.scatter(None, None, color="red", marker="+", label="Illumination Element")
plt.scatter(None, None, color="green", marker="s", label="Field of View")
plt.scatter(None, None, color="Yellow", marker="s", label="Illumination Profile")
plt.legend(loc="lower left")
plt.tight_layout()
if save_path is None:
plt.show()
else:
plt.savefig(save_path + "figure.png", dpi=300)
| 8,460 |
tasks/__init__.py
|
MarcSkovMadsen/awesome-panel-starter
| 5 |
2170690
|
"""Here we configure the cli tasks available via `invoke <namespace>.<command>`"""
from invoke import Collection
from . import docker, notebook, site, test
docker.read_config_from_toml("pyproject.toml")
# pylint: disable=invalid-name
# as invoke only recognizes lower case
namespace = Collection()
namespace.add_collection(site)
namespace.add_collection(docker)
namespace.add_collection(notebook)
namespace.add_collection(test)
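# Example invocations (the task names are hypothetical; the actual commands are
# whatever the docker/notebook/site/test collections define):
#   invoke test.<task-name>
#   invoke docker.<task-name>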
| 431 |
halfpipe/workflow/report/anat.py
|
fossabot/Halfpipe-1
| 0 |
2170599
|
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from nipype.pipeline import engine as pe
from niworkflows.interfaces.masks import SimpleShowMaskRPT # ROIsPlot
from fmriprep import config
from ...interface import Exec, PlotRegistration, MakeResultdicts, ResultdictDatasink
from ..memory import MemoryCalculator
def init_anat_report_wf(workdir=None, name="anat_report_wf", memcalc=MemoryCalculator()):
workflow = pe.Workflow(name=name)
fmriprepreports = ["t1w_dseg_mask", "std_t1w"]
fmriprepreportdatasinks = [f"ds_{fr}_report" for fr in fmriprepreports]
strfields = [
"t1w_preproc",
"t1w_mask",
"t1w_dseg",
"std_preproc",
"std_mask",
*fmriprepreportdatasinks,
]
inputnode = pe.Node(
Exec(
fieldtpls=[
("tags", None),
*[(field, "firststr") for field in strfields],
("std_dseg", "ravel"),
]
),
name="inputnode",
)
#
make_resultdicts = pe.Node(
MakeResultdicts(reportkeys=["skull_strip_report", "t1_norm_rpt", *fmriprepreports]),
name="make_resultdicts",
)
workflow.connect(inputnode, "tags", make_resultdicts, "tags")
#
resultdict_datasink = pe.Node(
ResultdictDatasink(base_directory=workdir), name="resultdict_datasink"
)
workflow.connect(make_resultdicts, "resultdicts", resultdict_datasink, "indicts")
#
for fr, frd in zip(fmriprepreports, fmriprepreportdatasinks):
workflow.connect(inputnode, frd, make_resultdicts, fr)
# T1w segmentation
skull_strip_report = pe.Node(SimpleShowMaskRPT(), name="skull_strip_report")
workflow.connect(inputnode, "t1w_preproc", skull_strip_report, "background_file")
workflow.connect(inputnode, "t1w_mask", skull_strip_report, "mask_file")
workflow.connect(skull_strip_report, "out_report", make_resultdicts, "skull_strip_report")
# T1 -> mni
t1_norm_rpt = pe.Node(
PlotRegistration(template=config.workflow.spaces.get_spaces()[0]),
name="t1_norm_rpt",
mem_gb=0.1,
)
workflow.connect(inputnode, "std_preproc", t1_norm_rpt, "in_file")
workflow.connect(inputnode, "std_mask", t1_norm_rpt, "mask_file")
workflow.connect(t1_norm_rpt, "out_report", make_resultdicts, "t1_norm_rpt")
return workflow
| 2,446 |
max_area_of_island_695.py
|
cthi/LeetCode
| 0 |
2170961
|
class Solution:
def maxAreaOfIsland(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
dirs = ((1, 0), (-1, 0), (0, 1), (0, -1))
def flood_fill(x, y, N, M, seen, grid):
if x < 0 or y < 0 or x > N - 1 or y > M - 1 or seen[x][y]:
return 0
seen[x][y] = True
if grid[x][y] == 1:
size = 1
for i, j in dirs:
size += flood_fill(x + i, y + j, N, M, seen, grid)
return size
else:
return 0
N = len(grid)
M = len(grid[0])
seen = [[False for j in range(M)] for i in range(N)]
return max(flood_fill(x, y, N, M, seen, grid) for x in range(N) for y in range(M))
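# --- Minimal usage sketch (not part of the original file) ---
# The grid below is illustrative only; its largest island has 3 cells.
if __name__ == "__main__":
    grid = [[0, 1, 1, 0],
            [0, 1, 0, 0],
            [0, 0, 0, 1]]
    print(Solution().maxAreaOfIsland(grid))  # expected output: 3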
| 879 |
graphics/sprite_tile.py
|
HansGR/WorldsCollide
| 7 |
2166909
|
# a tile is 32 bytes representing an 8x8 array of palette color indices
# tile layout example:
# 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
# 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f
# row 1 of the tile are bytes:
# 0x02, 0x03
# 0x12, 0x13
# 0x02 = 0000 0010
# 0x03 = 0000 0011
# 0x12 = 0001 0010
# 0x13 = 0001 0011
# 0101 = 5
# 1111 = 15
# 0000 = 0
# 0000 = 0
# 0011 = 3
# 0000 = 0
# 0000 = 0
# 0000 = 0
# row 1 of the tile is color ids: 5 15 0 0 3 0 0 0
# 1100 1000 = 0xc8
# 0100 1000 = 0x48
# 1100 0000
# 0100 0000
# row 0 is bytes 0x00, 0x01, 0x10, 0x11
# row 2 is bytes 0x04, 0x05, 0x14, 0x15
# etc...
class SpriteTile:
ROW_COUNT = 8
COL_COUNT = 8
DATA_SIZE = 32
row_offsets = [
0,
1,
(DATA_SIZE // 2),
(DATA_SIZE // 2) + 1,
]
def __init__(self, data = None):
self.colors = [[0 for x in range(self.COL_COUNT)] for y in range(self.ROW_COUNT)]
if data is not None:
self.data = data
@property
def data(self):
tile_bytes = [0x00] * self.DATA_SIZE
for row_index in range(self.ROW_COUNT):
for col_index in range(self.COL_COUNT):
color = self.colors[row_index][col_index]
dest_bit = (self.COL_COUNT - col_index) - 1
for byte_index in range(len(self.row_offsets)):
tile_bytes[row_index * 2 + self.row_offsets[byte_index]] |= (((color >> byte_index) & 1) << dest_bit)
return tile_bytes
@data.setter
def data(self, new_data):
for row_index in range(self.ROW_COUNT):
row_bytes = []
for byte_index in range(len(self.row_offsets)):
row_bytes.append(new_data[row_index * 2 + self.row_offsets[byte_index]])
for col_index in range(self.COL_COUNT):
color = 0x00
source_bit = (self.COL_COUNT - col_index) - 1
for bit_index, byte in enumerate(row_bytes):
                    color |= (((byte >> source_bit) & 1) << bit_index)
                # assign the accumulated value so re-setting data does not OR stale bits
                self.colors[row_index][col_index] = color
def color(self, x, y):
# (0, 0) is top left of tile
return self.colors[y][x]
def __str__(self):
result = ""
for row in self.colors:
result += "["
for color in row:
result += f"{color:>2},"
result = result[:-1] + "]\n"
return result[:-1]
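# --- Minimal usage sketch (not part of the original file) ---
# Round-trips a tile through its 32-byte encoding; the color values below are
# arbitrary and only illustrate the bit-plane packing/unpacking described above.
if __name__ == "__main__":
    tile = SpriteTile()
    tile.colors[1][0] = 5   # row 1, leftmost pixel
    tile.colors[1][4] = 3
    encoded = tile.data                # 32 packed bytes
    decoded = SpriteTile(encoded)      # rebuild the tile from raw bytes
    assert decoded.color(0, 1) == 5 and decoded.color(4, 1) == 3
    print(decoded)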
| 2,515 |
get VK likes.py
|
dogda116/liked-posts-vk
| 1 |
2169537
|
import vk
import time
# Log in using VK... (read more in VK API documentation)
app_id = ''
user_login = ''
user_password = ''
# ... or using access token
access_token = ''
# Depending on your choice, use one of the following session authorizations (uncomment it)
# session = vk.AuthSession(app_id=app_id, user_login=user_login, user_password=<PASSWORD>)
# session = vk.Session(access_token=access_token)
api = vk.API(session)
def get_latest_posts_from_groups(group_ids, number_of_posts, api_ver):
all_posts = []
req_counter = 0
for group in group_ids:
req_counter += 1
if req_counter == 3:
# VK API allows to make only 3 requests per second
time.sleep(1)
req_counter = 0
latest_posts = api.wall.get(owner_id="-" + str(group), count=str(number_of_posts), filter="all", v=str(api_ver))
latest_posts = latest_posts['items']
latest_posts_ids = [[item['id'], item['owner_id'], item['date']] for item in latest_posts]
all_posts.extend(latest_posts_ids)
return all_posts
def get_latest_posts_from_friend(friend_ids, number_of_posts, api_ver):
all_posts = []
req_counter = 0
for friend in friend_ids:
req_counter += 1
if req_counter == 3:
# VK API allows to make only 3 requests per second
time.sleep(1)
req_counter = 0
try:
latest_posts = api.wall.get(owner_id=str(friend), count=str(number_of_posts), filter="all", v=str(api_ver))
except Exception:
# friend profile can be private/deleted
continue
latest_posts = latest_posts['items']
latest_posts_ids = [[item['id'], item['owner_id'], item['date']] for item in latest_posts]
all_posts.extend(latest_posts_ids)
return all_posts
def filter_liked_group_posts(group_posts, user_id, days, api_ver):
liked_group_posts = []
req_counter = 0
for post in group_posts:
if (time.time() - post[2]) / (24 * 60 * 60) > days:
continue
req_counter += 1
if req_counter == 3:
time.sleep(1)
req_counter = 0
info = api.likes.isLiked(user_id=user_id, type="post", owner_id=str(post[1]), item_id=str(post[0]), v=str(api_ver))
if info['liked'] == 1:
liked_group_posts.append("https://vk.com/wall" + str(post[1]) + "_" + str(post[0]))
return liked_group_posts
def filter_liked_friend_posts(friend_posts, user_id, days, api_ver):
liked_friend_posts = []
req_counter = 0
for post in friend_posts:
if (time.time() - post[2]) / (24 * 60 * 60) > days:
continue
req_counter += 1
if req_counter == 3:
time.sleep(1)
req_counter = 0
info = api.likes.isLiked(user_id=user_id, type="post", owner_id=str(post[1]), item_id=str(post[0]), v=str(api_ver))
if info['liked'] == 1:
liked_friend_posts.append("https://vk.com/wall" + str(post[1]) + "_" + str(post[0]))
return liked_friend_posts
def user_liked_group_posts(user_id, number_of_posts, days, api_ver):
subscriptions = api.users.getSubscriptions(user_id=str(user_id), v=str(api_ver))
group_ids = subscriptions['groups']['items']
all_posts = get_latest_posts_from_groups(group_ids, str(number_of_posts), str(api_ver))
return filter_liked_group_posts(all_posts, str(user_id), days, str(api_ver))
def user_liked_friend_posts(user_id, number_of_posts, days, api_ver):
friends = api.friends.get(user_id=str(user_id), v=str(api_ver))
friend_ids = friends['items']
all_posts = get_latest_posts_from_friend(friend_ids, str(number_of_posts), str(api_ver))
return filter_liked_friend_posts(all_posts, str(user_id), days, str(api_ver))
def save_links_in_txt(links):
output = open('links.txt', 'w')
for link in links:
print(link, file=output)
output.close()
def main():
user_id = input("Enter id of the user whose likes you want to get:")
source_type = input("Enter the type of source ('groups' or 'friends'):")
number_of_posts = input("Enter how many posts need to be checked in each source (1 to 100):")
number_of_days = int(input("Enter how many previous days should be considered:"))
api_ver = input("Enter VK API's version (e.g. 5.101):")
print("Processing...")
start_time = time.time()
if source_type == 'groups':
save_links_in_txt(user_liked_group_posts(user_id, number_of_posts, number_of_days, api_ver))
elif source_type == 'friends':
save_links_in_txt(user_liked_friend_posts(user_id, number_of_posts, number_of_days, api_ver))
else:
print("Wrong 'type of source' input")
print("Success.\n Result saved in 'links.txt' file.\n Running time:", (time.time() - start_time) / 60, "minutes")
main()
| 4,966 |
example/views/exception.py
|
toshiki-tosshi/django-boost
| 25 |
2170232
|
from django_boost.views.generic import TemplateView
from django_boost.http.response import Http415
class E415View(TemplateView):
def get_context_data(self, **kwargs):
raise Http415
| 196 |
params.py
|
latamdatawizards/rutacovid-seir-api
| 0 |
2170980
|
import numpy as np
dias_evaluacion = 90 # we assume 90 days
dt = 1
periodo_evaluacion = np.linspace(0, dias_evaluacion, dias_evaluacion + 1)
alpha = 0.2
beta = 1.75
gamma = 0.5
parametros = alpha, beta, gamma
# Initial conditions for the ZMG (Guadalajara metropolitan area)
JAL_Population = 8000000
I_o = 32 / JAL_Population # We have 32 confirmed cases
E_o = (32*4)/JAL_Population # We assume 4 exposed people per case
S_o = (1) - (E_o+I_o) # The rest of us are susceptible
R_o = 0 # No one has recovered yet
Condiciones_Iniciales = S_o,E_o,I_o,R_o
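# --- Illustrative only (not part of the original file) ---
# A standard SEIR right-hand side, sketched here just to show how alpha, beta
# and gamma are typically consumed together with these initial conditions;
# the actual model integration lives elsewhere in the project.
def seir_rhs(y, t, alpha, beta, gamma):
    S, E, I, R = y
    dSdt = -beta * S * I              # susceptibles become exposed
    dEdt = beta * S * I - alpha * E   # exposed become infectious at rate alpha
    dIdt = alpha * E - gamma * I      # infectious recover at rate gamma
    dRdt = gamma * I
    return dSdt, dEdt, dIdt, dRdt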
| 501 |
tests/pibi_tests.py
|
gjoyet/pibi
| 2 |
2169690
|
from src.ex1 import *
from src.ex2 import *
import os
import pytest
def test_parse_fasta():
h, s = parse_fasta('test_data/reference.fasta')
assert h == ['rseq1', 'rseq2']
assert s == ['ATATGAGCACTCAGTAATAGCCATGGGAGT'
'CAACTCAGTAACCATACCGTTGTTACTAGC',
'ATCGTTTCATTTCAGCTCAGTATAATGAAA'
'GATTTTGCAAATGTTACTGAAACAAAAGCA']
def test_das():
h, s = parse_fasta('test_data/query.fasta')
h, s = discard_ambiguous_seqs(h, s)
assert h == ['qseq1', 'qseq2', 'qseq4']
assert s == ['CTCAGTA', 'CTCagTa', 'TTTTTTT']
def test_nf(capfd):
h, s = parse_fasta('test_data/query.fasta')
h, s = discard_ambiguous_seqs(h, s)
nucleotide_frequencies(s)
out, err = capfd.readouterr()
assert out == '##########\nA: 0.19\nC: 0.19\n' \
'T: 0.52\nG: 0.10\n##########\n'
def test_map_reads():
sd = map_reads('test_data/query.fasta',
'test_data/reference.fasta')
assert sd == {'qseq1': {'rseq1': [9, 33], 'rseq2': [15]},
'qseq2': {'rseq1': [9, 33], 'rseq2': [15]},
'qseq4': {}}
def test_convert():
path_to_s = 'test_data/convert_me.sam'
path_to_f = sam_to_fasta(path_to_s)
h, s = parse_fasta(path_to_f)
os.remove(path_to_f)
assert h == ['NS500637:2:H197YBGXX:1:11102:13568:10359',
'NS500637:2:H197YBGXX:1:11102:13568:10359']
assert s == ['CGGTACTTCTCCAGATACAAAAGTTGCTTGCTGTTAAAAGCT'
'CCACGCCGCTTTTGTCTTATGAATTGTACTGCATCTTCATAT'
'TTCATTCCACCTTCAATTAATGCTAGGGCAACAAGCACCGGA'
'GCTCTGCCAAGGCCTGCGACACA',
'TGTGTCGCAGGCCTTGGCAGAGCTCCGGTGCTTGTTGCCCTA'
'GCATTAATTGAAGGTGGAATGAAATATGAAGATGCAGTACAA'
'TTCATAAGACAAAAGCGGCGTGGAGCTTTTAACAGCAAGCAA'
'CTTTTGTATCTGGAGAAGTACCG']
| 1,878 |
jasper_test.py
|
codebhendi/alfred-bot
| 0 |
2168926
|
import pyaudio
import wave
import time
import tempfile
import audioop
from os import environ, path
import pocketsphinx as ps
from sphinxbase.sphinxbase import *
def init():
    modeldir = "en-adapt"
    config = ps.Decoder.default_config()
    config.set_string('-hmm', path.join(modeldir, 'en-us-alfred/'))
    config.set_string('-lm', path.join(modeldir, 'alfred.lm'))
    config.set_string('-dict', path.join(modeldir, 'alfred.dict'))
    decoder = ps.Decoder(config)
    audio = pyaudio.PyAudio()
activeListen(audio, decoder, THRESHOLD=None)
def getScore(data):
rms = audioop.rms(data, 2)
score = rms / 3
return score
def fetchThreshold(audio):
# TODO: Consolidate variables from the next three functions
THRESHOLD_MULTIPLIER = 1.8
RATE = 16000
CHUNK = 1024
# number of seconds to allow to establish threshold
THRESHOLD_TIME = 1
# prepare recording stream
stream = audio.open(format=pyaudio.paInt16,
channels=1,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
# stores the audio data
frames = []
# stores the lastN score values
lastN = [i for i in range(20)]
# calculate the long run average, and thereby the proper threshold
    for i in range(0, RATE // CHUNK * THRESHOLD_TIME):
data = stream.read(CHUNK)
frames.append(data)
# save this data point as a score
lastN.pop(0)
lastN.append(getScore(data))
average = sum(lastN) / len(lastN)
stream.stop_stream()
stream.close()
# this will be the benchmark to cause a disturbance over!
THRESHOLD = average * THRESHOLD_MULTIPLIER
print(THRESHOLD)
return THRESHOLD
def transcribe(fp, decoder):
"""
Performs STT, transcribing an audio file and returning the result.
Arguments:
fp -- a file object containing audio data
"""
fp.seek(44)
# FIXME: Can't use the Decoder.decode_raw() here, because
# pocketsphinx segfaults with tempfile.SpooledTemporaryFile()
data = fp.read()
decoder.start_utt()
decoder.process_raw(data, False, True)
decoder.end_utt()
result = decoder.hyp()
transcribed = [result]
print(transcribed[0])
return transcribed
def activeListen(audio, decoder, THRESHOLD):
RATE = 16000
CHUNK = 1024
LISTEN_TIME = 12
# check if no threshold provided
if THRESHOLD is None:
THRESHOLD = fetchThreshold(audio)
# self.speaker.play(jasperpath.data('audio', 'beep_hi.wav'))
# prepare recording stream
stream = audio.open(format=pyaudio.paInt16,
channels=1,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
frames = []
# increasing the range # results in longer pause after command
# generation
lastN = [THRESHOLD * 1.2 for i in range(30)]
    for i in range(0, RATE // CHUNK * LISTEN_TIME):
data = stream.read(CHUNK)
frames.append(data)
score = getScore(data)
lastN.pop(0)
lastN.append(score)
average = sum(lastN) / float(len(lastN))
# TODO: 0.8 should not be a MAGIC NUMBER!
if average < THRESHOLD * 0.8:
break
# self.speaker.play(jasperpath.data('audio', 'beep_lo.wav'))
# save the audio data
stream.stop_stream()
stream.close()
with tempfile.SpooledTemporaryFile(mode='w+b') as f:
wav_fp = wave.open(f, 'wb')
wav_fp.setnchannels(1)
wav_fp.setsampwidth(pyaudio.get_sample_size(pyaudio.paInt16))
wav_fp.setframerate(RATE)
        wav_fp.writeframes(b''.join(frames))
wav_fp.close()
f.seek(0)
transcribe(f, decoder)
init()
| 3,869 |
tools/ng_reduce.py
|
sdasgup3/neongoby
| 2 |
2170708
|
#!/usr/bin/env python
import argparse
import rcs_utils
import ng_utils
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description = 'Reduce testcase for alias pointers')
parser.add_argument('prog', help = 'the program name (e.g. mysqld)')
parser.add_argument('logs', nargs='+', help = 'point-to logs (.pts)')
parser.add_argument('aa',
help = 'the checked alias analysis: ' + \
str(ng_utils.get_aa_choices()),
metavar = 'aa',
choices = ng_utils.get_aa_choices())
parser.add_argument('vid1', help = 'ValueID of Pointer 1')
parser.add_argument('vid2', help = 'ValueID of Pointer 2')
args = parser.parse_args()
cmd = ng_utils.load_all_plugins('opt')
# reducer need be put before aa
cmd = ' '.join((cmd, '-remove-untouched-code'))
cmd = ' '.join((cmd, '-simplifycfg'))
# Load the checked AA
cmd = ng_utils.load_aa(cmd, args.aa)
cmd = ' '.join((cmd, '-verify-reducer'))
cmd = ' '.join((cmd, '-strip'))
for log in args.logs:
cmd = ' '.join((cmd, '-log-file', log))
cmd = ' '.join((cmd, '-pointer-value', args.vid1))
cmd = ' '.join((cmd, '-pointer-value', args.vid2))
cmd = ' '.join((cmd, '-o', args.prog + '.reduce.bc'))
cmd = ' '.join((cmd, '<', args.prog + '.bc'))
rcs_utils.invoke(cmd)
cmd = ' '.join(('clang++', args.prog + '.reduce.bc',
'-o', args.prog + '.reduce'))
linking_flags = rcs_utils.get_linking_flags(args.prog)
cmd = ' '.join((cmd, ' '.join(linking_flags)))
rcs_utils.invoke(cmd)
| 1,641 |
src/web/generator/migrations/0002_auto_20160531_2359.py
|
fossabot/SIStema
| 5 |
2170527
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-31 18:59
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import relativefilepathfield.fields
class Migration(migrations.Migration):
dependencies = [
('generator', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Image',
fields=[
('abstractdocumentblock_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='generator.AbstractDocumentBlock')),
('filename', relativefilepathfield.fields.RelativeFilePathField(path=settings.SISTEMA_GENERATOR_ASSETS_DIR, recursive=True)),
('width', models.PositiveIntegerField(blank=True, default=None, help_text='В пунктах. Оставьте пустым, чтобы взять размеры самой картинки', null=True)),
('height', models.PositiveIntegerField(blank=True, default=None, help_text='В пунктах. Оставьте пустым, чтобы взять размеры самой картинки', null=True)),
],
options={
'abstract': False,
},
bases=('generator.abstractdocumentblock',),
),
]
| 1,328 |
migrations/versions/357612dfa45b_.py
|
rsrdesarrollo/sarna
| 25 |
2167841
|
"""empty message
Revision ID: 357612dfa45b
Revises: 49bab253b4ee
Create Date: 2018-07-04 23:07:54.313302
"""
from alembic import op
import sqlalchemy as sa
import sarna
# revision identifiers, used by Alembic.
revision = '357612dfa45b'
down_revision = '49bab253b4ee'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column('login_try', sa.SmallInteger(), nullable=False, server_default='0'))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('user', 'login_try')
# ### end Alembic commands ###
| 685 |
django_filters/tests/__init__.py
|
bmihelac/django-filter
| 1 |
2170509
|
from .tests import (GenericViewTests, InheritanceTest, ModelInheritanceTest,
DateRangeFilterTest, FilterSetForm, AllValuesFilterTest, InitialValueTest,
RelatedObjectTest, MultipleChoiceFilterTest, MultipleLookupTypesTest)
| 230 |
bfg/guns/spdy.py
|
fomars/bfg
| 14 |
2170975
|
'''
Gun for SPDY/2+
'''
import logging
import select
import ssl
import socket
import spdylay
from .base import GunBase, StopWatch
logger = logging.getLogger(__name__)
class SpdyTaskHandler(object):
def __init__(self, task, scenario, results):
self.task = task
self.scenario = scenario
self.results = results
self.stream_id = None
self.sw = None
self.is_finished = False
self.is_failed = False
def on_start(self, stream_id):
assert(self.stream_id is None)
self.stream_id = stream_id
self.sw = StopWatch(self.task)
self.sw.scenario = self.scenario
self.sw.action = 'request'
def on_error(self, error_code=None):
assert(self.sw is not None)
self.sw.stop()
self.sw.set_error(error_code)
self.is_failed = True
self.is_finished = True
def on_request_sent(self):
assert(self.sw is not None)
assert(self.sw.action == 'request')
self.sw.stop()
self.results.put(self.sw.as_sample())
self.sw = StopWatch(self.task)
self.sw.scenario = self.scenario
self.sw.action = 'response_start'
def on_header(self, headers):
assert(self.sw is not None)
if self.sw.action == 'response_start':
self.sw.stop()
self.results.put(self.sw.as_sample())
else:
assert(self.sw.action == 'response')
self.sw = StopWatch(self.task)
self.sw.scenario = self.scenario
self.sw.action = 'response'
self.sw.ext['length'] = 0
for k, v in headers:
if k == ':status':
self.sw.set_code(int(v))
def on_data(self, length):
assert(self.sw is not None)
assert(self.sw.action == 'response')
assert('length' in self.sw.ext)
self.sw.ext['length'] += length
def on_response_end(self):
assert(self.sw is not None)
assert(self.sw.action == 'response')
self.sw.stop()
self.results.put(self.sw.as_sample())
self.sw = None
self.is_finished = True
class SpdyMultiGun(GunBase):
'''
Multi request gun. Only GET. Expects an array of (marker, request)
tuples in task.data. A stream is opened for every request first and
    responses are read after all streams have been opened. A sample is
    measured for every action and for the overall time of the whole batch.
    The overall-time sample is marked with 'overall' in its action field.
Based on UrlFetcher from python-spdylay.
'''
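    # Illustrative task shape (hypothetical values, not taken from the original
    # code): task.marker names the scenario and task.data is a list of
    # (marker, path) tuples, one SPDY stream per tuple, e.g.
    #     [("main_page", "/"), ("search", "/search?q=spdy")]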
SECTION = 'spdy_gun'
SPDY_VERSIONS = {
spdylay.PROTO_SPDY2: "2",
spdylay.PROTO_SPDY3: "3",
# spdylay.PROTO_SPDY3_1: "3.1"
4: "3.1"
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.base_address = self.get_option('target')
logger.info("Initialized spdy gun with target '%s'", self.base_address)
self.ctx = None
self.sock = None
self.session = None
def connect(self):
self.ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
self.ctx.options = (
ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_COMPRESSION)
self.ctx.set_npn_protocols(spdylay.get_npn_protocols())
self.sock = socket.create_connection((self.base_address, 443))
self.sock = self.ctx.wrap_socket(self.sock)
version = spdylay.npn_get_version(self.sock.selected_npn_protocol())
if version == 0:
raise RuntimeError('NPN failed')
logger.info(
"Negotiated SPDY version: %s",
self.SPDY_VERSIONS.get(version, 'unknown'))
self.sock.setblocking(False)
self.session = spdylay.Session(
spdylay.CLIENT,
version,
send_cb=self.send_cb,
on_ctrl_recv_cb=self.on_ctrl_recv_cb,
on_data_recv_cb=self.on_data_recv_cb,
before_ctrl_send_cb=self.before_ctrl_send_cb,
on_ctrl_send_cb=self.on_ctrl_send_cb,
on_stream_close_cb=self.on_stream_close_cb)
self.session.submit_settings(
spdylay.FLAG_SETTINGS_NONE,
[(spdylay.SETTINGS_MAX_CONCURRENT_STREAMS,
spdylay.ID_FLAG_SETTINGS_NONE,
100)]
)
def send_cb(self, session, data):
return self.sock.send(data)
def before_ctrl_send_cb(self, session, frame):
if frame.frame_type == spdylay.SYN_STREAM:
handler = session.get_stream_user_data(frame.stream_id)
handler.on_start(frame.stream_id)
def on_ctrl_send_cb(self, session, frame):
if frame.frame_type == spdylay.SYN_STREAM:
handler = session.get_stream_user_data(frame.stream_id)
handler.on_request_sent()
def on_ctrl_recv_cb(self, session, frame):
if (frame.frame_type == spdylay.SYN_REPLY or
frame.frame_type == spdylay.HEADERS):
handler = session.get_stream_user_data(frame.stream_id)
handler.on_header(frame.nv)
def on_data_recv_cb(self, session, flags, stream_id, length):
handler = session.get_stream_user_data(stream_id)
handler.on_data(length)
def on_stream_close_cb(self, session, stream_id, status_code):
handler = session.get_stream_user_data(stream_id)
if status_code == spdylay.OK:
handler.on_response_end()
else:
handler.on_error(status_code)
def shoot(self, task):
if self.session is None:
self.connect()
logger.debug("Task: %s", task)
scenario = task.marker
subtasks = [
task._replace(data=missile[1], marker=missile[0])
for missile in task.data
]
handlers = []
with self.measure(task) as overall_sw:
for subtask in subtasks:
logger.debug("Request GET %s", subtask.data)
handler = SpdyTaskHandler(subtask, scenario, self.results)
self.session.submit_request(
0, [
(':method', 'GET'),
(':scheme', 'https'),
(':path', subtask.data),
(':version', 'HTTP/1.1'),
(':host', self.base_address),
('accept', '*/*'),
('user-agent', 'bfg-spdy')],
stream_user_data=handler)
handlers.append(handler)
while ((self.session.want_read() or
self.session.want_write()) and not
all(h.is_finished for h in handlers)):
want_read = want_write = False
try:
data = self.sock.recv(4096)
if data:
self.session.recv(data)
else:
break
except ssl.SSLWantReadError:
want_read = True
except ssl.SSLWantWriteError:
want_write = True
try:
self.session.send()
except ssl.SSLWantReadError:
want_read = True
except ssl.SSLWantWriteError:
want_write = True
if want_read or want_write:
select.select([self.sock] if want_read else [],
[self.sock] if want_write else [],
[])
overall_sw.stop()
overall_sw.scenario = scenario
overall_sw.action = "overall"
failed = [h for h in handlers if h.is_failed]
if failed:
overall_sw.set_error()
| 7,789 |
lost_hat_product_page_tests.py
|
deidrah/demo-tests
| 1 |
2170579
|
from BaseTestClass import BaseTestClass
from helpers.wrappers import screenshot_decorator
class LostHatProductPageTests(BaseTestClass):
@screenshot_decorator
def test_check_product_name(self):
expected_product_name = 'HUMMINGBIRD PRINTED T-SHIRT'
name_xpath = '//*[@class="col-md-6"]//*[@itemprop="name"]'
driver = self.ef_driver
driver.get(self.sample_product_url)
self.assert_element_text(driver, name_xpath, expected_product_name)
@screenshot_decorator
def test_check_product_price(self):
expected_product_price = 'PLN23.52'
price_xpath = '//*[@class="current-price"]//*[@itemprop="price"]'
driver = self.ef_driver
driver.get(self.sample_product_url)
self.assert_element_text(driver, price_xpath, expected_product_price)
def assert_element_text(self, driver, xpath, expected_text):
"""Comparing expected text with observed value from web element
:param driver: webdriver instance
:param xpath: xpath to element with text to be observed
:param expected_text: text what we expecting to be found
:return: None
"""
element = driver.find_element_by_xpath(xpath)
element_text = element.text
self.assertEqual(expected_text, element_text, 'Expected text differ from actual on page: {}'.format(driver.current_url))
| 1,390 |
py/area.py
|
Ellian-aragao/URI
| 0 |
2171004
|
vet = [float(x) for x in input().split()]
total = (vet[0] * vet[2])/2
print('TRIANGULO: {:.3f}'.format(total))
total = vet[2] * vet[2] * 3.14159
print('CIRCULO: {:.3f}'.format(total))
total = ((vet[0] + vet[1])*vet[2])/2
print('TRAPEZIO: {:.3f}'.format(total))
total = vet[1] * vet[1]
print('QUADRADO: {:.3f}'.format(total))
total = vet[0] * vet[1]
print('RETANGULO: {:.3f}'.format(total))
| 395 |
final_project/catkin_ws/src/run_neural/scripts/run_neural.py
|
jrkwon/ce491-2019
| 0 |
2170804
|
#!/usr/bin/env python
import threading
import cv2
import time
import rospy
import numpy as np
from bolt_msgs.msg import Control
from std_msgs.msg import Int32
from sensor_msgs.msg import Image
import sys
import os
sys.path.append('../neural_net/')
os.chdir('../neural_net/')
import const
from image_converter import ImageConverter
from drive_run import DriveRun
from config import Config
from image_process import ImageProcess
SHARP_TURN_MIN = 0.3
BRAKE_APPLY_SEC = 1.5
THROTTLE_DEFAULT = 0.2
THROTTLE_SHARP_TURN = 0.05
class NeuralControl:
def __init__(self, weight_file_name):
rospy.init_node('run_neural')
self.ic = ImageConverter()
self.image_process = ImageProcess()
self.rate = rospy.Rate(30)
        self.drive = DriveRun(weight_file_name)
rospy.Subscriber('/bolt/front_camera/image_raw', Image, self._controller_cb)
self.image = None
self.image_processed = False
#self.config = Config()
self.braking = False
def _controller_cb(self, image):
img = self.ic.imgmsg_to_opencv(image)
cropped = img[Config.config['image_crop_y1']:Config.config['image_crop_y2'],
Config.config['image_crop_x1']:Config.config['image_crop_x2']]
img = cv2.resize(cropped, (Config.config['input_image_width'],
Config.config['input_image_height']))
self.image = self.image_process.process(img)
## this is for CNN-LSTM net models
if Config.config['lstm'] is True:
self.image = np.array(self.image).reshape(1,
Config.config['input_image_height'],
Config.config['input_image_width'],
Config.config['input_image_depth'])
self.image_processed = True
def timer_cb(self):
self.braking = False
def main(weight_file_name):
# ready for neural network
neural_control = NeuralControl(weight_file_name)
# ready for /bolt topic publisher
joy_pub = rospy.Publisher('/bolt', Control, queue_size = 10)
joy_data = Control()
print('\nStart running. Vroom. Vroom. Vroooooom......')
print('steer \tthrt: \tbrake')
while not rospy.is_shutdown():
if neural_control.image_processed is False:
continue
# predicted steering angle from an input image
prediction = neural_control.drive.run(neural_control.image)
joy_data.steer = prediction
#############################
## TODO: you need to change the vehicle speed wisely
## e.g. not too fast in a curved road and not too slow in a straight road
# if brake is not already applied and sharp turn
if neural_control.braking is False:
if abs(joy_data.steer) > SHARP_TURN_MIN:
joy_data.throttle = THROTTLE_SHARP_TURN
joy_data.brake = 0.5
neural_control.braking = True
timer = threading.Timer(BRAKE_APPLY_SEC, neural_control.timer_cb)
timer.start()
else:
joy_data.throttle = THROTTLE_DEFAULT
joy_data.brake = 0
## publish joy_data
joy_pub.publish(joy_data)
## print out
if Config.config['lstm'] is True:
cur_output = '{0:.3f} \t{1:.2f} \t{2:.2f}\r'.format(prediction[0][0][0],
joy_data.throttle, joy_data.brake)
else:
cur_output = '{0:.3f} \t{1:.2f} \t{2:.2f}\r'.format(prediction[0][0],
joy_data.throttle, joy_data.brake)
sys.stdout.write(cur_output)
sys.stdout.flush()
## ready for processing a new input image
neural_control.image_processed = False
neural_control.rate.sleep()
if __name__ == "__main__":
try:
if len(sys.argv) != 2:
exit('Usage:\n$ rosrun run_neural run_neural.py weight_file_name')
main(sys.argv[1])
except KeyboardInterrupt:
print ('\nShutdown requested. Exiting...')
| 4,229 |
angalabiri/users/signals.py
|
dark-codr/ebiangala
| 1 |
2170184
|
from django.http import request
from django.contrib.auth import get_user_model
from paystackapi.paystack import Paystack
from config import settings
from paystackapi.customer import Customer
from paystackapi.verification import Verification
from django.db.models.signals import post_save, pre_save
from django.dispatch import receiver
paystack_secret_key = settings.base.PAYSTACK_SECRET_KEY
paystack = Paystack(secret_key=paystack_secret_key)
User = get_user_model()
@receiver(pre_save, sender=User)
def create_paystack_customer(sender, instance, *args, **kwargs):
customer = Customer.create(
first_name=instance.first_name,
last_name=instance.last_name,
email=instance.email,
phone=instance.phone,
)
@receiver(post_save, sender=User)
def update_paystack_customer(sender, instance, created, *args, **kwargs):
if created:
customer = Customer.update(
first_name=instance.first_name,
last_name=instance.last_name,
email=instance.email,
phone=instance.phone,
)
| 1,055 |
case_2/runners.py
|
JeroenDM/sampling_based_tube_following_2
| 1 |
2171019
|
import numpy as np
import pandas as pd
from acrobotics.planning.solver import solve
from acrobotics.planning.types import (
Solution,
CostFuntionType,
PlanningSetup,
SolveMethod,
)
from acrobotics.planning.settings import OptSettings, SolverSettings
from acrobotics.path.sampling import SampleMethod, SamplingSetting, SearchStrategy
from acrobotics.path.path_pt import TolEulerPt
from definition import create_robot, create_scene, create_path, show_path
NDOF = 6
def create_settings_min_incremental(desired_num_samples, iters, sample_method):
s = SamplingSetting(
search_strategy=SearchStrategy.MIN_INCREMENTAL,
iterations=iters,
sample_method=sample_method,
num_samples=None,
desired_num_samples=desired_num_samples,
max_search_iters=int(10e4),
tolerance_reduction_factor=2.0,
use_state_cost=True,
state_cost_weight=1.0,
)
s2 = SolverSettings(SolveMethod.sampling_based, CostFuntionType.sum_squared, s)
return s2
def create_settings_incremental(num_samples, iters, sample_method):
s = SamplingSetting(
search_strategy=SearchStrategy.INCREMENTAL,
iterations=iters,
sample_method=sample_method,
num_samples=num_samples,
tolerance_reduction_factor=2.0,
use_state_cost=True,
state_cost_weight=1.0,
)
s2 = SolverSettings(SolveMethod.sampling_based, CostFuntionType.sum_squared, s)
return s2
def create_settings_grid(iters):
s = SamplingSetting(
search_strategy=SearchStrategy.GRID,
iterations=iters,
tolerance_reduction_factor=2.0,
use_state_cost=True,
state_cost_weight=1.0,
)
s2 = SolverSettings(SolveMethod.sampling_based, CostFuntionType.sum_squared, s)
return s2
def results_to_dict(settings: SamplingSetting, solution: Solution, path):
data = {}
data["search_strategy"] = settings.search_strategy.value
data["iters"] = settings.iterations
if settings.search_strategy == SearchStrategy.MIN_INCREMENTAL:
data["desired_num_samples"] = settings.desired_num_samples
data["sample_method"] = settings.sample_method.value
data["num_samples"] = np.nan
elif settings.search_strategy == SearchStrategy.INCREMENTAL:
data["num_samples"] = settings.num_samples
data["sample_method"] = settings.sample_method.value
data["desired_num_samples"] = np.nan
elif settings.search_strategy == SearchStrategy.GRID:
pt_tol: TolEulerPt = path[0].rot_tol
data["num_samples"] = (
pt_tol[0].num_samples * pt_tol[1].num_samples * pt_tol[2].num_samples
)
data["sample_method"] = np.nan
data["desired_num_samples"] = np.nan
data["cost"] = solution.path_cost
data["time"] = solution.run_time
# add joint path to dict/csv
n_path = len(solution.joint_positions)
for j in range(n_path):
for i in range(NDOF):
data[f"q_{j}_{i}"] = solution.joint_positions[j][i]
return data
def run_experiments(parameters, filename):
robot = create_robot()
scene, start, stop = create_scene(np.array([0.85, 0, 0]))
# df = pd.DataFrame(
# columns=["search_strategy", "iters", "desired_num_samples", "cost", "time"]
# )
columns = [
"search_strategy",
"sample_method",
"iters",
"desired_num_samples",
"num_samples",
"cost",
"time",
]
# add joint path to dict/csv
for j in range(parameters["n_path"]):
for i in range(NDOF):
columns.append(f"q_{j}_{i}")
header = ",".join(columns) + "\n"
with open(filename, "a") as f:
f.write(header)
for ss in parameters["search_strategy"]:
if ss == SearchStrategy.GRID:
for rtol in parameters["r_tol_samples"]:
path = create_path(
start, stop, parameters["n_path"], rtol[0], rtol[1], rtol[2]
)
setup = PlanningSetup(robot, path, scene)
s = create_settings_grid(parameters["iters"])
sol = solve(setup, s)
res = results_to_dict(s.sampling_settings, sol, path)
# df = df.append(res, ignore_index=True)
f.write(",".join([str(v) for v in res.values()]) + "\n")
elif ss == SearchStrategy.MIN_INCREMENTAL:
path = create_path(start, stop, parameters["n_path"])
setup = PlanningSetup(robot, path, scene)
for dns in parameters["desired_num_samples"]:
s = create_settings_min_incremental(
dns, parameters["iters"], parameters["sample_method"]
)
sol = solve(setup, s)
res = results_to_dict(s.sampling_settings, sol, path)
# df = df.append(res, ignore_index=True)
f.write(",".join([str(v) for v in res.values()]) + "\n")
elif ss == SearchStrategy.INCREMENTAL:
path = create_path(start, stop, parameters["n_path"])
setup = PlanningSetup(robot, path, scene)
for ns in parameters["num_samples"]:
s = create_settings_incremental(
ns, parameters["iters"], parameters["sample_method"]
)
sol = solve(setup, s)
res = results_to_dict(s.sampling_settings, sol, path)
# df = df.append(res, ignore_index=True)
f.write(",".join([str(v) for v in res.values()]) + "\n")
else:
raise NotImplementedError()
return 0
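# Sketch of how run_experiments might be called; the keys are inferred from the
# function above, while the concrete values, the SampleMethod member name and the
# r_tol_samples entries are assumptions rather than values taken from this file.
# parameters = {
#     "n_path": 5,
#     "iters": 50,
#     "search_strategy": [SearchStrategy.GRID, SearchStrategy.INCREMENTAL],
#     "sample_method": SampleMethod.random_uniform,  # assumed enum member
#     "r_tol_samples": [(rx, ry, rz)],  # triples forwarded to create_path
#     "desired_num_samples": [100, 500],
#     "num_samples": [100, 500],
# }
# run_experiments(parameters, "results.csv")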
| 5,789 |
tests/test_fairness_mistreatment.py
|
marlesson/recsys-fair-metrics
| 0 |
2170809
|
import sys, os
import unittest
from unittest.mock import patch
import pandas as pd
from recsys_fair_metrics.recsys_fair import RecsysFair
import shutil
OUTPUT_TEST = "tests/output"
class TestFairnessMistreatment(unittest.TestCase):
def setUp(self):
# shutil.rmtree(OUTPUT_TEST, ignore_errors=True)
os.makedirs(OUTPUT_TEST, exist_ok=True)
self.df = pd.read_csv("tests/factories/test_set_predictions.csv")
self.supp_metadata = pd.read_csv("tests/factories/artist-metadata.csv")
self.column = "artist_rating"
self.recsys_fair = RecsysFair(
df=self.df,
supp_metadata=self.supp_metadata,
user_column="userid",
item_column="musicbrainz-artist-id",
reclist_column="sorted_actions",
reclist_score_column="action_scores",
)
def test_metric(self):
dm = self.recsys_fair.disparate_mistreatment(self.column)
metric = dm.metric()
self.assertEqual(metric["true_positive_rate"].round(4), 0.1128)
def test_show(self):
dm = self.recsys_fair.disparate_mistreatment(self.column)
fig = dm.show()
fig.write_image(OUTPUT_TEST + "/disparate_mistreatment.png")
| 1,235 |
lang_api.py
|
bact/DSIS_ACH_Challenge_-1
| 0 |
2169498
|
from fastapi import FastAPI
import logging
from record_listen import get_word
from websocket import server
app = FastAPI()
@app.get("/")
async def root():
return {"message": "Language API is alive"}
@app.get("/words/{lang}")
async def pick_word(lang):
word = get_word(lang)
if word:
return {
"word": word,
"lang": lang,
"difficulty": 1,
}
return {"word": ""}
@app.get("/record/{lang}")
async def record(lang):
word = get_word(lang)
server.send_to_clients(f"record: {word}") # send to websocket for RPI
logging.info(f"Pick '{word}' for record.")
if word:
return {
"word": word,
"lang": lang,
"difficulty": 1,
}
return {"word": ""}
@app.get("/listen/{lang}")
async def listen(lang):
word = get_word(lang)
server.send_to_clients(f"listen: {word}") # send to websocket for RPI
logging.info(f"Pick '{word}' for listen.")
if word:
return {
"word": word,
"lang": lang,
"difficulty": 1,
}
return {"word": ""}
| 1,126 |
main.py
|
smpny7/notes-maker
| 0 |
2170706
|
# coding: utf-8
import codecs
import csv
import json
import os
import sys
import time
import tkinter as tk
import tkinter.filedialog as fd
print("\n-----------------------------------------------")
print(" \033[1m\033[34mNotes Maker v1.1.0\033[0m ( MIT )\n")
print(" Last Build: May 14 2021")
print(" GitHub: \033[4mhttps://github.com/smpny7/notes-maker\033[0m")
print("-----------------------------------------------\n")
sys.stdout.write('> Waiting for input...\n')
time.sleep(0.3)
root = tk.Tk()
root.withdraw()
file = fd.askopenfilename(
title="Select score data",
filetypes=[("JSON", ".json")]
)
if file:
with open(file, "r") as f:
data = f.read()
sys.stdout.write('\033[1A> Waiting for input... ' + u'\u2705' + '\n')
else:
sys.stdout.write('\033[1A> Waiting for input... ' + u'\u274c' + '\n')
sys.stderr.write(
'\n> \033[1m\033[31mError\033[0m: Select file to convert.\n\n')
sys.exit()
try:
sys.stdout.write('> Checking data in the file...\n')
time.sleep(0.3)
dec = json.loads(codecs.decode(data.encode(), 'utf-8-sig'))
sys.stdout.write(
'\033[1A> Checking data in the file... ' + u'\u2705' + '\n')
except:
sys.stdout.write(
'\033[1A> Checking data in the file... ' + u'\u274c' + '\n')
sys.stderr.write(
'\n> \033[1m\033[31mError\033[0m: This file is not JSON.\n\n')
sys.stderr.write(
'> Exit(1)\033[0m\n\n\n')
sys.exit()
try:
sys.stdout.write('> Converting JSON to CSV...\n')
time.sleep(0.3)
output = []
bpm = dec['BPM']
offset = dec['offset']
for data in dec['notes']:
arr = [0] * 2
arr[0] = 60.0 * data['num'] / (bpm * data['LPB']) + offset / 10000
arr[1] = data['block']
output.append(arr)
sys.stdout.write(
'\033[1A> Converting JSON to CSV... ' + u'\u2705' + '\n')
except:
sys.stdout.write(
'\033[1A> Converting JSON to CSV... ' + u'\u274c' + '\n')
sys.stderr.write(
'\n> \033[1m\033[31mError\033[0m: This file is not generated by Notes Editor.\n')
sys.stderr.write(
'> \033[1m\033[33mMore details\033[0m: \033[4mhttps://github.com/setchi/NoteEditor/\033[0m\n\n')
sys.exit()
sys.stdout.write('> Exporting CSV file...\n')
time.sleep(0.3)
file = fd.asksaveasfilename(
initialfile="data",
defaultextension=".csv",
title="Select a location to save output file",
filetypes=[("CSV", ".csv")]
)
if file:
with open(file, "w", encoding="utf_8") as f:
writer = csv.writer(f)
writer.writerows(output)
sys.stdout.write(
'\033[1A> Exporting CSV file... ' + u'\u2705' + '\n')
else:
sys.stdout.write(
'\033[1A> Exporting CSV file... ' + u'\u274c' + '\n')
sys.stderr.write(
'\n> \033[1m\033[31mError\033[0m: Could not export CSV file.\n')
sys.stderr.write(
'> \033[1m\033[33mHints\033[0m: Check directory permissions.\n\n')
sys.exit()
sys.stdout.write(
'\n> \033[1m\033[32mSuccess\033[0m: Output to the following location.\n')
sys.stdout.write('> ' + file + '\n\n\n')
| 3,087 |
qriscloud/_vendor/__init__.py
|
UQ-RCC/uq-globus-tools
| 0 |
2168162
|
##
# uq-globus-tools
# https://github.com/UQ-RCC/uq-globus-tools
#
# SPDX-License-Identifier: Apache-2.0
# Copyright (c) 2021 The University of Queensland
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
import contextlib
import os
import sys
import importlib
from typing import Union, Iterator
# https://stackoverflow.com/a/64789046
@contextlib.contextmanager
def _add_sys_path(path: Union[str, os.PathLike]) -> Iterator[None]:
"""Temporarily add the given path to `sys.path`."""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
##
# Can't just import .ldap3 here, transitive dependencies need to be rewritten,
# so work around it.
##
_vendor_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)))
with _add_sys_path(_vendor_dir):
# ldap3==2.9
ldap3 = importlib.import_module('ldap3')
| 1,387 |
yoconfigurator/filter.py
|
yola/yoconfigurator
| 0 |
2170631
|
"""Contains methods for filtering configuration."""
import os
from yoconfigurator.base import get_config_module
from yoconfigurator.dicts import DotDict
def filter_config(config, deploy_config):
"""Return a config subset using the filter defined in the deploy config."""
if not os.path.isfile(deploy_config):
return DotDict()
config_module = get_config_module(deploy_config)
return config_module.filter(config)
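# A deploy config module is expected to expose a callable named ``filter`` that
# takes the full config and returns the subset to publish. A minimal sketch
# (the key name below is an assumption, not part of this module):
# def filter(config):
#     return DotDict({'myapp': config.get('myapp', {})})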
| 438 |
src/thumbtack/resources.py
|
mitre/thumbtack
| 14 |
2169851
|
import imagemounter.exceptions
from flask import current_app
from flask_restful import Resource, marshal_with, abort, fields
from .exceptions import (
UnexpectedDiskError,
NoMountableVolumesError,
ImageNotInDatabaseError,
)
from .utils import get_mount_info, get_supported_libraries, mount_image, unmount_image
volume_fields = {
"size": fields.Integer,
"offset": fields.Integer,
"index": fields.Integer,
"label": fields.String(attribute=lambda obj: obj.info.get("label")),
"fsdescription": fields.String(attribute=lambda obj: obj.info.get("fsdescription")),
"fstype": fields.String,
"mountpoint": fields.String,
}
disk_fields = {
"name": fields.String(attribute="_name"),
"imagepath": fields.String(attribute=lambda obj: obj.paths[0]),
"mountpoint": fields.String,
"volumes": fields.List(fields.Nested(volume_fields)),
"paths": fields.Raw(attribute=lambda obj: obj._paths, default=None)
}
disk_mount = {"disk_info": fields.Nested(disk_fields), "ref_count": fields.Integer}
class Mount(Resource):
"""A Mount object that allows you to mount and unmount images.
"""
def __init__(self):
"""Create a Mount object.
"""
current_app.logger.debug("Instantiating the Mount class")
@marshal_with(disk_fields)
def put(self, image_path):
"""Mounts an image file.
Parameters
----------
image_path : str
Relative path to an image file to be mounted.
This is relative to the Thumbtack server's IMAGE_DIR config variable.
"""
status = None
try:
current_app.mnt_mutex.acquire()
mounted_disk = mount_image(image_path)
if mounted_disk and mounted_disk.mountpoint is not None:
current_app.logger.info(f"Image mounted successfully: {image_path}")
current_app.mnt_mutex.release()
return mounted_disk
# TODO: refactor to not duplicate code in the mount_form in views.py
except imagemounter.exceptions.SubsystemError:
status = f"Thumbtack was unable to mount {image_path} using the imagemounter Python library."
except PermissionError:
status = f"Thumbtack does not have mounting privileges for {image_path}. Are you running as root?"
except UnexpectedDiskError:
status = "Unexpected number of disks. Thumbtack can only handle disk images that contain one disk."
except NoMountableVolumesError:
status = f"No volumes in {image_path} were able to be mounted."
except ImageNotInDatabaseError:
status = f"Cannot mount {image_path}. Image is not in Thumbtack database."
current_app.mnt_mutex.release()
current_app.logger.error(status)
abort(400, message=str(status))
@marshal_with(disk_mount)
def get(self, image_path=None):
"""Retrieve information about tracked images.
Parameters
----------
image_path : str, optional
Relative path to an image file.
Returns
-------
dict
Dictionary of useful information about a mounted disk image or a list of all mounted images.
"""
mount_info = get_mount_info(image_path)
if not mount_info:
# empty list -- nothing mounted -- is ok to return
if isinstance(mount_info, list):
return mount_info
abort(404, message=f"{image_path} not mounted")
return mount_info
def delete(self, image_path=None):
"""Unmounts an image file.
Parameters
----------
image_path : str
Relative path to an image file to unmount.
"""
current_app.mnt_mutex.acquire()
unmount_image(image_path)
current_app.mnt_mutex.release()
class SupportedLibraries(Resource):
def get(self):
return get_supported_libraries()
| 3,972 |
geonode/geonode/invitations/views.py
|
ttungbmt/BecaGIS_GeoPortal
| 0 |
2170170
|
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2018 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import traceback
from django.contrib.sites.models import Site
try:
from django.urls import reverse
except ImportError:
    from django.core.urlresolvers import reverse
from django.utils import timezone
from django.utils.translation import ugettext as _
from django.contrib.auth.decorators import login_required
from .forms import GeoNodeInviteForm
from invitations import signals
from invitations.views import SendInvite
from invitations.utils import get_invitation_model
from invitations.adapters import get_invitations_adapter
from geonode.decorators import view_decorator
Invitation = get_invitation_model()
@view_decorator(login_required, subclass=True)
class GeoNodeSendInvite(SendInvite):
template_name = 'invitations/forms/_invite.html'
form_class = GeoNodeInviteForm
def __init__(self, *args, **kwargs):
super(SendInvite, self).__init__(*args, **kwargs)
def form_valid(self, form):
emails = form.cleaned_data["email"]
invited = []
invite = None
try:
invites = form.save(emails)
for invite_obj in invites:
invite = invite_obj
invite.inviter = self.request.user
invite.save()
# invite.send_invitation(self.request)
self.send_invitation(invite, self.request)
invited.append(invite_obj.email)
except Exception as e:
traceback.print_exc()
if invite:
invite.delete()
return self.form_invalid(form, emails, e)
return self.render_to_response(
self.get_context_data(
success_message=_("Invitations succefully sent to '%(email)s'") % {
"email": ', '.join(invited)}))
def form_invalid(self, form, emails=None, e=None):
if e:
return self.render_to_response(
self.get_context_data(
error_message=_("Sorry, it was not possible to invite '%(email)s'"
" due to the following isse: %(error)s (%(type)s)") % {
"email": emails, "error": str(e), "type": type(e)}))
else:
return self.render_to_response(
self.get_context_data(form=form))
def send_invitation(self, invite, request, **kwargs):
current_site = kwargs.pop('site', Site.objects.get_current())
invite_url = reverse('geonode.invitations:accept-invite',
args=[invite.key])
invite_url = request.build_absolute_uri(invite_url)
ctx = kwargs
ctx.update({
'invite_url': invite_url,
'site_name': current_site.name,
'email': invite.email,
'key': invite.key,
'inviter': invite.inviter,
})
email_template = 'invitations/email/email_invite'
adapter = get_invitations_adapter()
adapter.send_invitation_email(email_template, invite.email, ctx)
invite.sent = timezone.now()
invite.save()
signals.invite_url_sent.send(
sender=invite.__class__,
instance=invite,
invite_url_sent=invite_url,
inviter=invite.inviter)
| 4,070 |
PermMissingElem.py
|
tavaresrodrigo/py
| 0 |
2167500
|
import numpy as np
# Painless algorithm using numpy
def solutionM(x):
sar = np.sum(x)
esar = np.sum(np.arange(1,len(x)+2,1))-sar
if esar >= 1:
return int(esar)
else:
return None
# Painless algorithm using only Python standard library
def solution(x):
actual_sum = 0
for i in x:
actual_sum += i
max_number = len(x) +1
expected_sum = (max_number * (max_number+1)//2)
return expected_sum - actual_sum
print (solutionM([2,3,1,5,4,9,8,6,7,11,12,13,14,15,16,26,17,10,18,19,20,21,22,23,24,25,27,28,29,30,31,32,33,34,35,36,37,39]))
print (solution([2,3,1,5,4,9,8,6,7,11,12,13,14,15,16,26,17,10,18,19,20,21,22,23,24,25,27,28,29,30,31,32,33,34,35,36,37,39]))
print (solutionM([]))
print (solution([]))
print (solutionM([1,2,3,4]))
print (solution([1,2,3,4,]))
| 818 |
tests/test_distances.py
|
XinliYu/pyphonetics
| 90 |
2171012
|
from pyphonetics.distance_metrics import levenshtein_distance, hamming_distance
def test_levenshtein():
tests = [
(('b', 'o', 'o', 'k'), ('b', 'a', 'c', 'k'), 2),
('book', 'back', 2),
('hello', 'helo', 1),
('good sir', 'baal', 8),
('say', 'shiver', 5),
('feature', 'get-project-features', 13),
('example', 'samples', 3),
('sturgeon', 'urgently', 6),
('levenshtein', 'frankenstein', 6),
('distance', 'difference', 5),
('a', 'b', 1),
('ab', 'ac', 1),
('ac', 'bc', 1),
('abc', 'axc', 1),
('xabxcdxxefxgx', '1ab2cd34ef5g6', 6),
('a', '', 1),
('ab', 'a', 1),
('ab', 'b', 1),
('abc', 'ac', 1),
('xabxcdxxefxgx', 'abcdefg', 6),
('', 'a', 1),
('a', 'ab', 1),
('b', 'ab', 1),
('ac', 'abc', 1),
('abcdefg', 'xabxcdxxefxgx', 6),
('', '', 0),
('a', 'a', 0),
('abc', 'abc', 0),
('', '', 0),
('a', '', 1),
('', 'a', 1),
('abc', '', 3),
('', 'abc', 3)
]
for test in tests:
assert levenshtein_distance(test[0], test[1]) == test[2]
def test_hamming():
tests = [
('1011101', '1001001', 2),
('2143896', '2233796', 3),
('ramer', 'cases', 3),
('abc', 'abc', 0),
('abc', 'abd', 1),
('night', 'nacht', 2),
((0, 1, 0, 1), (1, 2, 0, 1), 2)
]
for test in tests:
assert hamming_distance(test[0], test[1]) == test[2]
| 1,555 |
afm/pep/__init__.py
|
roytman/arrow-flight-module
| 0 |
2170582
|
#
# Copyright 2020 IBM Corp.
# SPDX-License-Identifier: Apache-2.0
#
from .base import Action, consolidate_actions, transform, transform_schema, transform_batches
from .actions import Redact, RemoveColumns
# registry is a map from action name to Action class
registry = Action.registry
| 287 |
leetcode/0986_interval_list_intersection.py
|
jacquerie/leetcode
| 3 |
2170632
|
# -*- coding: utf-8 -*-
class Interval:
def __init__(self, s=0, e=0):
self.start = s
self.end = e
def __eq__(self, other):
return self.start == other.start and self.end == other.end
class Solution:
def intervalIntersection(self, A, B):
result = []
i, j = 0, 0
while i < len(A) and j < len(B):
if A[i].end < B[j].start:
i += 1
elif B[j].end < A[i].start:
j += 1
else:
result.append(
Interval(
max(A[i].start, B[j].start),
min(A[i].end, B[j].end)
)
)
if A[i].end < B[j].end:
i += 1
else:
j += 1
return result
if __name__ == '__main__':
solution = Solution()
A = [
Interval(0, 2),
Interval(5, 10),
Interval(13, 23),
Interval(24, 25),
]
B = [
Interval(1, 5),
Interval(8, 12),
Interval(15, 24),
Interval(25, 26),
]
expected = [
Interval(1, 2),
Interval(5, 5),
Interval(8, 10),
Interval(15, 23),
Interval(24, 24),
Interval(25, 25),
]
result = solution.intervalIntersection(A, B)
assert expected == result
| 1,388 |
_scripts/tests/data/three_girls/tests/q_3_three_or_fewer.py
|
pxr687/cfd2021
| 1 |
2167940
|
test = {
'name': 'Question three_or_fewer',
'points': 15,
'suites': [
{
'cases': [
{
'code': r"""
>>> # You need to set the value for 'p_3_or_fewer'
>>> 'p_3_or_fewer' in vars()
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> # You haven't changed the value for 'p_3_or_fewer'
>>> # from its initial state (of ...)
>>> p_3_or_fewer != ...
True
""",
'hidden': False,
'locked': False
},
{
# n = 10000
# # Take 10000 samples of 10000 trials of this problem.
# res = np.sum(np.random.binomial(5, 0.5, (n, n)) <= 3, axis=1) / n
# np.quantile(res, [0.001, 0.999])
'code': r"""
>>> 0.8 < p_3_or_fewer < 0.825
True
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'
}
]
}
| 1,087 |
python_village/ini3.py
|
nathaliagg/my_rosalind_answers
| 0 |
2170912
|
#!/usr/bin/env python3
"""
Author : <NAME>
Date : 2021-01-15
Purpose: Python Village - Strings and Lists
"""
import argparse
# define Python user-defined exceptions
class LengthString(Exception):
"""Base class for other exceptions"""
# --------------------------------------------------
def get_args():
""" Get command-line arguments """
parser = argparse.ArgumentParser(
description="""Strings and Lists. Given a string s of maximum length
of 200 letters, and four integers a, b, c, and d, this program returns
two slices of the string from indices a through b, and
c through d, inclusively.""",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('input_file',
metavar='FILE',
type=argparse.FileType('rt'),
help='Input file, string < 200, a, b, c, d integers')
args = parser.parse_args()
return args
# --------------------------------------------------
def main():
"""Slice string s, a through b, and
c through d, inclusively."""
args = get_args()
# print(args)
list_items = args.input_file.read().rstrip().split()
string = list_items[0]
test_length_string(string)
integers = [int(x) for x in list_items[1:]]
print(make_slices(string, integers))
# --------------------------------------------------
def test_length_string(s):
"""Test length of string, return error if > 200 letters"""
if len(s) >= 200:
raise LengthString(f"Length of string must be less than 200")
# --------------------------------------------------
def make_slices(s, list_ints):
"""Make a-b and c-d slices of string s, inclusively"""
a, b, c, d = list_ints
list_slices = [s[a:b+1], s[c:d+1]]
sliced_string = " ".join(list_slices)
return sliced_string
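# Worked example (illustrative): make_slices("abcdefgh", [0, 2, 4, 6]) -> "abc efg"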
# --------------------------------------------------
if __name__ == '__main__':
main()
| 1,976 |
edlm/cli.py
|
etcher-be/EDLM
| 0 |
2170718
|
# coding=utf-8
"""
Command line interface
"""
import click
import elib
from edlm import LOGGER, __version__
from edlm.config import CFG
from edlm.convert import Context, make_pdf
from edlm.external_tools import MIKTEX, PANDOC
@click.group()
@click.version_option(version=__version__)
@click.option('--debug', default=False, help='Outputs DEBUG message on console', is_flag=True)
def cli(debug):
"""
Command line interface
"""
debug = debug or CFG.debug
if debug:
elib.custom_logging.set_handler_level('EDLM', 'ch', 'debug')
else:
elib.custom_logging.set_handler_level('EDLM', 'ch', 'info')
LOGGER.info(__version__)
PANDOC.setup()
MIKTEX.setup()
@cli.group()
def convert():
"""
Converts documents
"""
@convert.command()
@click.argument(
'source_folder',
type=click.Path(exists=True, file_okay=False, resolve_path=True, readable=True),
nargs=-1,
)
@click.option('-k', '--keep-temp-dir', default=False, help='Keep temporary folder', is_flag=True)
@click.option('-f', '--force', default=False, help='Force re-generation of documents', is_flag=True)
def pdf(source_folder, keep_temp_dir, force):
"""
Converts content of SOURCE_FOLDER(s) recursively for folders containing "index.md" files and convert them to PDF
"""
ctx = Context()
ctx.keep_temp_dir = keep_temp_dir or CFG.keep_temp_dir
ctx.regen = force
for folder in source_folder:
make_pdf(ctx, folder)
# noinspection SpellCheckingInspection
if __name__ == '__main__':
cli(obj={}) # pylint: disable=no-value-for-parameter,unexpected-keyword-arg
exit(0)
| 1,637 |
Coursera/ComputationalPhotography/assignment/part1.py
|
ankitaggarwal011/MOOCs
| 6 |
2170891
|
import sys
import os
import numpy as np
from scipy import signal
import math
import random
import cv2
import run
def make_gaussian(k, std):
'''Create a gaussian kernel.
Input:
k - the radius of the kernel.
std - the standard deviation of the kernel.
Output:
output - a numpy array of shape (2k+1, 2k+1) and dtype float.
If gaussian_1d is a gaussian filter of length 2k+1 in one dimension,
kernel[i,j] should be filled with the product of gaussian_1d[i] and
gaussian_1d[j].
Once all the points are filled, the kernel should be scaled so that the sum
of all cells is equal to one.'''
kernel = None
# Insert your code here.----------------------------------------------------
kernel=np.zeros((2*k+1,2*k+1),dtype=np.float)
gaussian_1d = signal.gaussian(2*k+1,std)
for i in range(gaussian_1d.shape[0]):
for j in range(gaussian_1d.shape[0]):
kernel[i,j]=gaussian_1d[i]*gaussian_1d[j]
kernelsum = kernel.sum()
kernel = kernel/kernelsum
#---------------------------------------------------------------------------
return kernel
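def make_gaussian_outer(k, std):
    '''Equivalent vectorized sketch (not part of the assignment solution): the
    docstring above defines kernel[i, j] = gaussian_1d[i] * gaussian_1d[j], which
    is an outer product, followed by scaling the kernel to unit sum.'''
    gaussian_1d = signal.gaussian(2 * k + 1, std)
    kernel = np.outer(gaussian_1d, gaussian_1d)
    return kernel / kernel.sum()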
def test():
'''This script will perform a unit test on your function, and provide useful
output.
'''
np.set_printoptions(precision=3)
ks = [1, 2, 1, 2, 1]
sds = [1, 2, 3, 4, 5]
outputs = []
# 1,1
y = np.array([[ 0.075, 0.124, 0.075],
[ 0.124, 0.204, 0.124],
[ 0.075, 0.124, 0.075]])
outputs.append(y)
# 2,2
y = np.array([[ 0.023, 0.034, 0.038, 0.034, 0.023],
[ 0.034, 0.049, 0.056, 0.049, 0.034],
[ 0.038, 0.056, 0.063, 0.056, 0.038],
[ 0.034, 0.049, 0.056, 0.049, 0.034],
[ 0.023, 0.034, 0.038, 0.034, 0.023]])
outputs.append(y)
# 1,3
y = np.array([[ 0.107, 0.113, 0.107],
[ 0.113, 0.120, 0.113],
[ 0.107, 0.113, 0.107]])
outputs.append(y)
# 2,4
y = np.array([[ 0.035, 0.039, 0.04 , 0.039, 0.035],
[ 0.039, 0.042, 0.044, 0.042, 0.039],
[ 0.04 , 0.044, 0.045, 0.044, 0.04 ],
[ 0.039, 0.042, 0.044, 0.042, 0.039],
[ 0.035, 0.039, 0.04 , 0.039, 0.035]])
outputs.append(y)
# 1,5
y = np.array([[ 0.11 , 0.112, 0.11 ],
[ 0.112, 0.114, 0.112],
[ 0.11 , 0.112, 0.11 ]])
outputs.append(y)
for k, sd, output in zip(ks, sds, outputs):
if __name__ == "__main__":
print "k:{}, sd:{}".format(k, sd)
usr_out = make_gaussian(k, sd)
if not type(usr_out) == type(output):
if __name__ == "__main__":
print "Error- output has type {}. Expected type is {}.".format(
type(usr_out), type(output))
return False
if not usr_out.shape == output.shape:
if __name__ == "__main__":
print "Error- output has shape {}. Expected shape is {}.".format(
usr_out.shape, output.shape)
return False
if not usr_out.dtype == output.dtype:
if __name__ == "__main__":
print "Error- output has dtype {}. Expected dtype is {}.".format(
usr_out.dtype, output.dtype)
return False
if not np.all(np.abs(usr_out - output) < .005):
if __name__ == "__main__":
print "Error- output has value:\n{}\nExpected value:\n{}".format(
usr_out, output)
return False
if __name__ == "__main__":
print "Passed."
if __name__ == "__main__":
print "Success."
return True
if __name__ == "__main__":
# Testing code
print "Performing unit test. Tests will be accepted if they are within .005 \
of the correct answer."
test()
| 3,625 |
geocms/views/web.py
|
JeffHeard/terrapyn
| 1 |
2170500
|
from django.shortcuts import get_object_or_404
from django.views.generic import TemplateView
from django.core.urlresolvers import reverse
import json
from terrapyn.geocms.models import DataResource, Layer, Style, LayerCollection
class LayerPageView(TemplateView):
template_name = 'terrapyn/geocms/layer.html'
def get_context_data(self, **kwargs):
ctx = super(LayerPageView, self).get_context_data(**kwargs)
ctx['layer'] = get_object_or_404(Layer, slug=kwargs['slug'])
ctx['res'] = ctx['layer'].data_resource
ctx['metadata'] = ctx['res'].metadata.first()
ctx['editable_obj'] = ctx['layer']
return ctx
class LayerCollectionPageView(TemplateView):
template_name = 'terrapyn/geocms/layer_collection.html'
def get_context_data(self, **kwargs):
ctx = super(LayerCollectionPageView, self).get_context_data(**kwargs)
ctx['layer_collection'] = get_object_or_404(LayerCollection, slug=kwargs['slug'])
ctx['editable_obj'] = ctx['layer_collection']
extent = ctx['layer_collection'].layers.first().data_resource.metadata.first().bounding_box
for l in ctx['layer_collection'].layers.all():
extent = extent.union(l.data_resource.metadata.first().bounding_box)
extent.transform(3857)
ctx['layers_json'] = json.dumps({
"extent": extent.wkt,
"layers": [{
"url": reverse('tms', kwargs={'layer': l.slug}),
"title": l.title,
"description": l.description
} for l in ctx['layer_collection'].layers.all()]
}, indent=4)
return ctx
class DataResourcePageView(TemplateView):
template_name = 'terrapyn/geocms/res.html'
def get_context_data(self, **kwargs):
ctx = super(DataResourcePageView, self).get_context_data(**kwargs)
ctx['res'] = get_object_or_404(DataResource, slug=kwargs['slug'])
ctx['metadata'] = ctx['res'].metadata.first()
ctx['summary'] = ctx['res'].driver_instance.summary()
ctx['editable_obj'] = ctx['res']
return ctx
class StylePageView(TemplateView):
template_name = 'terrapyn/geocms/style.html'
def get_context_data(self, **kwargs):
ctx = super(StylePageView, self).get_context_data(**kwargs)
ctx['style'] = get_object_or_404(Style, slug=kwargs['slug'])
ctx['layer'] = ctx['style'].default_for.first()
ctx['editable_obj'] = ctx['style']
return ctx
| 2,491 |
tests/test_generate_hamilton_input_make_pdp_mix.py
|
EdinburghGenomics/clarity_scripts
| 2 |
2169459
|
from unittest.mock import PropertyMock, Mock, patch
from scripts.generate_hamilton_input_make_pdp_mix import GenerateHamiltonInputMakePDPMix
from tests.test_common import TestEPP, NamedMock
class TestGenerateHamiltonInputPDP(TestEPP):
def setUp(self):
fake_outputs_per_input = [
Mock(id='ao1', location=[NamedMock(real_name='container3'), 'A:1'])]
fake_input_artifact_list = [Mock(location=[NamedMock(real_name='container1'), 'A:1']),
Mock(location=[NamedMock(real_name='container2'), 'A:1']),
Mock(location=[NamedMock(real_name='container2'), 'B:1'])]
fake_artifact = Mock(type='Analyte',udf={'NTP Volume (uL)':7}, input_artifact_list=Mock(return_value=fake_input_artifact_list))
fake_artifact2 = Mock(type='Analyte', udf={'NTP Volume (uL)': 25},
input_artifact_list=Mock(return_value=fake_input_artifact_list))
fake_inputs = [fake_artifact]
fake_inputs2 = [fake_artifact2]
self.patched_process1 = patch.object(
GenerateHamiltonInputMakePDPMix,
'process',
new_callable=PropertyMock(return_value=Mock(all_inputs=Mock(return_value=fake_inputs),
outputs_per_input=Mock(return_value=fake_outputs_per_input))
))
self.patched_process2 = patch.object(
GenerateHamiltonInputMakePDPMix,
'process',
new_callable=PropertyMock(return_value=Mock(all_inputs=Mock(return_value=fake_inputs2),
outputs_per_input=Mock(return_value=fake_outputs_per_input))
))
# argument -d left blank to write file to local directory
self.epp = GenerateHamiltonInputMakePDPMix(self.default_argv + ['-i', 'a_file_location'] + ['-d', ''])
def test_run(self): # test that file is written under happy path conditions i.e. 1 input plate, 1 output
with self.patched_process1:
self.epp._run()
expected_file = [
'Output Plate,Output Well,Mix Volume',
'container3,A1,16',
]
expected_md5 = '0fd0e782bacb5887906028d85b1d216d'
actual_file = self.file_content('a_file_location-hamilton_input.csv')
actual_md5_lims = self.stripped_md5('a_file_location-hamilton_input.csv')
actual_md5_shared_drive = self.stripped_md5(self.epp.shared_drive_file_path)
assert actual_file == expected_file
assert actual_md5_lims == expected_md5
assert actual_md5_shared_drive == expected_md5
def test_high_mix_volume(self): # test that maximum mix volume is 50 ul even if sum of the NTP volume is greater
with self.patched_process2:
self.epp._run()
expected_file = [
'Output Plate,Output Well,Mix Volume',
'container3,A1,50',
]
expected_md5 = '3ea758f77a402fa587417c60ae311d66'
actual_file = self.file_content('a_file_location-hamilton_input.csv')
actual_md5_lims = self.stripped_md5('a_file_location-hamilton_input.csv')
actual_md5_shared_drive = self.stripped_md5(self.epp.shared_drive_file_path)
assert actual_file == expected_file
assert actual_md5_lims == expected_md5
assert actual_md5_shared_drive == expected_md5
| 3,555 |
vafilterdesign/2_zPlane3d.py
|
jaakjensen/PythonDSP
| 1 |
2169838
|
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt
#Define your function here. Use z^-1 form.
#If len(a) != len(b), don't append the smaller
#len array with zeros
a = [0.0798,0.0798,0.0798,0.0798]
b = [1,-1.556,1.272,-0.398]
zero,pole,gain = signal.tf2zpk(a,b)
print(f'Zeroes = {np.abs(zero)}')
print(f'Poles = {np.abs(pole)}')
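# f(x, y) below evaluates |numerator(z)| / |denominator(z)| at z = x + i*y, i.e. the
# magnitude surface of the transfer function over the complex plane, which is what
# gets plotted in 3D further down.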
def f(x, y):
c = np.zeros(np.shape(x), dtype=complex)
result1 = np.zeros(np.shape(x), dtype=complex)
result2 = np.zeros(np.shape(x), dtype=complex)
c.real = x
c.imag = y
gLen = max(len(a),len(b))
for i in reversed(range(0,len(a))):
if(gLen == len(a)):
result1+=(np.power(c,i) * a[i])
else:
result1+=(np.power(c,i+gLen-1) * a[i])
for i in reversed(range(0,len(b))):
if(gLen == len(b)):
result2+=(np.power(c,i) * b[i])
else:
result2+=(np.power(c,i+gLen-1) * b[i])
return np.abs(result1)/np.abs(result2)
#Calculate X,Y,Z
x = np.linspace(-1.5, 1.5, 40)
y = np.linspace(-1.5, 1.5, 40)
X, Y = np.meshgrid(x, y)
Z = f(X, Y)
#Plot X,Y,and Z
ax = plt.axes(projection='3d')
#ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap='viridis', edgecolor='none')
ax.set_title('Z-Plane');
# Plot the unit circle
theta = np.linspace(0, 2*np.pi, 100)
xline = np.sin(theta)
yline = np.cos(theta)
zline = f(xline,yline)
ax.plot3D(xline, yline, zline, 'black', linewidth=2)
#Plot poles and zeros on Z=0 plane
xzeros = np.abs(zero) * np.cos(np.angle(zero))
yzeros = np.abs(zero) * np.sin(np.angle(zero))
zzeros = np.zeros(np.shape(xzeros))
xpoles = np.abs(pole) * np.cos(np.angle(pole))
ypoles = np.abs(pole) * np.sin(np.angle(pole))
zpoles = np.zeros(np.shape(xpoles))
ax.scatter3D(xzeros, yzeros, zzeros, s=100, c='g')
ax.scatter3D(xpoles, ypoles, zpoles, s=100, c='r')
ax.set_xlabel('Real')
ax.set_ylabel('Imaginary')
ax.set_zlim(0,np.max(zline))
plt.show()
| 1,921 |
helpers.py
|
PennLINC/ExecutiveSummary
| 1 |
2170761
|
import os
from os import path
import glob
import shutil
def find_files(seek_dir, pattern):
"""
Finds all files within the directory specified that match
the glob-style pattern.
:parameter: seek_dir: directory to be searched.
:parameter: pattern: Unix shell pattern for finding files.
:return: list of relative paths of copied files (may be empty).
"""
paths = []
glob_pattern = os.path.join(seek_dir, pattern)
for found_file in glob.glob(glob_pattern):
paths.append(found_file)
return paths
def find_and_copy_files(seek_dir, pattern, output_dir):
"""
Finds all files within the directory specified that match
the glob-style pattern. Copies each file to the output
directory.
:parameter: seek_dir: directory to be searched.
:parameter: pattern: Unix shell pattern for finding files.
:parameter: output_dir: directory to which to copy files.
:return: list of relative paths of copied files (may be empty).
"""
rel_paths = []
glob_pattern = os.path.join(seek_dir, pattern)
for found_file in glob.glob(glob_pattern):
# TODO: change name to BIDS name?
filename = os.path.basename(found_file)
rel_path = os.path.relpath(os.path.join(output_dir, filename), os.getcwd())
shutil.copy(found_file, rel_path)
rel_paths.append(rel_path)
return rel_paths
def find_and_copy_file(seek_dir, pattern, output_dir):
"""
Finds a single file within seek_dir, using the pattern.
If found, copies the file to the output_dir.
:parameter: seek_dir: directory to be searched.
:parameter: pattern: Unix shell pattern for finding files.
:parameter: output_dir: directory to which to copy the file.
:return: relative path to copied file, or None.
"""
found_path = find_one_file(seek_dir, pattern)
if found_path:
# TODO: change name to BIDS name?
# Copy the file to output_dir.
filename = os.path.basename(found_path)
rel_path = os.path.relpath(os.path.join(output_dir, filename), os.getcwd())
shutil.copyfile(found_path, rel_path)
return rel_path
else:
return None
def find_one_file(seek_dir, pattern):
one_file = None
# Try to find a file with the pattern given in the directory given.
glob_pattern = path.join(seek_dir, pattern)
filelist = glob.glob(glob_pattern)
# Make sure we got exactly one file.
    numfiles = len(filelist)
    if numfiles == 1:
one_file = filelist[0]
else:
# TODO: Log info in errorfile.
print('info: Found %s files with pattern: %s' % (numfiles, glob_pattern))
return one_file
| 2,691 |
diagnostic/controllerslocal.py
|
sjjhsjjh/blender-driver
| 2 |
2170013
|
#!/usr/bin/python
# (c) 2017 <NAME>. MIT licensed, see https://opensource.org/licenses/MIT
# Part of Blender Driver, see https://github.com/sjjhsjjh/blender-driver
"""Python module for the Blender Games Engine controller interface.
This module is a diagnostic and demonstration version of the proper
blender_driver.controllers module.
This code demonstrates:
- Access to a local variable set when the controllers module is imported.
The value of the local variable isn't changed in this code, so it's not very
useful. Trying to change the value is demonstrated in the
controllersunboundlocal.py file in this directory.
This module can only be used from within the Blender Game Engine."""
# Exit if run other than as a module.
if __name__ == '__main__':
print(__doc__)
raise SystemExit(1)
# Local imports.
#
# Proper controllers, which have some utility subroutines.
import blender_driver.controllers
counter = -1
def initialise(controller):
"""Controller entry point for the first ever tick."""
# Assume there is only a single sensor
if not controller.sensors[0].positive:
# Only take action on the positive transition.
return
try:
# Next line prints the expected counter value, -1.
print('initialise 0', counter)
print('Terminate the game engine manually, with the Escape key.')
except:
blender_driver.controllers.terminate_engine()
raise
def tick(controller):
pass
def keyboard(controller):
pass
#
# Next line prints the expected counter value, -1.
print("".join(('Controllers module "', __name__, '" ', str(counter))))
| 1,626 |
projects/migrations/0007_alter_projects_screen_shot.py
|
kiptoo-rotich/Awards
| 0 |
2170683
|
# Generated by Django 3.2.5 on 2021-07-17 20:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('projects', '0006_auto_20210717_2135'),
]
operations = [
migrations.AlterField(
model_name='projects',
name='screen_shot',
field=models.ImageField(default='Image', upload_to='images/'),
),
]
| 418 |
scripts/deperson_pickle.py
|
kpnDataScienceLab/deperson
| 4 |
2170995
|
#!/usr/bin/env python
# (c) KPN B.V.
# Licensed under MIT License (see LICENSE.txt)
# Author: <NAME>, Text Analytics Group, KPN Data Science Lab
import argparse
import pandas as pd
from deperson.deperson import Deperson
# Parse command line arguments
parser = argparse.ArgumentParser(
description='Depersonalize a pickled dataframe.')
parser.add_argument('-d', '--datafile', dest='datafile', required=True)
parser.add_argument('-o', '--output', dest='outfile', default='overwrite',
help='Output file to store results.')
parser.add_argument('-f', '--field', dest='field', default='masked_text',
help='Field to mask.')
parser.add_argument('-r', '--rename-field', dest='rfield',
default='masked_text', help='New name for field.')
parser.add_argument('-c', '--drop-original-column', dest='dropcol',
default=False, action='store_true',
help='Whether to drop original column.')
parser.add_argument('-a', '--autocorrect', dest='autocorrect',
default=False, action='store_true',
help='Whether to apply autocorrection.')
parser.add_argument('-e', '--check-compound-words', dest='check_compound',
default=False, action='store_true',
help='Whether to check for long compound words.')
args = parser.parse_args()
# Depersonalizer
d = Deperson(autocorrect=args.autocorrect, check_compound=args.check_compound)
# Read in data
data = pd.read_pickle(args.datafile)
# Mask field
data['masked_text'] = data[args.field].apply(
lambda text: d.apply_blacklist(d.apply_whitelist(text)))
# Drop original column if requested
if args.dropcol:
data = data.drop(args.field, axis=1)
# Output
if args.outfile == 'overwrite':
data.to_pickle(args.datafile)
else:
data.to_pickle(args.outfile)
| 1,872 |
monasca_notification/common/repositories/postgres/pgsql_repo.py
|
martinchacon/monasca-notification
| 25 |
2170829
|
# Copyright 2015-2017 FUJITSU LIMITED
# (C) Copyright 2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
from oslo_log import log as logging
import psycopg2
from monasca_notification.common.repositories.base import base_repo
from monasca_notification.common.repositories import exceptions as exc
log = logging.getLogger(__name__)
class PostgresqlRepo(base_repo.BaseRepo):
def __init__(self, config):
super(PostgresqlRepo, self).__init__(config)
self._pgsql_params = config['postgresql']
self._pgsql = None
def _connect_to_pgsql(self):
self._pgsql = None
try:
self._pgsql = psycopg2.connect(**self._pgsql_params)
self._pgsql.autocommit = True
except psycopg2.Error as e:
log.exception('Pgsql connect failed %s', e)
raise
def fetch_notifications(self, alarm):
try:
if self._pgsql is None:
self._connect_to_pgsql()
cur = self._pgsql.cursor()
cur.execute(
self._find_alarm_action_sql,
(alarm['alarmDefinitionId'],
alarm['newState']))
for row in cur:
yield (row[0], row[1].lower(), row[2], row[3], row[4])
except psycopg2.Error as e:
log.exception("Couldn't fetch alarms actions %s", e)
raise exc.DatabaseException(e)
def get_alarm_current_state(self, alarm_id):
try:
if self._pgsql is None:
self._connect_to_pgsql()
cur = self._pgsql.cursor()
cur.execute(self._find_alarm_state_sql, alarm_id)
row = cur.fetchone()
state = row[0] if row is not None else None
return state
except psycopg2.Error as e:
log.exception("Couldn't fetch current alarm state %s", e)
raise exc.DatabaseException(e)
def fetch_notification_method_types(self):
try:
if self._pgsql is None:
self._connect_to_pgsql()
cur = self._pgsql.cursor()
cur.execute(self._find_all_notification_types_sql)
for row in cur:
yield (row[0])
except psycopg2.Error as e:
log.exception("Couldn't fetch notification types %s", e)
raise exc.DatabaseException(e)
def insert_notification_method_types(self, notification_types):
try:
if self._pgsql is None:
self._connect_to_pgsql()
cur = self._pgsql.cursor()
cur.executemany(self._insert_notification_types_sql, notification_types)
except psycopg2.Error as e:
log.exception("Couldn't insert notification types %s", e)
raise exc.DatabaseException(e)
def get_notification(self, notification_id):
try:
if self._pgsql is None:
self._connect_to_pgsql()
cur = self._pgsql.cursor()
cur.execute(self._get_notification_sql, notification_id)
row = cur.fetchone()
if row is None:
return None
else:
return [row[0], row[1].lower(), row[2], row[3]]
except psycopg2.Error as e:
log.exception("Couldn't fetch the notification method %s", e)
raise exc.DatabaseException(e)
| 3,886 |
raytracing/examples/ex17.py
|
janekfleper/RayTracing
| 91 |
2170762
|
TITLE = "An optical system with vendor lenses"
DESCRIPTION = """
All vendor lenses could be used just like any other elements. Remember to
check backFocalLength() and effectiveFocalLengths() to understand that the focal
point is not "f_e" after the lens but rather "BFL" after the lens.
"""
from raytracing import *
def exampleCode(comments=None):
path = ImagingPath()
path.label = TITLE
path.append(Space(d=50))
path.append(thorlabs.AC254_050_A())
path.append(Space(d=50))
path.append(thorlabs.AC254_050_A())
path.append(Space(d=150))
path.append(eo.PN_33_921())
path.append(Space(d=50))
path.append(eo.PN_88_593())
path.append(Space(180))
path.append(olympus.LUMPlanFL40X())
path.append(Space(10))
path.display(comments=comments)
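def inspectFocalLengths():
    """ Sketch (not part of the original example): query the quantities the
    DESCRIPTION refers to, using one of the vendor lenses from the path above. """
    lens = thorlabs.AC254_050_A()
    print("effective focal lengths:", lens.effectiveFocalLengths())
    print("back focal length:", lens.backFocalLength())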
if __name__ == "__main__":
exampleCode()
| 842 |
project/social_login.py
|
tomjuggler/login_system
| 28 |
2168862
|
from flask import Flask, render_template, redirect, url_for, flash, Blueprint
from flask_login import current_user, login_user, login_required
from flask_dance.contrib.github import make_github_blueprint, github
from flask_dance.contrib.google import make_google_blueprint, google
from flask_dance.contrib.facebook import make_facebook_blueprint, facebook
from flask_dance.consumer import oauth_authorized, oauth_error
from flask_dance.consumer.storage.sqla import SQLAlchemyStorage
from sqlalchemy.orm.exc import NoResultFound
from . import db
from .models import User, OAuth
github_blueprint = make_github_blueprint(client_id = 'YOUR CLIENT ID', client_secret = 'YOUR CLIENT SECRET')
google_blueprint = make_google_blueprint(client_id= "YOUR CLIENT ID", client_secret= "YOUR CLIENT SECRET", scope=[
"openid",
"https://www.googleapis.com/auth/userinfo.email",
"https://www.googleapis.com/auth/userinfo.profile",
]
)
facebook_blueprint = make_facebook_blueprint(client_id= "YOUR CLIENT ID", client_secret= "YOUR CLIENT SECRET", scope = [
"email"
]
)
github_bp = make_github_blueprint(storage = SQLAlchemyStorage(OAuth, db.session, user = current_user))
google_bp = make_google_blueprint(storage = SQLAlchemyStorage(OAuth, db.session, user = current_user))
facebook_bp = make_facebook_blueprint(storage = SQLAlchemyStorage(OAuth, db.session, user = current_user))
@oauth_authorized.connect_via(github_blueprint)
def github_logged_in(blueprint, token):
if not token:
flash("Failed to log in with GitHub.", category = "error")
return
resp = blueprint.session.get("/user")
if not resp.ok:
msg = "Failed to fecth user info from GitHub."
flash(msg, category= "error")
return
github_name = resp.json()["name"]
github_user_id = resp.json()["id"]
query = OAuth.query.filter_by(
provider = blueprint.name, provider_user_id = github_user_id)
try:
oauth = query.one()
except NoResultFound:
github_user_login = github_name
oauth = OAuth(
provider = blueprint.name,
provider_user_id = github_user_id,
provider_user_login = github_user_login,
token = token,
)
if current_user.is_anonymous:
if oauth.user:
login_user(oauth.user)
# flash("Successfully signed in with GitHub.", 'success')
else:
user = User(username = github_name)
oauth.user = user
db.session.add_all([user, oauth])
db.session.commit()
login_user(user)
# flash("Successfully signed in with GitHub.", 'success')
else:
if oauth.user:
if current_user != oauth.user:
url = url_for("auth.merge", username = oauth.user.username)
return redirect(url)
else:
oauth.user =current_user
db.session.add(oauth)
db.session.commit()
# flash("Successfully linked GitHub account.", 'success')
return redirect(url_for("main.profile"))
@oauth_error.connect_via(github_blueprint)
def github_error(blueprint, message, response):
msg = ("OAuth error from {name}! " "message={message} response = {response}").format(
name = blueprint.name, message = message, response = response
)
flash(msg, category="error")
@oauth_authorized.connect_via(google_blueprint)
def google_logged_in(blueprint, token):
if not token:
flask("Failed to log in.", category="error")
return
resp = blueprint.session.get("/oauth2/v2/userinfo")
if not resp.ok:
msg = "Failed to fetch user info."
flash(msg, category="error")
return
google_name = resp.json()["name"]
google_user_id = resp.json()["id"]
query = OAuth.query.filter_by(
provider = blueprint.name, provider_user_id = google_user_id
)
try:
oauth = query.one()
except NoResultFound:
google_user_login = google_name
oauth = OAuth(
provider=blueprint.name,
provider_user_id=google_user_id,
provider_user_login=google_user_login,
token=token,
)
if current_user.is_anonymous:
if oauth.user:
login_user(oauth.user)
# flash("Successfully signed in with Google.", 'success')
else:
user = User(username = google_name)
oauth.user = user
db.session.add_all([user, oauth])
db.session.commit()
login_user(user)
# flash("Successfully signed in with Google.", 'success')
else:
if oauth.user:
if current_user != oauth.user:
url = url_for("auth.merge", username=oauth.user.username)
return redirect(url)
else:
oauth.user = current_user
db.session.add(oauth)
            db.session.commit()
# flash("Successfully linked Google account.")
return redirect(url_for("main.profile"))
@oauth_error.connect_via(google_blueprint)
def google_error(blueprint, message, response):
msg = ("OAuth error from {name}! " "message={message} response={response}").format(
name=blueprint.name, message = message, response = response
)
flash(msg, category = "error")
@oauth_authorized.connect_via(facebook_blueprint)
def facebook_logged_in(blueprint,token):
if not token:
flash("Failed to log in.", category="error")
return
resp = blueprint.session.get("/me")
if not resp.ok:
msg = "Failed to fetch user info."
flash(msg, category="error")
return
facebook_name = resp.json()["name"]
facebook_user_id = resp.json()["id"]
query = OAuth.query.filter_by(
provider = blueprint.name,
provider_user_id = facebook_user_id
)
try:
oauth = query.one()
except NoResultFound:
oauth = OAuth(
provider = blueprint.name,
provider_user_id = facebook_user_id,
token = token
)
if oauth.user:
login_user(oauth.user)
# flash("Successfully signed in with Facebook.", 'success')
else:
user = User(username = facebook_name)
oauth.user = user
db.session.add_all([user, oauth])
db.session.commit()
login_user(user)
# flash("Successfully signed in with Facebook.", 'success')
return redirect(url_for("main.profile"))
@oauth_error.connect_via(facebook_blueprint)
def facebook_error(blueprint, message, response):
msg = ("OAuth error from {name}! " "message={message} response={response}").format(
name=blueprint.name, message=message, response=response
)
flash(msg, category="error")
| 6,929 |
merger/block.py
|
git-dot-art/80x40-client
| 0 |
2169465
|
import config
def is_good(block):
"""Ensure the block is valid"""
if len(block) != config.EXPECTED_HEIGHT:
return False
for line in block:
if len(line) != config.EXPECTED_WIDTH:
return False
if any(len(c) != 1 or c not in config.ALLOWED_CHARS for c in line):
return False
return True
def get_difference(one, two):
"""Compute the differences between two blocks."""
if len(one) != len(two):
raise Exception("blocks are of different dimensions")
diffs = []
for y in range(len(one)):
line1 = one[y]
line2 = two[y]
if len(line1) != len(line2):
raise Exception("blocks are of different dimensions")
for x in range(len(line1)):
c1 = line1[x]
c2 = line2[x]
if c1 != c2:
diffs.append((x, y, c2))
return diffs
def apply_changes(block, changes):
"""Apply a set of changes to a block."""
for (x, y, c) in changes:
block[y][x] = c
return block
def merge_to_string(block):
"""Convert a block back into a string."""
return '\n'.join(''.join(line) for line in block)
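if __name__ == '__main__':
    # Small self-check with made-up 2x3 blocks (illustrative only; real blocks are
    # validated against config.EXPECTED_WIDTH/HEIGHT by is_good above).
    one = [list("ab."), list("cd.")]
    two = [list("ab!"), list("cd.")]
    changes = get_difference(one, two)  # [(2, 0, '!')]
    assert apply_changes(one, changes) == two
    print(merge_to_string(two))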
| 1,210 |
dotbot/plugins/plugins.py
|
henworth/dotbot
| 0 |
2170273
|
import os
import glob
import dotbot
class Plugins(dotbot.Plugin):
'''
Load plugins from a list of paths.
'''
_directive = 'plugins'
_has_shown_override_message = False
def can_handle(self, directive):
return directive == self._directive
def handle(self, directive, data):
if directive != self._directive:
raise ValueError('plugins cannot handle directive %s' %
directive)
return self._process_plugins(data)
def _process_plugins(self, data):
success = True
plugin_paths = []
for item in data:
self._log.lowinfo('Loading plugin from %s' % item)
plugin_path_globs = glob.glob(os.path.join(item, '*.py'))
if not plugin_path_globs:
success = False
self._log.warning('Failed to load plugin from %s' % item)
else:
for plugin_path in plugin_path_globs:
plugin_paths.append(plugin_path)
for plugin_path in plugin_paths:
abspath = os.path.abspath(plugin_path)
dotbot.util.module.load(abspath)
if success:
self._log.info('All commands have been executed')
else:
self._log.error('Some commands were not successfully executed')
return success
| 1,343 |
data/migrations/versions/ed01e313d3cb_add_trust_enabled_to_repository.py
|
sferich888/quay
| 1 |
2170205
|
"""
Add trust_enabled to repository.
Revision ID: ed01e313d3cb
Revises: <PASSWORD>
Create Date: 2017-04-14 17:38:03.319695
"""
# revision identifiers, used by Alembic.
revision = "ed01e313d3cb"
down_revision = "c<PASSWORD>"
import sqlalchemy as sa
def upgrade(op, tables, tester):
### commands auto generated by Alembic - please adjust! ###
op.add_column(
"repository",
sa.Column(
"trust_enabled", sa.Boolean(), nullable=False, server_default=sa.sql.expression.false()
),
)
### end Alembic commands ###
op.bulk_insert(tables.logentrykind, [{"name": "change_repo_trust"},])
# ### population of test data ### #
tester.populate_column("repository", "trust_enabled", tester.TestDataType.Boolean)
# ### end population of test data ### #
def downgrade(op, tables, tester):
### commands auto generated by Alembic - please adjust! ###
op.drop_column("repository", "trust_enabled")
### end Alembic commands ###
op.execute(
tables.logentrykind.delete().where(
tables.logentrykind.name == op.inline_literal("change_repo_trust")
)
)
| 1,146 |
app/auth/views.py
|
Sieva-cmd/Myblog
| 0 |
2170617
|
from flask import render_template,redirect,url_for,flash,request
from . import auth
from ..models import User
from .forms import RegistrationForm,LoginForm
from flask_login import login_user,logout_user,login_required
from ..import db
# from ..email import mail_message
# authorisation views
@auth.route('/login',methods=["GET","POST"])
def login():
login_form =LoginForm()
if login_form.validate_on_submit():
user =User.query.filter_by(email=login_form.email.data).first()
if user is not None and user.verify_password(login_form.password.data):
login_user(user,login_form.remember.data)
return redirect(request.args.get('next') or url_for('main.index'))
flash('Invalid username or password')
title ='Blogs Login'
return render_template('auth/login.html',login_form=login_form,title=title)
@auth.route('/register',methods =["GET","POST"])
def register():
form =RegistrationForm()
if form.validate_on_submit():
user =User(email =form.email.data,username =form.username.data,password =<PASSWORD>.data)
db.session.add(user)
db.session.commit()
# mail_message("Welcome to this blog","email/welcome_user",user.email,user=user)
return redirect(url_for('auth.login'))
return render_template('auth/register.html',registration_form =form)
@auth.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for('auth.login'))
| 1,487 |
couchbase/management/generic.py
|
couchbase/couchbase-python-client
| 189 |
2170872
|
from couchbase.management.admin import Admin
class GenericManager(object):
def __init__(
self, admin_bucket # type: Admin
):
self._admin_bucket = admin_bucket
| 186 |
wiki/encyclopedia/views.py
|
Elephant333/CS50-Webdev
| 0 |
2171147
|
from django.shortcuts import render
from . import util
from markdown import Markdown
markdown = Markdown()
def index(request):
return render(request, "encyclopedia/index.html", {
"entries": util.list_entries()
})
def entry(request, title):
if title in util.list_entries():
body = util.get_entry(title)
body_converted = markdown.convert(body)
return render(request, "encyclopedia/entry.html", {
"title": title,
"body": body_converted
})
else:
return render(request, "encyclopedia/error.html", {
"message": "The requested page was not found."
})
| 658 |
capitalize/test_.py
|
technolingo/AlgoStructuresPy
| 0 |
2167951
|
from .index import capitalize
def test_capitalize():
assert capitalize('hello world!') == 'Hello World!'
| 111 |
StructuralAnalysis/Material.py
|
Hazem-Kassab/StructuralAnalysis
| 6 |
2170369
|
"""
Class Material is an abstract class.
attributes and properties:
elasticity_modulus: should be initialized by the user
poissons_ratio: should be initialized by the used
shear_modulus (property & abstract method): each inheriting class has its own implementation of the shear_modulus
Derived classes:
Steel:
-attributes: yield_strength, ultimate_strength
Concrete:
"""
from abc import ABC, abstractmethod
class Material(ABC):
def __init__(self, elasticity_modulus, poissons_ratio):
self.elasticity_modulus = elasticity_modulus
self.poissons_ratio = poissons_ratio
self.__shear_modulus = None
@property
@abstractmethod
def shear_modulus(self):
return self.__shear_modulus
class Steel(Material):
def __init__(self, yield_strength, ultimate_strength, elasticity_modulus, poissons_ratio):
super().__init__(elasticity_modulus, poissons_ratio)
self.yield_strength = yield_strength
self.ultimate_strength = ultimate_strength
@property
def shear_modulus(self):
return self.elasticity_modulus / (2 * (1 + self.poissons_ratio))
class Concrete(Material):
@property
def shear_modulus(self):
return None
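if __name__ == "__main__":
    # Illustrative usage with assumed values (units are whatever the caller adopts):
    # the Steel implementation above gives G = E / (2 * (1 + poissons_ratio)).
    steel = Steel(yield_strength=250, ultimate_strength=400,
                  elasticity_modulus=200e3, poissons_ratio=0.3)
    print(steel.shear_modulus)  # ~76.9e3 for E = 200e3 and nu = 0.3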
| 1,242 |
experiments/visualise.py
|
sz144/sider
| 2 |
2171094
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 9 16:32:19 2019
@author: shuoz
"""
import os
import scipy.io
import numpy as np
import matplotlib.pyplot as plt
import tensorly as tl
import nibabel as nib
from nilearn import plotting
def plot_coef(coef_img, img_name, thre_rate=0.01):
coef = coef_img.get_data()
coef_vec = tl.tensor_to_vec(coef)
# selection = SelectPercentile(f_classif, percentile=thre_rate)
n_voxel_th = int(coef_vec.shape[0] * thre_rate)
top_voxel_idx = (abs(coef_vec)).argsort()[::-1][:n_voxel_th]
thre = coef_vec[top_voxel_idx[-1]]
# coef_to_plot = np.zeros(coef.shape[0])
# coef_to_plot[top_voxel_idx] = coef[top_voxel_idx]
# thre = np.amax(abs(coef)) * thre_rate # high absulte value times threshold rate
# coef_img = nib.Nifti1Image(coef, maskimg.affine)
# plotting.plot_stat_map(coef_img, threshold=thre, output_file='%s.png'%img_name, cut_coords=(0, 15, 55))
plotting.plot_stat_map(coef_img, threshold=thre, output_file='%s_.pdf' % img_name, display_mode='x',
vmax=0.0004, cut_coords=range(0, 1, 1), colorbar=False)
# plotting.plot_stat_map(coef_img, threshold=thre, output_file='%s.png' % img_name)
basedir = 'D:/icml2019/data/openfmri'
mask = os.path.join(basedir, 'goodvoxmask_openfmri.nii.gz')
# os.path.join(basedir,'goodvoxmask.nii.gz')
maskimg = nib.load(mask)
maskdata = maskimg.get_data()
maskvox = np.where(maskdata)
plt.rcParams.update({'font.size': 14})
# sample_img = nib.load('ds007_sub001_c6_1.nii.gz')
algs = ['SIDeR', 'ARTL', 'SVM']
probs = ['ABandC']
for prob in probs:
for alg in algs:
coef_img = nib.load(os.path.join(basedir, 'aaai_%s%s.nii.gz'%(alg, prob)))
plot_coef(coef_img, alg)
| 1,760 |
climb.py
|
ColinBD/LED_climbing_wall
| 1 |
2171090
|
#!/usr/bin/python
# -*- coding: ascii -*-
import os, sys, time, sqlite3
from random import randint
# route mapper for raspberry pi - WORKOUT BY NUMBER OF ROUTES (we'll have another file for workout by time)
#set led pixel library
from AndyPiPixelLights import AndyPiPixelLights # Import the AndyPi Python module (you need to set the number of pixels in here)
LEDs=AndyPiPixelLights() # Set the name of our module
NUMBER_OF_PIXELS=138 # Set the number of pixels i.e. number of leds we have
ledpixels = [0] * NUMBER_OF_PIXELS # set up the pixel array
#set up required variable
current_route_num = 1
route_set = []
# ---- DECLARE FUNCTIONS ----
# ensure low grade is not higher than high grade
def grade_check():
if int(high_grade) < int(low_grade):
print ("high/low grade mismatch... you'll have to start again!... exiting program...")
time.sleep(2)
exit()
return
# create a user info function
def user_update():
print("you are on route " + str(current_route_num) + " of " + str(num_routes))
return
# ---- GET WORKOUT DETAILS FROM USER ----
num_routes = raw_input("how many routes do you want to do this workout?")
low_grade = raw_input("what is the LOWEST 'v' grade you want to do this workout? [just enter a number]")
high_grade = raw_input("what is the HIGHEST 'v' grade you want to do this workout? [just enter a number]")
# check that the high grade is not lower than the low grade - if it is ask them to choose again
grade_check()
print("\nthanks for that, we are generating your workout...\n")
# ---- GET ROUTES FROM DATABASE ----
# connect to the database
conn = sqlite3.connect('routesDB.db')
cursor = conn.cursor()
sql = "SELECT aroute FROM routes WHERE grade BETWEEN " + low_grade + " AND " + high_grade
i=0
for row in cursor.execute(sql):
#print row
route_set.append(row)
i = i+1
conn.close()
print('the number of routes matching your criteria is: ' + str(len(route_set)))
# if no routes match the users criteria quit
if len(route_set) == 0:
print("There are no routes that match your request... you'll have to start again... quitting")
time.sleep(3)
exit()
# print the routes
#i=0
#while i < len(route_set):
# print(route_set[i])
# i = i + 1
# ---- DECLARE THE DICTIONARY ----
# declare the dictionary to map between board code and LED number
mapper = {"l11": 0, "l10": 1, "l9": 2, "k9": 3, "k10": 4, "k11": 5, "k12": 6, "j11": 7, "j10": 8, "j9": 9, "i9": 10, "i10": 11, "i11": 12, "i12": 13, "h11": 14, $
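# e.g. board hold "l11" maps to LED index 0 and "k9" to LED index 3 on the strip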
# ---- THE LOOPY BIT / REPEAT FOR EACH ROUTE
# loop from 1 through to the users desired number of routes are complete
while current_route_num <= int(num_routes):
# get a route from the route_set data
# generate a random number within the bounds of the number of routes matching our criteria
dice_roll = randint(0,len(route_set)-1)
# pick a random route from the set using the random number
currentRouteMidway = route_set[(dice_roll)]
# clean up the selected route data
currentRoute = currentRouteMidway[0]
# split at ',' and store the parts in a list
currentRouteList = [x.strip() for x in currentRoute.split(',')]
# create an output array the same size as theRoute array - in this case it is filled with zeros for now
# i.e. same number of moves
theConvertedRoute = [0] * len(currentRouteList)
# convert board info into LED pixel integer info
i = 0
while i < len(currentRouteList):
theConvertedRoute[i] = mapper[(currentRouteList[i])]
print "board code: " + currentRouteList[i] + " = LED number: " + str(theConvertedRoute[i])
i += 1
# PIXEL LIGHTS STUFF HERE
try:
LEDs.cls(ledpixels) # clears all the pixels to black
time.sleep(0.1)
#loop through and set the pixels
i = 0
while i < len(theConvertedRoute):
            LEDs.setpixelcolor(ledpixels, theConvertedRoute[i], LEDs.Color(0,0,255)) # set this hold's pixel to blue
i = i+1
        LEDs.writestrip(ledpixels) # writes the pixels (must be called after setpixelcolor to update)
except KeyboardInterrupt: # clears all pixels in the case of Ctrl-C exit
LEDs.cls(ledpixels)
sys.exit(0)
# give the user some feedback
user_update()
# pause the program here and wait for the user to press a key before continuing
raw_input("Press the ENTER key to continue...")
current_route_num = current_route_num + 1
# once we are out of the while loop the user has been through all the routes so we can display a thanks and goodbye message
print('that is all the routes done... goodbye')
LEDs.cls(ledpixels)
time.sleep(3)
| 4,975 |
module/vbc_class.py
|
NMLibrary/vbc
| 2 |
2171107
|
#!/usr/bin/env python3
from enum import IntEnum
from .vbc_base import rank_str
class Player:
def __init__(self, rank, name, knowledge, speed, third_round_course):
self.rank = rank
self.name = name
self.knowledge = knowledge
self.speed = speed
self.win = False
self.lose = False
self.point = 0
self.miss = 0
self.win_rank = -1
self.third_round_course = third_round_course
self.result_str = ''
self.semifinal_seat = 0
self.semifinal_point = 0
self.final_sets = 0
self.score_str = ''
self.history_str = rank_str(rank)
class third_round_course(IntEnum):
ox = 1
swedish = 2
by = 3
updown = 4
def get_course_str(course):
if course == third_round_course.ox:
return '10o10x'
if course == third_round_course.updown:
return '10 up-down'
if course == third_round_course.swedish:
return 'Swedish 10'
if course == third_round_course.by:
return '10 by 10'
class Third_round_course_for_sort:
def __init__(self, course, priority):
self.course = course
self.priority = priority
| 1,224 |
fooltrader/sched/sched_finance.py
|
beaquant/fooltrader
| 1,103 |
2171022
|
# -*- coding: utf-8 -*-
import logging
from apscheduler.schedulers.background import BackgroundScheduler
from fooltrader.connector import es_connector
from fooltrader.datamanager import process_crawl
from fooltrader.datamanager.china_stock_manager import crawl_finance_data
from fooltrader.spiders.chinastock.stock_forecast_spider import StockForecastSpider
from fooltrader.utils.utils import init_process_log
init_process_log('crawling_china_finance_data.log')
logger = logging.getLogger(__name__)
sched = BackgroundScheduler()
@sched.scheduled_job('cron', hour=18, minute=00)
def scheduled_job1():
crawl_finance_data('000001', '666666')
es_connector.finance_sheet_to_es()
es_connector.finance_event_to_es(event_type='finance_report')
@sched.scheduled_job('cron', hour=18, minute=10)
def scheduled_job2():
process_crawl(StockForecastSpider)
es_connector.finance_event_to_es(event_type='finance_forecast')
if __name__ == '__main__':
logger.info("start crawling finance data")
crawl_finance_data('000001', '666666')
process_crawl(StockForecastSpider)
logger.info("shed crawling finance data")
sched.start()
logger.info("I would crawl finance data at 18:00")
sched._thread.join()
| 1,242 |
two_stream_bert/option.py
|
bomtorazek/LateTemporalModeling3DCNN
| 3 |
2170181
|
import argparse
import models
model_names = sorted(name for name in models.__dict__
if not name.startswith("__")
and callable(models.__dict__[name]))
def get_args():
parser = argparse.ArgumentParser(description='PyTorch Two-Stream Action Recognition')
### Dataset
#parser.add_argument('--data', metavar='DIR', default='./datasets/ucf101_frames',help='path to dataset')
parser.add_argument('--settings', metavar='DIR', default='./datasets/settings',
help='path to dataset setting files')
#parser.add_argument('--modality', '-m', metavar='MODALITY', default='rgb',
# choices=["rgb", "flow"], help='modality: rgb | flow')
parser.add_argument('--dataset', '-d', default='hmdb51',
choices=["ucf101", "hmdb51", "smtV2", "window", "cvpr", "cvpr_le"], help='dataset: ucf101 | hmdb51 | smtV2')
parser.add_argument('-s', '--split', default=1, type=int, metavar='S', help='which split of data to work on (default: 1)')
parser.add_argument('-j', '--workers', default=2, type=int, metavar='N', help='number of data loading workers (default: 2)')
parser.add_argument('--arch', '-a', default='rgb_resneXt3D64f101_bert10_FRMB', choices=model_names,
help='model architecture: ' + ' | '.join(model_names) + ' (default: rgb_resneXt3D64f101_bert10_FRMB)')
parser.add_argument('--light_enhanced', action='store_true', default=False)
parser.add_argument('--save_dir', metavar='DIR', default='./checkpoint',help='path to save checkpoints')
### Training
parser.add_argument('--epochs', default=200, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('-b', '--batch-size', default=8, type=int,
metavar='N', help='mini-batch size (default: 8)')
parser.add_argument('--iter-size', default=16, type=int,
metavar='I', help='iter size to reduce memory usage (default: 16)')
parser.add_argument('--optimizer', default='AdamW', choices=['Adam', 'AdamW', 'AdamP', 'MADGRAD'])
parser.add_argument('--lrs', default='Plateau', choices=['Plateau', 'Cosine_Warmup'])
parser.add_argument('--lr', '--learning-rate', default=1e-5, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum (default: 0.9)')
parser.add_argument('--weight-decay', '--wd', default=1e-3, type=float,
metavar='W', help='weight decay (default: 1e-3)')
parser.add_argument('--print-freq', default=400, type=int,
metavar='N', help='print frequency (default: 400)')
parser.add_argument('--save-freq', default=1, type=int,
metavar='N', help='save frequency (default: 1)')
parser.add_argument('--num-seg', default=1, type=int,
metavar='N', help='Number of segments in dataloader (default: 1)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true', help='evaluate model on validation set')
parser.add_argument('-c', '--continue', dest='contine', action='store_true', help='continue training model')
parser.add_argument('--gpu', default='0', type=str, help='gpu id')
parser.add_argument('--half_precision', action='store_true', help='half precision training')
parser.add_argument('--reverse_aug', action='store_true', help='data augmentation with frame reversing')
# For Temporal Augmentations
parser.add_argument('--treg_mix_prob', default=1.0, type=float)
parser.add_argument('--treg_mix_beta', default=1.0, type=float)
parser.add_argument('--mix_type', default='None', choices=['None', 'cutmix', 'framecutmix', 'cubecutmix', 'mixup', 'fademixup', 'mcutmix', 'cutout', 'framecutout', 'cubecutout'])
parser.add_argument('--randaug', default='', type=str,help='3_15_t for n and m respectively, add _t if randaug-t')
args = parser.parse_args()
return args
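# A minimal usage sketch (the entry-point script name is hypothetical, not part
# of this file):
# args = get_args()
# e.g. invoked as: python train.py -d hmdb51 -a rgb_resneXt3D64f101_bert10_FRMB -b 8 --lr 1e-5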
| 4,087 |
huazhuang/utils/download_roberta.py
|
johnson7788/TextBrewer
| 1 |
2170251
|
from transformers import AutoTokenizer, AutoModelForMaskedLM
import os
tokenizer = AutoTokenizer.from_pretrained("hfl/chinese-roberta-wwm-ext")
model = AutoModelForMaskedLM.from_pretrained("hfl/chinese-roberta-wwm-ext")
model.save_pretrained('chinese-roberta')
tokenizer.save_pretrained('chinese-roberta')
# os.remove("bert-base-multilingual-uncased/special_tokens_map.json")
# os.remove("bert-base-multilingual-uncased/tokenizer_config.json")
os.system("mv chinese-roberta ../")
# tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
# model = AutoModelForMaskedLM.from_pretrained("bert-base-uncased")
# model.save_pretrained('bert_model_uncased')
# tokenizer.save_pretrained('bert_model_uncased')
| 709 |
general-problems/general/4/solution.py
|
michaelmunje/algorithms
| 1 |
2171011
|
# General Problem 4:
# Write fibonacci iteratively and recursively (bonus: use dynamic programming)
# Solution:
# Seed the sequence with the basis 1, 1 and build it up from there,
# then print the full sequence.
# function getFibonacciDynamic
# Takes the number of fibonacci terms to generate beyond the basis
# Outputs (into the given list) the basis 1, 1 followed by numOfFib further terms
def getFibonacciRecursive(fibIndex,fibonacciRec):
if (fibIndex > 1):
currentSum = getFibonacciRecursive(fibIndex - 1, fibonacciRec) + getFibonacciRecursive(fibIndex - 2, fibonacciRec)
else:
currentSum = 1
if (fibIndex >= len(fibonacciRec)):
fibonacciRec.insert(fibIndex, currentSum)
return currentSum
def putFibBasis(fibonacciRec):
fibonacciRec.insert(0,1)
fibonacciRec.insert(1,1)
def getFibonacciDynamic(numOfFib,fibonacci):
if (numOfFib >= 1):
fibonacci.insert(0,1)
if (numOfFib >= 2):
fibonacci.insert(1,1)
for i in range(0, numOfFib):
fibonacci.insert(i + 2, fibonacci[i] + fibonacci[i + 1])
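# A minimal alternative sketch (not part of the original solution): the same
# recursion made dynamic with memoization via functools.lru_cache.
from functools import lru_cache
@lru_cache(maxsize=None)
def getFibonacciMemoized(fibIndex):
    if fibIndex <= 1:
        return 1
    return getFibonacciMemoized(fibIndex - 1) + getFibonacciMemoized(fibIndex - 2)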
if __name__ == '__main__':
x = 33
fibonacci = list()
fibonacciRec = list()
getFibonacciDynamic(x,fibonacci)
print(fibonacci)
putFibBasis(fibonacciRec)
getFibonacciRecursive(x + 1,fibonacciRec)
print(fibonacciRec)
| 1,150 |
src/halo_events_to_sumologic/halo_events_to_sumologic.py
|
cloudpassage/halo-sumologic
| 1 |
2167774
|
import datetime
import os
from botocore.exceptions import ClientError
from halo_events import HaloEvents
from manage_state import ManageState
from sumologic_https import sumologic_https_forwarder
from utility import Utility
TIMESTAMP_SSM_PARAM_NAME = '/CloudPassage-SumoLogic/events/timestamp'
SSM_PARAM_DESCRIPTION = 'Timestamp for CloudPassage/Sumologic event shipper.'
AWS_REGION = 'us-west-2'
HALO_CONCURRENCY = 10
MAX_PAGES = 50
SUMO_MAX_RETRY = 3
EXPORT_BATCH_SIZE = 10
def lambda_handler(event, context):
'''
    :param event: NOT USED
:param context: NOT USED
:return: Current time in Zulu format
'''
max_retry = SUMO_MAX_RETRY
sumo_url = os.environ['sumologic_https_url']
halo_api_key_id = os.environ['halo_api_key_id']
halo_api_secret = os.environ['halo_api_secret_key']
halo_events = HaloEvents(halo_api_key_id, halo_api_secret,
HALO_CONCURRENCY)
state_mgr = ManageState(AWS_REGION, TIMESTAMP_SSM_PARAM_NAME,
SSM_PARAM_DESCRIPTION)
invoke_time = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ')
try:
since = state_mgr.get_timestamp()
except ClientError as e:
print("Error on retrieval of starting timestamp from AWS SSM:")
print(e)
print("Setting timestamp in SSM to now and exiting.")
state_mgr.set_timestamp(invoke_time)
return
until = invoke_time
    print('[lambda_handler] Since = %s\n[lambda_handler] Until = %s' % (since, until))
# List events
list_of_events = halo_events.get_all_event_pages(since, until, MAX_PAGES)
print('Number of events: %d' % len(list_of_events))
if list_of_events:
shipped_template = "Events between {} and {} shipped to Sumologic"
fin_msg = shipped_template.format(list_of_events[0]["created_at"],
list_of_events[-1]["created_at"])
print("Generating event batches.")
batches = Utility.generate_batches(EXPORT_BATCH_SIZE, list_of_events)
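        # each batch is a (payload, last_event_created_at) pair, unpacked in the loop below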
print("Generated {} batches of events.".format(len(batches)))
for payload in batches:
data, last_event_created_at = payload
sumologic_https_forwarder(url=sumo_url, data=data,
max_retry=max_retry)
state_mgr.increment_timestamp(last_event_created_at)
print(fin_msg)
else:
last_event_created_at = invoke_time
    # update the last time the script ran with the created_at of the last event
    state_mgr.set_timestamp(last_event_created_at)
    print("The new since time (created_at of the last event) - %s" %
last_event_created_at)
return invoke_time
| 2,699 |
stripe/api_resources/source_transaction.py
|
henry232323/stripe-python
| 0 |
2168484
|
from stripe.stripe_object import StripeObject
class SourceTransaction(StripeObject):
OBJECT_NAME = "source_transaction"
| 126 |
PyStationB/libraries/GlobalPenalisation/gp/moment_matching/numpy/correlation_penalisation.py
|
BrunoKM/station-b-libraries
| 6 |
2171093
|
# -------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# -------------------------------------------------------------------------------------------
"""
TODO: This file doesn't belong here.
"""
from typing import Tuple, Dict, Optional
import numpy as np
from emukit.core.acquisition import Acquisition
from emukit.core.loop import CandidatePointCalculator, LoopState
from emukit.core import ParameterSpace
class CorrelationPenalization(Acquisition):
"""Correlation based penalizer."""
def __init__(self, model, prev_x: np.ndarray):
self.prev_x = prev_x
self.prev_y_mean, prev_y_var = model.predict(self.prev_x)
self.prev_y_std = np.sqrt(prev_y_var)
self.model = model
@property
def has_gradients(self) -> bool:
return False
def update_batches(self, x_batch: np.ndarray):
pass
def evaluate(self, x: np.ndarray) -> np.ndarray:
"""
Evaluates the penalization function value. x is of shape [num_points, input_dim].
"""
covar = self.model.get_covariance_between_points(x, self.prev_x)
_, new_y_variance = self.model.predict(x)
new_y_std = np.sqrt(new_y_variance)
# covar is of shape [x.shape[0], prev_x.shape[0]]. Normalise each entry by the
# std of corresponding observation at entries in x and prev_x
correlation = covar / (new_y_std * self.prev_y_std.T)
penalization = (1.0 - correlation).prod(axis=1, keepdims=True)
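        # e.g. correlations of [0.9, 0.2] with two previously selected points give
        # a factor of (1 - 0.9) * (1 - 0.2) = 0.08, so candidates highly correlated
        # with the existing batch are strongly down-weighted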
return penalization
def evaluate_with_gradients(self, x: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""
Evaluates the penalization function value and gradients with respect to x
"""
        # TODO: The below method computes many unnecessary gradients. Computational overhead.
dmean_dx, dvariance_dx = self.model.get_joint_prediction_gradients(np.concatenate((self.prev_x, x), axis=0))
# if not isinstance(self.model, IJointlyDifferentiable):
# raise AttributeError("Model is not jointly differentiable.")
# TODO
raise NotImplementedError()
class CorrelationPenalizationPointCalculator(CandidatePointCalculator):
"""
    Probability of Improvement inspired global penalization point calculator
"""
def __init__(
self, acquisition: Acquisition, acquisition_optimizer, model, parameter_space: ParameterSpace, batch_size: int
):
"""
:param acquisition: Base acquisition function to use without any penalization applied, this acquisition should
output positive values only.
:param acquisition_optimizer: AcquisitionOptimizer object to optimize the penalized acquisition
:param model: Model object, used to compute the parameters of the local penalization
:param parameter_space: Parameter space describing input domain
:param batch_size: Number of points to collect in each batch
"""
self.acquisition = acquisition
self.acquisition_optimizer = acquisition_optimizer
self.batch_size = batch_size
self.model = model
self.parameter_space = parameter_space
def compute_next_points(self, loop_state: LoopState, context: Optional[Dict] = None) -> np.ndarray:
"""
Computes a batch of points using local penalization.
:param loop_state: Object containing the current state of the loop
"""
self.acquisition.update_parameters()
# Compute first point:
x1, _ = self.acquisition_optimizer.optimize(self.acquisition)
x_batch = [x1]
# Compute the next points:
for i in range(1, self.batch_size):
penalization_acquisition = CorrelationPenalization(self.model, prev_x=x1)
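            # note: the penalizer is rebuilt each iteration but always against the
            # first point x1 only, not against all points collected so far in x_batch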
acquisition = self.acquisition + penalization_acquisition
# Collect point
x_next, _ = self.acquisition_optimizer.optimize(acquisition)
x_batch.append(x_next)
assert len(x_batch) == self.batch_size # TODO: Remove
return np.concatenate(x_batch, axis=0)
| 4,269 |
api/tests/test_users.py
|
enrobyn/lookit-api
| 0 |
2169748
|
import json
import uuid
from django.test import TestCase
from rest_framework.test import APITestCase
from rest_framework.test import APIClient
from rest_framework import status
from django.urls import reverse
import json
from guardian.shortcuts import assign_perm
from studies.models import Response, Study, Feedback
from accounts.models import Child, User
from django_dynamic_fixture import G
class UserTestCase(APITestCase):
def setUp(self):
self.researcher = G(User, is_active=True, is_researcher=True, given_name="Researcher 1")
self.participant = G(User, is_active=True, given_name="Participant 1")
self.participant2 = G(User, is_active=True, given_name="Participant 2")
self.participant3 = G(User, is_active=True, given_name="Participant 3")
self.child = G(Child, user=self.participant, given_name='Sally')
self.study = G(Study, creator=self.researcher)
self.response = G(Response, child=self.child, study=self.study)
self.url = reverse('user-list', kwargs={'version':'v1'})
self.user_detail_url = reverse('user-list', kwargs={'version':'v1'}) + str(self.participant.uuid) + '/'
self.client = APIClient()
# Participant GET LIST Tests
def testGetParticipantListUnauthenticated(self):
# Must be authenticated to view participants
api_response = self.client.get(self.url, content_type="application/vnd.api+json")
self.assertEqual(api_response.status_code, status.HTTP_401_UNAUTHORIZED)
def testGetResearchersInParticipantList(self):
# As a researcher, can view yourself
self.client.force_authenticate(user=self.researcher)
api_response = self.client.get(self.url, content_type="application/vnd.api+json")
self.assertEqual(api_response.status_code, status.HTTP_200_OK)
self.assertEqual(api_response.data['links']['meta']['count'], 1)
def testParticipantCanViewThemselves(self):
# As a participant, can view yourself
self.client.force_authenticate(user=self.participant)
api_response = self.client.get(self.url, content_type="application/vnd.api+json")
self.assertEqual(api_response.status_code, status.HTTP_200_OK)
self.assertEqual(api_response.data['results'][0]['given_name'], "Participant 1")
def testGetParticipantsIncorrectPermissions(self):
# Can_view_study permissions not sufficient for viewing participants
assign_perm('studies.can_view_study', self.researcher, self.study)
self.client.force_authenticate(user=self.researcher)
api_response = self.client.get(self.url, content_type="application/vnd.api+json")
self.assertEqual(api_response.status_code, status.HTTP_200_OK)
self.assertEqual(api_response.data['links']['meta']['count'], 1)
def testGetParticipantListCanViewStudyResponsesPermissions(self):
# As a researcher, need can_view_study_responses permissions to view participants
assign_perm('studies.can_view_study_responses', self.researcher, self.study)
self.client.force_authenticate(user=self.researcher)
api_response = self.client.get(self.url, content_type="application/vnd.api+json")
self.assertEqual(api_response.status_code, status.HTTP_200_OK)
self.assertEqual(api_response.data['links']['meta']['count'], 2)
self.assertEqual(api_response.data['results'][0]['given_name'], "Researcher 1")
self.assertEqual(api_response.data['results'][1]['given_name'], "Participant 1")
def testSuperusersCanViewAllUsers(self):
# Superusers can see all users
self.superuser = G(User, is_active=True, is_researcher=True, is_superuser=True)
self.client.force_authenticate(user=self.superuser)
api_response = self.client.get(self.url, content_type="application/vnd.api+json")
self.assertEqual(api_response.status_code, status.HTTP_200_OK)
self.assertGreater(api_response.data['links']['meta']['count'], 1)
def testAdminsCannotAutomaticallyViewEmails(self):
# Regular org admin permissions and even ability to read all user data are insufficient to see usernames
self.admin = G(User, is_active=True, is_researcher=True, is_org_admin=True)
assign_perm('accounts.can_read_all_user_data', self.admin)
self.client.force_authenticate(user=self.admin)
api_response = self.client.get(self.url, content_type="application/vnd.api+json")
self.assertEqual(api_response.status_code, status.HTTP_200_OK)
userList = api_response.json()['data']
self.assertGreater(len(userList), 1) # View all participants
for u in userList:
self.assertNotIn('username', u['attributes'].keys())
def testUsersCanViewEmailsWithPermission(self):
# User with specific 'can_view_usernames' permission can see usernames in user data
self.emailpermissionuser = G(User, is_active=True, given_name="ResearcherEmail")
assign_perm('accounts.can_read_usernames', self.emailpermissionuser)
self.client.force_authenticate(user=self.emailpermissionuser)
api_response = self.client.get(self.url, content_type="application/vnd.api+json")
self.assertEqual(api_response.status_code, status.HTTP_200_OK)
userList = api_response.json()['data']
self.assertGreater(len(userList), 0) # View self
for u in userList:
self.assertIn('username', u['attributes'].keys())
# Participant GET Detail Tests
def testGetParticipantDetailUnauthenticated(self):
# Must be authenticated to view participants
api_response = self.client.get(self.user_detail_url, content_type="application/vnd.api+json")
self.assertEqual(api_response.status_code, status.HTTP_401_UNAUTHORIZED)
def testGetResearcherDetail(self):
# Researchers do not show up in user list, only participants
self.client.force_authenticate(user=self.researcher)
api_response = self.client.get(str(self.researcher.uuid) + '/', content_type="application/vnd.api+json")
self.assertEqual(api_response.status_code, status.HTTP_404_NOT_FOUND)
def testParticipantCanViewOwnDetailEndpoint(self):
# As a participant, can view yourself
self.client.force_authenticate(user=self.participant)
api_response = self.client.get(self.user_detail_url, content_type="application/vnd.api+json")
print(self.user_detail_url)
self.assertEqual(api_response.status_code, status.HTTP_200_OK)
self.assertEqual(api_response.data['given_name'], "Participant 1")
def testGetParticipantDetailIncorrectPermissions(self):
# Can_view_study permissions not sufficient for viewing participants
assign_perm('studies.can_view_study', self.researcher, self.study)
self.client.force_authenticate(user=self.researcher)
api_response = self.client.get(self.user_detail_url, content_type="application/vnd.api+json")
self.assertEqual(api_response.status_code, status.HTTP_404_NOT_FOUND)
def testGetParticipantDetailCanViewStudyResponsesPermissions(self):
# As a researcher, need can_view_study_responses permissions to view participant detail
assign_perm('studies.can_view_study_responses', self.researcher, self.study)
self.client.force_authenticate(user=self.researcher)
api_response = self.client.get(self.user_detail_url, content_type="application/vnd.api+json")
self.assertEqual(api_response.status_code, status.HTTP_200_OK)
self.assertEqual(api_response.data['given_name'], "Participant 1")
# POST User Tests
def testPostUser(self):
# Cannot POST to users
assign_perm('studies.can_view_study_responses', self.researcher, self.study)
self.client.force_authenticate(user=self.researcher)
api_response = self.client.post(self.url, content_type="application/vnd.api+json")
self.assertEqual(api_response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
# PATCH User Tests
def testUpdateUser(self):
# Cannot Update User
assign_perm('studies.can_view_study_responses', self.researcher, self.study)
self.client.force_authenticate(user=self.researcher)
api_response = self.client.patch(self.user_detail_url, content_type="application/vnd.api+json")
self.assertEqual(api_response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
# DELETE User Tests
def testDeleteUser(self):
# Cannot Delete User
assign_perm('studies.can_view_study_responses', self.researcher, self.study)
self.client.force_authenticate(user=self.researcher)
api_response = self.client.delete(self.user_detail_url, content_type="application/vnd.api+json")
self.assertEqual(api_response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
| 8,901 |