max_stars_repo_path (string, 4-182 chars) | max_stars_repo_name (string, 6-116 chars) | max_stars_count (int64, 0-191k) | id (string, 7 chars) | content (string, 100-10k chars) | size (int64, 100-10k)
---|---|---|---|---|---|
datasets/postprocess.py
|
Zumo09/Feedback-Prize
| 0 |
2025197
|
from typing import Optional
from sklearn.preprocessing import OrdinalEncoder
import torch
import torch.nn.functional as F
import pandas as pd
import numpy as np
from util import box_ops
class FBPPostProcess:
"""This module converts the model's output into the format expected by the Kaggle api"""
def __init__(
self, encoder: OrdinalEncoder, tags: pd.DataFrame, no_obj_class: int
) -> None:
super().__init__()
self.encoder = encoder
self.tags = tags
self.no_obj_class = no_obj_class
self.reset_results()
@property
def results(self) -> pd.DataFrame:
"""The DataFrame to be submitted for the challenge"""
if len(self._results) == 0:
return pd.DataFrame(columns=["id", "class", "predictionstring", "score"])
return pd.DataFrame(self._results)
def reset_results(self):
self._results = []
@staticmethod
def _predstr_to_set(pred: str):
return set(int(i) for i in pred.split())
@staticmethod
def prec_rec_f1(tp, fp, fn):
prec = tp / (tp + fp + 1e-3)
recall = tp / (tp + fn + 1e-3)
f1 = tp / (tp + 0.5 * (fp + fn) + 1e-3)
return {"precision": prec, "recall": recall, "f1": f1}
def _evaluate_doc_class(self, pred, tags):
lp = len(pred)
lt = len(tags)
overlaps_pt = np.zeros(shape=(lp, lt))
overlaps_tp = np.zeros(shape=(lp, lt))
p_sets = [self._predstr_to_set(ps) for ps in pred["predictionstring"]]
t_sets = [self._predstr_to_set(ps) for ps in tags["predictionstring"]]
for p in range(lp):
p_set = p_sets[p]
for t in range(lt):
t_set = t_sets[t]
overlaps_pt[p, t] = len(p_set.intersection(t_set)) / len(t_set)
overlaps_tp[p, t] = len(t_set.intersection(p_set)) / len(p_set)
tp = 0
fn = 0
pred_matched = []
for t in range(lt):
larger = 0
selected = None
for p in range(lp):
if p in pred_matched:
continue
opt = overlaps_pt[p, t]
otp = overlaps_tp[p, t]
if opt >= 0.5 and otp >= 0.5:
if (opt + otp) > larger:
larger = opt + otp
selected = p
if selected is None:
fn += 1
else:
pred_matched.append(selected)
tp += 1
fp = lp - tp
return tp, fp, fn
def evaluate(self, results: Optional[pd.DataFrame] = None):
"""
Evaluation metric defined by the Kaggle Challenge
"""
if results is None:
results = self.results
gb_res = results.groupby(by="id")
gb_tag = self.tags.groupby(by="id")
report = {}
for cls in self.tags["discourse_type"].unique():
tp, fp, fn = 0, 0, 0
for doc_id in results["id"].unique():
pred = gb_res.get_group(doc_id)
tags = gb_tag.get_group(doc_id)
pred = pred[pred["class"] == cls]
tags = tags[tags["discourse_type"] == cls]
a, b, c = self._evaluate_doc_class(pred, tags)
tp += a
fp += b
fn += c
report[cls] = self.prec_rec_f1(tp, fp, fn)
report["macro_avg"] = {
"precision": sum(cls_rep["precision"] for cls_rep in report.values())
/ len(report),
"recall": sum(cls_rep["recall"] for cls_rep in report.values())
/ len(report),
"f1": sum(cls_rep["f1"] for cls_rep in report.values()) / len(report),
}
return pd.DataFrame(report).transpose()
@torch.no_grad()
def add_outputs(self, outputs, infos):
"""Format the outputs and save them to a dataframe
Parameters:
outputs: raw outputs of the model
infos: list of dictionaries of length [batch_size] containing the length of each document of the batch
            For evaluation, this must be the original document length (before any augmentation)
            For visualization, this should be the length after augmentation, but before padding
"""
out_logits, out_bbox = outputs["pred_logits"].cpu(), outputs["pred_boxes"].cpu()
target_sizes = torch.Tensor([info["length"] for info in infos])
assert len(out_logits) == len(target_sizes)
prob = F.softmax(out_logits, -1)
scores, labels = prob.max(-1)
# from relative [0, 1] to absolute [0, tarx_len] coordinates
scale_fct = torch.stack([target_sizes, target_sizes], dim=1)
boxes = out_bbox * scale_fct[:, None, :]
# and convert to [start, end] format
boxes = box_ops.box_cl_to_se(boxes)
# round and positive
boxes = torch.round(boxes)
boxes = torch.relu(boxes).int()
for i, l, s, b in zip(infos, labels, scores, boxes):
self._add(i, l, s, b)
def _add(self, info, labels, scores, boxes):
doc_id = info["id"]
for l, s, b in zip(labels, scores, boxes):
if l != self.no_obj_class:
l = self.encoder.inverse_transform(l.reshape(-1, 1))
self._results.append(
{
"id": doc_id,
"class": l[0][0],
"predictionstring": self.prediction_string(b),
"score": s.item(),
}
)
@staticmethod
def prediction_string(box):
start, end = box
return " ".join(str(i) for i in range(start, end + 1))
| 5,815 |
apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/website_livechat/models/im_livechat.py
|
gtfarng/Odoo_migrade
| 1 |
2022625
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, models, fields
from odoo.addons.website.models.website import slug
from odoo.tools.translate import html_translate
class ImLivechatChannel(models.Model):
_name = 'im_livechat.channel'
_inherit = ['im_livechat.channel', 'website.published.mixin']
@api.multi
def _compute_website_url(self):
super(ImLivechatChannel, self)._compute_website_url()
for channel in self:
channel.website_url = "/livechat/channel/%s" % (slug(channel),)
website_description = fields.Html("Website description", default=False, help="Description of the channel displayed on the website page", sanitize_attributes=False, translate=html_translate)
| 793 |
buildserver/main.py
|
Cal-CS-61A-Staff/cs61a-apps
| 5 |
2022849
|
import hmac
from flask import Flask, abort, redirect, request
from github import Github
import api
from common.db import connect_db
from common.html import html
from common.oauth_client import create_oauth_client, get_user, is_staff, login
from common.rpc.auth import is_admin
from common.rpc.buildserver import (
clear_queue,
deploy_prod_app_sync,
get_base_hostname,
trigger_build_sync,
)
from common.rpc.secrets import get_secret, only, validates_master_secret
from common.url_for import url_for
from conf import GITHUB_REPO
from github_utils import BuildStatus, get_github, pack, set_pr_comment
from rebuilder import create_rebuilder
from scheduling import report_build_status
from service_management import delete_unused_services
from target_determinator import determine_targets
from worker import dequeue_and_build, land_commit
DO_NOT_BUILD = "DO NOT BUILD"
app = Flask(__name__)
if __name__ == "__main__":
app.debug = True
create_oauth_client(app, "61a-buildserver")
create_rebuilder(app)
with connect_db() as db:
db(
"""CREATE TABLE IF NOT EXISTS services (
app varchar(128),
pr_number int,
locked boolean,
is_web_service boolean
)
"""
)
db(
"""CREATE TABLE IF NOT EXISTS apps (
app varchar(128),
repo varchar(128),
autobuild boolean
)"""
)
db(
"""CREATE TABLE IF NOT EXISTS mysql_users (
app varchar(128),
mysql_pw varchar(128)
)"""
)
def deploy_prod_app_description(app):
if app == "website-base":
return "<p>Redeploy cs61a.org</p>"
return ""
@app.route("/")
def index():
if not is_staff("cs61a"):
return login()
email = get_user()["email"]
if not is_admin(course="cs61a", email=email):
abort(401)
with connect_db() as db:
apps = db("SELECT app FROM services WHERE pr_number=0", []).fetchall()
pr_apps = db(
"SELECT app, pr_number FROM services WHERE pr_number>0 ORDER BY pr_number DESC",
[],
).fetchall()
return html(
f"""
This service manages the deployment of the 61A website and various apps.
{"".join(f'''
<form action="/deploy_prod_app">
{deploy_prod_app_description(app)}
<input type="submit" name="app" value="{app}" />
</form>
''' for [app] in apps)}
{"".join(f'''
<form action="/trigger_build">
<input type="hidden" name="app" value="{app}" />
<input type="hidden" name="pr_number" value="{pr_number}" />
<input type="submit" value="{app + "-pr" + str(pr_number)}" />
</form>
''' for [app, pr_number] in pr_apps)}
<form action="/delete_unused_services" method="post">
<input type="submit" value="Delete unused services" />
</form>
"""
)
@app.route("/deploy_prod_app")
def deploy_prod_app():
if not is_staff("cs61a"):
return login()
email = get_user()["email"]
if not is_admin(course="cs61a", email=email):
abort(401)
app = request.args["app"]
deploy_prod_app_sync(target_app=app, noreply=True)
return html(f"Deploying <code>{app}</code> from master!")
@deploy_prod_app_sync.bind(app)
@validates_master_secret
def handle_deploy_prod_app_sync(app, is_staging, target_app):
if app != "buildserver" or is_staging:
abort(401)
g = Github(get_secret(secret_name="GITHUB_ACCESS_TOKEN"))
repo = g.get_repo(GITHUB_REPO)
land_commit(
repo.get_branch(repo.default_branch).commit.sha,
repo,
repo,
None,
[f"{target_app}/main.py"],
)
@app.route("/trigger_build")
def trigger_build():
if not is_staff("cs61a"):
return login()
email = get_user()["email"]
if not is_admin(course="cs61a", email=email):
abort(401)
if "app" in request.args:
target = request.args["app"]
else:
target = None
pr_number = int(request.args["pr_number"])
g = Github(get_secret(secret_name="GITHUB_ACCESS_TOKEN"))
repo = g.get_repo(GITHUB_REPO)
pr = repo.get_pull(pr_number)
if DO_NOT_BUILD in [l.name for l in pr.labels]:
return html(
f"PR <code>{pr_number}</code> has a DO NOT BUILD label on it, so it cannot be built. Remove this label to build the PR."
)
trigger_build_sync(pr_number=pr_number, target_app=target, noreply=True)
return html(f"Building PR <code>{pr_number}</code>!")
@trigger_build_sync.bind(app)
@validates_master_secret
def handle_trigger_build_sync(app, is_staging, pr_number, target_app=None):
if app not in ("slack", "buildserver") or is_staging:
raise PermissionError
g = Github(get_secret(secret_name="GITHUB_ACCESS_TOKEN"))
repo = g.get_repo(GITHUB_REPO)
pr = repo.get_pull(pr_number)
if DO_NOT_BUILD in [l.name for l in pr.labels]:
raise PermissionError
land_commit(pr.head.sha, repo, repo, pr, pr.get_files(), target_app=target_app)
@clear_queue.bind(app)
@only("buildserver", allow_staging=True)
def clear_queue():
dequeue_and_build(get_github().get_repo(GITHUB_REPO))
@app.route("/delete_unused_services", methods=["POST"])
def delete_unused_services_handler():
if not is_staff("cs61a"):
return login()
email = get_user()["email"]
if not is_admin(course="cs61a", email=email):
return login()
delete_unused_services()
return redirect(url_for("index"))
@app.route("/webhook", methods=["POST"])
def webhook():
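    # GitHub signs the raw request body with HMAC-SHA1 using the shared webhook secret
    # and sends it as "sha1=<hexdigest>" in the X-Hub-Signature header; compare_digest
    # keeps the comparison constant-time.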
if not hmac.compare_digest(
"sha1="
+ hmac.new(
get_secret(secret_name="GITHUB_WEBHOOK_SECRET").encode("ascii"),
request.get_data(),
"sha1",
).hexdigest(),
request.headers["X-Hub-Signature"],
):
abort(401)
payload = request.json
g = Github(get_secret(secret_name="GITHUB_ACCESS_TOKEN"))
if "pusher" in payload and payload["ref"] == "refs/heads/master":
base_repo = g.get_repo(GITHUB_REPO)
repo = g.get_repo(payload["repository"]["id"])
sha = payload["after"]
land_commit(
sha,
repo,
base_repo,
None,
[
file
for commit in payload["commits"]
for file in commit["added"] + commit["modified"] + commit["removed"]
],
)
delete_unused_services()
if "pull_request" in payload:
repo_id = payload["repository"]["id"]
repo = g.get_repo(repo_id)
pr = repo.get_pull(payload["pull_request"]["number"])
if payload["action"] in ("opened", "synchronize", "reopened"):
if repo.full_name != GITHUB_REPO:
land_commit(pr.head.sha, repo, g.get_repo(GITHUB_REPO), pr, [])
else:
for target in determine_targets(repo, pr.get_files()):
report_build_status(
target,
pr.number,
pack(repo.clone_url, pr.head.sha),
BuildStatus.pushed,
None,
None,
private=True,
)
elif payload["action"] == "closed":
set_pr_comment("PR closed, shutting down PR builds...", pr)
delete_unused_services(pr.number)
set_pr_comment("All PR builds shut down.", pr)
return ""
@get_base_hostname.bind(app)
@only("domains", allow_staging=True)
def get_base_hostname(target_app):
return api.get_base_hostname(target_app)
if __name__ == "__main__":
app.run(host="127.0.0.1", port=8080, debug=True, threaded=False)
| 7,706 |
usr_scripts/UnsentMessageExample.py
|
Gamma749/IrohaFileHashing
| 1 |
2026104
|
#! /bin/python
from IrohaUtils import *
from IrohaHashCustodian import Custodian
import logging
import grpc
import json
from time import sleep
def wait_point(msg):
input(f"{bcolors.OKGREEN}===> {msg}{bcolors.ENDC}")
# INFO gives descriptions of steps
# DEBUG gives information on transactions and queries too
logging.basicConfig(level=logging.INFO)
logging.info("Create hash custodian")
while True:
try:
custodian = Custodian()
break
except grpc._channel._InactiveRpcError:
logging.info("Network unreachable, retrying")
sleep(2)
logging.info("Create user Jack")
user_a = custodian.new_hashing_user("jack")
logging.info("Create user Jill")
user_b = custodian.new_hashing_user("jill")
logging.info("Create user Victoria")
user_c = custodian.new_hashing_user("victoria")
wait_point("Jack sends message1 to Jill, logging the hash")
message1_hash = custodian.get_file_hash("messages/message1.secret")
logging.info(f"{message1_hash=}")
logging.info("Storing on chain...")
status = custodian.store_hash_on_chain(user_a, message1_hash)
assert status[0] == "COMMITTED"
logging.info("Jack successfully stored message1 hash")
wait_point("Jill replies to Jack and also logs the hash")
message2_hash = custodian.get_file_hash("messages/message2.secret")
logging.info(f"{message2_hash=}")
logging.info("Storing on chain...")
status = custodian.store_hash_on_chain(user_b, message2_hash)
assert status[0] == "COMMITTED"
logging.info("Jill successfully stored message2 hash")
wait_point("Jack, receiving Jill's reply, creates reply message3 but does not send it")
print("\n\n")
wait_point("Some time later, Victoria is called in to verify the communications have occurred")
logging.info("Victoria gets the chain hashes")
domain_hashes = custodian.get_domain_hashes()
logging.info(json.dumps(domain_hashes, indent=2))
wait_point("Victoria gets message1 and checks if the hash exists on the chain")
file_hash = custodian.get_file_hash("messages/message1.secret")
logging.info(f"Searching for hash {file_hash}...")
assert custodian.find_hash_on_chain(user_c, file_hash)
logging.info("Victoria found message1 hash on chain")
wait_point("Victoria gets message2 and checks if the hash exists on the chain")
file_hash = custodian.get_file_hash("messages/message2.secret")
logging.info(f"Searching for hash {file_hash}...")
assert custodian.find_hash_on_chain(user_c, file_hash)
logging.info("Victoria found message2 hash on chain")
wait_point("Victoria is asked to verify if message3 is on chain\nJack claims he sent message3 but actually did not")
file_hash = custodian.get_file_hash("messages/message3.secret")
logging.info(f"Searching for hash {file_hash}...")
assert not custodian.find_hash_on_chain(user_c, file_hash)
logging.info("Victoria did not find message3 hash on chain, verifies that message3 was not sent")
wait_point("Valid communications are verified, unsent messages are identified!")
log_all_blocks("node1.log")
| 2,965 |
src/rtrc_split_csv/RtrcSplitCsv.py
|
Arteric-Jeff-Knight/rtrc_split_csv
| 0 |
2025963
|
import io
from typing import Callable
import pandas as pd
class RtrcSplitCsv:
"""A helper to split an incoming csv file into a DataFrame with data and configs in a dict.
"""
defaults = {}
data = None
    def __init__(self, the_csv=None, defaults=None, config_name: str = 'config'):
if the_csv is not None:
self.data, self.defaults = self.split_data_and_configs(the_csv, defaults, config_name)
@staticmethod
def split_data_and_configs(df, defaults=None, config_name: str = 'config', validator: Callable = None):
        if defaults is None or not isinstance(defaults, dict):
            defaults = {}
# Get everything with 'config' (or custom config_name) in first column
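        # Illustrative layout (values are made up): rows whose first column equals the
        # config marker become key/value defaults, everything else is data whose first
        # row is the header, e.g.
        #   config, threshold, 0.5
        #   config, mode, fast
        #   name, score
        #   a, 1
        #   b, 2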
configs = df[df[0] == config_name]
# Build a dictionary from the key in the second column with values from the third
defaults.update(dict(zip(configs[1], configs[2])))
if validator is not None:
defaults = validator(defaults)
# Everything else that isn't a config, is data
data = df[df[0] != config_name].reset_index(drop=True)
# Assume that the first row is the column names now that configs are gone
data.columns = data.iloc[0]
# Drop the row with the column names
data.drop(df.index[0], inplace=True)
# Reset the index, so zero works below
data = data.reset_index(drop=True)
return data, defaults
@staticmethod
def split_from_csv(the_csv, defaults=None, config_name: str = 'config', validator: Callable = None):
df = pd.read_csv(the_csv, header=None)
return RtrcSplitCsv.split_data_and_configs(df, defaults, config_name, validator)
@staticmethod
def split_from_colab_upload(uploaded, defaults=None, config_name: str = 'config', validator: Callable = None):
filename = list(uploaded.keys())[0]
df = pd.read_csv(io.BytesIO(uploaded[filename]), header=None)
return RtrcSplitCsv.split_data_and_configs(df, defaults, config_name, validator)
| 2,040 |
AlexNet/data_provider.py
|
wondervictor/DeepLearningWithPaddle
| 5 |
2026284
|
# -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2017 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import cPickle as pickle
import numpy as np
import os
def data_reader(path, n):
def reader():
        with open(path, 'rb') as f:
datadict = pickle.load(f)
X = datadict['data']
Y = datadict['labels']
X = X.reshape(n, 3072).astype("float")
Y = np.array(Y)
size = n
for i in range(size):
yield X[i] / 255.0, int(Y[i])
return reader
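# Illustrative usage (file name and sample count are assumptions for a CIFAR-10 batch):
# train_reader = data_reader('cifar-10-batches-py/data_batch_1', 10000)
# for image, label in train_reader():
#     pass  # image is a 3072-dim float vector scaled to [0, 1], label an int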
| 1,530 |
drned/common/test_common.py
|
micnovak/drned-xmnr
| 7 |
2024623
|
import os
import re
import subprocess
# Compare two config files but ignore comments
def filecmp(a, b):
return os.system("diff -I '^ *!' -I '^ */\*' %s %s" % (a, b)) == 0
def check_output(command, **args):
return subprocess.check_output(command,
universal_newlines=True,
shell=True,
**args)
def path_in_prefixes(path, prefixes):
pathnons = path
if path.startswith("/{"):
pathnons = re.sub("{[^}]+?}", "", path)
for p in prefixes:
if path.startswith(p) or pathnons.startswith(p):
return True
return False
def gen_nodes(schema, skip_nodes, include_prefixes, exclude_prefixes, ntype):
nodes = schema.list_nodes(ntype=ntype)
for node in nodes:
p = node.get_path()
if (p not in skip_nodes and
not path_in_prefixes(p, exclude_prefixes) and
(not include_prefixes or path_in_prefixes(p, include_prefixes))):
yield node
| 1,040 |
agentnet/learning/qlearning.py
|
mraihan19/AgentNet
| 337 |
2026287
|
"""
Q-learning implementation.
Works with discrete action space.
Supports n-step updates and custom state value function (max(Q(s,a)), double q-learning, boltzmann, mellowmax, expected value sarsa,...)
"""
from __future__ import division, print_function, absolute_import
import theano.tensor as T
from lasagne.objectives import squared_error
from .generic import get_n_step_value_reference, get_values_for_actions
from ..utils.grad import consider_constant
__author__ = "<NAME>(partobs-mdp), justheuristic"
def get_elementwise_objective(qvalues, actions, rewards,
is_alive="always",
qvalues_target=None,
state_values_target=None,
n_steps=1,
gamma_or_gammas=0.99,
crop_last=True,
state_values_target_after_end="zeros",
consider_reference_constant=True,
aggregation_function="deprecated",
force_end_at_last_tick=False,
return_reference=False,
loss_function=squared_error):
"""
Returns squared error between predicted and reference Q-values according to n-step Q-learning algorithm
    Qreference(state, action) = reward(state, action) + gamma*reward(state_1, action_1) + ... + gamma^n * max[action_n](Q(state_n, action_n))
loss = mean over (Qvalues - Qreference)**2
:param qvalues: [batch,tick,actions] - predicted qvalues
    :param actions: [batch,tick] - committed actions
:param rewards: [batch,tick] - immediate rewards for taking actions at given time ticks
:param is_alive: [batch,tick] - whether given session is still active at given tick. Defaults to always active.
:param qvalues_target: Q-values used when computing reference (e.g. r+gamma*Q(s',a_max). shape [batch,tick,actions]
examples:
(default) If None, uses current Qvalues.
Older snapshot Qvalues (e.g. from a target network)
:param state_values_target: state values V(s), used when computing reference (e.g. r+gamma*V(s'), shape [batch_size,seq_length,1]
double q-learning V(s) = Q_old(s,argmax Q_new(s,a))
expected_value_sarsa V(s) = E_a~pi(a|s) Q(s,a)
state values from teacher network (knowledge transfer)
Must provide either nothing or qvalues_target or state_values_target, not both at once
:param n_steps: if an integer is given, uses n-step q-learning algorithm
If 1 (default), this works exactly as normal q-learning
If None: propagating rewards throughout the whole sequence of state-action pairs.
:param gamma_or_gammas: delayed reward discounts: a single value or array[batch,tick](can broadcast dimensions).
:param crop_last: if True, zeros-out loss at final tick, if False - computes loss VS Qvalues_after_end
:param state_values_target_after_end: [batch,1] - symbolic expression for "next best q-values" for last tick
used when computing reference Q-values only.
Defaults at T.zeros_like(Q-values[:,0,None,0]). if crop_last=True, simply does not penalize at last tick.
If you wish to simply ignore the last tick, use defaults and crop output's last tick ( qref[:,:-1] )
:param consider_reference_constant: whether or not zero-out gradient flow through reference_qvalues
(True is highly recommended)
    :param force_end_at_last_tick: if True, forces session end at last tick unless ended otherwise
:param return_reference: if True, returns reference Qvalues.
If False, returns squared_error(action_qvalues, reference_qvalues)
:param loss_function: loss_function(V_reference,V_predicted). Defaults to (V_reference-V_predicted)**2.
Use to override squared error with different loss (e.g. Huber or MAE)
:return: mean squared error over Q-values (using formula above for loss)
"""
if aggregation_function != "deprecated":
raise NotImplementedError("aggregation function has beed deprecated and removed. You can now manually compute "
"any V(s) and pass it as state_state_values_target. By default it's qvalues.max(axis=-1)")
#set defaults and assert shapes
if is_alive == 'always':
is_alive = T.ones_like(rewards)
assert qvalues_target is None or state_values_target is None, "Please provide only one of (qvalues_target," \
"state_values_target) or none of them, not both"
assert actions.ndim == rewards.ndim == is_alive.ndim == 2, "actions, rewards and is_alive must have shape [batch,time]"
assert qvalues.ndim == 3, "q-values must have shape [batch,time,n_actions]"
assert qvalues_target is None or qvalues_target.ndim == 3,"qvalues_target must have shape[batch,time,n_actions]]"
assert state_values_target is None or state_values_target.ndim ==2, "state values must have shape [batch,time]"
#unless already given V(s), compute V(s) as Qvalues of best actions
state_values_target = state_values_target or T.max(qvalues_target or qvalues, axis=-1)
# get predicted Q-values for committed actions by both current and target networks
action_qvalues = get_values_for_actions(qvalues, actions)
# get reference Q-values via Q-learning algorithm
reference_qvalues = get_n_step_value_reference(
state_values=state_values_target,
rewards=rewards,
is_alive=is_alive,
n_steps=n_steps,
gamma_or_gammas=gamma_or_gammas,
state_values_after_end=state_values_target_after_end,
end_at_tmax=force_end_at_last_tick,
crop_last=crop_last,
)
if consider_reference_constant:
# do not pass gradient through reference Qvalues (since they DO depend on Qvalues by default)
reference_qvalues = consider_constant(reference_qvalues)
#If asked, make sure loss equals 0 for the last time-tick.
if crop_last:
reference_qvalues = T.set_subtensor(reference_qvalues[:,-1],action_qvalues[:,-1])
if return_reference:
return reference_qvalues
else:
# tensor of elementwise squared errors
elwise_squared_error = loss_function(reference_qvalues, action_qvalues)
return elwise_squared_error * is_alive
| 6,526 |
TF_Lab_12_3.py
|
leedongminAI/DL_Tensorflow_Lab
| 1 |
2025905
|
# Long Sequence RNN
import tensorflow as tf
import numpy as np
tf.set_random_seed(777)
sample = "if you want you"
# Define one sample string.
idx2char = list(set(sample))
# set() keeps only the unique characters,
# and list() turns them into a list, printed in the form shown below.
# The ordering will be random on every run.
# idx2char output test
print(idx2char)
'''
['o', 'f', 'u', 'y', 'w', 't', 'a', 'n', ' ', 'i']
'''
char2idx = {c: i for i, c in enumerate(idx2char)}
# Assign an index to each character of the string.
# char2idx output test
print(char2idx)
'''
{'o': 0, 'f': 1, 'u': 2, 'y': 3, 'w': 4, 't': 5, 'a': 6, 'n': 7, ' ': 8, 'i': 9}
'''
# hyper parameters
# Hyper parameters are derived from the sample so that any input string works without manual tuning.
dic_size = len(char2idx)  # RNN 'input' size (one hot size)
# 10, the number of distinct characters appearing in the sentence (input vocabulary size)
# 'o': 0, 'f': 1, 'u': 2, 'y': 3, 'w': 4, 't': 5, 'a': 6, 'n': 7, ' ': 8, 'i': 9
hidden_size = len(char2idx)  # RNN 'output' size
# 10, the size of the RNN cell output,
# i.e. the one-hot size produced after passing through the cell
num_classes = len(char2idx)  # final 'output' size (RNN or 'softmax', etc.)
# 10, the number of output classes, i.e. the final output size (the number of characters)
batch_size = 1  # one sample data, one batch
# 1, how many sequences the final outputs Yt are split into per step,
# i.e. splitting the text into sentences (like pressing Enter)
sequence_length = len(sample) - 1  # number of LSTM rollings
# 14, how many characters are fed in at once, i.e. how many cells are unrolled
# with 14, the first input covers e.g. 'if you want yo'
learning_rate = 0.1
sample_idx = [char2idx[c] for c in sample]
# Map every character of the sample to its index using char2idx.
# sample_idx output test
print(sample_idx)
'''
[9, 1, 8, 3, 0, 2, 8, 4, 6, 7, 5, 8, 3, 0, 2]
'''
x_data = [sample_idx[:-1]]
y_data = [sample_idx[1:]]
# x_data runs from the first character up to (but not including) the last
# y_data runs from the second character to the last
# one-dimensional, rows only
# e.g. for "if you want you":
#   x_data is 'if you want yo'
#   y_data is 'f you want you'
# x_data, y_data output test
print(x_data)
print(y_data)
'''
[[9, 1, 8, 3, 0, 2, 8, 4, 6, 7, 5, 8, 3, 0]]
[[1, 8, 3, 0, 2, 8, 4, 6, 7, 5, 8, 3, 0, 2]]
'''
X = tf.placeholder(tf.int32, [None, sequence_length]) # X data
Y = tf.placeholder(tf.int32, [None, sequence_length]) # Y label
x_one_hot = tf.one_hot(X, num_classes)
# one hot: 1 -> 0 1 0 0 0 0 0 0 0 0
# tf.one_hot performs the one-hot encoding automatically.
# Caveats when building the one-hot tensor:
# watch the added dimension
# and check carefully how the shape changes.
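# Shape check for this sample: X holds integer indices of shape [batch, 14] and
# x_one_hot becomes [batch, 14, 10]: tf.one_hot appends the class dimension.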
cell = tf.contrib.rnn.BasicLSTMCell(num_units=hidden_size, state_is_tuple=True)
initial_state = cell.zero_state(batch_size, tf.float32)
outputs, _states = tf.nn.dynamic_rnn(cell, x_one_hot, initial_state=initial_state, dtype=tf.float32)
# cell is the RNN cell that is unrolled,
# x_one_hot is the input sequence fed into each unrolled step (H1, H2, ...)
# FC layer(Softmax)
X_for_fc = tf.reshape(outputs, [-1, hidden_size])
outputs = tf.contrib.layers.fully_connected(X_for_fc, num_classes, activation_fn=None)
# reshape out for sequence_loss
outputs = tf.reshape(outputs, [batch_size, sequence_length, num_classes])
weights = tf.ones([batch_size, sequence_length])
sequence_loss = tf.contrib.seq2seq.sequence_loss(logits=outputs, targets=Y, weights=weights)
loss = tf.reduce_mean(sequence_loss)
train = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)
prediction = tf.argmax(outputs, axis=2)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(300):
l, _ = sess.run([loss, train], feed_dict={X: x_data, Y: y_data})
result = sess.run(prediction, feed_dict={X: x_data})
# print char using dic
result_str = [idx2char[c] for c in np.squeeze(result)]
print(i, "loss:", l, "Prediction:", ''.join(result_str))
| 3,571 |
src/isanlp_rst/features_extractor.py
|
IINemo/isanlp_rst
| 6 |
2025907
|
import numpy as np
import pandas as pd
def get_embeddings(embedder, x, maxlen=100):
x_ = [text[:text.rfind('_')] for text in x.split()]
    result = np.zeros((maxlen, embedder.vector_size))  # one embedding row per token position, up to maxlen
for i in range(min(len(x_), maxlen)):
try:
result[i] = embedder[x_[i]]
except KeyError:
continue
return result
class FeaturesExtractor:
DROP_COLUMNS = ['snippet_x', 'snippet_y', 'snippet_x_tmp', 'snippet_y_tmp', 'postags_x', 'postags_y']
def __init__(self, processor, scaler=None, categorical_cols=None, one_hot_encoder=None, label_encoder=None):
self.processor = processor
self.scaler = scaler
self._categorical_cols = categorical_cols
self.one_hot_encoder = one_hot_encoder
self.label_encoder = label_encoder
def __call__(self, df,
annot_text, annot_tokens, annot_sentences,
annot_lemma, annot_morph, annot_postag, annot_syntax_dep_tree):
x = self.processor(df,
annot_text, annot_tokens, annot_sentences,
annot_lemma, annot_morph, annot_postag, annot_syntax_dep_tree)
if self._categorical_cols:
if self.label_encoder:
x[self._categorical_cols] = x[self._categorical_cols].apply(
lambda col: self.label_encoder.fit_transform(col))
if self.one_hot_encoder:
x_ohe = self.one_hot_encoder.transform(x[self._categorical_cols].values)
x_ohe = pd.DataFrame(x_ohe, x.index,
columns=self.one_hot_encoder.get_feature_names(self._categorical_cols))
x = x.join(
pd.DataFrame(x_ohe, x.index).add_prefix('cat_'), how='right'
).drop(columns=self._categorical_cols).drop(columns=self.DROP_COLUMNS)
if self.scaler:
return pd.DataFrame(self.scaler.transform(x.values), index=x.index, columns=x.columns)
return x
| 2,019 |
index/__init__.py
|
andrei-alpha/search
| 0 |
2026224
|
__all__ = []
# The SearchIndex class is the only class most implementations would need
from SearchIndex import SearchIndex
| 123 |
admin_autoregister/apps.py
|
janezkranjc/django-admin-autoregister
| 3 |
2023533
|
from django.apps import AppConfig
class AdminAutoregisterConfig(AppConfig):
name = 'admin_autoregister'
| 110 |
main/migrations/0001_initial.py
|
jsmnbom/htxaarhuslan
| 1 |
2025815
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-31 11:15
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Lan',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('start', models.DateTimeField()),
('end', models.DateTimeField()),
('name', models.CharField(max_length=255)),
('schedule', models.TextField()),
],
),
migrations.CreateModel(
name='LanProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('lan', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.Lan')),
],
),
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('picture', models.ImageField(upload_to='profile/')),
('bio', models.TextField()),
('grade', models.CharField(choices=[('teacher', 'Lærer'), ('none', 'Ukendt'), ('13xa', '13xa'), ('14xaa', '14xaa'), ('14xab', '14xab'), ('14abc', '14xac'), ('14xad', '14xad'), ('14xar', '14xar'), ('14xaj', '14xaj')], default='none', max_length=32)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Seat',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('table', models.CharField(max_length=8)),
('row', models.SmallIntegerField()),
('num', models.CharField(max_length=8)),
],
),
migrations.AddField(
model_name='lanprofile',
name='profile',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.Profile'),
),
migrations.AddField(
model_name='lanprofile',
name='seat',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.Seat'),
),
migrations.AddField(
model_name='lan',
name='profiles',
field=models.ManyToManyField(through='main.LanProfile', to='main.Profile'),
),
migrations.AddField(
model_name='lan',
name='seats',
field=models.ManyToManyField(to='main.Seat'),
),
]
| 3,020 |
accounts/urls.py
|
devfrb4/kip
| 0 |
2025670
|
from django.conf.urls import url
urlpatterns = [
url(r'^$', 'accounts.views.example', name='example'),
]
| 107 |
app/tests/test_comments.py
|
ChegeDaniella/Pitches
| 0 |
2025311
|
import unittest
from app.main.models import Comments
from app import db
class CommentsModelTest(unittest.TestCase):
def setUp(self):
self.new_comments=Comments( comment_content='new',pitches_id ='I love darkness',user_id = 123)
def tearDown(self):
Comments.query.delete()
    def test_init(self):
self.assertEquals(self.new_comments.comment_content,'new')
self.assertEquals(self.new_comments.pitches_id,'I love darkness',)
self.assertEquals(self.new_comments.user_id, 123)
| 557 |
src/processing/result.py
|
code-critic/codecritic2
| 0 |
2025867
|
#!/bin/python3
# author: <NAME>
import pathlib
import typing
from entities.crates import CaseResult, TestResult
from loguru import logger
from processing import ExecutorStatus, ProcessRequestType
from utils.crypto import sha1
from utils.strings import ensure_iterable
class RequestResult(object):
"""
:type solution: typing.Optional[str]
:type lang: typing.Optional[Languages]
:type result: ExecutorResult
:type results: typing.List[ExecutorResult]
"""
def __init__(self, request):
"""
:type request: processing.request.ProcessRequest
"""
self.lang = request.lang
self.solution = request.solution if request.solution else None
self.docker = request.docker
self.action = request.type
self.user = request.user
self.course = request.course
self.problem = request.problem
self.result = ExecutorResult.empty_result(ExecutorResult.RESULT)
self.results = list()
self.subcases = list(self._walk_cases())
if self.action is ProcessRequestType.SOLVE:
if self.lang.compile:
self.results.append(ExecutorResult.empty_result(ExecutorResult.COMPILATION))
else:
p = self.problem.reference
if p and p.lang_ref and p.lang_ref.compile:
self.results.append(ExecutorResult.empty_result(ExecutorResult.COMPILATION))
for subcase in self.subcases:
self.results.append(ExecutorResult.empty_result(subcase.id))
def _walk_cases(self):
for case in self.problem.tests:
for subcase in case.cases():
yield subcase
def __getitem__(self, id):
for test in self.results:
if test.id == id:
return test
if id is not ExecutorResult.COMPILATION:
logger.warning('Could not find test {} in {}', id, self.results)
def __setitem__(self, id, value):
for i, test in enumerate(self.results):
if test.id == id:
self.results[i] = value
return
if id is not ExecutorResult.COMPILATION:
logger.warning('Could not find test {} in {}', id, self.results)
def __iter__(self):
return iter(self.subcases)
@property
def compilation(self):
return self[ExecutorResult.COMPILATION]
@compilation.setter
def compilation(self, value):
self[ExecutorResult.COMPILATION] = value
def peek(self, full=True):
return TestResult(
lang=self.lang.id if self.lang else None,
solution=self.solution,
docker=self.docker,
action=self.action.value,
user=self.user.id,
course=self.course.id,
problem=self.problem.id,
result=self.result.peek(full),
results=[x.peek(full) for x in self.results],
)
class ExecutorResult(object):
"""
:type stdin: pathlib.Path
:type stdout: pathlib.Path
:type stderr: pathlib.Path
"""
COMPILATION = 'Compilation'
RESULT = 'Result'
def __init__(self, cmd=None, status=ExecutorStatus.IN_QUEUE, returncode=None, error=None):
self.cmd = cmd
self.status = status
self.returncode = returncode
self.error = error
self.uuid = None
self.id = None
self.duration = 0.0
self.message = None
self.message_details = None
self.console = None
self.attachments = list()
# score of the test
self.score = 0
self.scores = list()
self.stdin = None
self.stdout = None
self.stderr = None
@staticmethod
def try_read(stream: pathlib.Path) -> typing.List[str]:
try:
return ensure_iterable(stream.read_text().splitlines())
except:
return []
def read_stdout(self):
return self.try_read(self.stdout)
def read_stdin(self):
return self.try_read(self.stdin)
def read_stderr(self):
return self.try_read(self.stderr)
def register(self, id):
self.id = id
self.uuid = sha1(id)
return self
def __call__(self, status=None, returncode=None, error=None, duration=None):
if status is not None:
self.status = status
if returncode is not None:
self.returncode = returncode
if error is not None:
self.error = error
if duration is not None:
self.duration = duration
return self
def __repr__(self):
return 'Result([%s], status=%s, rc=%s, duration=%1.3f)' % (
self.id,
self.status.name,
str(self.returncode),
self.duration
)
def failed(self):
return self.returncode != 0
def peek(self, full=True) -> CaseResult:
if full:
return CaseResult(
uuid=self.uuid,
id=self.id,
status=self.status.str,
cmd=self.cmd,
duration=self.duration,
returncode=self.returncode,
console=ensure_iterable(self.console)[:100],
message=self.message,
message_details=ensure_iterable(self.message_details)[:100],
attachments=ensure_iterable(self.attachments),
score=self.score,
scores=self.scores,
)
return CaseResult(
id=self.id,
status=self.status.str,
cmd=' '.join(ensure_iterable(self.cmd)),
duration=self.duration,
returncode=self.returncode,
message=self.message,
message_details=ensure_iterable(self.message_details),
score=self.score,
scores=self.scores,
)
# for p in ('cmd', 'message', 'message_details'):
# if p in doc and not doc[p]:
# doc.pop(p)
# return doc
@classmethod
def empty_result(cls, id, status=ExecutorStatus.IN_QUEUE):
return ExecutorResult([], status).register(id)
@classmethod
def _showcase(cls):
for status in ExecutorStatus:
result = cls.empty_result(str(status), status)
result.message = str(status)
yield result
| 6,356 |
toko/schemas/demanda_schema.py
|
erickotsuka/sistema-contratacao-backend
| 0 |
2025600
|
from marshmallow_sqlalchemy import ModelSchema
from marshmallow import fields
from toko.models.demanda_model import DemandaModel
class DemandaSchema(ModelSchema):
class Meta:
model = DemandaModel
| 209 |
tests/crow/test_eigen_svd.py
|
rinelson456/raven
| 159 |
2026273
|
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This test is used to verify the svd decomposition calculated inside crow;
# the svd module from numpy.linalg is used as the gold solution.
#For future compatibility with Python 3
from __future__ import division, print_function, unicode_literals, absolute_import
import warnings
warnings.simplefilter('default',DeprecationWarning)
#!/usr/bin/env python
import sys
import crowTestUtils as utils
import numpy as np
from math import sqrt
from numpy import linalg as LA
distribution1D = utils.findCrowModule('distribution1D')
# input data, random matrix can also be used.
mu = [1.0,2.0,3.0,4.0,5.0]
cov = [1.36, -0.16, 0.21, 0.43, -0.144,
-0.16, 6.59, 0.794, -0.173, -0.406,
0.21, 0.794, 5.41, 0.461, 0.179,
0.43, -0.173, 0.461, 14.3, 0.822,
-0.144, -0.406, 0.179, 0.822, 3.75]
#dim = 5
# Transform 'mu' and 'cov' to the c++ vector
muCpp = distribution1D.vectord_cxx(len(mu))
for i in range(len(mu)):
muCpp[i] = mu[i]
covCpp = distribution1D.vectord_cxx(len(cov))
for i in range(len(cov)):
covCpp[i] = cov[i]
# call the functions from the crow to compute the svd
covType = "abs"
rank = 5
mvnDistribution = distribution1D.BasicMultivariateNormal(covCpp,muCpp,str(covType),5)
dim = mvnDistribution.getSingularValuesDimension()
sCpp_vector = mvnDistribution.getSingularValues()
sCpp = [sCpp_vector[i] for i in range(dim)]
sCpp = np.asarray(sCpp)
dimVectorLeft = mvnDistribution.getLeftSingularVectorsDimensions()
uCpp_vector = mvnDistribution.getLeftSingularVectors()
uCpp = [uCpp_vector[i] for i in range(dimVectorLeft[0]*dimVectorLeft[1])]
uCpp = np.asarray(uCpp)
uCpp = np.reshape(uCpp,(dimVectorLeft[0],dimVectorLeft[1]))
dimVectorRight = mvnDistribution.getRightSingularVectorsDimensions()
vCpp_vector = mvnDistribution.getRightSingularVectors()
vCpp = [vCpp_vector[i] for i in range(dimVectorRight[0]*dimVectorRight[1])]
vCpp = np.asarray(vCpp)
vCpp = np.reshape(vCpp,(dimVectorRight[0],dimVectorRight[1]))
# using numpy to compute the svd
covNp = np.asarray(cov).reshape(-1,int(sqrt(len(cov))))
uNp,sNp,vNp = LA.svd(covNp,full_matrices=False)
# reconstruct the matrix using A = U*S*V.T
covReCompute = np.dot(uCpp,np.dot(np.diag(sCpp),vCpp.T))
results = {"pass":0,"fail":0}
utils.checkArrayAllClose("MVN singular values",sCpp,sNp,results)
utils.checkArrayAllClose("MVN left singular vectors",np.absolute(uCpp),np.absolute(uNp),results)
utils.checkArrayAllClose("MVN right singular vectors", np.absolute(vCpp),np.absolute(vNp.T),results)
utils.checkArrayAllClose("MVN singular value decomposition",covNp,covReCompute,results)
utils.checkAnswer("MVN dimensions of singular values",dim,5,results)
utils.checkAnswer("MVN row dimensions of left singular vectors",dimVectorLeft[0],5,results)
utils.checkAnswer("MVN col dimensions of left singular vectors",dimVectorLeft[1],5,results)
utils.checkAnswer("MVN row dimensions of right singular vectors",dimVectorRight[0],5,results)
utils.checkAnswer("MVN col dimensions of right singular vectors",dimVectorRight[1],5,results)
print(results)
sys.exit(results["fail"])
"""
<TestInfo>
<name>crow.test_svd</name>
<author>cogljj</author>
<created>2017-03-24</created>
<classesTested>crow</classesTested>
<description>
This test is a Unit Test for the crow swig classes. It tests that the MultiVariate Normal
    distribution is accessible from Python and that the SVD of the covariance can be performed
in Python
</description>
<revisions>
<revision author="alfoa" date="2018-05-15">Adding this test description.</revision>
</revisions>
</TestInfo>
"""
| 4,196 |
Lab_02/Python/copy Oranges HSV.py
|
MoElaSec/Practical-Image-Processing
| 0 |
2025307
|
import cv2
import numpy as np
def copy_orange(img):
"""A Function to Copy/extract Oranges only to a new img"""
result = np.zeros(img.shape, np.uint8)
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# v = np.array([])
for i in range(result.shape[0]):
for j in range(result.shape[1]):
# v = np.append(v, np.array(img[r, c]), axis=0)
c = img[i, j]
c2 = hsv[i, j]
if c2[0] < 30 and c2[1] > 5: # only the Hue & Saturation to be guessed
result[i, j] = c
return result
def main():
path = "Oranges.jpg"
img = cv2.imread(path)
if img is None:
print('Could not open or find the image: ', path)
exit(0)
result = copy_orange(img)
cv2.imshow("Oranges", result)
cv2.waitKey(0)
cv2.destroyAllWindows()
#####################################
if __name__ == '__main__':
main()
| 908 |
neighborhood_App/forms.py
|
Tito-74/Neighborhood-App
| 0 |
2025890
|
from django.contrib.auth.models import User
from django import forms
from django.contrib.auth.forms import UserCreationForm
from .models import Profile, Post, Business,Neighbourhood
class SignUpForm(UserCreationForm):
# email = forms.EmailField()
    class Meta:
model = User
fields = ('username','email','password1','<PASSWORD>')
def __init__(self, *args, **kwargs):
super(SignUpForm,self).__init__(*args, **kwargs)
self.fields['username'].widget.attrs.update({'class':'form-control'})
# self.fields['email'].widget.attrs.update({'class':'form-control'})
self.fields['password1'].widget.attrs.update({'class':'form-control'})
self.fields['password2'].widget.attrs.update({'class':'form-control'})
class UpdateProfileForm(forms.ModelForm):
class Meta:
model=Profile
exclude=['user',]
class UpdateUserForm(forms.ModelForm):
class Meta:
model=User
exclude=['user',]
class AddEventForm(forms.ModelForm):
class Meta:
model = Post
template_name = "add.html"
fields = ['user','title','image','content','neighbourhood']
class PostForm(forms.ModelForm):
class Meta:
model=Post
exclude=['username','neighbourhood','avatar']
class AddBusinessForm(forms.ModelForm):
class Meta:
model = Business
template_name = "buzpost.html"
fields = ['user','name','bizzhood','bizz_email','desc']
class AddNeighbourhoodForm(forms.ModelForm):
class Meta:
model = Neighbourhood
template_name = "neighbourhood.html"
fields = ['name','hood_location','description','hood_photo']
| 1,868 |
ajeita_lista/questao.py
|
Arthurnevs/E7
| 0 |
2025418
|
def ajeita_lista(lista):
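    # Rearranges the list in place: even numbers are moved to the front, then the even
    # block is bubble-sorted into descending order and the odd block into ascending order.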
cont1 = 0
for i in range(len(lista)):
if lista[i] % 2 == 0:
lista[i],lista[cont1] = lista[cont1], lista[i]
cont1 += 1
for a in range(cont1-1,0,-1):
for j in range(cont1-1,0,-1):
if lista[j] > lista[j-1]:
lista[j],lista[j-1] = lista[j-1], lista[j]
for b in range(cont1,len(lista)-1):
for k in range(cont1,len(lista)-1):
if lista[k] > lista[k+1]:
lista[k],lista[k+1] = lista[k+1],lista[k]
l = [1, -8, 3, 2, 0]
ajeita_lista(l)
print(l)
| 468 |
fern/items/migrations/0001_initial.py
|
Eslamhathout/Fern
| 0 |
2025040
|
# Generated by Django 3.0.5 on 2020-06-19 19:25
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='Item name', max_length=255)),
('price', models.PositiveIntegerField()),
('expiry_time', models.DateTimeField(blank=True, null=True)),
],
),
]
| 643 |
build.py
|
m0zes/z3randomizer
| 1 |
2026198
|
import os
import sys
import hashlib
from asar import init as asar_init, close as asar_close, patch as asar_patch, geterrors as asar_errors, getprints as asar_prints, getwarnings as asar_warnings
JAP10HASH = '03a63945398191337e896e5771f77173'
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
def int16_as_bytes(value):
value = value & 0xFFFF
return [value & 0xFF, (value >> 8) & 0xFF]
def int32_as_bytes(value):
value = value & 0xFFFFFFFF
return [value & 0xFF, (value >> 8) & 0xFF, (value >> 16) & 0xFF, (value >> 24) & 0xFF]
def is_bundled():
return getattr(sys, 'frozen', False)
def local_path(path):
if local_path.cached_path:
return os.path.join(local_path.cached_path, path)
elif is_bundled():
if hasattr(sys, "_MEIPASS"):
# we are running in a PyInstaller bundle
local_path.cached_path = sys._MEIPASS # pylint: disable=protected-access,no-member
else:
# cx_Freeze
local_path.cached_path = os.path.dirname(os.path.abspath(sys.argv[0]))
else:
# we are running in a normal Python environment
import __main__
local_path.cached_path = os.path.dirname(os.path.abspath(__main__.__file__))
return os.path.join(local_path.cached_path, path)
local_path.cached_path = None
def make_new_base2current(old_rom_data, new_rom_data):
from collections import OrderedDict
import json
# extend to 2 mb
old_rom_data.extend(bytearray([0x00]) * (2097152 - len(old_rom_data)))
out_data = OrderedDict()
for idx, old in enumerate(old_rom_data):
new = new_rom_data[idx]
if old != new:
out_data[idx] = [int(new)]
for offset in reversed(list(out_data.keys())):
if offset - 1 in out_data:
out_data[offset-1].extend(out_data.pop(offset))
with open('../base2current.json', 'wt') as outfile:
json.dump([{key: value} for key, value in out_data.items()], outfile, separators=(",", ":"))
basemd5 = hashlib.md5()
basemd5.update(new_rom_data)
return "New Rom Hash: " + basemd5.hexdigest()
if __name__ == '__main__':
try:
asar_init()
print("Asar DLL initialized")
print("Opening Base rom")
with open('../alttp.sfc', 'rb') as stream:
old_rom_data = bytearray(stream.read())
if len(old_rom_data) % 0x400 == 0x200:
old_rom_data = old_rom_data[0x200:]
basemd5 = hashlib.md5()
basemd5.update(old_rom_data)
if JAP10HASH != basemd5.hexdigest():
raise Exception("Base rom is not 'Zelda no Densetsu - Kamigami no Triforce (J) (V1.0)'")
print("Patching Base Rom")
result, new_rom_data = asar_patch(os.path.abspath('LTTP_RND_GeneralBugfixes.asm'), old_rom_data)
if result:
with open('../working.sfc', 'wb') as stream:
stream.write(new_rom_data)
print("Success\n")
print(make_new_base2current(old_rom_data, new_rom_data))
prints = asar_prints()
for p in prints:
print(p)
else:
errors = asar_errors()
print("\nErrors: " + str(len(errors)))
for error in errors:
print (error)
warnings = asar_warnings()
print("\nWarnings: " + str(len(warnings)))
for w in warnings:
print(w)
asar_close()
except:
import traceback
traceback.print_exc()
| 3,594 |
likes.py
|
juanchoabdon/insta-data-science
| 0 |
2025281
|
from instapy import InstaPy
session = InstaPy(username='juanchoabdon', password='<PASSWORD>', nogui=True)
session.login()
session.set_do_follow(enabled=True, percentage=10)
session.follow_user_followers(['freddiervega'], amount=500, random=False, sleep_delay=1)
session.set_sleep_reduce(1)
session.end()
| 308 |
src/051-100/P076.py
|
lord483/Project-Euler-Solutions
| 0 |
2023322
|
def solve(n):
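    # Counts the ways to write n as a sum of parts 1..n-1 (Project Euler 76) with a
    # classic coin-style DP: mem[j] accumulates the number of ways to build the sum j.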
mem = [0 for _ in range(n + 1)]
mem[0] = 1
for i in range(1, n):
for j in range(i, n + 1):
mem[j] += mem[j - i]
return mem[-1]
if __name__ == "__main__":
n = 100
print(solve(n))
| 239 |
notifier.py
|
andrewlawhh/amazon-price-drop-notifier
| 2 |
2025175
|
import requests
import bs4 as bs
import smtplib
from email.message import EmailMessage
from credentials import *
import sqlite3
import datetime
import time
from typing import List
'''
Main function
'''
def update_and_notify() -> None :
'''
For each URL in database:
get the price
if it is different, update the dictionary
if it is lower than it was before, email the user
'''
while True:
url_price_email_list = read_database()
for dict in url_price_email_list:
url = dict['url']
old_price = dict['price']
email = dict['email']
price_and_item = get_price_and_item(url)
new_price = price_and_item[0]
item_name = price_and_item[1]
if old_price != new_price:
update_table(url, new_price)
if new_price < old_price:
notify(url, item_name, new_price, email)
# Run every four hours
time.sleep(14400)
def get_price_and_item(url : str) -> (float, str) :
# Scrapes and returns the price of a given amazon url
# Set up web scraper
page = requests.get(url)
html = page.content
soup = bs.BeautifulSoup(html, 'html.parser')
# Price tags on amazon are marked with id of #priceblock_ourprice
price_tag = str(soup.find(id = 'priceblock_ourprice'))
# Item titles are marked with id of #productTitle
# Strip product title text of white space and line breaks
item_name = strip(soup.find(id = 'productTitle').text)
# Get the relevant information ($ amount) from the price tag
price = price_tag[price_tag.index('$'):price_tag.index('</')]
# Handle ranged vs flat
if '-' in price:
# Ranged price case : manipulate string to find low and high ends
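        # e.g. price == '$10.00 - $20.00' -> low_end 10.0, high_end 20.0 -> returns (10.0 + 20.0) // 2 (illustrative)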
low_end = float(price[1:price.index(' ')])
high_end = float(price[price.index('- $') + 3:])
return (low_end + high_end) // 2, item_name
else:
# Flat price case : Remove dollar sign and return the number as a float
price = float(price[1:])
return price, item_name
def get_price(url : str) -> float :
return get_price_and_item(url)[0]
def get_name(url : str) -> str :
return get_price_and_item(url)[1]
'''
Database handling
'''
def read_database() -> List[dict] :
# Reads from a database and returns list of dictionaries
database_list = []
conn = sqlite3.connect('tracked_items.db')
c = conn.cursor()
for row in c.execute('SELECT * FROM items'):
database_list.append({'url' : row[0], 'price' : row[1], 'email' : row[2]})
conn.close()
return database_list
def update_table(url : str, price : float) -> None :
# Updates database with the new price where url column = url parameter
conn = sqlite3.connect('tracked_items.db')
c = conn.cursor()
c.execute('UPDATE items SET price = ? WHERE url = ?', (price, url))
conn.commit()
conn.close()
c.close()
'''
Notify functions
'''
def notify(url : str, name : str, price : float, recipient : str) -> None :
# Email the recipient notifying him / her of a price drop
# Format date and time for the email message
date = datetime.datetime.now().isoformat()[:10]
time = datetime.datetime.now().isoformat()[11:19]
content = ' '.join(['Item', name, 'at', url, 'just dropped to $', str(price), 'at', time, 'on', date])
send_email(content, recipient)
def subscribe_notify(url : str, name : str, recipient : str) -> None :
# Email the person when he / she tracks a new item
price = get_price(url)
content = ' '.join(('You have signed up to receive notifications for', name, 'at', url, '.', 'The price is currently', str(price)))
send_email(content, recipient)
def unsubscribe_notify(recipient : str) -> None :
# Email the person who is unsubscribing
content = 'Thank you for using the Amazon Price Drop Notifier Service. You have unsubscribed from the mailing list.'
send_email(content, recipient)
def untrack_notify(url : str, name : str, recipient : str) -> None :
content = ' '.join(('You have untracked', name, 'at', url))
send_email(content, recipient)
def send_email(content : str, recipient : str) -> None :
# Send email to recipient with body as content
# Initialize SMTP and login to Gmail
server = smtplib.SMTP('smtp.gmail.com', 587)
server.ehlo()
server.starttls()
server.login(email_username, email_password)
# Create email message
msg = EmailMessage()
msg.set_content(content)
msg['Subject'] = 'Amazon Price Drop'
msg['From'] = '<EMAIL>'
msg['To'] = recipient
# Send email and quit
server.send_message(msg)
server.quit()
'''
Utility functions
'''
def strip(str : str) -> str :
'''
Returns a copy of the String argument with white space and line breaks removed
:param str: String
:return: String
'''
stripped = str
for char in stripped:
if char in '\n ':
stripped = stripped.replace(char, '')
return stripped
'''
Main
'''
def main():
update_and_notify()
if __name__ == '__main__':
main()
| 5,157 |
code/doubanboardcast.py
|
zefengdaguo/douban_crawler
| 116 |
2024993
|
from bs4 import BeautifulSoup
import re
import time
import requests
import random
def getwords(item):
txt=item.get_text(strip=False).replace(' ','').replace('\r','\n')\
.replace('\n\n\n\n\n\n','\$#n').replace('\n\n\n\n',' ').replace('\n','')\
.replace('$#','').replace('\xa0','').replace('\\n','\n').replace('+','')
try:
pic=item(class_=re.compile('view-large'))[0]['href']
except:
pic=''
return txt+pic
def madeBox(txt):
box='\t------------------------------------------------------------------------------------------\n'+'\t'+\
txt+'\n\t'+'------------------------------------------------------------------------------------------\n'
return box
def dealwithshare(txt1,txt2):
li=txt1.split('\n')
li[-4]=li[-4]+' @'+li[-3]
for word in li:
word.replace(' ','')
li.remove(li[-3])
li2=txt2.split('\n')
li.insert(-2,madeBox(''.join(li2[0:-3])))
return '\n'.join(li)
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36',
}
rawji_america='ll="108297"; bid=TFLDE9t44mY; _pk_ses.100001.8cb4=*; __utmc=30149280; __utma=30149280.995009761.1561041533.1561041533.1561042634.2; __utmz=30149280.1561042634.2.2.utmcsr=google|utmccn=(organic)|utmcmd=organic|utmctr=(not%20provided); __utmt=1; dbcl2="198268851:+gNufdpzGAw"; ck=AnNe; ap_v=0,6.0; douban-profile-remind=1; push_noty_num=0; push_doumail_num=0; __utmv=30149280.19826; __yadk_uid=nFY3eG607ZoqEtBaMMWYVuqNCXZIycd6; douban-fav-remind=1; __utmb=30149280.12.10.1561042634; _pk_id.100001.8cb4=9f8810e4b7a61874.1561041531.1.1561043477.1561041531.'
rawji='ll="108297"; bid=BQLi_2UIMh8; __utmc=30149280; __yadk_uid=Fl0aRuIUatWP1JCilVDTUzW1h2R71qWN; push_noty_num=0; push_doumail_num=0; __utmv=30149280.19826; ps=y; _vwo_uuid_v2=DD4476A9DC58A854DCFFF0D91547908DA|534c6354fc5886543fd8704a8eb02aeb; _pk_ref.100001.8cb4=%5B%22%22%2C%22%22%2C1561087673%2C%22https%3A%2F%2Faccounts.douban.com%2Faccounts%2Fsafety%2Funlock_phone%22%5D; _pk_ses.100001.8cb4=*; __utma=30149280.1984600914.1561080464.1561080464.1561087675.2; __utmz=30149280.1561087675.2.2.utmcsr=accounts.douban.com|utmccn=(referral)|utmcmd=referral|utmcct=/accounts/safety/unlock_phone; __utmt=1; dbcl2="198268851:Co+RFApa9xQ"; ck=g0Bz; douban-profile-remind=1; ap_v=0,6.0; douban-fav-remind=1; __gads=ID=750f05eb1a424666:T=1561087843:S=ALNI_MaWUrys775-4HBWVFaGDarZgSJRCA; _pk_id.100001.8cb4=3db8de030f64f76f.1561080462.2.1561087888.1561080727.; __utmb=30149280.21.10.1561087675'
def getCookie(raw_cookies):
cookies={}
for line in raw_cookies.split(';'):
        key,value=line.split('=',1) # maxsplit=1: split only once, so the value may itself contain '='
cookies[key]=value
return cookies
def getHtml(douid,raw_cookies=rawji,beg=1,end=10):
html_list=[]
cookies={}
cookies=getCookie(raw_cookies)
firstpage='https://www.douban.com/people/'+douid+'/statuses?p='+str(beg)
s=requests.Session()
res=s.get(firstpage,headers=headers,cookies=cookies)
html_list.append(res.text)
print(f'第{beg}页',res.status_code,res.reason)
while beg<end:
beg+=1
time.sleep(random.uniform(1,5))
try:
nextpage='https://www.douban.com/people/'+douid+'/statuses?p='+str(beg)
res2=s.get(nextpage,headers=headers,cookies=cookies)
soup=BeautifulSoup(res2.text,"html.parser")
items=soup.find_all(class_=re.compile('status-item'))
            print(f'第{beg}页',res2.status_code,res2.reason)
except:
print('网页请求错误')
else:
html_list.append(res2.text)
return html_list
def saveHtml(douid,html_list,beg,end):
file_name=douid+"'s_board_cast_page_"+str(beg)+'-'+str(end)
with open (file_name.replace('/','_')+".html","wb") as f:
for file_content in html_list:
            # the file is opened in binary mode, so encode each page from str to bytes before writing
f.write(bytes(file_content+'\n',encoding='utf-8'))
print(f'第{beg}页HTML完成')
beg+=1
def saveTXT(douid,htmlList,beg,end):
with open(douid+'board_cast_'+str(beg)+'-'+str(end)+'.txt','w',encoding='utf-8_sig') as f:
for text in htmlList:
soup=BeautifulSoup(text,"html.parser")
items=soup.find_all(class_=re.compile('status-item'))
t=0
for i in range(len(items)):
if t>=len(items):
break
txt=getwords(items[t])
if '转发:' in txt:
origin=getwords(items[t+1])
txt=dealwithshare(txt,origin)
t+=1
f.write(str(txt)+'\n')
t+=1
print(f'第{beg}页TXT完成')
beg+=1
def main():
print('hello,这是一个备份豆瓣广播的程序。\n需要你自己的cookie用来爬取广播。')
choice=input('该过程有风险,请确定你要开始备份(yes/no):')
if choice=='yes':
user_raw=input('请输入你的cookie(最后不要带空格):')
doubanid=input('请输入你的豆瓣id:')
        begin=int(input('请输入你开始备份的页码(比如1):'))
        endpage=int(input('请输入你结束备份的页码:'))
Hlist=getHtml(douid=doubanid,raw_cookies=user_raw,beg=begin,end=endpage)
print(type(Hlist[0]))
print(f'爬取了{len(Hlist)}页')
choice2=input('请选择你要输出html结果(a)还是文本txt结果(b)或者我全都要(all):')
choice2=choice2.lower()
if choice2 == 'a':
try:
saveHtml(doubanid,Hlist,beg=begin,end=endpage)
except Exception as e:
print(e)
print('储存html文件出错')
print('问题反馈:<EMAIL> | https://github.com/JimSunJing/douban_clawer')
over=input('按任意键退出')
else:
print('成功')
elif choice2 == 'b':
try:
saveTXT(doubanid,Hlist,beg=begin,end=endpage)
except Exception as e:
print(e)
print('储存txt文件出错')
print('问题反馈:<EMAIL> | https://github.com/JimSunJing/douban_clawer')
over=input('按任意键退出')
else:
print('成功')
elif choice2 == 'all':
try:
saveHtml(doubanid,Hlist,beg=begin,end=endpage)
saveTXT(doubanid,Hlist,beg=begin,end=endpage)
except Exception as e:
print(e)
print('出错')
print('程序结束,文件已存在该exe目录中')
print('问题反馈:<EMAIL> | https://github.com/JimSunJing/douban_clawer')
over=input('按任意键退出')
main()
| 6,426 |
src/management/app/migrations/0002_auto_20210329_2151.py
|
mnimmny/Project-Security-Metrics
| 47 |
2025354
|
# Generated by Django 3.1.7 on 2021-03-30 04:51
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='package',
old_name='name',
new_name='package_url',
),
migrations.CreateModel(
name='Metric',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('key', models.CharField(max_length=256)),
('value_integer', models.IntegerField(blank=True, null=True)),
('value_text', models.TextField(blank=True, null=True)),
('properties', models.JSONField(blank=True, null=True)),
('timestamp', models.DateTimeField(blank=True, null=True)),
('package', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.package')),
],
),
]
| 1,085 |
config/__init__.py
|
Ebryx/S3Rec0n
| 38 |
2025270
|
"""
___________________________ __ ________
/ _____/\_____ \______ \__ __ _______/ |\_____ \______
\_____ \ _(__ <| | _/ | \/ ___/\ __\_(__ <_ __ \
/ \ / \ | \ | /\___ \ | | / \ | \/
/_______ //______ /______ /____//____ > |__|/______ /__|
\/ \/ \/ \/ \/
"""
from config.functions import *
from config.colors import *
from config.config import *
from config.banner import *
from config.footer import *
| 547 |
UserDetails/users/views.py
|
Adutta1998/django_rest_api
| 0 |
2026309
|
from django.shortcuts import render,HttpResponse
from .models import UserData
# Create your views here.
def AddData(request):
return render(request,'index.html')
def DataSubmit(request):
if(request.method == 'POST'):
fname = request.POST['fname']
lname = request.POST['lname']
adrs = request.POST['adrs']
dob = request.POST['dob']
wght = request.POST['wght']
hght = request.POST['hght']
bld_grp = request.POST['bld_grp']
model = UserData(first_name=fname,last_name=lname,dob=dob,address=adrs,height=hght,weight=wght,blood_grp=bld_grp)
model.save()
return HttpResponse(request.POST['fname'])
else:
return render(request,'index.html')
| 734 |
hackerrank/1. introduction/5.py
|
Eurydia/Xian-assignment
| 0 |
2026173
|
from sys import stdin, stdout
if __name__ == '__main__':
n = int(stdin.readline())
for i in range(n):
stdout.write(f'{i*i}\n')
| 143 |
FlaskAppAML/views.py
|
ZGrinacoff/ufc_fightclub
| 0 |
2025648
|
"""
Routes and views for the flask application.
"""
import json
import urllib.request
import os
from datetime import datetime
from flask import render_template, request, redirect
from FlaskAppAML import app
from FlaskAppAML.forms import SubmissionForm
UFC_ML_KEY=os.environ.get('API_KEY', "<KEY>")
UFC_URL = os.environ.get('URL', "https://ussouthcentral.services.azureml.net/workspaces/4a71219c29084463a3cc6a1f420b86e7/services/3161dc4890504d8d898afff2e6cd335b/execute?api-version=2.0&details=true")
# Deployment environment variables defined on Azure (pull in with os.environ)
# Construct the HTTP request header
# HEADERS = {'Content-Type':'application/json', 'Authorization':('Bearer '+ API_KEY)}
HEADERS = {'Content-Type':'application/json', 'Authorization':('Bearer '+ UFC_ML_KEY)}
# Our main app page/route
@app.route('/', methods=['GET', 'POST'])
@app.route('/home', methods=['GET', 'POST'])
def home():
"""Renders the home page which is the CNS of the web app currently, nothing pretty."""
form = SubmissionForm(request.form)
# Form has been submitted
if request.method == 'POST' and form.validate():
# Plug in the data into a dictionary object
# - data from the input form
# - text data must be converted to lowercase
# form.title.data.lower() ---> Use for input form.
data = {
"Inputs": {
"input1":
{
"ColumnNames": ["Winner", "B_Height_cms", "B_Reach_cms", "B_Weight_lbs", "R_Height_cms", "R_Reach_cms", "R_Weight_lbs", "B_age", "R_age"],
"Values": [ [ "value", form.blue_height.data.lower(), form.blue_reach.data.lower(), form.blue_weight.data.lower(), form.red_height.data.lower(), form.red_reach.data.lower(), form.red_weight.data.lower(), form.blue_age.data.lower(), form.red_age.data.lower() ] ]
}, },
"GlobalParameters": {
}
}
# Serialize the input data into json string
body = str.encode(json.dumps(data))
# Formulate the request
#req = urllib.request.Request(URL, body, HEADERS)
req = urllib.request.Request(UFC_URL, body, HEADERS)
# print(UFC_URL + body + HEADERS)
# Send this request to the AML service and render the results on page
try:
# response = requests.post(URL, headers=HEADERS, data=body)
response = urllib.request.urlopen(req)
print(response)
respdata = response.read()
result = json.loads(str(respdata, 'utf-8'))
result = do_something_pretty(result)
# bar = create_chart()
# result = json.dumps(result, indent=4, sort_keys=True)
return render_template(
'result.html',
title="This is the result from AzureML running our UFC Fight Predictor:",
result=result)
# An HTTP error
except urllib.error.HTTPError as err:
result="The request failed with status code: " + str(err.reason)
return render_template(
'result.html',
title='There was an error',
result=result)
#print(err)
# Just serve up the input form
return render_template(
'form.html',
form=form,
title='Run App',
year=datetime.now().year,
message='Demonstrating a website using Azure ML Api')
@app.route('/contact')
def contact():
"""Renders the contact page."""
return render_template(
'contact.html',
title='Contact',
year=datetime.now().year,
message='UFC Fightclub contact page.'
)
@app.route('/about')
def about():
"""Renders the about page."""
return render_template(
'about.html',
title='About',
year=datetime.now().year,
message='Your application description page.'
)
def do_something_pretty(jsondata):
"""We want to process the AML json result to be more human readable and understandable"""
import itertools # for flattening a list of tuples below
# We only want the first array from the array of arrays under "Value"
# - it's cluster assignment and distances from all centroid centers from k-means model
value = jsondata["Results"]["output1"]["value"]["Values"][0]
#valuelen = len(value)
print(value)
# Convert values (a list) to a list of tuples [(cluster#,distance),...]
# valuetuple = list(zip(range(valuelen-1), value[1:(valuelen)]))
# Convert the list of tuples to one long list (flatten it)
# valuelist = list(itertools.chain(*valuetuple))
# Convert to a tuple for the list
# data = tuple(list(value[0]) + valuelist)
# Build a placeholder for the cluster#,distance values
#repstr = '<tr><td>%d</td><td>%s</td></tr>' * (valuelen-1)
# print(repstr)
output='With a prediction accuracy of ' + value[10] + ' the winner of the fight was: '+value[9] + "."
# def create_plot():
import plotly.graph_objects as go
import plotly
fighter_stats=['Height', 'Reach', 'Weight']
fig = go.Figure(data=[
go.Bar(name='Blue Fighter', x=fighter_stats, y=[value[1], value[2], value[3]]),
go.Bar(name='Red Fighter', x=fighter_stats, y=[value[4], value[5], value[6]])
])
# Change the bar mode
fig.update_layout(barmode='group', title='UFC Fighter Stats Bar')
fig.show()
# graphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
# return graphJSON
# Build the entire html table for the results data representation
#tablestr = 'Cluster assignment: %s<br><br><table border="1"><tr><th>Cluster</th><th>Distance From Center</th></tr>'+ repstr + "</table>"
#return tablestr % data
return output
| 5,861 |
Knapsack-Problem/knapsack.py
|
Uljibuh/Genetic-Algorithm-in-Python
| 0 |
2024777
|
import numpy as np
class Knapsack01Problem:
def __init__(self):
# initialize instance variables:
self.items = []
self.maxCapacity = 0
# initialize the data
self.__initData()
def __len__(self):
"""
:return: the total number of items defined in the problem
"""
return len(self.items)
def __initData(self):
"""
initializes the problem data
"""
self.items = [
("map", 9, 150),
("compass", 13, 35),
("water", 153, 35),
("sandwich", 50, 160),
("glucose", 15, 60),
("tin", 15, 45),
("banana", 27, 60),
("apple", 39, 40),
("cheess", 23, 30),
("beer", 52, 10),
("suntan cream", 11, 70),
("camera", 32, 30),
("t-shirt", 24, 15),
("trousers", 48, 10),
("umbrella", 73, 40),
("waterproof trousers", 42, 70),
("waterproof overcloths", 43, 75),
("note-case", 22, 80),
("sunglasses", 7, 80),
("towel", 18, 12),
("socks", 4, 50),
("book", 30, 10),
]
self.maxCapacity = 400
def getValue(self, zeroOneList):
"""
calculate the value of the selected items in the list,
        while ignoring items that will cause the accumulating weight to exceed the maximum weight
:param zeroOneList: a list of 0/1 values corresponding to the list of the problem's items,
'1' means that item was selected,
:return: the calculated value
"""
totalWeight = totalValue = 0
for i in range(len(zeroOneList)):
item, weight, value = self.items[i]
if totalWeight + weight <= self.maxCapacity:
totalWeight += zeroOneList[i] * weight
totalValue += zeroOneList[i] * value
return totalValue
def printItems(self,zeroOneList):
"""
        print the selected items in the list, while ignoring items that will cause the
        accumulating weight to exceed the maximum weight
        :param zeroOneList: a list of 0/1 values corresponding to the list of the problem's items,
        '1' means that item was selected.
"""
totalWeight = totalValue = 0
for i in range(len(zeroOneList)):
item, weight, value = self.items[i]
if totalWeight + weight <= self.maxCapacity:
if zeroOneList[i] > 0:
totalWeight += weight
totalValue += value
print("- adding {}: weight = {}, value = {}, accumulated weight = {}, accumulated value = {}". format(item, weight, value, totalWeight, totalValue))
print("- total weight = {}, total value = {}".format(totalWeight, totalValue))
# test the class using a randomly generated solution
def main():
# create a problem instance:
knapsack = Knapsack01Problem()
# create a random solution and evaluate it:
randomSolution = np.random.randint(2, size=len(knapsack))
print("Random solution = ")
print(randomSolution)
knapsack.printItems(randomSolution)
if __name__ == "__main__":
main()
| 3,463 |
src/server.py
|
klingj3/subreddit_suggestor
| 8 |
2024775
|
import tensorflow as tf
from model_generation.suggester import Suggester
from flask import Flask, send_file
app = Flask(__name__)
@app.before_first_request
def load_model():
# Load the model for speed in subsequent calls.
app.suggester = Suggester()
@app.route("/api/suggestions/<username>")
def suggestions(username):
return app.suggester.get_estimates_for_user(username)
@app.route("/")
def landing():
return send_file('static/index.html')
if __name__ == '__main__':
app.run(threaded=False)
| 520 |
DailyProgrammer/DP20141105B.py
|
DayGitH/Python-Challenges
| 2 |
2026093
|
"""
[11/05/2014] Challenge #187 [Intermediate] Finding Time to Reddit
https://www.reddit.com/r/dailyprogrammer/comments/2ledaj/11052014_challenge_187_intermediate_finding_time/
#Description:
I cover the border of my monitor with post it notes with tasks I have to do during the week. I am very unorganized.
Each day I want to find the biggest block of free time to go on to Reddit. But I am not sure when that time is. I am
also curious how I spend my days.
This challenge you will help me get organized and find that time for me to be on Reddit.
#Input:
I will give you a listing of the post it notes around my monitor. Each line represents a single post it note. Sorry but
they are not in any order but I was at least smart enough to date them and put the times of my daily events.
#Output:
Get me organized. I need to see my schedule for the week. For each day you must find the 1 block of time that is the
most time between events on the post its that I can Reddit. Please help maximize my time on Reddit. Assume my start
time at work is the beginning of the first event and my end time at work is the end time of the last event for that
day.
Then show me my final schedule. And while you are at it show me across the week how many minutes I dedicate to each
task with a percentage of time it takes up my time. Hopefully I don't spend most of my time on Reddit.
#Challenge Input:
11-6-2014: 05:18 AM to 06:00 AM -- code review
11-9-2014: 08:52 AM to 09:15 AM -- food
11-8-2014: 07:00 PM to 08:05 PM -- meeting
11-8-2014: 05:30 PM to 06:36 PM -- personal appointment
11-6-2014: 02:47 PM to 03:23 PM -- work
11-11-2014: 07:14 AM to 08:32 AM -- meeting
11-11-2014: 11:22 AM to 12:10 PM -- code review
11-8-2014: 01:39 PM to 02:06 PM -- food
11-9-2014: 07:12 AM to 08:06 AM -- meeting
11-9-2014: 02:14 PM to 03:15 PM -- code review
11-8-2014: 05:13 AM to 06:05 AM -- food
11-6-2014: 05:54 PM to 06:17 PM -- personal appointment
11-7-2014: 08:24 AM to 09:23 AM -- personal appointment
11-8-2014: 11:28 AM to 12:44 PM -- meeting
11-7-2014: 09:35 AM to 10:35 AM -- workout
11-9-2014: 10:05 AM to 11:15 AM -- code review
11-11-2014: 05:02 PM to 06:09 PM -- work
11-6-2014: 06:16 AM to 07:32 AM -- food
11-10-2014: 10:08 AM to 11:14 AM -- workout
11-8-2014: 04:33 PM to 05:12 PM -- meeting
11-10-2014: 01:38 PM to 02:10 PM -- workout
11-11-2014: 03:03 PM to 03:40 PM -- food
11-11-2014: 05:03 AM to 06:12 AM -- food
11-9-2014: 09:49 AM to 10:09 AM -- meeting
11-8-2014: 06:49 AM to 07:34 AM -- work
11-7-2014: 07:29 AM to 08:22 AM -- food
11-10-2014: 03:08 PM to 03:29 PM -- code review
11-9-2014: 03:27 PM to 04:39 PM -- food
11-7-2014: 05:38 AM to 06:49 AM -- meeting
11-7-2014: 03:28 PM to 04:06 PM -- code review
11-8-2014: 02:44 PM to 03:35 PM -- meeting
11-6-2014: 08:53 AM to 09:55 AM -- workout
11-11-2014: 02:05 PM to 02:49 PM -- meeting
11-10-2014: 08:29 AM to 09:23 AM -- code review
11-10-2014: 11:09 AM to 11:35 AM -- sales call
11-6-2014: 11:29 AM to 12:18 PM -- code review
11-11-2014: 08:04 AM to 08:45 AM -- work
11-9-2014: 12:27 PM to 01:29 PM -- sales call
11-7-2014: 11:04 AM to 12:07 PM -- code review
11-11-2014: 09:21 AM to 10:37 AM -- food
11-8-2014: 09:34 AM to 10:53 AM -- meeting
11-11-2014: 12:36 PM to 01:30 PM -- meeting
11-10-2014: 05:44 AM to 06:30 AM -- personal appointment
11-6-2014: 04:22 PM to 05:05 PM -- code review
11-6-2014: 01:30 PM to 01:59 PM -- sales call
11-10-2014: 06:54 AM to 07:41 AM -- code review
11-9-2014: 11:56 AM to 12:17 PM -- work
11-10-2014: 12:20 PM to 01:17 PM -- personal appointment
11-8-2014: 07:57 AM to 09:08 AM -- meeting
11-7-2014: 02:34 PM to 03:06 PM -- work
11-9-2014: 05:13 AM to 06:25 AM -- workout
11-11-2014: 04:04 PM to 04:40 PM -- food
11-9-2014: 06:03 AM to 06:26 AM -- code review
11-6-2014: 10:32 AM to 11:22 AM -- sales call
11-6-2014: 07:51 AM to 08:25 AM -- personal appointment
11-7-2014: 01:07 PM to 02:14 PM -- meeting
#FAQ:
Dates are mm-dd-yyyy
#Check this out:
If you have ideas for challenges - please visit and post on /r/dailyprogrammer_ideas
Check out side bar -- we have an IRC channel. A listing of past challenges and much more.
"""
def main():
pass
if __name__ == "__main__":
main()
| 4,469 |
docs/conftest.py
|
goerz-testing/pypkg_rtd_02
| 0 |
2025444
|
"""This file is automatically executed by pytest when testing anything in the
docs folder"""
import pytest
import pypkg_rtd_02
@pytest.fixture(autouse=True)
def set_doctest_env(doctest_namespace):
"""Inject package itself into doctest namespace.
This is so we don't need
.. doctest::
>>> import pypkg_rtd_02
in any doctests
"""
doctest_namespace['pypkg_rtd_02'] = pypkg_rtd_02
| 424 |
PID.py
|
michaelchi08/visual_navigation
| 2 |
2025982
|
"""
A simple PID controller class.
This is a mostly literal C++ -> Python translation of the ROS
control_toolbox Pid class: http://ros.org/wiki/control_toolbox.
"""
#*******************************************************************
# Translated from pid.cpp by <NAME>
# December 2017
# See below for original license information:
#*******************************************************************
#*******************************************************************
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, <NAME>, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the <NAME> nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#*******************************************************************
import time
import math
class PID(object):
"""
This class implements a generic structure that can be used to
create a wide range of pid controllers. It can function
independently or be subclassed to provide more specific controls
based on a particular control loop.
In particular, this class implements the standard pid equation:
$command = -p_{term} - i_{term} - d_{term} $
where:
$ p_{term} = p_{gain} * p_{error} $
$ i_{term} = i_{gain} * i_{error} $
$ d_{term} = d_{gain} * d_{error} $
$ i_{error} = i_{error} + p_{error} * dt $
$ d_{error} = (p_{error} - p_{error last}) / dt $
given:
$ p_{error} = p_{state} - p_{target} $.
"""
def __init__(self, pGain, iGain, dGain, iMax, iMin):
"""Constructor, zeros out error values when created and
initialize Pid-gains and integral term limits.
Parameters:
pGain The proportional gain.
iGain The integral gain.
dGain The derivative gain.
iMax The integral upper limit.
iMin The integral lower limit.
"""
self.set_gains(pGain, iGain, dGain, iMax, iMin)
self.reset()
def reset(self):
"""
Reset the state of this PID controller
"""
        self._pErrorLast = 0.0 # Last saved position for derivative gain
self._pError = 0.0 # Position error
self._dError = 0.0 # Derivative error
        self._iError = 0.0 # Integrator error
self._cmd = 0.0 # Command to send
self._lastTime = None # Used for automatic calculation of dt
def set_gains(self, pGain, iGain, dGain, iMax, iMin):
""" Set PID gains for the controller.
Parameters:
p_gain The proportional gain.
iGain The integral gain.
dGain The derivative gain.
iMax The integral upper limit.
iMin The integral lower limit.
"""
self._pGain = pGain
self._iGain = iGain
self._dGain = dGain
self._iMax = iMax
self._iMin = iMin
@property
def pGain(self):
""" Read-only access to p_gain. """
return self._pGain
@property
def iGain(self):
""" Read-only access to iGain. """
return self._iGain
@property
def dGain(self):
""" Read-only access to dGain. """
return self._dGain
@property
def iMax(self):
""" Read-only access to iMax. """
return self._iMax
@property
def iMin(self):
""" Read-only access to iMin. """
return self._iMin
@property
def pError(self):
""" Read-only access to pError. """
return self._pError
@property
def iError(self):
""" Read-only access to iError. """
return self._iError
@property
def dError(self):
""" Read-only access to dError. """
return self._dError
@property
def cmd(self):
""" Read-only access to the latest command. """
return self._cmd
def __str__(self):
""" String representation of the current state of the controller. """
result = ""
result += "p_gain: " + str(self._pGain) + "\n"
result += "iGain: " + str(self._iGain) + "\n"
result += "dGain: " + str(self._dGain) + "\n"
result += "iMax: " + str(self._iMax) + "\n"
result += "iMin: " + str(self._iMin) + "\n"
result += "pError: " + str(self._pError) + "\n"
result += "iError: " + str(self._iError) + "\n"
result += "dError: " + str(self._dError) + "\n"
result += "cmd: " + str(self._cmd) + "\n"
return result
def update_PID(self, pError, dt=None):
""" Update the Pid loop with nonuniform time step size.
Parameters:
pError Error since last call (p_state - p_target)
dt Change in time since last call, in seconds, or None.
If dt is None, then the system clock will be used to
calculate the time since the last update.
"""
        if dt is None:
curTime = time.time()
if self._lastTime is None:
self._lastTime = curTime
dt = curTime - self._lastTime
self._lastTime = curTime
self._pError = pError # this is pError = pState-pTarget
if dt == 0 or math.isnan(dt) or math.isinf(dt):
return 0.0
# Calculate proportional contribution to command
pTerm = self._pGain * self._pError
# Calculate the integral error
self._iError += dt * self._pError
# Calculate integral contribution to command
iTerm = self._iGain * self._iError
# Limit iTerm so that the limit is meaningful in the output
if iTerm > self._iMax and self._iGain != 0:
iTerm = self._iMax
self._iError = iTerm / self._iGain
elif iTerm < self._iMin and self._iGain != 0:
iTerm = self._iMin
self._iError = iTerm / self._iGain
# Calculate the derivative error
self._dError = (self._pError - self._pErrorLast) / dt
        self._pErrorLast = self._pError
# Calculate derivative contribution to command
dTerm = self._dGain * self._dError
self._cmd = -pTerm - iTerm - dTerm
return self._cmd
if __name__ == "__main__":
controller = PID(1.0, 2.0, 3.0, 1.0, -1.0)
    print(controller)
    controller.update_PID(-1)
    print(controller)
    controller.update_PID(-.5)
    print(controller)
| 7,838 |
src/repo_peek/fetch.py
|
rahulunair/repo-peek
| 44 |
2026268
|
"""fetch a remote repo and open in a local editor."""
import asyncio
import os
from pathlib import Path
import shutil
import uvloop
from .logging import logger
from .config import Config
from .config import EDITORS
from .config import init_dir
from .utils import fetch_repo
from .utils import clone_repo
from .utils import extract
uvloop.install()
async def open_editor(editor="vim", path: Path = Path("")):
"""open editor with source."""
editor = os.getenv("EDITOR", editor)
    if editor not in EDITORS:
        editor = "vim"
        logger.info("EDITOR not supported, defaulting to vim")
logger.info("opening in : {}".format(editor))
process = await asyncio.create_subprocess_exec(editor, str(path))
await process.communicate()
def is_old(config_file: Path) -> bool:
"""if count is zero, return True, else False"""
with open(config_file, "r") as fh:
line = fh.readline()
count = int(line.split("=")[1])
if count == 0:
return True
else:
return False
def rm_stored_repos(home_dir):
logger.info("removing cached dir: {}".format(home_dir))
shutil.rmtree(home_dir, ignore_errors=True)
async def peek_repo(repo: str, service="github", cache=False):
parsed_config = Config(".repk.ini")
cache_dir = Path.home() / ".repk"
if not cache:
rm_stored_repos(cache_dir)
repo_dir = init_dir(cache_dir / "repos" / repo)
# readme_path = await readme_from_cdn(repo, repo_dir)
if os.path.isdir(repo_dir) and os.listdir(repo_dir):
await open_editor(path=repo_dir)
else:
parsed_config = Config(".repk.ini")
tar_dirs = init_dir(cache_dir / "tars")
logger.info("fetching repo: {}".format(repo))
parsed_config.config.remove_section(repo)
parsed_config.cache_repo(repo)
if service == "github":
repo_name = await fetch_repo(repo, tar_dirs)
await extract(repo_name, repo_dir)
elif service == "gitlab":
await clone_repo(repo, repo_dir)
await open_editor(path=repo_dir)
if parsed_config.is_repo_stale(repo):
rm_stored_repos(cache_dir)
parsed_config.config.remove_section(repo)
    # download readme from cdn to the dir in which editor is opened
    # extract files to the same dir
    # types
    # profile and improve loading time
def main(repo="rahulunair/cloudstore", service="github"):
asyncio.run(peek_repo(repo, service))
| 2,459 |
export_assignment_responses.py
|
MuckRock/API-examples
| 42 |
2025620
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import requests
import unicodecsv
from utils import get_api_key
token = get_api_key()
url = 'https://www.muckrock.com/api_v1/'
headers = {'Authorization': 'Token %s' % token, 'content-type': 'application/json'}
assignmentID = raw_input('What assignment are you trying to export? Look at the numbers at the end of the URL: ')
next_ = url + "assignment-responses/?crowdsource=" + str(assignmentID)
basic_fields = [
"id",
"user",
"datetime",
"tags",
"skip",
"number",
"flag",
"gallery",
"crowdsource"
]
assignment_fields = []
page = 1
csv_file = open('assignment ' + str(assignmentID) + ' export.csv', 'w')
csv_file.seek(0)
csv_writer = unicodecsv.writer(csv_file)
r = requests.get(next_, headers=headers)
json = r.json()
for column in json['results'][0]['values']:
assignment_fields.append(column['field'])
csv_writer.writerow(basic_fields + assignment_fields)
while next_ is not None:
r = requests.get(next_, headers=headers)
try:
json = r.json()
next_ = json['next']
for datum in json['results']:
submissions = []
for field in basic_fields:
submissions.append(datum[field])
for entry in datum["values"]:
submissions.append(entry["value"])
csv_writer.writerow(submissions)
# csv_writer.writerow(datum[field] for field in basic_fields + )
# + datum['values'][assignment_fields] for field in assignment_fields
print 'Page %d of %d' % (page, json['count'] / 40)
page += 1
except Exception as e:
print e
| 1,626 |
regulations/templatetags/in_context.py
|
contolini/regulations-site
| 0 |
2025595
|
from django import template
register = template.Library()
class InContextNode(template.Node):
def __init__(self, nodelist, subcontext_names):
self.nodelist = nodelist
self.subcontext_names = subcontext_names
def render(self, context):
new_context = {}
for field in self.subcontext_names:
value = context.get(field, {})
if isinstance(value, dict):
new_context.update(context.get(field, {}))
else:
new_context[field] = value
return self.nodelist.render(template.Context(new_context))
@register.tag('begincontext')
def in_context(parser, token):
"""
Replaces the context (inside of this block) for easy (and safe) inclusion
of sub-content.
For example, if the context is {'name': 'Kitty', 'sub': {'size': 5}}
1: {{ name }} {{ size }}
{% begincontext sub %}
2: {{ name }} {{ size }}
{% endcontext %}
3: {{ name }} {{ size }}
Will print
1: Kitty
2: 5
3: Kitty
Arguments which are not dictionaries will 'cascade' into the inner
context.
"""
nodelist = parser.parse(('endcontext',))
parser.delete_first_token()
return InContextNode(nodelist, token.split_contents()[1:])
| 1,298 |
main.py
|
mingewang/dmn-pytorch
| 15 |
2025594
|
import torch
import argparse
import pickle
import pprint
import numpy as np
import os
from dataset import Dataset, Config
from model import DMN
from run import run_epoch
argparser = argparse.ArgumentParser()
# run settings
argparser.add_argument('--data_path', type=str, default='./data/babi(tmp).pkl')
argparser.add_argument('--model_name', type=str, default='m')
argparser.add_argument('--checkpoint_dir', type=str, default='./results/')
argparser.add_argument('--batch_size', type=int, default=32)
argparser.add_argument('--epoch', type=int, default=100)
argparser.add_argument('--train', type=int, default=1)
argparser.add_argument('--valid', type=int, default=1)
argparser.add_argument('--test', type=int, default=1)
argparser.add_argument('--early_stop', type=int, default=0)
argparser.add_argument('--resume', action='store_true', default=False)
argparser.add_argument('--save', action='store_true', default=False)
argparser.add_argument('--print_step', type=float, default=128)
# model hyperparameters
argparser.add_argument('--lr', type=float, default=0.0003)
argparser.add_argument('--lr_decay', type=float, default=1.0)
argparser.add_argument('--wd', type=float, default=0)
argparser.add_argument('--grad_max_norm', type=int, default=5)
argparser.add_argument('--s_rnn_hdim', type=int, default=100)
argparser.add_argument('--s_rnn_ln', type=int, default=1)
argparser.add_argument('--s_rnn_dr', type=float, default=0.0)
argparser.add_argument('--q_rnn_hdim', type=int, default=100)
argparser.add_argument('--q_rnn_ln', type=int, default=1)
argparser.add_argument('--q_rnn_dr', type=float, default=0.0)
argparser.add_argument('--e_cell_hdim', type=int, default=100)
argparser.add_argument('--m_cell_hdim', type=int, default=100)
argparser.add_argument('--a_cell_hdim', type=int, default=100)
argparser.add_argument('--word_dr', type=float, default=0.2)
argparser.add_argument('--g1_dim', type=int, default=500)
argparser.add_argument('--max_episode', type=int, default=10)
argparser.add_argument('--beta_cnt', type=int, default=10)
argparser.add_argument('--set_num', type=int, default=1)
argparser.add_argument('--max_alen', type=int, default=2)
args = argparser.parse_args()
def run_experiment(model, dataset, set_num):
best_metric = np.zeros(2)
early_stop = False
if model.config.train:
if model.config.resume:
model.load_checkpoint()
for ep in range(model.config.epoch):
if early_stop:
break
print('- Training Epoch %d' % (ep+1))
run_epoch(model, dataset, ep, 'tr', set_num)
if model.config.valid:
print('- Validation')
met = run_epoch(model, dataset, ep, 'va', set_num, False)
if best_metric[1] < met[1]:
best_metric = met
model.save_checkpoint({
'config': model.config,
'state_dict': model.state_dict(),
'optimizer': model.optimizer.state_dict()})
if best_metric[1] == 100:
break
else:
# model.decay_lr()
if model.config.early_stop:
early_stop = True
print('\tearly stop applied')
print('\tbest metrics:\t%s' % ('\t'.join(['{:.2f}'.format(k)
for k in best_metric])))
if model.config.test:
print('- Testing')
run_epoch(model, dataset, ep, 'te', set_num, False)
print()
if model.config.test:
print('- Load Validation/Testing')
if model.config.resume or model.config.train:
model.load_checkpoint()
run_epoch(model, dataset, 0, 'va', set_num, False)
run_epoch(model, dataset, 0, 'te', set_num, False)
print()
return best_metric
def main():
if not os.path.exists('./results'):
os.makedirs('./results')
print('### load dataset')
dataset = pickle.load(open(args.data_path, 'rb'))
# update args
dataset.config.__dict__.update(args.__dict__)
args.__dict__.update(dataset.config.__dict__)
pp = lambda x: pprint.PrettyPrinter().pprint(x)
pp(args.__dict__)
# new model experiment
for set_num in range(args.set_num, args.set_num+1):
print('\n[QA set %d]' % (set_num))
model = DMN(args, dataset.idx2vec, set_num).cuda()
results = run_experiment(model, dataset, set_num)
print('### end of experiment')
if __name__ == '__main__':
main()
| 4,606 |
examples/pitch_setup/plot_pitches.py
|
ElJdP/mplsoccer
| 0 |
2025580
|
"""
======
Basics
======
First we import the Pitch classes and matplotlib
"""
import matplotlib.pyplot as plt
from mplsoccer import Pitch, VerticalPitch
##############################################################################
# Draw a pitch on a new axis
# --------------------------
# Let's plot on a new axis first.
pitch = Pitch()
# specifying figure size (width, height)
fig, ax = pitch.draw(figsize=(8, 4))
##############################################################################
# Draw on an existing axis
# ------------------------
# mplsoccer also plays nicely with other matplotlib figures. To draw a pitch on an
# existing matplotlib axis specify an ``ax`` in the ``draw`` method.
fig, axs = plt.subplots(nrows=1, ncols=2)
pitch = Pitch()
pie = axs[0].pie(x=[5, 15])
pitch.draw(ax=axs[1])
##############################################################################
# Supported data providers
# ------------------------
# mplsoccer supports 9 pitch types by specifying the ``pitch_type`` argument:
# 'statsbomb', 'opta', 'tracab', 'wyscout', 'uefa', 'metricasports', 'custom',
# 'skillcorner' and 'secondspectrum'.
# If you are using tracking data or the custom pitch ('metricasports', 'tracab',
# 'skillcorner', 'secondspectrum' or 'custom'), you also need to specify the
# ``pitch_length`` and ``pitch_width``, which are typically 105 and 68 respectively.
pitch = Pitch(pitch_type='opta') # example plotting an Opta/ Stats Perform pitch
fig, ax = pitch.draw()
##############################################################################
pitch = Pitch(pitch_type='tracab', # example plotting a tracab pitch
pitch_length=105, pitch_width=68,
axis=True, label=True) # showing axis labels is optional
fig, ax = pitch.draw()
##############################################################################
# Adjusting the plot layout
# -------------------------
# mplsoccer also plots on grids by specifying nrows and ncols.
# The default is to use
# tight_layout. See: https://matplotlib.org/stable/tutorials/intermediate/tight_layout_guide.html.
pitch = Pitch()
fig, axs = pitch.draw(nrows=2, ncols=3)
##############################################################################
# But you can also use constrained layout
# by setting ``constrained_layout=True`` and ``tight_layout=False``, which may look better.
# See: https://matplotlib.org/stable/tutorials/intermediate/constrainedlayout_guide.html.
pitch = Pitch()
fig, axs = pitch.draw(nrows=2, ncols=3, tight_layout=False, constrained_layout=True)
##############################################################################
# If you want more control over how pitches are placed
# you can use the grid method. This also works for one pitch (nrows=1 and ncols=1).
# It also plots axes for an endnote and title (see the plot_grid example for more information).
pitch = Pitch()
fig, axs, ax_title, ax_endnote = pitch.grid(nrows=3, ncols=3, figheight=10,
# the grid takes up 71.5% of the figure height
grid_height=0.715,
# 5% of grid_height is reserved for space between axes
space=0.05,
# centers the grid horizontally
left=None,
# grid starts 2.5% up from the bottom of the figure
bottom=0.025)
##############################################################################
# Pitch orientation
# -----------------
# There are four basic pitch orientations.
# To get vertical pitches use the VerticalPitch class.
# To get half pitches use the half=True argument.
#
# Horizontal full
pitch = Pitch(half=False)
fig, ax = pitch.draw()
##############################################################################
# Vertical full
pitch = VerticalPitch(half=False)
fig, ax = pitch.draw()
##############################################################################
# Horizontal half
pitch = Pitch(half=True)
fig, ax = pitch.draw()
##############################################################################
# Vertical half
pitch = VerticalPitch(half=True)
fig, ax = pitch.draw()
##############################################################################
# You can also adjust the pitch orientations with the ``pad_left``, ``pad_right``,
# ``pad_bottom`` and ``pad_top`` arguments to make arbitrary pitch shapes.
pitch = VerticalPitch(half=True,
pad_left=-10, # bring the left axis in 10 data units (reduce the size)
pad_right=-10, # bring the right axis in 10 data units (reduce the size)
pad_top=10, # extend the top axis 10 data units
pad_bottom=20) # extend the bottom axis 20 data units
fig, ax = pitch.draw()
##############################################################################
# Pitch appearance
# ----------------
# The pitch appearance is adjustable.
# Use ``pitch_color`` and ``line_color``, and ``stripe_color`` (if ``stripe=True``)
# to adjust the colors.
pitch = Pitch(pitch_color='#aabb97', line_color='white',
stripe_color='#c2d59d', stripe=True) # optional stripes
fig, ax = pitch.draw()
##############################################################################
# Juego de Posición
# -----------------
# You can add the Juego de Posición pitch lines and shade the middle third
pitch = Pitch(positional=True, shade_middle=True, positional_color='#eadddd', shade_color='#f2f2f2')
fig, ax = pitch.draw()
##############################################################################
# mplsoccer can also plot grass pitches by setting ``pitch_color='grass'``.
pitch = Pitch(pitch_color='grass', line_color='white',
stripe=True) # optional stripes
fig, ax = pitch.draw()
##############################################################################
# Three goal types are included ``goal_type='line'``, ``goal_type='box'``,
# and ``goal_type='circle'``
fig, axs = plt.subplots(nrows=3, figsize=(10, 18))
pitch = Pitch(goal_type='box', goal_alpha=1) # you can also adjust the transparency (alpha)
pitch.draw(axs[0])
pitch = Pitch(goal_type='line')
pitch.draw(axs[1])
pitch = Pitch(goal_type='circle', linewidth=1)
pitch.draw(axs[2])
##############################################################################
# The line markings and spot size can be adjusted via ``linewidth`` and ``spot_scale``.
# Spot scale also adjusts the size of the circle goal posts.
pitch = Pitch(linewidth=3,
# the size of the penalty and center spots relative to the pitch_length
spot_scale=0.01)
fig, ax = pitch.draw()
##############################################################################
# If you need to lift the pitch markings above other elements of the chart.
# You can do this via ``line_zorder``, ``stripe_zorder``,
# ``positional_zorder``, and ``shade_zorder``.
pitch = Pitch(line_zorder=2) # e.g. useful if you want to plot pitch lines over heatmaps
fig, ax = pitch.draw()
##############################################################################
# Axis
# ----
# By default mplsoccer turns off the axis (border), ticks, and labels.
# You can use them by setting the ``axis``, ``label`` and ``tick`` arguments.
pitch = Pitch(axis=True, label=True, tick=True)
fig, ax = pitch.draw()
##############################################################################
# xkcd
# ----
# Finally let's use matplotlib's xkcd theme.
plt.xkcd()
pitch = Pitch(pitch_color='grass', stripe=True)
fig, ax = pitch.draw(figsize=(8, 4))
annotation = ax.annotate('Who can resist this?', (60, 10), fontsize=30, ha='center')
| 7,888 |
src/ParserForSHEN.py
|
demidko/FefuParser
| 0 |
2023707
|
from bs4 import BeautifulSoup
import urllib3
def pars(url):
http = urllib3.PoolManager()
#url = 'https://www.dvfu.ru/schools/school_of_natural_sciences/structures/department/cluster-physics-and-mathematics-departments/'
#url = 'https://www.dvfu.ru/schools/school_of_natural_sciences/structures/department/the-chemical-cluster-of-departments/'
response = http.request('GET', url)
    soup = BeautifulSoup(response.data, 'html.parser')
#print('---------------------------------------------')
#print(soup.find_all('table')[0].find_all('td'))
#print('---------------------------------------------')
data = []
table = soup.find('table')
table_body = table.find('tbody')
rows = table_body.find_all('tr')
for row in rows:
cols = row.find_all('td')
cols = [ele.text.strip() for ele in cols]
data.append([ele for ele in cols if ele]) # Get rid of empty values
m_data = []
for j in range(len(data)-1):
i = j+1
data_ = dict.fromkeys(['Name', 'Post', 'Phone', 'Email', 'School', 'Department'])
data_['School'] = 'ШЕН'
data_['Department'] = data[i][0]
data_['Post'] = data[i][1].split('\n')[0]
data_['Name'] = data[i][1].split('\n')[1]
        if (data[i][2].find('Тел.') != -1):
temp = data[i][2][data[i][2].find('Тел.')+6:data[i][2].find('Тел.')+23].split()
temp[1] = temp[1][1:4]
temp = '-'.join(temp)
data_['Phone'] = temp
        if (data[i][2].find('E-mail:') != -1):
temp = data[i][2][data[i][2].find('E-mail:')+8:]
data_['Email'] = temp
m_data.append(data_)
return m_data
#print('\n'.join(data_))
#print(m_data)
m_data = []
urls = ['https://www.dvfu.ru/schools/school_of_natural_sciences/structures/department/cluster-physics-and-mathematics-departments/',
'https://www.dvfu.ru/schools/school_of_natural_sciences/structures/department/the-cluster-of-biological-departments/',
'https://www.dvfu.ru/schools/school_of_natural_sciences/structures/department/the-chemical-cluster-of-departments/',
'https://www.dvfu.ru/schools/school_of_natural_sciences/structures/department/the-cluster-of-the-departments-of-earth-sciences/']
with open('out.txt', 'w') as f:
for u in urls:
for i in pars(u):
s = ''
s += '{\n'
for j in i.keys():
                if (i[j] is not None):
s += ' '
s += j + ': "'+i[j]+'"'
s += '\n'
s = s[0:len(s)-1]
s += '\n} '
f.write(s)
| 2,312 |
bme280.py
|
jamisonderek/waterconsumption
| 0 |
2025791
|
import adafruit_bme280
class Bme280:
def __init__(self, i2c):
self.__bme280 = adafruit_bme280.Adafruit_BME280_I2C(i2c, 0x76)
def get_temperature(self):
return self.__bme280.temperature
temperature = property(get_temperature)
def get_humidity(self):
return self.__bme280.relative_humidity
humidity = property(get_humidity)
def set_sea_level_pressure(self, hPa):
self.__bme280.sea_level_pressure = hPa
| 463 |
blizzardapi/api.py
|
trevorphillipscoding/python-blizzardapi
| 10 |
2022755
|
"""api.py file."""
import requests
class Api:
"""Base API class.
Attributes:
_client_id: A string client id supplied by Blizzard.
_client_secret: A string client secret supplied by Blizzard.
_access_token: A string access token that is used to access Blizzard's API.
_api_url: A string url used to call the API endpoints.
_api_url_cn: A string url used to call the china API endpoints.
_oauth_url: A string url used to call the OAuth API endpoints.
_oauth_url_cn: A string url used to call the china OAuth API endpoints.
_session: An open requests.Session instance.
"""
def __init__(self, client_id, client_secret):
"""Init Api."""
self._client_id = client_id
self._client_secret = client_secret
self._access_token = None
self._api_url = "https://{0}.api.blizzard.com{1}"
self._api_url_cn = "https://gateway.battlenet.com.cn{0}"
self._oauth_url = "https://{0}.battle.net{1}"
self._oauth_url_cn = "https://www.battlenet.com.cn{0}"
self._session = requests.Session()
def _get_client_token(self, region):
"""Fetch an access token based on client id and client secret credentials.
Args:
region:
A string containing a region.
"""
url = self._format_oauth_url("/oauth/token", region)
query_params = {"grant_type": "client_credentials"}
response = self._session.post(
url,
params=query_params,
auth=(self._client_id, self._client_secret),
)
return self._response_handler(response)
def _response_handler(self, response):
"""Handle the response."""
return response.json()
def _request_handler(self, url, region, query_params):
"""Handle the request."""
if self._access_token is None:
json = self._get_client_token(region)
self._access_token = json["access_token"]
if query_params.get("access_token") is None:
query_params["access_token"] = self._access_token
response = self._session.get(url, params=query_params)
return self._response_handler(response)
def _format_api_url(self, resource, region):
"""Format the API url into a usable url."""
if region == "cn":
url = self._api_url_cn.format(resource)
else:
url = self._api_url.format(region, resource)
return url
def get_resource(self, resource, region, query_params={}):
"""Direction handler for when fetching resources."""
url = self._format_api_url(resource, region)
return self._request_handler(url, region, query_params)
def _format_oauth_url(self, resource, region):
"""Format the oauth url into a usable url."""
if region == "cn":
url = self._oauth_url_cn.format(resource)
else:
url = self._oauth_url.format(region, resource)
return url
def get_oauth_resource(self, resource, region, query_params={}):
"""Direction handler for when fetching oauth resources."""
url = self._format_oauth_url(resource, region)
return self._request_handler(url, region, query_params)
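# Minimal usage sketch (hypothetical credentials; the resource path and query
# parameters below are illustrative of how get_resource() is meant to be called,
# and are normally supplied by the higher-level wrapper classes in this package):
if __name__ == "__main__":
    api = Api(client_id="<CLIENT_ID>", client_secret="<CLIENT_SECRET>")
    # The client-credentials token is fetched lazily on the first request and
    # then cached on the instance for subsequent calls.
    index = api.get_resource(
        "/data/wow/playable-class/index",
        "us",
        {"namespace": "static-us", "locale": "en_US"},
    )
    print(index)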
| 3,295 |
web/api/serializers.py
|
PauBatlle/NewsMultirank
| 0 |
2024482
|
from rest_framework.serializers import ModelSerializer
from web.api.models import Prediction
class PredictionSerializer(ModelSerializer):
class Meta:
model = Prediction
fields = ('id', 'result')
extra_kwargs = {
'id': {'read_only': True}
}
| 291 |
tools/metrics/structured/compile_time_validation.py
|
sarang-apps/darshan_browser
| 0 |
2025753
|
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Verifies that structured.xml is well-structured."""
from collections import Counter
from model import _METRIC_TYPE
from model import _EVENT_TYPE, _EVENTS_TYPE
from model import _PROJECT_TYPE, _PROJECTS_TYPE
def eventsReferenceValidProjects(data):
"""Check that any project referenced by an event exists."""
projects = {
project['name']: project
for project in data[_PROJECTS_TYPE.tag][_PROJECT_TYPE.tag]
}
for event in data[_EVENTS_TYPE.tag][_EVENT_TYPE.tag]:
project_name = event.get('project')
project = projects.get(project_name)
if project is None and project_name is not None:
raise Exception(("Structured metrics event '{}' references "
"nonexistent project '{}'.").format(
event['name'], project_name))
def projectAndEventNamesDontCollide(data):
"""Check that there are no events with the same name as a project."""
projects = {
project['name']
for project in data[_PROJECTS_TYPE.tag][_PROJECT_TYPE.tag]
}
for event in data[_EVENTS_TYPE.tag][_EVENT_TYPE.tag]:
if event['name'] in projects:
raise Exception(("Structured metrics event and project have the same "
"name: '{}'.").format(event['name']))
def eventNamesUnique(data):
"""Check that no two events have the same name."""
name_counts = Counter(
event['name'] for event in data[_EVENTS_TYPE.tag][_EVENT_TYPE.tag])
for name, count in name_counts.items():
if count != 1:
raise Exception(
"Structured metrics events have duplicate name '{}'.".format(name))
def projectNamesUnique(data):
"""Check that no two projects have the same name."""
name_counts = Counter(
project['name']
for project in data[_PROJECTS_TYPE.tag][_PROJECT_TYPE.tag])
for name, count in name_counts.items():
if count != 1:
raise Exception(
"Structured metrics projects have duplicate name '{}'.".format(name))
def metricNamesUniqueWithinEvent(data):
"""Check that no two metrics within an event have the same name."""
for event in data[_EVENTS_TYPE.tag][_EVENT_TYPE.tag]:
name_counts = Counter(metric['name'] for metric in event[_METRIC_TYPE.tag])
for name, count in name_counts.items():
if count != 1:
raise Exception(("Structured metrics event '{}' has duplicated metric "
"name '{}'.").format(event['name'], name))
def validate(data):
eventsReferenceValidProjects(data)
projectAndEventNamesDontCollide(data)
eventNamesUnique(data)
projectNamesUnique(data)
metricNamesUniqueWithinEvent(data)
| 2,774 |
montepython/ccl_tools.py
|
Maquiner/Monte_Python_2CCL
| 1 |
2024897
|
import re, os
import random
import numpy as np
import pymaster as nmt
import pyccl as ccl
default_pars = {
'h' : 0.67,
'Omega_c' : 0.27,
'Omega_b' : 0.045,
# 'ln10_A_s' : 3.044522,
'sigma_8' : 0.840421163375,
'n_s' : 0.96,
'w_0' : -1.0,
'w_a' : 0.0
}
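# nofz: Gaussian redshift distribution centered at z0 with width sz, normalized so
# that it integrates to ndens.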
def nofz(z,z0,sz,ndens):
return np.exp(-0.5*((z-z0)/sz)**2)*ndens/np.sqrt(2*np.pi*sz**2)
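# flatten_cls: keep only the upper triangle of the (n_bte x n_bte) Cl matrix and
# flatten it into a single data vector of length n_bte*(n_bte+1)/2 * n_ells.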
def flatten_cls(cls, n_bte, n_ells):
flat_cls = np.moveaxis(cls,[-3,-2,-1],[0,1,2])
flat_cls = flat_cls[np.triu_indices(n_bte)]
    flat_cls = flat_cls.reshape(((n_bte+1)*n_bte*n_ells//2,)+cls.shape[:-3])
return flat_cls
def get_cosmo_ccl(pars):
cosmo = ccl.Cosmology(
h = pars['h'],
Omega_c = pars['Omega_c'],
Omega_b = pars['Omega_b'],
sigma8 = pars['sigma_8'],
# A_s = (10.**(-10.))*np.exp(pars['ln10_A_s']),
n_s = pars['n_s'],
w0 = pars['w_0'],
wa = pars['w_a']
)
ccl.linear_matter_power(cosmo,0.1,0.5)
return cosmo
def get_tracers_ccl(cosmo, z, pz, bz):
n_bins = pz.shape[0]
# Tracers
tracers = []
for i in range(n_bins):
tracers.append(
ccl.NumberCountsTracer(cosmo,has_rsd=False,dndz=(z[i],pz[i]),bias=(z[i],bz[i]))
)
tracers.append(
ccl.WeakLensingTracer(cosmo,dndz=(z[i],pz[i]))
)
return np.array(tracers)
def get_cls_ccl(cosmo, tracers, ell_bp):
n_bte = tracers.shape[0]
n_ells = len(ell_bp)
cls = np.zeros([n_bte, n_bte, n_ells])
for c1 in range(n_bte): # c1=te1+b1*n_te
for c2 in range(c1, n_bte):
cls[c1,c2,:] = ccl.angular_cl(cosmo,tracers[c1],tracers[c2],ell_bp)
cls[c2,c1,:] = cls[c1,c2,:]
cls_flat = flatten_cls(cls, n_bte, n_ells)
return cls_flat
# Get data
dir = os.path.abspath('.')+'/data/covfefe/'
# Ells
ell_bp = np.load(os.path.join(dir, 'ell_bp.npz'))['lsims']
for n_bins in range(1,3):
# Build photo_z
z = np.tile(np.linspace(0,3,512),[n_bins,1])
cosmo = get_cosmo_ccl(default_pars)
bz_ref=0.95*ccl.growth_factor(cosmo,1.)/ccl.growth_factor(cosmo,1./(1+z[0]))
if n_bins==1:
pz = np.array([
nofz(z[0],0.955,0.13,7.55)
])
bz = np.tile(0.65*bz_ref,[1,1])
elif n_bins==2:
pz = np.array([
nofz(z[0],0.955,0.13,7.55),
nofz(z[1],0.755,0.13,7.55)
])
bz = np.tile(bz_ref,[2,1])
np.savez_compressed(os.path.join(dir, 'z_{}'.format(n_bins)), z)
np.savez_compressed(dir+'pz_{}'.format(n_bins), pz)
np.savez_compressed(dir+'bz_{}'.format(n_bins), bz)
# Build data
tracers = get_tracers_ccl(cosmo, z, pz, bz)
data = get_cls_ccl(cosmo, tracers, ell_bp)
cov = np.load(os.path.join(dir, 'cov_sim_{}.npz'.format(n_bins)))['arr_0']
L = np.linalg.cholesky(cov)
    u = np.random.randn(2*n_bins*(2*n_bins+1)//2*len(ell_bp))
data = data# + L.dot(u)
np.savez_compressed(dir+'cls_{}'.format(n_bins), data)
| 3,109 |
crypto/tasks.py
|
palakshivlani-11/cryptorium
| 1 |
2024048
|
from celery.task.schedules import crontab
from celery.decorators import periodic_task
from .models import *
import requests
import json
from django.shortcuts import render
from django.template.loader import render_to_string
from django.core.mail import EmailMessage
def send_buy_notifications(name,email,coin,coinprice,fetchprice):
html_version = 'buynotification.html'
context = {
'title':"Knock! Knock! Great Opportunity",
'name' : name
}
html_message = render_to_string(html_version,context)
subject = "Knock Knock! Great Opportunity is waiting!"
to_email = email
text = "Hey! " + name + ", Your coin" + coin + " has reached price "+" ₹" + str(fetchprice) + " .Its best time to buy your favourite coin."
message = EmailMessage(subject, text,'<EMAIL>', [to_email])
#message.content_subtype = 'html' # this is required because there is no plain text email version
message.send()
def send_sell_notifications(name,email,coin,coinprice,fetchprice):
html_version = 'sellnotification.html'
context = {
'title':"Knock! Knock! Great Opportunity",
'name' : name
}
html_message = render_to_string(html_version,context)
subject = "Knock Knock! Great Opportunity is waiting!"
to_email = email
text = "Hey! " + name + ", Your coin" + coin + " has reached price "+" ₹" + str(fetchprice) + " .Its best time to sell your favourite coin."
message = EmailMessage(subject, text,'<EMAIL>', [to_email])
#message.content_subtype = 'html' # this is required because there is no plain text email version
message.send()
def send_notifications(name,email):
html_version = 'normalnotification.html'
context = {
'title':"Knock! Knock! Keep Updating Yourself",
'name' : name
}
html_message = render_to_string(html_version,context)
subject = "Knock Knock! Keep Updating Yourself"
to_email = email
text = " Hey ! " + name + " Keep Updating Yourself "
message = EmailMessage(subject, text,'<EMAIL>', [to_email])
#message.content_subtype = 'html' # this is required because there is no plain text email version
message.send()
#@periodic_task(run_every=crontab(minute='*/1', day_of_week="*"))
def cryptoprices(quote):
price_request = requests.get("https://min-api.cryptocompare.com/data/pricemultifull?fsyms=" + quote + "&tsyms=INR")
price = json.loads(price_request.content)
#print(price['RAW'][quote]['INR']['PRICE'])
return price['RAW'][quote]['INR']['PRICE']
@periodic_task(run_every=crontab(minute='*/240', day_of_week="*"))
def compare():
obj = Notification.objects.all()
for i in obj:
user = i.us.username
mail = i.us.email
c = i.coin
p = i.coinprice
fetch = cryptoprices(c)
if fetch < p:
send_buy_notifications(user,mail,c,p,fetch)
elif fetch > p:
send_sell_notifications(user,mail,c,p,fetch)
else:
send_notifications(user,mail)
print(obj)
| 3,111 |
day01b.py
|
vvvictoire/advent_of_code_2019
| 0 |
2025366
|
#!/usr/bin/python3
from math import floor
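# compute_fuel: fuel = floor(weight / 3) - 2, applied recursively because the added
# fuel itself needs fuel (Advent of Code 2019, day 1 part 2).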
def compute_fuel(weight):
fuel = floor(weight / 3) - 2
if fuel > 0:
return fuel + compute_fuel(fuel)
return 0
total_fuel = 0
with open('data/01a_input') as f:
lines = f.readlines()
for line in lines:
module_weight = int(line)
fuel_needed = compute_fuel(module_weight)
total_fuel += fuel_needed
print(total_fuel)
| 410 |
example/test-factories/data_provider.py
|
AutomatedOwl/ptf-pytest
| 1 |
2023489
|
from ctr_table import CtrTable
from lpm_table import LpmTable
from sp_table import SpTable
from collections import namedtuple
from ptf.testutils import *
import consts
import allure
class DataProvider:
# Create data bundles for tests
PacketBundle = namedtuple("PacketBundle", ["ports", "packet"])
LpmEntry = namedtuple("LpmEntry", ["sai_def", "ports", "vlan"])
ScratchEntry = namedtuple("ScratchEntry", ["sai_def", "ports", "vlan"])
CounterEntry = namedtuple("CounterEntry", ["sai_def", "ports", "vlan"])
# Create test data bundle as namedtuple
TestDataBundle = namedtuple("TestDataBundle", ["lpm_entries", "sp_entries", "ctr_entries", "packets"])
TestDataBundle.__new__.__defaults__ = (None,) * len(TestDataBundle._fields)
def __init__(self, device, topology, layer):
self.device = device
self.topology = topology
self.test_config_map = self.config_data_for_tests()
def config_data_for_tests(self):
# Map each test to configuration
return {
"test_send_one_pkt": self.get_common_data(),
"test_send_two_pkts": self.get_common_data(),
"test_send_receive_pkt": self.TestDataBundle(packets = [
self.PacketBundle(consts.PORT_0, self.generate_packet(consts.IP_ID_0)),
self.PacketBundle(consts.PORT_2, self.generate_packet(consts.IP_ID_2))],
lpm_entries = [self.LpmEntry(consts.SAI_DEF, consts.PORT_0, consts.VLAN_0),
self.LpmEntry(consts.SAI_DEF, consts.PORT_1, consts.VLAN_1)])
}
def get_common_data(self):
return self.TestDataBundle(packets = [
self.PacketBundle(consts.PORT_0, self.generate_packet(consts.IP_ID_0)),
self.PacketBundle(consts.PORT_1, self.generate_packet(consts.IP_ID_1))],
lpm_entries = [self.LpmEntry(consts.SAI_DEF, consts.PORT_0, consts.VLAN_0),
self.LpmEntry(consts.SAI_DEF, consts.PORT_1, consts.VLAN_1)])
@allure.step
def get_data_per_test(self, test_method):
print("Test method: " + test_method)
#print("TEST CONFIG: " + str(self.test_config_map))
return self.test_config_map[test_method]
def generate_packet(self, id):
return simple_tcp_packet(eth_dst='00:11:11:11:11:11',
eth_src='00:22:22:22:22:22',
ip_dst='10.0.0.1',
ip_id=id,
ip_ttl=64)
| 2,550 |
classFraction.py
|
bourneagain/pythonBytes
| 1 |
2026023
|
class fraction(object):
def __init__(self,n,d):
self.n,self.d=n,d
def __repr__(self):
return "X/Y=" + str(self.n) +"/" +str(self.d)
number=fraction(2,3)
print number
| 175 |
src/komtet_kassa_sdk/v2/employee.py
|
Motmom/komtet-kassa-python-sdk
| 4 |
2023092
|
# coding: utf-8
class EmployeeType(object):
"""Тип Сотрудника"""
COURIER = 'courier'
"""Курьер"""
CASHIER = 'cashier'
"""Кассир"""
DRIVER = 'driver'
"""Водитель"""
class Employee(object):
"""
:param EmployeeType type: Тип сотрудника
:param str name: ФИО сотрудника
:param str login: Логин сотрудника
:param str password: <PASSWORD> сотрудника
:param str pos_id: ID кассы
:param str inn: ИНН сотрудника
:param str phone: Телефон сотрудника
:param str email: Email сотрудника
"""
def __init__(self, type, name, login, password, pos_id, inn=None, phone=None, email=None):
self.__data = {
'type': type,
'name': name,
'login': login,
'password': password,
'pos_id': pos_id
}
if inn:
self.__data['inn'] = inn
if phone:
self.__data['phone'] = phone
if email:
self.__data['email'] = email
def __iter__(self):
for item in self.__data.items():
yield item
def __getitem__(self, item):
return self.__data[item]
def set_payment_address(self, payment_address):
"""
Установка адреса места рассчета
:param str payment_address: Адрес места рассчета
"""
self.__data['payment_address'] = payment_address
def set_access_settings(self, is_manager=None, is_can_assign_order=None,
is_app_fast_basket=None):
"""
Установка настроек доступа
:param bool is_manager: Разрешить в приложении редактировать и создавать заказы
:param bool is_can_assign_order: Разрешить просматривать весь список свободных заказов и
выбирать из него
:param bool is_app_fast_basket: Переходить в корзину сразу после выбора товара
"""
if is_manager:
self.__data['is_manager'] = is_manager
if is_can_assign_order:
self.__data['is_can_assign_order'] = is_can_assign_order
if is_app_fast_basket:
self.__data['is_app_fast_basket'] = is_app_fast_basket
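# Minimal usage sketch -- the field values below are illustrative placeholders, not real data.
if __name__ == '__main__':
    employee = Employee(EmployeeType.COURIER, 'Ivan Ivanov', 'ivan', 'example-password', 'pos-1')
    employee.set_access_settings(is_manager=True)
    print(dict(employee))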
| 2,181 |
cloudknot/aws/ecr.py
|
36000/cloudknot
| 0 |
2024668
|
from __future__ import absolute_import, division, print_function
import cloudknot.config
import logging
from collections import namedtuple
from .base_classes import NamedObject, clients, get_ecr_repo
__all__ = []
def registered(fn):
__all__.append(fn.__name__)
return fn
mod_logger = logging.getLogger(__name__)
# noinspection PyPropertyAccess,PyAttributeOutsideInit
@registered
class DockerRepo(NamedObject):
"""Class for creating and managing remote docker repositories"""
def __init__(self, name):
"""Initialize a Docker repo object.
User may provide only `name` input, indicating that they would
like to retrieve a pre-existing repo/image from AWS ECR. Or, if
the repo does not exist, it will be created.
Parameters
----------
name : str
Name of the remote repository
"""
super(DockerRepo, self).__init__(name=name)
# Create repo
repo_info = self._create_repo()
self._repo_uri = repo_info.uri
self._repo_registry_id = repo_info.registry_id
# Add to config file
self._section_name = self._get_section_name("docker-repos")
cloudknot.config.add_resource(self._section_name, self.name, self.repo_uri)
# Declare read only properties
@property
def repo_uri(self):
"""URI for this AWS ECR repository"""
return self._repo_uri
@property
def repo_registry_id(self):
"""Registry ID for this AWS ECR repository"""
return self._repo_registry_id
def _create_repo(self):
"""Create or retrieve an AWS ECR repository
Returns
-------
RepoInfo : namedtuple
a namedtuple with fields name, uri, and registry_id
"""
try:
# If repo exists, retrieve its info
response = clients["ecr"].describe_repositories(repositoryNames=[self.name])
repo_name = response["repositories"][0]["repositoryName"]
repo_uri = response["repositories"][0]["repositoryUri"]
repo_registry_id = response["repositories"][0]["registryId"]
mod_logger.info(
"Repository {name:s} already exists at "
"{uri:s}".format(name=self.name, uri=repo_uri)
)
except clients["ecr"].exceptions.RepositoryNotFoundException:
            # If it doesn't exist already, then create it
response = clients["ecr"].create_repository(repositoryName=self.name)
repo_name = response["repository"]["repositoryName"]
repo_uri = response["repository"]["repositoryUri"]
repo_registry_id = response["repository"]["registryId"]
mod_logger.info(
"Created repository {name:s} at {uri:s}".format(
name=self.name, uri=repo_uri
)
)
# Define and return namedtuple with repo info
RepoInfo = namedtuple("RepoInfo", ["name", "uri", "registry_id"])
return RepoInfo(name=repo_name, uri=repo_uri, registry_id=repo_registry_id)
def clobber(self):
"""Delete this remote repository"""
if self.clobbered:
return
self.check_profile_and_region()
if self.name != get_ecr_repo():
try:
# Remove the remote docker image
clients["ecr"].delete_repository(
registryId=self.repo_registry_id,
repositoryName=self.name,
force=True,
)
except clients["ecr"].exceptions.RepositoryNotFoundException:
# It doesn't exist anyway, so carry on
pass
# Remove from the config file
cloudknot.config.remove_resource(self._section_name, self.name)
# Set the clobbered parameter to True,
# preventing subsequent method calls
self._clobbered = True
mod_logger.info("Clobbered docker repo {name:s}".format(name=self.name))
| 4,032 |
tests/manual_test_idpmodem_thread.py
|
Inmarsat/idpmodem
| 3 |
2026081
|
#!/usr/bin/env python
import argparse
import inspect
import pprint
import sys
import time
import unittest
from idpmodem.atcommand_thread import get_modem_thread, IdpModemBusy, AtException, AtCrcConfigError, AtCrcError, AtTimeout
DEFAULT_PORT = '/dev/ttyUSB1'
class IdpModemTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
user_options = parse_args(sys.argv)
port = user_options['port']
print("Setting up modem for test cases...")
(modem, thread) = get_modem_thread()
cls.modem = modem
cls.modem_thread = thread
cls.event_callback = None
cls.new_mt_messages = False
cls.mt_message_being_retrieved = None
cls.mo_msg_complete = False
cls.mt_messages = []
cls.mo_messages = []
cls.location_pending = False
cls.tracking_count = 0
cls.on_message_pending = False
cls.test_case = 0
def setUp(self):
sleep_time = 3
print("\n*** NEXT TEST CASE STARTING IN {}s ***".format(sleep_time))
time.sleep(sleep_time)
def tearDown(self):
print("*** TEST CASE {} COMPLETE ***".format(self.test_case))
def display_tc_header(self, more_info=None):
calling_function = inspect.stack()[1][3]
func_tags = calling_function.split('_')
self.test_case = int(func_tags[1])
func_name = func_tags[2].upper()
if len(func_tags) > 2:
for i in range(3, len(func_tags)):
func_name += ' ' + func_tags[i].upper()
if more_info is not None and isinstance(more_info, dict):
            for k, v in more_info.items():
func_name += ' ({}={})'.format(k, v)
print("\n*** TEST CASE {} - {} ***".format(self.test_case, func_name))
def action_prompt(self, message, ref_time, tick=5):
if time.time() - ref_time >= tick:
ref_time = time.time()
message = '\n** ' + 'TEST CASE {} - '.format(self.test_case) + message + ' **\n'
wrapper = '*' * len(message.strip())
print('{}{}{}'.format(wrapper, message, wrapper))
return ref_time
def test_01_connection(self):
self.display_tc_header()
while not self.modem.connected:
pass
self.assertTrue(self.modem.connected)
def test_02_sregisters(self):
self.display_tc_header()
at_config, reg_config = self.modem.config_report()
print('{}\n{}'.format(at_config, reg_config))
self.assertTrue(at_config and reg_config)
def test_03_crc_enable(self):
self.display_tc_header()
self.modem.config_crc_enable(True)
self.assertTrue(self.modem.crc)
def test_04_crc_disable(self):
self.display_tc_header()
self.modem.config_crc_enable(False)
self.assertFalse(self.modem.crc)
def test_05_device_mobile_id(self):
self.display_tc_header()
mobile_id = self.modem.device_mobile_id()
print('Mobile ID: {}'.format(mobile_id))
self.assertTrue(len(mobile_id) == 15)
def test_06_device_versions(self):
self.display_tc_header()
versions = self.modem.device_version()
pprint.pprint(versions)
self.assertTrue(isinstance(versions, object))
def test_07_location_get(self):
self.display_tc_header()
location = self.modem.location_get()
print(pprint.pformat(vars(location), indent=2, width=1))
self.assertTrue(isinstance(location, object))
def test_08_lowpower_notifications_set(self):
self.display_tc_header()
notifications = self.modem.lowpower_notifications_enable()
self.assertTrue(notifications)
def test_09_notification_check(self):
self.display_tc_header()
notifications = self.modem.lowpower_notification_check()
print('{}'.format(notifications))
self.assertTrue(isinstance(notifications, list))
def test_10_message_mo_send(self):
self.display_tc_header()
msg_name = self.modem.message_mo_send(data='TEST10',
data_format=1,
sin=128)
print('MO message assigned name: {}'.format(msg_name))
self.mo_messages.append(msg_name)
self.assertTrue(isinstance(msg_name, str))
def test_11_message_mo_state(self):
self.display_tc_header()
states = self.modem.message_mo_state()
pprint.pprint(states)
self.assertTrue(isinstance(states, list))
def parse_args(argv):
"""
Parses the command line arguments.
:param argv: An array containing the command line arguments
:returns: A dictionary containing the command line arguments and their values
"""
parser = argparse.ArgumentParser(description="Interface with an IDP modem.")
parser.add_argument('-p', '--port', dest='port', type=str, default=DEFAULT_PORT,
help="the serial port of the IDP modem")
return vars(parser.parse_args(args=argv[1:]))
def suite():
suite = unittest.TestSuite()
available_tests = unittest.defaultTestLoader.getTestCaseNames(IdpModemTestCase)
tests = [
'test_02_sregisters',
# Add test cases above as strings or leave empty to test all cases
]
if len(tests) > 0:
for test in tests:
for available_test in available_tests:
if test in available_test:
suite.addTest(IdpModemTestCase(available_test))
else:
for available_test in available_tests:
suite.addTest(IdpModemTestCase(available_test))
return suite
if __name__ == '__main__':
runner = unittest.TextTestRunner()
runner.run(suite())
| 5,759 |
problems/egions-cut-by-slashes.py
|
sailikhithk/tech-interview-prep
| 0 |
2025138
|
class Solution(object):
def regionsBySlashes(self, grid):
region = 1
dsu = DSU()
N = len(grid)
        #Union the border lines
        for i in range(N):
            dsu.union((i, 0), (i+1, 0))
            dsu.union((i, N), (i+1, N))
        for j in range(N):
            dsu.union((0, j), (0, j+1))
            dsu.union((N, j), (N, j+1))
        #Iterate through slashes and connect the dots
        #If the slash connects two already connected dots, the region will increment by one
for i, row in enumerate(grid):
for j, slash in enumerate(row):
if slash=='/':
if not dsu.union((j+1, i), (j, i+1)):
region += 1
elif slash=='\\':
if not dsu.union((j, i), (j+1, i+1)):
region += 1
return region
class DSU(object):
    def __init__(self):
        self.parent = {}
    def find(self, x):
        if x not in self.parent:
            self.parent[x] = x
        if self.parent[x] != x:
            self.parent[x] = self.find(self.parent[x])
        return self.parent[x]
    def union(self, x, y):
        xr, yr = self.find(x), self.find(y)
        if xr == yr: return False
        self.parent[yr] = xr
return True
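# Minimal usage sketch of the union-find based solution (grid from the classic example,
# where ' ' is an empty cell and '/' splits a cell):
if __name__ == '__main__':
    print(Solution().regionsBySlashes([" /", "/ "]))   # expected: 2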
| 1,316 |
src/plot.py
|
astrophys/elements-of-stat-learn
| 0 |
2025615
|
import matplotlib.pyplot as plt
from error import exit_w_error
def plot_data(ScatterDataL = None, LineDataL = None, Comment = None):
"""
ARGS:
ScatterDataL = List of groups' x,y data
e.g [ [grp1x, grp1y], [grp2x,grp2y], ...]
LineDataL = (Optional) preferably the line data generated by
using beta from linear_regression
RETURN:
DESCRIPTION:
Plots data from ESL by Hastie, Fig. 2.1
NOTES:
DEBUG:
FUTURE:
"""
print("{}".format(Comment))
colors = ['blue', '#ffa500', 'red']
### Will plot arbitrary number of data sets up to len(colors) ###
for grpIdx in range(len(ScatterDataL)):
plt.scatter(x=ScatterDataL[grpIdx][0], y=ScatterDataL[grpIdx][1],
edgecolors=colors[grpIdx], marker="o", facecolors='none',
label='group {}'.format(grpIdx))
### Plot regressed line fit ###
    if LineDataL is not None and len(LineDataL) != 0:
#for lnIdx in range(len(LineDataL)):
plt.plot(LineDataL[0], LineDataL[1], 'r-')
plt.legend()
plt.show()
| 1,102 |
building tree model from input 2/main.py
|
sihyunglee26/Program-Machine-2022
| 0 |
2026214
|
import argparse
import command
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='-c filename')
parser.add_argument('-c', help='input file that describes usable commands', default='command-1-line.txt')
args = parser.parse_args()
print(args)
expressions = command.read_file(args.c) # parse expressions
# display each parsed expressions
for i in expressions:
print(str(i)+':')
print(expressions[i].toString())
print()
# create a single tree that combines all expressions
tree = command.deepcopyExpression(expressions['e0'], 0, 20)
#print(tree.toString())
| 683 |
app/core/models.py
|
indrajeetzade/recipe-app-api
| 0 |
2026255
|
from django.db import models
from django.contrib.auth.models import AbstractBaseUser,BaseUserManager,PermissionsMixin
from django.conf import settings
import uuid
import os
def recipe_image_file_path(instance, filename):
"""Generate file path for new recipe image"""
ext = filename.split('.')[-1]
filename = f'{uuid.uuid4()}.{ext}'
return os.path.join('uploads/recipe/', filename)
class UserManager(BaseUserManager):
def create_user(self,email,password=None,**extra_fields):
""" Creates and saves new user"""
if not email:
raise ValueError("User must have email address")
user=self.model(email=self.normalize_email(email),**extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self,email,password):
"""Creates super user and save"""
user=self.create_user(email,password)
user.is_superuser=True
user.is_staff=True
user.save(using=self._db)
return user
class User(AbstractBaseUser,PermissionsMixin):
"""Custome user model that uses email in place of username"""
email=models.EmailField(max_length=255,unique=True)
name=models.CharField(max_length=255)
is_active=models.BooleanField(default=True)
is_staff=models.BooleanField(default=False)
objects=UserManager()
USERNAME_FIELD='email'
class Tag(models.Model):
"""Tag to be used for recipe"""
name=models.CharField(max_length=255)
user=models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
)
def __str__(self):
return self.name
class Ingredient(models.Model):
"""Ingredient to be used in a recipe"""
name=models.CharField(max_length=255)
user=models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
)
def __str__(self):
return self.name
class Recipe(models.Model):
""" Recipe object """
user=models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE
)
title=models.CharField(max_length=255)
time_minutes=models.IntegerField()
price=models.DecimalField(max_digits=5,decimal_places=2)
link=models.CharField(max_length=255,blank=True)
ingredients=models.ManyToManyField('Ingredient')
tags=models.ManyToManyField('Tag')
image = models.ImageField(null=True, upload_to=recipe_image_file_path)
def __str__(self):
return self.title
| 2,275 |
setup.py
|
WaYdotNET/urllib_s3
| 6 |
2025352
|
# coding: utf-8
import sys
from setuptools import find_packages, setup
with open("README.md") as f:
long_description = f.read()
NAME = "urllib_s3"
VERSION = '0.0.6'
REQUIRES = [
'setuptools >= 21.0.0',
'six >= 1.9.0',
'boto3 == 1.9.97'
]
needs_pytest = {'pytest', 'test', 'ptr'}.intersection(sys.argv)
pytest_runner = ['pytest-runner'] if needs_pytest else []
setup(
name=NAME,
version=VERSION,
description="S3 handler for urllib",
author='<NAME>',
author_email="<EMAIL>",
url='https://github.com/WaYdotNET/urllib_s3',
license='MIT License',
keywords=["urllib", "s3", "urllib handler", 'minio', "aws", "boto3"],
package_dir={'': 'lib'},
install_requires=REQUIRES,
packages=find_packages('lib'),
long_description=long_description,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
setup_requires=pytest_runner,
tests_require=[
'pytest',
'pytest-mock',
'pytest-cov',
'pytest-flake8',
'pytest-isort',
'pytest-runner'
],
)
| 1,467 |
helpdesk/api.py
|
sagar30051991/helpdesk
| 1 |
2026311
|
import frappe
import handler
from response import get_response
from validate import validate_request
def handle():
"""
Handler for `/helpdesk` methods
### Examples:
`/helpdesk/method/{methodname}` will call a whitelisted method
"""
try:
validate_request()
return handler.handle()
    except Exception as e:
        import traceback
        print(traceback.format_exc())
        return get_response(message=str(e))
| 402 |
setup.py
|
joshtronic/python-holidayapi
| 18 |
2026030
|
from setuptools import setup
setup(
name='python-holidayapi',
version='1.1.0',
description='Official Python library for Holiday API',
url='https://holidayapi.com',
author='<NAME>',
author_email='<EMAIL>',
keywords=['python','holidayapi','holiday','api'],
license='MIT',
packages=['holidayapi']
)
| 334 |
harvester/gene_enricher.py
|
Shane-Neeley/g2p-aggregator
| 0 |
2025639
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import json
# load gene names
# ftp://ftp.ebi.ac.uk/pub/databases/genenames/new/json/non_alt_loci_set.json
GENES = {}
ALIASES = {}
# trim payload, we only need symbol and ensembl
data = json.load(open('../data/non_alt_loci_set.json'))
for doc in data['response']['docs']:
gene = {
'symbol': doc['symbol'],
'ensembl_gene_id': doc.get('ensembl_gene_id', None),
'entrez_id': doc.get('entrez_id', None)
}
GENES[doc['symbol']] = [gene]
if gene['ensembl_gene_id']:
if gene['ensembl_gene_id'] not in ALIASES:
ALIASES[gene['ensembl_gene_id']] = []
ALIASES[gene['ensembl_gene_id']].append(gene)
if gene['entrez_id']:
if gene['entrez_id'] not in ALIASES:
ALIASES[gene['entrez_id']] = []
ALIASES[gene['entrez_id']].append(gene)
for alias in doc.get('alias_symbol', []):
if alias not in ALIASES:
ALIASES[alias] = []
ALIASES[alias].append(gene)
for prev in doc.get('prev_symbol', []):
if prev not in ALIASES:
ALIASES[prev] = []
ALIASES[prev].append(gene)
data = None
def get_gene(identifier):
    """ return gene for identifier """
    for store in [GENES, ALIASES]:
        genes = store.get(identifier, None)
        if genes and len(genes) == 1:
            return genes
    raise ValueError('gene reference does not exist or refers to multiple genes')
def normalize_feature_association(feature_association):
""" add gene_identifiers array to feature_association """
gene_identifiers = []
for gene_symbol in feature_association['genes']:
try:
gene = get_gene(gene_symbol)
except:
gene = None
if (gene):
gene_identifiers.extend(gene)
feature_association['gene_identifiers'] = gene_identifiers
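# Usage sketch (assumes ../data/non_alt_loci_set.json was present at import time;
# 'TP53' is only an illustrative symbol):
#
#   genes = get_gene('TP53')             # -> single-element list with symbol/ensembl/entrez ids
#   fa = {'genes': ['TP53']}
#   normalize_feature_association(fa)    # adds fa['gene_identifiers']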
| 1,895 |
bezier2arc/__init__.py
|
phenaff/bezier2arc
| 0 |
2023966
|
from .bezier2arc import (convert_file, list_paths, colorize, get_parser, circle_from_points, convert_to_svg)
| 108 |
tests/metrics/test_core.py
|
fangchenli/zipline
| 1 |
2026197
|
from types import MappingProxyType
from zipline.finance.metrics.core import _make_metrics_set_core
from zipline.testing.fixtures import ZiplineTestCase
from zipline.testing.predicates import (
assert_equal,
assert_is,
assert_raises_str,
)
class MetricsSetCoreTestCase(ZiplineTestCase):
def init_instance_fixtures(self):
super().init_instance_fixtures()
self.metrics_sets, self.register, self.unregister, self.load = (
_make_metrics_set_core()
)
# make sure this starts empty
assert_equal(self.metrics_sets, MappingProxyType({}))
def test_load_not_registered(self):
msg = "no metrics set registered as 'ayy-lmao', options are: []"
with assert_raises_str(ValueError, msg):
self.load('ayy-lmao')
# register in reverse order to test the sorting of the options
self.register('c', set)
self.register('b', set)
self.register('a', set)
msg = (
"no metrics set registered as 'ayy-lmao', options are: "
"['a', 'b', 'c']"
)
with assert_raises_str(ValueError, msg):
self.load('ayy-lmao')
def test_register_decorator(self):
ayy_lmao_set = set()
@self.register('ayy-lmao')
def ayy_lmao():
return ayy_lmao_set
expected_metrics_sets = MappingProxyType({'ayy-lmao': ayy_lmao})
assert_equal(self.metrics_sets, expected_metrics_sets)
assert_is(self.load('ayy-lmao'), ayy_lmao_set)
msg = "metrics set 'ayy-lmao' is already registered"
with assert_raises_str(ValueError, msg):
@self.register('ayy-lmao')
def other(): # pragma: no cover
raise AssertionError('dead')
# ensure that the failed registration didn't break the previously
# registered set
assert_equal(self.metrics_sets, expected_metrics_sets)
assert_is(self.load('ayy-lmao'), ayy_lmao_set)
self.unregister('ayy-lmao')
assert_equal(self.metrics_sets, MappingProxyType({}))
msg = "no metrics set registered as 'ayy-lmao', options are: []"
with assert_raises_str(ValueError, msg):
self.load('ayy-lmao')
msg = "metrics set 'ayy-lmao' was not already registered"
with assert_raises_str(ValueError, msg):
self.unregister('ayy-lmao')
def test_register_non_decorator(self):
ayy_lmao_set = set()
def ayy_lmao():
return ayy_lmao_set
self.register('ayy-lmao', ayy_lmao)
expected_metrics_sets = MappingProxyType({'ayy-lmao': ayy_lmao})
assert_equal(self.metrics_sets, expected_metrics_sets)
assert_is(self.load('ayy-lmao'), ayy_lmao_set)
def other(): # pragma: no cover
raise AssertionError('dead')
msg = "metrics set 'ayy-lmao' is already registered"
with assert_raises_str(ValueError, msg):
self.register('ayy-lmao', other)
# ensure that the failed registration didn't break the previously
# registered set
assert_equal(self.metrics_sets, expected_metrics_sets)
assert_is(self.load('ayy-lmao'), ayy_lmao_set)
self.unregister('ayy-lmao')
assert_equal(self.metrics_sets, MappingProxyType({}))
msg = "no metrics set registered as 'ayy-lmao', options are: []"
with assert_raises_str(ValueError, msg):
self.load('ayy-lmao')
msg = "metrics set 'ayy-lmao' was not already registered"
with assert_raises_str(ValueError, msg):
self.unregister('ayy-lmao')
| 3,635 |
tests/integration/test_integration.py
|
ofples/thinglang
| 5 |
2026165
|
import glob
import os
import subprocess
import pytest
from tests.infrastructure.test_utils import ProgramTestCase
from thinglang import pipeline
from thinglang.utils import logging_utils
from thinglang.utils.source_context import SourceContext
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
SEARCH_PATTERN = os.path.join(BASE_PATH, '**/*.thing')
def collect_tests():
for path in glob.glob(SEARCH_PATTERN, recursive=True):
#if 'nested_thing_access' in path:
yield ProgramTestCase(path)
def split_lines(param):
return param.replace('\r', '').split('\n')
@pytest.mark.parametrize('test_file', collect_tests(), ids=lambda x: x.name)
def test_thing_program(test_file: ProgramTestCase):
expected_output = test_file.metadata['expected_output']
test_input = bytes('\n'.join(test_file.metadata['input']) if 'input' in test_file.metadata else '', 'utf-8')
bytecode = pipeline.compile(SourceContext.wrap(test_file.code))
logging_utils.print_header('VM execution')
with open(test_file.target_path, 'wb') as f:
f.write(bytecode.bytes())
vm = subprocess.Popen(["thinglang", test_file.target_path], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = (stream.decode('utf-8').strip() for stream in vm.communicate(test_input))
print(stderr)
logging_utils.print_header('VM output')
print(stdout)
if not isinstance(expected_output, str):
stdout = split_lines(stdout)
assert vm.returncode == 0, 'VM process crashed'
assert stdout == expected_output, 'VM output did not match expected output'
| 1,629 |
tests/python/unittest/test_multi_device_exec.py
|
axbaretto/mxnet
| 36 |
2025450
|
import os
import mxnet as mx
def test_ctx_group():
with mx.AttrScope(ctx_group='stage1'):
data = mx.symbol.Variable('data')
fc1 = mx.symbol.FullyConnected(data = data, name='fc1', num_hidden=128)
act1 = mx.symbol.Activation(data = fc1, name='relu1', act_type="relu")
set_stage1 = set(act1.list_arguments())
with mx.AttrScope(ctx_group='stage2'):
fc2 = mx.symbol.FullyConnected(data = act1, name = 'fc2', num_hidden = 64)
act2 = mx.symbol.Activation(data = fc2, name='relu2', act_type="relu")
fc3 = mx.symbol.FullyConnected(data = act2, name='fc3', num_hidden=10)
fc3 = mx.symbol.BatchNorm(fc3)
mlp = mx.symbol.SoftmaxOutput(data = fc3, name = 'softmax')
set_stage2 = set(mlp.list_arguments()) - set_stage1
group2ctx = {
'stage1' : mx.cpu(1),
'stage2' : mx.cpu(2)
}
texec = mlp.simple_bind(mx.cpu(0),
group2ctx=group2ctx,
data=(1,200))
for arr, name in zip(texec.arg_arrays, mlp.list_arguments()):
if name in set_stage1:
assert arr.context == group2ctx['stage1']
else:
assert arr.context == group2ctx['stage2']
if __name__ == '__main__':
test_ctx_group()
| 1,277 |
0x11-python-network_1/3-error_code.py
|
flourishcodes/holbertonschool-higher_level_programming
| 0 |
2025654
|
#!/usr/bin/python3
# Python script to send request to URL and display error code
if __name__ == "__main__":
import urllib.request as ur
import urllib.error as ure
from sys import argv
req = ur.Request(argv[1])
try:
with ur.urlopen(req) as response:
print(str(response.read(), 'utf-8'))
except ure.HTTPError as err:
print('Error code: {}'.format(err.getcode()))
| 414 |
RCT_VOLUME_CHANGE.py
|
yatharthahuja/Volume-of-Tooth-RCT-by-Segmentation
| 0 |
2024584
|
if __name__ == "__main__":
file_pre = open("./results/pre-scan-results.txt","r")
file_post = open("./results/post-scan-results.txt","r")
pre_scan_vol = float(file_pre.readline())
post_scan_vol = float(file_post.readline())
vol_change = post_scan_vol - pre_scan_vol
percent_change = (vol_change*100)/pre_scan_vol
file_pre.close()
file_post.close()
print("**********************************")
print("TOTAL TOOTH RCT VOLUME CHANGE: "+ str(vol_change) + " cubic microns")
print("TOTAL TOOTH RCT VOLUME PERCENTAGE CHANGE: "+ str(percent_change) + " %")
print("**********************************")
file = open("./results/final-scan-results.txt","w")
L = ["Volume Change ="+str(vol_change), " | Percentage Volume Change ="+str(percent_change)]
file.writelines(L)
file.close()
| 794 |
app/home/urls.py
|
ankitoscar/anxdoc
| 0 |
2023515
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.home, name='home'),
path('video_feed/', views.video_feed, name='video_feed'),
]
| 168 |
dash_carbon_components/_imports_.py
|
Matheus-Rangel/dash-carbon-components
| 4 |
2025380
|
from .Button import Button
from .Card import Card
from .Checkbox import Checkbox
from .Column import Column
from .DatePicker import DatePicker
from .DatePickerRange import DatePickerRange
from .Dropdown import Dropdown
from .Grid import Grid
from .Link import Link
from .MultiSelect import MultiSelect
from .NumberInput import NumberInput
from .RadioButtonGroup import RadioButtonGroup
from .Row import Row
from .Slider import Slider
from .Tab import Tab
from .Tabs import Tabs
from .UIShell import UIShell
__all__ = [
"Button",
"Card",
"Checkbox",
"Column",
"DatePicker",
"DatePickerRange",
"Dropdown",
"Grid",
"Link",
"MultiSelect",
"NumberInput",
"RadioButtonGroup",
"Row",
"Slider",
"Tab",
"Tabs",
"UIShell"
]
| 782 |
class_multiclass/multiclass.py
|
jmhernan/NIreland_NLP
| 1 |
2025334
|
import os
from pathlib import Path
import re
import pandas as pd
import numpy as np
import nltk
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import confusion_matrix
from sklearn.metrics import plot_confusion_matrix
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
import matplotlib.pyplot as plt
from nltk.corpus import stopwords
import nltk
from nltk.tokenize import word_tokenize
nltk.download('stopwords')
this_file_path = os.path.abspath(__file__)
folder_root = os.path.split(this_file_path)[0]
repo_root = os.path.split(folder_root)[0]
repo_path = os.path.join(repo_root)
df_just = pd.read_csv(os.path.join(repo_path, 'justifications_clean_text_ohe.csv'))
# Create a unique number id for each justification category
from io import StringIO
col = ['justification_cat', 'clean_text']
df = df_just[col]
df = df[pd.notnull(df['clean_text'])]
#df.columns = ['justification_cat', 'clean_text'] # this line not necessary
df['category_id'] = df['justification_cat'].factorize()[0]
category_id_df = df[['justification_cat', 'category_id']].drop_duplicates().sort_values('category_id')
category_to_id = dict(category_id_df.values)
id_to_category = dict(category_id_df[['category_id', 'justification_cat']].values)
df.head
# Collapse justification categories from 12 to 7
df['category_id2'] = df['justification_cat']
df['category_id2'] = df['category_id2'].replace('J_Intl-Domestic_Precedent', 'J_Denial')
df['category_id2'] = df['category_id2'].replace(['J_Utilitarian-Deterrence', 'J_Intelligence', 'J_Law-and-order', 'J_Development-Unity'], 'J_Outcome')
df['category_id2'] = df['category_id2'].replace('J_Last-resort', 'J_Emergency-Policy')
df['category_id2'].unique()
### If you want to look at aggregate categories rather than original, rename as follows:
df['justification_cat'] = df['category_id2']
df['category_id'] = df['justification_cat'].factorize()[0]
category_id_df = df[['justification_cat', 'category_id']].drop_duplicates().sort_values('category_id')
category_to_id = dict(category_id_df.values)
id_to_category = dict(category_id_df[['category_id', 'justification_cat']].values)
df.head
###### Function to remove stopwords (optional) ######
def rmv_stopwords(sent):
STOPWORDS = set(stopwords.words("english"))
sent = [' '.join(word for word in x.split() if word not in STOPWORDS) for x in sent.tolist()]
return sent
sentences_nosw = rmv_stopwords(df['clean_text'])
### Set X and Y for training and testing set
sentences = pd.Series(sentences_nosw).values # exclude stopwords
# sentences = df['clean_text'].values # include stopwords
y = df['justification_cat'].values
sentences_train, sentences_test, y_train, y_test = train_test_split(
sentences, y, test_size=0.25, random_state=1000)
multinom = Pipeline([('vect', CountVectorizer()), #vectorizes
('tfidf', TfidfTransformer()), #term frequency inverse document frequency
#('tfidf', TfidfVectorizer(sublinear_tf=True, min_df=5, norm='l2', encoding='latin-1', ngram_range=(1, 2), stop_words='english')),
('multiclass', MultinomialNB()), #model (Naive Bayes)
])
classifier = multinom.fit(sentences_train, y_train)
y_pred = multinom.predict(sentences_test)
print('accuracy %s' % accuracy_score(y_pred, y_test))
print(classification_report(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))
# Precision: Percentage related to true positives, taking false positives into consideration
# Recall: True positives while taking into consideration false negatives
# F1 score: Balance between recall and precision; measure of true positives, balancing false positives and false negatives
# Accuracy: Limited metric, only looking at true positives and true negatives; this is good if you have well-balanced data (bw classes)
# Support: True number of incidents in a given category in the TESTING data
np.unique(y_test, return_counts=True)
## Visualize confusion matrix
np.set_printoptions(precision=2)
# Plot non-normalized and normalized confusion matrix
titles_options = [("Confusion matrix, without normalization", None, "not_normalized"),
("Normalized confusion matrix", 'true', "normalized")]
for title, normalize, short_title in titles_options:
disp = plot_confusion_matrix(classifier, sentences_test, y_test,
#display_labels=id_to_category,
display_labels=category_to_id,
cmap=plt.cm.Blues,
normalize=normalize)
disp.ax_.set_title(title)
plt.xticks(np.arange(0, len(category_to_id)), category_to_id, rotation=60, ha='right')
print(title)
print(disp.confusion_matrix)
#plt.savefig('multiclass_NB/confusion_matrix12_' + short_title + '.png')
plt.savefig('multiclass_NB/confusion_matrix7_' + short_title + '.png')
plt.close()
########################################################
#### Grid search hyperparameters + features ############
########################################################
# From website:
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf = TfidfVectorizer(sublinear_tf=True, min_df=5, norm='l2', encoding='latin-1', ngram_range=(1, 2), stop_words='english')
#min_df: minimum number of documents a word must be present in to be kept
#norm: l2 to ensure all our feature vectors have a Euclidean norm of 1.
#ngram_range is set to (1, 2) to indicate that we want to consider both unigrams and bigrams.
#stop_words is set to "english"
features = tfidf.fit_transform(sentences).toarray()
labels = df.justification_cat
features.shape
| 5,866 |
utils/compressed_sensing.py
|
bencottier/Deep-MRI-Reconstruction
| 0 |
2025925
|
import numpy as np
from . import mymath
from numpy.lib.stride_tricks import as_strided
def soft_thresh(u, lmda):
"""Soft-threshing operator for complex valued input"""
Su = (abs(u) - lmda) / abs(u) * u
Su[abs(u) < lmda] = 0
return Su
def normal_pdf(length, sensitivity):
return np.exp(-sensitivity * (np.arange(length) - length / 2)**2)
def var_dens_mask(shape, ivar, sample_high_freq=True):
"""Variable Density Mask (2D undersampling)"""
if len(shape) == 3:
Nt, Nx, Ny = shape
else:
Nx, Ny = shape
Nt = 1
pdf_x = normal_pdf(Nx, ivar)
pdf_y = normal_pdf(Ny, ivar)
pdf = np.outer(pdf_x, pdf_y)
size = pdf.itemsize
strided_pdf = as_strided(pdf, (Nt, Nx, Ny), (0, Ny * size, size))
# this must be false if undersampling rate is very low (around 90%~ish)
if sample_high_freq:
strided_pdf = strided_pdf / 1.25 + 0.02
mask = np.random.binomial(1, strided_pdf)
    xc = Nx // 2
    yc = Ny // 2
mask[:, xc - 4:xc + 5, yc - 4:yc + 5] = True
if Nt == 1:
return mask.reshape((Nx, Ny))
return mask
def cartesian_mask(shape, acc, sample_n=10, centred=False):
"""
Sampling density estimated from implementation of kt FOCUSS
shape: tuple - of form (..., nx, ny)
acc: float - doesn't have to be integer 4, 8, etc..
"""
N, Nx, Ny = int(np.prod(shape[:-2])), shape[-2], shape[-1]
pdf_x = normal_pdf(Nx, 0.5/(Nx/10.)**2)
lmda = Nx/(2.*acc)
n_lines = int(Nx / acc)
# add uniform distribution
pdf_x += lmda * 1./Nx
if sample_n:
        pdf_x[Nx//2-sample_n//2:Nx//2+sample_n//2] = 0
pdf_x /= np.sum(pdf_x)
n_lines -= sample_n
mask = np.zeros((N, Nx))
    for i in range(N):
idx = np.random.choice(Nx, n_lines, False, pdf_x)
mask[i, idx] = 1
if sample_n:
        mask[:, Nx//2-sample_n//2:Nx//2+sample_n//2] = 1
size = mask.itemsize
mask = as_strided(mask, (N, Nx, Ny), (size * Nx, size, 0))
mask = mask.reshape(shape)
if not centred:
mask = mymath.ifftshift(mask, axes=(-1, -2))
return mask
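# Usage sketch: a 4x-accelerated Cartesian mask for ten 256x256 frames with 10 fully
# sampled central lines (argument meanings follow the docstring above).
#
#   mask = cartesian_mask((10, 256, 256), acc=4, sample_n=10)
#   print(mask.shape, undersampling_rate(mask))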
def shear_grid_mask(shape, acceleration_rate, sample_low_freq=True,
centred=False, sample_n=10):
'''
Creates undersampling mask which samples in sheer grid
Parameters
----------
shape: (nt, nx, ny)
acceleration_rate: int
Returns
-------
array
'''
Nt, Nx, Ny = shape
start = np.random.randint(0, acceleration_rate)
mask = np.zeros((Nt, Nx))
    for t in range(Nt):
        mask[t, (start+t)%acceleration_rate::acceleration_rate] = 1
    xc = Nx // 2
    xl = sample_n // 2
if sample_low_freq and centred:
xh = xl
if sample_n % 2 == 0:
xh += 1
mask[:, xc - xl:xc + xh+1] = 1
elif sample_low_freq:
xh = xl
if sample_n % 2 == 1:
xh -= 1
if xl > 0:
mask[:, :xl] = 1
if xh > 0:
mask[:, -xh:] = 1
mask_rep = np.repeat(mask[..., np.newaxis], Ny, axis=-1)
return mask_rep
def perturbed_shear_grid_mask(shape, acceleration_rate, sample_low_freq=True,
centred=False,
sample_n=10):
Nt, Nx, Ny = shape
start = np.random.randint(0, acceleration_rate)
mask = np.zeros((Nt, Nx))
    for t in range(Nt):
mask[t, (start+t)%acceleration_rate::acceleration_rate] = 1
# brute force
rand_code = np.random.randint(0, 3, size=Nt*Nx)
shift = np.array([-1, 0, 1])[rand_code]
new_mask = np.zeros_like(mask)
    for t in range(Nt):
        for x in range(Nx):
            if mask[t, x]:
                new_mask[t, (x + shift[t*x])%Nx] = 1
    xc = Nx // 2
    xl = sample_n // 2
if sample_low_freq and centred:
xh = xl
if sample_n % 2 == 0:
xh += 1
new_mask[:, xc - xl:xc + xh+1] = 1
elif sample_low_freq:
xh = xl
if sample_n % 2 == 1:
xh -= 1
new_mask[:, :xl] = 1
new_mask[:, -xh:] = 1
mask_rep = np.repeat(new_mask[..., np.newaxis], Ny, axis=-1)
return mask_rep
def undersample(x, mask, centred=False, norm='ortho', noise=0):
'''
Undersample x. FFT2 will be applied to the last 2 axis
Parameters
----------
x: array_like
data
mask: array_like
undersampling mask in fourier domain
norm: 'ortho' or None
if 'ortho', performs unitary transform, otherwise normal dft
noise_power: float
simulates acquisition noise, complex AWG noise.
must be percentage of the peak signal
Returns
-------
xu: array_like
undersampled image in image domain. Note that it is complex valued
x_fu: array_like
undersampled data in k-space
'''
assert x.shape == mask.shape
# zero mean complex Gaussian noise
noise_power = noise
nz = np.sqrt(.5)*(np.random.normal(0, 1, x.shape) + 1j * np.random.normal(0, 1, x.shape))
nz = nz * np.sqrt(noise_power)
if norm == 'ortho':
# multiplicative factor
nz = nz * np.sqrt(np.prod(mask.shape[-2:]))
else:
nz = nz * np.prod(mask.shape[-2:])
if centred:
x_f = mymath.fft2c(x, norm=norm)
x_fu = mask * (x_f + nz)
x_u = mymath.ifft2c(x_fu, norm=norm)
return x_u, x_fu
else:
x_f = mymath.fft2(x, norm=norm)
x_fu = mask * (x_f + nz)
x_u = mymath.ifft2(x_fu, norm=norm)
return x_u, x_fu
def data_consistency(x, y, mask, centered=False, norm='ortho'):
'''
x is in image space,
y is in k-space
'''
if centered:
xf = mymath.fft2c(x, norm=norm)
xm = (1 - mask) * xf + y
xd = mymath.ifft2c(xm, norm=norm)
else:
xf = mymath.fft2(x, norm=norm)
xm = (1 - mask) * xf + y
xd = mymath.ifft2(xm, norm=norm)
return xd
def get_phase(x):
xr = np.real(x)
xi = np.imag(x)
phase = np.arctan(xi / (xr + 1e-12))
return phase
def undersampling_rate(mask):
return float(mask.sum()) / mask.size
| 6,114 |
instructors/migrations/0002_alter_language_user_alter_user_phone.py
|
bastoune57/gokiting_back_end
| 0 |
2025479
|
# Generated by Django 4.0.2 on 2022-03-01 08:52
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import phonenumber_field.modelfields
class Migration(migrations.Migration):
dependencies = [
('instructors', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='language',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='languages', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='user',
name='phone',
field=phonenumber_field.modelfields.PhoneNumberField(default='+12125552368', max_length=128, region=None),
),
]
| 786 |
tests/dingtalk_test.py
|
culiutudousi/kim-voice-assistant
| 75 |
2025474
|
# -*- coding: utf-8-*-
import unittest
import os
os.sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
from src.components import logger
import logging
import json
import wave
import time
from src.config.path import CACHE_WAVE_PATH
from src.components.dingtalk import DingRobot
class TestDingtalk(unittest.TestCase):
"""
函数计算单元测试
"""
def setUp(self):
pass
def test_dingtalk_handle(self):
DingRobot.dingtalk_handle('ip地址发送到钉钉', '11111')
def atest_send_message(self):
res = DingRobot.send_message(title='## 撒地方\n阿斯蒂芬', markdown_content='## 撒地方\n阿斯蒂芬')
assert res is True
if __name__ == '__main__':
logger.init(info=True, debug=True)
unittest.main()
| 732 |
gnome/gnome2/gedit/plugins.symlink/ViGedit/bindings/change.py
|
icebreaker/dotfiles
| 4 |
2022652
|
from base import VIG_ModeBase
class Mode(VIG_ModeBase):
def setup(self, act):
self.reg(None, act.gtk.keysyms.a)
self.reg(self.nop, act.gtk.keysyms.B, after=(act.modes.block, ["change", "numLines"]))
self.reg(self.nop, act.gtk.keysyms.t, after=(act.modes.t, ["change", "numLines", "f"]))
self.reg(act.text.cut_TillEndOfWord, act.gtk.keysyms.w, after=act.modes.insert, **self.fr)
self.reg(act.text.cut_NextWord, act.gtk.keysyms.w, after=act.modes.insert, stack="a", **self.fr)
self.reg(act.text.cut_Line, act.gtk.keysyms.l, after=act.modes.insert, stack="a", **self.fr)
| 665 |
test2.py
|
retorrano/sms
| 0 |
2026125
|
import sms2
sms = sms2.TextMessage("+639959064795","BAL")
sms.connectPhone()
sms.sendMessage()
sms.disconnectPhone()
print("message sent successfully")
| 152 |
utils.py
|
sumukhaithal6/Enduro-Imitation-Learning
| 1 |
2025165
|
#!/usr/bin/env python3
"""
Simple utilities.
Authors:
LICENCE:
"""
from argparse import Namespace
from time import sleep
import gym
import torch
from gym.wrappers.monitoring.video_recorder import VideoRecorder
from torchvision import transforms
from games import Game_type
from datasets import crop
def model_play(model: torch.nn.Module, game: Game_type, args: Namespace):
"""Make model play game and store video."""
env = gym.make(game.name)
video = VideoRecorder(
env,
str(
args.model_path / args.train_run_name / (args.train_run_name + ".mp4"),
),
)
model.eval()
data_transforms = transforms.Compose([crop, transforms.ToTensor()])
cur_state = data_transforms(env.reset()).unsqueeze(0)
total_reward = 0.0
steps = 0
while True:
with torch.no_grad():
action = model(cur_state.to(args.device)).cpu()
action = action.argmax(dim=1)
state, reward, done, _ = env.step(action)
cur_state = data_transforms(state).unsqueeze(0)
total_reward += reward
if steps % 200 == 0 or done:
print("\naction ", action)
print(f"step {steps} total_reward {total_reward:+0.2f}")
steps += 1
video.capture_frame()
# sleep(0.01)
if args.watch:
isopen = env.render(mode="human")
if not isopen:
break
if done:
break
| 1,448 |
KerbalStuff/blueprints/lists.py
|
toadicus/KerbalStuff
| 1 |
2025745
|
from flask import Blueprint, render_template, abort, request, redirect, session, url_for
from flask_login import current_user, login_user, logout_user
from datetime import datetime, timedelta
from KerbalStuff.email import send_confirmation, send_reset
from KerbalStuff.objects import User, Mod, ModList, ModListItem
from KerbalStuff.database import db
from KerbalStuff.common import *
import bcrypt
import re
import random
import base64
import binascii
import os
import json
lists = Blueprint('lists', __name__, template_folder='../../templates/lists')
@lists.route("/create/pack")
def create_list():
return render_template("create_list.html")
@lists.route("/pack/<list_id>/<list_name>")
def view_list(list_id, list_name):
mod_list = ModList.query.filter(ModList.id == list_id).first()
if not mod_list:
abort(404)
editable = False
if current_user:
if current_user.admin:
editable = True
if current_user.id == mod_list.user_id:
editable = True
return render_template("mod_list.html",
**{
'mod_list': mod_list,
'editable': editable
})
@lists.route("/pack/<list_id>/<list_name>/edit", methods=['GET', 'POST'])
@with_session
@loginrequired
def edit_list(list_id, list_name):
mod_list = ModList.query.filter(ModList.id == list_id).first()
if not mod_list:
abort(404)
editable = False
if current_user:
if current_user.admin:
editable = True
if current_user.id == mod_list.user_id:
editable = True
if not editable:
abort(401)
if request.method == 'GET':
return render_template("edit_list.html",
**{
'mod_list': mod_list,
'mod_ids': [m.mod.id for m in mod_list.mods]
})
else:
description = request.form.get('description')
background = request.form.get('background')
bgOffsetY = request.form.get('bg-offset-y')
mods = json.loads(request.form.get('mods'))
mod_list.description = description
if background and background != '':
mod_list.background = background
try:
mod_list.bgOffsetY = int(bgOffsetY)
except:
pass
# Remove mods
removed_mods = [m for m in mod_list.mods if not m.mod_id in mods]
for mod in removed_mods:
mod_list.mods.remove(mod)
# Add mods
added_mods = [m for m in mods if not m in [mod.mod.id for mod in mod_list.mods]]
for m in added_mods:
mod = Mod.query.filter(Mod.id == m).first()
mli = ModListItem()
mli.mod_id = mod.id
mli.mod_list = mod_list
mod_list.mods.append(mli)
db.add(mli)
db.commit()
for mod in mod_list.mods:
mod.sort_index = mods.index(mod.mod.id)
return redirect(url_for("lists.view_list", list_id=mod_list.id, list_name=mod_list.name))
| 2,992 |
modis/helptools.py
|
Benny84/discord-music-bot-modis
| 0 |
2026088
|
from collections import OrderedDict
import json as _json
import logging
logger = logging.getLogger(__name__)
def get_help_data(filepath):
"""
Get the json data from a help file
Args:
filepath (str): The file path for the help file
Returns:
data: The json data from a help file
"""
try:
with open(filepath, 'r') as file:
return _json.load(file, object_pairs_hook=OrderedDict)
except Exception as e:
logger.error("Could not load file {}".format(filepath))
logger.exception(e)
return {}
def get_help_datapacks(filepath, prefix="!"):
"""
Load help text from a file and give it as datapacks
Args:
filepath (str): The file to load help text from
prefix (str): The prefix to use for commands
Returns:
datapacks (list): The datapacks from the file
"""
help_contents = get_help_data(filepath)
datapacks = []
# Add the content
for d in help_contents:
heading = d
content = ""
if d == "Commands":
for c in help_contents[d]:
if "name" not in c:
continue
content += "`"
command = prefix + c["name"]
content += "{}".format(command)
if "params" in c:
for param in c["params"]:
content += " [{}]".format(param)
content += "`: "
if "description" in c:
content += c["description"]
content += "\n"
else:
content += help_contents[d]
datapacks.append((heading, content, True))
return datapacks
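# Usage sketch (the file path below is hypothetical):
#
#   datapacks = get_help_datapacks('help/music.json', prefix='!')
#   for heading, content, inline in datapacks:
#       print(heading, content, inline)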
| 1,720 |
old_fcn/utils.py
|
Camixxx/segmentation
| 0 |
2024806
|
from torchvision.utils import make_grid
import torch
import torch.nn.functional as F
import torch.nn as nn
import numpy as np
def make_image_grid(img, mean, std):
img = make_grid(img)
for i in range(3):
img[i] *= std[i]
img[i] += mean[i]
return img
def make_label_grid(label):
label = make_grid(label.unsqueeze(1).expand(-1, 3, -1, -1))[0:1]
return label
# def get_pic(image):
# if isinstance(image, str):
# return Image.open(image)
# return image
# def get_classes(pic):
# classes = []
# color_list = pic.getcolors()
# for color in range(len(color_list)):
# classes.append(color_list[color][1])
# return classes
## iou
def compute_mean_iou(pred, label):
pred = pred.data.numpy()
label = label.data.numpy()
unique_labels = np.unique(label)
num_unique_labels = len(unique_labels)
I = np.zeros(num_unique_labels)
U = np.zeros(num_unique_labels)
for index, val in enumerate(unique_labels):
pred_i = pred == val
label_i = label == val
I[index] = float(np.sum(np.logical_and(label_i, pred_i)))
U[index] = float(np.sum(np.logical_or(label_i, pred_i)))
# mean_iou = np.mean(I / U)
return np.mean(I / U)
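# Tiny worked example (sketch, values chosen by hand): two classes with one mislabelled pixel.
#
#   pred = torch.tensor([[0, 1], [1, 1]])
#   label = torch.tensor([[0, 1], [0, 1]])
#   compute_mean_iou(pred, label)   # class 0: 1/2, class 1: 2/3 -> mean IoU ~ 0.58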
def get_IU(pred, label):
pred = pred.data.numpy()
label = label.data.numpy()
unique_labels = np.unique(label)
num_unique_labels = len(unique_labels)
I = np.zeros(num_unique_labels)
U = np.zeros(num_unique_labels)
for index, val in enumerate(unique_labels):
pred_i = pred == val
label_i = label == val
I[index] = float(np.sum(np.logical_and(label_i, pred_i)))
U[index] = float(np.sum(np.logical_or(label_i, pred_i)))
return I,U
def get_total(pred, label):
lb = label.data.numpy()
pd = pred.data.numpy()
total = pd == lb
return total
def get_union(pred, label):
pred = pred.data.numpy()
label = label.data.numpy()
unique_labels = np.unique(label)
classes_num = len(unique_labels)
unions = np.zeros(classes_num)
for index, val in enumerate(unique_labels):
pred_i = pred == val
label_i = label == val
unions[index] = float(np.sum(np.logical_or(label_i, pred_i)))
return unions
def pixel_accuarcy(label, pred):
t = get_total(pred, label)
pa = float(np.sum(t))/t.size
return pa
def mean_pixel_accuracy(label, pred):
I,U = get_IU(pred, label)
label = label.data.numpy()
unique_labels = np.unique(label)
num_unique_labels = len(unique_labels)
T = np.zeros(num_unique_labels)
mpa = np.sum(I/U)/I.size
return mpa
def mean_IU(label, pred):
I,U = get_IU(pred, label)
unions = get_union(pred, label)
mIoU = float(np.sum(I/U))/I.size
return mIoU
def frequency_weighted_IU(label, pred):
I,U = get_IU(pred, label)
total = get_total(pred, label)
unions = get_union(pred, label)
s = float(np.sum(total*I/U))
FWIoU = s/I.size
return FWIoU
def FCN_metric(label, pred):
PA = pixel_accuarcy(label, pred)
MPA = mean_pixel_accuracy(label, pred)
MIU = mean_IU(label, pred)
# FWIoU = frequency_weighted_IU(label, pred)
result = {'PA':PA, 'MPA':MPA, 'MIU':MIU} #, 'FWIoU':FWIoU
return result
#def test():
# dir1 = 'D:/9527/2018.4.12 语义分割评估(未完成)/DJI_0605.png'
# dir2 = 'D:/9527/2018.4.12 语义分割评估(未完成)/pred_9.png'
# dir3 = 'D:/FCN.tensorflow-master-123/test2018.4.26/A/gt/gt_5.png'
# dir4 = 'D:/FCN.tensorflow-master-123/test2018.4.26/A/pred/pred_5.png'
# print('PA = ',pixel_accuarcy(dir1, dir2))
# print('MPA = ',mean_pixel_accuracy(dir3, dir4))
# print('MIoU = ',mean_IU(dir3, dir4))
# print('FWIoU = ',frequency_weighted_IU(dir1, dir2))
def FCN_evaluate(dir1):
PA = 0
MPA = 0
MIU = 0
FWIoU = 0
print('the directory is:',dir1)
image_lists = create_image_lists(dir1)
for i in range(len(image_lists['gt'])):
print(i)
ground_truth = dir1 +'/gt/'+image_lists['gt'][i]
prediction = dir1 + '/pred/' + image_lists['pred'][i]
result = {'name': name, 'PA':PA, 'MPA':MPA, 'MIU':MIU, 'FWIoU':FWIoU}
return result
# Recommend
class CrossEntropyLoss2d(nn.Module):
def __init__(self, weight=None, size_average=True):
super(CrossEntropyLoss2d, self).__init__()
self.nll_loss = nn.NLLLoss2d(weight, size_average)
def forward(self, inputs, targets):
return self.nll_loss(F.log_softmax(inputs), targets)
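# Usage sketch: per-pixel cross entropy, with inputs of shape (N, C, H, W) holding raw
# class scores and targets of shape (N, H, W) holding class indices.
#
#   criterion = CrossEntropyLoss2d()
#   inputs = torch.randn(2, 5, 8, 8)            # batch of 2, 5 classes, 8x8 maps
#   targets = torch.randint(0, 5, (2, 8, 8))
#   loss = criterion(inputs, targets)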
| 4,499 |
airbyte-integrations/connectors/source-linkedin-ads/unit_tests/analytics_tests/test_chunk_analytics_fields.py
|
harshithmullapudi/airbyte
| 0 |
2022791
|
#
# MIT License
#
# Copyright (c) 2020 Airbyte
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from source_linkedin_ads.analytics import chunk_analytics_fields
# Test chunk size for each field set
TEST_FIELDS_CHUNK_SIZE = 3
# Test fields assuming they are really available for the fetch
TEST_ANALYTICS_FIELDS = [
"field_1",
"base_field_1",
"field_2",
"base_field_2",
"field_3",
"field_4",
"field_5",
"field_6",
"field_7",
"field_8",
]
# Fields that are always present in fields_set chunks
TEST_BASE_ANALLYTICS_FIELDS = ["base_field_1", "base_field_2"]
def test_chunk_analytics_fields():
"""
We expect to truncate the fields list into the chunks of equal size,
with TEST_BASE_ANALLYTICS_FIELDS presence in each chunk,
order is not matter.
"""
expected_output = [
["field_1", "base_field_1", "field_2", "base_field_2"],
["base_field_2", "field_3", "field_4", "base_field_1"],
["field_5", "field_6", "field_7", "base_field_1", "base_field_2"],
["field_8", "base_field_1", "base_field_2"],
]
assert list(chunk_analytics_fields(TEST_ANALYTICS_FIELDS, TEST_BASE_ANALLYTICS_FIELDS, TEST_FIELDS_CHUNK_SIZE)) == expected_output
| 2,239 |
ServerlessController/providers_app/models.py
|
pacslab/ChainFaaS
| 7 |
2026182
|
from django.db import models
from profiles.models import Developer, User, Provider
from developers_app.models import Services
from datetime import datetime
from pytz import timezone
from MSc_Research_Django.settings import TIME_ZONE
# Create your models here.
class Job(models.Model):
provider = models.ForeignKey(Provider, on_delete=models.CASCADE)
service = models.ForeignKey(Services, on_delete=models.CASCADE)
start_time = models.DateTimeField(default=datetime(2018, 7, 1, tzinfo=timezone(TIME_ZONE)))
ack_time = models.DateTimeField(default=datetime(2018, 7, 1, tzinfo=timezone(TIME_ZONE)))
pull_time = models.IntegerField(default=0)
run_time = models.IntegerField(default=0)
total_time = models.IntegerField(default=0)
cost = models.FloatField(default=0.0)
finished = models.BooleanField(default=False)
corr_id = models.UUIDField(default=0, db_index=True)
response = models.TextField(default='')
| 947 |
examples/dynamic_lgp_humoro.py
|
humans-to-robots-motion/lgp
| 1 |
2024040
|
import sys
import argparse
import time
import yaml
import logging
from os.path import join, dirname, abspath, expanduser
logging.basicConfig(level=logging.INFO)
ROOT_DIR = join(dirname(abspath(__file__)), '..')
DATA_DIR = join(ROOT_DIR, 'data', 'scenarios')
MODEL_DIR = join(expanduser("~"), '.qibullet', '1.4.3')
DATASET_DIR = join(ROOT_DIR, 'datasets', 'mogaze')
sys.path.append(ROOT_DIR)
from lgp.core.dynamic import HumoroDynamicLGP
from lgp.utils.helpers import load_yaml_config
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Example run: python lgp_planner_humoro.py set_table')
parser.add_argument('scenario', help='The scenario name of the domain, problem file', type=str)
parser.add_argument('-p', help='problem number', type=str, default='1')
parser.add_argument('-v', help='verbose', type=bool, default=False)
args = parser.parse_args()
domain_file = join(DATA_DIR, 'domain_' + args.scenario + '.pddl')
problem_file = join(DATA_DIR, 'problem_' + args.scenario + args.p + '.pddl')
config_file = join(DATA_DIR, args.scenario + args.p + '.yaml')
config = load_yaml_config(config_file)
robot_model_file = join(MODEL_DIR, 'pepper.urdf')
start_time = time.time()
engine = HumoroDynamicLGP(domain_file=domain_file, problem_file=problem_file, robot_model_file=robot_model_file, path_to_mogaze=DATASET_DIR,
enable_viewer=args.v, verbose=args.v)
engine.init_planner(**config)
init_time = time.time()
print('Init time: ' + str(init_time - start_time) + 's')
engine.run(replan=True)
| 1,601 |
python/code_challenges/quick-sort/quick_sort/quick_sort.py
|
AnasAGc/data-structures-and-algorithms
| 0 |
2025984
|
def QuickSort(arr, left, right):
if left < right:
position = Partition(arr, left, right)
QuickSort(arr, left, position - 1)
QuickSort(arr, position + 1, right)
return arr
def Partition(arr, left, right):
pivot = arr[right] # 16
low = left - 1
for i in range(left,right):
if arr[i] <= pivot :
low += 1
# print(low)
Swap(arr, i, low)
print(low)
print(arr)
Swap(arr, right, low + 1)
return low + 1
def Swap(arr, i, low):
temp = arr[i]
arr[i] = arr[low]
arr[low] = temp
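# Minimal usage sketch: sort a list in place over its full index range
# (note that Partition above also prints intermediate state).
if __name__ == "__main__":
    data = [5, 2, 9, 1, 7]
    print(QuickSort(data, 0, len(data) - 1))   # -> [1, 2, 5, 7, 9]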
| 635 |
examples/rabbitmq/yunpian.py
|
ilaotan/epush
| 0 |
2026133
|
#!/usr/bin/env python
#coding:utf-8
import pika
import json
HOST = 'localhost'
USERNAME = 'hisir'
PASSWORD = '<PASSWORD>'
class Yunpian():
def __init__(self):
credentials = pika.PlainCredentials(USERNAME, PASSWORD)
self.connection = pika.BlockingConnection(pika.ConnectionParameters(host=HOST, credentials=credentials))
self.channel = self.connection.channel()
self.queue = 'yunpian'
def single_send(self):
data = {'type': 'single',
'mobile': '+861510202',
'content':'【广州科技】验证码8888,请您尽快验证,完成sir注册。如非本人操作请忽略。'}
self.in_mq(data)
def batch_send(self):
data = {'type': 'batch',
'mobile': '1510202',
'content':'【广州科技】验证码6666,请您尽快验证,完成sir注册。如非本人操作请忽略。'}
self.in_mq(data)
def end(self):
self.channel.close()
self.connection.close()
def in_mq(self, data):
self.channel.basic_publish(exchange='',
routing_key=self.queue,
body=json.dumps(data))
if __name__ == "__main__":
yunpian = Yunpian()
yunpian.single_send()
#yunpian.batch_send()
yunpian.end()
| 1,145 |
tests/compositepingertest.py
|
Global-Biofoundries-Alliance/DNA-scanner
| 12 |
2026233
|
import unittest
from Pinger import Pinger, Entities
from dummy.pinger import DummyPinger
from dummy.pinger import NotAvailablePinger
from dummy.pinger import AlwaysRunningPinger
class TestCompositePinger(unittest.TestCase):
name = "CompositePinger"
# Checks the isRunning() method.
def test_is_running(self):
print ("--->>> Start test for: " + TestCompositePinger.name + " - isRunning()")
        # Create a CompositePinger with 2 registered DummyPingers
pingerDummy1 = DummyPinger()
pingerDummy2 = DummyPinger()
p = Pinger.CompositePinger()
p.registerVendor(Entities.VendorInformation(name="Dummy", shortName="Dummy", key=1), pingerDummy1)
p.registerVendor(Entities.VendorInformation(name="Dummy", shortName="Dummy", key=2), pingerDummy2)
# No Pinger is Running
pingerDummy1.running = False
pingerDummy2.running = False
self.assertFalse(p.isRunning())
# DummyPinger1 is running
pingerDummy1.running = True
pingerDummy2.running = False
self.assertTrue(p.isRunning())
# DummyPinger2 is running
pingerDummy1.running = False
pingerDummy2.running = True
self.assertTrue(p.isRunning())
# Both DummyPinger are running
pingerDummy1.running = True
pingerDummy2.running = True
self.assertTrue(p.isRunning())
# Check the getVendor method
def test_get_vendor(self):
print ("--->>> Start test for: " + TestCompositePinger.name + " - getVendor")
# Create Dummy Pinger
pingerDummy1 = DummyPinger()
pingerDummy2 = DummyPinger()
        # Without any registered vendor/VendorPinger
p = Pinger.CompositePinger()
self.assertEqual(0, len(p.getVendors()))
# With 1 registered vendor
p.registerVendor(Entities.VendorInformation(name="Dummy", shortName="Dummy", key=1), pingerDummy1)
self.assertEqual(1, len(p.getVendors()))
vendor = p.getVendors()[0]
self.assertEqual(vendor.name, "Dummy")
self.assertEqual(vendor.shortName, "Dummy")
self.assertEqual(vendor.key, 1)
        # With 2 registered vendors
p.registerVendor(Entities.VendorInformation(name="Dummy", shortName="Dummy", key=2), pingerDummy2)
self.assertEqual(2, len(p.getVendors()))
vendor = p.getVendors()[1]
self.assertEqual(vendor.name, "Dummy")
self.assertEqual(vendor.shortName, "Dummy")
self.assertEqual(vendor.key, 2)
# Test with duplicate key. Old one should be replaced with the new one
p.registerVendor(Entities.VendorInformation(name="DummyDuplicate", shortName="DummyDuplicate", key=2), pingerDummy2)
self.assertEqual(2, len(p.getVendors()))
vendor = p.getVendors()[1]
self.assertEqual(vendor.name, "DummyDuplicate")
self.assertEqual(vendor.shortName, "DummyDuplicate")
self.assertEqual(vendor.key, 2)
# Checks the getorders method
def test_getorders(self):
print ("--->>> Start test for: " + TestCompositePinger.name + " - getOrders")
        # Initialize the Pinger and DummyPingers
pingerDummy1 = DummyPinger()
pingerDummy2 = DummyPinger()
p = Pinger.CompositePinger()
        # Without a search it should return an empty list
self.assertEqual(0, len(p.getOffers()))
self.assertFalse(p.isRunning())
# Start search with 1 Sequence and without vendors
p.searchOffers([Entities.SequenceInformation("ACTG", "TestSequence", "ts1")])
self.assertEqual(1, len(p.getOffers()))
self.assertEqual(0, len(p.getOffers()[0].vendorOffers))
# search with 2 sequences and 1 vendor
p.registerVendor(Entities.VendorInformation(name="Dummy", shortName="Dummy", key=1), pingerDummy1)
p.searchOffers([Entities.SequenceInformation("ACTG", "TestSequence", "ts1"),
Entities.SequenceInformation("ACTG", "TestSequence", "ts2")])
self.assertEqual(2, len(p.getOffers()))
self.assertEqual(1, len(p.getOffers()[0].vendorOffers))
self.assertEqual(1, len(p.getOffers()[0].vendorOffers[0].offers))
# Create a correct order
order = p.order(vendor = 1, offerIds = [p.getOffers()[0].vendorOffers[0].offers[0].key])
self.assertEqual(Entities.OrderType.NOT_SUPPORTED, order.getType())
# search with 1 sequence and 2 vendors
p.registerVendor(Entities.VendorInformation(name="Dummy", shortName="Dummy", key=2), pingerDummy2)
p.searchOffers([Entities.SequenceInformation("ACTG", "TestSequence", "ts1")])
self.assertEqual(1, len(p.getOffers()))
self.assertEqual(2, len(p.getOffers()[0].vendorOffers))
self.assertEqual(1, len(p.getOffers()[0].vendorOffers[0].offers))
self.assertEqual(1, len(p.getOffers()[0].vendorOffers[1].offers))
# Filter Vendor 1
p.searchOffers([Entities.SequenceInformation("ACTG", "TestSequence", "ts1")], vendors=[1])
self.assertEqual(1, len(p.getOffers()))
self.assertEqual(1, len(p.getOffers()[0].vendorOffers))
self.assertEqual(1, p.getOffers()[0].vendorOffers[0].vendorInformation.key)
self.assertEqual(1, len(p.getOffers()[0].vendorOffers[0].offers))
# Filter Vendor 2
p.searchOffers([Entities.SequenceInformation("ACTG", "TestSequence", "ts1")], vendors=[2])
self.assertEqual(1, len(p.getOffers()))
self.assertEqual(1, len(p.getOffers()[0].vendorOffers))
self.assertEqual(2, p.getOffers()[0].vendorOffers[0].vendorInformation.key)
self.assertEqual(1, len(p.getOffers()[0].vendorOffers[0].offers))
# search with 2 sequences, 1 vendor with orders and 1 vendor without orders
p.searchOffers([Entities.SequenceInformation("ACTG", "TestSequence", "ts1"),
Entities.SequenceInformation("ACTG", "TestSequence", "ts2")])
pingerDummy2.offers = []
self.assertEqual(2, len(p.getOffers()))
self.assertEqual(2, len(p.getOffers()[0].vendorOffers))
self.assertEqual(1, len(p.getOffers()[0].vendorOffers[0].offers))
self.assertEqual(0, len(p.getOffers()[0].vendorOffers[1].offers))
# Test that CompositePinger ignores output of a VendorPinger, if invalid
pingerDummy1.offers = [1,2,3]
self.assertEqual(2, len(p.getOffers()))
self.assertEqual(1, len(p.getOffers()[0].vendorOffers))
#
    # Desc: Test the following scenarios:
    #       - A VendorPinger is temporarily unavailable
    #       - Try to search while the Pinger is still running
#
def testVendorOffers(self):
# Define Sequences for searchOffers call
sequences = [
Entities.SequenceInformation("ACTG", "TestSequence", "ts1")
]
# Define CompositePinger (Object to test)
p = Pinger.CompositePinger()
# Pinger with success response
successPinger = DummyPinger()
p.registerVendor(Entities.VendorInformation(name="DummySuccess", shortName="DummySucc", key=1), successPinger)
p.searchOffers(sequences)
res = p.getOffers()
# 1 Sequence ...
self.assertEqual(1, len(res))
# with 1 vendor ...
self.assertEqual(1, len(res[0].vendorOffers))
# with 1 offer
self.assertEqual(1, len(res[0].vendorOffers[0].offers))
# and 0 messages
self.assertEqual(0, len(res[0].vendorOffers[0].messages))
# register Pinger who is unavailable
unavailablePinger = NotAvailablePinger()
p.registerVendor(Entities.VendorInformation(name="DummyNotAvailable", shortName="DummyNA", key=2), unavailablePinger)
p.searchOffers(sequences)
res = p.getOffers()
# 1 Sequence ...
self.assertEqual(1, len(res))
# with 2 vendor ...
self.assertEqual(2, len(res[0].vendorOffers))
# 1 vendor ...
# with 1 offer
self.assertEqual(1, len(res[0].vendorOffers[0].offers))
# and 0 messages
self.assertEqual(0, len(res[0].vendorOffers[0].messages))
        # 1 vendor ...
        # with 0 offers
        self.assertEqual(0, len(res[0].vendorOffers[1].offers))
# and 1 messages
self.assertEqual(1, len(res[0].vendorOffers[1].messages))
self.assertEqual(Entities.MessageType.API_CURRENTLY_UNAVAILABLE, res[0].vendorOffers[1].messages[0].messageType)
# register Pinger who is always running
runningPinger = AlwaysRunningPinger()
p.registerVendor(Entities.VendorInformation(name="DummyRunning", shortName="DummyRunning", key=3), runningPinger)
# Expect IsRunningError
with self.assertRaises(Entities.IsRunningError): p.searchOffers(sequences)
#
# Desc: Test the following scenarios:
    #       - Try to search with different sequences that share the same key
#
def testDuplicatedSequences(self):
# Define Sequences for searchOffers call
sequences = [
Entities.SequenceInformation("ACTG", "TestSequence", "ts1"),
Entities.SequenceInformation("ACTG2", "TestSequence2", "ts1")
]
# Define CompositePinger (Object to test)
p = Pinger.CompositePinger()
# Pinger with success response
successPinger = DummyPinger()
p.registerVendor(Entities.VendorInformation(name="DummySuccess", shortName="DummySucc", key=1), successPinger)
        # Expect an error because of the duplicated sequence keys
with self.assertRaises(Entities.InvalidInputError): p.searchOffers(sequences)
if __name__ == '__main__':
unittest.main()
| 9,732 |
scripts/misc/config_abl_3dassembly_optimal_bc_loss.py
|
clvrai/mopa-pd
| 6 |
2025694
|
# for this figure, we need to multiply all y values by 1e6 (y_data = y_data * 1e6) and set
# y-tick labels directly plt.yticks([1.3, 1.4, 1.6, 2.0], fontsize=12).
filename_prefix = 'SawyerAssembly-Abl-Optimal-BC-Loss'
xlabel = 'Epoch'
ylabel = 'Mean Square Error (x 1e-6)'
max_step = 40
min_y_axis_value = 1e-6
max_y_axis_value = 2e-6
legend = True
data_key = ["Action Prediction Loss (Train)", "Action Prediction Loss (Test)"]
bc_y_value = 0
smoothing = False
smoothing_weight = 0
legend_loc = 'upper right'
wandb_api_path = 'arthur801031/mopa-rl-bc-visual'
num_points = 40
x_scale = 1
divide_max_step_by_1mill = False
build_log_from_multiple_keys = True
limit_y_max = True
limit_y_max_value = 2e-6
plot_labels = {
'Train': ['BC Visual Stochastic_3DAssembly_curious-spaceship-136'],
'Test': ['BC Visual Stochastic_3DAssembly_curious-spaceship-136'],
}
line_labels = {}
line_colors = {
'Train': 'C0',
'Test': 'C1',
}
| 936 |
insurancedb/file_processor.py
|
ifr1m/insurance-db
| 0 |
2025858
|
import logging
from pathlib import Path
from typing import List
import pdfplumber
from insurancedb.extractors.registry import extractors_registry_map
from insurancedb.extractors.extractor_methods import diff_months
logger = logging.getLogger(__name__)
def process_paths(paths: List[Path]):
logger.info("Processing %d files.", len(paths))
data = []
for pdf_path in paths:
with pdfplumber.open(pdf_path) as pdf:
processed = False
for extractor_key, extractor_cls in extractors_registry_map.items():
extractor = extractor_cls(pdf_path.name, pdf)
if extractor.is_match():
processed = True
logger.info("%s :-> %s", extractor_cls.__name__, {str(pdf_path)})
# NR.CRT
# ASIGURATOR
# NUMAR POLITA
# CLASA B/M
# DATA EMITERE
# DATA EXPIRARE
# NUME CLIENT
# NUMAR DE TELEFON
# TIP ASIGURARE
# NUMAR INMATRICULARE
# PERIODA DE ASIGURARE
# VALOARE POLITA - prima de asigurare (totala)
# PDF
start_date = extractor.get_start_date()
expiration_date = extractor.get_expiration_date()
interval = diff_months(expiration_date, start_date)
pdf_data = [extractor.get_insurer_short_name(), extractor.get_insurance_number(),
extractor.get_insurance_class(),
extractor.get_contract_date(), expiration_date,
extractor.get_person_name(), None, extractor.get_type(),
extractor.get_car_number(), interval,
extractor.get_insurance_amount(), str(pdf_path)]
data.append(pdf_data)
break
if not processed:
pdf_data = [f"Unprocessed {str(pdf_path)}", None, None, None, None, None, None, None, None, None, None,
pdf_path.name]
data.append(pdf_data)
return data
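# A minimal usage sketch (an assumption, not part of the original module). It
# gathers every PDF under a purely illustrative "input_pdfs" directory and
# prints one extracted row per file.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    pdf_paths = sorted(Path("input_pdfs").glob("*.pdf"))
    for row in process_paths(pdf_paths):
        print(row)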
| 2,266 |
day-05/part-1/chloe.py
|
lypnol/adventofcode-2017
| 16 |
2024718
|
from submission import Submission
class ChloeSubmission(Submission):
    def run(self, s):
        # Parse one jump offset per line.
        offsets = list(map(int, s.split('\n')))
        jump_index = 0
        steps = 0
        # Follow the offsets, incrementing each one after it is used,
        # until a jump lands outside the list.
        while jump_index < len(offsets):
            jump = offsets[jump_index]
            offsets[jump_index] += 1
            jump_index += jump
            steps += 1
        return steps
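# A quick sanity check (an assumption, not part of the original solution; it
# also assumes Submission can be instantiated without constructor arguments).
# The puzzle's worked example of offsets 0, 3, 0, 1, -3 takes 5 steps.
if __name__ == "__main__":
    print(ChloeSubmission().run("0\n3\n0\n1\n-3"))  # expected: 5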
| 296 |
tests/server/main/match_ror_test.py
|
dataesr/matcher-affiliation
| 1 |
2025454
|
import pytest
from project.server.main.load_ror import load_ror
from project.server.main.match_ror import match_ror
from project.server.main.metrics import compute_precision_recall
from project.server.main.my_elastic import MyElastic
@pytest.fixture(scope='module')
def elasticsearch() -> dict:
index_prefix = 'test'
es = MyElastic()
load_ror(index_prefix=index_prefix)
yield {'index_prefix': index_prefix, 'es': es}
es.delete_index(index=f'{index_prefix}*')
class TestMatchRor:
@pytest.mark.parametrize(
'query,strategies,expected_results,expected_logs', [
('institut pasteur shanghai', [[['ror_name']]], ['0495fxg12'], ''),
('02feahw73', [[['ror_id']]], ['02feahw73'], ''),
('grid.4444.0', [[['ror_grid_id']]], ['02feahw73'], '')
]
)
def test_match_ror(self, elasticsearch, query, strategies, expected_results, expected_logs) -> None:
args = {'index_prefix': elasticsearch['index_prefix'], 'verbose': True, 'strategies': strategies,
'query': query}
response = match_ror(conditions=args)
results = response['results']
results.sort()
assert results == expected_results
assert expected_logs in response['logs']
def test_precision_recall(self, elasticsearch) -> None:
precision_recall = compute_precision_recall(match_type='ror', index_prefix=elasticsearch['index_prefix'])
assert precision_recall['precision'] >= 0.81
assert precision_recall['recall'] >= 0.16
| 1,541 |
examples/supervision_example.py
|
tamland/actors
| 15 |
2024320
|
# -*- coding: utf-8 -*-
import logging
import time
import random
from actors import Actor, ActorSystem, Directive
logging.basicConfig(level=logging.DEBUG)
class Crash(Exception):
pass
StartGreeting = object()
class Greeter(Actor):
def post_restart(self):
print("Greeter restarted")
def receive(self, message):
if random.randint(0, 3) == 0:
raise Crash()
print("Hello %s" % message)
class Supervisor(Actor):
@staticmethod
def supervisor_strategy(exception):
try:
raise exception
except Crash:
return Directive.Restart
except:
return Directive.Stop
def __init__(self):
self._greeter = None
def receive(self, message):
if message is StartGreeting:
self._greeter = self.context.actor_of(Greeter)
else:
# Forward the message to greeter
self._greeter.tell(message, self.context.sender)
system = ActorSystem()
supervisor = system.actor_of(Supervisor)
supervisor.tell(StartGreeting)
try:
while True:
supervisor.tell("world")
time.sleep(0.5)
except KeyboardInterrupt:
pass
system.terminate()
| 1,207 |
opencae2020B13/test_bunny_org.py
|
tkoyama010/tkoyama010
| 1 |
2025424
|
import vtk
# create reader
reader = vtk.vtkOBJReader()
reader.SetFileName("bunny.obj")
mapper = vtk.vtkPolyDataMapper()
if vtk.VTK_MAJOR_VERSION <= 5:
mapper.SetInput(reader.GetOutput())
else:
mapper.SetInputConnection(
reader.GetOutputPort()
)
# create actor
actor = vtk.vtkActor()
actor.SetMapper(mapper)
# Create a rendering window and renderer
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
# Create a renderwindowinteractor
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Assign actor to the renderer
ren.AddActor(actor)
# Enable user interface interactor
iren.Initialize()
renWin.Render()
iren.Start()
# clean up objects
del iren
del renWin
| 729 |
prime/prime_finder/scripts/prime_finder.py
|
abrahamrhoffman/math
| 0 |
2025878
|
class PrimeFinder(object):
    def find(self, number):
        # Validate the input: only positive integers are accepted.
        if not isinstance(number, int):
            print("Number must be of type: integer")
            raise SystemExit
        if number <= 0:
            print("A non-negative, non-zero integer is required")
            raise SystemExit
        if number == 1:
            print([])  # 1 is not prime, so there is nothing to report
            return []
        # Collect every prime in [2, number] by trial division against the
        # primes found so far.
        primes = []
        for n in range(2, number + 1):
            if all(n % p != 0 for p in primes):
                primes.append(n)
        print(primes)
        return primes
def main():
pf = PrimeFinder()
pf.find(10)
if __name__ == "__main__":
main()
| 589 |
django_distill/renderer.py
|
tback/django-distill
| 0 |
2025707
|
# -*- coding: utf-8 -*-
import os
import sys
import types
import errno
from shutil import copy2
from future.utils import raise_from
from django.utils import (six, translation)
from django.conf import settings
from django.conf.urls import include as include_urls
from django.http import HttpResponse
from django.template.response import TemplateResponse
from django.test import RequestFactory
from django.urls import reverse
from django.core.management import call_command
from django_distill.errors import (DistillError, DistillWarning)
class DistillRender(object):
'''
Renders a complete static site from all urls registered with
distill_url() and then copies over all static media.
'''
def __init__(self, output_dir, urls_to_distill):
self.output_dir = output_dir
self.urls_to_distill = urls_to_distill
# activate the default translation
translation.activate(settings.LANGUAGE_CODE)
def render(self):
for distill_func, file_name, view_name, a, k in self.urls_to_distill:
for param_set in self.get_uri_values(distill_func):
if not param_set:
param_set = ()
elif self._is_str(param_set):
param_set = param_set,
uri = self.generate_uri(view_name, param_set)
render = self.render_view(uri, param_set, a)
# rewrite URIs ending with a slash to ../index.html
if file_name is None and uri.endswith('/'):
if uri.startswith('/'):
uri = uri[1:]
yield uri, uri + 'index.html', render
continue
yield uri, file_name, render
def _is_str(self, s):
return isinstance(s, six.string_types)
def get_uri_values(self, func):
try:
v = func()
except Exception as e:
raise DistillError('Failed to call distill function: {}'.format(e))
if not v:
return (None,)
elif isinstance(v, (list, tuple)):
return v
elif isinstance(v, types.GeneratorType):
return list(v)
else:
err = 'Distill function returned an invalid type: {}'
raise DistillError(err.format(type(v)))
def generate_uri(self, view_name, param_set):
if isinstance(param_set, (list, tuple)):
uri = reverse(view_name, args=param_set)
elif isinstance(param_set, dict):
uri = reverse(view_name, kwargs=param_set)
else:
err = 'Distill function returned an invalid type: {}'
raise DistillError(err.format(type(param_set)))
return uri
def render_view(self, uri, param_set, args):
if len(args) < 2:
raise DistillError('Invalid view arguments')
view_regex, view_func = args[0], args[1]
request_factory = RequestFactory()
request = request_factory.get(uri)
if isinstance(param_set, dict):
a, k = (), param_set
else:
a, k = param_set, {}
try:
response = view_func(request, *a, **k)
except Exception as err:
e = 'Failed to render view: {}'.format(err)
raise_from(DistillError(e), err)
if self._is_str(response):
response = HttpResponse(response)
elif isinstance(response, TemplateResponse):
response.render()
if response.status_code != 200:
err = 'View returned a non-200 status code: {}'
raise DistillError(err.format(response.status_code))
return response
def copy_static(self, dir_from, dir_to):
# we need to ignore some static dirs such as 'admin' so this is a
# little more complex than a straight shutil.copytree()
if not dir_from.endswith(os.sep):
dir_from = dir_from + os.sep
if not dir_to.endswith(os.sep):
dir_to = dir_to + os.sep
for root, dirs, files in os.walk(dir_from):
dirs[:] = filter_dirs(dirs)
for f in files:
from_path = os.path.join(root, f)
base_path = from_path[len(dir_from):]
to_path = os.path.join(dir_to, base_path)
to_path_dir = os.path.dirname(to_path)
if not os.path.isdir(to_path_dir):
os.makedirs(to_path_dir)
copy2(from_path, to_path)
yield from_path, to_path
def run_collectstatic(stdout):
stdout('Distill is running collectstatic...')
call_command('collectstatic')
stdout('')
stdout('collectstatic complete, continuing...')
_ignore_dirs = ('admin', 'grappelli')
def filter_dirs(dirs):
return [d for d in dirs if d not in _ignore_dirs]
def load_urls(stdout):
stdout('Loading site URLs')
site_urls = getattr(settings, 'ROOT_URLCONF')
if site_urls:
include_urls(site_urls)
def render_to_dir(output_dir, urls_to_distill, stdout):
mimes = {}
load_urls(stdout)
renderer = DistillRender(output_dir, urls_to_distill)
for page_uri, file_name, http_response in renderer.render():
if file_name:
local_uri = file_name
full_path = os.path.join(output_dir, file_name)
else:
local_uri = page_uri
if page_uri.startswith(os.sep):
page_uri = page_uri[1:]
full_path = os.path.join(output_dir, page_uri)
content = http_response.content
mime = http_response.get('Content-Type')
renamed = ' (renamed from "{}")'.format(page_uri) if file_name else ''
msg = 'Rendering page: {} -> {} ["{}", {} bytes] {}'
stdout(msg.format(local_uri, full_path, mime, len(content), renamed))
try:
dirname = os.path.dirname(full_path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
with open(full_path, 'wb') as f:
f.write(content)
except IOError as e:
if e.errno == errno.EISDIR:
err = ('Output path: {} is a directory! Try adding a '
'"distill_file" arg to your distill_url()')
raise DistillError(err.format(full_path))
else:
raise
mimes[full_path] = mime.split(';')[0].strip()
static_url = settings.STATIC_URL
static_url = static_url[1:] if static_url.startswith('/') else static_url
static_output_dir = os.path.join(output_dir, static_url)
for file_from, file_to in renderer.copy_static(settings.STATIC_ROOT,
static_output_dir):
stdout('Copying static: {} -> {}'.format(file_from, file_to))
    media_url = settings.MEDIA_URL
    if media_url:
        media_url = media_url[1:] if media_url.startswith('/') else media_url
        media_output_dir = os.path.join(output_dir, media_url)
        # copy_static() walks a filesystem directory, so copy from MEDIA_ROOT
        # (a local path) rather than MEDIA_URL (a URL prefix)
        for file_from, file_to in renderer.copy_static(settings.MEDIA_ROOT,
                                                       media_output_dir):
            stdout('Copying media: {} -> {}'.format(file_from, file_to))
return True
# eof
| 7,227 |
springer/urls.py
|
JnyJny/springer_downloader
| 6 |
2025962
|
"""Springer URLS
"""
from .constants import Language, Topic, FileFormat
SPRINGER_ANNOUNCEMENT_URL = "https://www.springernature.com/gp/librarians/news-events/all-news-articles/industry-news-initiatives/free-access-to-textbooks-for-institutions-affected-by-coronaviru/17855960"
SPRINGER_REST_URL = "https://resource-cms.springernature.com/springer-cms/rest"
SPRINGER_CATALOG_EN_URL = f"{SPRINGER_REST_URL}/v1/content/17858272/data/v8"
SPRINGER_CATALOG_DE_URL = f"{SPRINGER_REST_URL}/v1/content/17863240/data/v3"
SPRINGER_NURSING_CATALOG_DE_URL = f"{SPRINGER_REST_URL}/v1/content/17856246/data/v3"
SPRINGER_PDF_URL = "https://link.springer.com/content/pdf"
SPRINGER_EPUB_URL = "https://link.springer.com/download/epub"
urls = {
"announcement": SPRINGER_ANNOUNCEMENT_URL,
"catalogs": {
Language.English: {Topic.All_Disciplines: SPRINGER_CATALOG_EN_URL},
Language.German: {
Topic.All_Disciplines: SPRINGER_CATALOG_DE_URL,
Topic.Emergency_Nursing: SPRINGER_NURSING_CATALOG_DE_URL,
},
},
"content": {FileFormat.pdf: SPRINGER_PDF_URL, FileFormat.epub: SPRINGER_EPUB_URL,},
}
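# A minimal lookup sketch (an assumption, not part of the original module)
# showing how a caller could resolve a catalog URL from the nested dict by
# language and topic; an unknown combination simply raises KeyError.
def catalog_url(language, topic):
    """Return the catalog URL registered for a language/topic pair."""
    return urls["catalogs"][language][topic]
# e.g. catalog_url(Language.German, Topic.Emergency_Nursing)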
| 1,140 |
cvutils/misc/iterators.py
|
MercierLucas/cv_utils
| 0 |
2026206
|
class SquareSlider2D:
def __init__(self, field, stride, image) -> None:
self.field = field
self.stride = stride
self.image = image
self.height = image.shape[0]
self.width = image.shape[1]
def __iter__(self):
for y in range(0, self.height - self.field + 1, self.stride):
for x in range(0, self.width - self.field + 1, self.stride):
yield y//self.stride, x//self.stride, self.image[y : y + self.field, x : x + self.field]
class Slider:
def __init__(self, field, stride, image) -> None:
self.field_y = field[0]
self.field_x = field[1]
self.stride = stride
self.image = image
self.height = image.shape[0]
self.width = image.shape[1]
def __iter__(self):
for y in range(0, self.height - self.field_y + 1, self.stride):
for x in range(0, self.width - self.field_x + 1, self.stride):
yield y//self.stride, x//self.stride, self.image[y : y + self.field_y, x : x + self.field_x]
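# A minimal usage sketch (an assumption, not part of the original module; it
# also assumes numpy is available, which the classes above only rely on
# implicitly through .shape and slicing). It slides a 2x3 window with stride 1
# over a small dummy image and prints the grid position of each patch.
if __name__ == "__main__":
    import numpy as np
    image = np.arange(5 * 6).reshape(5, 6)
    for row, col, patch in Slider(field=(2, 3), stride=1, image=image):
        print(row, col, patch.shape)  # every patch has shape (2, 3)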
| 1,058 |
pyrc/utils/hooks.py
|
JamesWrigley/pyrc
| 0 |
2026170
|
import re
import functools
class command(object):
def __init__(self, matcher=None):
self._matcher = matcher
def __call__(self, func):
# Default the command's name to an exact match of the function's name.
# ^func_name$
matcher = self._matcher
if matcher is None:
matcher = r'^%s$' % func.__name__
# convert matcher to regular expression
matcher = re.compile(matcher)
@functools.wraps(func)
def wrapped_command(*args, **kwargs):
return func(*args, **kwargs)
wrapped_command._type = "COMMAND"
wrapped_command._matcher = matcher
return wrapped_command
class privmsg(object):
def __init__(self, matcher=None):
self._matcher = matcher
def __call__(self, func):
# convert matcher to regular expression
matcher = re.compile(self._matcher)
@functools.wraps(func)
def wrapped_command(*args, **kwargs):
return func(*args, **kwargs)
wrapped_command._type = "PRIVMSG"
wrapped_command._matcher = matcher
return wrapped_command
def interval(milliseconds):
def wrapped(func):
@functools.wraps(func)
def wrapped_command(*args, **kwargs):
return func(*args, **kwargs)
wrapped_command._type = "REPEAT"
wrapped_command._interval = milliseconds
return wrapped_command
return wrapped
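# A minimal usage sketch (an assumption, not part of the original module)
# showing how the decorators tag plain functions with the metadata a
# dispatcher could later inspect (_type, _matcher, _interval).
if __name__ == "__main__":
    @command()
    def greet():
        return "hello"
    @privmsg(r"^!ping$")
    def ping(message):
        return "pong"
    @interval(5000)
    def heartbeat():
        return "beat"
    print(greet._type, greet._matcher.pattern)      # COMMAND ^greet$
    print(ping._type, ping._matcher.pattern)        # PRIVMSG ^!ping$
    print(heartbeat._type, heartbeat._interval)     # REPEAT 5000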
| 1,443 |
show/dropcounters.py
|
sg893052/sonic-utilities
| 91 |
2025361
|
import click
import utilities_common.cli as clicommon
#
# 'dropcounters' group ###
#
@click.group(cls=clicommon.AliasedGroup)
def dropcounters():
"""Show drop counter related information"""
pass
# 'configuration' subcommand ("show dropcounters configuration")
@dropcounters.command()
@click.option('-g', '--group', required=False)
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def configuration(group, verbose):
"""Show current drop counter configuration"""
cmd = "dropconfig -c show_config"
if group:
cmd += " -g '{}'".format(group)
clicommon.run_command(cmd, display_cmd=verbose)
# 'capabilities' subcommand ("show dropcounters capabilities")
@dropcounters.command()
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def capabilities(verbose):
"""Show device drop counter capabilities"""
cmd = "dropconfig -c show_capabilities"
clicommon.run_command(cmd, display_cmd=verbose)
# 'counts' subcommand ("show dropcounters counts")
@dropcounters.command()
@click.option('-g', '--group', required=False)
@click.option('-t', '--counter_type', required=False)
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def counts(group, counter_type, verbose):
"""Show drop counts"""
cmd = "dropstat -c show"
if group:
cmd += " -g '{}'".format(group)
if counter_type:
cmd += " -t '{}'".format(counter_type)
clicommon.run_command(cmd, display_cmd=verbose)
| 1,502 |
EasyDeep/net_structures/__init__.py
|
strawsyz/straw
| 2 |
2025647
|
from .Alex import AlexNet
from .FNN import FNNWithDropout
from .CNN1D import MyCNN1D
from .FCN import FCNVgg16
| 111 |