max_stars_repo_path (string, 4-182 chars) | max_stars_repo_name (string, 6-116 chars) | max_stars_count (int64, 0-191k) | id (string, 7 chars) | content (string, 100-10k chars) | size (int64, 100-10k) |
---|---|---|---|---|---|
test3_02.py
|
yoojunwoong/python_review01
| 0 |
2023982
|
# Compute the sum and average of values using range and while.
# Find the sum of 1 ~ 10 (for loop).
sum = 0
cnt = 0
for i in range(1, 11):
    sum += i
    cnt += 1
print(cnt)
print(sum)
print(sum / cnt)
# while version
# Find the sum of 1 ~ 10 (while loop).
a = 1
sum2 = 0
while a < 11:
    sum2 += a
    a += 1
print(a - 1)  # count (cnt)
print(sum2)
print(sum2 / (a - 1))
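# Illustrative alternative (not part of the original exercise): the same count,
# sum and average can be computed with the built-ins len() and sum(), since a
# range object supports both.
nums = range(1, 11)
print(len(nums))              # count
print(sum(nums))              # sum
print(sum(nums) / len(nums))  # average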
| 337 |
src/sparsify/models/projects_model.py
|
dhuangnm/sparsify
| 152 |
2024848
|
# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
DB model classes for a project's model file
"""
import datetime
import logging
import os
import uuid
from peewee import CharField, DateTimeField, ForeignKeyField, TextField
from playhouse.sqlite_ext import JSONField
from sparseml.utils import create_dirs
from sparsify.models.jobs import Job
from sparsify.models.projects import PROJECTS_DIR_NAME, BaseProjectModel, Project
__all__ = ["ProjectModel", "PROJECTS_MODEL_DIR_NAME"]
_LOGGER = logging.getLogger(__name__)
PROJECTS_MODEL_DIR_NAME = "model"
class ProjectModel(BaseProjectModel):
"""
DB model for a project's model file.
A project must have only one model file stored in the DB.
"""
model_id = CharField(primary_key=True, default=lambda: uuid.uuid4().hex)
project = ForeignKeyField(
Project, unique=True, backref="models", on_delete="CASCADE"
)
created = DateTimeField(default=datetime.datetime.now)
source = TextField(null=True, default=None)
job = ForeignKeyField(Job, null=True, default=None)
file = TextField(null=True, default=None)
analysis = JSONField(null=True, default=None)
@property
def dir_path(self) -> str:
"""
:return: the local directory path for where the model file is stored
"""
project_id = self.project_id # type: str
return os.path.join(
self._meta.storage.root_path,
PROJECTS_DIR_NAME,
project_id,
PROJECTS_MODEL_DIR_NAME,
)
@property
def file_path(self) -> str:
"""
:return: the local file path to the data file
"""
file_name = self.file # type: str
return os.path.join(self.dir_path, file_name)
def setup_filesystem(self):
"""
Setup the local file system so that it can be used with the data
"""
create_dirs(self.dir_path)
def validate_filesystem(self):
"""
Validate that the local file system and expected files are correct and exist
"""
if not os.path.exists(self.dir_path):
raise FileNotFoundError(
"project model directory at {} does not exist anymore".format(
self.dir_path
)
)
if self.file and not os.path.exists(self.file_path):
raise FileNotFoundError(
"project model file at {} does not exist anymore".format(self.file_path)
)
def delete_filesystem(self):
"""
Delete the model file from the local file system
"""
if self.file:
os.remove(self.file_path)
| 3,239 |
src/output.py
|
MATF-Software-Verification/03_Knut_profajliranje_ivica_vizuelizacija
| 0 |
2025725
|
# -BEGIN BLOCK id: 1 type: ordinary
a = 10
b = 12
c = 0
g = 0
h = 0
f = 0
# -END BLOCK id: 1
# -BEGIN BLOCK id: 2 type: if then
if a > b:
# -END BLOCK id: 2
# -BEGIN BLOCK id: 3 type: ordinary
h = 12
g = 11
# -END BLOCK id: 3
# -BEGIN BLOCK id: 4 type: else
else:
# -END BLOCK id: 4
# -BEGIN BLOCK id: 5 type: ordinary
c = 20
f = 12
# -END BLOCK id: 5
# -BEGIN BLOCK id: 6 type: if then
if g > 10:
# -END BLOCK id: 6
# -BEGIN BLOCK id: 7 type: ordinary
g = 100
# -END BLOCK id: 7
# -BEGIN BLOCK id: 8 type: for
for i in range(10):
# -END BLOCK id: 8
# -BEGIN BLOCK id: 9 type: ordinary
t = 10
f += t
# -END BLOCK id: 9
# -BEGIN BLOCK id: 10 type: ordinary
a = 10
b = 12
# -END BLOCK id: 10
# -BEGIN BLOCK id: 11 type: if then
if a == 10:
# -END BLOCK id: 11
# -BEGIN BLOCK id: 12 type: ordinary
b = 1
# -END BLOCK id: 12
# -BEGIN BLOCK id: 13 type: elif
elif a < 10:
# -END BLOCK id: 13
# -BEGIN BLOCK id: 14 type: ordinary
b = 2
# -END BLOCK id: 14
# -BEGIN BLOCK id: 15 type: elif
elif a > 10:
# -END BLOCK id: 15
# -BEGIN BLOCK id: 16 type: ordinary
b = 3
# -END BLOCK id: 16
# -BEGIN BLOCK id: 17 type: ordinary
c = 13
# -END BLOCK id: 17
| 1,183 |
codeforces/dp动态规划/1200/729B舞台打光.py
|
yofn/pyacm
| 0 |
2024887
|
#!/usr/bin/env python3
# https://codeforces.com/problemset/problem/729/B
# Handle the four directions separately; that should be easier.
# Python nested lists are not well suited as 2D arrays, and CF does not support numpy, so fall back to a flat 1D array.
# Even 1000*1000 times out? Analyze the time complexity!
# Switched to a more efficient implementation (preprocessing: not only faster, it also simplifies the logic).
# Still times out with Python 3.7; finally passes with PyPy.
n, m = list(map(int, input().split()))
ss = [list(map(int, input().split())) for _ in range(n)]
ll = []
ll.append([ss[i][j] for i in range(n) for j in range(m)])              # (top-down)(left-right)
ll.append([ss[i][j] for i in range(n) for j in range(m - 1, -1, -1)])  # (top-down)(right-left)
ll.append([ss[i][j] for j in range(m) for i in range(n)])              # (left-right)(top-down)
ll.append([ss[i][j] for j in range(m) for i in range(n - 1, -1, -1)])  # (left-right)(bottom-top)
cnt = 0
for t in range(4):
    l = ll[t]
    run, nor = (m, n) if t < 2 else (n, m)
    for r in range(nor):
        rs = r * run
        ho = 0  # 0 for no object!
        for i in range(run):
            if l[rs + i] == 1:
                ho = 1
                continue
            cnt += ho
print(cnt)
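# Complexity note (added for clarity): each of the four passes visits every cell
# of the n*m grid exactly once, so the solution runs in O(n*m) time and uses
# O(n*m) extra memory for the four flattened copies stored in ll.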
| 968 |
configs/TimetableConfigs.py
|
mahkons/jbr-flatland
| 3 |
2025783
|
from configs.Config import Config
import torch
from agent.judge.Judge import Judge
class TimeTableConfig(Config):
def __init__(self):
pass
def create_timetable(self):
pass
class JudgeConfig(TimeTableConfig):
def __init__(self, window_size_generator, lr, batch_size, optimization_epochs):
self.window_size_generator = window_size_generator
self.lr = lr
self.batch_size = batch_size
self.optimization_epochs = optimization_epochs
def create_timetable(self):
return Judge(self.window_size_generator, self.lr, self.batch_size, self.optimization_epochs, torch.device("cpu"))
| 647 |
examples/chained_callback.py
|
xinetzone/dash-tests
| 1 |
2024266
|
from dash import dcc, html
from dash.dependencies import Input, Output
from app import app
all_options = {
'America': ['New York City', 'San Francisco', 'Cincinnati'],
'Canada': [u'Montréal', 'Toronto', 'Ottawa']
}
layout = html.Div([
dcc.RadioItems(
id='countries-radio',
options=[{'label': k, 'value': k} for k in all_options.keys()],
value='America'
),
html.Hr(),
dcc.RadioItems(id='cities-radio'),
html.Hr(),
html.Div(id='display-selected-values')
])
@app.callback(
Output('cities-radio', 'options'),
Input('countries-radio', 'value'))
def set_cities_options(selected_country):
return [{'label': i, 'value': i} for i in all_options[selected_country]]
@app.callback(
Output('cities-radio', 'value'),
Input('cities-radio', 'options'))
def set_cities_value(available_options):
return available_options[0]['value']
@app.callback(
Output('display-selected-values', 'children'),
Input('countries-radio', 'value'),
Input('cities-radio', 'value'))
def set_display_children(selected_country, selected_city):
return f'{selected_city} is a city in {selected_country}'
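# Minimal usage sketch (assumptions: `app` imported above is a dash.Dash
# instance defined in app.py, and the entry-point module shown here is
# hypothetical):
#
#   # index.py
#   from app import app
#   import chained_callback
#
#   app.layout = chained_callback.layout
#
#   if __name__ == '__main__':
#       app.run_server(debug=True)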
| 1,166 |
Bot/Tests/Bartest.py
|
Pyruths/DiscordRPbot
| 0 |
2024962
|
import unittest
from Bar import Bar
from Bar import Box
class BarTest(unittest.TestCase):
def setUp(self):
self.b = Bar("bar")
def test_Bar_Init(self):
self.assertEqual("bar",str(self.b),"empty bar string not displaying correctly")
def test_Bar_box(self):
box = Box(2)
self.b.add_box(box)
self.assertEqual("bar [2]",str(self.b), "Added box not properly appended to display")
self.assertEqual(self.b[1],box,"Box not properly added")
self.b.remove_box(0)
self.assertEqual("bar", str(self.b), "Box not removed")
def test_bar_spend_refresh(self):
box1 = Box(1)
box2 = Box(2)
self.b.add_box(box1)
self.b.add_box(box2)
box1.spend()
self.b.spend()
for box in self.b:
self.assertTrue(box.used,"Not all boxes spent")
self.b.refresh()
for box in self.b:
self.assertFalse(box.used,"Not all boxes refreshed")
if __name__ == '__main__':
unittest.main()
| 1,067 |
debexpo/controllers/register.py
|
jadonk/debexpo
| 0 |
2025661
|
# -*- coding: utf-8 -*-
#
# register.py — Register Controller
#
# This file is part of debexpo - https://alioth.debian.org/projects/debexpo/
#
# Copyright © 2008 <NAME> <<EMAIL>>
# Copyright © 2010 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""
Holds the RegisterController.
"""
__author__ = '<NAME>'
__copyright__ = 'Copyright © 2008 <NAME>, Copyright © 2010 <NAME>'
__license__ = 'MIT'
import logging
import random
from datetime import datetime
from debexpo.lib.base import *
from debexpo.lib import constants
from debexpo.lib.email import Email
from debexpo.lib.schemas import RegisterForm
from debexpo.model import meta
from debexpo.model.users import User
import debexpo.lib.utils
log = logging.getLogger(__name__)
class RegisterController(BaseController):
def __init__(self):
"""
Class constructor. Sets c.config for the templates.
"""
c.config = config
def index(self):
"""
Entry point to controller. Displays the index page.
"""
log.debug('Main register form requested')
if config['debexpo.debian_specific'] != 'true':
log.error('debexpo.debian_specific is !true; redirecting to maintainer form')
redirect(url(action='maintainer'))
return render('/register/index.mako')
def _send_activate_email(self, key, recipient):
"""
Sends an activation email to the potential new user.
``key``
Activation key that's already stored in the database.
``recipient``
Email address to send to.
"""
log.debug('Sending activation email')
email = Email('register_activate')
activate_url = 'http://' + config['debexpo.sitename'] + url.current(action='activate', id=key)
email.send([recipient], activate_url=activate_url)
@validate(schema=RegisterForm(), form='register')
def _register_submit(self):
"""
Handles the form submission for a maintainer account registration.
"""
log.debug('Register form validated successfully')
# Activation key.
key = debexpo.lib.utils.random_hash()
u = User(name=self.form_result['name'],
email=self.form_result['email'],
password=debexpo.lib.utils.hash_it(self.form_result['password']),
lastlogin=datetime.now(),
verification=key)
if self.form_result['sponsor'] == '1':
u.status=constants.USER_STATUS_DEVELOPER
meta.session.add(u)
meta.session.commit()
self._send_activate_email(key, self.form_result['email'])
log.debug('New user saved')
return render('/register/activate.mako')
def register(self):
"""
Provides the form for a maintainer account registration.
"""
# Has the form been submitted?
if request.method == 'POST':
log.debug('Maintainer form submitted')
return self._register_submit()
else:
log.debug('Maintainer form requested')
return render('/register/register.mako')
def activate(self, id):
"""
Upon given a verification ID, activate an account.
``id``
ID to use to verify the account.
"""
log.debug('Activation request with key = %s' % id)
if id is None:
log.error('Key is None')
abort(404, 'Key is None')
user = meta.session.query(User).filter_by(verification=id).first()
if user is not None:
log.debug('Activating user "%s"' % user.name)
user.verification = None
meta.session.commit()
else:
log.error('Could not find user; redirecting to main page')
abort(404, 'Could not find user; redirecting to main page')
c.user = user
return render('/register/activated.mako')
| 4,984 |
arvet_slam/systems/visual_odometry/tests/test_libviso_profile.py
|
jskinn/arvet-slam
| 4 |
2025055
|
import unittest
from arvet.core.sequence_type import ImageSequenceType
from arvet_slam.systems.visual_odometry.libviso2 import LibVisOStereoSystem, LibVisOMonoSystem
from arvet_slam.systems.test_helpers.demo_image_builder import DemoImageBuilder, ImageMode
@unittest.skip("Not running profiling")
class TestRunLibVisOProfile(unittest.TestCase):
num_frames = 1000
max_time = 50
speed = 0.1
def test_profile_mono(self, ):
import cProfile as profile
stats_file = "libviso_mono.prof"
system = LibVisOMonoSystem()
image_builder = DemoImageBuilder(
mode=ImageMode.MONOCULAR, stereo_offset=0.15,
width=640, height=480, num_stars=150,
length=self.max_time * self.speed, speed=self.speed,
close_ratio=0.6, min_size=10, max_size=100
)
profile.runctx("run_libviso(system, image_builder, self.num_frames, self.max_time, 0)",
locals=locals(), globals=globals(), filename=stats_file)
def test_profile_stereo(self, ):
import cProfile as profile
stats_file = "libviso_stereo.prof"
system = LibVisOStereoSystem()
image_builder = DemoImageBuilder(
mode=ImageMode.STEREO, stereo_offset=0.15,
width=640, height=480, num_stars=150,
length=self.max_time * self.speed, speed=self.speed,
close_ratio=0.6, min_size=10, max_size=100
)
profile.runctx("run_libviso(system, image_builder, self.num_frames, self.max_time, 0)",
locals=locals(), globals=globals(), filename=stats_file)
def run_libviso(system, image_builder, num_frames, max_time, seed=0):
system.set_camera_intrinsics(image_builder.get_camera_intrinsics(), max_time / num_frames)
system.set_stereo_offset(image_builder.get_stereo_offset())
system.start_trial(ImageSequenceType.SEQUENTIAL, seed=seed)
for idx in range(num_frames):
time = max_time * idx / num_frames
image = image_builder.create_frame(time)
system.process_image(image, time)
system.finish_trial()
| 2,113 |
secrets.py
|
TotalJTM/Interactive-Graduation-Cap
| 0 |
2024355
|
#secrets class to keep my info secret from other people
class Secrets:
    BOT_TOKEN = ''
    WIFI_SSID = ''
    WIFI_PSWD = ''
| 119 |
utils/pvacapi/app.py
|
mrichters/pVACtools
| 96 |
2026145
|
#!/usr/bin/env python3
import connexion
import os
import sys
import time
import argparse
from flask_cors import CORS
from flask import g
from utils.pvacapi.controllers.utils import initialize
from utils.pvacapi.controllers.utils import getIpAddress
def app_parser():
parser = argparse.ArgumentParser(description='pVACapi provides a REST API to pVACtools')
parser.add_argument('--ip-address', help='IP address for the HTTP server to bind. If not provided, the default socket address will be used.')
parser.add_argument('--proxy-ip-address', help='IP address of proxy server or public IP address. If provided, server will send X-Forward headers required for Bokeh to properly work through a proxy server or with AWS private/public IP addresses.')
parser.add_argument('--debug', default=False, action='store_true', help='Start server in debug mode.')
return parser
#FIXME: sanitize sample name
def main(args = None):
if not args:
parser = app_parser()
args = parser.parse_args(sys.argv[1:])
app = connexion.App(
"pVAC-Seq Visualization Server",
specification_dir=os.path.join(
os.path.dirname(__file__),
'config'
),
)
from werkzeug.routing import IntegerConverter as BaseIntConverter
class IntConverter(BaseIntConverter):
regex = r'-?\d+'
# determine IP address and setup CORS
IP_ADDRESS = None
if args.ip_address is None:
IP_ADDRESS = getIpAddress()
else:
IP_ADDRESS = args.ip_address
app.app.IP_ADDRESS = IP_ADDRESS
# add forwarding headers if proxy_ip_address specified
PROXY_IP_ADDRESS = None
if args.proxy_ip_address is not None:
PROXY_IP_ADDRESS = args.proxy_ip_address
@app.app.after_request
def apply_forwarding_headers(response):
response.headers["X-Forwarded-Proto"] = "http"
response.headers["X-Forwarded-Host"] = PROXY_IP_ADDRESS
response.headers["X-Forwarded-For"] = IP_ADDRESS
response.headers["X-Real-IP"] = IP_ADDRESS
return response
app.app.PROXY_IP_ADDRESS = PROXY_IP_ADDRESS
app.app.url_map.converters['int'] = IntConverter
initialize(app.app) #initialize the app configuration
app.add_api('swagger.yaml', arguments={'title': 'API to support pVacSeq user interface for generating reports on pipeline results'})
app.app.secret_key = os.urandom(1024)
# remove all CORS restrictions
CORS(app.app)
# should match IP address with any port, path, or protocol
# CORS(
# app.app,
# origins=r'^(.+://)?' + IP_ADDRESS + r'(:\d+)?(/.*)?$'
# )
print(time.asctime(), "Starting pVACapi server at http://" + IP_ADDRESS + ":8080")
app.run(port=8080, debug=args.debug, threaded=True)
if __name__ == '__main__':
main()
| 2,847 |
plot/plotter.py
|
shoaloak/defragDNS
| 0 |
2024393
|
#!/usr/bin/env python3
from os import listdir
from os.path import isfile, join
import json
import statistics as stat
import sys
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
sns.set('paper', style="whitegrid")
#sns.set('paper', style="white")
import IPython
DATA_LOC = "./data"
SAVE_LOC = "./graphs"
def load_json(data, path):
with open(path, 'r') as f:
for line in f:
datum = json.loads(line)
data.append(datum)
#mtu = int(datum['mtu'])
#if mtu in data.keys():
# data[mtu].append(datum)
#else:
# data[mtu] = [datum]
def load_data():
paths = [join(DATA_LOC,f) for f in listdir(DATA_LOC) if isfile(join(DATA_LOC,f))]
data = []
for path in paths:
try:
load_json(data, path)
except FileNotFoundError:
sys.stderr.write("Could not find {}\n".format(path))
except json.decoder.JSONDecodeError:
sys.stderr.write("Could not parse {}\n".format(path))
return data
def analyze_data(ls):
# calculate standard deviation
stdev = stat.stdev(ls)
# take mean
mean = stat.mean(ls)
return stdev, mean
# Seaborn does this for you when using Pandas :)
def process_data(data, mtu):
# create lists
aggr_data = {}
for key in data[mtu][0].keys():
aggr_data[key] = []
# aggregate results
for datum in data[mtu]:
for key, value in datum.items():
aggr_data[key].append(value)
# remove bloat
del aggr_data['datetime']
del aggr_data['mtu']
# analyze
analyzed_data = {}
for key, value in aggr_data.items():
stdev, mean = analyze_data(value)
analyzed_data[key] = round(mean, 1)
analyzed_data['stdev_'+key] = round(stdev, 3)
# calculate percentage success
# recalculate percentage? seems ok
return analyzed_data
# Based on <NAME>
# https://stackoverflow.com/questions/43214978/seaborn-barplot-displaying-values
def show_values_on_bars(axs):
def _show_on_single_plot(ax):
one_percent_graph = ax.get_ylim()[1] / 100
for bar, line in zip(ax.patches, ax.lines):
# center x bar
_x = bar.get_x() + bar.get_width() / 2
# just above stdev
_y = line._y[1] + one_percent_graph
if np.isnan(_y):
# no stdev
_y = bar.get_y() + bar.get_height() + one_percent_graph
value = '{:.2f}'.format(bar.get_height())
ax.text(_x, _y, value, ha="center", fontsize=8)
if isinstance(axs, np.ndarray):
for idx, ax in np.ndenumerate(axs):
_show_on_single_plot(ax)
else:
_show_on_single_plot(axs)
def create_graph(df):
for ip in ['ipv4', 'ipv6']:
mtu = 'mtu4' if ip == 'ipv4' else 'mtu6'
mtus = df[mtu].unique()
mtus.sort()
mtus = mtus[::-1]
for rslv in ['stub', 'rslv']:
# colors
#ax = sns.barplot(x='mtu', y=f'%failed_queries_{ip}_{rslv}',
# data=df, order=mtus, ci='sd', errcolor="red",
# palette=sns.color_palette("cubehelix"))
ax = sns.barplot(x=mtu, y=f'%failed_queries_{ip}_{rslv}',
data=df, order=mtus, ci='sd',
facecolor=(0.9295040369088812, 0.9295040369088812,
0.9295040369088812),
errcolor="black", edgecolor="black")
show_values_on_bars(ax)
ax.set(xlabel='MTU size', ylabel='Percentage failed DNS UDP queries')
plt.savefig(join(SAVE_LOC, f'{ip}_{rslv}.png'), format='png')
plt.savefig(join(SAVE_LOC, f'{ip}_{rslv}.eps'), format='eps')
#plt.show()
#sys.exit(0)
plt.close()
#IPython.embed()
def main():
data = load_data()
df = pd.DataFrame(data)
create_graph(df)
if __name__ == "__main__":
main()
| 4,095 |
impulse/alert/models.py
|
akurihara/impulse
| 2 |
2024173
|
from django.db import models
from django.db.models import Max
from django.urls import reverse
from phonenumber_field.modelfields import PhoneNumberField
from impulse.alert.constants import MONITOR_STATUSES
class Monitor(models.Model):
amount = models.DecimalField(max_digits=10, decimal_places=2)
external_id = models.CharField(db_index=True, max_length=10)
phone_number = PhoneNumberField()
event = models.ForeignKey('event.Event', null=True, related_name='monitors')
datetime_created = models.DateTimeField(auto_now_add=True)
datetime_updated = models.DateTimeField(auto_now=True)
def __unicode__(self):
return str(self.id)
def get_absolute_url(self):
return reverse('monitor-detail', kwargs={'pk': self.id})
@classmethod
def filter_statuses(cls, statuses):
return cls.objects.annotate(
most_recent_status=Max('statuses__status')
).filter(
most_recent_status__in=statuses
)
@property
def current_status(self):
return self.statuses.latest()
class MonitorStatus(models.Model):
class Meta:
get_latest_by = 'id'
monitor = models.ForeignKey('Monitor', related_name='statuses')
status = models.PositiveSmallIntegerField(choices=MONITOR_STATUSES.items())
datetime_created = models.DateTimeField(auto_now_add=True)
datetime_updated = models.DateTimeField(auto_now=True)
def __unicode__(self):
return str(self.id)
| 1,481 |
URL Shortener.py
|
datahackformation/Posts-Python-Tips-Tricks
| 1 |
2025679
|
import pyshorteners
def shorten(url):
    link = pyshorteners.Shortener()
    return link.tinyurl.short(url)
if __name__ == "__main__":
    url = input("Ingresa el link: ")
    print(f"\n{shorten(url)}")
| 205 |
get_sql/views.py
|
LiuJjjxxx/xiang02
| 0 |
2023076
|
# -*- coding: utf-8 -*-
from django.shortcuts import render,HttpResponse
from common.mymako import render_json
from home_application.models import warn_sum,server_value,network_server_cpu_value,network_server_flow_value,yewu_cpu_value
# Create your views here.
# Ops dashboard (1): trend chart in the center
def get_warn_sum_count(request):
fuwuqi = warn_sum.objects.filter(name='fuwuqi').order_by('-date')[:5].values('date', 'warn_count')
wangluo = warn_sum.objects.filter(name='wangluo').order_by('-date')[:5].values('date', 'warn_count')
sum = len(wangluo)
data = {}
fuwuqi_count = []
wangluo_count = []
timeer = []
for i in range(sum):
fuwuqi_count.insert(i, fuwuqi[i].get("warn_count"))
wangluo_count.insert(i, wangluo[i].get("warn_count"))
timeer.insert(i, fuwuqi[i].get("date").strftime('%d/%m %H:%M'))
data["fuwuqi"] = fuwuqi_count
data["wangluo"] = wangluo_count
data["date"] = timeer
return render_json(data)
# Ops dashboard (1): storage space ranking
def get_server_value_in_sql(request):
index = server_value.objects.filter(index=1)
start = index[len(index) - 1].date
server_value_sum = server_value.objects.filter(date__range=(start, start)).values()
sum = len(server_value_sum)
data = []
value = {}
for i in range(sum):
value['index'] = server_value_sum[i].get("index")
value['ip'] = server_value_sum[i].get("ip")
value['value'] = server_value_sum[i].get("value")
data.insert(i, value)
value = {}
return render_json(data)
# Ops dashboard (1): traffic ranking
def get_netwokr_server_flow_in_sql(request):
index = network_server_flow_value.objects.filter(index=1)
start = index[len(index) - 1].date
server_value_sum = network_server_flow_value.objects.filter(date__range=(start, start)).values()
sum = len(server_value_sum)
data = []
value = {}
for i in range(sum):
value['index'] = server_value_sum[i].get("index")
value['ip'] = server_value_sum[i].get("ip")
value['value'] = server_value_sum[i].get("value")
data.insert(i, value)
value = {}
return render_json(data)
# Ops dashboard (1): network device CPU ranking
def get_netwokr_server_cpu_in_sql(request):
index = network_server_cpu_value.objects.filter(index=1)
start = index[len(index) - 1].date
server_value_sum = network_server_cpu_value.objects.filter(date__range=(start, start)).values()
sum = len(server_value_sum)
data = []
value = {}
for i in range(sum):
value['index'] = server_value_sum[i].get("index")
value['ip'] = server_value_sum[i].get("ip")
value['value'] = server_value_sum[i].get("value")
data.insert(i, value)
value = {}
return render_json(data)
def get_yewu_value(request):
cpu_value = yewu_cpu_value.objects.filter(name='zabbix').order_by('-date')[:5].values('date', 'cpu_value')
sum = len(cpu_value)
data = {}
yewu_count = []
timeer = []
for i in range(sum):
yewu_count.insert(i, cpu_value[i].get("cpu_value"))
timeer.insert(i, cpu_value[i].get("date").strftime('%d/%m %H:%M'))
data["fuwuqi"] = yewu_count
data["date"] = timeer
return render_json(data)
| 3,177 |
All Tracks/Core CS/Algorithms/Implementation/Climbing the Leaderboard/Solution.py
|
Shaikh-Nabeel/HackerRank-Solutions
| 13 |
2025937
|
#!/bin/python3
import sys
"""
Step a): we create the array 'scores' which contains all the distinct values of the scores.
The index corresponding to each value of 'scores' is equal to the leaderboard position (with an offset of 1)
associated with that score.
Example:
100 100 50 40 40 20 10
scores = [100, 50, 40, 20, 10]
100 is at position 0 and it is the highest value in the leaderboard.
50 is at position 1 and it is the second highest value in the leaderboard and so on.
Step b): for each of Alice's scores, we search 'scores' (starting from the last element, since 'scores' is decreasing)
for the first value that is >= Alice's score and print the corresponding position.
Be careful that the position changes depending on whether Alice's score is equal to or less than that value.
Thanks to the variable 'current_index', we keep track of the elements of 'scores' already visited and we don't iterate
through the entire array for every one of Alice's scores, avoiding a TLE (Time Limit Exceeded) for large values of n.
"""
if __name__ == '__main__':
n = int(input().strip())
all_scores = list(map(int, input().strip().split(" ")))
scores = []
previous_score = all_scores[0]
scores.append(previous_score)
# a)
for i in range(1, len(all_scores)):
if all_scores[i] != previous_score:
scores.append(all_scores[i])
previous_score = all_scores[i]
m = int(input().strip())
alice_scores = list(map(int, input().strip().split(" ")))
current_index = len(scores) - 1
# b)
for alice_score in alice_scores:
searching_position = True
while searching_position:
if current_index < 0:
print(1)
searching_position = False
else:
current_score = scores[current_index]
if current_score == alice_score:
print(current_index + 1)
searching_position = False
elif current_score > alice_score:
print(current_index + 2)
searching_position = False
else:
current_index -= 1
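# Worked example (illustrative, not from the problem statement): with the
# leaderboard from the docstring above, scores = [100, 50, 40, 20, 10]. An
# Alice score of 70 would take position 2 (only 100 is higher), a score of 40
# ties at position 3, and a score of 120 would take position 1.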
| 2,159 |
malwareconfig/decoders/sakula.py
|
BA7JCM/RATDecoders
| 905 |
2025818
|
import re
from struct import unpack
from malwareconfig import crypto
from malwareconfig.common import Decoder
from malwareconfig.common import string_printable
class Sakula(Decoder):
decoder_name = "Sakula"
decoder__version = 1
decoder_author = "@kevthehermit"
decoder_description = "Sakula Rat"
def __init__(self):
self.config = {}
@staticmethod
def config_v1(config_list):
print("Found Version < 1.3")
config_dict = {}
counter = 1
for config in config_list:
config_dict['Domain'] = config[0].rstrip(b'\x88')
config_dict['URI GET1 Folder'] = config[1].rstrip(b'\x88')
config_dict['URI GET3 File'] = config[2].rstrip(b'\x88')
config_dict['URI GET2 File'] = config[3].rstrip(b'\x88')
config_dict['URI GET3 Arg'] = config[4].rstrip(b'\x88')
config_dict['Copy File Name'] = config[5].rstrip(b'\x88')
config_dict['Service Name'] = config[6].rstrip(b'\x88')
config_dict['Service Description'] = config[7].rstrip(b'\x88')
config_dict['Waiting Time'] = unpack('>H', config[8][:2].rstrip(b'\x88'))[0]
counter += 1
return config_dict
@staticmethod
def config_v2(config_list):
print("Found Version > 1.2")
config_dict = {}
counter = 1
for config in config_list:
config_dict['{}_Domain'.format(counter)] = config[0].rstrip(b'V')
config_dict['{}_URI GET1 Folder'.format(counter)] = config[1].rstrip(b'V')
config_dict['{}_URI GET3 File'.format(counter)] = config[2].rstrip(b'V')
config_dict['{}_URI GET2 File'.format(counter)] = config[3].rstrip(b'V')
config_dict['{}_URI GET3 Arg'.format(counter)] = config[4].rstrip(b'V')
config_dict['{}_Copy File Name'.format(counter)] = config[5].rstrip(b'V')
config_dict['{}_AutoRun Key'.format(counter)] = config[6].rstrip(b'V')
config_dict['{}_Copy File Path'.format(counter)] = config[7].rstrip(b'V')
config_dict['{}_Campaign ID'.format(counter)] = config[8].rstrip(b'V')
config_dict['{}_Waiting Time'.format(counter)] = unpack('<H', config[9][:2].rstrip(b'V'))[0]
counter += 1
return config_dict
def get_config(self):
'''
This is the main entry
:return:
'''
config_dict = {}
file_data = self.file_info.file_data
# RE for 1.0 and 1.1
re_pattern1 = b'([ -~\x88]{100})([ -~\x88]{100})([ -~\x88]{100})([ -~\x88]{100})([ -~\x88]{100})([ -~\x88]{100})([ -~\x88]{100})([ -~\x88]{100})(.{12}\x77\x77\x77\x77)'
# RE for 1.2, 1.3, 1.4
re_pattern2 = b'([ -~]{50})([ -~]{50})([ -~]{50})([ -~]{50})([ -~]{50})([ -~]{50})([ -~]{50})([ -~]{50})([ -~]{12})(0uVVVVVV)'
xor_data = crypto.decrypt_xor('\x88', file_data)
config_list = re.findall(re_pattern1, xor_data)
for c in config_list:
if any(b".exe" in s for s in c):
config_dict = Sakula.config_v1(config_list)
# XOR for later versions
xor_data = crypto.decrypt_xor('V', file_data)
config_list = re.findall(re_pattern2, xor_data)
for c in config_list:
if any(b".exe" in s for s in c):
config_dict = Sakula.config_v2(config_list)
# Set the config to the class for use
self.config = config_dict
| 3,468 |
week1.py
|
KillianK19/csws-week1
| 0 |
2023651
|
from asyncore import dispatcher_with_send
famous_person = "<NAME>"
message = "''Goals never come easy. No one gives you goals.''"
full_message = f"{famous_person.title()} once said {message.title()}"
print (full_message)
| 222 |
Lessons/lesson18.py
|
Javascript-void0/hxgv
| 0 |
2024966
|
# Keyboard import, use keyboard
# "Base Video Game"
from tkinter import Tk, Canvas, TclError
from time import sleep
window=Tk()
window.title("Animation")
cvs=Canvas(window, height=800, width=1200, bg="blue")
cvs.pack()
ball=cvs.create_oval(50,50,100,100,outline="red", fill="yellow")
def get_pos(id): #id=ball
pos=cvs.coords(id)
x=(pos[0]+pos[2])//2
y=(pos[1]+pos[3])//2
return x,y
def key_press(event):
print(event.keysym)
if event.keysym=="Up":
cvs.move(ball, 0, -5)
if event.keysym=="Down":
cvs.move(ball, 0, 5)
if event.keysym=="Left":
cvs.move(ball, -5, 0)
if event.keysym=="Right":
cvs.move(ball, 5, 0)
def left_click(event):
print(event.x, event.y)
cvs.coords(ball, event.x-25, event.y-25, event.x+25, event.y+25)
cvs.bind_all('<Key>', key_press)
cvs.bind_all('<Button-1>', left_click)
while True:
sleep(0.005)
try:
window.update()
except TclError:
break
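# Note (added for clarity): the manual update loop above stands in for Tk's
# usual window.mainloop(); sleep(0.005) caps the refresh at roughly 200 updates
# per second, and catching TclError exits cleanly once the window is closed.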
| 982 |
api/app/events/repository.py
|
cclauss/Baobab
| 0 |
2022893
|
from app import db
from app.events.models import Event
class EventRepository():
    @staticmethod
    def get_by_id(event_id):
        return db.session.query(Event).get(event_id)
| 187 |
alipay/aop/api/response/AlipayOpenStsTokenGetResponse.py
|
articuly/alipay-sdk-python-all
| 0 |
2022855
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayOpenStsTokenGetResponse(AlipayResponse):
def __init__(self):
super(AlipayOpenStsTokenGetResponse, self).__init__()
self._expiration = None
self._security_token = None
@property
def expiration(self):
return self._expiration
@expiration.setter
def expiration(self, value):
self._expiration = value
@property
def security_token(self):
return self._security_token
@security_token.setter
def security_token(self, value):
self._security_token = value
def parse_response_content(self, response_content):
response = super(AlipayOpenStsTokenGetResponse, self).parse_response_content(response_content)
if 'expiration' in response:
self.expiration = response['expiration']
if 'security_token' in response:
self.security_token = response['security_token']
| 1,046 |
06_Joining Data with pandas/04_Merging Ordered and Time-Series Data/04_Using merge_asof() to study stocks.py
|
mohd-faizy/DataScience-With-Python
| 5 |
2025879
|
'''
04 - Using merge_asof() to study stocks
Using merge_asof() to study stocks
You have a feed of stock market prices that you record. You attempt to
track the price every five minutes. Still, due to some network latency,
the prices you record are roughly every 5 minutes. You pull your price
logs for three banks, JP Morgan (JPM), Wells Fargo (WFC), and Bank Of
America (BAC). You want to know how the price changes of the two other banks
compare to JP Morgan's. Therefore, you will need to merge these three logs into
one table. Afterward, you will use the pandas .diff() method to compute the
price change over time. Finally, plot the price changes so you can review your
analysis.
The three log files have been loaded for you as tables named jpm, wells, and bac.
-----------------------------
jpm.head()
date_time close
0 2017-11-17 15:35:17 98.1200
1 2017-11-17 15:40:04 98.1800
2 2017-11-17 15:45:01 97.7307
3 2017-11-17 15:50:55 97.7400
4 2017-11-17 15:55:00 97.8150
wells.head()
date_time close
0 2017-11-17 15:35:08 54.3227
1 2017-11-17 15:40:00 54.3200
2 2017-11-17 15:45:32 54.1900
3 2017-11-17 15:50:07 54.1700
4 2017-11-17 15:55:00 54.1841
bac.head()
date_time close
0 2017-11-17 15:35:17 26.552
1 2017-11-17 15:40:06 26.552
2 2017-11-17 15:45:05 26.388
3 2017-11-17 15:50:34 26.378
4 2017-11-17 15:55:06 26.383
-----------------------------
Instructions
- Use merge_asof() to merge jpm (left table) and wells together on the date_time
column, where the rows with the nearest times are matched, and with suffixes=('', '_wells').
Save to jpm_wells.
- Use merge_asof() to merge jpm_wells (left table) and bac together on the date_time column,
where the rows with the closest times are matched, and with suffixes=('_jpm', '_bac'). Save
to jpm_wells_bac.
- Using price_diffs, create a line plot of the close price of JPM, WFC, and BAC only.
'''
# Use merge_asof() to merge jpm and wells
jpm_wells = pd.merge_asof(jpm, wells, on='date_time',
suffixes=('', '_wells'), direction='nearest')
# Use merge_asof() to merge jpm_wells and bac
jpm_wells_bac = pd.merge_asof(jpm_wells, bac, on='date_time',
suffixes=('_jpm', '_bac'), direction='nearest')
# Compute price diff
price_diffs = jpm_wells_bac.diff()
# Plot the price diff of the close of jpm, wells and bac only
price_diffs.plot(y=['close_jpm', 'close_wells', 'close_bac'])
plt.show()
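# Illustrative note (not part of the exercise): merge_asof() requires both
# frames to be sorted on the key column; with direction='nearest', each jpm row
# is paired with the closest wells/bac timestamp, e.g. jpm 15:35:17 matches
# wells 15:35:08 rather than 15:40:00.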
| 2,517 |
core/NeuralEssentials/visuals.py
|
davidwagnerkc/TensorMONK
| 0 |
2026015
|
""" TensorMONK's :: NeuralEssentials """
import sys
import torch
import torch.nn.functional as F
import torchvision.utils as tutils
import imageio
import visdom
if sys.version_info.major == 3:
from functools import reduce
# =========================================================================== #
def MakeGIF(image_list, gif_name):
r"""Makes a gif using a list of images.
"""
if not gif_name.endswith(".gif"):
gif_name += ".gif"
imageio.mimsave(gif_name, [imageio.imread(x) for x in image_list])
# =========================================================================== #
class VisPlots(object):
r"""Visdom plots to monitor weights (histograms and 2D kernels larger than
3x3), and responses.
Args:
env: name of your environment, default = main
server: server address, default = None
"""
def __init__(self, env="main", server=None):
if server is None:
self.visplots = visdom.Visdom(env=env)
else:
self.visplots = visdom.Visdom(env=env, server=server)
def histograms(self, data, vis_name="hist"):
r""" Plots histograms of weights. For Model.state_dict(), parameter
names after used to name the plots.
Args:
data: Accepts nn.Parameter, torch.Tensor and Model.state_dict()
vis_name: required for nn.Parameter, and torch.Tensor,
default = "hist"
"""
if isinstance(data, dict):
# parameter generator (essentially, model.state_dict())
for p in data.keys():
if "weight" in p and "weight_g" not in p and \
"Normalization" not in p and "bias" not in p:
# ignore normalization weights (gamma's & beta's) and bias
newid = self._trim_name(p)
self.visplots.histogram(X=data[p].data.cpu().view(-1),
opts={"numbins": 46,
"title": newid},
win=newid)
elif (isinstance(data, torch.nn.parameter.Parameter) or
isinstance(data, torch.Tensor)):
# pytorch tensor or parameter
self.visplots.histogram(X=data.data.cpu().view(-1),
opts={"numbins": 46, "title": vis_name},
win=vis_name)
else:
raise NotImplementedError
def show_images(self, data, vis_name="images", png_name=None,
normalize=False, height=None, max_samples=512,
attention=False):
r""" Plots responses in RGB (C=3) and grey (C=1), requires BCHW
torch.Tensor. When C != 1/3, reorganizes the BxCxHxW to BCx1xHxC if
attention is False, else Bx1xHxC.
Args:
data: 4D torch.Tensor
vis_name: name for visdom plots, default = "images"
png_name: used to save png images, default = None
normalize: normalized the range to 0-1
height: max height of image, retains aspect ratio. default = None
max_samples: limit to speed up plotting, default = 512
attention: computes attention from BxCxHxW to Bx1xHxW using the l2 norm;
normalize is applied. default = False
"""
if isinstance(data, torch.Tensor):
if data.dim() != 4:
return None
# pytorch tensor
data = data.data.cpu()
if attention:
data = data.pow(2).sum(1, True).pow(.5)
if normalize or attention: # adjust range to 0-1
data = self._normalize_01(data)
# adjust 4d tensor and reduce samples when too many
sz = data.size()
multiplier = 1
if sz[1] not in [1, 3]: # BxCxHxW to BCx1xHxC
data = data.view(-1, 1, *sz[2:])
multiplier = sz[1]
if sz[0]*multiplier > max_samples:
samples = reduce(lambda x, y: max(x, y),
[x*multiplier for x in range(sz[0]) if
x*multiplier <= max_samples])
data = data[:samples]
# resize image when height is not None
if height is not None:
sz = (height, int(float(height)*sz[3]/sz[2]))
data = F.interpolate(data, size=sz)
self.visplots.images(data, nrow=max(4, int(data.size(0)**0.5)),
opts={"title": vis_name}, win=vis_name)
# save a png if png_name is defined
if png_name is not None:
tutils.save_image(data, png_name)
def show_weights(self, data, vis_name="weights", png_name=None,
min_width=3, max_samples=512):
r""" Plots weights (histograms and images of 2D kernels larger than
min_width). 2D kernels are normalized between 0-1 for visualization.
Requires a minimum of 4 kernels to plot images.
Args:
data: Accepts nn.Parameter, torch.Tensor and Model.state_dict()
vis_name: name for visdom plots, default = "weights"
png_name: used to save png images, default = None
min_width: only plots images if the kernel width and height are
above min_width
max_samples: limit to speed up plotting, default = 512
"""
# all histograms
self.histograms(data, vis_name)
# only convolution weights when kernel size > 3
n = 0
if isinstance(data, dict):
# parameter generator (essentially, model.state_dict())
for p in data.keys():
if data[p].dim() == 4 and data[p].size(2) > min_width and \
data[p].size(3) > min_width:
pass
newid = self._trim_name(p)
ws = data[p].data.cpu()
sz = ws.size()
if sz[1] not in [1, 3]:
ws = ws.view(-1, 1, sz[2], sz[3])
sz = ws.size()
if 4 < sz[0] <= max_samples:
ws = self._normalize_01(ws)
self.visplots.images(ws, nrow=max(4, int(sz[0]**0.5)),
opts={"title": "Ws-"+newid},
win="Ws-"+newid)
if png_name is not None:
tutils.save_image(ws, png_name.rstrip(".png") +
"-ws{}".format(n) + ".png")
n += 1
elif isinstance(data, torch.nn.parameter.Parameter):
# pytorch parameter
if data.dim() == 4 and data.size(2) > min_width and \
data.size(3) > min_width:
data = data.data.cpu()
sz = data.size()
if sz[1] not in [1, 3]:
data = data.view(-1, 1, sz[2], sz[3])
sz = data.size()
if sz[0] <= max_samples:
data = self._normalize_01(data)
self.visplots.images(data, nrow=max(4, int(sz[0]**0.5)),
opts={"title": "Ws-"+vis_name},
win="Ws-"+vis_name)
if png_name is not None:
tutils.save_image(data, png_name.rstrip(".png") + "-ws-" +
vis_name + ".png")
else:
raise NotImplementedError
def _normalize_01(self, tensor):
_min = tensor.min(2, True)[0].min(3, True)[0]
_max = tensor.max(2, True)[0].max(3, True)[0]
return tensor.add(-_min).div(_max - _min + 1e-6)
@staticmethod
def _trim_name(name):
return name.replace("NET46.", "").replace("Net46.",
"") .replace("network.", "")
# visplots = VisPlots()
# hasattr(visplots, "visplots")
# visplots.show_images(torch.rand(10, 10, 200, 200), height=32)
# visplots.show_weights(torch.nn.Parameter(torch.rand(10, 10, 7, 7)))
| 8,260 |
intake_bluesky_tiff_series/__init__.py
|
ronpandolfi/intake-bluesky-tiff-series
| 0 |
2024616
|
import tifffile
from intake_bluesky_files import FileHandlerPlugin
class TIFSeriesPlugin(FileHandlerPlugin):
name = 'TIF Series'
extensions = ['.tiff', '.tif']
def __call__(self, *args, **kwargs):
return tifffile.imread(self.path)
def metadata(self):
return {key: tag.value for key, tag in tifffile.TiffFile(self.path).pages[0].tags.items()}
| 380 |
DQM/Integration/python/clients/pixel_dqm_sourceclient-live_cfg.py
|
pasmuss/cmssw
| 0 |
2025089
|
import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.Eras import eras
process = cms.Process("PIXELDQMLIVE", eras.Run2_2017)
live=True #set to false for lxplus offline testing
offlineTesting=not live
TAG ="PixelPhase1"
process = cms.Process("PIXELDQMLIVE")
process.MessageLogger = cms.Service("MessageLogger",
debugModules = cms.untracked.vstring('siPixelDigis',
'siStripClusters',
'SiPixelRawDataErrorSource',
'SiPixelDigiSource'),
cout = cms.untracked.PSet(threshold = cms.untracked.string('ERROR')),
destinations = cms.untracked.vstring('cout')
)
#----------------------------
# Event Source
#-----------------------------
# for live online DQM in P5
if (live):
process.load("DQM.Integration.config.inputsource_cfi")
# for testing in lxplus
elif(offlineTesting):
process.load("DQM.Integration.config.fileinputsource_cfi")
#-----------------------------
# DQM Environment
#-----------------------------
process.load("DQMServices.Components.DQMEnvironment_cfi")
process.load("DQM.Integration.config.environment_cfi")
#----------------------------
# DQM Live Environment
#-----------------------------
process.dqmEnv.subSystemFolder = TAG
process.dqmSaver.tag = TAG
process.DQMStore.referenceFileName = '/dqmdata/dqm/reference/pixel_reference_pp.root'
if (process.runType.getRunType() == process.runType.hi_run):
process.DQMStore.referenceFileName = '/dqmdata/dqm/reference/pixel_reference_hi.root'
if (process.runType.getRunType() == process.runType.cosmic_run):
process.DQMStore.referenceFileName = '/dqmdata/dqm/reference/pixel_reference_cosmic.root'
#-----------------------------
# Magnetic Field
#-----------------------------
process.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff')
#-------------------------------------------------
# GEOMETRY
#-------------------------------------------------
process.load("Configuration.StandardSequences.GeometryRecoDB_cff")
#-------------------------------------------------
# GLOBALTAG
#-------------------------------------------------
# Condition for P5 cluster
if (live):
process.load("DQM.Integration.config.FrontierCondition_GT_cfi")
# Condition for lxplus: change and possibly customise the GT
elif(offlineTesting):
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
from Configuration.AlCa.GlobalTag import GlobalTag as gtCustomise
process.GlobalTag = gtCustomise(process.GlobalTag, 'auto:run2_data', '')
#-----------------------
# Reconstruction Modules
#-----------------------
# Real data raw to digi
process.load("Configuration.StandardSequences.RawToDigi_Data_cff")
process.load("RecoLocalTracker.SiPixelClusterizer.SiPixelClusterizer_cfi")
process.load("RecoLocalTracker.SiStripZeroSuppression.SiStripZeroSuppression_cfi")
process.load("RecoLocalTracker.SiStripClusterizer.SiStripClusterizer_RealData_cfi")
process.siPixelDigis.IncludeErrors = True
process.siPixelDigis.InputLabel = cms.InputTag("rawDataCollector")
process.siStripDigis.InputLabel = cms.InputTag("rawDataCollector")
#--------------------------------
# Heavy Ion Configuration Changes
#--------------------------------
if (process.runType.getRunType() == process.runType.hi_run):
process.load('Configuration.StandardSequences.ReconstructionHeavyIons_cff')
process.load('Configuration.StandardSequences.RawToDigi_Repacked_cff')
process.siPixelDigis.InputLabel = cms.InputTag("rawDataRepacker")
# Phase1 DQM
process.load("DQM.SiPixelPhase1Config.SiPixelPhase1OnlineDQM_cff")
process.PerModule.enabled=True
process.PerReadout.enabled=True
process.OverlayCurvesForTiming.enabled=False
#--------------------------
# Service
#--------------------------
process.AdaptorConfig = cms.Service("AdaptorConfig")
#--------------------------
# Filters
#--------------------------
# HLT Filter
# 0=random, 1=physics, 2=calibration, 3=technical
process.hltTriggerTypeFilter = cms.EDFilter("HLTTriggerTypeFilter",
SelectedTriggerType = cms.int32(1)
)
process.load('HLTrigger.HLTfilters.hltHighLevel_cfi')
process.hltHighLevel.HLTPaths = cms.vstring( 'HLT_ZeroBias_*' , 'HLT_ZeroBias1_*' , 'HLT_PAZeroBias_*' , 'HLT_PAZeroBias1_*', 'HLT_PAL1MinimumBiasHF_OR_SinglePixelTrack_*', 'HLT*SingleMu*')
process.hltHighLevel.andOr = cms.bool(True)
process.hltHighLevel.throw = cms.bool(False)
#--------------------------
# Scheduling
#--------------------------
process.DQMmodules = cms.Sequence(process.dqmEnv*process.dqmSaver)
if (process.runType.getRunType() == process.runType.hi_run):
process.SiPixelClusterSource.src = cms.InputTag("siPixelClustersPreSplitting")
process.Reco = cms.Sequence(process.siPixelDigis*process.pixeltrackerlocalreco)
else:
process.Reco = cms.Sequence(process.siPixelDigis*process.siStripDigis*process.siStripZeroSuppression*process.siStripClusters*process.siPixelClusters)
process.p = cms.Path(
process.hltHighLevel #trigger selection
*process.Reco
*process.DQMmodules
*process.siPixelPhase1OnlineDQM_source
*process.siPixelPhase1OnlineDQM_harvesting
)
### process customizations included here
from DQM.Integration.config.online_customizations_cfi import *
process = customise(process)
#--------------------------------------------------
# Heavy Ion Specific Fed Raw Data Collection Label
#--------------------------------------------------
print "Running with run type = ", process.runType.getRunType()
| 5,582 |
azure_sdk_trim/azure_sdk_trim.py
|
clumio-code/azure-sdk-trim
| 2 |
2025822
|
#!/usr/bin/env python3
#
# Copyright 2021 Clumio, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Google Style 4 spaces, 100 columns:
# https://google.github.io/styleguide/pyguide.html
#
"""Simple script to purge mostly useless Azure SDK API versions.
The Azure SDK for Python is over 600MB and growing. The main reason for the
size and growth is that each release gets added internally and all prior
releases are kept. This is a troublesome design which does not seem likely to
be addressed in the near future. This script deletes most, but not all, API
versions, as multiple versions are required for importing the models. This
keeps a high compatibility level while trimming more than half of the space used.
So Long & Thanks For All The Fish.
https://github.com/Azure/azure-sdk-for-python/issues/11149
https://github.com/Azure/azure-sdk-for-python/issues/17801
"""
from __future__ import annotations
import argparse
import importlib.util
import logging
import pathlib
import re
import shutil
import subprocess
import sys
from typing import Optional, Sequence
from humanize import filesize # type: ignore
logger = logging.getLogger(__name__)
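# Illustrative example (hypothetical folder names, not taken from the real SDK):
# a versioned API directory before and after trimming, assuming its models.py
# only imports v2021_02_01:
#
#   mgmt/compute/                     mgmt/compute/
#       models.py                         models.py
#       v2019_07_01/         -->          v2021_02_01/
#       v2020_12_01/
#       v2021_02_01/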
class Error(Exception):
"""Local Errors."""
def parse_args(argv: Sequence[str]) -> argparse.Namespace:
"""Parse the command line arguments."""
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument('--verbose', '-v', action='store_true', help='Verbose mode.')
parser.add_argument(
'--azure_dir',
type=str,
help='Optional path to the Azure SDK directory. It will try to find it automatically.',
)
return parser.parse_args(argv[1:])
def disk_usage(path: pathlib.Path) -> int:
"""Returns the disk usage size in byte for the given path.
This depends on the unix 'du' tool to be present.
"""
process = subprocess.run(['du', '-ksx', path], check=True, capture_output=True, text=True)
return 1024 * int(process.stdout.split('\t', 1)[0])
def get_base_dir() -> pathlib.Path:
"""Returns the base directory where the Azure SDK is installed."""
spec = importlib.util.find_spec('azure')
if spec is None or not spec.submodule_search_locations:
raise Error('No azure package in the python path.')
return pathlib.Path(spec.submodule_search_locations[0])
class VersionedApiDir:
"""Class to handle API dirs.
Such directories contain one or more version folders such as v7.0,
v2020_12_01 or v2021_02_01_preview and a file named models.py which
imports the models from specific versions. The most recent, default, version
is assumed to be in that list of imports.
We scrape the import lines in the models file to detect that this is a
multi-versioned API with potential folders to be trimmed. The import list is
used to whitelist the folders we need to keep.
"""
def __init__(self, path: pathlib.Path):
self._path = path.parent if path.name == 'models.py' else path
self._versions: Optional[tuple[str, ...]] = None
@property
def path(self) -> pathlib.Path:
"""Returns the path of the API directory."""
return self._path
def _parse_models(self) -> tuple[str, ...]:
"""Parse models.py to find which versions are in imported and in use."""
models_path = self._path / 'models.py'
if not models_path.exists():
return ()
versions: list[str] = []
# match: re.Match
for line in models_path.read_text().splitlines():
if match := re.match(r'from [.](v\d[^.]+)[.]models import *', line):
versions.append(match.group(1))
return tuple(versions)
@property
def versions(self) -> tuple[str, ...]:
"""Returns the versions declared in models.py."""
if self._versions is None:
self._versions = self._parse_models()
return self._versions
@property
def is_versioned(self) -> bool:
"""Return True if this is a versioned API directory."""
return bool(self.versions)
def trim_other_versions(self) -> int:
"""Removed the unused versions of the current API."""
if not self.is_versioned:
return 0
deleted = []
for version_dir in self.path.glob('v*_*'):
if not version_dir.is_dir():
continue
if version_dir.name in self.versions:
continue
if not re.match(r'v\d', version_dir.name):
continue
shutil.rmtree(version_dir)
deleted.append(version_dir)
return len(deleted)
def find_api_dirs(base_dir: pathlib.Path) -> set[VersionedApiDir]:
"""Find the API directories with multiple versions."""
api_dirs = set()
for sub_path in base_dir.rglob('models.py'):
api_dir = VersionedApiDir(sub_path)
if api_dir.is_versioned:
api_dirs.add(api_dir)
return api_dirs
def purge_api_dir(api_dir: VersionedApiDir):
"""Purge unnecessary folders from a versioned API directory."""
usage = disk_usage(api_dir.path)
logger.debug('%s is using %s', api_dir.path, filesize.naturalsize(usage))
deleted = api_dir.trim_other_versions()
new_usage = disk_usage(api_dir.path)
logger.debug(
'Saved %s by deleting %d versions.', filesize.naturalsize(usage - new_usage), deleted
)
def purge_old_releases(base_dir: pathlib.Path):
"""Purge old SDK versions from the Azure installation directory."""
usage = disk_usage(base_dir)
logger.info('%s is using %s.', base_dir, filesize.naturalsize(usage))
api_dirs = find_api_dirs(base_dir)
for api_dir in api_dirs:
purge_api_dir(api_dir)
new_usage = disk_usage(base_dir)
logger.info('%s is now using %s.', base_dir, filesize.naturalsize(new_usage))
logger.info('Saved %s.', filesize.naturalsize(usage - new_usage))
def main(argv: Optional[Sequence[str]] = None):
"""Main."""
if argv is None:
argv = sys.argv
args = parse_args(argv)
if args.verbose:
logging.basicConfig(
format='%(asctime)s %(levelname)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.DEBUG,
)
logger.setLevel(logging.DEBUG)
else:
logging.basicConfig(
format='%(message)s',
level=logging.INFO,
)
base_dir = get_base_dir() if not args.azure_dir else pathlib.Path(args.azure_dir)
purge_old_releases(base_dir)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| 7,132 |
nannybot/vision_control.py
|
byungsoo/bakg-home-assistant
| 0 |
2026201
|
#!/usr/bin/env python3
import os
import argparse
import time
from picamera.array import PiRGBArray
from picamera import PiCamera
from imutils.video import FPS
import cv2
import pdb
import vision_utils as vu
import boost_utils as bu
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-c", "--confidence", default=.5,
help="confidence threshold")
ap.add_argument("-d", "--display", type=int, default=0,
help="switch to display image on screen")
ap.add_argument("-i", "--input_image", type=str,
help="path to optional input image file")
ap.add_argument("-v", "--input_video", type=str,
help="path to optional input video file")
args = vars(ap.parse_args())
camera, rawCapture = vu.init_picamera()
if args.get("input_video"):
print("[INFO] opening video file...")
vs = cv2.VideoCapture(args["input_video"])
frames = []
for i in range(100):
try:
frame = vs.read()[1]
frames.append(frame)
except AttributeError:
break
print("Read %d frames" % len(frames))
elif args.get("input_image"):
frames = [cv2.imread(args["input_image"])]
else:
frames = camera.capture_continuous(rawCapture, format="bgr", use_video_port=True)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output.avi',fourcc, 20.0, (640,480))
# out2 = cv2.VideoWriter('output_anno.avi',fourcc, 20.0, (640,480))
# time.sleep(1)
fps = FPS().start()
fcnt = 0
for frame in frames:
try:
if args.get("input_video") or args.get("input_image"):
image = frame
else:
image = frame.array
# clear the stream in preparation for the next frame
rawCapture.truncate(0)
image_for_result = image.copy()
cv2.imshow("Input", image)
# print("Frame captured...")
# use the NCS to acquire predictions
predictions = vu.detect_face(image)
# print("Prediction ran...")
# loop over our predictions
for (i, pred) in enumerate(predictions):
# extract prediction data for readability
(pred_conf, pred_boxpts) = pred
if pred_conf > args["confidence"]:
# print prediction to terminal
print("[INFO] Prediction #{}: confidence={}, "
"boxpoints={}".format(i, pred_conf,
pred_boxpts))
xloc, yloc, xsize, ysize = vu.get_rel_pos_size(pred_boxpts)
print(xloc, yloc, xsize, ysize)
if xloc < 0.3:
bu.send_cmd('left', (0.5-xloc)*3)
elif xloc > 0.7:
bu.send_cmd('right', (xloc-0.5)*3)
elif xsize < 0.5:
bu.send_cmd('front', 3)
if args["display"] > 0:
# build a label
vu.anno_face(image_for_result, pred)
if args["display"] > 0:
# display the frame to the screen
print("showing image")
# cv2.imshow("Output", image_for_result)
out.write(image_for_result)
fps.update()
# if "ctrl+c" is pressed in the terminal, break from the loop
except KeyboardInterrupt:
break
# if there's a problem reading a frame, break gracefully
except AttributeError:
break
# stop the FPS counter timer
fps.stop()
out.release()
# destroy all windows if we are displaying them
if args["display"] > 0:
cv2.destroyAllWindows()
# display FPS information
print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
| 3,732 |
tests/test_createdrop.py
|
gjbadros/sqlbag
| 16 |
2025741
|
from __future__ import absolute_import, division, print_function, unicode_literals
from sqlbag import (
S,
create_database,
database_exists,
drop_database,
temporary_database,
)
def exists(db_url):
e = database_exists(db_url)
e2 = database_exists(db_url, test_can_select=True)
assert e == e2
return e
def test_createdrop(tmpdir):
sqlite_path = str(tmpdir / "testonly.db")
urls = ["postgresql:///sqlbag_testonly", "mysql+pymysql:///sqlbag_testonly"]
for db_url in urls:
drop_database(db_url)
assert not drop_database(db_url)
assert not exists(db_url)
assert create_database(db_url)
assert exists(db_url)
if db_url.startswith("postgres"):
assert create_database(db_url, template="template1", wipe_if_existing=True)
else:
assert create_database(db_url, wipe_if_existing=True)
assert exists(db_url)
assert drop_database(db_url)
assert not exists(db_url)
db_url = "sqlite://" # in-memory special case
assert exists(db_url)
assert not create_database(db_url)
assert exists(db_url)
assert not drop_database(db_url)
assert exists(db_url)
db_url = "sqlite:///" + sqlite_path
assert not database_exists(db_url)
# selecting works because sqlite auto-creates
assert database_exists(db_url, test_can_select=True)
drop_database(db_url)
create_database(db_url)
assert exists(db_url)
drop_database(db_url)
assert not database_exists(db_url)
assert database_exists(db_url, test_can_select=True)
with temporary_database("sqlite") as dburi:
with S(dburi) as s:
s.execute("select 1")
| 1,719 |
covid_app/backend/app/worker.py
|
TommyNeeld/covid-guidence
| 0 |
2026085
|
from celery import Celery
from celery.utils.log import get_task_logger
from config import settings
logger = get_task_logger(__name__)
celery_app = Celery('tasks', broker=f'pyamqp://guest@{settings.celery_host}//')
@celery_app.task
def add(x, y):
res = x + y
logger.info("Adding %s + %s, res: %s" % (x, y, res))
return res
| 339 |
dashboard/models.py
|
yusufom/ERP
| 0 |
2025181
|
from django.db import models
from django.contrib.auth.models import User
from django.conf import settings
User=settings.AUTH_USER_MODEL
# Create your models here.
class dashboard(models.Model):
number = models.CharField(max_length=5,null=True,blank=True)
mssg = models.CharField(max_length=10000,null=False,blank=False)
img = models.ImageField(blank=True,null=True)
class todo(models.Model):
user = models.ForeignKey(User,on_delete=models.CASCADE,default=None)
title = models.CharField(max_length=100, null=False, blank=False)
description = models.TextField(max_length=5000, null=True, blank=True)
complete = models.BooleanField(default=False)
created = models.DateTimeField(auto_now_add=True)
duedate = models.DateField(max_length=100, blank=True,null=True)
def __str__(self):
return self.title
| 851 |
app/main.py
|
ravigoel08/MagicBrickScraper
| 4 |
2025521
|
from src import *
import streamlit as st
import pandas as pd
import time
import json
def main():
st.title("Streamlit-MBScraper")
cityname = st.sidebar.selectbox("Select City Name", constants.STATE)
property_type = st.sidebar.multiselect("Select Properties Type", constants.PROPERTY)
property_type = ','.join(property_type)
bhk = st.sidebar.slider("BHK", 1, 4, 1, 1)
validate = st.sidebar.checkbox("Check Data Availability")
if validate:
data = page.total_page(cityname, property_type, bhk)
st.sidebar.write(data)
add_scrapebutton = st.sidebar.checkbox("Start Scraping")
if add_scrapebutton:
scraped_data = scrape.mb_scraper(data, cityname, property_type,bhk)
cleandata = cleaner.data_cleaner(scraped_data, data)
df = pd.DataFrame(cleandata, columns=constants.COLUMNS)
if not df.empty:
tmp_download_link = export.export_csv(df)
st.markdown(tmp_download_link, unsafe_allow_html=True)
st.json(df.to_json(orient="records"))
if __name__ == "__main__":
main()
| 1,082 |
pa/usr/03b_Imu_fits.py
|
mlipatov/paint_atmospheres
| 4 |
2023476
|
# Requires: a file with limb darkening fit information that includes the intensity values
# a .npy file with locations and values of the lowest I(mu) / I_1
# and the lowest I'(mu) / I_1 for every (T, g, lambda)
# Outputs: a plot with I(mu) fits for the (T, g, lambda) with maximum and median deviations
# information about these fits
# information about (T, g, lambda) with the lowest I(mu) / I_1 and the lowest I'(mu) / I_1
# a plot of the corresponding fits
# Notes: to obtain the required files, run calc_limbdark with the -s option, plus 03a_Imu_fits_min.py
import numpy as np
import pickle
import pa.lib.limbdark as limbdark
import pa.lib.fit as ft
import matplotlib.pyplot as plt
from matplotlib import rc
# given an array of mu, returns an array of p-functions for each mu:
# 0: mu
# 1: p_i
def pi(mu):
return np.transpose(np.array([ np.ones_like(mu), mu, mu**2, mu**3, mu**4]))
# points for plots
# inputs: a_ij, I, index of location in I, indices of m_j, p_ij
def Iplot(a, I, ii, im, p):
aa = a[ tuple(ii) ][im, :]
Ifit = np.sum(aa * p, axis=-1)
Igrid = I[tuple(ii)]
Ifit = Ifit / Igrid[-1]
Igrid = Igrid / Igrid[-1]
return Ifit, Igrid
iodir = '../../' # location of the input/output directory
# unpickle the limb darkening information
with open(iodir + 'data/limbdark_m01.pkl', 'rb') as f:
ld = pickle.load(f)
wl = ld.lam # 1221 wavelength
g = ld.g # 11 gravity
T = ld.T # 61 temperature
bounds = ld.bounds
I = ld.I # (1221, 17, 11, 61) = (wavelength, mu, gravity, temperature)
a = ld.fit_params # (61, 11, 1221, 15) = (temperature, gravity, wavelength, parameter index)
sh = a.shape
# unpickle the minima of intensity fits and their derivatives
Im = np.load(iodir + 'Imin.npy')
# location where the intensity fit is lowest
imin = np.unravel_index(np.argmin(Im[..., 1]), Im[..., 1].shape)
# location where the derivative is lowest
ider = np.unravel_index(np.argmin(Im[..., 3]), Im[..., 3].shape)
# set the mu partition
ft.set_muB(bounds)
# a_ij coefficients: (61, 11, 1221, 3, 5) = (temperature, gravity, wavelength, interval, function)
a = a.reshape( (sh[0], sh[1], sh[2], ft.m, ft.n) )
# 17 mu
mu = ft.mu_arr
# functions at 17 mu: 2D
p = pi(mu)
# intervals where 17 mu are found
i = np.searchsorted(ft.muB_arr, mu, side='right') - 1
# at each location, in the corresponding interval, for each wavelength,
# sum up the product of fit parameters and functions of mu
# (61, 11, 1221, 17, 5) -> (61, 11, 1221, 17) = (temperature, gravity, wavelength, mu)
Ifit = np.sum(a[ ..., i, : ] * p[ np.newaxis, np.newaxis, np.newaxis, :, : ], axis=-1)
# permute the dimensions of CK04 intensity grid: (61, 11, 1221, 17) = (temperature, gravity, wavelength, mu)
I = np.transpose(I, axes=[3, 2, 0, 1])
# intensity at mu = 1 from the grid
I1 = I[...,-1][...,np.newaxis]
# relative error in intensity
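# (the tiny additive constant avoids division by zero where the grid intensity at mu = 1 is zero)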
Ierr = np.abs( (Ifit - I) / (I1 + 1e-300) )
# maximum and median of maximum differences for (temperature, gravity, wavelength) triples
Ierr_max = np.amax(Ierr, axis=-1)
maxerr = np.nanmax(Ierr_max)
imax = np.unravel_index(np.nanargmax(Ierr_max), Ierr_max.shape)
n = np.count_nonzero(~np.isnan(Ierr_max)) # number of non-nan entries
mederr = np.nanmedian(Ierr_max)
dist = np.abs(Ierr_max - mederr)
imed = np.array(np.unravel_index(np.argsort(dist, axis=None), dist.shape))
## points to plot
# mu values to plot
x = np.linspace(0, 1, 100)
# p values to plot
px = pi(x)
# intervals
i = np.searchsorted(ft.muB_arr, x, side='right') - 1
# intensities
Imaxfit, Imax = Iplot(a, I, imax, i, px)
Imedfit, Imed = Iplot(a, I, imed[:, 0], i, px)
Iminfit, Imin = Iplot(a, I, imin, i, px)
Iderfit, Ider = Iplot(a, I, ider, i, px)
# print relevant values
print('Maximum error is ' + str(maxerr))
print('This is realized at ')
print('T = ' + str(T[imax[0]]))
print('g = ' + str(g[imax[1]]))
print('lambda = ' + str(wl[imax[2]]))
print()
print('Median error is ' + str(mederr))
print('There are ' + str(n) + ' locations.')
print('One of the locations whose error is closest to this is ')
print('T = ' + str(T[imed[0][0]]))
print('g = ' + str(g[imed[1][0]]))
print('lambda = ' + str(wl[imed[2][0]]))
print('The error at this location is ' + str(Ierr_max[tuple(imed[:,0])]))
print()
print('Smallest relative value of intensity is ' + str(Im[imin][1]))
print('The location is ')
print('T = ' + str(T[imin[0]]))
print('g = ' + str(g[imin[1]]))
print('lambda = ' + str(wl[imin[2]]))
print('mu = ' + str(Im[imin][0]))
print()
print('Smallest relative value of the derivative is ' + str(Im[ider][3]))
print('The location is ')
print('T = ' + str(T[ider[0]]))
print('g = ' + str(g[ider[1]]))
print('lambda = ' + str(wl[ider[2]]))
print('mu = ' + str(Im[ider][2]))
print()
# interval bounds for plotting
boundsp = np.concatenate( ([0], bounds, [1]) )
plt.rcParams.update({'font.size': 18})
rc('font',**{'family':'serif','serif':['Computer Modern']})
rc('text', usetex=True)
fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True, sharey=False,
gridspec_kw = {'height_ratios':[1, 0.34]},
figsize=(6, 6))
ax1.plot(x, Imaxfit, color='b', zorder=1)
ax1.scatter(mu, Imax, color='k', s=20, zorder=2)
ax1.set_ylabel(r'$I_\nu / I_1$')
ax1.set_ylim((-0.05, 1.05))
ax1.plot(x, Imedfit, color='g', zorder=1)
ax1.scatter(mu, Imed, color='k', s=20, facecolors='w', zorder=2, alpha=1)
for b in boundsp:
ax1.axvline(x=b, color='k', alpha=0.5, lw=0.5)
ax2.plot(mu, Ierr[tuple(imax)], color='b', linewidth=1, linestyle='--', zorder=1)
ax2.scatter(mu, Ierr[tuple(imax)], color='b', s=20, zorder=2)
ax2.plot(mu, Ierr[tuple(imed[:,0])], color='g', linewidth=1, linestyle='--', zorder=1)
ax2.scatter(mu, Ierr[tuple(imed[:,0])], color='g', s=20, facecolors='w', zorder=2, alpha=1)
ax2.margins(y=0.2)
ax2.set_yscale('log')
ax2.set_yticks([1e-9, 1e-6, 1e-3])
ax2.set_ylabel(r'$\left|\delta I_\nu / I_1\right|$', labelpad=10)
ax2.set_xlabel(r'$\mu$')
for b in boundsp:
ax2.axvline(x=b, color='k', alpha=0.5, lw=0.5)
fig.subplots_adjust(hspace=0.1)
fig.tight_layout()
fig.savefig(iodir + 'Imu.pdf')
# plot the locations with the most negative I and the most negative derivative of I
fig, ax = plt.subplots(1, 1, figsize=(6, 6))
ax.plot(x, Iminfit, color='b', zorder=1)
ax.scatter(mu, Imin, color='k', s=20, zorder=2)
ax.set_ylabel(r'$I_\nu / I_1$')
ax.set_ylim((-0.05, 1.05))
ax.plot(x, Iderfit, color='g', zorder=1)
ax.scatter(mu, Ider, color='k', s=20, facecolors='w', zorder=2, alpha=1)
for b in boundsp:
ax.axvline(x=b, color='k', alpha=0.5, lw=0.5)
fig.tight_layout()
fig.savefig(iodir + 'Imu_min.pdf')
| 6,588 |
lib/ansible/module_utils/alicloud_oss.py
|
talenhao/alibaba.alicloud
| 0 |
2025000
|
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c) 2017-present Alibaba Group Holding Limited. <NAME> <<EMAIL>>
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os
try:
import footmark
import footmark.oss
HAS_FOOTMARK = True
except ImportError:
HAS_FOOTMARK = False
class AnsibleACSError(Exception):
pass
def acs_common_argument_spec():
return dict(
alicloud_access_key=dict(aliases=['acs_access_key', 'ecs_access_key', 'access_key']),
alicloud_secret_key=dict(aliases=['acs_secret_access_key', 'ecs_secret_key', 'secret_key']),
)
def oss_bucket_argument_spec():
spec = acs_common_argument_spec()
spec.update(
dict(
alicloud_region=dict(aliases=['acs_region', 'ecs_region', 'region']),
            bucket=dict(aliases=['bucket_name', 'name'], type='str', required=True)
)
)
return spec
def get_oss_connection_info(module):
""" Check module args for credentials, then check environment vars access_key """
access_key = module.params.get('alicloud_access_key')
secret_key = module.params.get('alicloud_secret_key')
region = module.params.get('alicloud_region')
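    # Resolution order: explicit module parameters first, then the ALICLOUD_*, ACS_*, and ECS_* environment variables.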
if not access_key:
if 'ALICLOUD_ACCESS_KEY' in os.environ:
access_key = os.environ['ALICLOUD_ACCESS_KEY']
elif 'ACS_ACCESS_KEY_ID' in os.environ:
access_key = os.environ['ACS_ACCESS_KEY_ID']
elif 'ACS_ACCESS_KEY' in os.environ:
access_key = os.environ['ACS_ACCESS_KEY']
elif 'ECS_ACCESS_KEY' in os.environ:
access_key = os.environ['ECS_ACCESS_KEY']
else:
# in case access_key came in as empty string
module.fail_json(msg="access key is required")
if not secret_key:
if 'ALICLOUD_SECRET_KEY' in os.environ:
secret_key = os.environ['ALICLOUD_SECRET_KEY']
elif 'ACS_SECRET_ACCESS_KEY' in os.environ:
secret_key = os.environ['ACS_SECRET_ACCESS_KEY']
elif 'ACS_SECRET_KEY' in os.environ:
secret_key = os.environ['ACS_SECRET_KEY']
elif 'ECS_SECRET_KEY' in os.environ:
secret_key = os.environ['ECS_SECRET_KEY']
else:
# in case secret_key came in as empty string
module.fail_json(msg="access secret key is required")
if not region:
if 'ALICLOUD_REGION' in os.environ:
region = os.environ['ALICLOUD_REGION']
elif 'ACS_REGION' in os.environ:
region = os.environ['ACS_REGION']
elif 'ACS_DEFAULT_REGION' in os.environ:
region = os.environ['ACS_DEFAULT_REGION']
elif 'ECS_REGION' in os.environ:
region = os.environ['ECS_REGION']
else:
module.fail_json(msg="region is required")
oss_params = dict(acs_access_key_id=access_key, acs_secret_access_key=secret_key, user_agent='Ansible-Provider-Alicloud')
return region, oss_params
def get_bucket_connection_info(module):
""" Check module args for credentials, then check environment vars access_key """
region, oss_params = get_oss_connection_info(module)
bucket_name = module.params.get('bucket')
if bucket_name is None:
module.fail_json(msg="bucket name is required")
oss_params.update(dict(bucket_name=bucket_name))
return region, oss_params
def connect_to_oss(acs_module, region, **params):
conn = acs_module.connect_to_oss(region, **params)
return conn
def connect_to_oss_bucket(acs_module, region, **params):
conn = acs_module.connect_to_bucket(region, **params)
return conn
def oss_bucket_connect(module):
""" Return an oss bucket connection"""
region, oss_params = get_bucket_connection_info(module)
try:
return connect_to_oss_bucket(footmark.oss, region, **oss_params)
except AnsibleACSError as e:
module.fail_json(msg=str(e))
def oss_service_connect(module):
""" Return an oss service connection"""
region, oss_params = get_oss_connection_info(module)
try:
return connect_to_oss(footmark.oss, region, **oss_params)
except AnsibleACSError as e:
module.fail_json(msg=str(e))
| 5,709 |
backprop/models/t5/models_list.py
|
lucky7323/backprop
| 0 |
2024470
|
t5_small = {
"description": "A small version of the T5 model by Google. This is a text generation model that can be finetuned to solve virtually any text based task.",
"tasks": ["text-generation", "summarisation"],
"init_kwargs": {
"model_path": "t5-small"
},
"details": {
"num_parameters": 60506624,
"max_text_length": 512,
"text-generation": {
"languages": ["eng"],
"description": "Summarise text with `summarize: some text`. Translate English to German, French, and Romanian with `translate English to German: Some sentence.`, `translate English to French: Some sentence.`, and `translate English to Romanian: Some sentence.`.",
"finetunable": True
},
"credits": [
{
"name": "Google",
"url": "https://arxiv.org/abs/1910.10683"
},
{
"name": "<NAME>",
"url": "https://huggingface.co/transformers/model_doc/t5.html"
}
]
}
}
t5_base = {
"description": "A base version of the T5 model by Google. This is a text generation model that can be finetuned to solve virtually any text based task.",
"tasks": ["text-generation", "summarisation"],
"init_kwargs": {
"model_path": "t5-base"
},
"details": {
"num_parameters": 222903552,
"max_text_length": 512,
"text-generation": {
"languages": ["eng"],
"description": "Summarise text with `summarize: some text`. Translate English to German, French, and Romanian with `translate English to German: Some sentence.`, `translate English to French: Some sentence.`, and `translate English to Romanian: Some sentence.`.",
"finetunable": True
},
"credits": [
{
"name": "Google",
"url": "https://arxiv.org/abs/1910.10683"
},
{
"name": "<NAME>",
"url": "https://huggingface.co/transformers/model_doc/t5.html"
}
]
}
}
models = {
"t5-small": t5_small,
"t5-base": t5_base
}
| 2,166 |
tst/test_blackbox.py
|
geoalgo/A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning
| 8 |
2026080
|
import numpy as np
from blackbox import BlackboxOffline
def test_blackbox():
n = 20
dim = 2
X_test = np.random.rand(n, dim)
y_test = np.random.rand(n, 1)
blackbox = BlackboxOffline(
X=X_test,
y=y_test,
)
for x, y in zip(X_test, y_test):
assert np.allclose(blackbox(x), y)
| 326 |
assignment8/main.py
|
nicedi/ML_course_projects
| 0 |
2025140
|
# -*- coding: utf-8 -*-
import numpy as np
#import matplotlib.pyplot as plt
#from matplotlib import cm
import utils
from collaborative_filtering import CF
#%% Load the data
datapath = 'ml-latest-small'
n_movie = 9125
n_user = 671
column_movie_dict, ratings, mask = \
utils.parse_movielens(datapath, n_user, n_movie)
#%% mean normalization
ratings_mean = utils.ratings_mean(ratings, mask)
ratings -= ratings_mean
#%% Split the dataset
# Split ratings by partitioning the elements of the mask matrix that equal 1; the numbers of users and movies stay the same
# Note: read the np.where documentation to understand its behavior and the format of its return value
ones = np.where(mask == 1)
np.random.seed(1)
idx = np.random.permutation(len(ones[0]))
# Hold out 2000 ratings for testing
train_ones = [ones[0][idx[:-2000]], ones[1][idx[:-2000]]]
test_ones = [ones[0][idx[-2000:]], ones[1][idx[-2000:]]]
train_mask = np.zeros_like(mask)
train_mask[train_ones] = 1
test_mask = np.zeros_like(mask)
test_mask[test_ones] = 1
#%% Set the number of latent factors and create the model
n_factor = 10
lambd = 10 # weight of the regularization term
model = CF(n_user, n_movie, n_factor, lambd)
#%% Train the model
# Task 1: implement the predict and update methods of the CF class
n_iter = 10
for i in range(n_iter):
model.update(ratings, train_mask)
model.evaluate(ratings, test_mask)
utils.plot_loss(model.trainloss, model.testloss)
#%% Inspect how the recommender behaves
# To avoid recommending movies the user has already rated, do the following
reverse_mask = np.ones_like(mask)
reverse_mask[ones] = 0
all_predictions = (model.predict() + ratings_mean) * reverse_mask
#%% Recommend movies for a given user. (User - Item)
userid = 0 # take the first user as an example
prediction = all_predictions[userid]
recommend_ids = np.argsort(prediction)[::-1]
# Top 10 movies the user is likely to enjoy
for i in range(10):
print('{0}#: {1}. Predicted Rating:{2:.2f}'.format(i+1, column_movie_dict[recommend_ids[i]][1], prediction[recommend_ids[i]]))
#%% Find users with similar tastes and recommend based on their movie ratings. (User - User)
# Try different "distance" measures
_, rank = utils.euclidean_rank(model.U, model.U[userid])
#_, rank = utils.cosine_rank(model.U, model.U[userid])
# Recommend based on the two most similar users.
# First similar user
prediction = all_predictions[rank[0]]
recommend_ids = np.argsort(prediction)[::-1]
for i in range(10):
print('{0}#: {1}. Predicted Rating:{2:.2f}'.format(i+1, column_movie_dict[recommend_ids[i]][1], prediction[recommend_ids[i]]))
print()
# Second similar user
prediction = all_predictions[rank[1]]
recommend_ids = np.argsort(prediction)[::-1]
for i in range(10):
print('{0}#: {1}. Predicted Rating:{2:.2f}'.format(i+1, column_movie_dict[recommend_ids[i]][1], prediction[recommend_ids[i]]))
#%% Recommend items similar to the items the user rated highly. (Item - Item)
prediction = all_predictions[userid]
preferred_item = np.argmax(prediction)
# Find other items similar to the highest-rated item
_, rank = utils.euclidean_rank(model.I.T, model.I[:,preferred_item])
#_, rank = utils.cosine_rank(model.U, model.U[userid])
for i in range(10):
print('{0}#: {1}. Predicted Rating:{2:.2f}'.format(i+1, column_movie_dict[rank[i]][1], prediction[rank[i]]))
| 2,755 |
network/layers/gcn.py
|
robtu328/TextBPN
| 49 |
2026096
|
###################################################################
# File Name: gcn.py
# Author: <NAME>
# mail: <EMAIL>
# Created Time: Fri 07 Sep 2018 01:16:31 PM CST
###################################################################
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
class MeanAggregator(nn.Module):
def __init__(self):
super(MeanAggregator, self).__init__()
def forward(self, features, A):
x = torch.bmm(A, features)
return x
class GraphConv(nn.Module):
def __init__(self, in_dim, out_dim, agg):
super(GraphConv, self).__init__()
self.in_dim = in_dim
self.out_dim = out_dim
self.weight = nn.Parameter(torch.FloatTensor(in_dim * 2, out_dim))
self.bias = nn.Parameter(torch.FloatTensor(out_dim))
init.xavier_uniform_(self.weight)
init.constant_(self.bias, 0)
self.agg = agg()
def forward(self, features, A):
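        # Concatenate each node's features with its neighbourhood aggregate (mean over A),
        # then apply a shared linear map followed by ReLU.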
b, n, d = features.shape
assert (d == self.in_dim)
agg_feats = self.agg(features, A)
cat_feats = torch.cat([features, agg_feats], dim=2)
out = torch.einsum('bnd,df->bnf', (cat_feats, self.weight))
out = F.relu(out + self.bias)
return out
class GCN(nn.Module):
def __init__(self, input, output):
super(GCN, self).__init__()
self.bn0 = nn.BatchNorm1d(input, affine=False)
self.conv1 = GraphConv(input, 256, MeanAggregator)
self.conv2 = GraphConv(256, 1024, MeanAggregator)
self.conv3 = GraphConv(1024, 512, MeanAggregator)
self.conv4 = GraphConv(512, 256, MeanAggregator)
self.prediction = nn.Sequential(
nn.Conv1d(256, output, 1),
nn.ReLU(inplace=True),
nn.Conv1d(output, 64, 1),
nn.ReLU(inplace=True),
nn.Conv1d(64, 2, 1))
def forward(self, x, A):
x = self.bn0(x)
x = x.permute(0, 2, 1)
b, n, c = x.shape
A = A.expand(b, n, n)
x = self.conv1(x, A)
x = self.conv2(x, A)
x = self.conv3(x, A)
x = self.conv4(x, A)
x = x.permute(0, 2, 1)
pred = self.prediction(x)
return pred
| 2,339 |
ntiles/tears/tilts_backtest_tear.py
|
Alexd14/ntiles-backtester
| 0 |
2024670
|
from abc import ABC
from typing import Optional
import pandas as pd
from .backtest_tear import BacktestTear
from .. import plotter
from .. import utils
from ..portals.base_portal import BaseGrouperPortalConstant
class TiltsBacktestTear(BacktestTear, ABC):
"""
generates a tear sheet which shows the sector exposures of a strategy
Must be run after the backtest tear
"""
def __init__(self, ntile_matrix: pd.DataFrame, daily_returns: pd.DataFrame, ntiles, holding_period: int,
long_short: bool, market_neutral: bool, show_uni: bool, factor_data: pd.DataFrame,
group_portal: Optional[BaseGrouperPortalConstant], show_ntile_tilts: bool):
"""
:param ntile_matrix: unstacked and formatted ntiles prepared by Ntiles
:param daily_returns: unstacked and formatted daily returns from Ntiles
:param holding_period: How long we want to hold positions for, represents days
        :param ntiles: number of bins we are testing (1 is the highest factor value, n is the lowest)
        :param long_short: should we compute the spread between ntiles: (1 - n)
:param market_neutral: subtract out the universe returns from the ntile returns?
:param show_uni: should universe return be shown in the spread plot?
:param factor_data: the factor data from Ntiles
:param group_portal: the group portal holding the groups. If this is None then the exposures will not be shown
:param show_ntile_tilts: Should we show the exposures for each individual ntile?
"""
super().__init__(ntile_matrix, daily_returns, ntiles, holding_period, long_short, market_neutral, show_uni)
self._factor_data = factor_data
self._group_portal = group_portal
self._show_ntile_tilts = show_ntile_tilts
self._daily_group_weights = {}
self._full_group_tilt_avg = {}
def compute(self) -> None:
"""
master function for the tear sheet
:return: None
"""
super().compute()
if (self._group_portal is not None) and (self._show_ntile_tilts or self.long_short):
self.compute_tilts()
def plot(self) -> None:
"""
plots the tear sheet
"""
super().plot()
if (self._group_portal is not None) and (self._show_ntile_tilts or self.long_short):
self.make_plots()
def compute_tilts(self):
"""
computes the daily tilt data for each group
:return: None
"""
self.compute_group_weights()
if self.long_short:
self.calculate_long_short_tilts()
def compute_group_weights(self):
"""
computes the weights by group for each ntile
        currently computes the data but needs work, because it requires time-series data adjusted for index constituents;
have to use self.factor_data
:return: None
"""
group_info = self._group_portal.group_information
center_weight = group_info.groupby(group_info).count() / group_info.shape[0]
center_weight = utils.remove_cat_index(center_weight)
if self._show_ntile_tilts:
ntile_keys = self.daily_weights.keys()
else:
ntile_keys = [min(self.daily_weights.keys()), max(self.daily_weights.keys())]
new_col = self.daily_weights[ntile_keys[0]].columns.astype(str).map(self._group_portal.group_mapping)
for ntile in ntile_keys:
frame = self.daily_weights[ntile]
frame.columns = new_col
frame = self.daily_weights[ntile].stack().to_frame('weight')
frame.index.names = ['date', 'group']
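            # Subtract each group's universe share (center_weight) so the stored weights are active tilts relative to the benchmark.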
weights_unstacked = frame.groupby(['date', 'group']).sum().sub(center_weight, level=1, axis=0).unstack()
weights_unstacked.columns = weights_unstacked.columns.droplevel(0)
self._daily_group_weights[ntile] = weights_unstacked
self._full_group_tilt_avg[ntile] = (frame.groupby('group').sum().weight
/ frame.index.levels[0].unique().shape[0]
- center_weight)
def calculate_long_short_tilts(self):
"""
calculates the time series tilts for the long short portfolio
:return: None
"""
ntile_n = max(self._daily_group_weights.keys())
self._daily_group_weights['Long Short'] = (self._daily_group_weights['Ntile: 1']
- self._daily_group_weights[ntile_n])
self._full_group_tilt_avg['Long Short'] = self._daily_group_weights['Long Short'].stack().groupby(
'group').mean()
def make_plots(self):
print('Weights By Group')
for ntile in self._daily_group_weights.keys():
if 'Long Short' == ntile and not self.long_short:
continue
if 'Ntile' in ntile and not self._show_ntile_tilts:
continue
ax = plotter.plot_tilt_hist(self._full_group_tilt_avg[ntile], ntile, self._group_portal.name)
plotter.plot_tilts(self._daily_group_weights[ntile], ntile, self._group_portal.name, ax)
| 5,187 |
FastAPI/Basic/main.py
|
YoshlikMedia/Data-Science
| 4 |
2025649
|
from fastapi import FastAPI
app = FastAPI()
@app.get("/")
def read_root():
return {"name": "Bekhruz"}
| 109 |
worker/views.py
|
sonicaks/Complaints-Portal
| 0 |
2026079
|
from django.contrib.auth import get_user_model
User = get_user_model()
from django.views.generic import ListView, UpdateView
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.http import Http404
from django.urls import reverse_lazy
from student import models
# Create your views here.
class UpdateStatusView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
fields = ['status']
model = models.Complaint
template_name = 'worker/update_status.html'
success_url = reverse_lazy('worker:all_complaints')
def test_func(self):
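        # Only workers whose account type matches the complaint's type may update its status.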
return self.request.user.account_type.all()[0].type == self.get_object().type
def form_valid(self, form):
self.object = form.save(commit=False)
self.object.save()
return super().form_valid(form)
class ComplaintListView(LoginRequiredMixin, UserPassesTestMixin, ListView):
model = models.Complaint
template_name = 'worker/complaint_list.html'
def test_func(self):
return self.request.user.account_type.all()[0].type != 'STU'
def get_queryset(self):
try:
self.all_complaints = models.Complaint.objects.filter(type=self.request.user.account_type.all()[0].type)
        except Exception:
raise Http404
return self.all_complaints
| 1,211 |
tests/test_check_digit.py
|
dsqrt4/itfvalidator-python
| 0 |
2025599
|
import pytest
import pyitf.check_digit as check_digit
@pytest.mark.parametrize("code,expected", [
(31, 0),
(201, 1),
(80037, 2),
(80030, 3),
(123456789, 5),
(4801412551324138493, 8),
])
def test_calculate_check_digit(code, expected):
assert expected == check_digit.calculate_check_digit(code)
@pytest.mark.parametrize("code,expected", [
(31, 310),
(201, 2011),
(80037, 800372),
(80030, 800303),
(123456789, 1234567895),
(4801412551324138493, 48014125513241384938),
])
def test_append_check_digit(code, expected):
assert expected == check_digit.append_check_digit(code)
| 771 |
setup.py
|
bmess/montybot
| 0 |
2025960
|
from distutils.core import setup
setup(
name='MontyBot',
version='0.9.0',
author='<NAME>',
author_email='<EMAIL>',
packages=['montybot', 'montybot.plugins'],
scripts=[],
url='http://github.com/estherbester/montybot',
license='LICENSE.txt',
    description='Twisted-based IRC bot.',
long_description=open('README').read(),
install_requires=[
"Twisted == 13.1.0",
"BeautifulSoup == 3.2.1",
"mock >= 1.0.1",
"requests >= 0.12.0",
],
)
| 510 |
flask3/request.py
|
Waithera-m/dementia-care-and-selfcare
| 0 |
2026204
|
import urllib.request,json
from .models import DementiaNews
def get_topics(topic):
'''
function gets and returns articles that cover a specific topic
'''
topical_url="https://newsapi.org/v2/everything?q={}&apiKey={}".format(topic,'57e1ca302331442e8be2e0e5c0cfe307')
with urllib.request.urlopen(topical_url) as url:
topic_details_data = url.read()
topic_data_response = json.loads(topic_details_data)
topic_results = None
if topic_data_response['articles']:
topic_results_list = topic_data_response['articles']
topic_results = modify_articles(topic_results_list)
return topic_results
def modify_articles(topic_list):
'''
function transforms api response data into a list
Args:
    topic_list: a list of dictionaries
Returns:
topic_results: a list of article objects
'''
topic_results = []
for article in topic_list:
author = article.get('author')
title = article.get('title')
publishedAt = article.get('publishedAt')
description = article.get('description')
url = article.get('url')
urlToImage = article.get('urlToImage')
if urlToImage:
article_object = DementiaNews(author,title,publishedAt,description,url,urlToImage)
topic_results.append(article_object)
return topic_results
| 1,409 |
tests/test-kernels.py
|
vangohao/PyWENO
| 26 |
2024361
|
import pyweno
def test_kernels():
k = 3
for lang in ('c', 'fortran'):
kernel = pyweno.kernels.KernelGenerator(lang, order=2*k-1, xi=[-1, 0, 1])
kernel.smoothness(reuse=False)
kernel.smoothness(reuse=True)
kernel.weights()
kernel.reconstruction()
if __name__ == '__main__':
test_kernels()
| 321 |
vip_hci/psfsub/__init__.py
|
r4lv/VIP
| 0 |
2025273
|
"""
Subpackage ``psfsub`` contains a large number of stellar PSF modelling +
subtraction algorithms. The following methods have been implemented:
- *median ADI/SDI* (Marois et al. 2006).
- *frame differencing*.
- a simplified version of *LOCI* (Lafreniere et al. 2007)
- different flavours of *PCA* (Soummer et al. 2012 and Amara et al. 2012)
working in full-frame, incremental and annular mode, including improvements,
speed tricks, compatibility with ADI/RDI/SDI datasets and datasets too large to
fit in memory.
- full-frame and annular versions of *NMF*.
"""
from .pca_fullfr import *
from .pca_local import *
from .svd import *
from .utils_pca import *
from .framediff import *
from .llsg import *
from .loci import *
from .medsub import *
from .nmf_fullfr import *
from .nmf_local import *
| 802 |
plugins/flytekit-bigquery/tests/test_bigquery.py
|
bimtauer/flytekit
| 0 |
2025706
|
from collections import OrderedDict
import pytest
from flytekitplugins.bigquery import BigQueryConfig, BigQueryTask
from google.cloud.bigquery import QueryJobConfig
from google.protobuf import json_format
from google.protobuf.struct_pb2 import Struct
from flytekit import StructuredDataset, kwtypes, workflow
from flytekit.configuration import Image, ImageConfig, SerializationSettings
from flytekit.extend import get_serializable
query_template = "SELECT * FROM `bigquery-public-data.crypto_dogecoin.transactions` WHERE @version = 1 LIMIT 10"
def test_serialization():
bigquery_task = BigQueryTask(
name="flytekit.demo.bigquery_task.query",
inputs=kwtypes(ds=str),
task_config=BigQueryConfig(
ProjectID="Flyte", Location="Asia", QueryJobConfig=QueryJobConfig(allow_large_results=True)
),
query_template=query_template,
output_structured_dataset_type=StructuredDataset,
)
@workflow
def my_wf(ds: str) -> StructuredDataset:
return bigquery_task(ds=ds)
default_img = Image(name="default", fqn="test", tag="tag")
serialization_settings = SerializationSettings(
project="proj",
domain="dom",
version="123",
image_config=ImageConfig(default_image=default_img, images=[default_img]),
env={},
)
task_spec = get_serializable(OrderedDict(), serialization_settings, bigquery_task)
assert "SELECT * FROM `bigquery-public-data.crypto_dogecoin.transactions`" in task_spec.template.sql.statement
assert "@version" in task_spec.template.sql.statement
assert task_spec.template.sql.dialect == task_spec.template.sql.Dialect.ANSI
s = Struct()
s.update({"ProjectID": "Flyte", "Location": "Asia", "allowLargeResults": True})
assert task_spec.template.custom == json_format.MessageToDict(s)
assert len(task_spec.template.interface.inputs) == 1
assert len(task_spec.template.interface.outputs) == 1
admin_workflow_spec = get_serializable(OrderedDict(), serialization_settings, my_wf)
assert admin_workflow_spec.template.interface.outputs["o0"].type.structured_dataset_type is not None
assert admin_workflow_spec.template.outputs[0].var == "o0"
assert admin_workflow_spec.template.outputs[0].binding.promise.node_id == "n0"
assert admin_workflow_spec.template.outputs[0].binding.promise.var == "results"
def test_local_exec():
bigquery_task = BigQueryTask(
name="flytekit.demo.bigquery_task.query2",
inputs=kwtypes(ds=str),
query_template=query_template,
task_config=BigQueryConfig(ProjectID="Flyte", Location="Asia"),
output_structured_dataset_type=StructuredDataset,
)
assert len(bigquery_task.interface.inputs) == 1
assert len(bigquery_task.interface.outputs) == 1
# will not run locally
with pytest.raises(Exception):
bigquery_task()
| 2,894 |
tests/test_client.py
|
datasmoothie/datasmoothie-client-python
| 0 |
2023841
|
from io import StringIO
import pandas as pd
from datasmoothie import Client
from datasmoothie import Datasource
from datasmoothie import Report
# import quantipy as qp
def test_incorrect_token():
client = Client(api_key='incorrect')
resp = client.get_request('datasource')
assert resp['detail'] == 'Invalid token.'
def test_new_client(token):
client = Client(api_key=token, host="localhost:8030/api2", ssl=False)
resp = client.get_request('datasource')
assert 'results' in resp
def test_create_datasource(token, dataset_data, dataset_meta):
client = Client(api_key=token, host="localhost:8030/api2", ssl=False)
datasource = client.create_datasource("My datasource")
data = dataset_data.to_csv()
datasource.update_meta_and_data(meta=dataset_meta, data=data)
assert isinstance(datasource, Datasource)
def test_list_datasources(token):
client = Client(api_key=token, host="localhost:8030/api2", ssl=False)
resp = client.list_datasources()
assert 'results' in resp
def test_get_datasource(token):
client = Client(api_key=token, host="localhost:8030/api2", ssl=False)
datasources = client.list_datasources()
primary_key = datasources['results'][0]['pk']
resp = client.get_datasource(primary_key)
assert isinstance(resp, Datasource)
def test_update_datasource(token, dataset_meta, dataset_data):
client = Client(api_key=token, host="localhost:8030/api2", ssl=False)
datasources = client.list_datasources()
primary_key = datasources['results'][0]['pk']
datasource = client.get_datasource(primary_key)
meta = dataset_meta
meta['info']['from_source']['pandas_reader'] = 'changed'
data = dataset_data[:90].to_csv()
resp = datasource.update_meta_and_data(meta=meta, data=data)
datasource2 = client.get_datasource(primary_key)
meta_and_data = datasource2.get_meta_and_data()
new_meta = meta_and_data['meta']
new_data = meta_and_data['data']
new_data_df = pd.read_csv(StringIO(new_data))
assert new_meta['info']['from_source']['pandas_reader'] == 'changed'
assert resp.status_code == 200
def test_list_reports(token):
client = Client(api_key=token, host="localhost:8030/api2", ssl=False)
reports = client.list_reports()
assert 'results' in reports
def test_get_report_meta(token):
client = Client(api_key=token, host="localhost:8030/api2", ssl=False)
reports = client.list_reports()
pk = reports['results'][0]['pk']
result = client.get_report_meta(pk)
print(result)
assert 'pk' in result
def test_get_report(token):
client = Client(api_key=token, host="localhost:8030/api2", ssl=False)
reports = client.list_reports()
pk = reports['results'][0]['pk']
report = client.get_report(pk)
print(report.title)
print(report.elements)
assert isinstance(report, Report)
def test_delete_report(token):
client = Client(api_key=token, host="localhost:8030/api2", ssl=False)
number_of_reports = client.list_reports()['count']
report = client.create_report(title="api created report")
assert client.list_reports()['count'] == number_of_reports + 1
report.delete()
assert client.list_reports()['count'] == number_of_reports
def test_create_report(token):
client = Client(api_key=token, host="localhost:8030/api2", ssl=False)
number_of_reports = client.list_reports()['count']
report = client.create_report(title="api created report")
assert client.list_reports()['count'] == number_of_reports + 1
report.delete()
assert client.list_reports()['count'] == number_of_reports
| 3,611 |
rcamp/projects/models.py
|
ResearchComputing/RCAMP
| 0 |
2025316
|
from dateutil.relativedelta import relativedelta
from django.db import models
from django.db.models import Func, F
from django.db.models.functions import Substr, Lower
from django.utils import timezone
from lib import fields
from accounts.models import User
from mailer.signals import allocation_created_from_request
class Cast(Func):
function = 'CAST'
template = '%(function)s(%(expressions)s as %(target_type)s)'
class Project(models.Model):
ORGANIZATIONS = (
('ucb','University of Colorado Boulder'),
('csu','Colorado State University'),
('xsede','XSEDE'),
)
pi_emails = fields.ListField()
managers = models.ManyToManyField(User,related_name='manager_on')
collaborators = models.ManyToManyField(User,related_name='collaborator_on')
organization = models.CharField(max_length=128,choices=ORGANIZATIONS)
title = models.CharField(max_length=256)
description = models.TextField()
project_id = models.CharField(max_length=24,unique=True,blank=True,null=True)
created_on = models.DateField(auto_now_add=True)
notes = models.TextField(blank=True,null=True)
parent_account = models.CharField(max_length=24,null=True,blank=True)
qos_addenda = models.CharField(max_length=128,null=True,blank=True)
deactivated = models.BooleanField(default=False)
def __str__(self):
return self.project_id
def save(self,*args,**kwargs):
if (not self.project_id) or (self.project_id == ''):
# Assign new id to project.
org = self.organization
prefix_offset = len(org) + 1
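            # Find the highest existing numeric suffix for this organization so the new ID continues the sequence.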
projects = Project.objects.filter(
project_id__startswith=org
).annotate(
project_number_int=Cast(Substr('project_id', prefix_offset), target_type='UNSIGNED')
).order_by('-project_number_int')
if projects.count() == 0:
next_id = '{}{}'.format(org.lower(),'1')
else:
last_id = projects[0].project_id
last_id = last_id.replace(org,'')
next_id = int(last_id) + 1
next_id = '{}{}'.format(org.lower(),str(next_id))
self.project_id = next_id
super(Project,self).save(*args,**kwargs)
class Reference(models.Model):
project = models.ForeignKey(Project, on_delete=models.CASCADE)
description = models.TextField()
link = models.TextField()
created_on = models.DateField(auto_now_add=True)
def __str__(self):
        return '{}_{}'.format(self.project.project_id, str(self.created_on))
class AllocationManager(models.Manager):
def create_allocation_from_request(self,**kwargs):
project = kwargs.get('project')
amount_awarded = kwargs.get('amount_awarded', None)
if not project:
raise TypeError('Missing required field: project')
if amount_awarded == None:
raise TypeError('Missing required field: amount_awarded')
now = timezone.now()
next_year = now + relativedelta(years=1)
alloc_fields = {}
alloc_fields['project'] = project
alloc_fields['amount'] = amount_awarded
alloc_fields['start_date'] = now
alloc_fields['end_date'] = next_year
alloc = self.create(**alloc_fields)
return alloc
class Allocation(models.Model):
objects = AllocationManager()
project = models.ForeignKey(Project, on_delete=models.CASCADE)
allocation_id = models.SlugField(unique=True,blank=True,null=True)
amount = models.BigIntegerField()
created_on = models.DateField(auto_now_add=True)
start_date = models.DateField()
end_date = models.DateField()
expiration_notice_sent = models.BooleanField(default=False)
def __str__(self):
return self.allocation_id
def save(self,*args,**kwargs):
alloc_id_tpl = '{project_id}_summit{alloc_enum}'
if (not self.allocation_id) or (self.allocation_id == ''):
proj_id = self.project.project_id
search_prefix = '{project_id}_summit'.format(project_id=proj_id)
prefix_offset = len(proj_id) + 1
allocs = Allocation.objects.filter(
allocation_id__startswith=search_prefix
).annotate(
alloc_number_int=Cast(Substr('allocation_id', prefix_offset), target_type='UNSIGNED')
).order_by('-alloc_number_int')
if allocs.count() == 0:
next_id = alloc_id_tpl.format(
project_id=proj_id.lower(),
alloc_enum='1'
)
else:
last_id = allocs.order_by('-allocation_id')[0].allocation_id
last_id = last_id.replace(proj_id+'_summit','')
next_id = int(last_id) + 1
next_id = alloc_id_tpl.format(
project_id=proj_id.lower(),
alloc_enum=str(next_id)
)
self.allocation_id = next_id
super(Allocation,self).save(*args,**kwargs)
class AllocationRequest(models.Model):
STATUSES = (
('a','Approved'),
('d','Denied'),
('w','Waiting'),
('h','Hold'),
('r','Ready For Review'),
('q','Response Requested'),
('i','Denied - Insufficient Resources'),
('x','Denied - Proposal Incomplete'),
('f','Approved - Fully Funded'),
('p','Approved - Partially Funded'),
)
project = models.ForeignKey(Project, on_delete=models.CASCADE)
allocation = models.ForeignKey(
Allocation,
null=True,
blank=True,
on_delete=models.CASCADE
)
abstract = models.TextField(null=True,blank=True)
funding = models.TextField(null=True,blank=True)
proposal = models.FileField(upload_to='proposals/%Y/%m/%d',null=True,blank=True)
time_requested = models.BigIntegerField(null=True,blank=True)
amount_awarded = models.BigIntegerField(default=0)
disk_space = models.IntegerField(default=0,null=True,blank=True)
software_request = models.TextField(null=True,blank=True)
requester = models.ForeignKey(User,null=True,blank=True,on_delete=models.CASCADE)
request_date = models.DateTimeField(auto_now_add=True)
status = models.CharField(max_length=16,choices=STATUSES,default='w')
approved_on = models.DateTimeField(null=True,blank=True)
notes = models.TextField(null=True,blank=True)
def __str__(self):
return '{}_{}'.format(self.project.project_id,self.request_date)
def save(self,*args,**kwargs):
# Check for change in approval status
if (self.status in ['a','f','p']) and (not self.approved_on):
# Approval process
# logger.info('Approving project request: '+self.__str__())
alloc = Allocation.objects.create_allocation_from_request(
project = self.project,
amount_awarded = self.amount_awarded
)
self.amount_awarded = alloc.amount
allocation_created_from_request.send(sender=alloc.__class__,allocation=alloc)
self.approved_on=timezone.now()
self.allocation = alloc
super(AllocationRequest,self).save(*args,**kwargs)
| 7,286 |
scripts/progbuild.py
|
rumblesan/websound
| 1 |
2025652
|
#!/usr/bin/env python
from sys import argv
from os import listdir
from os.path import isfile, join, basename, splitext
def file_to_string(folder_path, file_path):
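    # Flatten the file into a single JS-embeddable string: lines are joined with a literal "\n"
    # plus a backslash line continuation, and double quotes are escaped.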
data = []
file_name = splitext(basename(file_path))[0]
with open(join(folder_path, file_path)) as f:
filelines = f.read().splitlines()
data = "\\n\\\n".join(filelines).replace('"', '\\"')
return (file_name, data)
def write_to_file(folder_path, output_folder, data):
package_name = basename(folder_path)
output_file = join(output_folder, package_name + ".js")
with open(output_file, 'w') as of:
of.write("""\nvar %s = {};\n""" % package_name)
of.write("""\n%s.data = {};\n""" % package_name)
demo_names = ", ".join(['"' + n[0] + '"' for n in data])
of.write("""\n%s.names = [%s];\n""" % (package_name, demo_names))
for l in data:
of.write("""\n%s.data.%s = "%s";\n""" % (package_name, l[0], l[1]))
of.write("""\nmodule.exports = %s;\n""" % package_name)
def main():
folder_path = argv[1]
output_folder = argv[2]
files = [f for f in listdir(folder_path) if isfile(join(folder_path, f))]
data = []
for wlfile in files:
data.append(file_to_string(folder_path, wlfile))
write_to_file(folder_path, output_folder, data)
if __name__ == '__main__':
main()
| 1,362 |
Packs/CommonWidgets/Scripts/GetLargestInputsAndOuputsInIncidents/GetLargestInputsAndOuputsInIncidents.py
|
ZingBox/content
| 0 |
2025206
|
import traceback
from operator import itemgetter
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
def find_largest_input_or_output(all_args_list):
max_arg = {'Size': 0}
for arg in all_args_list:
if arg.get('Size') > max_arg.get('Size'):
max_arg = arg
return max_arg
def get_largest_inputs_and_outputs(inputs_and_outputs, largest_inputs_and_outputs, incident_id):
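    # Split the playbook tasks into input and output entries, then keep only the largest of each for this incident.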
inputs = []
outputs = []
if inputs_and_outputs:
for task in inputs_and_outputs:
if 'outputs' in task:
for output in task.get('outputs'):
outputs.append({
'IncidentID': incident_id,
'TaskID': task.get('id'),
'TaskName': task.get('name'),
'Name': output.get('name'),
'Size': output.get('size'),
"InputOrOutput": 'Output'
})
else:
for arg in task.get('args'):
inputs.append({
'IncidentID': incident_id,
'TaskID': task.get('id'),
'TaskName': task.get('name'),
'Name': arg.get('name'),
'Size': arg.get('size'),
'InputOrOutput': "Input"
})
if inputs:
largest_inputs_and_outputs.append(find_largest_input_or_output(inputs))
if outputs:
largest_inputs_and_outputs.append(find_largest_input_or_output(outputs))
def format_inputs_and_outputs_to_widget_table(largest_inputs_and_outputs):
widget_table = {
'data': sorted(largest_inputs_and_outputs, key=itemgetter('Size'), reverse=True),
'total': len(largest_inputs_and_outputs)
}
return widget_table
def get_extra_data_from_investigations(investigations):
largest_inputs_and_outputs: List = []
for inv in investigations:
inputs_and_outputs = demisto.executeCommand('getInvPlaybookMetaData',
args={
"incidentId": inv.get('IncidentID')
})[0].get('Contents').get('tasks')
get_largest_inputs_and_outputs(inputs_and_outputs, largest_inputs_and_outputs, inv.get('IncidentID'))
return format_inputs_and_outputs_to_widget_table(largest_inputs_and_outputs)
def main():
try:
raw_output = demisto.executeCommand('GetLargestInvestigations', args={'from': demisto.args().get('from'),
'to': demisto.args().get('to')})
investigations = raw_output[0].get('Contents', {}).get('data')
demisto.results(get_extra_data_from_investigations(investigations))
except Exception:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute GetLargestInputsAndOuputsInIncidents. Error: {traceback.format_exc()}')
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| 3,199 |
run.py
|
kjin67511/morning-pi
| 16 |
2025646
|
import datetime
from xml.etree import ElementTree
import grequests
from bus.bus_request import bus_request
from bus.bus_view import arrivals_from_xml, bus_arrival_view
from dust.dust_request import dust_request
from dust.dust_view import dust_from_xml, dust_view
from gpio import lcd
from gpio import led
from weather.weather_request import live_weather_request, forecast_weather_request
from weather.weather_view import weather_live_from_json, weather_forecast_from_json, weather_view
from config import ConfigSectionMap
pm10_threshold = int(ConfigSectionMap("dust")['pm10_threshold'])
pm25_threshold = int(ConfigSectionMap("dust")['pm25_threshold'])
def run():
lcd.clear()
lcd.message("Loading...")
weather_live_req = live_weather_request()
weather_forecast_req = forecast_weather_request()
bus_arrival_req = bus_request()
dust_req = dust_request()
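    # Fire the weather, bus, and dust requests concurrently.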
grequests.map(
(weather_live_req, weather_forecast_req, bus_arrival_req, dust_req)
)
weathers = list()
if weather_live_req.response is not None:
weathers.append(weather_live_from_json(weather_live_req.response.json()))
else:
weathers.append(None)
if weather_forecast_req.response is not None:
weathers.append(weather_forecast_from_json(weather_forecast_req.response.json(),
datetime.datetime.now() + datetime.timedelta(hours=12)))
else:
weathers.append(None)
weather_str = weather_view(weathers)
try:
bus_xml = ElementTree.fromstring(bus_arrival_req.response.content)
arrivals = arrivals_from_xml(bus_xml)
bus_str = bus_arrival_view(arrivals)
except ElementTree.ParseError:
bus_str = "Error"
try:
dust_xml = ElementTree.fromstring(dust_req.response.content)
dusts = dust_from_xml(dust_xml)
dust_str = dust_view(dusts)
except (ElementTree.ParseError, AttributeError) as e:
dust_str = "Error"
dusts = None
lcd_str = bus_str + "\n" + weather_str + " " + dust_str
lcd.clear()
lcd.message(lcd_str)
if dusts is not None:
led.off()
if dusts[0] != '-' and int(dusts[0]) > pm10_threshold:
led.on()
if dusts[1] != '-' and int(dusts[1]) > pm25_threshold:
led.on()
def reset():
lcd.clear()
led.off()
def button_pushed():
return lcd.GPIO_input()
def lcd_ready():
return lcd.lcd is not None
| 2,466 |
毒APP/duapp.py
|
irisroyaltyf/pySpider
| 0 |
2025254
|
from bs4 import BeautifulSoup
import requests
import csv
import sys
sys.path.append("..")
import mytemp
import util
import urllib.request
import urllib3
import json
# d-c-left view-phone-wrapper
# /search/list?size=[]&title=&typeId=0&catId=2&unionId=0&sortType=0&sortMode=1&page=0&limit=20&sign=4b86e534b0d233207a435111f61f8247
# []&title=&typeId=0&catId=2&unionId=0&sortType=0&sortMode=1&page=2&limit=20&sign=e485ae8eddd95547a0271beda45d09d5
url2='https://du.hupu.com/search/list?size=[]&title=&typeId=0&catId=2&unionId=0&sortType=0&sortMode=1&page=2&limit=20&sign=e485ae8eddd95547a0271beda45d09d5'
# url2='https://du.hupu.com/search/list?size=[]&title=&typeId=0&catId=2&unionId=0&sortType=0&sortMode=1&page=0&limit=20&sign=4b86e534b0d233207a435111f61f8247'
# /search/list?size=[]&title=&typeId=0&catId=4&unionId=0&sortType=0&sortMode=1&page=0&limit=20&sign=4cdb522927a504aea797cca21dac7109
# /search/list?size=[]&title=&typeId=0&catId=3&unionId=0&sortType=0&sortMode=1&page=0&limit=20&sign=bfa5ef97196795f488cd2f31a178e6c3
cook='duToken=<PASSWORD>%7C1<PASSWORD>%7C1537232477%7C471d784<PASSWORD>9f4d0'
headers = {
"Cookie":cook,
"User-Agent":"duapp/3.4.3(android;5.1)",
"Accept":"gzip",
"duchannel": "opp",
"dudeviceTrait":"",
"duloginToken": "<PASSWORD>|<PASSWORD>",
"duplatform": "android",
"duuuid": "fbc5ea1f51716822",
"duv": "3.4.3",
"Connection": "Keep-Alive",
"Host": "du.hupu.com"
}
def getJson(url,data,cook):
    # fetch the JSON
urllib3.disable_warnings()
f= requests.get(url,headers=headers,verify=False)
print(f.text)
try:
target=json.loads(f.text)['data']['productList']
for tar in target:
print(tar)
break
except:
pass
# html=urllib.request.urlopen(f).read().decode('utf-8',"ignore")
    # # request the JSON data with a POST
# r = requests.post(url, data = data)
# target=''
# try:
# target=json.loads(r.text)#["BusinessHallList"]
# print(target)
# except:
# pass
return None
getJson(url2,None,cook)
| 2,116 |
libs/ConfigHelpers.py
|
loi219/RootTheBox
| 2 |
2024674
|
import logging
import imghdr
import hashlib
from base64 import b64decode
from tornado.options import options
from datetime import datetime
from past.builtins import basestring
from libs.XSSImageCheck import is_xss_image
from libs.ValidationError import ValidationError
def save_config():
logging.info("Saving current config to: %s" % options.config)
with open(options.config, "w") as fp:
fp.write("##########################")
fp.write(" Root the Box Config File ")
fp.write("##########################\n")
fp.write(
"# Documentation: %s\n"
% "https://github.com/moloch--/RootTheBox/wiki/Configuration-File-Details"
)
fp.write("# Last updated: %s\n" % datetime.now())
for group in options.groups():
# Shitty work around for Tornado 4.1
if "rootthebox.py" in group.lower() or group == "":
continue
fp.write("\n# [ %s ]\n" % group.title())
opt = list(options.group_dict(group).items())
for key, value in opt:
try:
# python2
value_type = basestring
except NameError:
# python 3
value_type = str
if isinstance(value, value_type):
# Str/Unicode needs to have quotes
fp.write('%s = "%s"\n' % (key, value))
else:
# Int/Bool/List use __str__
fp.write("%s = %s\n" % (key, value))
def save_config_image(b64_data):
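    # Accept only images under 4 MiB (2048 * 2048 bytes) in png/jpeg/gif/bmp format that do not look like XSS-bearing payloads.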
image_data = bytearray(b64decode(b64_data))
if len(image_data) < (2048 * 2048):
ext = imghdr.what("", h=image_data)
file_name = "story/%s.%s" % (hashlib.sha1(image_data).hexdigest(), ext)
if ext in ["png", "jpeg", "gif", "bmp"] and not is_xss_image(image_data):
with open("files/" + file_name, "wb") as fp:
fp.write(image_data)
return file_name
else:
raise ValidationError(
"Invalid image format, avatar must be: .png .jpeg .gif or .bmp"
)
else:
raise ValidationError("The image is too large")
| 2,224 |
src/sprites/attacks/OrbAttack.py
|
NEKERAFA/Soul-Tower
| 0 |
2025990
|
# -*- coding: utf-8 -*-
import pygame, sys, os, math, random
from src.ResourceManager import *
from src.sprites.Attack import *
from src.sprites.Bullet import *
from src.sprites.Force import *
from Normalize import *
from src.sprites.MyStaticAnimatedSprite import *
# W = 1
E = 2
# -------------------------------------------------
# Attack sprites
class OrbAttack(Attack):
def __init__(self, radius, delayTime, enemies, looking):
        # Get the paths to the asset files
castingFile = os.path.join('attacks', 'orb.png')
castingSheet = os.path.join('attacks', 'castingOrb.json')
effect_sound = 'pew.ogg'
self.castingAnim = MyStaticAnimatedSprite(castingFile, castingSheet)
self.castingAnim.animationLoop = False
orbFile = 'orb.png'
orbSheet = 'orb.json'
        # Call the parent class constructor
Attack.__init__(self, orbFile, orbSheet, enemies, effect_sound)
        # Paths for the bullet sprites
imageFile = os.path.join('sprites', 'attacks', 'laser.png')
spriteSheet = os.path.join('attacks', 'laser.json')
        # Load the sprite sheet
        sheet = ResourceManager.load_image(imageFile, -1)
        # Read the frame coordinates from the file
        data = ResourceManager.load_sprite_conf(spriteSheet)
        sheetConf = []
        # Load the sprites
for col in range(0, len(data)):
cell = data[col]
coords = pygame.Rect((int(cell['x']), int(cell['y'])), (int(cell['width']), int(cell['height'])))
delay = float(cell['delay'])*1000
sheetConf.append({'coords': coords, 'delay': delay})
        # Initial frame
rect = pygame.Rect(0, 0, sheetConf[0]['coords'][2], sheetConf[0]['coords'][3])
origImage = sheet.subsurface(sheetConf[0]['coords'])
self.bulletImage = origImage.copy()
self.loopAnimation = True
        # Attack radius
        self.radius = radius
        # Time between shots
        self.delayTime = delayTime
        self.elapsedTime = 0
        # Whether the orb is currently attacking
        self.attacking = True
        # Group of bullets
        self.bullets = pygame.sprite.Group()
        # Facing direction
self.looking = looking
def start_attack(self, pos, enemyPos):
x,y = pos
if self.looking == E:
x += self.rect.width/2 + 7
else:
x -= self.rect.width/2 - 7
y -= self.rect.height - 10
self.rect.topleft = x,y
self.castingAnim.rect.topleft = x,y
bulletX = x+15
bulletY = y+15
self.bulletPos = bulletX, bulletY
(enemyX, enemyY) = enemyPos
        # Get the angle between the orb and the enemy
        angle = int(math.degrees(math.atan2(bulletY-enemyY, enemyX-bulletX)))
        # Correction when the angle is between 180 and 360 degrees
if angle < 0:
angle = 360 + angle
self.rotation = angle
self.attacking = True
def end_attack(self):
self.attacking = False
def draw(self, surface):
if not self.castingAnim.animationFinish:
surface.blit(self.castingAnim.image, self.castingAnim.rect)
else:
surface.blit(self.image, self.rect)
self.bullets.draw(surface)
def update(self, boss, time, stage):
# Actualizamos el ataque
Attack.update(self, time)
if not self.castingAnim.animationFinish:
self.castingAnim.update(time)
else:
            # If enough time has passed and we are trying to attack
            if (self.elapsedTime > self.delayTime) and self.attacking:
                # Trigger the sound update on the effect channel
                self.channel_effect.soundUpdate(time)
                # Create a bullet and add it to the bullet group
                bullet = Bullet(self.bulletPos, self.rotation, self.radius, self.bulletImage, 0.15)
                self.bullets.add(bullet)
                # And reset the timer
self.elapsedTime = 0
else:
self.elapsedTime += time
        # Update the bullets
        self.bullets.update(time, stage, self.bulletImage)
        # Check which enemies collide with which bullets
        collides = pygame.sprite.groupcollide(self.bullets, self.enemies, True, False)
        # On a collision, damage the enemy and kill the bullet
        for bullet in collides:
            enemies = collides[bullet]
            # Take the first enemy in the collision as the one that receives damage
enemy = enemies[0]
enemyPos = enemy.position
impulse = Force(bullet.rotation, boss.stats["backward"])
enemy.receive_damage(boss.stats["atk"], impulse)
if not self.attacking and len(self.bullets.sprites())==0:
boss.attack = None
self.kill()
| 5,082 |
doc/source/bin/get_links.py
|
alimon/pyke3
| 5 |
2025083
|
#!/usr/bin/env python
# get_links.py
import os
import os.path
def run_command(start_dir, outfilename):
dir = start_dir
links_seen = set()
def doctor(link_dir, path):
# Don't mess with paths that just refer to another link:
if path.rstrip()[-1] == '_': return path
path = path.lstrip()
# Don't mess with paths that point somewhere in the outside universe:
if path.startswith('http://'): return ' ' + path
# Prepend link_dir to path
if link_dir.startswith('./'): path = link_dir[2:] + '/' + path
elif link_dir != '.': path = link_dir + '/' + path
# Prepare dir (start_dir, minus initial './')
if start_dir == '.': dir = ''
elif start_dir.startswith('./'): dir = start_dir[2:]
else: dir = start_dir
rest=' '
last_dir = None
while dir and dir != last_dir:
if path.startswith(dir + '/'):
ans = rest + path[len(dir) + 1:]
#print "doctor(%s) abbr:" % (path.rstrip(),), ans
return ans
rest += '../'
last_dir = dir
dir, ignore = os.path.split(dir)
ans = rest + path
#print "doctor(%s) abs:" % (path.rstrip(),), ans
return ans
with open(outfilename, "w") as outfile:
outfile.write("\n")
while True:
try:
with open(os.path.join(dir, 'links')) as links:
for line in links:
link, path = line.split(':', 1)
if link not in links_seen:
links_seen.add(link)
outfile.write(":".join((link, doctor(dir, path))))
except IOError:
pass
if dir == '.': break
dir = os.path.dirname(dir)
if __name__ == "__main__":
import sys
if len(sys.argv) != 3:
print("usage: get_links.py dir outfile", file=sys.stderr)
sys.exit(2)
run_command(sys.argv[1], sys.argv[2])
| 2,048 |
src/ecs/systems/turnordersystem.py
|
joehowells/critical-keep
| 1 |
2025708
|
from ecs.components.aicomponent import AIComponent
from ecs.components.deathcomponent import DeathComponent
class TurnOrderSystem:
def __init__(self, container):
self.container = container
def event_initialize(self):
self.container.queue.clear()
self.container.queue.append(self.container.player)
def event_next_level(self, level):
self.event_initialize()
def event_dead(self, defender):
if defender in self.container.queue and defender is not self.container.player:
self.container.queue.remove(defender)
def event_entity_visible(self, entity):
if entity not in self.container.queue and AIComponent in entity:
self.container.queue.append(entity)
def update(self):
if DeathComponent in self.container.player:
self.container.event('game_over')
else:
self.container.queue.rotate(-1)
entity = self.container.queue[0]
self.container.event('take_turn', entity=entity)
if entity is self.container.player:
self.container.event('input_mode_move')
| 1,135 |
hangmanultimate/hangman.py
|
devarshi16/HangMan
| 10 |
2025682
|
#!/usr/bin/env python
from __future__ import print_function
from builtins import input
from termcolor import colored
import readchar
from random import randint
import sys
import os
import time
from .data import *
import threading,time
from threading import Lock
data_dir = os.path.join(os.path.dirname(__file__),'data')
# hangman sprites
hngmn = ['''
+---+
| |
|
|
|
|
=========''', '''
+---+
| |
O |
|
|
|
=========''', '''
+---+
| |
O |
| |
|
|
=========''', '''
+---+
| |
O |
/| |
|
|
=========''', '''
+---+
| |
O |
/|\ |
|
|
=========''', '''
+---+
| |
O |
/|\ |
/ |
|
=========''', '''
+---+
| |
O |
/|\ |
/ \ |
|
=========''']
hanged_man = '''
+---+
| |
_O_ |
| |
/ \ |
|
========='''
free_man = ['''
+---+
|
|
_O_ |
| |
| | |
=========''','''
+---+
|
|
\O/ |
| |
| | |
=========''']
class Game:
#fields
win = 0
lose = 0
success_stat = 0
s=0
animal='dog'
blanks='_'
done_letters=['ij']
#methods
def __init__(self,words,list_type):#initialisation
print(words)
self.animal = words[randint(0,len(words)-1)].upper()
self.blanks=len(self.animal)*'_'
for i,letter in enumerate(self.animal):
if not letter.isalpha():
self.blanks = self.blanks[:i]+letter+self.blanks[i+1:]
os.system("clear")
#print(hngmn[0])
#print ('Guess the name of the '+list_type+' letter by letter')
#print ('Your '+list_type+' is ' ,end='')
#for letter in self.blanks:
# print(letter,end = ' ')
#print()
#time.sleep(3)
def Score(self):#displayed at the end
if self.success_stat == 1:
Game.win = Game.win + 1 # Game.win will be shared between objects
else:
Game.lose = Game.lose + 1
def Status(self): # Returns current win/lose status
print("Score is "+colored("win = "+str(Game.win),'green')+" \\ "+colored("lose ="+ str(Game.lose),'red') )
def MainGame(self):#main game method
while True:
os.system("clear")
k='ij'
print (colored(hngmn[self.s],'magenta')) # Print current state of hangman
for i in range(len(self.blanks)):
print(self.blanks[i],end= ' ')
print()
print(colored("DONE LETTERS:",'blue'),end='')
print(*self.done_letters,sep=',')
while True:
k=input("Your choice of letter?")
if len(k)!=1:
print('Enter only one letter please')
continue
elif not k.isalpha():
print('Enter only alphabets')
continue
elif k.upper() in self.done_letters:
print ('The letter entered is already done')
k='ij'
continue
else:
k=k.upper()
self.done_letters.append(k)
break
if self.animal.find(k)!=-1: # When letter is in the animal name
#print ('yes the letter \''+colored(k,'red')+'\' is there')
#time.sleep(2)
i=0
while i<len(self.animal):
if self.animal[i]==k:
self.blanks=self.blanks[:i]+k+self.blanks[i+1:]
i=i+1
else:
self.s=self.s+1 # wrong selection counter
if self.blanks.find('_')==-1: # No blanks found
print('entered')
self.success_stat=1
break
if self.s==len(hngmn): # Hangman Complete
self.success_stat=0
break
key = None
def get_key_press():
global key
global printing_lock
while key!='q':
key = readchar.readchar()
if key == '\x03':
raise KeyboardInterrupt
# Start Screen of game
def start_screen():
i = 0
global key
global printing_key
try:
while key!='q':
os.system("clear") # equivalent to typing clear on terminal
print(colored(hngmn[i%7],'green'))
print (colored('##### WELCOME TO THE GAME OF HANGMAN #####','yellow'))
print (colored('####### Save the man from hanging ########','cyan'))
if i%2 == 0:
print (colored(' [press <Ctrl + c> to start]','red'))
time.sleep(0.5) # Animate by giving sleep time
i+=1
except:
pass
def select_type():
os.system("clear")
print(colored("Rules: Guess the word letter by letter to save the man from hanging",'blue'))
print(colored("\n\n\t\tSelect word list\n\t\t1. Animals\n\t\t2. Pokemons\n\t\t3. Fruits\n\t\t4. Countries\n\t\t5. Bollywood Movies","magenta"))
file_name = None
lst_type = None
while True:
c = readchar.readchar()
if c == '1':
file_name = 'animals.txt'
lst_type = 'animals'
break
elif c == '2':
file_name = 'pokemons.txt'
lst_type = 'pokemons'
break
elif c == '3':
file_name = 'fruits.txt'
lst_type = 'fruits'
break
elif c == '4':
file_name = 'countries.txt'
            lst_type = 'countries'
break
elif c == '5':
file_name = 'bollywood movies.txt'
lst_type = 'bollywood movies'
break
elif c == '\x03':
raise KeyboardInterrupt
# Open file containing animal names in read-only mode
#file_loc = os.path.join(os.path.dirname(os.path.realpath(__file__)),DATA_LOCATION,file_name)
'''
file_loc = os.path.join(data_dir,file_name)
f = open(file_loc,'r')
# Add each animal name on each line to a list 'animals'
lst = [x.strip() for x in f.readlines()]
'''
lst = word_list[lst_type]
print(lst)
return lst_type,lst
# Function for animation after win or lose
# Takes a Game class object as input
def hanging_man_anim(game):
i = 0
if game.success_stat == 0:
while i <= 4:
os.system("clear")
if i%2 == 0:
print(colored(hanged_man,'red'))
else:
print(colored(hngmn[-1],'red'))
print(colored(":( !Man Hanged! :(\nCorrect word: ",'red')+colored(game.animal,'green'))
game.Status()
time.sleep(0.5)
i+=1
else:
while i <= 4:
os.system("clear")
if i%2 == 0:
print(colored(free_man[0],'green'))
else:
print(free_man[1])
print(colored(":) !Man Saved! :)",'green'))
game.Status()
time.sleep(0.5)
i+=1
def main():
start_screen()
list_type,words = select_type()
print(words)
while True:#Game loop
game = Game(words,list_type)
game.done_letters=[]
game.MainGame()
game.Score()
hanging_man_anim(game)
#game.Score()
exit_question = input("another game?(y/n)")
break_flag = 0
while True:
if (exit_question =='y'):
break
elif (exit_question == 'n'):
break_flag =1
break
else:
exit_question = input("please type (y/n)")
if break_flag != 0:
break
| 8,859 |
setup.py
|
abhinavg97/ACSA_GNN
| 3 |
2025674
|
"""
https://godatadriven.com/blog/a-practical-guide-to-using-setup-py/
https://docs.pytest.org/en/latest/goodpractices.html
"""
import io
from os import path
from setuptools import setup, find_packages
requirements = [
'torch==1.6.0',
'pytorch_lightning==1.0.2',
'numpy==1.19.1',
'dgl==0.5.2',
'spacy==2.3.2',
'pandas==1.1.0',
'networkx==2.4',
'scikit-learn==0.23.2',
'scikit-multilearn==0.2.0',
'nltk==3.5',
'editdistance==0.5.3', # required for contextualspellcheck
'contextualSpellCheck==0.2.0',
'pycontractions==2.0.1',
    'unidecode==1.1.1',
'simpletransformers==0.48.14',
'matplotlib'
]
extra_requirements = [
'matplotlib'
]
VERSION = '0.0.1'
this_directory = path.abspath(path.dirname(__file__))
with io.open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
README = f.read()
setup(
name='absa_gnn',
version=VERSION,
url='https://github.com/abhinavg97/GCN',
description='Python module designed for text GCN',
long_description=README,
packages=find_packages(),
zip_safe=True,
install_requires=requirements,
extras_require={
'interactive': extra_requirements
},
setup_requires=['pytest-runner', 'flake8'],
tests_require=['pytest'],
package_data={'': ['*.json']}
)
# run pip install -e .[interactive] to install the package
| 1,390 |
RMPCrawl.py
|
NuclearPop/ratemyprofessordiscordbot
| 0 |
2026006
|
import requests
from bs4 import BeautifulSoup
def create_soup(url):
# print(url)
source_code = requests.get(url)
plain_text = source_code.text
soup = BeautifulSoup(plain_text, "html.parser")
return soup
def find_prof(soup, prof: list) -> str:
all_lines = soup.find_all("a", href=True)
for line in all_lines:
if (line['href'].startswith('/ShowRatings.jsp?tid=')):
possible_prof = set(line.find('span', {'class': 'main'}).text.strip().split(', '))
if set(prof) <= possible_prof:
return 'http://www.ratemyprofessors.com' + line['href']
elif prof[-1] in possible_prof:
return 'http://www.ratemyprofessors.com' + line['href']
def get_ratings(soup):
result = dict()
for x in ("quality", "difficulty", "takeAgain"):
line = soup.find('div', {"class": x})
temp = line.find(class_="grade")
if temp is not None:
result[x] = (temp.text.strip())
return result
def run(prof: str):
prof = prof.split()
url = 'http://www.ratemyprofessors.com/search.jsp?query='
url += 'UCI+' + prof[-1]
soup = create_soup(url)
prof_page = find_prof(soup, prof)
    if prof_page is not None:
prof_soup = create_soup(prof_page)
return get_ratings(prof_soup)
if __name__ == '__main__':
url = 'http://www.ratemyprofessors.com/search.jsp?query='
url += 'UCI+Pattis'
soup = create_soup(url)
    prof_page = find_prof(soup, ['Pattis'])
prof_soup = create_soup(prof_page)
ratings = get_ratings(prof_soup)
| 1,583 |
ground_truth_labeling_jobs/multi_modal_parallel_sagemaker_labeling_workflows_with_step_functions/src/lambda_test/labeling_job_state_change/main_test.py
|
jerrypeng7773/amazon-sagemaker-examples
| 2,610 |
2024999
|
import unittest
from unittest import TestCase
from unittest.mock import patch
from labeling_job_state_change.main import lambda_handler
from test_shared.mock_objects import OutputTestData, TestContext
class TestCase(TestCase):
@patch("shared.db.get_job_by_arn")
@patch("shared.db.get_batch_metadata_by_labeling_job_name")
@patch("shared.db.update_batch_status")
@patch("shared.db.get_batch_metadata")
def test_lambda_handler_happy_case(
self,
get_batch_metadata_mock,
update_batch_status_mock,
get_batch_metadata_by_labeling_job_name_mock,
get_job_by_arn_mock,
):
# Setup
event = {"status": "test", "job_arns": ["arn:aws:sagemaker:test:test:labeling-job/test"]}
get_batch_metadata_mock.return_value = OutputTestData.get_batch_first_level_output
get_job_by_arn_mock.return_value = None
get_batch_metadata_by_labeling_job_name_mock.return_value = (
OutputTestData.get_batch_metadata_by_labeling_job_name_output
)
update_batch_status_mock.return_value = {}
# Act
val = lambda_handler(event, TestContext())
# Assert
self.assertEqual("success", val, "Unexpected status code returned")
if __name__ == "__main__":
unittest.main()
| 1,299 |
environment_config.py
|
azam-a/newdeployments
| 0 |
2025623
|
# all paths are relative to jenkins job's workspace (absolute path is fine too)
# common config for both deployment and cleanup scripts
AWS_CONFIG_PATH = '' # path to config file (ini format) containing aws credentials
AWS_PROFILE = '' # name of aws profile from the config file to be used
CSS_BUCKET = '' # name of the css bucket
IMAGE_BUCKET = '' # name of the image bucket
JS_BUCKET = '' # name of the js bucket
XML_PATH = '' # path of the xml file containing latest file versions
# config specific to deployment script
JAVA_PATH = '' # path of the folder containing java binary, may default to empty if already defined in system path
MINIFIER_PATH = '' # path of the folder containing the minifiers binaries (closure compiler & yuicompressor)
PREFIX_PATH = '' # path of the repo www folder
# config specific to cleanup script
CSS_PREFIX = '' # prefix of base path in buckets (eg. base folder name)
IMAGE_PREFIX = '' # to narrow down selections during bucket.list() operations
JS_PREFIX = '' # by excluding other irrelevant folders (log, etc)
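# Illustrative sketch only (not from the original file): one way these settings
# could be filled in; every value below is hypothetical.
#
# AWS_CONFIG_PATH = 'config/aws.ini'
# AWS_PROFILE = 'deploy'
# CSS_BUCKET = 'example-site-css'
# IMAGE_BUCKET = 'example-site-img'
# JS_BUCKET = 'example-site-js'
# XML_PATH = 'www/file_versions.xml'
# JAVA_PATH = ''                      # empty if java is already on the system path
# MINIFIER_PATH = 'tools/minifiers'
# PREFIX_PATH = 'www/'
# CSS_PREFIX = 'static/css'
# IMAGE_PREFIX = 'static/img'
# JS_PREFIX = 'static/js'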
| 1,141 |
madpy/tests/test_duration.py
|
seismomomo/madpy
| 1 |
2023082
|
import obspy
import unittest
import numpy as np
import madpy.duration as dur
import madpy.tests.testdata.config as cfg
class TestAmplitude(unittest.TestCase):
def test_measure_duration(self):
st = obspy.read('testdata/duration.mseed')
st[0].stats.o = obspy.UTCDateTime('2020-10-10T13:05:00.00')
st[0].stats.p = 10.
st[0].stats.s = 20.
df_dur = dur.measure_duration(st, cfg)
self.assertEqual(len(df_dur), len(st))
self.assertEqual(len(df_dur.columns), 8)
def test_coda_duration(self):
st = obspy.read('testdata/duration.mseed')
st[0].stats.o = obspy.UTCDateTime('2020-10-10T13:05:00.00')
st[0].stats.p = 10.
st[0].stats.s = 20.
duration, cc = dur.coda_duration(st[0], 10 ** -10, 'linear', cfg)
self.assertAlmostEqual(duration, 175, 0)
self.assertAlmostEqual(cc, -0.72, 2)
def test_moving_average(self):
st = obspy.read('testdata/duration.mseed')
st[0].stats.o = obspy.UTCDateTime('2020-10-10T13:05:00.00')
st[0].stats.p = 10.
st[0].stats.s = 20.
avg_time, avg_data = dur.moving_average(st[0].copy(), cfg.Duration)
self.assertEqual(np.sum(st[0].data - avg_data), 0)
cfg.Duration.moving_average_window = 2
avg_time, avg_data = dur.moving_average(st[0].copy(), cfg.Duration)
self.assertEqual(len(st[0].data) - len(avg_data),
2 * st[0].stats.sampling_rate - 1)
def test_time_relative_p(self):
st = obspy.read('testdata/duration.mseed')
st[0].stats.o = obspy.UTCDateTime('2020-10-10T13:05:00.00')
st[0].stats.p = 10.
st[0].stats.s = 20.
time = np.arange(0, len(st[0].data)) * 0.01 - 40
self.assertEqual(np.sum(dur.time_relative_p(st[0].copy()) - time), 0)
def test_log_envelope(self):
data = np.array([-10, 0, 1, 10, 100, 0, -10, 1, 100, 10], dtype=float)
envelope = dur.log_envelope(data)
self.assertEqual(len(envelope[np.isnan(envelope)]), 4)
self.assertEqual(len(envelope[~np.isnan(envelope)]), 6)
def test_coda_fitting_window(self):
st = obspy.read('testdata/duration.mseed')
st[0].stats.o = obspy.UTCDateTime('2020-10-10T13:05:00.00')
st[0].stats.p = 10.
st[0].stats.s = 20.
time, data_0 = dur.moving_average(st[0].copy(), cfg.Duration)
data = dur.log_envelope(data_0)
i0, i1 = dur.coda_fitting_window(st[0].copy(), cfg.Duration, time, data, 10 ** -10)
self.assertEqual(i0, 5631)
self.assertEqual(i1, 19292)
def test_search_window(self):
st = obspy.read('testdata/duration.mseed')
st[0].stats.o = obspy.UTCDateTime('2020-10-10T13:05:00.00')
st[0].stats.p = 10.
st[0].stats.s = 20.
time,_ = dur.moving_average(st[0].copy(), cfg.Duration)
begin = dur.search_window(st[0].copy(), time, cfg.Duration, 'begin')
end = dur.search_window(st[0].copy(), time, cfg.Duration, 'end')
self.assertEqual(begin, 4701)
self.assertEqual(end, 6901)
def test_search_window_seconds(self):
st = obspy.read('testdata/duration.mseed')
st[0].stats.o = obspy.UTCDateTime('2020-10-10T13:05:00.00')
st[0].stats.p = 10.
st[0].stats.s = 20.
begin = dur.search_window_seconds(st[0].copy(), cfg.Duration, 'begin')
end = dur.search_window_seconds(st[0].copy(), cfg.Duration, 'end')
self.assertEqual(begin, 8)
self.assertEqual(end, 30)
def test_phase_relative_p(self):
st = obspy.read('testdata/duration.mseed')
st[0].stats.o = obspy.UTCDateTime('2020-10-10T13:05:00.00')
st[0].stats.p = 10.
st[0].stats.s = 20.
self.assertEqual(dur.phase_relative_p(st[0], 'O'), -10)
self.assertEqual(dur.phase_relative_p(st[0], 'P'), 0)
self.assertEqual(dur.phase_relative_p(st[0], 'S'), 10)
def test_fitting_window_start(self):
self.assertEqual(dur.fitting_window_start(cfg.Duration, 20, 50), 20)
cfg.Duration.start_fit_max = 3
self.assertEqual(dur.fitting_window_start(cfg.Duration, 20, 50), 30)
cfg.Duration.start_fit_max = 10
self.assertEqual(dur.fitting_window_start(cfg.Duration, 20, 50), 23)
def test_fitting_window_end(self):
st = obspy.read('testdata/duration.mseed')
st[0].stats.o = obspy.UTCDateTime('2020-10-10T13:05:00.00')
st[0].stats.p = 10.
st[0].stats.s = 20.
_,data_0 = dur.moving_average(st[0].copy(), cfg.Duration)
data = dur.log_envelope(data_0)
end = dur.fitting_window_end(st[0], cfg.Duration, data, 5631, 10 ** -10)
self.assertEqual(end, 19292)
def test_coda_line_fit(self):
m = -5
b = 20
x = np.arange(0, 1000, 0.1)
y = m * x + b
fit = dur.coda_line_fit(x, y)
self.assertAlmostEqual(fit[1], m)
self.assertAlmostEqual(fit[0], b)
def test_get_duration_index(self):
m = -5
b = 20
x = np.arange(0, 1000, 0.1)
y = m * x + b
i, cross = dur.get_duration_index(cfg.Duration, [b, m], 10 ** -10)
self.assertAlmostEqual(i, 6.001)
self.assertEqual(cross, 6001)
def test_extend_line(self):
x, y = dur.extend_line(-5, 20, 0, 500, 0.1)
self.assertEqual(len(x), 500 / 0.1)
self.assertEqual(len(y), 500 / 0.1)
def test_coda_line_end(self):
self.assertEqual(dur.coda_line_end(cfg.Duration, 10 ** -10), -10)
cfg.Duration.threshold_type = 'noise'
self.assertAlmostEqual(dur.coda_line_end(cfg.Duration, 10 ** -10), -20)
cfg.Duration.threshold_type = 'absolute'
def test_get_correlation_coefficient(self):
line = np.array([-5, -1])
time = np.arange(0, 1000, 0.01)
data = line[1] * time + line[0]
cc = dur.get_correlation_coefficient(line, time, data, 5, 90)
self.assertAlmostEqual(cc, -1, 4)
self.assertWarns(UserWarning, dur.get_correlation_coefficient,
line, time, data, 5, 1110000)
def test_calculate_cc(self):
x = np.arange(0, 1000, 0.5)
y = 5 * x - 1
y_true = np.column_stack((x, y))
self.assertRaises(AssertionError, dur.calculate_cc, y, y)
self.assertEqual(dur.calculate_cc(y_true, y_true)[0, 3], 1)
def format_output(self):
data = ['2020', '13:00:00', 1., 2., 3.]
self.assertRaises(AssertionError, dur.format_output, data)
if __name__ == '__main__':
unittest.main()
| 7,147 |
trade_remedies_api/security/utils.py
|
uktrade/trade-remedies-api
| 1 |
2025555
|
import logging
from security.models import OrganisationUser
from organisations.models import get_organisation
from cases.models import get_case
from django.contrib.auth.models import Group, Permission
from .constants import GROUPS, GROUP_PERMISSIONS, ADDITIONAL_PERMISSIONS
logger = logging.getLogger(__name__)
def validate_user_organisation(user, organisation):
"""
Validate a user can access this organisation.
This is true if the user is either a member of the organisation,
or is a case worker.
TODO: At the moment TRA side is fairly open. Consider this.
"""
if user.is_tra():
return True
organisation = get_organisation(organisation)
org_user = OrganisationUser.objects.filter(organisation=organisation, user=user).exists()
return org_user
def validate_user_case(user, case, organisation):
"""
Validate the user has access to this case and organisation
Fairly simplistic at the moment
"""
if user.is_tra():
return True
case = get_case(case)
organisation = get_organisation(organisation)
return user.has_case_access(case, organisation)
# Setup/bootstrapping utility functions
def create_groups():
for group_data in GROUPS:
group, created = Group.objects.get_or_create(name=group_data[0])
logger.info("\t{0} created? {1}".format(group_data[0], created))
def assign_group_permissions():
all_permissions = []
for group_name in GROUP_PERMISSIONS:
logger.info(
"Assigning {0} permissions to {1}".format(
len(GROUP_PERMISSIONS[group_name]), group_name
)
)
all_permissions += GROUP_PERMISSIONS[group_name]
group, created = Group.objects.get_or_create(name=group_name)
for permission_name in GROUP_PERMISSIONS[group_name]:
try:
app, perm = permission_name.split(".")
permission = Permission.objects.get(codename=perm, content_type__app_label=app)
group.permissions.add(permission)
except Permission.DoesNotExist:
logger.error("\t{0} -> Does not exist".format(permission_name), exc_info=True)
logger.info("Assigning {0} Super User permissions".format(len(all_permissions)))
superuser = Group.objects.get(name="Super User")
for permission_name in all_permissions + ADDITIONAL_PERMISSIONS:
try:
app, perm = permission_name.split(".")
permission = Permission.objects.get(codename=perm, content_type__app_label=app)
superuser.permissions.add(permission)
except Permission.DoesNotExist:
logger.info("{0} permission not found".format(permission_name), exc_info=True)
| 2,725 |
finplot/example-line.py
|
fjafferi/finplot
| 0 |
2025574
|
#!/usr/bin/env python3
import finplot as fplt
import numpy as np
import pandas as pd
dates = pd.date_range('01:00', '01:00:01.200', freq='1ms')
prices = pd.Series(np.random.random(len(dates))).rolling(30).mean() + 4
fplt.plot(dates, prices, width=3)
line = fplt.add_line((dates[100], 4.4), (dates[1100], 4.6), color='#9900ff', interactive=True)
## fplt.remove_line(line)
text = fplt.add_text((dates[500], 4.6), "I'm here alright!", color='#bb7700')
## fplt.remove_text(text)
def save():
fplt.screenshot(open('screenshot.png', 'wb'))
fplt.timer_callback(save, 0.5, single_shot=True) # wait some until we're rendered
fplt.show()
| 636 |
odifmap/ontology.py
|
duke-lungmap-team/ihc-segmentation-final
| 0 |
2025512
|
import ontospy
import pickle
import os
from .resources import resource_path
LOCAL_ONTOLOGY_FILE = os.path.join(resource_path, 'lung_ontology.owl')
PICKLED_ONTOLOGY = os.path.join(resource_path, 'lung_ontology.pkl')
try:
# load pickled ontospy object
f = open(PICKLED_ONTOLOGY, 'rb')
onto = pickle.load(f)
f.close()
except FileNotFoundError:
onto = ontospy.Ontospy(uri_or_path=LOCAL_ONTOLOGY_FILE, rdf_format='xml')
# pickle the ontology
f = open(PICKLED_ONTOLOGY, 'wb')
pickle.dump(onto, f)
f.close()
def get_onto_protein_uri(ontology, protein_label):
sparql_proteins_query = """
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX owl: <http://www.w3.org/2002/07/owl#>
PREFIX : <http://www.semanticweb.org/am175/ontologies/2017/1/untitled-ontology-79#>
SELECT ?p ?p_label WHERE {
?p rdfs:subClassOf :Protein .
?p :has_synonym ?p_label .
VALUES ?p_label { "%s" }
}
""" % protein_label
results = ontology.query(sparql_proteins_query)
return results
def get_onto_cells_by_protein(ontology, protein_uri):
sparql_protein_cell_query = """
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX owl: <http://www.w3.org/2002/07/owl#>
PREFIX : <http://www.semanticweb.org/am175/ontologies/2017/1/untitled-ontology-79#>
SELECT ?c WHERE {
?c rdfs:subClassOf* :cell .
?c rdfs:subClassOf ?restriction .
?restriction owl:onProperty :has_part ; owl:someValuesFrom ?p .
VALUES ?p { <%s> }
}
""" % protein_uri
results = ontology.query(sparql_protein_cell_query)
return results
def get_onto_tissues_by_cell(ontology, cell_uri):
sparql_cell_tissue_query = """
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX owl: <http://www.w3.org/2002/07/owl#>
PREFIX : <http://www.semanticweb.org/am175/ontologies/2017/1/untitled-ontology-79#>
SELECT ?t WHERE {
?t rdfs:subClassOf* :tissue .
?t rdfs:subClassOf ?restriction .
?restriction owl:onProperty :has_part ; owl:someValuesFrom ?c .
VALUES ?c { <%s> }
}
""" % cell_uri
results = ontology.query(sparql_cell_tissue_query)
return results
def get_onto_structures_by_related_uri(ontology, uri):
sparql_tissue_structure_query1 = """
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX owl: <http://www.w3.org/2002/07/owl#>
PREFIX : <http://www.semanticweb.org/am175/ontologies/2017/1/untitled-ontology-79#>
SELECT ?s ?label ?pred WHERE {
?s rdfs:subClassOf* :complex_structure .
?s :lungmap_preferred_label ?label .
?s rdfs:subClassOf ?restriction .
?restriction owl:onProperty ?pred ; owl:someValuesFrom ?t .
VALUES ?t { <%s> } .
VALUES ?pred { :has_part :surrounded_by }
}
""" % uri
results = ontology.query(sparql_tissue_structure_query1)
return results
def get_onto_sub_classes(ontology, uri):
sparql_subclass_query = """
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX owl: <http://www.w3.org/2002/07/owl#>
PREFIX : <http://www.semanticweb.org/am175/ontologies/2017/1/untitled-ontology-79#>
SELECT ?sub ?label WHERE {
?sub rdfs:subClassOf ?uri .
?sub :lungmap_preferred_label ?label .
VALUES ?uri { <%s> }
}
""" % uri
results = ontology.query(sparql_subclass_query)
return results
| 3,253 |
tests/unit_tests/test_tethys_gizmos/test_urls.py
|
ezrajrice/tethys
| 0 |
2026034
|
from django.urls import reverse, resolve
from tethys_sdk.testing import TethysTestCase
class TestUrls(TethysTestCase):
def set_up(self):
pass
def tear_down(self):
pass
def test_urlpatterns(self):
url = reverse('gizmos:showcase')
resolver = resolve(url)
self.assertEqual('/developer/gizmos/', url)
self.assertEqual('index', resolver.func.__name__)
self.assertEqual('tethys_gizmos.views.gizmo_showcase', resolver.func.__module__)
self.assertEqual('gizmos', resolver.namespaces[0])
def test_urlpatterns_google_map_view(self):
url = reverse('gizmos:google_map_view')
resolver = resolve(url)
self.assertEqual('/developer/gizmos/google-map-view/', url)
self.assertEqual('google_map_view', resolver.func.__name__)
self.assertEqual('tethys_gizmos.views.gizmo_showcase', resolver.func.__module__)
self.assertEqual('gizmos', resolver.namespaces[0])
def test_urlpatterns_map_view(self):
url = reverse('gizmos:map_view')
resolver = resolve(url)
self.assertEqual('/developer/gizmos/map-view', url)
self.assertEqual('map_view', resolver.func.__name__)
self.assertEqual('tethys_gizmos.views.gizmo_showcase', resolver.func.__module__)
self.assertEqual('gizmos', resolver.namespaces[0])
def test_urlpatterns_jobs_table(self):
url = reverse('gizmos:jobs_table')
resolver = resolve(url)
self.assertEqual('/developer/gizmos/jobs-table', url)
self.assertEqual('jobs_table_demo', resolver.func.__name__)
self.assertEqual('tethys_gizmos.views.gizmo_showcase', resolver.func.__module__)
self.assertEqual('gizmos', resolver.namespaces[0])
def test_urlpatterns_esri_map(self):
url = reverse('gizmos:esri_map')
resolver = resolve(url)
self.assertEqual('/developer/gizmos/esri-map', url)
self.assertEqual('esri_map', resolver.func.__name__)
self.assertEqual('tethys_gizmos.views.gizmo_showcase', resolver.func.__module__)
self.assertEqual('gizmos', resolver.namespaces[0])
def test_urlpatterns_results(self):
url = reverse('gizmos:results', kwargs={'job_id': '123'})
resolver = resolve(url)
self.assertEqual('/developer/gizmos/123/results', url)
self.assertEqual('jobs_table_results', resolver.func.__name__)
self.assertEqual('tethys_gizmos.views.gizmo_showcase', resolver.func.__module__)
self.assertEqual('gizmos', resolver.namespaces[0])
def test_urlpatterns_sample_jobs(self):
url = reverse('gizmos:sample_jobs')
resolver = resolve(url)
self.assertEqual('/developer/gizmos/sample-jobs', url)
self.assertEqual('create_sample_jobs', resolver.func.__name__)
self.assertEqual('tethys_gizmos.views.gizmo_showcase', resolver.func.__module__)
self.assertEqual('gizmos', resolver.namespaces[0])
def test_ajax_urls_get_kml(self):
url = reverse('gizmos:get_kml')
resolver = resolve(url)
self.assertEqual('/developer/gizmos/ajax/get-kml/', url)
self.assertEqual('get_kml', resolver.func.__name__)
self.assertEqual('tethys_gizmos.views.gizmo_showcase', resolver.func.__module__)
self.assertEqual('gizmos', resolver.namespaces[0])
def test_ajax_urls_swap_kml(self):
url = reverse('gizmos:swap_kml')
resolver = resolve(url)
self.assertEqual('/developer/gizmos/ajax/swap-kml/', url)
self.assertEqual('swap_kml', resolver.func.__name__)
self.assertEqual('tethys_gizmos.views.gizmo_showcase', resolver.func.__module__)
self.assertEqual('gizmos', resolver.namespaces[0])
def test_ajax_urls_swap_overlays(self):
url = reverse('gizmos:swap_overlays')
resolver = resolve(url)
self.assertEqual('/developer/gizmos/ajax/swap-overlays/', url)
self.assertEqual('swap_overlays', resolver.func.__name__)
self.assertEqual('tethys_gizmos.views.gizmo_showcase', resolver.func.__module__)
self.assertEqual('gizmos', resolver.namespaces[0])
def test_ajax_urls_delete_job(self):
url = reverse('gizmos:delete_job', kwargs={'job_id': '123'})
resolver = resolve(url)
self.assertEqual('/developer/gizmos/ajax/123/delete', url)
self.assertEqual('delete', resolver.func.__name__)
self.assertEqual('tethys_gizmos.views.gizmos.jobs_table', resolver.func.__module__)
self.assertEqual('gizmos', resolver.namespaces[0])
def test_ajax_urls_update_job_row(self):
url = reverse('gizmos:update_job_row', kwargs={'job_id': '123'})
resolver = resolve(url)
self.assertEqual('/developer/gizmos/ajax/123/update-row', url)
self.assertEqual('update_row', resolver.func.__name__)
self.assertEqual('tethys_gizmos.views.gizmos.jobs_table', resolver.func.__module__)
self.assertEqual('gizmos', resolver.namespaces[0])
def test_ajax_urls_update_workflow_nodes_row(self):
url = reverse('gizmos:update_workflow_nodes_row', kwargs={'job_id': '123'})
resolver = resolve(url)
self.assertEqual('/developer/gizmos/ajax/123/update-workflow-nodes-row', url)
self.assertEqual('update_workflow_nodes_row', resolver.func.__name__)
self.assertEqual('tethys_gizmos.views.gizmos.jobs_table', resolver.func.__module__)
self.assertEqual('gizmos', resolver.namespaces[0])
def test_ajax_urls_bokeh_row(self):
url = reverse('gizmos:bokeh_row', kwargs={'job_id': '123', 'type': 'test'})
resolver = resolve(url)
self.assertEqual('/developer/gizmos/ajax/123/test/insert-bokeh-row', url)
self.assertEqual('bokeh_row', resolver.func.__name__)
self.assertEqual('tethys_gizmos.views.gizmos.jobs_table', resolver.func.__module__)
self.assertEqual('gizmos', resolver.namespaces[0])
| 5,938 |
utils/saver.py
|
niqbal996/ViewAL
| 126 |
2024576
|
import os
import shutil
import torch
from collections import OrderedDict
import glob
import constants
import json
import imageio
import numpy as np
class Saver:
def __init__(self, args, suffix='', experiment_group=None, remove_existing=False):
self.args = args
        if experiment_group is None:
experiment_group = args.dataset
self.experiment_dir = os.path.join(constants.RUNS, experiment_group, args.checkname, suffix)
if remove_existing and os.path.exists(self.experiment_dir):
shutil.rmtree(self.experiment_dir)
if not os.path.exists(self.experiment_dir):
print(f'Creating dir {self.experiment_dir}')
os.makedirs(self.experiment_dir)
def save_checkpoint(self, state, filename='checkpoint.pth.tar'):
filename = os.path.join(self.experiment_dir, filename)
torch.save(state, filename)
def load_checkpoint(self, filename='checkpoint.pth.tar'):
filename = os.path.join(self.experiment_dir, filename)
return torch.load(filename)
def save_experiment_config(self):
logfile = os.path.join(self.experiment_dir, 'parameters.txt')
log_file = open(logfile, 'w')
arg_dictionary = vars(self.args)
log_file.write(json.dumps(arg_dictionary, indent=4, sort_keys=True))
log_file.close()
def save_active_selections(self, paths, regional=False):
if regional:
Saver.save_masks(os.path.join(self.experiment_dir, "selections") , paths)
else:
filename = os.path.join(self.experiment_dir, 'selections.txt')
with open(filename, 'w') as fptr:
for p in paths:
fptr.write(p.decode('utf-8') + '\n')
@staticmethod
def save_masks(directory, paths):
if not os.path.exists(directory):
os.makedirs(directory)
for p in paths:
imageio.imwrite(os.path.join(directory, p.decode('utf-8') +'.png'), (paths[p]*255).astype(np.uint8))
| 2,018 |
blender/arm/logicnode/physics/LN_apply_force_at_location.py
|
Lykdraft/armory
| 0 |
2026056
|
from arm.logicnode.arm_nodes import *
class ApplyForceAtLocationNode(ArmLogicTreeNode):
"""Applies a force to a rigid body at a specified position.
@seeNode Apply Force
@seeNode Apply Impulse
@seeNode Apply Impulse At Location
@input Force: the force vector
@input Force On Local Axis: if `true`, interpret the force vector as in
object space
@input Location: the location where to apply the force
@input Location On Local Axis: if `true`, use the location relative
        to the object's location, otherwise use world coordinates
"""
bl_idname = 'LNApplyForceAtLocationNode'
bl_label = 'Apply Force At Location'
arm_version = 1
def init(self, context):
super(ApplyForceAtLocationNode, self).init(context)
self.add_input('ArmNodeSocketAction', 'In')
self.add_input('ArmNodeSocketObject', 'Rigid Body')
self.add_input('NodeSocketVector', 'Force')
self.add_input('NodeSocketBool', 'Force On Local Axis')
self.add_input('NodeSocketVector', 'Location')
self.add_input('NodeSocketBool', 'Location On Local Axis')
self.add_output('ArmNodeSocketAction', 'Out')
add_node(ApplyForceAtLocationNode, category=PKG_AS_CATEGORY, section='force')
| 1,261 |
tests/test_signals.py
|
justinmayer/django-improved-user
| 89 |
2025586
|
"""Test Signal Handling"""
from django.db.models.signals import post_save
from django.test import TestCase
from improved_user.models import User
class TestCreateSuperUserSignals(TestCase):
"""Simple test case for ticket #20541"""
# pylint: disable=unused-argument
def post_save_listener(self, *args, **kwargs):
"""Note when signal sent; helper function"""
self.signals_count += 1
# pylint: enable=unused-argument
def setUp(self):
"""Connect function above to postsave User model signal"""
self.signals_count = 0
post_save.connect(self.post_save_listener, sender=User)
def tearDown(self):
"""Connect utility function from postsave"""
post_save.disconnect(self.post_save_listener, sender=User)
def test_create_user(self):
"""Test User Creation"""
User.objects.create_user("<EMAIL>")
self.assertEqual(self.signals_count, 1)
def test_create_superuser(self):
"""Test Super User Creation"""
User.objects.create_superuser("<EMAIL>", "password")
self.assertEqual(self.signals_count, 1)
| 1,123 |
app/models.py
|
Napchat/microblog
| 0 |
2025834
|
'''
app.models
~~~~~~~~~~
It defines our models related to tables in the database.
'''
from hashlib import md5
import sys
import re
from app import db, app
from config import WHOOSH_ENABLED
enable_search = WHOOSH_ENABLED
if enable_search:
import flask_whooshalchemy as whooshalchemy
followers = db.Table('followers',
db.Column('follower_id', db.Integer, db.ForeignKey('user.id')),
db.Column('followed_id', db.Integer, db.ForeignKey('user.id'))
)
class User(db.Model):
'''The User model'''
id = db.Column(db.Integer, primary_key=True)
nickname = db.Column(db.String(64), index=True, unique=True)
email = db.Column(db.String(120), index=True, unique=True)
posts = db.relationship('Post', backref='author', lazy='dynamic')
about_me = db.Column(db.String(140))
last_seen = db.Column(db.DateTime)
# :arg: ``User`` on both sides mean that this is a self-referential
# relationship.
# :arg: ``secondary`` indicates the association table that is used
# for this relationship.
# :arg: ``primaryjoin`` indicates the condition that links the left
# side entity(the follower user) with the association table. Note that
# because the :table:`followers` is not a model there is a slightly
# odd syntax required to get to the field name.
# :arg: ``secondaryjoin`` indicates the condition that links the
# right side entity(the followed user) with the association table.
# :arg: ``backref`` defines how this relationship will be accessed
# from the right side entity.``lazy`` indicates the execution mode
# for this query. A mode of ``dynamic`` sets up the query to not run
    # until specifically requested. This is useful for performance reasons,
# and also because we will be able to take this query and modify it before
# it executes. More about this later.
# :arg: ``lazy`` is similiar to the parameter of the same name in the
# ``backref``, but this one applies to the regular query instead of the
# back reference.
    # If u1 follows u2, u1 gains a followed entry (in the followers table,
    # follower_id is set to u1's id and followed_id is set to u2's id).
followed = db.relationship('User',
secondary=followers,
primaryjoin=(followers.c.follower_id==id),
secondaryjoin=(followers.c.followed_id==id),
backref=db.backref('followers', lazy='dynamic'),
lazy='dynamic')
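    # Illustrative sketch (not part of the original model): with two persisted
    # users u1 and u2, the self-referential relationship above behaves roughly
    # like this; the attribute names are real, the sample objects are hypothetical.
    #
    #   u1.follow(u2)            # appends u2 to u1.followed (see follow() below)
    #   db.session.commit()
    #   u1.followed.all()        # -> [u2]
    #   u2.followers.all()       # -> [u1]   (via the 'followers' backref)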
@property
def is_authenticated(self):
'''pass'''
return True
@property
def is_active(self):
'''pass'''
return True
@property
    def is_anonymous(self):
'''pass'''
return False
def get_id(self):
'''pass'''
try:
return unicode(self.id)
except NameError:
return str(self.id)
def avatar(self, size):
'''This method returns the URL of the user's avatar image'''
return 'https://gravatar.com/avatar/%s?d=mm&s=%d' % \
(md5(self.email.encode('utf-8')).hexdigest(), size)
def __repr__(self): # pragma: no cover
'''This method tells Python how to print objects of
this class.
'''
return '<User %r>' % (self.nickname)
# staticmethod don't work on instances.
@staticmethod
def make_unique_nickname(nickname):
'''pass'''
if User.query.filter_by(nickname=nickname).first() is None:
return nickname
version = 2
while True:
new_nickname = nickname + str(version)
if User.query.filter_by(nickname=new_nickname).first() is None:
break
version += 1
return new_nickname
def follow(self, user):
'''Follows a user.'''
if not self.is_following(user):
self.followed.append(user)
return self
def unfollow(self, user):
'''Unfollows a user.'''
if self.is_following(user):
self.followed.remove(user)
return self
def is_following(self, user):
'''Check if you followed the user.'''
return self.followed.filter(followers.c.followed_id==user.id).count() > 0
def followed_posts(self):
'''Return a query object including posts of the users that you have followed.
        It is always a good idea to return a query object instead of results, because
        that gives the caller the choice of adding more clauses to the query before
        it is executed.
'''
return Post.query.join(followers, (followers.c.followed_id==Post.user_id)). \
filter(followers.c.follower_id==self.id).order_by(Post.timestamp.desc())
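    # Hedged usage sketch: because followed_posts() returns a query instead of
    # results, the caller can keep composing it, e.g. with Flask-SQLAlchemy
    # pagination (page size 20 is an arbitrary example):
    #   user.followed_posts().paginate(1, 20, False).items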
@staticmethod
def make_valid_nickname(nickname):
        return re.sub(r'[^a-zA-Z0-9_.]', '', nickname)
class Post(db.Model):
'''The Post model'''
# It is an array with all the database fields that will be in the searchable
# index.
__searchable__ = ['body']
id = db.Column(db.Integer, primary_key=True)
body = db.Column(db.String(140))
timestamp = db.Column(db.DateTime)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
language = db.Column(db.String(5))
def __repr__(self):
'''pass'''
return '<Post %r>' % (self.body)
# To initialize the full text for models.
if enable_search:
whooshalchemy.whoosh_index(app, Post)
| 5,467 |
stackdio/api/environments/api.py
|
hdmillerdr/stackdio
| 9 |
2025257
|
# -*- coding: utf-8 -*-
# Copyright 2017, Digital Reasoning
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import, unicode_literals
import logging
import os
from collections import OrderedDict
import envoy
from guardian.shortcuts import assign_perm
from rest_framework import generics, status
from rest_framework.filters import DjangoFilterBackend, DjangoObjectPermissionsFilter
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.serializers import ValidationError
from stackdio.api.environments import filters, mixins, models, serializers, utils
from stackdio.api.formulas.serializers import FormulaVersionSerializer
from stackdio.core.constants import Activity
from stackdio.core.permissions import StackdioModelPermissions, StackdioObjectPermissions
from stackdio.core.renderers import PlainTextRenderer
from stackdio.core.serializers import ObjectPropertiesSerializer
from stackdio.core.viewsets import (
StackdioModelUserPermissionsViewSet,
StackdioModelGroupPermissionsViewSet,
StackdioObjectUserPermissionsViewSet,
StackdioObjectGroupPermissionsViewSet,
)
logger = logging.getLogger(__name__)
class EnvironmentListAPIView(generics.ListCreateAPIView):
"""
Displays a list of all environments visible to you.
"""
queryset = models.Environment.objects.all()
permission_classes = (StackdioModelPermissions,)
filter_backends = (DjangoObjectPermissionsFilter, DjangoFilterBackend)
filter_class = filters.EnvironmentFilter
lookup_field = 'name'
def get_serializer_class(self):
if self.request.method == 'POST':
return serializers.FullEnvironmentSerializer
else:
return serializers.EnvironmentSerializer
def perform_create(self, serializer):
env = serializer.save()
for perm in models.Environment.object_permissions:
assign_perm('environments.%s_environment' % perm, self.request.user, env)
class EnvironmentDetailAPIView(generics.RetrieveUpdateDestroyAPIView):
queryset = models.Environment.objects.all()
serializer_class = serializers.EnvironmentSerializer
permission_classes = (StackdioObjectPermissions,)
lookup_field = 'name'
class EnvironmentPropertiesAPIView(generics.RetrieveUpdateAPIView):
queryset = models.Environment.objects.all()
serializer_class = ObjectPropertiesSerializer
permission_classes = (StackdioObjectPermissions,)
lookup_field = 'name'
class EnvironmentHostListAPIView(mixins.EnvironmentRelatedMixin, generics.ListAPIView):
serializer_class = serializers.EnvironmentHostSerializer
def get_queryset(self):
environment = self.get_environment()
return sorted(environment.get_current_hosts(), key=lambda x: x['id'])
class EnvironmentLabelListAPIView(mixins.EnvironmentRelatedMixin, generics.ListCreateAPIView):
serializer_class = serializers.EnvironmentLabelSerializer
def get_queryset(self):
environment = self.get_environment()
return environment.labels.all()
def get_serializer_context(self):
context = super(EnvironmentLabelListAPIView, self).get_serializer_context()
context['content_object'] = self.get_environment()
return context
def perform_create(self, serializer):
serializer.save(content_object=self.get_environment())
class EnvironmentLabelDetailAPIView(mixins.EnvironmentRelatedMixin,
generics.RetrieveUpdateDestroyAPIView):
serializer_class = serializers.EnvironmentLabelSerializer
lookup_field = 'key'
lookup_url_kwarg = 'label_name'
def get_queryset(self):
environment = self.get_environment()
return environment.labels.all()
def get_serializer_context(self):
context = super(EnvironmentLabelDetailAPIView, self).get_serializer_context()
context['content_object'] = self.get_environment()
return context
class EnvironmentComponentListAPIView(mixins.EnvironmentRelatedMixin, generics.ListAPIView):
serializer_class = serializers.EnvironmentComponentSerializer
def get_queryset(self):
environment = self.get_environment()
return environment.get_components()
class EnvironmentActionAPIView(mixins.EnvironmentRelatedMixin, generics.GenericAPIView):
serializer_class = serializers.EnvironmentActionSerializer
def get(self, request, *args, **kwargs):
environment = self.get_environment()
# Grab the list of available actions for the current environment activity
available_actions = Activity.env_action_map.get(environment.activity, [])
# Filter them based on permissions
available_actions = utils.filter_actions(request.user, environment, available_actions)
return Response({
'available_actions': sorted(available_actions),
})
def post(self, request, *args, **kwargs):
"""
POST request allows RPC-like actions to be called to interact
with the environment. Request contains JSON with an `action` parameter
and optional `args` depending on the action being executed.
"""
environment = self.get_environment()
serializer = self.get_serializer(environment, data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(serializer.data)
class EnvironmentFormulaVersionsAPIView(mixins.EnvironmentRelatedMixin, generics.ListCreateAPIView):
serializer_class = FormulaVersionSerializer
def get_queryset(self):
environment = self.get_environment()
return environment.formula_versions.all()
def perform_create(self, serializer):
serializer.save(content_object=self.get_environment())
class EnvironmentLogsAPIView(mixins.EnvironmentRelatedMixin, generics.GenericAPIView):
log_types = (
'provisioning',
'provisioning-error',
'orchestration',
'orchestration-error',
)
def get(self, request, *args, **kwargs):
environment = self.get_environment()
root_dir = environment.get_root_directory()
log_dir = environment.get_log_directory()
latest = OrderedDict()
for log_type in self.log_types:
spl = log_type.split('-')
if len(spl) > 1 and spl[1] == 'error':
log_file = '%s.err.latest' % spl[0]
else:
log_file = '%s.log.latest' % log_type
if os.path.isfile(os.path.join(root_dir, log_file)):
latest[log_type] = reverse(
'api:environments:environment-logs-detail',
kwargs={'parent_name': environment.name, 'log': log_file},
request=request,
)
historical = [
reverse('api:environments:environment-logs-detail',
kwargs={'parent_name': environment.name, 'log': log},
request=request)
for log in sorted(os.listdir(log_dir))
]
ret = OrderedDict((
('latest', latest),
('historical', historical),
))
return Response(ret)
class EnvironmentLogsDetailAPIView(mixins.EnvironmentRelatedMixin, generics.GenericAPIView):
renderer_classes = (PlainTextRenderer,)
# TODO: Code complexity ignored for now
def get(self, request, *args, **kwargs): # NOQA
environment = self.get_environment()
log_file = self.kwargs.get('log', '')
try:
tail = int(request.query_params.get('tail', 0))
except ValueError:
tail = None
try:
head = int(request.query_params.get('head', 0))
except ValueError:
head = None
if head and tail:
return Response('Both head and tail may not be used.',
status=status.HTTP_400_BAD_REQUEST)
if log_file.endswith('.latest'):
log = os.path.join(environment.get_root_directory(), log_file)
elif log_file.endswith('.log') or log_file.endswith('.err'):
log = os.path.join(environment.get_log_directory(), log_file)
else:
log = None
if not log or not os.path.isfile(log):
raise ValidationError({
'log_file': ['Log file does not exist: {0}.'.format(log_file)]
})
if tail:
ret = envoy.run('tail -{0} {1}'.format(tail, log)).std_out
elif head:
ret = envoy.run('head -{0} {1}'.format(head, log)).std_out
else:
with open(log, 'r') as f:
ret = f.read()
return Response(ret)
class EnvironmentModelUserPermissionsViewSet(StackdioModelUserPermissionsViewSet):
model_cls = models.Environment
class EnvironmentModelGroupPermissionsViewSet(StackdioModelGroupPermissionsViewSet):
model_cls = models.Environment
class EnvironmentObjectUserPermissionsViewSet(mixins.EnvironmentPermissionsMixin,
StackdioObjectUserPermissionsViewSet):
pass
class EnvironmentObjectGroupPermissionsViewSet(mixins.EnvironmentPermissionsMixin,
StackdioObjectGroupPermissionsViewSet):
pass
| 9,839 |
site-packages/visual/examples/eventHandlers.py
|
lebarsfa/vpython-wx
| 68 |
2025200
|
from __future__ import print_function
from visual import *
s = sphere()
instruct = """
Try mouse and keyboard events,
and note the printed outputs.
"""
l = label(pos=s.pos, text=instruct)
redrawCount = 0
class RedrawCounter(object):
redrawCount = 0
def increment(self):
self.redrawCount += 1
def handleMouseDown(evt, arbArg):
print("Mouse down!" + repr(evt.pos) + ':' + repr(arbArg) + ':' + evt.event)
if m.enabled:
m.stop()
print("keydown events are now disabled")
else:
m.start()
print("keydown events are now enabled")
def handleMouseUp( evt ):
print("Mouse up! " + evt.event)
def handleMouseClick( evt ):
print("Mouse click!" + evt.event)
def handleKeyUp( evt ):
print("The ", evt.key, "key has gone up:", evt.event)
print(' evt.ctrl =', evt.ctrl, ', evt.alt =', evt.alt, ', evt.shift =', evt.shift)
def handleKeyDown( evt ):
print("The ", evt.key, "key has gone down", evt.event)
print(' evt.ctrl =', evt.ctrl, ', evt.alt =', evt.alt, ', evt.shift =', evt.shift)
if evt.key == 'R':
print("There have been", redraw.redrawCount, "redraws")
def handleMouseMove( evt, num ):
print(evt)
print(num)
print("Mouse moved! pos=", repr(evt.pos), ":", evt.event)
redraw = RedrawCounter()
scene.bind('mousedown', handleMouseDown, scene)
scene.bind('mouseup', handleMouseUp)
scene.bind('click', handleMouseClick)
m = scene.bind('keydown', handleKeyDown)
scene.bind('keyup', handleKeyUp)
scene.bind('redraw', redraw.increment)
scene.bind('mousemove', handleMouseMove, 3.2)
| 1,588 |
fair_flearn/flearn/models/cifar/inception.py
|
litian96/TERM
| 30 |
2025044
|
import numpy as np
import tensorflow as tf
from tqdm import trange
import tensorflow.contrib.slim as slim
from flearn.utils.model_utils import batch_data, gen_batch
from flearn.utils.tf_utils import graph_size
from flearn.utils.tf_utils import process_grad
trunc_normal = lambda stddev: tf.truncated_normal_initializer(stddev=stddev)
def process_x(raw_x_batch):
x_batch = np.array(raw_x_batch)
return x_batch
def process_y(raw_y_batch):
labels = np.array(raw_y_batch)
y_batch = np.eye(10)[labels]
return y_batch
class Model(object):
def __init__(self, num_classes, q, optimizer, seed):
self.num_classes = num_classes
self.graph = tf.Graph()
with self.graph.as_default():
# tf.set_random_seed(123 + seed)
tf.set_random_seed(456 + seed)
self.features, self.labels, self.train_op, self.grads, self.eval_metric_ops, self.loss = self.create_model(
optimizer, q)
self.saver = tf.train.Saver()
self.sess = tf.Session(graph=self.graph)
self.size = graph_size(self.graph)
with self.graph.as_default():
self.sess.run(tf.global_variables_initializer())
metadata = tf.RunMetadata()
opts = tf.profiler.ProfileOptionBuilder.float_operation()
self.flops = tf.profiler.profile(self.graph, run_meta=metadata, cmd='scope', options=opts).total_float_ops
def create_model(self, optimizer, q, dropout_keep_prob=0.5, prediction_fn=slim.softmax):
end_points = {}
images = tf.placeholder(tf.float32, [None, 32, 32, 3])
one_hot_labels = tf.placeholder(tf.int32, [None, self.num_classes])
net = slim.conv2d(images, 64, [5, 5], scope='conv1')
net = slim.max_pool2d(net, [2, 2], 2, scope='pool1')
net = tf.nn.lrn(net, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm1')
net = slim.conv2d(net, 64, [5, 5], scope='conv2')
net = tf.nn.lrn(net, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm2')
net = slim.max_pool2d(net, [2, 2], 2, scope='pool2')
net = slim.flatten(net)
net = slim.fully_connected(net, 384, scope='fc3')
net = slim.fully_connected(net, 192, scope='fc4')
logits = slim.fully_connected(net, self.num_classes,
biases_initializer=tf.zeros_initializer(),
weights_initializer=trunc_normal(1 / 192.0),
weights_regularizer=None,
activation_fn=None,
scope='logits')
loss = tf.nn.softmax_cross_entropy_with_logits(
labels=one_hot_labels, logits=logits)
#loss = tf.reduce_mean(loss)
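        # The lines below replace the plain mean with the tilted (q-weighted)
        # aggregation of per-sample losses: (1/q) * log(mean(exp(q * loss)) + eps).
        # The hard-coded 1/128 appears to assume a batch size of 128.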
loss = tf.reshape(loss, [-1, 1])
loss = (1.0 / q) * tf.log(
(1.0 / 128) * tf.reduce_sum(tf.exp(q * loss)) + 1e-6)
grads_and_vars = optimizer.compute_gradients(loss)
grads, _ = zip(*grads_and_vars)
train_op = optimizer.apply_gradients(grads_and_vars, global_step=tf.train.get_global_step())
correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_labels, 1))
eval_metric_ops = tf.count_nonzero(correct_pred)
return images, one_hot_labels, train_op, grads, eval_metric_ops, loss
def set_params(self, model_params=None):
if model_params is not None:
with self.graph.as_default():
all_vars = tf.trainable_variables()
for variable, value in zip(all_vars, model_params):
variable.load(value, self.sess)
def get_params(self):
with self.graph.as_default():
model_params = self.sess.run(tf.trainable_variables())
return model_params
def get_loss(self, data):
with self.graph.as_default():
loss = self.sess.run(self.loss,
feed_dict={self.features: process_x(data['x']), self.labels: process_y(data['y'])})
return loss
def solve_sgd(self, mini_batch_data):
with self.graph.as_default():
input_data = process_x(mini_batch_data[0])
target_data = process_y(mini_batch_data[1])
grads, loss, _ = self.sess.run([self.grads, self.loss, self.train_op],
feed_dict={self.features: input_data, self.labels: target_data})
soln = self.get_params()
return grads, loss, soln
def solve_inner(self, data, num_epochs=1, batch_size=32):
'''
Args:
data: dict of the form {'x': [list], 'y': [list]}
Return:
soln: trainable variables of the lstm model
comp: number of FLOPs computed while training given data
'''
for idx in trange(num_epochs, desc='Epoch: ', leave=False):
for X, y in batch_data(data, batch_size):
with self.graph.as_default():
self.sess.run(self.train_op,
feed_dict={self.features: X, self.labels: y})
soln = self.get_params()
comp = num_epochs * (len(data['y']) // batch_size) * batch_size * self.flops
return soln, comp
def test(self, data):
'''
Args:
data: dict of the form {'x': [list], 'y': [list]}
Return:
tot_correct: total #samples that are predicted correctly
loss: loss value on `data`
'''
l = 0
tot = 0
for X, y in batch_data(data, 125):
with self.graph.as_default():
tot_correct, loss = self.sess.run([self.eval_metric_ops, self.loss],
feed_dict={self.features: process_x(X), self.labels: process_y(y)})
l += loss
tot += tot_correct
return tot, l / 80
def close(self):
self.sess.close()
| 5,953 |
wright_plans/attune.py
|
wright-group/wright-plans
| 1 |
2023726
|
import attune
import numpy as np
from bluesky import Msg
from cycler import cycler
from ._constants import Constant, ConstantTerm
from ._messages import set_relative_to_func_wrapper, inject_set_position_except_wrapper
from ._plans import scan_nd_wp
def motortune(detectors, opa, use_tune_points, motors, spectrometer=None, *, md=None):
cyc = 1
md = md or {}
instr = attune.Instrument(**opa.instrument)
arrangement = opa.arrangement
relative_sets = {}
exceptions = []
constants = {}
axis_units = {}
scanned_motors = [
m for m, params in motors.items() if params.get("method") == "scan"
]
if use_tune_points:
cyc = cycler(opa, get_tune_points(instr, instr[arrangement], scanned_motors))
axis_units[opa] = "nm" # TODO more robust units?
for mot, params in motors.items():
if params["method"] == "static":
yield Msg("set", getattr(opa, mot), params["center"])
exceptions += [mot]
elif params["method"] == "scan":
exceptions += [mot]
if use_tune_points:
params["center"] = 0
def _motor_rel(opa, motor):
def _motor_rel_inner():
return instr(opa.position, arrangement)[motor]
return _motor_rel_inner
relative_sets[getattr(opa, mot)] = _motor_rel(opa, mot)
cyc *= cycler(
getattr(opa, mot),
np.linspace(
params["center"] - params["width"] / 2,
params["center"] + params["width"] / 2,
params["npts"],
),
)
if spectrometer and spectrometer["device"]:
if spectrometer["method"] == "static":
yield Msg("set", spectrometer["device"], spectrometer["center"])
elif spectrometer["method"] == "zero":
yield Msg("set", spectrometer["device"], 0)
elif spectrometer["method"] == "track":
constants[spectrometer["device"]] = Constant("nm", [ConstantTerm(1, opa)])
elif spectrometer["method"] == "set":
if use_tune_points:
constants[spectrometer["device"]] = Constant(
"nm", [ConstantTerm(1, opa)]
)
else:
yield Msg("set", spectrometer["device"], spectrometer["center"])
elif spectrometer["method"] == "scan":
if use_tune_points:
spectrometer["center"] = 0
def _spec_rel(opa):
def _spec_rel_inner():
return opa.position
return _spec_rel_inner
relative_sets[spectrometer["device"]] = _spec_rel(opa)
cyc *= cycler(
spectrometer["device"],
np.linspace(
spectrometer["center"] - spectrometer["width"] / 2,
spectrometer["center"] + spectrometer["width"] / 2,
spectrometer["npts"],
),
)
axis_units[spectrometer["device"]] = "nm"
plan = scan_nd_wp(detectors, cyc, axis_units=axis_units, constants=constants, md=md)
if relative_sets:
plan = set_relative_to_func_wrapper(plan, relative_sets)
if exceptions:
plan = inject_set_position_except_wrapper(plan, opa, exceptions)
return (yield from plan)
def get_tune_points(instrument, arrangement, scanned_motors):
min_ = arrangement.ind_min
max_ = arrangement.ind_max
if not scanned_motors:
scanned_motors = arrangement.keys()
inds = []
for scanned in scanned_motors:
if scanned in arrangement.keys() and hasattr(
arrangement[scanned], "independent"
):
inds += [arrangement[scanned].independent]
continue
for name in arrangement.keys():
if (
name in instrument.arrangements
and scanned in instrument(instrument[name].ind_min, name).keys()
and hasattr(arrangement[scanned], "independent")
):
inds += [arrangement[scanned].independent]
if len(inds) > 1:
inds = np.concatenate(inds)
else:
inds = inds[0]
unique = np.unique(inds)
tol = 1e-3 * (max_ - min_)
diff = np.append(tol * 2, np.diff(unique))
return unique[diff > tol]
def run_holistic(detectors, opa, motor0, motor1, width, npts, spectrometer, *, md=None):
return (
yield from motortune(
detectors + [motor0],
opa,
True,
{motor1: {"method": "scan", "width": width, "npts": npts}},
spectrometer,
md=md,
)
)
def run_intensity(detectors, opa, motor, width, npts, spectrometer, *, md=None):
assert not spectrometer or spectrometer["method"] in ("none", "track", "zero")
return (
yield from motortune(
detectors,
opa,
True,
{motor: {"method": "scan", "width": width, "npts": npts}},
spectrometer,
md=md,
)
)
def run_setpoint(detectors, opa, motor, width, npts, spectrometer, *, md=None):
return (
yield from motortune(
detectors,
opa,
True,
{motor: {"method": "scan", "width": width, "npts": npts}},
spectrometer,
md=md,
)
)
def run_tune_test(detectors, opa, spectrometer, *, md=None):
return (yield from motortune(detectors, opa, True, {}, spectrometer, md=md))
| 5,614 |
databricks_cli/groups/api.py
|
testruction/databricks-cli
| 252 |
2026001
|
"""Implement Databricks Groups API, interfacing with the GroupsService."""
# Databricks CLI
# Copyright 2017 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"), except
# that the use of services to which certain application programming
# interfaces (each, an "API") connect requires that the user first obtain
# a license for the use of the APIs from Databricks, Inc. ("Databricks"),
# by creating an account at www.databricks.com and agreeing to either (a)
# the Community Edition Terms of Service, (b) the Databricks Terms of
# Service, or (c) another written agreement between Licensee and Databricks
# for the use of the APIs.
#
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from databricks_cli.sdk import GroupsService
class GroupsApi(object):
"""Implement the databricks '2.0/groups' API Interface."""
def __init__(self, api_client):
self.client = GroupsService(api_client)
def add_member(self, parent_name, user_name, group_name):
"""
Only one of ``user_name`` or ``group_name`` should be provided.
"""
assert bool(user_name is not None) ^ bool(group_name is not None)
return self.client.add_to_group(parent_name=parent_name,
user_name=user_name,
group_name=group_name)
def create(self, group_name):
"""Create a new group with the given name."""
return self.client.create_group(group_name)
def list_members(self, group_name):
"""Return all of the members of a particular group."""
return self.client.get_group_members(group_name)
def list_all(self):
"""Return all of the groups in an organization."""
return self.client.get_groups()
def list_parents(self, user_name, group_name):
"""
Only one of ``user_name`` or ``group_name`` should be provided.
Retrieve all groups in which a given user or group is a member.
Note: this method is non-recursive - it will return all groups in
which the given user or group is a member but not the groups in which
        those groups are members.
"""
assert bool(user_name is not None) ^ bool(group_name is not None)
return self.client.get_groups_for_principal(user_name=user_name, group_name=group_name)
def remove_member(self, parent_name, user_name, group_name):
"""
Only one of ``user_name`` or ``group_name`` should be provided.
"""
assert bool(user_name is not None) ^ bool(group_name is not None)
return self.client.remove_from_group(parent_name=parent_name,
user_name=user_name,
group_name=group_name)
def delete(self, group_name):
"""Remove a group from this organization."""
return self.client.remove_group(group_name)
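# --- Usage sketch (illustrative only; the workspace URL and token are placeholders,
# and the ApiClient import path is assumed from the usual databricks-cli layout) ---
#
#   from databricks_cli.sdk.api_client import ApiClient
#   client = ApiClient(host='https://<workspace-url>', token='<personal-access-token>')
#   groups = GroupsApi(client)
#   groups.create('data-engineers')
#   groups.add_member('data-engineers', user_name='user@example.com', group_name=None)
#   print(groups.list_members('data-engineers'))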
| 3,370 |
learn_stem/devops/selenium/test-3-screenshot.py
|
wgong/open_source_learning
| 1 |
2025911
|
from selenium import webdriver
driver = webdriver.Remote("http://localhost:4444/wd/hub", webdriver.DesiredCapabilities.FIREFOX.copy())
driver.get("http://www.google.com")
driver.get_screenshot_as_file('google.png')
| 219 |
imix/models/losses/diverse_loss.py
|
linxi1158/iMIX
| 0 |
2024499
|
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from ..builder import LOSSES
from .base_loss import BaseLoss
@LOSSES.register_module()
class DiverseLoss(BaseLoss):
def __init__(self):
super().__init__(loss_name=str(self))
def forward(self, model_output):
w_div = 0.125
predict_anchor = model_output['attnscore_list']
target_bbox = model_output['input_mask']
losses = DiverseLoss.compute_loss(predict_anchor, target_bbox)
losses = w_div * losses
return losses
def __str__(self):
return 'diverse_loss'
@staticmethod
def mask_softmax(attn_score, word_mask, tempuature=10., clssep=False, lstm=False):
if len(attn_score.shape) != 2:
attn_score = attn_score.squeeze(2).squeeze(2)
word_mask_cp = word_mask[:, :attn_score.shape[1]].clone()
score = F.softmax(attn_score * tempuature, dim=1)
if not clssep:
for ii in range(word_mask_cp.shape[0]):
if lstm:
word_mask_cp[ii, word_mask_cp[ii, :].sum() - 1] = 0
else:
word_mask_cp[ii, 0] = 0
word_mask_cp[ii, word_mask_cp[ii, :].sum()] = 0 # set one to 0 already
mask_score = score * word_mask_cp.float()
mask_score = mask_score / (mask_score.sum(1) + 1e-8).view(mask_score.size(0), 1).expand(
mask_score.size(0), mask_score.size(1))
return mask_score
@staticmethod
def compute_loss(score_list, word_mask, m=-1, coverage_reg=True):
score_matrix = torch.stack([DiverseLoss.mask_softmax(score, word_mask) for score in score_list],
dim=1) # (B,Nfilm,N,H,W)
cov_matrix = torch.bmm(score_matrix, score_matrix.permute(0, 2, 1)) # (BHW,Nfilm,Nfilm)
id_matrix = Variable(torch.eye(cov_matrix.shape[1]).unsqueeze(0).repeat(cov_matrix.shape[0], 1, 1).cuda())
if m == -1.:
div_reg = torch.sum(((cov_matrix * (1 - id_matrix))**2).view(-1)) / cov_matrix.shape[0]
else:
div_reg = torch.sum(((cov_matrix - m * id_matrix)**2).view(-1)) / cov_matrix.shape[0]
if coverage_reg:
word_mask_cp = word_mask.clone()
for ii in range(word_mask_cp.shape[0]):
word_mask_cp[ii, 0] = 0
word_mask_cp[ii, word_mask_cp[ii, :].sum()] = 0 # set one to 0 already
cover_matrix = 1. - torch.clamp(torch.sum(score_matrix, dim=1, keepdim=False), min=0., max=1.)
cover_reg = torch.sum((cover_matrix * word_mask_cp.float()).view(-1)) / cov_matrix.shape[0]
div_reg += cover_reg
return div_reg
| 2,714 |
functions.py
|
laidig/pysiri2validator
| 0 |
2025726
|
#!/usr/bin/python3
"""
Python functions to validate SIRI 2.0 formatted XML
much credit to https://gist.github.com/iiska/215879
"""
import lxml
from lxml import etree
def getLoadedSchema(schemafile):
doc = etree.parse(schemafile)
try:
schema = etree.XMLSchema(doc)
except Exception as e:
print(e)
exit(1)
return schema
def parsebytes(schema, bytes):
""" returns True if valid to XSD"""
print("Validating")
doc = etree.XML(bytes)
return assertSchema(schema, doc)
def parseFile(schema, xml):
""" returns True if valid to XSD"""
print("Validating")
doc = etree.parse(xml)
return assertSchema(schema, doc)
def assertSchema(schema, doc):
try:
schema.assertValid(doc)
except lxml.etree.DocumentInvalid as e:
print(e)
return False
return True
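# --- Usage sketch (illustrative only; the schema and document file names are placeholders) ---
# schema = getLoadedSchema('siri-2.0/xsd/siri.xsd')
# if parseFile(schema, 'vehicle_monitoring_response.xml'):
#     print('document is valid SIRI 2.0')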
| 893 |
test_2c2p.py
|
SweiLz/Python-2C2P
| 0 |
2025813
|
import hashlib
import hmac
import random
import time
import json
from flask import Flask, url_for, render_template, redirect, request, jsonify
from forms import PaymentInfoForm
app = Flask(__name__)
class Payment2C2P():
def __init__(self, merchant_id, secret_key, request_uri):
self.request_uri = request_uri
self.secret_key = secret_key
self.data = {
'merchant_id': merchant_id,
'order_id': str(int(round(time.time()*1000))),
'version': '8.5',
'result_url_1': 'http://127.0.0.1:5000/payment-complete'
}
def setTransaction(self, payment_description, currency, amount, order_id):
self.data['payment_description'] = payment_description
self.data['currency'] = str(self._get_currency_id_from_code(currency))
self.data['amount'] = '{:012}'.format(amount)
self.data['order_id'] = '{:05}'.format(order_id)
return self.data
def request(self):
mandatory_fields = ['payment_description',
'order_id', 'amount', 'currency']
for f in mandatory_fields:
if f not in self.data:
raise Exception('Missing mandatory value {}'.format(f))
self.data['hash_value'] = self._get_request_hash()
form_id = ''
chars = 'bcdfghjklmnpqrstvwxyz'
for i in range(0, 20):
form_id = '%s%s' % (
form_id, chars[random.randint(0, len(chars) - 1)])
html = []
html.append('<form id="{}" action="{}" method="post">'.format(
form_id, self.request_uri))
for key in self.data:
html.append('<input type="hidden" name="%s" value="%s" />' %
(key, self.data[key]))
html.append('<input type="submit" value="Pay now">')
html.append('</form>')
html.append('<script>document.forms.%s.submit()</script>' % form_id)
return ''.join(html)
def _get_request_hash(self):
fields = ['version', 'merchant_id', 'payment_description',
'order_id', 'currency', 'amount', 'result_url_1']
hash_str = ''
for f in fields:
if f in self.data:
hash_str = '{}{}'.format(hash_str, self.data[f])
print(hash_str)
hash_val = hmac.new(bytes(self.secret_key, 'utf-8'),
bytes(hash_str, 'utf-8'), hashlib.sha256).hexdigest()
return hash_val
def _validate_response_hash(self):
# rdata = json.dumps("version=8.5&request_timestamp=2020-01-11+16%3A17%3A14&merchant_id=JT04¤cy=764&order_id=1578734226319&amount=000000120000&invoice_no=&transaction_ref=1578734226319&approval_code=479135&eci=05&transaction_datetime=2020-01-11+16%3A18%3A17&payment_channel=001&payment_status=000&channel_response_code=00&channel_response_desc=success&masked_pan=411111XXXXXX1111&stored_card_unique_id=&backend_invoice=&paid_channel=&paid_agent=&recurring_unique_id=&ippPeriod=&ippInterestType=&ippInterestRate=&ippMerchantAbsorbRate=&payment_scheme=VI&process_by=VI&sub_merchant_list=&user_defined_1=&user_defined_2=&user_defined_3=&user_defined_4=&user_defined_5=&browser_info=Type%3DChrome79%2CName%3DChrome%2CVer%3D79.0&mcp=&mcp_amount=&mcp_currency=&mcp_exchange_rate=&hash_value=F1247BF43E9E5BAA72BD145BE082110ED280042620889964D369FD367827AADA")
fields = [
'version', 'request_timestamp', 'merchant_id', 'currency', 'order_id',
'amount', 'invoice_no', 'transaction_ref',
'approval_code', 'eci', 'transaction_datetime', 'payment_channel',
'payment_status', 'channel_response_code', 'channel_response_desc',
'masked_pan', 'stored_card_unique_id', 'backend_invoice',
'paid_channel', 'paid_agent', 'recurring_unique_id',
'ippPeriod', 'ippInterestType', 'ippInterestRate',
'ippMerchantAbsorbRate', 'payment_scheme', 'process_by',
'sub_merchant_list',
'user_defined_1',
'user_defined_2', 'user_defined_3', 'user_defined_4', 'user_defined_5',
            'browser_info', 'mcp', 'mcp_amount', 'mcp_currency', 'mcp_exchange_rate'
]
hash_str = ''
for f in fields:
            if f in request.form:
                hash_str = '{}{}'.format(hash_str, request.form[f])
print(hash_str)
hash_val = hmac.new(bytes(self.secret_key, 'utf-8'),
bytes(hash_str, 'utf-8'), hashlib.sha256).hexdigest()
return hash_val
def _get_currency_id_from_code(self, code):
return {
'SGD': 702,
'MMK': 104,
'IDR': 360,
'THB': 764,
'PHP': 608,
'HKD': 344,
'MYR': 458,
'VND': 704
}[code.upper()]
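# --- Usage sketch (illustrative only; the merchant id and secret key below are placeholders) ---
# p = Payment2C2P('MERCHANT_ID', 'SECRET_KEY',
#                 'https://demo2.2c2p.com/2C2PFrontEnd/RedirectV3/payment')
# p.setTransaction('Test order', 'THB', 120000, 1)  # amount is an int, zero-padded to 12 digits
# html_form = p.request()  # self-submitting HTML form that redirects the browser to 2C2P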
payment2C2P = Payment2C2P(
'JT04', 'QnmrnH6QE23N', 'https://demo2.2c2p.com/2C2PFrontEnd/RedirectV3/payment')
@app.route('/', methods=('GET', 'POST'))
def hello_world():
form = PaymentInfoForm(request.form)
if request.method == 'POST':
if form.is_submitted():
print("Form successfully submitted")
if form.validate_on_submit():
id = payment2C2P.data['order_id']
# message = json.
# print(form.json())
# print(form.description.data)
# "Hello World" #
return redirect(url_for('payment', desc=form.description.data, curr=form.currency.data, amount=form.amount.data, order_id=id), code=307)
else:
print(form.errors)
return render_template('index.html', form=form, template='form-template')
@app.route('/payment', methods=('GET', 'POST'))
def payment():
if request.method == 'POST':
desc = request.args.get('desc')
curr = request.args.get('curr')
amount = int(float(request.args.get('amount'))*100)
order_id = int(request.args.get('order_id'))
payment2C2P.setTransaction(desc, curr, amount, order_id)
return payment2C2P.request()
return "OK"
@app.route('/payment-complete', methods=('GET', 'POST'))
def success():
# print(request.)
return render_template('success.html', template='success-template')
if __name__ == "__main__":
app.config['SECRET_KEY'] = "powerful secretkey"
app.run(debug=True)
# Test Card
'''
Card brand        Card no / Account information        Expiry date   Security code
Visa              4111-1111-1111-1111                  12/2020       123
MasterCard        5555-5555-5555-4444                  12/2020       123
JCB               3566-1111-1111-1113                  12/2020       123
Amex              3782-8224-6310-005                   12/2020       1234
China Union Pay   6250-9470-0000-0014                  12/2033       123
                  Mobile phone number: 11112222
                  Dynamic Verification Code: 111111
Alipay            Username: <EMAIL>
                  Password: <PASSWORD>
                  Payment password: <PASSWORD>
WeChat Pay        Test User Account: 2604462170
                  Login Password: <PASSWORD>
                  Pay Password: <PASSWORD>
'''
| 6,900 |
tests/deprecated/backup_test.py
|
fred-yu-2013/Elastos.Hive.Node
| 5 |
2024422
|
# -*- coding: utf-8 -*-
"""
Testing file for the backup module.
"""
import unittest
from tests import init_test
from tests.utils.http_client import HttpClient
@unittest.skip
class BackupTestCase(unittest.TestCase):
def __init__(self, method_name='runTest'):
super().__init__(method_name)
init_test()
self.cli = HttpClient(f'/api/v2')
@staticmethod
def _subscribe():
HttpClient(f'/api/v2').put('/subscription/vault')
@classmethod
def setUpClass(cls):
cls._subscribe()
def test01_get_state(self):
r = self.cli.get('/vault-deprecated/content')
self.assertEqual(r.status_code, 200)
@unittest.skip
def test02_backup(self):
self.backup(self.cli.get_backup_credential())
@unittest.skip
def test03_restore(self):
self.restore(self.cli.get_backup_credential())
def backup(self, credential):
r = self.cli.post('/vault-deprecated/content?to=hive_node', body={'credential': credential})
self.assertEqual(r.status_code, 201)
def restore(self, credential):
r = self.cli.post('/vault-deprecated/content?from=hive_node', body={'credential': credential})
self.assertEqual(r.status_code, 201)
| 1,238 |
udemy/lazyprogrammer/logistic-regression-python/ecommerce_logistic.py
|
balazssimon/ml-playground
| 0 |
2026245
|
import numpy as np
from ecommerce_data import get_binary_data
import matplotlib.pyplot as plt
Xtrain, Ytrain, Xtest, Ytest = get_binary_data()
D = Xtrain.shape[1]
Ntrain = Xtrain.shape[0]
ones = np.array([[1]*Ntrain]).T
Xtrain = np.concatenate((ones, Xtrain), axis=1)
Ntest = Xtest.shape[0]
ones = np.array([[1]*Ntest]).T
Xtest = np.concatenate((ones, Xtest), axis=1)
# randomly initialize weights
W = np.random.randn(D+1)
# make predictions
def sigmoid(a):
return 1 / (1 + np.exp(-a))
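# e.g. sigmoid(0.0) == 0.5; large positive inputs approach 1, large negative inputs approach 0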
# cross entropy
def cross_entropy(Y, P):
return -np.mean(Y*np.log(P+0.0000001) + (1 - Y)*np.log(1 - P+0.0000001))
# calculate the accuracy
def classification_rate(Y, P):
return np.mean(Y == P)
# gradient descent
def gradient_descent(L1=False, L2=False):
train_costs = []
test_costs = []
y = Ytrain
w = W
l1 = 0.01
l2 = 0.01
learning_rate = 0.001
for i in range(1000):
a = Xtrain.dot(w)
y = sigmoid(a)
        ctrain = cross_entropy(Ytrain, y)
        train_costs.append(ctrain)
        ctest = cross_entropy(Ytest, sigmoid(Xtest.dot(w)))
        test_costs.append(ctest)
        # gradient descent step (L1/L2 penalty terms are added to the gradient)
        grad = Xtrain.T.dot(y - Ytrain)
        if L1:
            grad += l1*np.sign(w)
        if L2:
            grad += l2*w
        w -= learning_rate * grad
if i % 100 == 0:
print(i, ctrain, ctest)
Ptrain = np.round(sigmoid(Xtrain.dot(w)))
Ptest = np.round(sigmoid(Xtest.dot(w)))
print("Final w:", w)
print("Train score:", classification_rate(Ptrain, Ytrain))
print("Test score:", classification_rate(Ptest, Ytest))
legend1, = plt.plot(train_costs, label='train cost')
legend2, = plt.plot(test_costs, label='test cost')
plt.legend([legend1, legend2])
plt.show()
#gradient_descent()
#gradient_descent(L2=True)
#gradient_descent(L1=True)
gradient_descent(L1=True, L2=True)
| 1,967 |
common_utils.py
|
viciouspetal/knn-ml-cit
| 0 |
2025543
|
import pandas as pd
import numpy as np
def load_data(path, columns):
"""
For a given path to file it loads a dataset with given headers.
:param path: paths to dataset
:param columns: columns specified for a given dataset
:return: dataset with headers loaded in a pandas dataframe
"""
df = pd.read_csv(path, names=columns, header=None)
return df
def calculate_distances(data_points, query_instance):
"""
Calculates a distance matrix for each of the records detailing how far each datapoint is from a given query instance.
Additionally computes a sorted array detailing indices of the smallest to largest distance from a given
query point, from smallest (or closest point to query instance) to largest.
:param data_points: data points of a given dataset
:param query_instance: instance of a dataset for which the distance matrix will be computed for
:return:
"""
    # row-wise euclidean distance of every data point from the query instance
distance_matrix = euclideanDistance(data_points, query_instance)
#print('For query instance of {0} the distance matrix is {1}'.format(query_instance,distance_matrix))
# sorts the distance matrix and returns indices of elements from smallest distance value to largest
sorted_distance_matrix = np.argsort(distance_matrix)[np.in1d(np.argsort(distance_matrix),np.where(distance_matrix),1)]
return distance_matrix, sorted_distance_matrix
def euclideanDistance(data_points, query_instance):
"""
Calculate euclidean distance
:param data_points: data points of a given dataset
:param query_instance: instance of a dataset for which the distance matrix will be computed for
:return: distance value
"""
return np.sqrt(((data_points - query_instance) ** 2).sum(-1))
def manhattanDistance(data_points, query_instance):
"""
Calculate manhattan distance
:param data_points: data points of a given dataset
:param query_instance: instance of a dataset for which the distance matrix will be computed for
:return: distance value
"""
return np.abs(data_points - query_instance).sum(-1)
def minkowskiDistance(data_points, query_instance, p_value = 1):
"""
Calculate minkowski distance
:param data_points: data points of a given dataset
:param query_instance: instance of a dataset for which the distance matrix will be computed for
    :param p_value: order of the Minkowski distance (p >= 1)
:return: distance value
"""
    return (np.abs(data_points - query_instance) ** p_value).sum(-1) ** (1.0 / p_value)
def clean_cancer_dataset(df_training):
"""
Checks and cleans the dataset of any potential impossible values, e.g. bi-rads columns, the 1st only allows
values in the range of 1-5, ordinal
Age, 2nd column, cannot be negative, integer
Shape, 3rd column, only allows values between 1 and 4, nominal
Margin, only allows a range of 1 to 5, nominal
Density only allows values between 1-4,ordinal.
All deletions will be performed in place.
:return: cleaned up dataframe, count of removed points
"""
rows_pre_cleaning = df_training.shape[0]
df_training.drop(df_training.index[df_training['bi_rads'] > 5], inplace=True)
df_training.drop(df_training.index[df_training['shape'] > 4], inplace=True)
df_training.drop(df_training.index[df_training['margin'] > 5], inplace=True)
df_training.drop(df_training.index[df_training['density'] > 4], inplace=True)
rows_removed = rows_pre_cleaning - df_training.shape[0]
return df_training, rows_removed
def compute_classification_accuracy(correctly_classified, incorrectly_classified):
"""
Computes the accuracy of the model based on the number of correctly and incorrectly classified points.
Expresses accuracy as a percentage value.
:param correctly_classified: count of correctly classified data points
:param incorrectly_classified: count of incorrectly classified data points
:return: accuracy score
"""
accuracy = (correctly_classified / (correctly_classified + incorrectly_classified)) * 100
return accuracy
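# --- Usage sketch (illustrative values only) ---
if __name__ == "__main__":
    points = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
    query = np.array([1.5, 2.5])
    distances, order = calculate_distances(points, query)
    print(distances)         # euclidean distance of each point from the query instance
    print(points[order[0]])  # the closest data point to the query instance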
| 4,088 |
robot/test/simple_mov.py
|
rbccps-iisc/CORNET2.0
| 0 |
2025738
|
#!/usr/bin/python
import sys
import socket
from math import pi, cos, sin
from random import random
import unicodedata
import time
def getpoint(h, k, r):
theta = random() * 2 * pi
# return h + cos(theta) * r, k + sin(theta) * r
return 50, 50
def mov_left(x, y):
new = x - 0.5
return new, y
def mov_right(x, y):
new = x + 0.5
return new, y
def mov_up(x, y):
new = y + 0.5
return x, new
def mov_down(x, y):
new = y - 0.5
return x, new
def client(msg):
host = '127.0.0.1'
port = 12345
#while msg != 'q' and msg != 'exit':
s = socket.socket()
s.connect((host, port))
s.send(str(msg).encode('utf-8'))
    data = s.recv(1024).decode('utf-8')
    print('Received from Server: ', data)
    s.close()
    return data
if __name__ == '__main__':
    # get the position of AP1
ap1 = client("get.ap1.position")
ap1 = unicodedata.normalize('NFKD', ap1).encode('ascii', 'ignore')
ap1 = ap1.replace("[", " ")
ap1 = ap1.replace("]", " ")
print ap1
# centre = list(ap1.split(","))
args = sys.argv
if len(args) != 2:
# print("usage: network_config.py <config_file>")
#print args[1]
[x, y] = [1, 0]
else:
print args[1]
[x, y] = [1, float(args[1])]
while True:
# x = x + 1
# if x % 2 == 0:
# y = y + 1
# y = y + 0.5
# getpoint(int(float(centre[1])),int(float(centre[2])), 60)
if x <= 1 and y >= 0 and y <= 58:
x, y = mov_up(x, y)
elif x >= 1 and x <= 58 and y >= 58:
            x, y = mov_right(x, y)
elif x >= 58 and y <= 58 and y >= 1:
            x, y = mov_down(x, y)
elif x <= 58 and x > 1 and y <= 1:
            x, y = mov_left(x, y)
# msg = 'set.mybot.setPosition("'
msg = 'set.robot1.setPosition("'
msg = msg + str(float(x)) + ',' + str(float(y)) + ',0")'
print msg
result = client(msg)
print client("get.robot1.position")
# print client("get.ap1.position")
time.sleep(1.0)
if y == 90:
break
| 2,120 |
flower/server.py
|
ari-dasci/S-DDaBA
| 0 |
2024426
|
import flwr as fl
from typing import List, Optional, Tuple
from flwr.common import Weights
from ddaba import DDaBA
import tensorflow as tf
strategy = DDaBA()
fl.server.start_server(config={"num_rounds": 10}, strategy = strategy)
| 237 |
node/blockchain/management/commands/clear_blockchain.py
|
thenewboston-developers/Node
| 18 |
2024815
|
import logging
from django.core.management.base import BaseCommand
from django.db import transaction
from node.blockchain.facade import BlockchainFacade
from node.blockchain.utils.lock import delete_all_locks
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = 'Clears local blockchain' # noqa: A003
def handle(self, *args, **options):
with transaction.atomic():
delete_all_locks()
BlockchainFacade.get_instance().clear()
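# Usage (standard Django management command invocation, run from the project root):
#   python manage.py clear_blockchain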
| 489 |
nbt.py
|
aheadley/pynemap
| 6 |
2024978
|
from struct import pack, unpack
from gzip import GzipFile
TAG_END = 0
TAG_BYTE = 1
TAG_SHORT = 2
TAG_INT = 3
TAG_LONG = 4
TAG_FLOAT = 5
TAG_DOUBLE = 6
TAG_BYTE_ARRAY = 7
TAG_STRING = 8
TAG_LIST = 9
TAG_COMPOUND = 10
class TAG(object):
"""Each Tag needs to take a buffer, an index into the buffer and return the index that it stops reading at."""
id = None
def __init__(self, value=None, name=None):
if name: self.name = TAG_String(name)
else: self.name = None
self.value = value
#Parsers and Generators
def _parse_buffer(self, buffer):
raise NotImplementedError(self.__class__.__name__)
def _render_buffer(self, buffer, offset=None):
raise NotImplementedError(self.__class__.__name__)
#Printing and Formatting of tree
def tag_info(self):
if self.name:
return self.__class__.__name__ + \
('("%s")'%self.name) + \
": " + self.__repr__()
else:
return self.__class__.__name__ + \
('("%s")'%"") + \
": " + self.__repr__()
def pretty_tree(self, indent=0):
return ("\t"*indent) + self.tag_info()
class _TAG_Numeric(TAG):
def __init__(self, unpack_as, size, buffer=None, value=None, name=None):
super(_TAG_Numeric, self).__init__(value, name)
self.unpack_as = unpack_as
self.size = size
if buffer:
self._parse_buffer(buffer)
#Parsers and Generators
def _parse_buffer(self, buffer, offset=None):
self.value = unpack(self.unpack_as, buffer.read(self.size))[0]
def _render_buffer(self, buffer, offset=None):
buffer.write(pack(self.unpack_as, self.value))
#Printing and Formatting of tree
def __repr__(self):
return str(self.value)
class TAG_Byte(_TAG_Numeric):
id = TAG_BYTE
def __init__(self, value=None, name=None, buffer=None):
super(TAG_Byte, self).__init__(">b", 1, buffer, value, name)
class TAG_Short(_TAG_Numeric):
id = TAG_SHORT
def __init__(self, value=None, name=None, buffer=None):
super(TAG_Short, self).__init__(">h", 2, buffer, value, name)
class TAG_Int(_TAG_Numeric):
id = TAG_INT
def __init__(self, value=None, name=None, buffer=None):
super(TAG_Int, self).__init__(">i", 4, buffer, value, name)
class TAG_Long(_TAG_Numeric):
id = TAG_LONG
def __init__(self, value=None, name=None, buffer=None):
super(TAG_Long, self).__init__(">q", 8, buffer, value, name)
class TAG_Float(_TAG_Numeric):
id = TAG_FLOAT
def __init__(self, value=None, name=None, buffer=None):
super(TAG_Float, self).__init__(">f", 4, buffer, value, name)
class TAG_Double(_TAG_Numeric):
id = TAG_DOUBLE
def __init__(self, value=None, name=None, buffer=None):
super(TAG_Double, self).__init__(">d", 8, buffer, value, name)
class TAG_Byte_Array(TAG):
id = TAG_BYTE_ARRAY
def __init__(self, buffer=None):
super(TAG_Byte_Array, self).__init__()
self.tags = []
if buffer:
self._parse_buffer(buffer)
#Parsers and Generators
def _parse_buffer(self, buffer, offset=None):
self.length = TAG_Int(buffer=buffer)
self.value = buffer.read(self.length.value)
def _render_buffer(self, buffer, offset=None):
self.length._render_buffer(buffer, offset)
buffer.write(self.value)
#Printing and Formatting of tree
def __repr__(self):
return "[%i bytes]" % self.length.value
class TAG_String(TAG):
id = TAG_STRING
def __init__(self, value=None, name=None, buffer=None):
super(TAG_String, self).__init__(value, name)
if buffer:
self._parse_buffer(buffer)
#Parsers and Generators
def _parse_buffer(self, buffer, offset=None):
self.length = TAG_Short(buffer=buffer)
if self.length.value > 0:
self.value = unicode(buffer.read(self.length.value), "utf-8")
else: self.value = None
def _render_buffer(self, buffer, offset=None):
if self.value:
save_val = self.value.encode("utf-8")
self.length.value = len(save_val)
self.length._render_buffer(buffer, offset)
            if self.length.value > 0:
buffer.write(save_val)
else:
self.length.value = 0
self.length._render_buffer(buffer, offset)
#Printing and Formatting of tree
def __repr__(self):
return self.value
class TAG_List(TAG):
id = TAG_LIST
def __init__(self, name=None, type=None, buffer=None):
super(TAG_List, self).__init__(name=name)
if type:
self.tagID = TAG_Byte(value = type.id)
else: self.tagID = None
self.length = None
self.tags = []
if buffer:
self._parse_buffer(buffer)
#Parsers and Generators
def _parse_buffer(self, buffer, offset=None):
self.tagID = TAG_Byte(buffer=buffer)
self.length = TAG_Int(buffer=buffer)
for x in range(self.length.value):
self.tags.append(TAGLIST[self.tagID.value](buffer=buffer))
def _render_buffer(self, buffer, offset=None):
self.tagID._render_buffer(buffer, offset)
self.length._render_buffer(buffer, offset)
for tag in self.tags:
tag._render_buffer(buffer, offset)
#Printing and Formatting of tree
def __repr__(self):
return "%i entries of type %s" % (len(self.tags), TAGLIST[self.tagID.value].__name__)
def pretty_tree(self, indent=0):
output = [super(TAG_List,self).pretty_tree(indent)]
if len(self.tags):
output.append(("\t"*indent) + "{")
output.extend([tag.pretty_tree(indent+1) for tag in self.tags])
output.append(("\t"*indent) + "}")
return '\n'.join(output)
class TAG_Compound(TAG):
id = TAG_COMPOUND
def __init__(self, buffer=None):
super(TAG_Compound, self).__init__()
self.tags = []
if buffer:
self._parse_buffer(buffer)
#Parsers and Generators
def _parse_buffer(self, buffer, offset=None):
while True:
type = TAG_Byte(buffer=buffer)
if type.value == TAG_END:
#print "found tag_end"
break
else:
name = TAG_String(buffer=buffer)
try:
#DEBUG print type, name
tag = TAGLIST[type.value](buffer=buffer)
tag.name = name
self.tags.append(tag)
except KeyError:
raise ValueError("Unrecognised tag type")
def _render_buffer(self, buffer, offset=None):
for tag in self.tags:
TAG_Byte(tag.id)._render_buffer(buffer, offset)
tag.name._render_buffer(buffer, offset)
tag._render_buffer(buffer,offset)
buffer.write('\x00') #write TAG_END
#Accessors
def __getitem__(self, key):
if isinstance(key,int):
return self.tags[key]
elif isinstance(key, str):
for tag in self.tags:
if tag.name.value == key:
return tag
else:
raise KeyError("A tag with this name does not exist")
else:
raise ValueError("key needs to be either name of tag, or index of tag")
#Printing and Formatting of tree
def __repr__(self):
return '%i Entries' % len(self.tags)
def pretty_tree(self, indent=0):
output = [super(TAG_Compound,self).pretty_tree(indent)]
if len(self.tags):
output.append(("\t"*indent) + "{")
output.extend([tag.pretty_tree(indent+1) for tag in self.tags])
output.append(("\t"*indent) + "}")
return '\n'.join(output)
TAGLIST = {TAG_BYTE:TAG_Byte, TAG_SHORT:TAG_Short, TAG_INT:TAG_Int, TAG_LONG:TAG_Long, TAG_FLOAT:TAG_Float, TAG_DOUBLE:TAG_Double, TAG_BYTE_ARRAY:TAG_Byte_Array, TAG_STRING:TAG_String, TAG_LIST:TAG_List, TAG_COMPOUND:TAG_Compound}
class NBTFile(TAG_Compound):
"""Represents an NBT file object"""
def __init__(self, filename=None, mode=None, buffer=None):
super(NBTFile,self).__init__()
self.__class__.__name__ = "TAG_Compound"
if filename:
self.file = GzipFile(filename, mode)
self.parse_file(self.file)
def parse_file(self, file=None):
if not file:
file = self.file
if file:
self.type = TAG_Byte(buffer=file)
if self.type.value == self.id:
name = TAG_String(buffer=file)
self._parse_buffer(file)
self.name = name
self.file.close()
else:
raise ValueError("First record is not a Compound Tag")
def write_file(self, filename=None, file=None):
if file:
self.file = file
elif filename:
self.file = GzipFile(filename, "wb")
else:
raise ValueError("Need to specify either a filename or a file")
#Render tree to file
self.type._render_buffer(file)
self.name._render_buffer(file)
self._render_buffer(file)
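# --- Usage sketch (illustrative only; "level.dat" is a placeholder file name) ---
# nbtfile = NBTFile("level.dat", "rb")        # parses the gzipped NBT stream on open
# print nbtfile.pretty_tree()                 # dump the whole tag tree
# print nbtfile["Data"]["LevelName"].value    # a Minecraft level.dat typically nests its data under "Data"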
| 7,972 |
test2.py
|
javajawa/debian-repo-remux
| 1 |
2025906
|
#!/usr/bin/python3
"""
Test a thing
"""
from apt.repo import Repository
# noinspection PyProtectedMember
def main():
"""
Main function
"""
repo = Repository('file://C:/svn/debian-remux/debian')
with open('bind9-dev.deb', 'rb') as debian_file:
package = repo.adopt(debian_file)
print(repo)
dist = repo.distribution('stable')
packages = dist.package_list('main', 'amd64')
packages.add(package)
print(dist)
if __name__ == "__main__":
main()
| 499 |
src/tests/test_api.py
|
tchalvak/Brokk
| 0 |
2026108
|
import requests
import json
#import pprint
#import config
API_BASE = 'https://t8wwrnycz5.execute-api.us-east-1.amazonaws.com/dev/' # Move this to config later
class TestApi:
''' Hits the api endpoints and checks for json coming back
'''
def root(self):
'''Get the root url from the config'''
return API_BASE
def status_code(self, url):
''' Gets http status codes of the urls '''
try:
r = requests.head(url, verify=False)
return r.status_code
except requests.ConnectionError:
return None
def pull_json(self, url, endpoint):
''' Get a page to parse as json, for the api '''
params = dict(
type=endpoint,
jsoncallback='fake'
)
resp = requests.get(url=url, params=params, verify=False)
cut = resp.text
return cut
def test_root_url_config_works(self):
''' Ensure root is configured '''
assert (self.root() is not None and
len(str(self.root())) > 5)
def test_api_urls(self):
endpoints = ['register', 'order', 'subscribe', 'payment-method/card', 'payment-method/bank-account',
'payment-method/source', 'initiate-payment-intent', 'payments', 'search-transactions',
'query', 'mutate']
player_data = self.pull_json(self.root(), 'register')
assert (player_data is not None)
for endpoint in endpoints:
data = self.pull_json(self.root(), endpoint)
assert (data is not None and
json.loads(data) is not False and
len(json.loads(data)) > 0
)
| 1,674 |
scheduler/tests/test_slot_scheduler.py
|
srobo/volunteer-scheduler
| 0 |
2025219
|
from unittest import TestCase
from scheduler.jobs_board import JobsBoard
from scheduler.match_maker import MatchMaker
from scheduler.slot_scheduler import SlotScheduler
from scheduler.scheduling_exception import SchedulingException
from scheduler.tests.helpers import *
class TestSlotScheduler(TestCase):
def setUp(self):
self.volunteers = {
'Jack': create_chef('Jack'),
'Jill': create_delivery_driver('Jill'),
'Sue': create_food_critic('Sue')
}
self.constraints_by_role = {
'chef': [can_cook],
'taster': [can_critique],
'delivery-driver': [can_deliver]
}
def test_generates_a_schedule(self):
matchmaker = MatchMaker(self.volunteers, self.constraints_by_role)
jobs = JobsBoard(
['chef'],
['delivery-driver'],
['taster']
)
scheduler = SlotScheduler(jobs, matchmaker)
assert scheduler.generate_schedule() == {
'Jack': 'chef',
'Sue': 'taster',
'Jill': 'delivery-driver'
}
def test_raises_exception_when_not_all_necessary_roles_filled(self):
matchmaker = MatchMaker(self.volunteers, self.constraints_by_role)
jobs = JobsBoard(
['chef', 'chef'],
['delivery-driver'],
['taster']
)
scheduler = SlotScheduler(jobs, matchmaker)
with self.assertRaises(SchedulingException):
scheduler.generate_schedule()
def test_raises_exception_when_not_enough_volunteers(self):
matchmaker = MatchMaker(self.volunteers, self.constraints_by_role)
jobs = JobsBoard(
['chef', 'delivery-driver', 'delivery-driver', 'taster'],
['delivery-driver'],
['taster']
)
scheduler = SlotScheduler(jobs, matchmaker)
with self.assertRaises(SchedulingException):
scheduler.generate_schedule()
| 1,976 |
office365/communications/callrecords/call_record.py
|
theodoriss/Office365-REST-Python-Client
| 544 |
2026251
|
from office365.directory.identities.identity_set import IdentitySet
from office365.entity import Entity
class CallRecord(Entity):
"""Represents a single peer-to-peer call or a group call between multiple participants,
sometimes referred to as an online meeting."""
@property
def join_web_url(self):
"""Meeting URL associated to the call. May not be available for a peerToPeer call record type."""
return self.properties.get("joinWebUrl", None)
@property
def organizer(self):
"""The organizing party's identity.."""
return self.properties.get("organizer", IdentitySet())
| 630 |
sn4sp/core/__init__.py
|
CoeGSS-Project/SN4SP
| 0 |
2024732
|
"""
Core classes of the SN4SP package.
"""
from .similarity_network import SimilarityGraph
__all__ = ['SimilarityGraph']
| 122 |
tests/tests/views_mixins/views.py
|
toshiki-tosshi/django-boost
| 25 |
2025241
|
from datetime import timedelta
from django.forms import Form
from django.utils.timezone import now
from django.views.generic import FormView, TemplateView, View
from django_boost.forms.mixins import FormUserKwargsMixin
from django_boost.views.generic import TemplateView as BoostTemplateView
from django_boost.views.mixins import (AllowContentTypeMixin, CSRFExemptMixin,
DynamicRedirectMixin, JsonRequestMixin,
JsonResponseMixin, LimitedTermMixin,
ReAuthenticationRequiredMixin,
# RedirectToDetailMixin,
StaffMemberRequiredMixin,
SuperuserRequiredMixin,
UserAgentMixin, ViewUserKwargsMixin)
class AllowContentTypeNoneView(AllowContentTypeMixin, FormView):
template_name = "boost/test/index.html"
form_class = Form
success_url = '/'
class AllowContentTypeAllowedView(AllowContentTypeMixin, FormView):
template_name = "boost/test/index.html"
allowed_content_types = ['text/html']
form_class = Form
success_url = '/'
class AllowContentTypeView(AllowContentTypeMixin, FormView):
template_name = "boost/test/index.html"
form_class = Form
success_url = '/'
class CSRFExemptView(CSRFExemptMixin, FormView):
template_name = "boost/test/index.html"
form_class = Form
success_url = '/'
class DynamicRedirectView(DynamicRedirectMixin, FormView):
template_name = "boost/test/index.html"
form_class = Form
success_url = '/'
class JsonRequestView(JsonRequestMixin, FormView):
template_name = "boost/test/index.html"
form_class = Form
success_url = '/'
class JsonResponseView(JsonResponseMixin, View):
extra_context = {'json': True}
class LimitedTermView(LimitedTermMixin, TemplateView):
template_name = "boost/test/index.html"
class LimitedTermAfterEndView(LimitedTermMixin, TemplateView):
template_name = "boost/test/index.html"
end_datetime = now() - timedelta(days=1)
class LimitedTermBeforeEndView(LimitedTermMixin, TemplateView):
template_name = "boost/test/index.html"
end_datetime = now() + timedelta(days=1)
class LimitedTermAfterStartView(LimitedTermMixin, TemplateView):
template_name = "boost/test/index.html"
start_datetime = now() - timedelta(days=1)
class LimitedTermBeforeStartView(LimitedTermMixin, TemplateView):
template_name = "boost/test/index.html"
start_datetime = now() + timedelta(days=1)
class ReAuthenticationRequiredView(ReAuthenticationRequiredMixin,
TemplateView):
template_name = "boost/test/index.html"
interval = 100
class StaffMemberRequiredView(StaffMemberRequiredMixin, TemplateView):
template_name = "boost/test/index.html"
class SuperuserRequiredView(SuperuserRequiredMixin, TemplateView):
template_name = "boost/test/index.html"
class UserAgentView(UserAgentMixin, BoostTemplateView):
template_name = "boost/test/index.html"
class ViewUserKwargsView(ViewUserKwargsMixin, FormView):
class UserKwargsForm(FormUserKwargsMixin, Form):
pass
template_name = "boost/test/index.html"
form_class = UserKwargsForm
success_url = '/'
| 3,350 |
examples/sync_buckets/sync_buckets.py
|
Andrew-Wichmann/pytest-localstack
| 63 |
2024038
|
"""Copy objects from one S3 bucket to another."""
import optparse
import boto3
s3 = boto3.resource("s3")
def sync_buckets(src_bucket, dest_bucket):
"""Sync objects from one AWS S3 bucket to another.
Args:
src_bucket (boto3 Bucket): Objects will be copied from this bucket
to *dest_bucket*.
dest_bucket (boto3 Bucket): Objects will be copied here from *src_bucket*.
Returns:
int: Count of objects copied between buckets.
"""
count = 0
for src_obj in src_bucket.objects.all():
response = src_obj.get()
dest_bucket.put_object(Key=src_obj.key, Body=response["Body"].read())
count += 1
return count
def sync_buckets_by_name(src_bucket_name, dest_bucket_name):
"""Sync objects from one AWS S3 bucket to another by name."""
src_bucket = s3.Bucket(src_bucket_name)
dest_bucket = s3.Bucket(dest_bucket_name)
return sync_buckets(src_bucket, dest_bucket)
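# --- Usage sketch (illustrative only; bucket names are placeholders and must already exist) ---
# copied = sync_buckets_by_name("my-source-bucket", "my-destination-bucket")
# print("copied %d objects" % copied)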
def main():
parser = optparse.OptionParser(
description="Copy objects from one S3 bucket to another.",
usage="usage: %prog [options] src_bucket dest_bucket",
)
options, args = parser.parse_args()
src_bucket, dest_bucket = args
sync_buckets_by_name(src_bucket, dest_bucket)
if __name__ == "__main__":
main()
| 1,308 |
Fintech-Banking-Chatbot/bot-application/app.py
|
malastare-ai/Malastare.ai
| 3 |
2025593
|
from flask import Flask
from flask import render_template,jsonify,request
import requests
from response import *
import random
app = Flask(__name__)
@app.route('/')
def hello_and_welcome_my_name_is_Lennox():
"""
Sample hello and welcome my name is Lennox
"""
return render_template('home.html')
get_random_response = lambda intent:random.choice(response[intent])
isClosingCard = False
@app.route('/chat',methods=["POST"])
def chat():
global isClosingCard
"""
chat end point that performs NLU using rasa.ai
and constructs response from response.py
"""
try:
response = requests.get("http://localhost:5000/parse",params={"q":request.form["text"]})
#response = requests.get("http://localhost:5000/parse?q="+request.form["text"])
response = response.json()
intent = response["intent"]
intent = intent["name"]
entities = response["entities"]
length = len(entities)
if(length>0):
entity = entities[0]["entity"]
value = entities[0]["value"]
if intent == "event-request":
response_text = get_event(entities["day"],entities["time"],entities["place"])
elif isClosingCard == True:
if response["text"] == "1234":
response_text = "We are closing this card. Thanks for your patience."
isClosingCard = False
elif response["text"] == "ok":
response_text = "How else I can help you? :)"
isClosingCard = False
else:
response_text = "This card doesn't exist. Please check the number again. If you want to talk about anything else rather than this #then type ok"
elif intent == "lost_card":
response_text = "Please give us your credit card number"
isClosingCard = True
elif intent == "card_charge":
if(length>0 and entity =="card_type"):
if(value.lower() == "debit card"):
response_text = "For debit card first year is free and from second year onward we charge 200 per year"
elif(value.lower() =="credit card"):
response_text = "For credit cards we charge 300 per year"
else:
response_text = "I am not sure about that, sorry!"
else:
response_text = "For credit cards we charge 300 per year. And for debit cards first year is free and from second year we charge 200 per year"
# elif intent == "get_cheque_book":
# response_text = "We will send the new checkbook to your address."
# elif intent == "loan_car":
# response_text = "We provide car loan of minimum 250000 and maximum of 1000000"
# elif intent == "loan_home":
# response_text = "We provide home loan of minimum 550000 and maximum 3000000"
# elif intent == "loan_max":
# if(length>0 and entity == "loan"):
# if (value.lower() =="medical" or value.lower() == "personal" or value.lower() == "marriage" or value.lower() == "traveling" or value.lower() == "education"):
# response_text = "We provide personal loan of maximum 50000"
# elif value.lower() == "car":
# response_text = "We provide car loan of maximum of 1000000"
# elif value.lower() == "home":
# response_text = "We provide home loan of maximum 3000000"
elif intent == "loan_min":
if(length>0 and entity == "loan"):
if (value.lower() =="medical" or value.lower() == "personal" or value.lower() == "marriage" or value.lower() == "traveling" or value.lower() == "education"):
response_text = "We provide minimum personal loan of 10000"
elif value.lower() == "car":
response_text = "We provide car loan of minimum of 250000"
elif value.lower() == "home":
response_text = "We provide home loan of minimum 550000"
elif intent == "loan_max_home":
response_text = "We provide home loan of maximum 3000000"
elif intent == "loan_details":
response_text = "We provide 3 different kinds of loans currently.\n1.Personal loan(Marriage, traveling, education etc)\n2.Car loan\n3.Home loan"
elif intent == "show_balance":
response_text = "Your current account balance is 15000"
elif intent == "summary":
response_text = "Account type: Checking Account\nCurrent balance: 15000 tk\nAvailable to withdraw: 14500"
else:
response_text = get_random_response(intent)
return jsonify({"status":"success","response":response_text})
except Exception as e:
print e
return jsonify({"status":"success","response":"Sorry I am not trained to do that yet..."})
app.config["DEBUG"] = True
if __name__ == "__main__":
app.run(port=8000)
| 5,118 |
Health Bot/main.py
|
adilshehzad786/Healthtech-Bot-StartHack-EU
| 1 |
2026019
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import asyncio
from adapter import ConsoleAdapter
from bot import Name
from bot import Gender
# Create adapter
ADAPTER = ConsoleAdapter()
BOT = Name()
BOT = Gender()
LOOP = asyncio.get_event_loop()
if __name__ == "__main__":
try:
# Greet user
print("Hi... I'm a Health Bot (Still in Development State) ")
print("What is your Name?")
LOOP.run_until_complete(ADAPTER.process_activity(BOT.on_turn))
except KeyboardInterrupt:
pass
finally:
LOOP.stop()
LOOP.close()
| 635 |
protos/params_pb2.py
|
chalant/pluto
| 0 |
2024528
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: contrib/coms/protos/params.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='contrib/coms/protos/params.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n contrib/coms/protos/params.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\x80\x02\n\tRunParams\x12\x14\n\x0c\x63\x61pital_base\x18\x01 \x01(\x02\x12\x31\n\rstart_session\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x65nd_session\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0c\n\x04live\x18\x04 \x01(\x08\x12\x13\n\x0bmetrics_set\x18\x05 \x01(\t\x12\x30\n\x0e\x64\x61ta_frequency\x18\x06 \x01(\x0e\x32\x18.RunParams.DataFrequency\"$\n\rDataFrequency\x12\n\n\x06MINUTE\x10\x00\x12\x07\n\x03\x44\x41Y\x10\x01\x62\x06proto3')
,
dependencies=[google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,])
_RUNPARAMS_DATAFREQUENCY = _descriptor.EnumDescriptor(
name='DataFrequency',
full_name='RunParams.DataFrequency',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='MINUTE', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DAY', index=1, number=1,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=290,
serialized_end=326,
)
_sym_db.RegisterEnumDescriptor(_RUNPARAMS_DATAFREQUENCY)
_RUNPARAMS = _descriptor.Descriptor(
name='RunParams',
full_name='RunParams',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='capital_base', full_name='RunParams.capital_base', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='start_session', full_name='RunParams.start_session', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='end_session', full_name='RunParams.end_session', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='live', full_name='RunParams.live', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='metrics_set', full_name='RunParams.metrics_set', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data_frequency', full_name='RunParams.data_frequency', index=5,
number=6, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_RUNPARAMS_DATAFREQUENCY,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=70,
serialized_end=326,
)
_RUNPARAMS.fields_by_name['start_session'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_RUNPARAMS.fields_by_name['end_session'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_RUNPARAMS.fields_by_name['data_frequency'].enum_type = _RUNPARAMS_DATAFREQUENCY
_RUNPARAMS_DATAFREQUENCY.containing_type = _RUNPARAMS
DESCRIPTOR.message_types_by_name['RunParams'] = _RUNPARAMS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
RunParams = _reflection.GeneratedProtocolMessageType('RunParams', (_message.Message,), dict(
DESCRIPTOR = _RUNPARAMS,
__module__ = 'contrib.coms.protos.params_pb2'
# @@protoc_insertion_point(class_scope:RunParams)
))
_sym_db.RegisterMessage(RunParams)
# @@protoc_insertion_point(module_scope)
| 5,279 |
code/getweb/reEncode.py
|
cblanesg/dipMex
| 2 |
2025113
|
# attempt to read, encode to unicode, write the file... need to work with a loop
f = open('/home/eric/Dropbox/data/rollcall/dipMex/data/fromWeb/votes/62/tmp1.txt', 'r') # read file... check if using 'w' affects file after close
from bs4 import BeautifulSoup # load library
soup = BeautifulSoup(f)
f.close()
print(soup.get_text()) # how to write text only?
| 382 |
06-learning-to-classify-text/sentiment_analyzer.py
|
SolangeUG/nltk-book
| 2 |
2025750
|
# noinspection PyUnresolvedReferences
from util import data_util
from nltk.classify import NaiveBayesClassifier
from nltk.sentiment import SentimentAnalyzer
from nltk.sentiment.util import extract_unigram_feats
from nltk.tokenize import sent_tokenize
def generate_labeled_documents(data, label):
"""
Return a list of 'labeled' documents (tuples) for a given dataset
:param data: input dataset
:param label: input label to characterize the generated documents
:return: list of labeled documents
"""
documents = []
for i in range(len(data)):
# we're interested in the 'reviewText' component of our dataset's elements
documents.extend(
((data_util.tokenize_with_negation(sent), label) for sent in sent_tokenize(data[i]['reviewText']))
)
return documents
if __name__ == '__main__':
# Dataset information
datadir, dataset = './data/', 'Baby'
# Download data and load into memory
baby_dataset = data_util.load_data(dataset, datadir)
baby_train, baby_valid, baby_test = data_util.partition_train_validation_test(baby_dataset)
# Sentiment analyzer
baby_train_docs_subj = generate_labeled_documents(baby_train[:800], 'subj')
baby_train_docs_obj = generate_labeled_documents(baby_train[800:1000], 'obj')
baby_train_docs = baby_train_docs_subj + baby_train_docs_obj
# print("baby train docs [0]", baby_train_docs[0])
baby_test_docs_subj = generate_labeled_documents(baby_test[:200], 'subj')
baby_test_docs_obj = generate_labeled_documents(baby_test[200:400], 'obj')
baby_test_docs = baby_test_docs_subj + baby_test_docs_obj
# print("baby test docs [0]", baby_test_docs[0])
analyzer = SentimentAnalyzer()
all_words_with_negation = analyzer.all_words([doc for doc in baby_train_docs])
print("\nVocabulary size: {}".format(str(len(all_words_with_negation))))
# Unigram features
unigram_features = analyzer.unigram_word_feats(all_words_with_negation, min_freq=4)
print("Unigram features size: {}".format(str(len(unigram_features))))
analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_features)
# Apply features and build the training set
training_set = analyzer.apply_features(baby_train_docs)
test_set = analyzer.apply_features(baby_test_docs)
# Train our classifier
trainer = NaiveBayesClassifier.train
classifier = analyzer.train(trainer, training_set)
# Evaluation results
for key, value in sorted(analyzer.evaluate(test_set).items()):
print('{0}: {1}'.format(key, value))
| 2,586 |
Programming/rm_arr.py
|
flybaozi/algorithm-study
| 0 |
2025186
|
def removeDuplicates(nums):
"""
:type nums: List[int]
    :rtype: List[int]
"""
i = 0
while i < len(nums) - 1:
if nums[i] == nums[i + 1]:
nums.remove(nums[i])
else:
i = i + 1
return nums
print(removeDuplicates([0, 0, 1, 1, 1, 2, 2, 3, 3, 4]))
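# expected output: [0, 1, 2, 3, 4]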
| 303 |
iq-find-component/iq-find-components.py
|
sonatype-nexus-community/nexus-iq-api-scripts
| 3 |
2023702
|
import requests
import argparse
import json
import csv
from urllib.parse import urlencode
def __parse_args():
parser = argparse.ArgumentParser(
description='Connects to IQ and finds all applications with referenced components.')
parser.add_argument('-s', dest='iq_url', default='http://localhost:8070', help='URL of IQ instance.', required=False)
parser.add_argument('-u', dest='username', default='admin', required=False)
parser.add_argument('-p', dest='password', default='<PASSWORD>', required=False)
parser.add_argument('-i', dest='input_file', default='packageUrl.txt', help='text file of packageUrls', required=False)
return parser.parse_args()
args = __parse_args()
iq_url = args.iq_url
username = args.username
password = args.password
iq_session = requests.Session()
iq_session.auth = requests.auth.HTTPBasicAuth(username, password)
stages = ["build","stage-release","release"]
results = []
print("--------------------------------------")
## test for IQ server
try:
url = f"{iq_url}/api/v2/applications"
response = iq_session.get(url)
response.raise_for_status()
except (requests.exceptions.ConnectionError, requests.exceptions.Timeout):
print("Cannot find IQ Server, check URL")
exit()
except requests.exceptions.HTTPError:
print("Could not authenticate, check username and password")
exit()
url = f"{iq_url}/rest/product/version"
response = iq_session.get(url)
version = response.json()['version']
print(f'IQ Server Version {version}')
components = []
with open("packageUrl.txt", "r") as f:
for line in f:
packageUrl = line.strip()
#ignore blanks lines.
if len(packageUrl) > 5:
components.append({"packageUrl":packageUrl})
for component in components:
for stage in stages:
component['stageId'] = stage
params = urlencode(component).replace("%27", "%22")
url = f"{iq_url}/api/v2/search/component?{params}"
response = iq_session.get(url)
if response.status_code == 200:
for result in response.json()['results']:
                # clean up extra data so the csv writer below can output the rows
for field in ["componentIdentifier", "reportHtmlUrl", "dependencyData"]:
del result[field]
result['stage'] = stage
results.append(result)
print("--------------------------------------")
output = json.dumps(results, indent=4)
with open("results.json", "w+") as file:
file.write(output)
print("Json results saved to -> results.json")
print("--------------------------------------")
with open("results.csv", "w+") as file:
fields = ['applicationName', 'applicationId', 'stage', "threatLevel", 'hash', 'packageUrl', 'reportUrl']
writer = csv.DictWriter(file, fieldnames = fields)
writer.writeheader()
writer.writerows(results)
print("csv results saved to -> results.csv")
print("--------------------------------------")
print("--------------------------------------")
| 2,849 |