seq_id | text | repo_name | sub_path | file_name | file_ext | file_size_in_byte | program_lang | lang | doc_type | stars | dataset | pt | api
string | string | string | string | string | string | int64 | string | string | string | int64 | string | string | list
---|---|---|---|---|---|---|---|---|---|---|---|---|---
14816787756
|
"""web URL Configuration"""
from django.conf.urls import include,url
from django.contrib import admin
from . import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', views.index, name='index'),
url(r'^pdx/(?P<pk>\w{0,50})/$', views.pdx, name="pdx"),
url(r'^search/', views.search, name="search"),
#url(r'^search/', include('haystack.urls')),
url(r'^resources/', views.resources, name="resources"),
]
|
jmason-ebi/pdx
|
web/urls.py
|
urls.py
|
py
| 442 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.conf.urls.url",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 14,
"usage_type": "call"
}
] |
36649336174
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 11 10:56:48 2020
@author: alexanderfalk
"""
import itertools
import time
import sys
class ThreeOPT:
def __init__(self, computed_solution, time_limit=60):
self.solution = computed_solution
self.time_limit = time_limit
def construct(self, time_left):
return self.algorithm(time_left)
def algorithm(self, start_time):
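        # Sweep every route, trying all 3-opt index triples; once the time budget
        # is exceeded, return the current solution immediately.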
for index, route in enumerate(self.solution.routes):
segments = self.tour_segments(route)
for i, j, k in segments:
self.solution.routes[index] = self.improvement(route, i, j, k)
t1 = time.time() # End time
if t1 - start_time > self.time_limit:
sys.stdout.write("Time Expired\n")
return self.solution
return self.solution
def distance(self, i, j):
return self.solution.instance.pre_distance(i, j)
def tour_segments(self, route):
indices = [index for index in range(len(route))]
return list(itertools.combinations(indices, r = 3))
def improvement(self, route, i, j, k):
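        # A..F are the endpoints of the three edges (A,B), (C,D), (E,F) removed by
        # the 3-opt move; dist1..dist4 measure the candidate reconnections, and the
        # first one that improves on the current length dist0 is applied below.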
A, B, C, D, E, F = route[i-1], route[i], route[j-1], route[j], route[k-1], route[k % len(route)]
dist0 = self.distance(A, B) + self.distance(C, D) + self.distance(E, F)
dist1 = self.distance(A, C) + self.distance(B, D) + self.distance(E, F)
dist2 = self.distance(A, B) + self.distance(C, E) + self.distance(D, F)
dist3 = self.distance(A, D) + self.distance(E, B) + self.distance(C, F)
dist4 = self.distance(F, B) + self.distance(C, D) + self.distance(E, A)
if dist0 > dist1:
route[i:j] = reversed(route[i:j])
elif dist0 > dist2:
route[j:k] = reversed(route[j:k])
elif dist0 > dist4:
route[i:k] = reversed(route[i:k])
elif dist0 > dist3:
route[i:k] = route[j:k] + route[i:j]
return route
|
AlexanderFalk/2020_Project01_CS_HA
|
src/threeopt.py
|
threeopt.py
|
py
| 2,076 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "time.time",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "sys.stdout.write",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "itertools.combinations",
"line_number": 41,
"usage_type": "call"
}
] |
39922767694
|
# THIS FILE IS SAFE TO EDIT. It will not be overwritten when rerunning go-raml.
from flask import jsonify, request
import sqlite3
def books_postHandler():
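    # Create the table on first use, then insert the posted book; the
    # parameterized query keeps the user-supplied fields out of the SQL string.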
connection = sqlite3.connect("BookStore")
cursor=connection.cursor()
inputs = request.get_json()
cursor.execute("CREATE TABLE IF NOT EXISTS BookStore (BookId INTEGER PRIMARY KEY AUTOINCREMENT,title TEXT,subTitle TEXT,authors TEXT)")
cursor.execute("INSERT INTO BookStore VALUES(NULL,?,?,?)", (inputs["title"],inputs["subTitle"],inputs["authors"]))
connection.commit()
connection.close()
return jsonify("successfully")
|
BolaNasr/BookStore-API
|
server/handlers/books_postHandler.py
|
books_postHandler.py
|
py
| 608 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sqlite3.connect",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "flask.request.get_json",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 15,
"usage_type": "call"
}
] |
9460034812
|
"""
Запуск класса с датасетами твитов и генома ВИЧ
"""
import Hyperbolic
import numpy as np
import Levenshtein as Lev
import pandas as pd
from grad_descent import MSE
import draw
"""
ТВИТЫ
"""
positive = np.array(pd.read_csv(
r'twitter_data/positive.csv', sep=';', usecols=[3], names=['text']))
negative = np.array(pd.read_csv(
r'twitter_data/negative.csv', sep=';', usecols=[3], names=['text']))
# positive_90 = np.array(
# positive[positive['text'].apply(lambda text: len(text) == 90)])
# negative_90 = np.array(
# negative[negative['text'].apply(lambda text: len(text) == 90)])
size = 50
dataset = np.concatenate((positive[np.random.choice(
len(positive), size)], negative[np.random.choice(len(negative), size)]))
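# Shuffle the combined positive/negative sample and keep a map from shuffled
# position back to the original index, so classes can be recovered for plotting.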
perm = np.random.permutation(2*size)
ran = np.array(range(2*size))
map = {perm[i]: ran[i] for i in range(2*size)}
dataset = dataset[perm]
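# Pairwise Levenshtein distances between all tweets, scaled down by a factor of 10.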
distance = np.zeros((2*size, 2*size), dtype=float)
for i in range(2*size):
for j in range(2*size):
distance[i, j] = Lev.distance(dataset[i, 0], dataset[j, 0])
distance = distance / 10.
H = Hyperbolic.Hyperbolic(graph=distance, dimension=2, maxiter=1000, batch=1.)
print("MSE %f" % MSE(H.point_coordinates, distance))
draw.draw(H.point_coordinates, distance,
draw_edges=False, map=map, annotate=False)
"""
Геном вич
"""
# lst = np.array([]).reshape(0, 0)
# for i in range(100):
# f = open(rf'data/sequence ({i}).txt', 'r')
# string = f.read().replace("\n", "")
# tmp = list(string.encode())
# if i == 0:
# lst = np.array(tmp).reshape(1, -1)
# else:
# lst = np.vstack((lst, np.array(tmp).reshape(1, -1)))
# # create an empty int array for the distances
# distance = np.zeros((100, 100), dtype=float)
# # fill it with the correct values
# for i in range(100):
# for j in range(100):
# distance[i, j] = (lst[i] != lst[j]).sum()
# distance = distance / 3.
# H = Hyperbolic.Hyperbolic(graph=distance, dimension=2, maxiter=100, batch=0.1)
# draw.draw(H.point_coordinates, distance, True, annotate=False)
|
DanilShishkin/Hyperbolic
|
actual/main.py
|
main.py
|
py
| 2,184 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "numpy.array",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.random.choice",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.choice",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.permutation",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "Levenshtein.distance",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "Hyperbolic.Hyperbolic",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "grad_descent.MSE",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "draw.draw",
"line_number": 47,
"usage_type": "call"
}
] |
32592778621
|
import geoip2.database
"""
Requirements:
geoip2
use this page to download the db:
https://dev.maxmind.com/geoip/geolite2-free-geolocation-data?lang=en
"""
# This creates a Reader object. You should use the same object
# across multiple requests as creation of it is expensive.
with geoip2.database.Reader('/path/to/GeoLite2-City.mmdb') as reader:
# Replace "city" with the method corresponding to the database
# that you are using, e.g., "country".
response = reader.city('203.0.113.0')
    # Pass the fields as separate print arguments: latitude/longitude are floats,
    # so joining with + would raise a TypeError, and sep="\n" only separates arguments.
    print(
        response.country.iso_code,
        response.country.name,
        response.country.names['zh-CN'],
        response.subdivisions.most_specific.name,
        response.subdivisions.most_specific.iso_code,
        response.city.name,
        response.postal.code,
        response.location.latitude,
        response.location.longitude,
        response.traits.network, sep="\n"
    )
|
steriospydev/tutools
|
Functions/get_ip_location.py
|
get_ip_location.py
|
py
| 927 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "geoip2.database.database.Reader",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "geoip2.database.database",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "geoip2.database",
"line_number": 10,
"usage_type": "name"
}
] |
26889693373
|
import cv2
import numpy as np
import matplotlib.pyplot as plt
def viz2(img, regions, rooms, all_ctrs):
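    # Render all contours next to the filtered region/room contours and save the
    # side-by-side comparison to fig2.jpg.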
all_ctrs_img = np.zeros((img.shape[0], img.shape[1], 3), np.uint8)
cv2.drawContours(all_ctrs_img, all_ctrs, -1, (0, 255, 0), 3)
filtered_ctrs_img = np.zeros((img.shape[0], img.shape[1], 3), np.uint8)
for region in regions + rooms:
cv2.drawContours(filtered_ctrs_img, [region], -1, (0, 255, 0), 3)
f, ax = plt.subplots(1, 2, figsize=(20, 14))
ax[0].imshow(all_ctrs_img)
ax[0].axis('off')
ax[0].set_title('All contours simplified')
ax[1].imshow(filtered_ctrs_img)
ax[1].axis('off')
ax[1].set_title('All contours filtered and simplified')
f.savefig('fig2.jpg')
return
|
jmou/quarks-knit
|
viz2.py
|
viz2.py
|
py
| 736 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.zeros",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "cv2.drawContours",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "cv2.drawContours",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 15,
"usage_type": "name"
}
] |
10506264622
|
"""
a very simple MNIST classifier
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
FLAGS = None
def main(_):
#import data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
#create nodes for the input images and target output classes
x = tf.placeholder(tf.float32, shape=[None, 784])
y_ = tf.placeholder(tf.float32, shape=[None, 10])
#define the weights w and biases b for model
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
#implement regression model
#multiply the vectorized input images x by the weight matrix W, add the bias b
y = tf.matmul(x, W) + b
#specify a loss function
cross_entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
#train the model
#minimize cross entropy using gradient descent with a learning rate of 0.5
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
sess = tf.InteractiveSession()
#initialize variable with specified values
tf.global_variables_initializer().run()
#run the training procedure
for _ in range(1000):
batch = mnist.train.next_batch(100)
sess.run(train_step, feed_dict={x: batch[0], y_: batch[1]})
#evaluate the model
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='d:/Workspace/tensorflow/MNIST_data',
help='Directory for storing input data')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
RuanYB/tensorflow
|
mnist.py
|
mnist.py
|
py
| 1,897 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "tensorflow.examples.tutorials.mnist.input_data",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "tensorflow.placeholder",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.placeholder",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.Variable",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "tensorflow.zeros",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "tensorflow.Variable",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "tensorflow.zeros",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "tensorflow.matmul",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "tensorflow.reduce_mean",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn.softmax_cross_entropy_with_logits",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.train.GradientDescentOptimizer",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.InteractiveSession",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "tensorflow.global_variables_initializer",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "tensorflow.equal",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "tensorflow.argmax",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "tensorflow.reduce_mean",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "tensorflow.cast",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "tensorflow.app.run",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "tensorflow.app",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 63,
"usage_type": "attribute"
}
] |
14070127148
|
import math
import random  # needed by the stochastic branch of fit(); missing from the original
import numpy as np
from scipy.special import expit
class LogReg():
def __init__(self, lambda_1=0.0, lambda_2=1.0, gd_type='full',
tolerance=1e-4, max_iter=1000, w0=None, alpha=1e-3):
"""
lambda_1: L1 regularization param
lambda_2: L2 regularization param
gd_type: 'full' or 'stochastic'
tolerance: for stopping gradient descent
max_iter: maximum number of steps in gradient descent
w0: np.array of shape (d) - init weights
alpha: learning rate
"""
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.gd_type = gd_type
self.tolerance = tolerance
self.max_iter = max_iter
self.w0 = w0
self.alpha = alpha
self.w = None
self.loss_history = None
def fit(self, X, y):
"""
X: np.array of shape (l, d)
y: np.array of shape (l)
---
output: self
"""
self.loss_history = []
count = 0
        if self.w0 is None:  # fall back to a zero initialization when no w0 is supplied
            self.w0 = np.zeros(X.shape[1])
        self.w = np.ones((len(X[0]),))
        # stop once converged or once max_iter is reached (the original 'or' made
        # max_iter a floor rather than a cap)
        while np.linalg.norm(self.w - self.w0) > self.tolerance and count < self.max_iter:
count += 1
self.w = self.w0
if self.gd_type == 'stochastic':
                i = random.randint(0, len(y) - 1)  # randint is inclusive at both ends
grad = self.calc_gradient(X[i, :], np.array(y[i]))
else:
grad = self.calc_gradient(X, y)
self.w = self.w0 - self.alpha * grad
loss = self.calc_loss(X, y)
self.loss_history.append(loss)
self.w0 = self.w
return self
def predict_proba(self, X):
"""
X: np.array of shape (l, d)
---
output: np.array of shape (l, 2) where
first column has probabilities of -1
second column has probabilities of +1
"""
if self.w is None:
raise Exception('Not trained yet')
        pred = expit(np.dot(X, self.w))
        # stack the two class probabilities so the shape matches the docstring: (l, 2)
        return np.vstack((1 - pred, pred)).T
def calc_gradient(self, X, y):
"""
X: np.array of shape (l, d) (l can be equal to 1 if stochastic)
y: np.array of shape (l)
---
output: np.array of shape (d)
"""
tm1 = expit(-y * np.dot(X, self.w))
tm2 = y[:, np.newaxis] * X
tm3 = tm1[:, np.newaxis] * tm2
tm4 = -np.sum(tm3, axis=0)
grad = tm4 / X.shape[0] + self.lambda_2 * self.w
return grad
def calc_loss(self, X, y):
"""
X: np.array of shape (l, d)
y: np.array of shape (l)
---
output: float
"""
n = X.shape[0]
tm1 = np.logaddexp(0, -y * np.dot(X, self.w))
reg = self.lambda_2 * np.sum(self.w ** 2) / 2
loss = (1 / n) * np.sum(tm1, axis=0) + reg
return loss
|
idStep/hse_ml
|
logreg.py
|
logreg.py
|
py
| 2,793 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.ones",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "scipy.special.expit",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "scipy.special.expit",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "numpy.newaxis",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "numpy.newaxis",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "numpy.sum",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "numpy.logaddexp",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 89,
"usage_type": "call"
}
] |
39220567289
|
import pandas as pd  # required by read_csv/get_dummies below; missing from the original
from sklearn.preprocessing import StandardScaler
file_name2 = 'data2.csv'
df = pd.read_csv(file_name2)
df['race_date'] = pd.to_datetime(df['race_date']).dt.date
# Drop rows with missing information
df = df.dropna(subset=['past_time_sec1', 'past_time_sec2', 'past_time_sec3',
'past_time_sec4', 'past_time_sec5']).reset_index(drop=True)
# Assign a race ID
def set_race_id(params):
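    # Build a race ID by concatenating the date, place, and race number parsed
    # from the underscore-joined key.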
param_list = params.split('_')
race_date, place, race_num = param_list[0], param_list[1], param_list[2],
return f'{race_date}{place}{race_num}'
df['tmp'] = df['race_date'].astype(
str) + '_' + df['place'].astype(str) + '_' + df['race_num'].astype(str)
df['race_id'] = df['tmp'].map(set_race_id)
df = df.drop(columns=['tmp'])
# Drop columns not used for prediction (adjust as needed)
df = df.drop(columns=['horse', 'jockey', 'race_num', 'stable',
'race_name', 'rank', 'pop', 'gap', 'tansho', 'win1', 'time_sec'])
# Define the dummy columns (targets for one-hot encoding)
dummy_columns = ['sex', 'place', 'course_type', 'course_lr', 'weather', 'ground', 'past_course_type1', 'past_course_lr1', 'past_weather1', 'past_ground1', 'past_gap1', 'past_course_type2', 'past_course_lr2', 'past_weather2', 'past_ground2', 'past_gap2',
'past_course_type3', 'past_course_lr3', 'past_weather3', 'past_ground3', 'past_gap3', 'past_course_type4', 'past_course_lr4', 'past_weather4', 'past_ground4', 'past_gap4', 'past_course_type5', 'past_course_lr5', 'past_weather5', 'past_ground5', 'past_gap5']
# One-hot encode
df_dummy = df[dummy_columns]
df_dummy = pd.get_dummies(df_dummy, dummy_na=True)
df_main = df.drop(columns=dummy_columns)
# Set aside the information we still need before standardizing
df_main['kitaichi'] = df_main['win3'] * df_main['fukusho']
train_kitaichi = df_main.pop('kitaichi')
train_labels = df_main.pop('win3')
train_date = df_main.pop('race_date')
train_raceids = df_main.pop('race_id')
df_main = df_main.drop(columns=['fukusho'])
df_main = df_main.astype(float)
standard_file = 'standard.csv'
df_main.to_csv(standard_file, index=False)
# Standardize
ss = StandardScaler()
df_main = pd.DataFrame(ss.fit_transform(
df_main), columns=df_main.columns, index=df_main.index)
# Merge back with the dummy columns
df = pd.concat([df_main, df_dummy], axis=1)
df['kitaichi'] = train_kitaichi.values
df['win3'] = train_labels.values
df['race_date'] = train_date.values
df['race_id'] = train_raceids.values
file_name3 = 'data3.csv'
df.to_csv(file_name3, index=False)
|
keisukee/horse_racing
|
normalize_3.py
|
normalize_3.py
|
py
| 2,492 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "sklearn.preprocessing.StandardScaler",
"line_number": 49,
"usage_type": "call"
}
] |
651508757
|
from . learningTasks import RandomForest
import os
import luigi
import numpy as np
import logging
# import the proper nifty version
try:
import nifty.graph.rag as nrag
except ImportError:
try:
import nifty_with_cplex.graph.rag as nrag
except ImportError:
import nifty_with_gurobi.graph.rag as nrag
from .dataTasks import StackedRegionAdjacencyGraph, InputData
from .tools import config_logger, run_decorator
from .featureTasks import RegionNodeFeatures
from .customTargets import HDF5DataTarget, FolderTarget
from .pipelineParameter import PipelineParameter
# init the workflow logger
workflow_logger = logging.getLogger(__name__)
config_logger(workflow_logger)
class DefectNodeGroundtruth(luigi.Task):
pathToSeg = luigi.Parameter()
pathToDefectGt = luigi.Parameter()
def requires(self):
return{
"rag": StackedRegionAdjacencyGraph(self.pathToSeg),
"defect_gt": InputData(self.pathToDefectGt, dtype='uint8')
}
@run_decorator
def run(self):
inp = self.input()
rag = inp['rag'].read()
defect_gt = inp['defect_gt']
defect_gt.open()
node_labels = nrag.gridRagAccumulateLabels(rag, defect_gt.get())
assert (np.unique(node_labels) == np.array([0, 1])).all(), str(np.unique(node_labels))
self.output().write(node_labels)
def output(self):
seg_file = os.path.split(self.pathToSeg)[1][:-3]
save_path = "DefectNodeGroundtruth_%s.h5" % seg_file
return HDF5DataTarget(save_path)
class LearnDefectRandomForest(luigi.Task):
pathsToSeg = luigi.ListParameter()
pathsToDefectGt = luigi.ListParameter()
def requires(self):
        assert len(self.pathsToSeg) == len(self.pathsToDefectGt)  # the task defines pathsToDefectGt, not pathsToGt
n_inputs = len(self.pathsToSeg)
inputs = PipelineParameter().inputs
if n_inputs == 1:
raw_path = inputs['data'][0]
return {
'gt': DefectNodeGroundtruth(self.pathsToSeg[0], self.pathsToDefectGt[0]),
'feats': RegionNodeFeatures(self.pathsToSeg[0], raw_path)
}
else:
inp_paths = inputs['data']
            assert n_inputs % len(inp_paths) == 0
inp_per_seg = len(inp_paths) // n_inputs
return {
'gt': [DefectNodeGroundtruth(self.pathsToSeg[i], self.pathsToDefectGt[i]) for i in range(n_inputs)],
                'feats': [RegionNodeFeatures(self.pathsToSeg[i], inp_paths[inp_per_seg * i]) for i in range(n_inputs)]
}
@run_decorator
def run(self):
        if len(self.pathsToSeg) > 1:
self._learn_defect_rf_multi_input()
else:
self._learn_defect_rf_single_input()
def _learn_defect_rf_multi_input(self):
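        # Stack node features and ground-truth labels from every input volume,
        # then train a single random forest on the concatenated arrays.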
inp = self.input()
gts = inp['gt']
feats = inp['feats']
assert len(gts) == len(feats)
features = []
labels = []
for i, gt in enumerate(gts):
this_gt = gt.read()
this_feats = feats[i].read([0, 0], feats[i].shape)
assert len(this_gt) == len(this_feats), "%i, %i" % (len(this_gt), len(this_feats))
features.append(this_feats)
labels.append(this_gt)
features = np.concatenate(features, axis=0)
labels = np.concatenate(labels, axis=0)
rf = RandomForest(
features, labels,
n_trees=PipelineParameter().nTrees,
n_threads=PipelineParameter().nThreads
)
rf.write(str(self.output().path), 'rf')
def _learn_defect_rf_single_input(self):
inp = self.input()
gt = inp['gt'].read()
feats = inp['feats']
feats = feats.readSubarray([0, 0], feats.shape)
assert len(gt) == len(feats), "%i, %i" % (len(gt), len(feats))
rf = RandomForest(
feats, gt,
n_trees=PipelineParameter().nTrees,
n_threads=PipelineParameter().nThreads
)
rf.write(str(self.output().path), 'rf')
def output(self):
save_path = 'LearnDefectRandomForest_%s' % (
'multi_input' if len(self.pathsToSeg) > 1 else 'single_input',
)
return FolderTarget(save_path)
|
constantinpape/mc_luigi
|
mc_luigi/defectRandomForests.py
|
defectRandomForests.py
|
py
| 4,204 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "tools.config_logger",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "luigi.Task",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "luigi.Parameter",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "dataTasks.StackedRegionAdjacencyGraph",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "dataTasks.InputData",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "nifty_with_gurobi.graph.rag.gridRagAccumulateLabels",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "nifty_with_gurobi.graph.rag",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "numpy.unique",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "tools.run_decorator",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "os.path.split",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "customTargets.HDF5DataTarget",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "luigi.Task",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "luigi.ListParameter",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "luigi.ListParameter",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "pipelineParameter.PipelineParameter",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "featureTasks.RegionNodeFeatures",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "featureTasks.RegionNodeFeatures",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "tools.run_decorator",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "numpy.concatenate",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "learningTasks.RandomForest",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "pipelineParameter.PipelineParameter",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "pipelineParameter.PipelineParameter",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "learningTasks.RandomForest",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "pipelineParameter.PipelineParameter",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "pipelineParameter.PipelineParameter",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "customTargets.FolderTarget",
"line_number": 127,
"usage_type": "call"
}
] |
31360177466
|
#Long Short-Term Memory
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense, Dropout, LSTM
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
dataset_train = pd.read_csv('files/Salestrain.csv')
plt.plot(dataset_train, color='blue', label='Vendas')
plt.title('Vendas')
plt.xlabel('Tempo')
plt.ylabel('Vendas')
plt.legend()
plt.show()
sc = MinMaxScaler(feature_range=(0, 1))
trainning_set_scaled = sc.fit_transform(dataset_train)
X_train = []
y_train = []
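# Build sliding windows: each sample is the previous 90 scaled values and the
# target is the value that immediately follows them.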
for i in range(90, len(trainning_set_scaled)):
data = trainning_set_scaled[i-90:i, 0]
X_train.append(data)
y_train.append(trainning_set_scaled[i,0])
X_train = np.array(X_train).reshape(-1, 90, 1)  # convert the list to an array before reshaping
y_train = np.array(y_train)
model = Sequential()
model.add(LSTM(units=100, return_sequences=True, input_shape=(X_train.shape[1],1)))
model.add(Dropout(0.2))
model.add(LSTM(units=100, return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(units=100, return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(units=100))
model.add(Dropout(0.2))
model.add(Dense(units=1))
model.compile(optimizer='adam', loss='mean_squared_error')
model.fit(X_train, y_train, epochs=300, batch_size=1)
dataset_test = pd.read_csv('files/Salestest.csv')
train_values = dataset_train['data'].values
test_values = dataset_test['data'].values
total_values = np.concatenate((train_values, test_values), axis=0)
time_index = range(len(total_values))
# plot the series against the shared time axis (the original passed only the indices)
plt.plot(time_index[:len(train_values)], train_values, color='blue', label='Vendas - Treinamento')
plt.plot(time_index[len(train_values):], test_values, color='red', label='Vendas - Teste')
plt.title('Vendas')
plt.xlabel('Tempo')
plt.ylabel('Vendas')
plt.legend()
plt.show()
dataset_test_anomalies = dataset_test.copy()
dataset_test_anomalies.loc[:9, 'data'] = 90
dataset_test_anomalies.loc[10:34, 'data'] = np.random.uniform(100,200,size=(25,))
dataset_test_anomalies.loc[35:, 'data'] = 90
plt.plot(dataset_test, color='blue', label='Vendas')
plt.plot(dataset_test_anomalies, color='red', label='Vendas com anomalias')
plt.title('Vendas')
plt.xlabel('Tempo')
plt.ylabel('Vendas')
plt.legend()
plt.show()
dataset_total = pd.concat((dataset_train['data'], dataset_test['data']), axis=0)
inputs = dataset_total[len(dataset_total) - len(dataset_test) - 90:]  # keep the 90-step warmup before the test window
inputs = pd.DataFrame(inputs, columns=['data'])
inputs = sc.transform(inputs)
dataset_total_anomalies = pd.concat((dataset_train['data'], dataset_test_anomalies['data']), axis=0)
inputs_anomalies = dataset_total_anomalies[len(dataset_total_anomalies) - len(dataset_test_anomalies) - 90:]
inputs_anomalies = pd.DataFrame(inputs_anomalies, columns=['data'])
inputs_anomalies = sc.transform(inputs_anomalies)
X_test = []
X_test_anomalies = []
for i in range(90, len(inputs)):
X_test.append(inputs[i-90:i,0])
X_test_anomalies.append(inputs_anomalies[i-90:i,0])
X_test, X_test_anomalies = np.array(X_test), np.array(X_test_anomalies)
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
X_test_anomalies = np.reshape(X_test_anomalies, (X_test_anomalies.shape[0], X_test_anomalies.shape[1], 1))
prediced_sales = model.predict(X_test)
prediced_sales = sc.inverse_transform(prediced_sales)
prediced_sales_anomalies = model.predict(X_test_anomalies)
prediced_sales_anomalies = sc.inverse_transform(prediced_sales_anomalies)
mean_squared_error_test = mean_squared_error(test_values, prediced_sales)
mean_squared_error_anomalies = mean_squared_error(test_values, prediced_sales_anomalies)
print(f'MSE normal data: ', mean_squared_error_test)
print(f'MSE data with anomalies: ', mean_squared_error_anomalies)
plt.plot(test_values, color='blue', label='Valores reais')
plt.plot(prediced_sales_anomalies, color='red', label='Previsões com anomalias')
plt.plot(prediced_sales, color='green', label='Previsões')
plt.title('Previsões com anomalias, sem anomalias e valores reais')
plt.xlabel('Tempo')
plt.ylabel('Vendas')
plt.legend()
plt.show()
|
francinehahn/ai-and-machine-learning
|
detectingAnomalies/LSTM.py
|
LSTM.py
|
py
| 4,027 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pandas.read_csv",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "sklearn.preprocessing.MinMaxScaler",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "keras.models.Sequential",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "keras.layers.LSTM",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dropout",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "keras.layers.LSTM",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dropout",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "keras.layers.LSTM",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dropout",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "keras.layers.LSTM",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dropout",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "numpy.random.uniform",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "pandas.concat",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.mean_squared_error",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.mean_squared_error",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 112,
"usage_type": "name"
}
] |
11753378103
|
import urllib3
import requests
from bs4 import BeautifulSoup
from csv import writer
import csv
import pandas as pd
url = 'https://www.mubawab.tn/fr/cc/immobilier-a-louer-all:o:i:sc:houses-for-rent:p:' + str(1)
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
lists = soup.find_all('li', class_='listingBox w100')
for listing in lists:  # 'listing' instead of 'list' to avoid shadowing the built-in
    estate_local = listing.find('h3', class_='listingH3').text.split()[-1].strip()
    estate_type = "Maison"
    estate_surface = getattr(listing.find('h4', class_='listingH4 floatR'), 'text', None)
    estate_piece = listing.find('h4', class_='listingH4 floatR').text.split()[0].strip()
    estate_price = getattr(listing.find("span", class_="priceTag hardShadow float-right floatL yellowBg"), 'text', None)
    if estate_price is None:
        estate_price = getattr(listing.find("span", class_="priceTag hardShadow float-right floatL"), 'text', None)
    info = [estate_local, estate_type, estate_surface, estate_piece, str(estate_price)]
print(info)
|
sofienne-chouiekh/Scraping_data_estate_location
|
test.py
|
test.py
|
py
| 1,091 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "requests.get",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 11,
"usage_type": "call"
}
] |
28322769353
|
from django.urls import path, include
from . import views
app_name = 'api'
employment = [
path('', views.EmploymentListEmployee.as_view(), name='list'),
]
employee = [
path('<int:pk>/employment/', include((employment, 'employment'))),
]
urlpatterns = [
path('employee/', include((employee, 'employee'))),
]
|
crowmurk/mallenom
|
mallenom/api/urls.py
|
urls.py
|
py
| 325 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.urls.include",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "django.urls.include",
"line_number": 17,
"usage_type": "call"
}
] |
34118032788
|
# Going to be extremely similar to the staff groups file
from __future__ import print_function
import os.path
import json
from typing import get_type_hints
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient.discovery import build
# importing module
import oracledb # needed for connection to PowerSchool server (ordcle database)
import os # needed for environement variable reading
from datetime import *
# setup db connection
un = 'PSNavigator' #PSNavigator is read only, PS is read/write
pw = os.environ.get('POWERSCHOOL_DB_PASSWORD') #the password for the database account
cs = os.environ.get('POWERSCHOOL_PROD_DB') #the IP address, port, and database name to connect to
print("Username: " + str(un) + " |Password: " + str(pw) + " |Server: " + str(cs)) #debug so we can see where oracle is trying to connect to/with
# If modifying these scopes, delete the file token.json.
SCOPES = ['https://www.googleapis.com/auth/admin.directory.user', 'https://www.googleapis.com/auth/admin.directory.group', 'https://www.googleapis.com/auth/admin.directory.group.member', 'https://www.googleapis.com/auth/apps.licensing']
emailSuffix = os.environ.get('EMAIL_SUFFIX')
studentSuffix = os.environ.get('STUDENT_SUFFIX')
allStudentGroup = os.environ.get('ALL_STUDENT_GROUP')
studentOU = os.environ.get('STUDENT_OU')
gradYearPrefix = os.environ.get('GRAD_YEAR_PREFIX')
creds = None
# The file token.json stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists('token.json'):
creds = Credentials.from_authorized_user_file('token.json', SCOPES)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open('token.json', 'w') as token:
token.write(creds.to_json())
service = build('admin', 'directory_v1', credentials=creds)
# function to take a group by email, and return all the members of the group as well as their role. Makes a dict with these pairings, then adds that dict as well as the group email to the overall memberLists dict
def getGroupMembers(groupEmail):
try:
studentMemberToken = '' # blank primer token for multi-page query results
tempDict = {} # create a temp dict that will hold the members and their roles
print(f'Getting members of {groupEmail}') # debug
while studentMemberToken is not None: # while we still have results to process
studentMemberResults = service.members().list(groupKey=groupEmail, pageToken=studentMemberToken, includeDerivedMembership='True').execute() # get the members of the group by email
studentMemberToken = studentMemberResults.get('nextPageToken')
studentMembers = studentMemberResults.get('members', []) # separate the actual members array from the rest of the result
for member in studentMembers: # go through each member and store their email and role in variables
studentEmail = member.get('email')
studentMemberType = member.get('role')
# print(f'{staffMemberEmail} is a {staffMemberType}')
tempDict.update({studentEmail : studentMemberType}) # add the email : role entry to the dict
memberLists.update({groupEmail : tempDict}) # update the overall master member dict with with this group's email and member sub-dict
except Exception as er:
if ("notFound" in str(er)):
print(f'ERROR: Group {groupEmail} not found')
print(f'ERROR: Group {groupEmail} not found',file=log)
else:
print(f'ERROR: {er}')
print(f'ERROR: {er}',file=log)
# go through all student members in the OU, look at their school access lists, and see if they are in the groups they belong in
def processGroups(orgUnit):
userToken = ''
queryString = "orgUnitPath='" + orgUnit + "'" # have to have the orgUnit enclosed by its own set of quotes in order to work
print(queryString)
while userToken is not None: # do a while loop while we still have the next page token to get more results with
userResults = service.users().list(customer='my_customer', orderBy='email', projection='full', pageToken=userToken, query=queryString).execute()
userToken = userResults.get('nextPageToken')
users = userResults.get('users', [])
for user in users:
# print(user) # debug
try:
ou = user.get('orgUnitPath')
if ('test' not in ou.lower()) and ('fbla' not in ou.lower()) and ('pre students' not in ou.lower()): # ignore any accounts that are in an OU that contains the word test, fbla, pre students
email = user.get('primaryEmail') # .get allows us to retrieve the value of one of the sub results
homeschool = str(user.get('customSchemas').get('Synchronization_Data').get('Homeschool_ID')) # get their homeschool ID
gradYear = str(user.get('customSchemas').get('Synchronization_Data').get('Graduation_Year')) # get their homeschool ID
print(f'{email} should be a part of {allStudentGroup}, {schoolAbbreviations.get(homeschool) + studentSuffix + emailSuffix} and {gradYearPrefix + gradYear + emailSuffix}')
print(f'{email} should be a part of {allStudentGroup}, {schoolAbbreviations.get(homeschool) + studentSuffix + emailSuffix} and {gradYearPrefix + gradYear + emailSuffix}', file=log)
addBodyDict = {'email' : email, 'role' : 'MEMBER'} # define a dict for the member email and role type, which is this case is just their email and the normal member role
# Check to see if they are a member of the all student group, if not we need to add them
if not memberLists.get(allStudentGroup).get(email):
print(f'ACTION: {email} is currently not a member of {allStudentGroup}, will be added')
print(f'ACTION: {email} is currently not a member of {allStudentGroup}, will be added', file=log)
service.members().insert(groupKey=allStudentGroup, body=addBodyDict).execute() # do the addition to the group
# else: # debug
# print(f'INFO: {email} is already a part of {allStudentGroup}, no action needed')
# print(f'INFO: {email} is already a part of {allStudentGroup}, no action needed', file=log)
# go through each school code : abbreviation pair to check membership for each building group
for schoolEntry in schoolAbbreviations.keys():
try:
schoolGroupEmail = schoolAbbreviations.get(schoolEntry) + studentSuffix + emailSuffix
if schoolEntry == homeschool: # if the school id number we are currently is their school, they should be a part of that school's groups
if not memberLists.get(schoolGroupEmail).get(email): # if they are not a member of the group
print(f'ACTION: {email} is currently not a member of {schoolGroupEmail}, will be added')
print(f'ACTION: {email} is currently not a member of {schoolGroupEmail}, will be added', file=log)
service.members().insert(groupKey=schoolGroupEmail, body=addBodyDict).execute() # do the addition to the group
# else: # debug
# print(f'INFO: {email} is already a part of {schoolGroupEmail}, no action needed')
# print(f'INFO: {email} is already a part of {schoolGroupEmail}, no action needed', file=log)
else: # if the current school entry is not their school, we need to make sure they are NOT part of that schools groups and remove them if they are
if memberLists.get(schoolGroupEmail).get(email): # if they are a member of the group
if memberLists.get(schoolGroupEmail).get(email) == 'MEMBER': # check and see if they are just a member, if so remove them, otherwise we do not want to touch the managers and owners
print(f'ACTION: {email} should not be a member of {schoolGroupEmail}, will be removed')
print(f'ACTION: {email} should not be a member of {schoolGroupEmail}, will be removed', file=log)
service.members().delete(groupKey=schoolGroupEmail, memberKey=email).execute() # do the removal from the group
else: # if they are an elevated member just give a warning
print(f'WARNING: {email} is an elevated role in {schoolGroupEmail} and will NOT be removed')
print(f'WARNING: {email} is an elevated role in {schoolGroupEmail} and will NOT be removed', file=log)
except Exception as er:
print(f'ERROR: in building {schoolEntry} on user {email}: {er}')
print(f'ERROR: in building {schoolEntry} on user {email}: {er}', file=log)
# go through each grad year group to check membership
for year in gradYears:
try:
gradYearEmail = gradYearPrefix + str(year) + emailSuffix
if str(year) == gradYear: # if the year we are currently on is their grad year, they should be a part of the group
if not memberLists.get(gradYearEmail).get(email):
print(f'ACTION: {email} is currently not a member of {gradYearEmail}, will be added')
print(f'ACTION: {email} is currently not a member of {gradYearEmail}, will be added', file=log)
service.members().insert(groupKey=gradYearEmail, body=addBodyDict).execute() # do the addition to the group
# else: # debug
# print(f'INFO: {email} is already a part of {gradYearEmail}, no action needed')
# print(f'INFO: {email} is already a part of {gradYearEmail}, no action needed', file=log)
else: # if the year is not their grad year, we need to make sure they are NOT a part of that group
if memberLists.get(gradYearEmail).get(email):
if memberLists.get(gradYearEmail).get(email) == 'MEMBER': # check and see if they are just a member, if so remove them, otherwise we do not want to touch the managers and owners
print(f'ACTION: {email} should not be a member of {gradYearEmail}, will be removed')
print(f'ACTION: {email} should not be a member of {gradYearEmail}, will be removed', file=log)
service.members().delete(groupKey=gradYearEmail, memberKey=email).execute() # do the removal from the group
else: # if they are an elevated member just give a warning
print(f'WARNING: {email} is an elevated role in {gradYearEmail} and will NOT be removed')
print(f'WARNING: {email} is an elevated role in {gradYearEmail} and will NOT be removed', file=log)
except Exception as er:
                            print(f'ERROR: in grad year entry {year} on user {email}: {er}')
                            print(f'ERROR: in grad year entry {year} on user {email}: {er}', file=log)
except Exception as er:
print(f'ERROR: on {user} - {er}')
print(f'ERROR: on {user} - {er}',file=log)
# main program
with oracledb.connect(user=un, password=pw, dsn=cs) as con: # create the connecton to the database
with con.cursor() as cur: # start an entry cursor
with open('StudentGroupsLog.txt', 'w') as log:
startTime = datetime.now()
startTime = startTime.strftime('%H:%M:%S')
currentYear = int(datetime.now().strftime('%Y')) # get current year for calculations of grad year classes
print(f'Execution started at {startTime}')
print(f'Execution started at {startTime}', file=log)
# Start by getting a list of schools id's and abbreviations for the "real" schools which are not excluded from state reporting
cur.execute('SELECT abbreviation, school_number FROM schools WHERE State_ExcludeFromReporting = 0')
schools = cur.fetchall()
schoolAbbreviations = {} # define a dict to store the school codes and abbreviations linked
for school in schools:
# store results in variables mostly just for readability
schoolAbbrev = school[0].lower() # convert to lower case since email groups are all lower
schoolNum = str(school[1])
# print(f'School {schoolAbbrev} - Code {schoolNum}')
schoolAbbreviations.update({schoolNum : schoolAbbrev})
print(f'Schools numbers and their abbreviations: {schoolAbbreviations}')
print(f'Schools numbers and their abbreviations: {schoolAbbreviations}', file=log)
memberLists = {} # make a master dict for group memberships, that will have sub-dict sof each member and their role as its values
gradYears = [] # make an array that will hold the next 14 years to have as reference for graduation years
for i in range(17):
gradYears.append(currentYear + (i-1)) # start with 0 (-1) from the current year and go through the next 15 years
print(f'The graduation years in range: {gradYears}') # debug
print(f'The graduation years in range: {gradYears}', file=log) # debug
# find the members of each group once at the start so we do not have to constantly query via the api whether a user is a member, we can just do a list comparison
for entry in schoolAbbreviations.values():
# go through each school abbreviation and find their student group
studentGroup = entry + studentSuffix + emailSuffix
getGroupMembers(studentGroup)
for year in gradYears:
classGroup = gradYearPrefix + str(year) + emailSuffix
getGroupMembers(classGroup)
getGroupMembers(allStudentGroup) # get membership for the district wide student group added to dict
print(memberLists) # debug, now should have a dict containing each group email as the keys, and the value is a dict of its own containing the emails and roles of each member of the group
# print(memberLists, file=log) # debug, now should have a dict containing each group email as the keys, and the value is a dict of its own containing the emails and roles of each member of the group
processGroups(studentOU) # process the student groups for the main student OU, this will also include any sub-OUs
endTime = datetime.now()
endTime = endTime.strftime('%H:%M:%S')
print(f'Execution ended at {endTime}')
print(f'Execution ended at {endTime}', file=log)
|
Philip-Greyson/D118-Google-Groups-Licensing
|
doStudentGroups.pyw
|
doStudentGroups.pyw
|
pyw
| 16,072 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.environ.get",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "google.oauth2.credentials.Credentials.from_authorized_user_file",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "google.oauth2.credentials.Credentials",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "google.auth.transport.requests.Request",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "google_auth_oauthlib.flow.InstalledAppFlow.from_client_secrets_file",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "google_auth_oauthlib.flow.InstalledAppFlow",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "googleapiclient.discovery.build",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "oracledb.connect",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "datetime.now",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "datetime.now",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "datetime.now",
"line_number": 215,
"usage_type": "call"
}
] |
5675740119
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
import os, re, collections, getpass, functools, click, six, logging, json, threading
import dash
from dash import dcc
from dash import html
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output, State, ALL
_name_dev_dd={
'A-UL': '207',
'B-UL': '202',
'A-TZ': '220',
'B-TZ': '221',
'A-LV': '222',
'B-LV': '223',
'A-SA': '224',
'B-SA': '225'}
_dev_name_dd={vv:kk for kk,vv in _name_dev_dd.items()}
def _get_devices(path='/device/dfe'):
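    # Stub: the real device discovery (commented out below) walked the Nalct
    # configuration tree; here it returns a single empty placeholder entry.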
return [{}]
# _root=nalct.NalctConfiguration.NalctConfiguration(quiet=True, _context=_context)()._asdict()
# _devices=next(iter(_root.get('entity', DotMap()).get('container', DotMap()).match(('name','device'))), DotMap())
# _dev_ent=_devices.get('container', DotMap()).get('entity', [])
# _dfe=next(iter(list(filter(lambda x: x.get('name') in ['dfe'], _dev_ent))), DotMap())
# # _dfe=next(iter(_devices.get('container', DotMap()).get('entity', []).match(('name','dfe'))), DotMap())
# _devs=list(map(lambda x: x.get('name'), _dfe.get('container', DotMap()).get('entity', []) ))
# return [{'label': _dev_name_dd.get(_dev, _dev), 'value':os.path.join(path, _dev)} for _dev in _devs]
_command_funcs=[]
_devices=_get_devices()
logging.getLogger(__name__).debug('Devices: {}'.format(_devices))
# _commands=sorted(list(set( [_cmd for _device in [_dev.get('value') for _dev in _devices] for _cmds in nalct.NalctCommanding.NalctCommanding(path=_device, _context=_context).commands for _cmd in _cmds ] )))
_commands=[]
# _dev_cmds={_device:list(nalct.NalctCommanding.NalctCommanding(path=_device, _context=_context).commands) for _device in [_dev.get('value') for _dev in _devices] }
_dev_cmds={}
# _commands=[]
# [_commands.extend(_vv) for _vv in _dev_cmds.values()]
_commands=sorted(list(set( _commands )))
_get_click_context = lambda command: click.Context(command).__enter__()
# _commands=collections.OrderedDict([(_cmd.command.name, _cmd) for _cmd in map(_get_click_context, _commands)])
# _cmd_exe_dd = collections.OrderedDict([(_cmd_name, _command_funcs[ii]) for ii,_cmd_name in enumerate(_commands.keys())])
_cmd_exe_dd = {}
_all_services={} #cc.__name__ : cc for cc in nalct.DockerService.DockerService.yield_progeny()}
_all_service_names=sorted(set( filter(lambda x: isinstance(x, six.string_types), [getattr(vv,'component', None) for vv in _all_services.values()]) ))
input_groups = html.Div(
[
dbc.Container(
children=[
html.Label([
"Devices to Command",
dcc.Dropdown(
id='nalct-device-select-dropdown',
options=_devices,
multi=True,
value=[_dev.get('value') for _dev in _devices])
]),
dbc.InputGroup(
[
# dbc.InputGroupAddon("Nalct Command", addon_type="prepend"),
dbc.InputGroupText("Nalct Command"),
dbc.Select(options=[{'label': _command_name, 'value': _command_name} for _command_name in _commands],id='nalct-command-input'),
],),
dbc.Container(id='nalct-command-parameters')
],
id='nalct-command-builder'
),
],
)
layout = dbc.Container(
children=[
dbc.Alert("PyNAPL -- NetAcquire Commanding", color="success"),
input_groups,
dbc.Container(
children=[dbc.InputGroup([dbc.Button("Execute", id='nalct-launch', outline=True, color="info", className="mr-1", disabled=True),]),],
id='nalct-command-cfg-exec-container'
),
dbc.Container(
children=[],
id='nalct-command-exec-container'
),
],
className="p-5",
)
def make_bool_option(_command, _param):
return dbc.FormGroup([
dbc.Checklist(
options=[ {"label": _param.name, "value": _param.name} ],
value=[],
id={'type': 'nalct-command-line-flags', 'index': '{}-{}'.format(_command, _param.name)},
switch=True,
),
dbc.FormText(_param.help, color="secondary",)]
)
def make_command_option(_command, _param):
_ctx=_context
if re.match(r'^.*enclave.*$', _param.name):
return dbc.FormGroup([
dbc.Label(_param.name, html_for='nalct-command-line-options-{}-{}'.format(_command, _param.name)),
dbc.Select(
options=[{'label': _enclave, 'value': _enclave} for _enclave in sorted(_ctx.active.keys())] if re.match(r'^kill$', _command) else [{'label': _enclave, 'value': _enclave} for _enclave in sorted(_ctx.enclaves.keys())],
id={'type': 'nalct-command-line-options', 'index': '{}-{}'.format(_command, _param.name)}),
dbc.FormText(_param.help, color="secondary",)]
)
elif re.match(r'^.*service.*$', _param.name):
return dbc.FormGroup([
dbc.Label(_param.name, html_for='nalct-command-line-options-{}-{}'.format(_command, _param.name)),
dbc.Select(
options=[{'label': _service, 'value': _service} for _service in _all_service_names],
id={'type': 'nalct-command-line-options', 'index': '{}-{}'.format(_command, _param.name)}),
dbc.FormText(_param.help, color="secondary",)]
)
else:
return dbc.FormGroup([
dbc.Label(_param.name, html_for='nalct-command-line-options-{}-{}'.format(_command, _param.name)),
dbc.Input(placeholder="{} value...".format(_param.name), type="text", id={'type': 'nalct-command-line-options', 'index': '{}-{}'.format(_command, _param.name)}),
dbc.FormText(_param.help, color="secondary",)]
)
def build_command_form(command):
if not(command): return
_ctx=None #_commands.get(command)
_params=[] #_ctx.command.params
_flags=[] #list(filter(lambda x: x.is_flag, _params))
_options=[] #list(filter(lambda x: not(x.is_flag), _params))
_content=[]
if _options:
options = dbc.FormGroup([
dbc.FormGroup([make_command_option(command, _param) for _param in _options],
id='nalct-command-line-options-{}'.format(command))])
_content.append(options)
if _flags:
flags = dbc.FormGroup([
dbc.Label("Flags", html_for='nalct-command-line-flags-{}'.format(command)),
dbc.FormGroup([make_bool_option(command, _param) for _param in _flags],
id='nalct-command-line-flags-{}'.format(command))])
_content.append(flags)
if _content:
_content= [dbc.Label("Nalct {} Options".format(command.capitalize()), 'nalct-command-options-{}'.format(command))] + _content
return dbc.FormGroup(_content)
@app.callback([Output("nalct-command-parameters", "children"), Output('nalct-launch', 'disabled')], [Input("nalct-command-input", "value")])
def update_command_form(nalct_command):
logging.getLogger('.'.join([__name__, 'update_command_form'])).info('Command: "{}"'.format(nalct_command))
if nalct_command:
return [build_command_form(nalct_command)], False
return [], True
def _invoke(command, devices):
pass
@app.callback(
Output('nalct-command-exec-container', 'children'),
[Input('nalct-launch', 'n_clicks')],
[State("nalct-command-input", "value"),
State("nalct-device-select-dropdown", "value")],
)
def launch_nalct_command(launch_button_clicks, command, devices):
logging.getLogger('.'.join([__name__, 'launch_nalct_command'])).info('Command: "{}", Devices: {}'.format(command, devices))
if command and devices:
_opts=SmartDict(command=command, devices=devices)
try:
threading.Thread(target=_invoke, args=(command, devices), name='.'.join([__name__, 'launch_nalct_command','invoke'])).start()
return [dbc.Toast(
[
html.P("Launching `nalct.Command({})`".format(', '.join('{}={}'.format(kk,vv) for kk,vv in _opts.items())), className="mb-0"),
html.A("Status", href="/apps/app2", className="mb-0"),
dbc.Spinner(spinner_style={"width": "3rem", "height": "3rem"})],
id="nalct-launch-notifier",
header="Launching Nalct Command",
icon="primary",
duration=5000,
),]
except Exception as e:
return [dbc.Toast(
[html.P("Failed to Launch `nalct.{}({})`: {}".format(command, _opts, e), className="mb-0")],
id="nalct-launch-notifier",
header="Uh-oh!",
icon="danger",
duration=5000,
)]
return [None]
################################################################################
# vim:set sr et ts=4 sw=4 ft=python fenc=utf-8: // See Vim, :help 'modeline'
|
meghanstell/SNOWIE
|
megalodon/web/apps/launch.py
|
launch.py
|
py
| 9,144 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "click.Context",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "six.string_types",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "dash.html.Div",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "dash.html",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "dash_bootstrap_components.Container",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "dash.html.Label",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "dash.html",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "dash.dcc.Dropdown",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "dash.dcc",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "dash_bootstrap_components.InputGroup",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.InputGroupText",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.Select",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.Container",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.Container",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.Alert",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.Container",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.InputGroup",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.Button",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.Container",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.FormGroup",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.Checklist",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.FormText",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.FormGroup",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.Label",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.Select",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.FormText",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.FormGroup",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.Label",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.Select",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.FormText",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.FormGroup",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.Label",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.Input",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.FormText",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.FormGroup",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.FormGroup",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.FormGroup",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.Label",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.FormGroup",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.Label",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.FormGroup",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Output",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Input",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.Toast",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "dash.html.P",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "dash.html",
"line_number": 187,
"usage_type": "name"
},
{
"api_name": "dash.html.A",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "dash.html",
"line_number": 188,
"usage_type": "name"
},
{
"api_name": "dash_bootstrap_components.Spinner",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.Toast",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "dash.html.P",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "dash.html",
"line_number": 197,
"usage_type": "name"
},
{
"api_name": "dash.dependencies.Output",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Input",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.State",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.State",
"line_number": 176,
"usage_type": "call"
}
] |
73883744829
|
import os
_, filename = os.path.split('/a/b/c/t.txt')
print(filename)
metro_areas = [
('Tokyo', 'JP', 36.933, (35.689722, 139.691667)),
('Delhi NCR', 'IN', 21.935, (28.613889, 77.208889)),
('Mexico City', 'MX', 20.142, (19.433333, -99.133333)),
('New York-Newark', 'US', 20.104, (40.808611, -74.020386)),
('Sao Paulo', 'BR', 19.649, (-23.547778, -46.635833)),
]
print('{:15} | {:^9} | {:^9}'.format('', 'lat.', 'long.'))
fmt = '{:15} | {:9.4f} | {:9.4f}'
for name, cc, pop, (latitude, longitude) in metro_areas:
if longitude <= 0:
print(fmt.format(name, latitude, longitude))
from collections import namedtuple
City = namedtuple('City', 'name country population coordinates')
tokyo = City('Tokyo', 'JP', 36.933, (35.689722, 139.691667))
print(tokyo)
for key, value in tokyo._asdict().items():
print(key + ":", value)
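# A small extension sketch (not part of the book example): namedtuples are
# immutable, so _replace returns a modified copy, and _fields lists the field names.
delhi = tokyo._replace(name='Delhi NCR', country='IN', population=21.935)
print(delhi.name, delhi.population)
print(City._fields)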
|
yubo-yue/yubo-python
|
fluentpython/ch02.py
|
ch02.py
|
py
| 857 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.path.split",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "collections.namedtuple",
"line_number": 23,
"usage_type": "call"
}
] |
728222177
|
import string
# Initializing Variables
num_sentences = 0
num_words = 0
the_num_sentences = 0
frequency_the = 0
# Task 0
with open('war_and_peace.txt', 'r') as f: # opening and reading file
for line in f:
line = line.rstrip() # removing the space on right side
num_sentences += line.count('.') + line.count('!') + line.count('?') # counting the sentences
with open('war_and_peace.txt', 'r') as f:
for line in f:
words = line.split(None) # splitting into words and storing in list
num_words += len(words) # finding length of list
print("Number of sentences:", num_sentences)
print("Number of words:", num_words)
# Task 1
text = open('war_and_peace.txt', "r") # Open the file in read mode
d = dict() # Create an empty dictionary
for line in text: # Loop through each line of the file
line = line.strip() # Remove the leading spaces and newline character
line = line.lower()  # convert the line to lowercase to avoid case mismatch
line = line.translate(line.maketrans("", "", string.punctuation)) # Remove the punctuation marks from the line
words = line.split(" ") # Split the line into words
for word in words: # Iterate over each word in line
if word in d: # Check if the word is already in dictionary
d[word] = d[word] + 1 # Increment count of word by 1
else:
d[word] = 1
sorted_by_value = sorted(d.items(), key=lambda kv: kv[1], reverse=True) # sorting dictionary as higher value first
with open('result.csv', 'w', newline='') as f: # creating csv file
for w in sorted_by_value:
f.write(w[0] + "," + str(d[w[0]]) + "," + str(d[w[0]] / num_words) + '\n') # writing in to csv file
# Task 2
with open('war_and_peace.txt', 'r') as f:
for line in f:
line = line.rstrip()
the_num_sentences += (line.count(". the") + line.count("! the") + line.count("? the") +
line.count('. The') + line.count('! The') + line.count('? The'))
print("Number of sentences starts with 'the' :", the_num_sentences)
frequency_the = the_num_sentences / num_sentences
print("Frequency of THE sentences:", frequency_the)
# Task 3
import re
from itertools import islice
from collections import Counter
s = open("war_and_peace.txt") #opening file
g = s.read()
words = re.findall(r"\w+", g)  # extract the words with a regex
letter = Counter(zip(words, islice(words, 1, None)))  # count the frequent two-word combinations
print(letter.most_common()[0])
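# Quick illustration (not part of the assignment) of the bigram idiom above:
# zip(words, islice(words, 1, None)) pairs each word with its successor.
demo = "the quick fox and the quick dog".split()
print(Counter(zip(demo, islice(demo, 1, None))).most_common(1))
# -> [(('the', 'quick'), 2)]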
|
ruchitakatkar04/CNS-project-1
|
project1.py
|
project1.py
|
py
| 2,634 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "string.punctuation",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "re.findall",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "itertools.islice",
"line_number": 79,
"usage_type": "call"
}
] |
36734211163
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
@author: Pramod Bharadwaj Chandrashekar, Li Liu
@email: [email protected], [email protected]
"""
import numpy as np
from sklearn.cluster import KMeans
import scipy.stats as stats
def get_cdf_pval(data):
""" Function for guassian mixture of dat and computing pvalues """
cdf_pvals = []
for i in range(0, len(data)):
mn_samp = np.mean(data[i, :])
sd_samp = np.std(data[i, :])
kcl = KMeans(n_clusters=2, random_state=0).fit(np.reshape(data[i], [-1, 1]))
cluster_1_id = np.where(kcl.labels_ == 0)[0]
c1_mn, c1_sd = np.mean(data[i, cluster_1_id]), np.std(data[i, cluster_1_id])
cdf_pval_1 = np.reshape(1.0 - stats.norm.cdf(data[i, :], c1_mn, c1_sd), [-1, 1])
cluster_2_id = np.where(kcl.labels_ == 1)[0]
c2_mn, c2_sd = np.mean(data[i, cluster_2_id]), np.std(data[i, cluster_2_id])
cdf_pval_2 = np.reshape(1.0 - stats.norm.cdf(data[i, :], c2_mn, c2_sd), [-1, 1])
cdf_pval_3 = np.reshape(1.0 - stats.norm.cdf(data[i, :], mn_samp, sd_samp), [-1, 1])
cdf_pvals.append(np.concatenate((cdf_pval_1, cdf_pval_2, cdf_pval_3), axis=1))
return cdf_pvals
def get_important_bins(pval_data):
""" Fetch important bins based on pvalues"""
imp_bins = []
# Bonferroni Corrected pvals check
if len(np.where(pval_data*200 < 0.05)[0]) > 0:
imp_bins = np.where(pval_data*200 < 0.05)[0]
# Normal pval check
elif len(np.where(pval_data < 0.05)[0]):
imp_bins = np.where(pval_data < 0.05)[0]
# Top 10 bins
else:
sorted_bins = np.argsort(pval_data)
imp_bins = sorted_bins[0:20]
#imp_bins = np.argpartition(pval_data, 10)
return imp_bins
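# Minimal usage sketch (not part of the original module): rows are samples
# drawn from a two-component mixture, columns are 200 observations each.
if __name__ == '__main__':
    demo = np.random.randn(3, 200)
    demo[:, :100] += 4.0
    pvals = get_cdf_pval(demo)                 # list of (200, 3) p-value arrays
    print(get_important_bins(pvals[0][:, 2]))  # indices of the significant bins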
|
liliulab/DeepCORE
|
DeepCORE_attention_util.py
|
DeepCORE_attention_util.py
|
py
| 1,744 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.mean",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sklearn.cluster.KMeans",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "scipy.stats.norm.cdf",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "scipy.stats.norm",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "scipy.stats",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "numpy.where",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "scipy.stats.norm.cdf",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "scipy.stats.norm",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "scipy.stats",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "numpy.reshape",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "scipy.stats.norm.cdf",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "scipy.stats.norm",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "scipy.stats",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "numpy.concatenate",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "numpy.argsort",
"line_number": 45,
"usage_type": "call"
}
] |
31141179192
|
import transformers
import torch
def shape(structure):
try:
return structure.shape
except AttributeError:
return (f"list[{len(structure)}]", *shape(structure[0]))
short_prompt = """To be or not to"""
long_prompt = """It was the best of times, it was the worst"""
if __name__ == "__main__":
print("Started")
model_uri = "gpt2"
tokenizer = transformers.AutoTokenizer.from_pretrained(model_uri)
tokenizer.pad_token = tokenizer.eos_token
model = transformers.AutoModelForCausalLM.from_pretrained(model_uri)
model.to("cuda:0")
inputs = tokenizer([short_prompt, long_prompt], padding=True, return_tensors="pt").to("cuda:0")
generated = []
for _ in range(10):
print(inputs.keys())
print(shape(inputs["input_ids"]))
print(shape(inputs["attention_mask"]))
print(inputs["attention_mask"].sum(axis=1, dtype=torch.int64))
with torch.no_grad():
outputs = model(**inputs)
print(shape(outputs.past_key_values))
print()
next_tokens = outputs.logits[:, -1, :].max(axis=-1).indices
generated.append(tokenizer.decode(next_tokens))
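# Each row's token count (the mask sum) marks its first pad slot; scatter_
# writes the new token there and the attention mask is widened to match, so
# sequences of different lengths grow in place within the padded batch.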
insertion_points = inputs["attention_mask"].sum(axis=1, dtype=torch.int64)
new_column = torch.tensor(tokenizer.pad_token_id).repeat(2).to("cuda:0")
new_inputs = torch.cat((inputs["input_ids"], new_column[:, None]), dim=1)
new_inputs.scatter_(1, insertion_points[:, None], next_tokens[:, None])
mask = inputs["attention_mask"]
new_mask_column = torch.zeros((len(inputs["input_ids"]), 1)).to("cuda:0")
new_mask = torch.cat((mask, new_mask_column), dim=1)
new_mask.scatter_(1, insertion_points[:, None], torch.ones(2, 1).to("cuda:0"))
# inputs["input_ids"] = new_inputs
# inputs["attention_mask"] = new_mask
# inputs.past_key_values = outputs.past_key_values
inputs = {
"input_ids": new_inputs,
"attention_mask": new_mask,
# "past_key_values": outputs.past_key_values
}
print(tokenizer.batch_decode(new_inputs, skip_special_tokens=True))
|
jjjmillist/ttc-workbench
|
scripts/23-05-18@13:50:44.py
|
23-05-18@13:50:44.py
|
py
| 2,176 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "transformers.AutoTokenizer.from_pretrained",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "transformers.AutoTokenizer",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "transformers.AutoModelForCausalLM.from_pretrained",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "transformers.AutoModelForCausalLM",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "torch.int64",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "torch.no_grad",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "torch.int64",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "torch.ones",
"line_number": 53,
"usage_type": "call"
}
] |
18804693997
|
import copy
from typing import Dict, Optional, TypeVar
from pymilvus.exceptions import CollectionNotExistException, ExceptionsMessage
from pymilvus.settings import Config
Index = TypeVar("Index")
Collection = TypeVar("Collection")
class Index:
def __init__(
self,
collection: Collection,
field_name: str,
index_params: Dict,
**kwargs,
) -> Index:
"""Creates index on a specified field according to the index parameters.
Args:
collection(Collection): The collection in which the index is created
field_name(str): The name of the field to create an index for.
index_params(dict): Indexing parameters.
kwargs:
* *index_name* (``str``) --
The name of index which will be created. If no index name is specified,
default index name will be used.
Raises:
MilvusException: If anything goes wrong.
Examples:
>>> from pymilvus import *
>>> from pymilvus.schema import *
>>> from pymilvus.types import DataType
>>> connections.connect()
<pymilvus.client.stub.Milvus object at 0x7fac15e53470>
>>> field1 = FieldSchema("int64", DataType.INT64, is_primary=True)
>>> field2 = FieldSchema("fvec", DataType.FLOAT_VECTOR, is_primary=False, dim=128)
>>> schema = CollectionSchema(fields=[field1, field2])
>>> collection = Collection(name='test_collection', schema=schema)
>>> # insert some data
>>> index_params = {
... "index_type": "IVF_FLAT",
... "metric_type": "L2",
... "params": {"nlist": 128}}
>>> index = Index(collection, "fvec", index_params)
>>> index.params
{'index_type': 'IVF_FLAT', 'metric_type': 'L2', 'params': {'nlist': 128}}
>>> index.collection_name
test_collection
>>> index.field_name
fvec
>>> index.drop()
"""
from .collection import Collection
if not isinstance(collection, Collection):
raise CollectionNotExistException(message=ExceptionsMessage.CollectionType)
self._collection = collection
self._field_name = field_name
self._index_params = index_params
index_name = kwargs.get("index_name", Config.IndexName)
self._index_name = index_name
self._kwargs = kwargs
if self._kwargs.pop("construct_only", False):
return
conn = self._get_connection()
conn.create_index(self._collection.name, self._field_name, self._index_params, **kwargs)
indexes = conn.list_indexes(self._collection.name)
for index in indexes:
if index.field_name == self._field_name:
self._index_name = index.index_name
break
def _get_connection(self):
return self._collection._get_connection()
@property
def params(self) -> dict:
"""dict: The index parameters"""
return copy.deepcopy(self._index_params)
@property
def collection_name(self) -> str:
"""str: The corresponding collection name"""
return self._collection.name
@property
def field_name(self) -> str:
"""str: The corresponding field name."""
return self._field_name
@property
def index_name(self) -> str:
"""str: The corresponding index name."""
return self._index_name
def __eq__(self, other: Index) -> bool:
"""The order of the fields of index must be consistent."""
return self.to_dict() == other.to_dict()
def to_dict(self):
"""Put collection name, field name and index params into dict."""
return {
"collection": self._collection._name,
"field": self._field_name,
"index_name": self._index_name,
"index_param": self.params,
}
def drop(self, timeout: Optional[float] = None, **kwargs):
"""Drop an index and its corresponding index files.
Args:
timeout(float, optional): An optional duration of time in seconds to allow
for the RPC. When timeout is set to None, client waits until server response
or error occur
kwargs:
* *index_name* (``str``) --
The name of index. If no index is specified, the default index name is used.
"""
copy_kwargs = copy.deepcopy(kwargs)
index_name = copy_kwargs.pop("index_name", Config.IndexName)
conn = self._get_connection()
conn.drop_index(
collection_name=self._collection.name,
field_name=self.field_name,
index_name=index_name,
timeout=timeout,
**copy_kwargs,
)
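# A minimal sketch (assumption, not from the original file): with the collection
# from the class docstring already set up, construct_only builds a local handle
# without issuing the create_index RPC, and to_dict shows what it describes.
#
# idx = Index(collection, "fvec", index_params, construct_only=True)
# idx.to_dict()
# {'collection': 'test_collection', 'field': 'fvec', ...}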
|
milvus-io/pymilvus
|
pymilvus/orm/index.py
|
index.py
|
py
| 4,921 |
python
|
en
|
code
| 744 |
github-code
|
6
|
[
{
"api_name": "typing.TypeVar",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "typing.TypeVar",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "typing.Dict",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "collection.Collection",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "pymilvus.exceptions.CollectionNotExistException",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "pymilvus.exceptions.ExceptionsMessage.CollectionType",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "pymilvus.exceptions.ExceptionsMessage",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "pymilvus.settings.Config.IndexName",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "pymilvus.settings.Config",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "copy.deepcopy",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "typing.Optional",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "copy.deepcopy",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "pymilvus.settings.Config.IndexName",
"line_number": 126,
"usage_type": "attribute"
},
{
"api_name": "pymilvus.settings.Config",
"line_number": 126,
"usage_type": "name"
}
] |
3709323219
|
import pandas as pd
import numpy as np
from cloudservice import get_documenttask, download_doc
from cloudservice import get_doctag, create_doctag, delete_doctag
from cloudservice import create_doctagrel, delete_doctagrel
from cloudservice import change_step
from cloudservice import get_docs_byid, fill_docinfo
from cloudservice import get_all_projs, get_file_projs
import time, os, shutil
import config
import core
import utils
from datetime import datetime
from wordapi import transdoc
from pptapi import transppt
def analysis_log(info, info_obj):
print(info, info_obj)
def on_loop(project_id):
docresponse = get_documenttask(projid=project_id)
docdata = pd.DataFrame(docresponse)
if len(docdata) == 0:
return
docdata = docdata[docdata['step'] == 1]
docdata = docdata.tail(config.n_for_project_in_loop)
docdata = (docdata
# .sort_values('name')
.dropna(subset=['fileUrl', 'step'])
.reset_index()
)
# basepath = os.path.join(config.root_dir, str(project_id))
basepath = r'E:\file-local-analysis'
for indx, dt in docdata.iterrows():
info_log_obj = {'id': dt['fileId'], 'name': dt['name']}
print()
analysis_log('start', info_log_obj)
# if not dt['fileUrl'].startswith('http'):
# analysis_log('no file', info_log_obj)
# continue
try:
# curpath = os.path.join(basepath, dt['name'])
curpath = dt['fileUrl']
# transformed = core.transform(curpath, basepath, extname)
ext_tuple = os.path.splitext(dt['name'])
extname = ext_tuple[1]
# backfill pass
# if extname != '.dwg' and extname != '.rar':
# continue
# analysis_log('start', info_log_obj)
# end of backfill pass
if extname == '.doc':
transdoc.doc2docx(curpath, basepath, remove=False)
curpath = os.path.join(basepath, dt['name'])
if extname == '.ppt':
transppt.ppt2pptx(curpath, basepath, remove=False)
curpath = os.path.join(basepath, dt['name'])
# copy dwg/rar files locally; the online analysis does not use them
if extname == '.dwg':
shutil.copy(curpath, basepath)
curpath = os.path.join(basepath, dt['name'])
if extname == '.rar' or extname == '.zip':
shutil.copy(curpath, basepath)
curpath = os.path.join(basepath, dt['name'])
# skip very large files
if os.path.getsize(dt['fileUrl']) > 100 * 1000 * 1000:
analysis_log('file too large', info_log_obj)
dt['step'] = 2
change_step(dt['id'], dt.to_dict(), projid=project_id)
continue
except Exception as e:
analysis_log('download and convert file', info_log_obj)
continue
# analyze the document into fields
try:
kwords, kwfreq, pharr, nwarr, sumarr, *img_none = core.analysis(
curpath, extname, imgdir=None, do_drawings=True)
kwords_arr = kwords.split(',')
real_kwords = []
for kw in kwords_arr:
if is_real_kw(kw):
real_kwords.append(kw)
if len(real_kwords) > 5:
low_kw = real_kwords[5:]
else:
low_kw = []
except Exception as e:
analysis_log('analyze into fields', info_log_obj)
print(e)
# avoid always fail
dt['step'] = 2
change_step(dt['id'], dt.to_dict(), projid=project_id)
# avoid always fail
continue
# write the fields into the document table
file_table_write_success = False
try:
doc_record = get_docs_byid(dt['fileId'], projid=project_id)
# choose summary
real_summary = []
for su in sumarr:
if is_real_summary(su):
real_summary.append(su)
summarylimit = 3
if len(real_summary) > summarylimit:
real_summary = sorted(real_summary, key=lambda x: len(x), reverse=True)[:summarylimit]
nwlimit = 900
nwarr = utils.remove_blank(nwarr)
if len(nwarr) > nwlimit:
nwarr = nwarr[:nwlimit]
updated = {
# "keyWord": kwords,
"keyWord": ','.join(low_kw),
"abstract": ','.join(real_summary),
"newWords": nwarr,
"wordFrequency": kwfreq,
"phrases": pharr
}
doc_record.update(updated)
# print(doc_record)
fill_docinfo(doc_record['id'], doc_record, projid=project_id)
file_table_write_success = True
except Exception as e:
analysis_log('fill document table', info_log_obj)
continue
# create new tags and associate them
try:
if not real_kwords:
analysis_log('no content', info_log_obj)
else:
alltags = get_doctag(projid=project_id)
if len(real_kwords) >= config.web_keywords_num:
curtags = real_kwords[:config.web_keywords_num]
else:
curtags = real_kwords
dtrels = []
for curtag in curtags:
existq = False
for t in alltags:
if str(t['name']).upper() == str(curtag).upper():
dtrels.append((dt['fileId'], t['id']))
existq = True
break
if not existq:
tagid = create_doctag(curtag, projid=project_id)
dtrels.append((dt['fileId'], tagid))
# write the file-tag associations
create_doctagrel(dtrels, projid=project_id)
except:
analysis_log('tags', info_log_obj)
continue
# mark the task step as completed
if file_table_write_success:
dt['step'] = 2
change_step(dt['id'], dt.to_dict(), projid=project_id)
# delete the locally downloaded file
pass
analysis_log('done', info_log_obj)
# delete_doctagrel(13, projid=project_id)
print('end proj')
def is_real_kw(kw: str) -> bool:
if len(kw) < 2:
return False
undercount = 0
for c in kw:
if c == '_':
undercount += 1
if undercount / len(kw) > 0.499:
return False
return True
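# Illustration (not in the original): keywords shorter than two characters or
# made up of roughly half or more underscores are rejected, e.g.
# is_real_kw('ab_cd') -> True; is_real_kw('a') -> False; is_real_kw('__x_') -> False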
def is_real_summary(su) -> bool:
if len(su) < 6:
return False
return True
def find_needed_project_ids():
pids = np.loadtxt(r'.\ftp-pids.csv', dtype=int)
return pids
def exitq() -> bool:
with open('stop.cms') as sf:
sign = sf.readline()
sign = int(sign)
if sign > 0:
return True
return False
if __name__ == '__main__':
# projects = find_needed_project_ids()
# loop_id = 0
# while True:
# if exitq():
# print('exit')
# print(datetime.now())
# break
# loop_id += 1
# print('loop: ' + str(loop_id))
# for pid in projects:
# time.sleep(0.1)
# on_loop(project_id=pid)
# print('loop: ' + str(loop_id) + ' / proj: ' + str(pid))
# time.sleep(2)
projects = find_needed_project_ids() # with exclude
have_file_projects = get_file_projs()
loop_id = 0
while True:
if exitq():
print('exit')
print(datetime.now())
break
loop_id += 1
print('loop: ' + str(loop_id))
for pid in projects:
if pid not in have_file_projects:
continue
time.sleep(0.1)
print('loop: ' + str(loop_id) + ' / proj: ' + str(pid))
on_loop(project_id=pid)
time.sleep(2)
|
pengyang486868/PY-read-Document
|
analysislocal.py
|
analysislocal.py
|
py
| 8,022 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "cloudservice.get_documenttask",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "config.n_for_project_in_loop",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "os.path.splitext",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "wordapi.transdoc.doc2docx",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "wordapi.transdoc",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "pptapi.transppt.ppt2pptx",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "pptapi.transppt",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "shutil.copy",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "shutil.copy",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "os.path.getsize",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "cloudservice.change_step",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "core.analysis",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "cloudservice.change_step",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "cloudservice.get_docs_byid",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "utils.remove_blank",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "cloudservice.fill_docinfo",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "cloudservice.get_doctag",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "config.web_keywords_num",
"line_number": 153,
"usage_type": "attribute"
},
{
"api_name": "config.web_keywords_num",
"line_number": 154,
"usage_type": "attribute"
},
{
"api_name": "cloudservice.create_doctag",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "cloudservice.create_doctagrel",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "cloudservice.change_step",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "cloudservice.get_file_projs",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 243,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 243,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 253,
"usage_type": "call"
}
] |
16832434416
|
# Figure canvas
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
import matplotlib  # charting module
import matplotlib.pyplot as plt  # plotting module
class PlotCanvas(FigureCanvas):
def __init__(self, parent=None, width=0, height=0, dpi=100):
# avoid garbled CJK characters
matplotlib.rcParams['font.sans-serif'] = ['SimHei']  # use the SimHei font
matplotlib.rcParams['axes.unicode_minus'] = False  # render minus signs on axis ticks correctly
# create the figure
fig = plt.figure(figsize=(width, height), dpi=dpi)
# initialize the figure canvas
FigureCanvas.__init__(self, fig)
self.setParent(parent)  # set the parent widget
# line chart
def broken_line(self,number,train_list):
'''
linewidth: width of the line
marker: marker shape
markerfacecolor: marker fill color
markersize: marker size
number: ticket counts per train
train_list: train numbers
'''
# enumerate is a built-in that yields (index, value) pairs
day_x = ['Day 2', 'Day 3', 'Day 4', 'Day 5', 'Day 6']  # X-axis points of the line
for index, n in enumerate(number):
plt.plot(day_x, n, linewidth=1, marker='o',
markerfacecolor='blue', markersize=8, label=train_list[index])  # draw the line; marker: vertex style, label: legend entry
plt.grid(linestyle=":")
plt.legend(bbox_to_anchor=(-0.03,1))  # enable the legend and set where it is drawn
plt.title('Sleeper-ticket count trend')  # chart title
# In bbox_to_anchor(num1, num2), num1 shifts the legend horizontally (larger = further right) and num2 shifts it vertically (larger = higher).
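# Minimal usage sketch (assumption, not part of the original file): embed the
# canvas in a bare PyQt5 window with made-up ticket counts for one train.
# from PyQt5.QtWidgets import QApplication, QWidget
# import sys
# qt_app = QApplication(sys.argv)
# win = QWidget(); win.resize(600, 400)
# canvas = PlotCanvas(win, width=6, height=4)
# canvas.broken_line([[30, 25, 28, 18, 22]], ['K512'])
# win.show(); sys.exit(qt_app.exec_())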
|
yunmi02/MyProject
|
11/源程序/ticket _analysis/chart.py
|
chart.py
|
py
| 1,900 |
python
|
zh
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "matplotlib.rcParams",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.rcParams",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg.__init__",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.grid",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 36,
"usage_type": "name"
}
] |
27834268107
|
#! /usr/bin/python3
import numpy as np
from matplotlib import pyplot as plt
# Simple Euler forward
# Input variables
Q = 10.
b = [20.]
S = 1E-2
D = 2E-2
h_b = 4
intermittency = 1
# Constants
phi = 3.97
g = 9.805
rho_s = 2700.
rho = 1000.
tau_star_crit = 0.0495
# Derived variables
a1 = 1. / h_b
a2 = S**0.7 / ( 2.9 * (rho_s - rho)/rho * g**0.3 * D**0.9 )
kh = D**.1 / (2.9 * g**.3 * S**.3)
# Starting values
t = [0.]
dt = 10000
nt = 120
# Equilibrium width?
beq = 0.17 / ( g**.5 * ((rho_s - rho)/rho)**(5/3.) * 1.2**(5/3.)
* tau_star_crit**(5/3.) ) * Q * S**(7/6.) / D**1.5
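# (beq follows from setting the bank Shields stress, a2 * (Q/b)**0.6 / 1.2,
#  equal to tau_star_crit and solving for b; 2.9**(-5/3.) supplies the 0.17
#  prefactor.)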
# Depth?
h = kh * (Q/b[-1])**0.6
# Tau*
tau_star_bed = h * S / ( ((rho_s - rho)/rho) * D)
tau_star_bank = tau_star_bed / 1.2
# Compute through time
for i in range(nt):
bi = b[-1]
tau_star_bank = a2 * (Q/bi)**(3/5.) / 1.2
if tau_star_bank > tau_star_crit:
bi += a1 * ( tau_star_bank - tau_star_crit )**(3/2.) \
* dt * intermittency
else:
# bank stress at or below critical: width has reached its equilibrium value
b.append(beq)
t.append(t[-1] + dt)
break
b.append(bi)
t.append(t[-1] + dt)
t = np.array(t)
b = np.array(b)
plt.figure()
plt.hlines(beq, t[0] / (24.*60.*60.), t[-1] / (24.*60.*60.),
'.5', label='Equilibrium width', linewidth=2)
plt.plot(t / (24.*60.*60.), b, 'k-', label='Transient width',
linewidth=2)
plt.xlabel('Flood duration [days]')
plt.ylabel('Channel width [m]')
plt.legend()
plt.tight_layout()
|
MNiMORPH/OTTAR
|
examples/standalone-widening-intuitive/transport-limited-width.py
|
transport-limited-width.py
|
py
| 1,398 |
python
|
en
|
code
| 5 |
github-code
|
6
|
[
{
"api_name": "numpy.array",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.hlines",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 68,
"usage_type": "name"
}
] |
4817137896
|
import cv2
import os
import numpy as np
import imutils
import time
def open_picture(image):
"""We open picture"""
img = cv2.imread(image)
return img
def show_picture(name, image, mode, destroy):
cv2.imshow(name, image)
cv2.waitKey(mode)
if mode == 1:
time.sleep(0.2)
if destroy == "y":
cv2.destroyAllWindows()
def save_picture(name, image):
path = "dataset/data_analysing/{}"
cv2.imwrite(path.format(str(name)), image)
def blanck_picture(img):
"""Create a black background picture same dimension of original picture"""
blank_image = np.zeros((img.shape[0],img.shape[1],3), np.uint8)
blank_image[0:img.shape[0], 0:img.shape[1]] = 0, 0, 0
return blank_image
def find_object(img):
"""
We binarising picture
for only have a form of our object.
We search contours now
"""
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(gray,250,255,cv2.THRESH_BINARY_INV)
contours, _ = cv2.findContours(thresh, cv2.RETR_TREE,
cv2.CHAIN_APPROX_SIMPLE)
return contours
def recup_object(contours, img):
"""
We search the max contours.
Sometimes there are noise of litle
area of the rest of the background
of pixels (5x5) of background.
We don't want it !
After we make a crop of that.
"""
maxi = 0
for cnts in contours:
if cv2.contourArea(cnts) > maxi:
maxi = cv2.contourArea(cnts)
for cnts in contours:
if cv2.contourArea(cnts) == maxi:
x, y, w, h = cv2.boundingRect(cnts)
crop = img[y:y+h, x:x+w]
return crop
def main_croping(picture):
img = open_picture(picture)
contours = find_object(img)
crop = recup_object(contours, img)
return crop
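# Minimal usage sketch (the path is a placeholder assumption, not from the repo):
if __name__ == '__main__':
    cropped = main_croping('dataset/data_analysing/example.jpg')
    show_picture('crop', cropped, 0, 'y')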
|
LeGrosLezard/qu-est-ce-qu-il-y-a-dans-une-salle-a-manger-
|
program/training/crop_object.py
|
crop_object.py
|
py
| 1,965 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "cv2.imread",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "cv2.cvtColor",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "cv2.threshold",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "cv2.THRESH_BINARY_INV",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "cv2.findContours",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "cv2.RETR_TREE",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "cv2.CHAIN_APPROX_SIMPLE",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "cv2.contourArea",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "cv2.contourArea",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "cv2.contourArea",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "cv2.boundingRect",
"line_number": 79,
"usage_type": "call"
}
] |
42602524063
|
#!/usr/bin/env python
import numpy as np
from subprocess import call
from ase.io import read
def argparse():
import argparse
parser = argparse.ArgumentParser(description = """
This code will give you the (Total/Partial) Raidial Distribution Function.
Return npy file.
""")
# Positional arguments
parser.add_argument('chem1', type=str, help='chem1,2, are chemical symbols consisting bonds.')
parser.add_argument('chem2', type=str, help='e.g. Ge Te | "a": any symbols, "x": do all partial.')
parser.add_argument('alist_file', type=str, help='ASE readable atoms list file name.')
# Optional arguments
parser.add_argument('-n', '--image_slice', type=str, default=':', help='Image slice following python convention. default=":" (e.g.) -n :1000:10')
parser.add_argument('-r', '--rcut', type = float, default=8.5, help='Maximum radius for RDF. Default: 8.5')
parser.add_argument('-b', '--nBin', type=int, default=500, help='Number of bins. Default: 500')
parser.add_argument('-g', '--gsmear', type=float, default=0., help='Width(simga, STD) of Gaussian smearing in Angstrom unit. Zero means no smearing. [default: 0]')
parser.add_argument('-e', '--rectify_cut', type=float, default=None, help='All of drastic kink higher than this will be omitted. [Default: no rectify]')
parser.add_argument('-m', '--multiply', type=float, default=1., help='Multiply this value to RDF (re-scale). [default: 1.]')
parser.add_argument('-s', '--dont_save', dest='save_bool', action='store_false', help='If provided, npy will not be saved. Default: Save array')
parser.add_argument('-o', '--dont_load', dest='load_bool', action='store_false', help='If provided, npy will not be loaded. Default: Load if possible')
parser.add_argument('-t', '--dont_share_y', action='store_true', help='Subplots will not share y-axes if provided.')
parser.add_argument('-j', '--x_lower', type=float, default=0, help='Lower bound for RDF x-axis [Default: 0]')
parser.add_argument('-u', '--rdf_upper', type=float, default=None, help='Upper bound for RDF plot [Default: automatic]')
parser.add_argument('-l', '--rdf_lower', type=float, default=0, help='Lower bound for RDF plot [Default: 0]')
parser.add_argument('-p', '--s_upper', type=float, default=None, help='Upper bound for S(Q) plot [Default: automatic]')
parser.add_argument('-q', '--s_lower', type=float, default=0, help='Lower bound for S(Q) plot [Default: 0]')
parser.add_argument('-x', '--xtick_list', type=float, nargs='+', default=None, help='Specify x ticks of RDF. [Default: automatic]')
parser.add_argument('-y', '--ytick_list', type=float, nargs='+', default=None, help='Specify y ticks of RDF. [Default: automatic]')
parser.add_argument('-v', '--s_xtick_list', type=float, nargs='+', default=None, help='Specify x ticks of S(Q). [Default: automatic]')
parser.add_argument('-w', '--s_ytick_list', type=float, nargs='+', default=None, help='Specify y ticks of S(Q). [Default: automatic]')
return parser.parse_args()
def get_RDF(
alist,
rcut,
nBin=500,
symbol_tuple=None,
log=False,
):
from asap3.analysis.rdf import RadialDistributionFunction as RDF
RDFobj = RDF(
atoms=alist[0],
rMax=rcut,
nBins=nBin,
)
for i in range(1,len(alist)):
RDFobj.atoms = alist[i]
RDFobj.update()
if log and i % 1000 == 999:
print('\t Updating '+str(i+1)+" th image's RDF")
## Total RDF
if symbol_tuple == ('a', 'a'):
rdf = RDFobj.get_rdf()
## Partial RDF
else:
# Get normalize constant
(unique, counts) = np.unique(alist[0].get_chemical_symbols(), return_counts=True)
norm_const = counts[list(unique).index(symbol_tuple[1])] / np.sum(counts, dtype=float)
#
from chemical_symbol_number_inverter import invert_chem_sym_num
spec_inds = invert_chem_sym_num(symbol_tuple)
#
rdf = RDFobj.get_rdf(elements=tuple(spec_inds)) / norm_const
x = np.arange(nBin) / float(nBin) * rcut
## Return curve
return np.transpose(np.concatenate(([x], [rdf])))
def get_s_factor(
r,
RDF,
rho,
):
"""
inf sin(kr)
S(k) = 1 + 4 \pi \rho dr (sum) r^2 {g(r)-1} ---------
r=0 kr
where \rho: Number density
g(r): RDF
"""
dr = r[1] - r[0]
k = np.fft.fftfreq(len(r)) / dr
kr_matrix = k.reshape(-1,1) *r.reshape(-1,1).T
S = 1. +4*np.pi *rho *dr *np.sum(
np.reshape(r**2 *(RDF-1), (1,-1)) *np.sinc(kr_matrix/np.pi),
axis=1,
)
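# np.sinc is the normalized sinc, sin(pi*x)/(pi*x), so sinc(kr/pi) = sin(kr)/(kr)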
# print(np.reshape(r**2 *(RDF-1), (1,-1)) *np.sinc(kr_matrix/np.pi))
# print(np.sum(np.reshape(r**2 *(RDF-1), (1,-1)) *np.sinc(kr_matrix/np.pi), axis=1))
realpart = k >= 0.
return k[realpart], S[realpart]
def get_curve(
alist,
image_slice,
alist_file,
chem1,
chem2,
nBin = 500,
rcut = 8.5,
load_bool = True,
save_bool = True,
rectify_cut = None,
gsmear_std = 0.,
):
# Slice process
from ss_util import str_slice_to_list
slice_list = str_slice_to_list(image_slice)
# out file
out_fname = 'rdf-saved/{}_slice-{}-{}-{}_sym-{}-{}_nBin-{}_rcut-{}_.npy'.format(
alist_file, *slice_list, chem1, chem2, nBin, rcut)
out_fname2 = 'rdf-saved/{}_slice-{}-{}-{}_sym-{}-{}_nBin-{}_rcut-{}_.npy'.format(
alist_file, *slice_list, chem2, chem1, nBin, rcut)
## Main
dr = rcut /nBin
try:
assert load_bool == True
curve = np.load(out_fname)
except:
try:
assert load_bool == True
curve = np.load(out_fname2)
except:
do_calc = True
if load_bool:
print('Failed to load saved npy file. Calculation will be carried out')
print(' Failed to load npy file "{}"'.format(out_fname))
print(' or equivalent data "{}"'.format(out_fname2))
else:
print('File "{}" has been loaded.'.format(out_fname2))
do_calc = False
if do_calc:
curve = get_RDF(alist, rcut, nBin, (chem1, chem2), log=True)
if save_bool:
from ss_util import pick_folder_from_path as pffp
folder = pffp(out_fname)
call('mkdir -p {}'.format(folder), shell=True)
np.save(out_fname, curve)
print('=================================================================================================='.center(120))
print('RDF saved! ----------> {}'.format(out_fname).center(120))
print('=================================================================================================='.center(120))
else:
print('File "{}" has been loaded.'.format(out_fname))
# @ Rectify curve
if rectify_cut:
from ss_util import rectify_curve
curve = rectify_curve(curve, rectify_cut)
if not gsmear_std == 0:
print(' Gaussian smearing...')
# from gaussian_smear import gsmear
# agr= gsmear(angd,agr,gsmear_std)
from scipy.ndimage.filters import gaussian_filter1d
curve[:,1] = gaussian_filter1d(curve[:,1], gsmear_std /dr)
# Debug option
print('Integration of RDF = {}'.format(np.trapz(curve[:,1], curve[:,0])))
return curve
if __name__ == '__main__':
## Intro
import datetime
now = datetime.datetime.now()
time = now.strftime('%Y-%m-%d %H:%M:%S')
print('')
print('>>>>> Code by Young Jae Choi @ POSTECH <<<<<'.center(120))
print(('Code runtime : '+time).center(120))
print('')
print('=================================================================================================='.center(120))
print('This code will give you the (Total/Partial) Radial Distribution Function'.center(120))
print('=================================================================================================='.center(120))
print('')
args = argparse()
## Read input params
# params
chem1 = args.chem1
chem2 = args.chem2
rcut = args.rcut
nBin = args.nBin
gsmear_std = args.gsmear
rectify_cut = args.rectify_cut
#
den_list = []
## Read inputs
alist = read(args.alist_file, args.image_slice)
if not isinstance(alist, list):
alist = [alist]
den_list = []
for atoms in alist:
den_list.append(len(atoms) / atoms.get_volume())
num_den = np.mean(den_list)
# In case symbol is 'x'
chem_list = np.unique(alist[0].get_chemical_symbols()).tolist()
if chem1 == 'x':
chem1_list = chem_list[:]
else:
chem1_list = [chem1]
if chem2 == 'x':
chem2_list = chem_list[:]
else:
chem2_list = [chem2]
# Make symbol_sets
symbol_sets = []
if len(chem1_list) == 1 or len(chem2_list) == 1:
for s1 in chem1_list:
for s2 in chem2_list:
symbol_sets.append([s1, s2])
else:
for i in range(len(chem_list)):
for j in range(i,len(chem_list)):
symbol_sets.append([chem_list[i], chem_list[j]])
# Main
curve_list = []
for symb_set in symbol_sets:
cv = get_curve(
alist,
args.image_slice,
args.alist_file,
symb_set[0],
symb_set[1],
nBin,
rcut,
args.load_bool,
args.save_bool,
rectify_cut,
gsmear_std,
)
cv[:,1] *= args.multiply
curve_list.append(cv)
# @ Get structure factor
k_list = []
S_list = []
for curve in curve_list:
k, S = get_s_factor(curve[:,0], curve[:,1], num_den)
k_list.append(k)
S_list.append(S)
# @ Plot
title = '{} slice-{} symb-{},{} nBin-{} rcut-{}'.format(
args.alist_file, args.image_slice, chem1, chem2, nBin, rcut)
import matplotlib.pyplot as plt
font = {'family':'sans-serif', 'sans-serif':'Arial'}
plt.rc('font', **font)
if args.dont_share_y:
fig, axs = plt.subplots(len(curve_list), sharex=True)
else:
fig, axs = plt.subplots(len(curve_list), sharex=True, sharey=True)
if not isinstance(axs, np.ndarray):
axs = [axs]
# Plot RDF
symbol_set_plot = []
for i in range(len(symbol_sets)):
symbol_set_plot.append([])
for j in range(len(symbol_sets[i])):
sym = symbol_sets[i][j]
if sym == 'X':
symbol_set_plot[i].append('V')
else:
symbol_set_plot[i].append(sym)
if args.rdf_upper is not None:
rdf_upper = args.rdf_upper
else:
rdf_upper = np.max(np.array(curve_list)[:,:,1]) * 1.10
for i in range(len(curve_list)):
#
axs[i].plot(curve_list[i][:,0], curve_list[i][:,1], c='k', lw=2)
#
if (symbol_set_plot[i][0], symbol_set_plot[i][1]) == ('a', 'a'):
axs[i].set_ylabel(r'$g_{\rm tot} \it (r)$', fontsize='x-large')
else:
axs[i].set_ylabel(r'$g\rm _{{{}}} \it (r)$'.format(symbol_set_plot[i][0]+symbol_set_plot[i][1]), fontsize='x-large')
#
if args.xtick_list is not None:
axs[i].set_xticks(args.xtick_list)
else:
intvl = int(rcut // 10 + 1)
axs[i].set_xticks(range(0, int(rcut)+1, intvl))
#
if args.ytick_list is not None:
axs[i].set_yticks(args.ytick_list)
else:
intvl = int(rdf_upper // 4 + 1)
axs[i].set_yticks(range(0, int(rdf_upper)+1, intvl))
#
axs[i].tick_params(axis="both",direction="in", labelsize='x-large', labelbottom=False)
axs[i].set_xlim(args.x_lower, rcut)
axs[i].set_ylim(args.rdf_lower, rdf_upper)
axs[i].axhline(1., linestyle='dashed', linewidth=1, c='k')
axs[i].grid(alpha=0.5)
axs[-1].tick_params(axis="both",direction="in", labelsize='x-large', labelbottom=True)
axs[-1].set_xlabel(r'Distance $\rm (\AA)$', fontsize='x-large')
axs[0].set_title(title, pad=10)
bottom = (1.-len(axs)*0.1) /2.
plt.subplots_adjust(left=0.25, bottom=bottom, right=0.75, top=1-bottom, wspace=0.20, hspace=0.20)
# plt.subplots_adjust(left=0.30, bottom=0.40, right=0.70, top=1-bottom, wspace=0.20, hspace=0.20)
# Plot S(Q)
if args.s_upper is not None:
s_upper = args.s_upper
else:
s_upper = np.max(np.array(S_list)) * 1.10
if args.dont_share_y:
fig, axs = plt.subplots(len(curve_list), sharex=True)
else:
fig, axs = plt.subplots(len(curve_list), sharex=True, sharey=True)
if not isinstance(axs, np.ndarray):
axs = [axs]
for i in range(len(curve_list)):
#
axs[i].plot(k_list[i], S_list[i], c='k', lw=2)
#
if (symbol_set_plot[i][0], symbol_set_plot[i][1]) == ('a', 'a'):
axs[i].set_ylabel(r'$S_{\rm tot} (Q)$', fontsize='x-large')
else:
axs[i].set_ylabel(r'$S\rm _{{{}}} (Q)$'.format(symbol_set_plot[i][0]+symbol_set_plot[i][1]), fontsize='x-large')
#
if args.s_xtick_list is not None:
axs[i].set_xticks(args.s_xtick_list)
else:
intvl = int(np.max(k_list[i]) // 10 + 1)
axs[i].set_xticks(range(0, int(np.max(k_list[i]))+1, intvl))
#
if args.s_ytick_list is not None:
axs[i].set_yticks(args.s_ytick_list)
else:
intvl = int(s_upper // 4 + 1)
axs[i].set_yticks(range(0, int(s_upper)+1, intvl))
#
axs[i].tick_params(axis="both",direction="in", labelsize='x-large', labelbottom=False)
axs[i].set_xlim(0., np.max(k_list[i]))
axs[i].set_ylim(args.s_lower, s_upper)
axs[i].axhline(1., linestyle='dashed', linewidth=1, c='k')
axs[i].grid(alpha=0.5)
axs[-1].tick_params(axis="both",direction="in", labelsize='x-large', labelbottom=True)
axs[-1].set_xlabel(r'$\rm Q\ (\AA^{-1})$', fontsize='x-large')
axs[0].set_title(title, pad=10)
bottom = (1.-len(axs)*0.1) /2.
plt.subplots_adjust(left=0.25, bottom=bottom, right=0.75, top=1-bottom, wspace=0.20, hspace=0.20)
# plt.subplots_adjust(left=0.30, bottom=0.40, right=0.70, top=1-bottom, wspace=0.20, hspace=0.20)
plt.show()
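
# Note (editor's addition): the `bottom` computed before each subplots_adjust
# call centres the panel stack vertically, reserving roughly 10% of the
# figure height per subplot.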
|
hitergelei/tools
|
ase_rdf.py
|
ase_rdf.py
|
py
| 14,474 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "asap3.analysis.rdf.RadialDistributionFunction",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "numpy.float",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "chemical_symbol_number_inverter.invert_chem_sym_num",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "numpy.transpose",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "numpy.fft.fftfreq",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "numpy.fft",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "numpy.pi",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "numpy.sum",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "asap3.analysis.rdf.RadialDistributionFunction",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "numpy.sinc",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "ss_util.str_slice_to_list",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "ss_util.pick_folder_from_path",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "subprocess.call",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "numpy.save",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "ss_util.rectify_curve",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage.filters.gaussian_filter1d",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "numpy.trapz",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 169,
"usage_type": "attribute"
},
{
"api_name": "ase.io.read",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.rc",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 256,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 258,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 260,
"usage_type": "name"
},
{
"api_name": "numpy.ndarray",
"line_number": 261,
"usage_type": "attribute"
},
{
"api_name": "numpy.max",
"line_number": 278,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 278,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots_adjust",
"line_number": 309,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 309,
"usage_type": "name"
},
{
"api_name": "numpy.max",
"line_number": 316,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 316,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 318,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 318,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 320,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 320,
"usage_type": "name"
},
{
"api_name": "numpy.ndarray",
"line_number": 321,
"usage_type": "attribute"
},
{
"api_name": "numpy.max",
"line_number": 335,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 336,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 345,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots_adjust",
"line_number": 353,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 353,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 355,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 355,
"usage_type": "name"
}
] |
31014617376
|
import numpy as np
import os
import matplotlib.pyplot as plt
import sys
import NeuralTrainerCustoms as ntc
import AdaMod as am
import keras
from keras import backend as K
from keras.models import Sequential, Model
from keras.layers import Input, Layer, Dense, Activation, Embedding, LSTM, Bidirectional, Lambda, concatenate
from keras.layers.wrappers import TimeDistributed
import keras.losses
# import tensorflow as tf
from keras_contrib.layers import CRF
from keras_contrib.losses import crf_loss
from keras_contrib.metrics import crf_accuracy
from keras.utils.generic_utils import get_custom_objects
losses = ntc.Losses()
class NeuralModel:
embedding_size = 300
hidden_size = 100
def __init__(self, maxlen, num_tags, word_index, embeddings, save_weights=False):
self.maxlen = maxlen
self.vocab_size = len(word_index)+1
self.num_tags = num_tags
self.word_index = word_index
self.embeddings = embeddings
self.model = None
self.tags = ['']*num_tags
self.arg_classes = ['']*num_tags
self.transition_matrix = None
self.save_weights = save_weights
self.read_tag_mapping()
self.set_transition_matrix()
num_measures = 1 + 3*(num_tags - 2)
def read_tag_mapping(self):
f = open('tag_mapping.txt', 'r', encoding='utf-8')
lines = f.readlines()
tags = {}
for mapping in lines:
if(mapping == ''):
continue
map = mapping.split('\t')
tags[int(map[1][0])-1] = map[0]
for i in range(0, self.num_tags):
self.tags[i] = tags[i]
if tags[i] == '(O)':
self.arg_classes[i] = '|'
elif tags[i] == '(P)':
self.arg_classes[i] = 'premise'
elif tags[i] == '(C)':
self.arg_classes[i] = 'claim'
elif tags[i] == '(I)':
self.arg_classes[i] = 'inside'
def set_transition_matrix(self):
transition_matrix = np.array([[1]*self.num_tags]*self.num_tags)
# matrix is initialized to 1
# this function sets some entries to -1
for i in range(0, self.num_tags):
if self.tags[i] == '(O)':
for j in range(0, self.num_tags):
if self.tags[j] == '(P)': # impossible transition to (O)
transition_matrix[i][j] = -1
elif self.tags[j] == '(C)': # impossible transition to (O)
transition_matrix[i][j] = -1
elif self.tags[i] == '(P)':
for j in range(0, self.num_tags):
if self.tags[j] == '(P)': # impossible transition to (P)
transition_matrix[i][j] = -1
elif self.tags[j] == '(C)': # impossible transition to (P)
transition_matrix[i][j] = -1
elif self.tags[i] == '(C)':
for j in range(0, self.num_tags):
if self.tags[j] == '(P)': # impossible transition to (C)
transition_matrix[i][j] = -1
elif self.tags[j] == '(C)': # impossible transition to (C)
transition_matrix[i][j] = -1
elif self.tags[i] == '(I)':
for j in range(0, self.num_tags):
if self.tags[j] == '(O)': # impossible transition to (I)
transition_matrix[i][j] = -1
print(transition_matrix) #debug
self.transition_matrix = transition_matrix
# def switch_loss_wrapper(self, crf_layer):
# # current_epoch = self.monitor.current_epoch
# def switch_loss(y_true, y_pred):
# if not K.is_tensor(y_pred):
# y_pred = K.constant(y_pred)
# y_true = K.cast(y_true, y_pred.dtype)
# pure_mae = K.mean(K.abs(y_pred - y_true), axis=-1)
# y_true_aux = K.squeeze(y_true, axis=-1)
# zero = K.constant(0)
# simple_loss = K.switch(K.equal(y_true_aux, zero), K.zeros_like(pure_mae), pure_mae)
# # print('ypred shape', K.int_shape(y_pred))
# I_prob = K.squeeze(crf_layer[:,:,:1], axis=-1)
# ypred_size = K.int_shape(y_pred)[1]
# tiled = K.tile(y_pred, [1, 2, 1]) #repeat array like [1, 2, 3] -> [1, 2, 3, 1, 2, 3]
# rolled_y_pred = tiled[:,ypred_size-1:-1] #crop repeated array (from len-1) -> [3, 1, 2] <- (to -1)
# dist_dif = K.abs((rolled_y_pred - y_pred) - K.ones_like(y_pred))
# dist_err_mae = K.switch(K.greater(I_prob, K.constant(0.5)), K.mean(K.abs(y_pred - y_true + dist_dif), axis=-1), K.mean(K.abs(y_pred - y_true), axis=-1))
# dist_err_loss = K.switch(K.equal(y_true_aux, zero), K.zeros_like(dist_err_mae), dist_err_mae)
# simple_loss = keras.losses.mean_squared_error(y_true, y_pred)
# dist_err_loss = keras.losses.mean_squared_logarithmic_error(y_true, y_pred)
# return K.switch(K.less(current_epoch, K.constant(100)), dist_err_loss, simple_loss)
# return switch_loss
def createEmbeddings(self, word_index, embeddings):
embeddings_index = {}
path = 'Embeddings/' + embeddings + '.txt'
f = open(path, "r", encoding='utf8')
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
embedding_matrix = np.zeros((self.vocab_size, self.embedding_size))
for word, i in word_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
return embedding_matrix
def create_biLSTM(self, input):
embeddingMatrix = self.createEmbeddings(self.word_index, self.embeddings)
emb = Embedding(self.vocab_size, self.embedding_size, weights=[embeddingMatrix], input_length=self.maxlen,
trainable=False, mask_zero=True, name='embedding')(input)
biLSTM_tensor = TimeDistributed(Dense(self.hidden_size, activation='relu'), name='time_distributed_1')(emb)
biLSTM_tensor = Bidirectional(LSTM(self.hidden_size, return_sequences=True, activation='pentanh', recurrent_activation='pentanh'), name='biLSTM_1')(biLSTM_tensor)
biLSTM_tensor = Bidirectional(LSTM(self.hidden_size, return_sequences=True, activation='pentanh', recurrent_activation='pentanh'), name='biLSTM_2')(biLSTM_tensor)
return biLSTM_tensor
def create_CRF(self, biLSTM_tensor, learn, test):
crf_tensor = TimeDistributed(Dense(20, activation='relu'), name='time_distributed_2')(biLSTM_tensor)
chain_matrix = keras.initializers.Constant(self.transition_matrix)
if learn == 'marginal': #loaded model or std CRF-dist model
crf = CRF(self.num_tags, sparse_target=False, learn_mode=learn, test_mode=test,
chain_initializer=chain_matrix, name='crf_layer')
# crf = CRF(self.num_tags, sparse_target=False, learn_mode=learn, test_mode=test, name='crf_layer')
else: #baseline model
crf = CRF(self.num_tags, sparse_target=False, learn_mode=learn, test_mode=test, name='crf_layer')
crf_tensor = crf(crf_tensor)
return crf_tensor
def create_dist_layer(self, biLSTM_tensor, crf_tensor):
dist_tensor = TimeDistributed(Dense(1, activation='relu'), name='distance_layer')(biLSTM_tensor)
soft_argmax = ntc.SoftArgMax()
soft_argmax.create_soft_argmax_layer()
# zero_switch = ntc.SoftArgMax()
# zero_switch.create_zero_switch_layer()
concat = concatenate([crf_tensor, dist_tensor], axis=-1, name='concatenate')
### LAYER OPTIONS:
##### soft_argmax.layer
##### zero_switch.layer
output = TimeDistributed(soft_argmax.layer, name='softargmax')(concat)
return (output, soft_argmax)
def create_model(self, fold_name=''):
input = Input(shape=(self.maxlen,), name='input')
biLSTM_tensor = self.create_biLSTM(input)
crf_tensor = self.create_CRF(biLSTM_tensor, 'marginal', 'marginal')
temp_model = Model(input=input, output=crf_tensor)
if self.save_weights:
print('MODEL LOADED FROM FILE')
base_crf_tensor = self.create_CRF(biLSTM_tensor, 'marginal', 'marginal')
baseline_model = Model(input=input, output=base_crf_tensor)
print(baseline_model.summary()) #debug
baseline_model.compile(optimizer='adam', loss=crf_loss, metrics=[crf_accuracy])
# baseline_model.run_eagerly = True #debug
baseline_model.load_weights('./tmp/'+fold_name+'/baseline_checkpoint.h5', by_name=True)
base_layers = baseline_model.layers
model_layers = temp_model.layers
for i in range(0, len(base_layers)):
print(model_layers[i].name, base_layers[i].name)
assert model_layers[i].name == base_layers[i].name
layer_name = base_layers[i].name
temp_model.get_layer(layer_name).set_weights(base_layers[i].get_weights())
temp_model.get_layer(layer_name).trainable = False
(dist_tensor, soft_argmax) = self.create_dist_layer(temp_model.get_layer('biLSTM_2').output, temp_model.output)
self.model = Model(input=input, output=[temp_model.output,dist_tensor])
print(self.model.summary()) #debug
#loss_weights=[1.0, 0.10],
####LOSSES:
######'mean_absolute_error'
######'loss_func'
######'consecutive_dist_loss'
####OPTIMIZERS:
######'adam'
######am.AdaMod() ??
# get_custom_objects().update({'consecutive_dist_loss': losses.consecutive_dist_loss_wrapper(crf_tensor)})
# get_custom_objects().update({'switch_loss': losses.switch_loss_wrapper(crf_tensor)})
#
# keras.losses.consecutive_dist_loss = losses.consecutive_dist_loss_wrapper(crf_tensor)
self.model.compile(optimizer='adam', loss=[crf_loss,losses.loss_func], loss_weights=[1.0, 0.10], metrics={'crf_layer':[crf_accuracy], 'softargmax':'mae'})
# self.model.compile(optimizer='adam', loss=[crf_loss,losses.consecutive_dist_loss_wrapper(temp_model.output)], loss_weights=[1.0, 0.10], metrics={'crf_layer':[crf_accuracy], 'softargmax':'mae'})
# self.model.compile(optimizer='adam', loss=[crf_loss,keras.losses.mean_squared_error], loss_weights=[1.0, 0.10], metrics={'crf_layer':[crf_accuracy], 'softargmax':'mae'})
# self.model.run_eagerly = True #debug
def create_baseline_model(self):
input = Input(shape=(self.maxlen,))
biLSTM_tensor = self.create_biLSTM(input)
crf_tensor = self.create_CRF(biLSTM_tensor, 'marginal', 'marginal')
self.model = Model(input=input, output=crf_tensor)
print(self.model.summary()) #debug
self.model.compile(optimizer='adam', loss=crf_loss, metrics=[crf_accuracy])
# self.model.run_eagerly = True #debug
def recompile_model_new_loss(self, loss, fold_name=''):
input = Input(shape=(self.maxlen,), name='input')
biLSTM_tensor = self.create_biLSTM(input)
crf_tensor = self.create_CRF(biLSTM_tensor, 'marginal', 'marginal')
(dist_tensor, soft_argmax) = self.create_dist_layer(biLSTM_tensor, crf_tensor)
temp_model = Model(input=input, output=crf_tensor)
if self.save_weights:
print('MODEL LOADED FROM FILE')
base_crf_tensor = self.create_CRF(biLSTM_tensor, 'marginal', 'marginal')
baseline_model = Model(input=input, output=base_crf_tensor)
baseline_model.compile(optimizer='adam', loss=crf_loss, metrics=[crf_accuracy])
# baseline_model.run_eagerly = True #debug
baseline_model.load_weights('./tmp/'+fold_name+'/baseline_checkpoint.h5', by_name=True)
base_layers = baseline_model.layers
model_layers = temp_model.layers
for i in range(0, len(base_layers)):
print(model_layers[i].name, base_layers[i].name)
assert model_layers[i].name == base_layers[i].name
layer_name = base_layers[i].name
temp_model.get_layer(layer_name).set_weights(base_layers[i].get_weights())
temp_model.get_layer(layer_name).trainable = False
(dist_tensor, soft_argmax) = self.create_dist_layer(temp_model.get_layer('biLSTM_2').output, temp_model.output)
new_model = Model(input=input, output=[temp_model.output,dist_tensor])
new_layers = new_model.layers
model_layers = self.model.layers
total_layers = len(model_layers)
for i in range(total_layers-3, total_layers):
assert model_layers[i].name == new_layers[i].name
layer_name = model_layers[i].name
new_model.get_layer(layer_name).set_weights(model_layers[i].get_weights())
print(new_model.summary()) #debug
self.model = new_model
if loss == 'consecutive_dist_loss':
self.model.compile(optimizer='adam', loss=[crf_loss,losses.consecutive_dist_loss_wrapper(temp_model.output)], loss_weights=[1.0, 0.10], metrics={'crf_layer':[crf_accuracy], 'softargmax':'mae'})
# new_model.compile(optimizer='adam', loss=[crf_loss,keras.losses.mean_squared_logarithmic_error], loss_weights=[1.0, 0.10], metrics={'crf_layer':[crf_accuracy], 'softargmax':'mae'})
else:
self.model.compile(optimizer='adam', loss=[crf_loss,losses.loss_func], loss_weights=[1.0, 0.10], metrics={'crf_layer':[crf_accuracy], 'softargmax':'mae'})
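
# Hedged usage sketch (editor's addition; names and values are illustrative).
# createEmbeddings expects a GloVe-style file at Embeddings/<name>.txt and
# read_tag_mapping needs a tag_mapping.txt beside the script, so this stays commented:
#   word_index = {"the": 1, "court": 2}  # token -> integer id (0 is reserved for padding)
#   nm = NeuralModel(maxlen=100, num_tags=4, word_index=word_index,
#                    embeddings="glove.6B.300d", save_weights=False)
#   nm.create_baseline_model()           # biLSTM + CRF compiled with crf_loss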
|
fspring/NeuralArgMining
|
NeuralModel.py
|
NeuralModel.py
|
py
| 13,844 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "NeuralTrainerCustoms.Losses",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "keras.layers.Embedding",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "keras.layers.wrappers.TimeDistributed",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "keras.layers.Bidirectional",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "keras.layers.LSTM",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "keras.layers.Bidirectional",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "keras.layers.LSTM",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "keras.layers.wrappers.TimeDistributed",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "keras.initializers.Constant",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "keras.initializers",
"line_number": 168,
"usage_type": "attribute"
},
{
"api_name": "keras_contrib.layers.CRF",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "keras_contrib.layers.CRF",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "keras.layers.wrappers.TimeDistributed",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "NeuralTrainerCustoms.SoftArgMax",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "keras.layers.concatenate",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "keras.layers.wrappers.TimeDistributed",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "keras.layers.Input",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "keras.models.Model",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "keras.models.Model",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "keras_contrib.losses.crf_loss",
"line_number": 214,
"usage_type": "name"
},
{
"api_name": "keras_contrib.metrics.crf_accuracy",
"line_number": 214,
"usage_type": "name"
},
{
"api_name": "keras.models.Model",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "keras_contrib.losses.crf_loss",
"line_number": 245,
"usage_type": "name"
},
{
"api_name": "keras_contrib.metrics.crf_accuracy",
"line_number": 245,
"usage_type": "name"
},
{
"api_name": "keras.layers.Input",
"line_number": 252,
"usage_type": "call"
},
{
"api_name": "keras.models.Model",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "keras_contrib.losses.crf_loss",
"line_number": 260,
"usage_type": "name"
},
{
"api_name": "keras_contrib.metrics.crf_accuracy",
"line_number": 260,
"usage_type": "name"
},
{
"api_name": "keras.layers.Input",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "keras.models.Model",
"line_number": 272,
"usage_type": "call"
},
{
"api_name": "keras.models.Model",
"line_number": 278,
"usage_type": "call"
},
{
"api_name": "keras_contrib.losses.crf_loss",
"line_number": 279,
"usage_type": "name"
},
{
"api_name": "keras_contrib.metrics.crf_accuracy",
"line_number": 279,
"usage_type": "name"
},
{
"api_name": "keras.models.Model",
"line_number": 296,
"usage_type": "call"
},
{
"api_name": "keras_contrib.losses.crf_loss",
"line_number": 309,
"usage_type": "name"
},
{
"api_name": "keras_contrib.metrics.crf_accuracy",
"line_number": 309,
"usage_type": "name"
},
{
"api_name": "keras_contrib.losses.crf_loss",
"line_number": 312,
"usage_type": "name"
},
{
"api_name": "keras_contrib.metrics.crf_accuracy",
"line_number": 312,
"usage_type": "name"
}
] |
71578033787
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Simple check list from AllenNLP repo:
https://github.com/allenai/allennlp/blob/master/setup.py
To create the package for pypi.
1. Change the version in __init__.py, setup.py as well as docs/source/conf.py.
2. Commit these changes with the message: "Release: VERSION"
3. Add a tag in git to mark the release:
git tag VERSION -m 'Adds tag VERSION for pypi'
Push the tag to git:
git push --tags origin master
4. Build both the sources and the wheel.
Do not change anything in setup.py between creating the wheel and the
source distribution (obviously).
For the wheel, run: "python setup.py bdist_wheel" in the top level
directory. (this will build a wheel for the python version you use to
build it).
For the sources, run: "python setup.py sdist" You should now have a /dist
directory with both .whl and .tar.gz source versions.
5. Check that everything looks correct by uploading package to test server:
twine upload dist/* -r pypitest
   (PyPI suggests using twine, as other methods upload files via plaintext.)
You may have to specify the repository url,
use the following command then:
twine upload dist/* -r pypitest\
--repository-url=https://test.pypi.org/legacy/
Check that you can install it in a virtualenv by running:
pip install -i https://testpypi.python.org/pypi transformers
6. Upload the final version to actual pypi:
twine upload dist/* -r pypi
7. Copy the release notes from RELEASE.md to the tag in github.
"""
import os
from setuptools import setup
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname: str) -> str:
""" Read and return README as str. """
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="asta",
version="0.0.7",
author="Brendan Whitaker",
description=("Shape annotations for homogeneous numpy arrays and pytorch/tensorflow tensors."),
license="GPLv3",
packages=["asta"],
long_description=read("README"),
long_description_content_type="text/plain",
install_requires=["toml", "numpy", "sympy", "oxentiel"],
package_data={"asta": ["defaults/astarc"]},
include_package_data=True,
python_requires=">=3.7.0",
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"Topic :: Utilities",
"License :: OSI Approved :: BSD License",
"Topic :: Scientific/Engineering",
"Programming Language :: Python :: 3.7",
],
)
|
langfield/asta
|
setup.py
|
setup.py
|
py
| 2,854 |
python
|
en
|
code
| 14 |
github-code
|
6
|
[
{
"api_name": "os.path.join",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "setuptools.setup",
"line_number": 64,
"usage_type": "call"
}
] |
9582892535
|
import os
from pathlib import Path
def get_last_n_files_in_dir(dir_path, n, recurse=False, *args, **kwargs):
    method_str = "rglob" if recurse else "glob"
    p = Path(dir_path)
    fluid_glob = getattr(p, method_str)
    # pair each file with its modification time so results can be ordered by recency
    l = [(i, i.stat().st_mtime) for i in fluid_glob("*.*")]
    # sort by mtime (x[1]); keying on x[0] (the path) left the collected
    # timestamps unused; extra kwargs (e.g. reverse=True) are forwarded to list.sort()
    l.sort(key=lambda x: x[1], **kwargs)
    l_ = l[:n]
    return [i[0] for i in l_]
def delete_last_n_files_in_dir(dir_path, *args, **kwargs):
fpaths = get_last_n_files_in_dir(dir_path, *args, **kwargs)
for p in fpaths:
os.remove(p)
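
# Hedged usage sketch (editor's addition; the path is illustrative):
# if __name__ == "__main__":
#     newest_first = get_last_n_files_in_dir("/tmp/logs", n=3, recurse=True,
#                                            reverse=True)  # reverse is forwarded to list.sort()
#     print(newest_first)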
|
royassis/djangoRestML
|
myapi/helpers.py
|
helpers.py
|
py
| 536 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pathlib.Path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 20,
"usage_type": "call"
}
] |
72532713149
|
from pathlib import Path
from .code_description import CodeDescriptionParams, CodeDescriptionXLSXDocument
from .dataset_description import (
DatasetDescriptionParams,
DatasetDescriptionXLSXDocument,
)
from .manifest import ManifestXLSXDocument
def write_xlsx_files(
base_path: Path,
dataset_description_params: DatasetDescriptionParams,
code_description_params: CodeDescriptionParams,
) -> None:
dataset_description_xlsx = DatasetDescriptionXLSXDocument()
dataset_description_xlsx.save_document(
base_path=base_path, template_data=dataset_description_params
)
code_description_xlsx = CodeDescriptionXLSXDocument()
code_description_xlsx.save_document(
base_path=base_path, template_data=code_description_params
)
manifest_xlsx = ManifestXLSXDocument()
manifest_xlsx.save_document(base_path=base_path, template_data=None)
|
ITISFoundation/osparc-simcore
|
services/web/server/src/simcore_service_webserver/exporter/_formatter/xlsx/writer.py
|
writer.py
|
py
| 892 |
python
|
en
|
code
| 35 |
github-code
|
6
|
[
{
"api_name": "pathlib.Path",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "dataset_description.DatasetDescriptionParams",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "code_description.CodeDescriptionParams",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "dataset_description.DatasetDescriptionXLSXDocument",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "code_description.CodeDescriptionXLSXDocument",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "manifest.ManifestXLSXDocument",
"line_number": 25,
"usage_type": "call"
}
] |
26255854051
|
from MainA1 import Mainfun
from unigramIndex import Linkedlist
import string
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
import pickle
class QueryProcess:
def __init__(self):
        '''Attributes for each query-processing run: Totdocmatch is the total number of matched documents,
        comparison is the total number of comparisons done in the merging algorithms, and fnamelist is the list of all matched files'''
self.Totdocmatch = 0
self.comparison = 0
self.fnamelist = []
    '''Function for preprocessing a query: lowercasing, punctuation removal,
    tokenization, stop-word removal and lemmatization'''
def preprocess(self, query):
#normalisation
result1 = query.lower()
result2 = result1.translate(str.maketrans("","", string.punctuation))
#tokenization
tokens = word_tokenize(result2)
        #removing the stop words
stop_words = set(stopwords.words('english'))
result3 = [w for w in tokens if w not in stop_words]
#Lemmatization
lem = WordNetLemmatizer()
result4query = []
for word in result3:
lmword = lem.lemmatize(word)
result4query.append(lmword)
return(result4query)
def MergingAlgo(self, postlink, operatorseq, maxDocID, filename):
length = len(operatorseq)
#retrieve first posting list
post1 = postlink[0]
        #Process the query from left to right, iterating over the query operator list
for i in range(length):
            #Retrieve the operator and the second posting list
operator = operatorseq[i]
post2 = postlink[i+1]
if (operator == 'AND'):
p1 = post1.headptr
p2 = post2.headptr
#Calling the specific intersection Merge Algo
post1 = self.MergeAND(p1, p2)
                '''check whether the resultant posting list is empty;
                if it is, post1 moves on to the next posting list in the query'''
if(post1.freq == 0):
post1 = postlink[i+1]
i=i+1
elif(operator == 'OR'):
p1 = post1.headptr
p2 = post2.headptr
#Calling the specific Union Merge Algo
post1 = self.MergeOR(p1, p2)
                '''check whether the resultant posting list is empty;
                if it is, post1 moves on to the next posting list in the query'''
if(post1.freq == 0):
post1 = postlink[i+1]
i=i+1
elif(operator == 'AND NOT'):
tp2 = post2.headptr
#Computing the complement of second posting list
resulttp = self.ListCompliment(tp2, maxDocID)
p1 = post1.headptr
p2 = resulttp.headptr
#Calling the specific intersection Merge Algo
post1 = self.MergeAND(p1, p2)
                '''check whether the resultant posting list is empty;
                if it is, post1 moves on to the next posting list in the query'''
if(post1.freq == 0):
post1 = postlink[i+1]
i=i+1
elif(operator == 'OR NOT'):
tp2 = post2.headptr
#Computing the complement of second posting list
resulttp = self.ListCompliment(tp2, maxDocID)
p1 = post1.headptr
p2 = resulttp.headptr
#Calling the specific Union Merge Algo
post1 = self.MergeOR(p1, p2)
                '''check whether the resultant posting list is empty;
                if it is, post1 moves on to the next posting list in the query'''
if(post1.freq == 0):
post1 = postlink[i+1]
i=i+1
        '''After the merging algorithm completes, the final resultant posting list is post1;
        retrieve the document names according to the docIDs present in that final posting list'''
self.Totdocmatch = post1.freq
pt = post1.headptr
while(pt is not None):
self.fnamelist.append(filename[pt.IDval])
pt = pt.next
def MergeAND(self, ptr1, ptr2):
answer = Linkedlist()
        #iterate both pointers till the end of the linked lists; both lists are already sorted
while(ptr1 is not None and ptr2 is not None):
if(ptr1.IDval == ptr2.IDval):
#here when both pointer node value matches, then add the nodevalue to the answer linked list
answer.addnode(ptr1.IDval)
#move both pointer by one node
ptr1 = ptr1.next
ptr2 = ptr2.next
                #count the comparison; this branch needed only the first test, so add 1 to the comparison variable
self.comparison = self.comparison + 1
elif(ptr1.IDval < ptr2.IDval):
#here the ptr1 is behind the ptr2, so just move ptr1 by one node
ptr1 = ptr1.next
                #count the comparison; this branch needed the first and second tests, so add 2 to the comparison variable
self.comparison = self.comparison + 2
else:
#here in the else, the ptr2 is behind the ptr1, so just move ptr2 by one node
ptr2 = ptr2.next
                #count the comparison; two tests were already evaluated above, so add 2 to the comparison variable
self.comparison = self.comparison + 2
return answer
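    # Worked example (editor's addition): intersecting the sorted posting
    # lists [1, 3, 5] and [2, 3, 5] walks both pointers once and yields
    # [3, 5]; each step adds 1 or 2 to self.comparison as counted above.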
def MergeOR(self, ptr1, ptr2):
answer = Linkedlist()
        #iterate both pointers till the end of the linked lists; both lists are already sorted
while(ptr1 is not None and ptr2 is not None):
if(ptr1.IDval < ptr2.IDval):
#add the nodevalue to the answer linked list
answer.addnode(ptr1.IDval)
#here the ptr1 is behind the ptr2, so just move ptr1 by one node
ptr1 = ptr1.next
                #count the comparison; this branch needed only the first test, so add 1 to the comparison variable
self.comparison = self.comparison + 1
elif(ptr1.IDval > ptr2.IDval):
#add the nodevalue to the answer linked list
answer.addnode(ptr2.IDval)
#the ptr2 is behind the ptr1, so just move ptr2 by one node
ptr2 = ptr2.next
                #count the comparison; this branch needed the first and second tests, so add 2 to the comparison variable
self.comparison = self.comparison + 2
else:
#here in the else, when both pointer node value matches, then add the nodevalue to the answer linked list
answer.addnode(ptr1.IDval)
#move both pointer by one node
ptr1 = ptr1.next
ptr2 = ptr2.next
                #count the comparison; two tests were already evaluated above, so add 2 to the comparison variable
self.comparison = self.comparison + 2
        #if ptr2 becomes None while ptr1 is not, append the remaining node values of ptr1 to the answer linkedlist
while(ptr1 is not None):
answer.addnode(ptr1.IDval)
ptr1 = ptr1.next
        #if ptr1 becomes None while ptr2 is not, append the remaining node values of ptr2 to the answer linkedlist
while(ptr2 is not None):
answer.addnode(ptr2.IDval)
ptr2 = ptr2.next
return answer
#Function for finding the complement of a linkedlist
def ListCompliment(self, ptr, maxDocID):
i = 0
answer = Linkedlist()
        #maxDocID bounds the allocated document IDs; the complement considers IDs 0 to maxDocID-1
while(i < maxDocID and ptr is not None):
#if the docID present in the list, so just move to the next node
if(i == ptr.IDval):
i = i+1
ptr = ptr.next
#if the docID not present in the list, so just add to the answer linkedlist
elif(i < ptr.IDval):
answer.addnode(i)
i=i+1
#adding the remaining docID to the answer linkedlist
while(i < maxDocID):
answer.addnode(i)
i=i+1
return(answer)
if __name__ == '__main__':
    #Deserialization of the MainA1 class object, in which the unigram data structure is stored
with open('store.dat' , 'rb') as fr:
tempomainobj = pickle.load(fr)
    #retrieving the unigram data structure, the list of all documents, and the max doc ID
dictlist = tempomainobj.postinglist
filename = tempomainobj.docname
maxDocID = tempomainobj.docID
    #Input the number of queries from the user
n = int(input("Enter the number of Query: "))
for i in range(n):
#input the query and query operator
query = input("Input Query: ")
queryoperatorseq = input("Input Query operator: ").split(', ')
#Preprocessing of Query
Queryobj = QueryProcess()
prepresult = Queryobj.preprocess(query)
        #Retrieving the posting list of each tokenized query word into the postlink[] list
postlink = []
for qword in prepresult:
LinkL = dictlist.get(qword)
postlink.append(LinkL)
        #Process the query and query operators with the merging algorithm
Queryobj.MergingAlgo(postlink, queryoperatorseq, maxDocID, filename)
        #print the results of the query
print('Number of document matched: ', end=' ')
print(Queryobj.Totdocmatch)
print('Number of comparison Done in Merging Algorithm: ', end=' ')
print(Queryobj.comparison)
print('List of matched document name:')
print(Queryobj.fnamelist)
|
prashant18360/Information-Retrieval-Assignment-1
|
Qprocessing.py
|
Qprocessing.py
|
py
| 10,931 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "string.punctuation",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "nltk.tokenize.word_tokenize",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.stopwords.words",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.stopwords",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "nltk.stem.WordNetLemmatizer",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "unigramIndex.Linkedlist",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "unigramIndex.Linkedlist",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "unigramIndex.Linkedlist",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 242,
"usage_type": "call"
}
] |
31546382074
|
from functools import reduce
def main():
# one liner functions
    calc_sum = lambda number_list: sum(number_list)
check_palindrome = lambda number: str(number) == str(number)[::-1]
factorial = lambda number: reduce((lambda a, b: a * b), range(1, number + 1))
# check functions
    print(calc_sum([1, 2, 3, 4, 5, 10]))
print(check_palindrome(12121))
print(factorial(6))
if __name__ == '__main__':
main()
|
lidorelias3/Lidor_Elias_Answers
|
python/One Liners/OneLiners.py
|
OneLiners.py
|
py
| 437 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "functools.reduce",
"line_number": 8,
"usage_type": "call"
}
] |
16705170564
|
from django.shortcuts import render, redirect
from .models import Todo
from .forms import TodoForm
def tasks_list(request):
todos = Todo.objects.all()
context = {'todos': todos}
return render(request, 'tasks_list.html', context)
def add_todo(request):
if request.method == 'POST':
form = TodoForm(request.POST)
if form.is_valid():
form.save()
return redirect('add_todo')
else:
form = TodoForm()
todos = Todo.objects.all()
context = {
'form': form,
'todos': todos,
}
return render(request, 'add_todo.html', context)
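
# Hedged note (editor's addition): redirect('add_todo') above assumes a URL
# pattern named 'add_todo', e.g. path('add/', views.add_todo, name='add_todo')
# in the app's urls.py (not shown here).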
|
Chikitonik/DI_Bootcamp
|
Week_12_PY/Day2/exercises_xp/todo_project/todo_list/todos/views.py
|
views.py
|
py
| 622 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "models.Todo.objects.all",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "models.Todo.objects",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "models.Todo",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "forms.TodoForm",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "forms.TodoForm",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "models.Todo.objects.all",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "models.Todo.objects",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "models.Todo",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 28,
"usage_type": "call"
}
] |
44999838
|
import networkx as nx
import numpy as np
# from GPy.kern import Kern
from functools import reduce
from itertools import product
import copy
# class Ours(Kern):
# def __init__(self, input_dim, G, scale=1., variance = 1,
# active_dims=None, name='ours', K_matrix = None, kern = "linear",
# pava = True):
# Kern.__init__(self, input_dim, active_dims, name)
# self.scale = scale
# self.N_objs = input_dim
# self.G = G
# self.K_matrix = K_matrix
# self.kern = kern
# self.variance = variance
# self.pava = pava
# def kernel_f(self, sigma, sigma_prime):
# if self.kern == "exp":
# return np.exp(-self.scale*np.linalg.norm(self.phi_(sigma) - self.phi_(sigma_prime)))
# elif self.kern == "linear":
# return np.dot(self.phi_(sigma), self.phi_(sigma_prime))
# elif self.kern == "linear*exp":
# phi_sigma = self.phi_(sigma)
# phi_sigma_prime = self.phi_(sigma_prime)
# l = np.dot(phi_sigma, phi_sigma_prime)
# e = np.exp(-self.scale*np.linalg.norm(phi_sigma - phi_sigma_prime))
# return l*e
# def _index(self, X, X2):
# if X2 is None: i1 = i2 = X.astype('int').flat
# else: i1, i2 = X.astype('int').flat, X2.astype('int').flat
# return self.K_matrix[i1,:][:,i2]
# def K(self, X, X2=None): return self.variance * self._index(X, X2)
# def Kdiag(self, X): return self.variance * self._index(X,None).diagonal()
# def update_gradients_full(self, dL_dK, X, X2=None): pass
# def update_gradients_diag(self, dL_dKdiag, X): raise NotImplementedError
# def gradients_X(self, dL_dK, X, X2=None): raise NotImplementedError
# def gradients_X_diag(self, dL_dKdiag, X): raise NotImplementedError
# def calc_v(self, groups):
# v = np.zeros(len(groups))
# B_i, B_i_ = set(), set()
# k = 0
# while len(B_i) < self.N_objs:
# B_i = B_i.union(groups[len(groups) - 1 - k])
# # B_i = B_i.union(groups[k])
# v[k] = - (self.F(B_i) - self.F(B_i_)) / (len(B_i)-len(B_i_))
# B_i_ = B_i.copy()
# k += 1
# return v
# def F(self, A_): return nx.cut_size(self.G, A_, None, 'weight')
# def phi_(self, A):
# assert type(A[0]) == set
# A_is = A.copy()
# if not self.pava:
# v = self.calc_v(A_is)
# else:
# v = []
# k = len(A_is)
# while len(v) < len(A_is):
# B_i = reduce(lambda a,b: a.union(b), A_is[k-1:])
# B_i_ = reduce(lambda a,b: a.union(b), A_is[k:]) if k < len(A_is) else set([])
# v_ = - (self.F(B_i) - self.F(B_i_)) / (len(B_i)-len(B_i_))
# if len(v) != 0 and v_ < v[0]:
# A_is[k-1:k+1] = [A_is[k-1].union(A_is[k])]
# v.pop(0)
# continue
# v.insert(0,v_)
# k -= 1
# w = np.zeros(self.N_objs)
# # Reordering
# for i in range(len(A_is)): w[list(A_is[i])] = v[i]
# # Not Reordering
# # for a,i in zip(A_is,range(len(v))):
# # w[list(a)] = v[i]
# return - w
# def F(A_, G): return nx.cut_size(G, A_, None, 'weight')
def F(A_, G):
return nx.cut_size(G, A_, None)
def phi_(A, N_objs, G):
assert type(A[0]) == set
A_is = A.copy()
v = []
k = len(A_is)
while len(v) < len(A_is):
B_i = reduce(lambda a, b: a.union(b), A_is[k - 1 :])
B_i_ = reduce(lambda a, b: a.union(b), A_is[k:]) if k < len(A_is) else set([])
v_ = -(F(B_i, G) - F(B_i_, G)) / (len(B_i) - len(B_i_))
if len(v) != 0 and v_ < v[0]:
A_is[k - 1 : k + 1] = [A_is[k - 1].union(A_is[k])]
v.pop(0)
continue
v.insert(0, v_)
k -= 1
w = np.zeros(N_objs)
for i in range(len(A_is)):
w[list(A_is[i])] = v[i]
return -w
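
# Hedged usage sketch (editor's addition): phi_ on a toy path graph; the
# _demo_* names are illustrative and not part of the original module.
_demo_G = nx.path_graph(4)                    # chain 0-1-2-3
_demo_A = [set([0]), set([1]), set([2, 3])]   # an ordered partition of the nodes
_demo_w = phi_(_demo_A, 4, _demo_G)           # -> [-1., 0., 0.5, 0.5]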
def get_phi(X_ours, G, N_objs, train_ind, test_ind):
X_phi = np.zeros((len(X_ours), N_objs))
for i, x in enumerate(X_ours):
f = phi_(x, N_objs, G)
X_phi[i] = f / np.linalg.norm(f)
return X_phi[train_ind.ravel()], X_phi[test_ind.ravel()]
def phi_interleaving(A_inter, G, N_objs, heuristic=False, samples=2000):
absents = list(set(list(range(N_objs))) - set(list(A_inter)))
inter = [set()]
for o in A_inter:
inter.append(set(([o])))
inter.append(set())
possible_positions = list(range(0, len(inter), 2))
X_inter_phi = np.zeros(N_objs)
if not heuristic or samples >= len(possible_positions) ** len(absents):
coherent_set = product(possible_positions, repeat=len(absents))
div = len(possible_positions) ** len(absents)
else:
rng = np.random.RandomState(N_objs)
coherent_set = rng.choice(possible_positions, (samples, len(absents)))
div = samples
for i, abs_pos in enumerate(coherent_set):
cur = copy.deepcopy(inter)
for pos, o in zip(abs_pos, absents):
cur[pos].add(o)
while set() in cur:
cur.remove(set())
f = phi_(cur, N_objs, G)
X_inter_phi += f # / np.linalg.norm(f)
# weighting more the certain partitions
# w = 1 / N_objs
# w1 = 2 * w
# w2 = round((1-w1*len(A_inter)),2) / len(absents)
# for i in range(N_objs):
# X_inter_phi[i] = X_inter_phi[i] * (w1 if i in A_inter else w2)
return X_inter_phi / div
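
# Note (editor's addition): phi_interleaving averages phi_ over the placements
# of unranked items that are coherent with the observed order A_inter --
# exhaustively via itertools.product, or by random sampling when `heuristic`
# is set and the full product would exceed `samples`.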
# %%
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_theme()
from sushi_dataset import sushi_dataset as load_dataset
my_dpi = 96
N_objs = 10
X, X_ours, y, y_train, y_test, train_ind, test_ind, G =\
load_dataset(0, "full", N=2500)
plt.figure(figsize=(500/my_dpi/1.6, 500/my_dpi/1.6), dpi=my_dpi)
# plt.figure()
# plt.subplot(1,2,1)
# A_1 = [set([x]) for x in X[2][:5]]+ [set([x for x in X[2][5:]])]
# A_2 = [set([x]) for x in X[2][:4:-1]]+[set([x for x in X[2][:5]])]
A_1 = X_ours[2]
A_2 = X_ours[2][::-1]
print(A_1)
print(A_2)
w_1 = phi_(A_1, N_objs, G)
w_2 = phi_(A_2, N_objs, G)
# plt.scatter(range(1,1+N_objs), w_1, label = r"$\phi(A_1)$")
# plt.scatter(range(1,1+N_objs), w_2, label = r"$\phi(A_2)$")
# plt.xticks(range(1,1+N_objs))
# plt.yticks(np.arange(-5,7,2))
plt.vlines(0,-0.5,10.5, "black", "--", alpha = 0.7)
plt.scatter(w_1, range(1,1+N_objs), label = r"$\phi(A)$", color = "gold")
plt.scatter(w_2, range(1,1+N_objs), label = r"$\phi(A')$", color = "red")
# plt.yticks(range(1,1+N_objs), df_sushi.iloc[:,0].tolist())
# plt.yticks(range(1,1+N_objs), [str(set([x])) for x in range(1,1+N_objs)])
plt.yticks(range(1,1+N_objs), range(1,1+N_objs))
# plt.xticks(np.arange(-5,7,2))
# plt.ylabel("W")
plt.xlabel(r"$\phi_d$", fontsize = 18)
plt.ylabel(r"$d$ ", fontsize = 18).set_rotation(0)
# plt.xlim(-6,8)
plt.ylim(0.5,10.5)
plt.legend(fontsize = 12, borderpad=0.01, borderaxespad=0, labelspacing=0.4,handletextpad=-0.3, scatterpoints=1, loc = "upper right")
plt.tight_layout()
plt.savefig("cached_results/interpretation21.pdf", bbox_inches="tight")
plt.show()
# plt.subplot(1,2,2)
plt.figure(figsize=(500/my_dpi/1.6, 500/my_dpi/1.6), dpi=my_dpi)
plt.vlines(0,-0.5,9.5, "black", "--", alpha = 0.7)
plt.scatter(w_1*w_2, range(1,1+N_objs), color = "orange")#, label = r"$\phi(A_1)_i\cdot\phi(A_2)_i \forall i=1\ldots n$")
plt.yticks([])
plt.xlabel(r"$\phi(A)_d\phi(A')_d$", fontsize = 18)
# plt.xlim(-6,12)
plt.ylim(-0.5,9.5)
plt.tight_layout()
plt.savefig("cached_results/interpretation22.pdf",bbox_inches="tight")
# plt.legend()
plt.show()
# # %%
# str(w_1.round(2).tolist())[1:-1].replace(",", "\\")
# str(w_2.round(2).tolist())[1:-1].replace(",", "\\")
# str((w_1 * w_2).round(2).tolist())[1:-1].replace(",", "\\")
# # %%
from copy import deepcopy
A_is = deepcopy(X_ours[2])
v = []
k = len(A_is)
while len(v) < len(A_is):
B_i = reduce(lambda a,b: a.union(b), A_is[k-1:])
B_i_ = reduce(lambda a,b: a.union(b), A_is[k:]) if k < len(A_is) else set([])
v_ = - (F(B_i, G) - F(B_i_, G)) / (len(B_i)-len(B_i_))
if len(v) != 0 and v_ < v[0]:
A_is[k-1:k+1] = [A_is[k-1].union(A_is[k])]
v.pop(0)
continue
v.insert(0,v_)
k -= 1
my_dpi = 96
plt.figure(figsize=(450/my_dpi/1.6, 250/my_dpi/1.6), dpi=my_dpi)
for i in range(len(A_is)):
A_is[i] = set([x+1 for x in A_is[i]])
plt.hlines(0,-0.5,9.5, "black", "--", alpha = 0.7)
plt.scatter(range(len(v)) , -np.array(v), 10)
a = str(A_is).replace("[","").replace("]","").split("{")[1:]
a = [("\n" if i % 2 == 0 else "")+"{"+x.replace(",","") for i,x in enumerate(a) ]
plt.xticks(range(len(v)), a, fontsize = 7)
plt.yticks(np.arange(-4,6,2), fontsize = 7)
plt.yticks(fontsize = 7)
plt.xlim(-0.5,6.5)
plt.ylabel("values", fontsize = 8)
plt.tight_layout()
plt.savefig("cached_results/interpretation1.pdf")
plt.show()
|
MichelangeloConserva/CutFunctionKernel
|
interleaving/our_kernel.py
|
our_kernel.py
|
py
| 8,950 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "networkx.cut_size",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "functools.reduce",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "functools.reduce",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 136,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "itertools.product",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "numpy.random.RandomState",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 154,
"usage_type": "attribute"
},
{
"api_name": "copy.deepcopy",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "seaborn.set_theme",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "sushi_dataset.sushi_dataset",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 192,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.vlines",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 213,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 214,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 215,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.yticks",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 218,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 221,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 222,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 224,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 225,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 226,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 227,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 228,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 231,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.vlines",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 233,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 234,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.yticks",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 235,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 236,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 238,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 240,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 241,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 244,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 244,
"usage_type": "name"
},
{
"api_name": "copy.deepcopy",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "functools.reduce",
"line_number": 263,
"usage_type": "call"
},
{
"api_name": "functools.reduce",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 276,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 276,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.hlines",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 280,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 281,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.xticks",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 284,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.yticks",
"line_number": 285,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 285,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 285,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.yticks",
"line_number": 286,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 286,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlim",
"line_number": 287,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 287,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 288,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 288,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 291,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 291,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 292,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 293,
"usage_type": "name"
}
] |
3901695878
|
from parse import parse
import pygame
from pygame.locals import *
from cube import Cube
from const import *
from pygame.math import Vector3
from utils import *
from drawable_chunk import DrawableChunk
from hero import Hero
class Level(pygame.sprite.Sprite):
def __init__(self):
super().__init__()
self.cubes = []
self.drawables = []
self.events = []
self.image_tileset = pygame.image.load("res/tileset.png").convert_alpha()
self.size = Vector3()
def add_drawable(self, drawable):
self.drawables.append(drawable)
def read(self, filename):
tileset_width = self.image_tileset.get_width() // TILE_SIZE
with open(filename) as f:
lines = [line.rstrip() for line in f]
for line in lines:
r = parse("{:d}:{:d}:{:d} {:d}:{:d}:{:d}", line)
c0 = to_2d_coords(
r[3],
tileset_width,
)
c1 = to_2d_coords(
r[4],
tileset_width,
)
c2 = to_2d_coords(
r[5],
tileset_width,
)
coords = [c0, c1, c2]
cube = Cube(coords)
cube.position = Vector3(
r[0] * Cube.SIZE, r[1] * Cube.SIZE, r[2] * Cube.SIZE
)
cube.indexes = Vector3(r[0], r[1], r[2])
cube.zindex = sum(cube.indexes)
self.cubes.append(cube)
self.update_size()
def update_size(self):
for cube in self.cubes:
if self.size.x < cube.indexes.x + 1:
self.size.x = cube.indexes.x + 1
if self.size.y < cube.indexes.y + 1:
self.size.y = cube.indexes.y + 1
if self.size.z < cube.indexes.z + 1:
self.size.z = cube.indexes.z + 1
def get_drawable(self, x, y, z):
for i in range(len(self.drawables)):
drawable = self.drawables[i]
if drawable.position.x // 16 == x and drawable.position.y // 16 == y and drawable.position.z // 16 == z:
return drawable
def get_cube(self, x, y, z):
for i in range(len(self.cubes)):
cube = self.cubes[i]
if cube.indexes.x == x and cube.indexes.y == y and cube.indexes.z == z:
return cube
def get_cube_index(self, x, y, z):
for i in range(len(self.cubes)):
cube = self.cubes[i]
if cube.indexes.x == x and cube.indexes.y == y and cube.indexes.z == z:
return i
def draw(self, camera, surface_display):
drawables_with_chunks = []
# Work In Progress: split drawables into chunks when needed
for drawable in self.drawables:
if isinstance(drawable, Hero):
                # assume that only the hero needs chunked display
# draw in a temporary surface
surface_tmp = pygame.Surface(
(drawable.drawable_width, drawable.drawable_height), pygame.SRCALPHA
)
drawable.draw(0, 0, surface_tmp)
                # assume 2 chunks
nb_chunk = 2 # drawable_height // Cube.SIZE
for number in range(nb_chunk):
drawable_chunk = DrawableChunk(
drawable.position.x,
drawable.position.y,
drawable.position.z + 16, # Shift
)
# TODO fix drawing when hero jump
drawable_chunk.zindex = (
sum(
list(
map(
(lambda x: x / Cube.SIZE),
drawable_chunk.position,
)
)
)
+ number
- 1
)
drawable_chunk.number = nb_chunk - number - 1
drawable_chunk.surface = surface_tmp
drawable_chunk.size = Vector2(
drawable.drawable_width, drawable.drawable_height
)
drawables_with_chunks.append(drawable_chunk)
else:
drawables_with_chunks.append(drawable)
sorted_drawables = sorted(
self.cubes + drawables_with_chunks, key=lambda drawable: drawable.zindex
)
for drawable in sorted_drawables:
drawable_iso = cartesian_to_isometric(
(drawable.position.x, drawable.position.y)
)
x = camera.x + drawable_iso.x - Cube.SIZE
y = camera.y + drawable_iso.y - drawable.position.z
if isinstance(drawable, Cube):
drawable.draw(x, y, surface_display, self.image_tileset)
else:
drawable.draw(x, y, surface_display)
def clear(self):
self.drawables.clear()
self.cubes.clear()
self.events.clear()
|
odrevet/isometric-map
|
level.py
|
level.py
|
py
| 5,207 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pygame.sprite",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "pygame.math.Vector3",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "parse.parse",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "cube.Cube",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "cube.position",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "pygame.math.Vector3",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "cube.Cube.SIZE",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "cube.Cube",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "cube.indexes",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "pygame.math.Vector3",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "cube.zindex",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "cube.indexes",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "cube.indexes",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "cube.indexes",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "cube.indexes",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "cube.indexes",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "cube.indexes",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "cube.indexes",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "cube.indexes",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "cube.indexes",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "hero.Hero",
"line_number": 96,
"usage_type": "argument"
},
{
"api_name": "pygame.Surface",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "pygame.SRCALPHA",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "drawable_chunk.DrawableChunk",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "drawable_chunk.zindex",
"line_number": 114,
"usage_type": "attribute"
},
{
"api_name": "cube.Cube.SIZE",
"line_number": 118,
"usage_type": "attribute"
},
{
"api_name": "cube.Cube",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "drawable_chunk.position",
"line_number": 119,
"usage_type": "attribute"
},
{
"api_name": "drawable_chunk.number",
"line_number": 127,
"usage_type": "attribute"
},
{
"api_name": "drawable_chunk.surface",
"line_number": 128,
"usage_type": "attribute"
},
{
"api_name": "drawable_chunk.size",
"line_number": 129,
"usage_type": "attribute"
},
{
"api_name": "cube.Cube.SIZE",
"line_number": 144,
"usage_type": "attribute"
},
{
"api_name": "cube.Cube",
"line_number": 144,
"usage_type": "name"
},
{
"api_name": "cube.Cube",
"line_number": 147,
"usage_type": "argument"
}
] |
14177896732
|
"""Reads parameters of received http request"""
import http.client as http_client
import logging
from typing import Optional
import azure.functions as func
from .custom_error import DownloadBlobError
log = logging.getLogger(name="log." + __name__)
def main(req: func.HttpRequest, params_list: Optional[list] = None) -> dict[str, str]:
"""
Reads parameters of received http request.
Args:
req (azure.functions.HttpRequest): HTTP request sent to Azure Function's endpoint.
params_list (Optional[list], optional): list of parameters expected in the request.\
Defaults to ["invoice_id", "single_file_download", "file_format"].
Raises:
DownloadBlobError: if any of the expected parameters is not found in the request.
Returns:
dict[str, str]: dictionary of parameters and their values.
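    Example (illustrative values):
        >>> main(req)  # request carrying ?invoice_id=42&file_format=pdf
        {'invoice_id': '42', 'file_format': 'pdf'}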
"""
log.debug(msg=f"Reading parameters of the request {req}.")
if params_list is None:
params_list = ["invoice_id", "file_format"]
params = {}
for param in params_list:
try:
params[param] = req.params[param]
except KeyError as exc:
message = f"No {param} parameter in the request."
raise DownloadBlobError(
exception_type="KeyError",
details=f"KeyError: {param}",
message=message,
status_code=http_client.BAD_REQUEST,
) from exc
log.debug(msg=f"Parameters of the request {req} read successfully.")
return params
|
wieczorekgrzegorz/ksef-krportal-communication
|
modules/download_blob/modules/read_params.py
|
read_params.py
|
py
| 1,531 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "azure.functions.HttpRequest",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "azure.functions",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "custom_error.DownloadBlobError",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "http.client.BAD_REQUEST",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "http.client",
"line_number": 42,
"usage_type": "name"
}
] |
33247661764
|
import sys
from pathlib import Path
import cv2
import imutils
import numpy as np
FILE = Path(__file__).resolve()
ROOT = FILE.parents[1]
if str(ROOT) not in sys.path:
sys.path.append(str(ROOT))
code_dir_path = ROOT.joinpath("Code")
data_dir_path = ROOT.joinpath("Data")
class BlurTool:
def __init__(self):
cfg_path = code_dir_path.joinpath("yolov7.cfg")
weight_path = code_dir_path.joinpath("yolov7.weights")
self.net = cv2.dnn.readNetFromDarknet(str(cfg_path), str(weight_path))
self.layers = [(self.net.getLayerNames()[i - 1]) for i in self.net.getUnconnectedOutLayers()]
def _check_file(self, file_name):
self.file_path = data_dir_path.joinpath(file_name)
assert self.file_path.exists(), "File does not exist!"
suffix = self.file_path.suffix
if suffix in [".png", ".jpg"]:
self.is_video = False
elif suffix in [".mp4"]:
self.is_video = True
else:
raise ValueError(f"No Support for the Format: {suffix}")
def process(self, file_name="LP.png"):
self._check_file(file_name)
if not self.is_video:
self.process_image()
else:
self.process_video()
def process_image(self):
frame = cv2.imread(str(self.file_path))
vehicles = self.detect_vehicle(frame)
# for vehicle in vehicles:
# xmin, xmax, ymin, ymax = vehicle
# self.detect_license_plate(frame[ymin: ymax, xmin: xmax])
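        # NOTE: vehicles[1] assumes at least two vehicles were detected;
        # it raises IndexError otherwise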
xmin, xmax, ymin, ymax = vehicles[1]
self.detect_license_plate(frame[ymin: ymax, xmin: xmax])
# print(vehicles)
def process_video(self):
        cap = cv2.VideoCapture(str(self.file_path))  # open the source video; per-frame processing is not implemented yet
def detect_vehicle(self, frame):
frame_height, frame_width = frame.shape[:2]
vehicle_classes = [2, 3, 5, 7]
vehicle_boxes, vehicle_scores, vehicles = [], [], []
vehicle_conf_thr, vehicle_nms_thr = 0.5, 0.2
        # preprocess (blob resized to 320x320) -> set input -> forward
blob = cv2.dnn.blobFromImage(frame, scalefactor=1 / 255, size=(320, 320), mean=[0, 0, 0], swapRB=True)
self.net.setInput(blob)
outputs = self.net.forward(self.layers)
for output in outputs:
for detection in output:
# [x, y, w, h, conf, score1, score2, ..., score80]
scores = detection[5:]
class_id = np.argmax(scores)
conf = scores[class_id]
if (class_id in vehicle_classes) and (conf > vehicle_conf_thr):
x, y = detection[0] * frame_width, detection[1] * frame_height
w, h = detection[2] * frame_width, detection[3] * frame_height
xmin, xmax = x - w / 2, x + w / 2
ymin, ymax = y - h / 2, y + h / 2
vehicle_boxes.append((xmin, xmax, ymin, ymax))
vehicle_scores.append(float(conf))
# postprocess: nms -> size filter
vehicle_indices = cv2.dnn.NMSBoxes(vehicle_boxes, vehicle_scores, vehicle_conf_thr, vehicle_nms_thr)
for index in vehicle_indices:
xmin, xmax, ymin, ymax = map(int, vehicle_boxes[index])
if (xmax - xmin) * (ymax - ymin) >= frame_width * frame_height * 0.03:
# [(xmin, xmax, ymin, ymax), ...]
vehicles.append((xmin, xmax, ymin, ymax))
return vehicles
def detect_license_plate(self, frame):
license_plate = None
# convert to grey scale -> reduce noise -> detect edges
frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frame_reduce_noise = cv2.bilateralFilter(frame_gray, d=13, sigmaColor=15, sigmaSpace=15)
frame_edge = cv2.Canny(frame_reduce_noise, threshold1=30, threshold2=200)
contours = imutils.grab_contours(cv2.findContours(frame_edge.copy(), mode=cv2.RETR_TREE, method=cv2.CHAIN_APPROX_SIMPLE))
# check which one has a rectangle shape (4 sides) and closed figure
for cnt in sorted(contours, key=cv2.contourArea, reverse=True)[:5]:
peri = cv2.arcLength(curve=cnt, closed=True)
approx = cv2.approxPolyDP(curve=cnt, epsilon=0.1 * peri, closed=True)
if len(approx) == 4:
x, y, w, h = cv2.boundingRect(approx)
license_plate = (x, y, x + w, y + h)
break
return license_plate
# # open -> threshold -> edge detection
# frame_open = cv2.morphologyEx(frame_reduce_noise, op=cv2.MORPH_OPEN, kernel=np.ones((23, 23), np.uint8))
# frame_add_weight = cv2.addWeighted(src1=frame_reduce_noise, alpha=1, src2=frame_open, beta=-1, gamma=0)
# _, frame_thresh = cv2.threshold(frame_add_weight, thresh=0, maxval=255, type=cv2.THRESH_BINARY + cv2.THRESH_OTSU)
# frame_edge = cv2.Canny(frame_thresh, threshold1=100, threshold2=200)
# frame_edge = cv2.morphologyEx(frame_edge, op=cv2.MORPH_CLOSE, kernel=np.ones((10, 10), np.uint8))
# frame_edge = cv2.morphologyEx(frame_edge, cv2.MORPH_OPEN, kernel=np.ones((10, 10), np.uint8))
# contours = imutils.grab_contours(cv2.findContours(frame_edge.copy() , cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE))
# for cnt in sorted(contours, key=cv2.contourArea, reverse=True)[:5]:
# (x, y), (w, h), angle = cv2.minAreaRect(cnt)
# cv2.imshow("test", frame_edge)
# cv2.waitKey()
# exit()
# cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 255), 2)
# cv2.imshow("test", frame)
# cv2.waitKey()
# cv2.drawContours(frame, [a], -1, (0, 255, 0), 3)
# cv2.imshow("test", frame)
# cv2.waitKey()
blur_tool = BlurTool()
blur_tool.process(file_name="LP.png") # LP.png frame.jpg
"""
# face detection
prototxt_path = "Code/prototxt.txt"
model_path = "Code/res10_300x300_ssd_iter_140000_fp16.caffemodel"
model = cv2.dnn.readNetFromCaffe(prototxt_path, model_path)
xmin, xmax, ymin, ymax = map(int, vehicle_boxes[index])
vehicles.append()
cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (0, 255, 255), 5)
cv2.imshow("test", frame)
cv2.waitKey()
"""
|
Beau-Yang/CapstoneProject
|
Code/blur_tool_version.py
|
blur_tool_version.py
|
py
| 6,194 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pathlib.Path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "sys.path.append",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "cv2.dnn.readNetFromDarknet",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "cv2.dnn",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "cv2.imread",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "cv2.dnn.blobFromImage",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "cv2.dnn",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "numpy.argmax",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "cv2.dnn.NMSBoxes",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "cv2.dnn",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "cv2.cvtColor",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "cv2.bilateralFilter",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "cv2.Canny",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "imutils.grab_contours",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "cv2.findContours",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "cv2.RETR_TREE",
"line_number": 109,
"usage_type": "attribute"
},
{
"api_name": "cv2.CHAIN_APPROX_SIMPLE",
"line_number": 109,
"usage_type": "attribute"
},
{
"api_name": "cv2.contourArea",
"line_number": 111,
"usage_type": "attribute"
},
{
"api_name": "cv2.arcLength",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "cv2.approxPolyDP",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "cv2.boundingRect",
"line_number": 115,
"usage_type": "call"
}
] |
18995573707
|
import os
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
from paddleocr import PaddleOCR,draw_ocr
# Paddleocr supports Chinese, English, French, German, Korean and Japanese.
# You can set the parameter `lang` as `ch`, `en`, `fr`, `german`, `korean`, `japan`
# to switch the language model in order.
ocr = PaddleOCR(use_angle_cls=True, lang='ch') # need to run only once to download and load model into memory
img_path = 'Im6.png'
result = ocr.ocr(img_path, cls=True)
#for line in result:
# print(line)
# Each line consists of a 4 * 2 list and a tuple,
# containing coordinates of a bounding box and ocr result with confidence, respectively.
# draw result
from PIL import Image
image = Image.open(img_path).convert('RGB')
boxes = [line[0] for line in result]
txts = [line[1][0] for line in result]
scores = [line[1][1] for line in result]
im_show = draw_ocr(image, boxes, txts, scores, font_path='./fonts/simfang.ttf')
im_show = Image.fromarray(im_show)
im_show.save('result.jpg')
|
tota1Noob/autoBookmarkGen4PDF
|
moduleTryouts/ocrTest.py
|
ocrTest.py
|
py
| 983 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "os.environ",
"line_number": 2,
"usage_type": "attribute"
},
{
"api_name": "paddleocr.PaddleOCR",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "paddleocr.draw_ocr",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "PIL.Image.fromarray",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 24,
"usage_type": "name"
}
] |
73955138107
|
from tkinter import *
from PIL import Image, ImageDraw
from src.Model import Model
b1 = "up"
xold, yold = None, None
image1, drawimg = None, None
model = Model()
def create_lines(canv):
canv.create_line(30, 0, 30, 140, smooth=TRUE, fill="red", width="1")
canv.create_line(110, 0, 110, 140, smooth=TRUE, fill="red", width="1")
canv.create_line(0, 30, 140, 30, smooth=TRUE, fill="red", width="1")
canv.create_line(0, 110, 140, 110, smooth=TRUE, fill="red", width="1")
def testCallback(canv):
global image1, model
#image1 = image1.resize((28,28))
image1.save("./valami.png")
model.testImage(image1)
def clearCallback(canv):
global image1, drawimg
canv.delete('all')
create_lines(canv)
drawimg.rectangle((0, 0, image1.size[0], image1.size[1]), fill=0)
def main():
global image1, drawimg
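    # 28x28 grayscale buffer mirrors the 140x140 canvas at 1/5 scale;
    # model.testImage() consumes this buffer rather than the Tk canvas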
image1 = Image.new(mode="L", size=(28, 28))
drawimg = ImageDraw.Draw(image1)
root = Tk()
root.title("DRAW")
root.geometry('200x150')
drawing_area = Canvas(root)
drawing_area.grid(row=0, column=0, rowspan=2)
drawing_area.config(width=140, height=140)
drawing_area.configure(background='black')
create_lines(drawing_area)
drawing_area.bind("<Motion>", motion)
drawing_area.bind("<ButtonPress-1>", b1down)
drawing_area.bind("<ButtonRelease-1>", b1up)
B1 = Button(root, text="Test", command=lambda: testCallback(drawing_area))
B1.grid(row=0, column=1)
B2 = Button(root, text="Clear", command=lambda: clearCallback(drawing_area))
B2.grid(row=1, column=1)
root.mainloop()
def b1down(event):
global b1
b1 = "down"
def b1up(event):
global b1, xold, yold
b1 = "up"
xold = None
yold = None
def motion(event):
global drawimg
if b1 == "down":
global xold, yold
if xold is not None and yold is not None:
event.widget.create_line(xold, yold, event.x, event.y, smooth=TRUE, fill="white", width="10")
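            # mirror the stroke into the 28x28 buffer (canvas coordinates / 5)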
drawimg.line((xold / 5, yold / 5, event.x / 5, event.y / 5), fill=255, width=2)
xold = event.x
yold = event.y
if __name__ == "__main__":
model.gen_data()
model.train()
main()
|
Freyb/LegoAI-homework
|
src/Gui.py
|
Gui.py
|
py
| 2,185 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "src.Model.Model",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "PIL.Image.new",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "PIL.ImageDraw.Draw",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "PIL.ImageDraw",
"line_number": 33,
"usage_type": "name"
}
] |
33043283636
|
import base64
import os
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import AuthenticationForm, UserCreationForm
from django.core.files.base import ContentFile
from django.http import JsonResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.views.decorators.http import require_http_methods
from .forms import DinoImageForm, DinosaurForm
from .models import DinoImage, Dinosaur, Favorite
def register(request):
if request.method == "POST":
form = UserCreationForm(request.POST)
if form.is_valid():
form.save()
username = form.cleaned_data.get("username")
messages.success(request, f"Account created for {username}!")
return redirect("login")
else:
form = UserCreationForm()
return render(request, "registration/register.html", {"form": form})
def user_login(request):
if request.method == "POST":
form = AuthenticationForm(data=request.POST)
if form.is_valid():
user = form.get_user()
login(request, user)
messages.success(request, f"You are now logged in as {user.username}")
return redirect("home")
else:
form = AuthenticationForm()
return render(request, "registration/login.html", {"form": form})
def user_logout(request):
logout(request)
messages.success(request, "You have been logged out")
return redirect("home")
@login_required
def home(request):
dinos = Dinosaur.objects.all()
return render(request, "home.html", {"dinos": dinos})
@login_required
# def dinosaur_detail(request, pk):
# dino = get_object_or_404(Dinosaur, pk=pk)
# images = DinoImage.objects.filter(dinosaur=dino)
# image_urls = []
# for image in images:
# image_urls.append(
# f"data:{image.content_type};base64,{base64.b64encode(image.image.read()).decode()}"
# )
# is_favorited = Favorite.objects.filter(user=request.user, dinosaur=dino).exists()
# return render(
# request,
# "dinosaur_detail.html",
# {"dino": dino, "image_urls": image_urls, "is_favorited": is_favorited},
# )
def dinosaur_detail(request, pk):
dino = get_object_or_404(Dinosaur, pk=pk)
images = DinoImage.objects.filter(dinosaur=dino)
image_urls = []
for image in images:
image_path = os.path.join(settings.MEDIA_ROOT, str(image.image))
with open(image_path, "rb") as f:
image_data = f.read()
image_base64 = base64.b64encode(image_data).decode()
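            # a data URI lets the template render the image inline without a media URL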
image_url = f"data:image/jpeg;base64,{image_base64}"
image_urls.append(image_url)
return render(
request,
"dinosaur_detail.html",
{"dino": dino, "images": images, "image_urls": image_urls},
)
@login_required
def add_dinosaur(request):
if request.method == "POST":
form = DinosaurForm(request.POST, request.FILES)
if form.is_valid():
dino = form.save()
messages.success(request, f"{dino.name} has been added to the database!")
return redirect("dinosaur_detail", pk=dino.pk)
else:
form = DinosaurForm()
return render(request, "add_dinosaur.html", {"form": form})
@login_required
def search_results(request):
query = request.GET.get("q")
    print(query)  # debug: confirm the search query is retrieved correctly
if query:
dinosaurs = Dinosaur.objects.filter(name__icontains=query)
else:
dinosaurs = Dinosaur.objects.all()
return render(request, "search_results.html", {"dinosaurs": dinosaurs})
@login_required
def edit_dinosaur(request, pk):
dino = get_object_or_404(Dinosaur, pk=pk)
if request.method == "POST":
form = DinosaurForm(request.POST, request.FILES, instance=dino)
if form.is_valid():
form.save()
return redirect("dinosaur_detail", pk=dino.pk)
else:
form = DinosaurForm(instance=dino)
return render(request, "dinosaur_edit.html", {"form": form, "dino": dino})
@login_required
def update_dinosaur(request, pk):
dino = get_object_or_404(Dinosaur, pk=pk)
if request.method == "POST":
form = DinosaurForm(request.POST, request.FILES, instance=dino)
if form.is_valid():
dino = form.save()
messages.success(request, f"{dino.name} has been updated!")
return redirect("dinosaur_detail", pk=dino.pk)
else:
form = DinosaurForm(instance=dino)
return render(request, "add_dinosaur.html", {"form": form})
@login_required
def delete_dinosaur(request, pk):
dino = get_object_or_404(Dinosaur, pk=pk)
if request.method == "POST":
dino.delete()
messages.success(request, f"{dino.name} has been deleted from the database!")
return redirect("home")
return render(request, "delete_dinosaur.html", {"dino": dino})
@login_required
@require_http_methods(["POST"])
def add_image(request, pk):
dinosaur = get_object_or_404(Dinosaur, pk=pk)
if request.method == "POST":
form = DinoImageForm(request.POST, request.FILES)
if form.is_valid():
image = form.save(commit=False)
image.dinosaur = dinosaur
image.save()
return redirect("dinosaur_detail", pk=dinosaur.pk)
else:
form = DinoImageForm()
return render(request, "add_image.html", {"form": form, "dinosaur": dinosaur})
@login_required
@require_http_methods(["POST"])
def delete_image(request, pk):
dino_image = get_object_or_404(DinoImage, pk=pk)
dinosaur_pk = dino_image.dinosaur.pk
dino_image.delete()
return redirect("dinosaur_detail", pk=dinosaur_pk)
@login_required
@require_http_methods(["POST"])
def toggle_favorite(request, pk):
dino = get_object_or_404(Dinosaur, pk=pk)
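    # get_or_create implements the toggle: an existing favorite is deleted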
fav, created = Favorite.objects.get_or_create(user=request.user, dinosaur=dino)
if not created:
fav.delete()
return JsonResponse({"success": True, "is_favorited": not created})
@login_required
def add_favorite(request, pk):
dino = get_object_or_404(Dinosaur, pk=pk)
fav, created = Favorite.objects.get_or_create(user=request.user, dinosaur=dino)
if created:
messages.success(request, "Added to favorites.")
else:
messages.error(request, "This dinosaur is already in your favorites.")
return redirect("dinosaur_detail", pk=dino.pk)
@login_required
def list_favorites(request):
favorites = Favorite.objects.filter(user=request.user)
dinos = [fav.dinosaur for fav in favorites]
return render(request, "list_favorites.html", {"dinos": dinos})
|
Vleyked/django-template
|
dinosaur_app/dinosaurs/views.py
|
views.py
|
py
| 6,837 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.contrib.auth.forms.UserCreationForm",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages.success",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.forms.UserCreationForm",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.forms.AuthenticationForm",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.login",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages.success",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.forms.AuthenticationForm",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.logout",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages.success",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "models.Dinosaur.objects.all",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "models.Dinosaur.objects",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "models.Dinosaur",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "models.Dinosaur",
"line_number": 72,
"usage_type": "argument"
},
{
"api_name": "models.DinoImage.objects.filter",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "models.DinoImage.objects",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "models.DinoImage",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings.MEDIA_ROOT",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "base64.b64encode",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "forms.DinosaurForm",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages.success",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "forms.DinosaurForm",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "models.Dinosaur.objects.filter",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "models.Dinosaur.objects",
"line_number": 109,
"usage_type": "attribute"
},
{
"api_name": "models.Dinosaur",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "models.Dinosaur.objects.all",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "models.Dinosaur.objects",
"line_number": 111,
"usage_type": "attribute"
},
{
"api_name": "models.Dinosaur",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "models.Dinosaur",
"line_number": 117,
"usage_type": "argument"
},
{
"api_name": "forms.DinosaurForm",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "forms.DinosaurForm",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "models.Dinosaur",
"line_number": 130,
"usage_type": "argument"
},
{
"api_name": "forms.DinosaurForm",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages.success",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 135,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "forms.DinosaurForm",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "models.Dinosaur",
"line_number": 144,
"usage_type": "argument"
},
{
"api_name": "django.contrib.messages.success",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 147,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 142,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "models.Dinosaur",
"line_number": 155,
"usage_type": "argument"
},
{
"api_name": "forms.DinoImageForm",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "forms.DinoImageForm",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 152,
"usage_type": "name"
},
{
"api_name": "django.views.decorators.http.require_http_methods",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "models.DinoImage",
"line_number": 171,
"usage_type": "argument"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 168,
"usage_type": "name"
},
{
"api_name": "django.views.decorators.http.require_http_methods",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "models.Dinosaur",
"line_number": 180,
"usage_type": "argument"
},
{
"api_name": "models.Favorite.objects.get_or_create",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "models.Favorite.objects",
"line_number": 181,
"usage_type": "attribute"
},
{
"api_name": "models.Favorite",
"line_number": 181,
"usage_type": "name"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 177,
"usage_type": "name"
},
{
"api_name": "django.views.decorators.http.require_http_methods",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "models.Dinosaur",
"line_number": 189,
"usage_type": "argument"
},
{
"api_name": "models.Favorite.objects.get_or_create",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "models.Favorite.objects",
"line_number": 190,
"usage_type": "attribute"
},
{
"api_name": "models.Favorite",
"line_number": 190,
"usage_type": "name"
},
{
"api_name": "django.contrib.messages.success",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 192,
"usage_type": "name"
},
{
"api_name": "django.contrib.messages.error",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 194,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 187,
"usage_type": "name"
},
{
"api_name": "models.Favorite.objects.filter",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "models.Favorite.objects",
"line_number": 200,
"usage_type": "attribute"
},
{
"api_name": "models.Favorite",
"line_number": 200,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 198,
"usage_type": "name"
}
] |
38714648302
|
from rest_framework import serializers
from .models import Animal
from . import serializer_company
class AnimalSerializer(serializers.HyperlinkedModelSerializer):
company = serializer_company.CompanySerializer()
class Meta:
model = Animal
fields = [
'id',
'name',
'type',
'sub_type',
'header_image',
'profile_image',
'tag_number',
'registration_number',
'dob',
'father',
'mother',
'attachment',
'company'
]
|
pohara9720/lma-python
|
lma/api/serializer_animal.py
|
serializer_animal.py
|
py
| 591 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "rest_framework.serializers.HyperlinkedModelSerializer",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "serializer_company.CompanySerializer",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "models.Animal",
"line_number": 10,
"usage_type": "name"
}
] |
31126427233
|
from gpiozero import DigitalInputDevice
import time
class EncoderCounter(object):
def __init__(self,pin_number,side):
self.side = side
self.test_mode = False
self.pulse_count = 0
self.device = DigitalInputDevice(pin=pin_number)
self.device.pin.when_changed = self.count_pulses
self.previous = None
self.difs = []
    def record_gap_between_high(self, _, state):
        if state == 1:
            if self.previous is None:
                self.previous = time.time()
            else:
                now = time.time()
                dif = now - self.previous
                self.previous = now
                self.difs.append(dif)
def count_pulses(self,_,state):
self.pulse_count +=1
def reset(self):
self.pulse_count=0
def set_mode(self,mode):
if(mode=='normal'):
self.test_mode = False
            self.device.pin.when_changed = self.count_pulses
if(mode=='test'):
self.test_mode = True
self.device.pin.when_changed = self.record_gap_between_high
def report_test(self,np):
result = np.array(self.difs)
result = result[20:-20]
centred = result - result.mean()
centred = np.absolute(centred)
sd = result.std()
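        # flag gaps more than two standard deviations from the mean as outliers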
outliers = result[centred>sd*2]
print(f'result for side: {self.side}')
print(f'max: {result.max()}, min: {result.min()}, mean {result.mean()} , sd {result.std()}')
print('outliers',outliers)
return result
# bot.stop()
|
gregorianrants/legobot-7
|
Encoder.py
|
Encoder.py
|
py
| 1,404 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "gpiozero.DigitalInputDevice",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 19,
"usage_type": "call"
}
] |
19475609630
|
from django.apps import AppConfig
from django.conf import settings
import os
import joblib
class SentimentConfig(AppConfig):
name = 'sentiment'
path = os.path.join(settings.MODELS, 'models.p')
path_emosi = os.path.join(settings.MODELS, 'models_emotion.p')
path_general = os.path.join(settings.MODELS, 'models_general.p')
    # unpack the classifier/vectorizer pairs bundled in each joblib file
with open(path, 'rb') as joblibFile:
data = joblib.load(joblibFile)
with open(path_emosi, 'rb') as joblibFile:
data_emosi = joblib.load(joblibFile)
with open(path_general, 'rb') as joblibFile:
data_general = joblib.load(joblibFile)
model = data['classifier']
vectorizer = data['vectorizer']
model_emosi = data_emosi['classifier']
vectorizer_emosi = data_emosi['vectorizer']
model_general = data_general['classifier']
vectorizer_general = data_general['vectorizer']
|
kholiqcode/skripsi
|
sentiment/apps.py
|
apps.py
|
py
| 927 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.apps.AppConfig",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings.MODELS",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings.MODELS",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings.MODELS",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "joblib.load",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "joblib.load",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "joblib.load",
"line_number": 22,
"usage_type": "call"
}
] |
16970226908
|
from setuptools import setup
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open("README.md") as f:
long_description = f.read()
version = {}
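# read __version__ from emv/__init__.py without importing the package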
with open(path.join(here, "emv", "__init__.py")) as fp:
exec(fp.read(), version)
setup(
name="emv",
version=version["__version__"],
description="EMV Smartcard Protocol Library",
long_description=long_description,
long_description_content_type="text/markdown",
license="MIT",
author="Russ Garrett",
author_email="[email protected]",
url="https://github.com/russss/python-emv",
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
],
keywords="smartcard emv payment",
python_requires=">=3.4",
packages=["emv", "emv.protocol", "emv.command"],
install_requires=[
"pyscard==2.0.0",
"pycountry==20.7.3",
"terminaltables==3.1.0",
"click==7.1.2",
],
entry_points={"console_scripts": {"emvtool=emv.command.client:run"}},
)
|
russss/python-emv
|
setup.py
|
setup.py
|
py
| 1,114 |
python
|
en
|
code
| 100 |
github-code
|
6
|
[
{
"api_name": "os.path.abspath",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "os.path.dirname",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "codecs.open",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "codecs.open",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "setuptools.setup",
"line_number": 14,
"usage_type": "call"
}
] |
40411376601
|
#!/usr/bin/env python3
"""
Name: locator_led_status.py
Description: NXAPI: display locator-led status for chassis, modules, fans
Example output:
% ./locator_led_status.py --vault hashicorp --devices cvd_bgw_1 --module 1,2 --fan 1,2
ip hostname status locator-led
192.168.11.110 cvd-1111-bgw ON chassis
192.168.11.110 cvd-1111-bgw OFF module_1
192.168.11.110 cvd-1111-bgw ON module_2
192.168.11.110 cvd-1111-bgw ON fan_1
192.168.11.110 cvd-1111-bgw OFF fan_2
%
"""
our_version = 106
script_name = "locator_led_status"
# standard libraries
import argparse
import re
from concurrent.futures import ThreadPoolExecutor
# local libraries
from nxapi_netbox.args.args_cookie import ArgsCookie
from nxapi_netbox.args.args_nxapi_tools import ArgsNxapiTools
from nxapi_netbox.general.log import get_logger
from nxapi_netbox.netbox.netbox_session import netbox, get_device_mgmt_ip
from nxapi_netbox.vault.vault import get_vault
from nxapi_netbox.nxapi.nxapi_locator_led import NxapiLocatorLedStatus
def get_parser():
ex_prefix = "Example:"
help_module = (
"Either a single module/linecard, or a comma-separate list of modules/linecards"
)
help_fan = "Either a single fan, or a comma-separate list of fans"
help_on = "If present, print only locator-leds whose status is ON. If not present, print status for all locator-leds"
ex_module = "{} --module 2,3,6".format(ex_prefix)
ex_fan = "{} --fan 3".format(ex_prefix)
ex_on = "{} --on".format(ex_prefix)
parser = argparse.ArgumentParser(
description="DESCRIPTION: NXAPI: display locator-led status for chassis, modules, fans",
parents=[ArgsCookie, ArgsNxapiTools],
)
mandatory = parser.add_argument_group(title="MANDATORY SCRIPT ARGS")
optional = parser.add_argument_group(title="OPTIONAL SCRIPT ARGS")
optional.add_argument(
"--on",
dest="on",
required=False,
action="store_true",
default=False,
help="{} {}".format(help_on, ex_on),
)
optional.add_argument(
"--module",
dest="module",
required=False,
default=None,
help="(default: %(default)s) " + help_module + ex_module,
)
optional.add_argument(
"--fan",
dest="fan",
required=False,
default=None,
help="(default: %(default)s) " + help_fan + ex_fan,
)
parser.add_argument(
"--version", action="version", version="{} v{}".format("%(prog)s", our_version)
)
return parser.parse_args()
def get_device_list():
try:
return cfg.devices.split(",")
except:
log.error(
"exiting. Cannot parse --devices {}. Example usage: --devices leaf_1,spine_2,leaf_2".format(
cfg.devices
)
)
exit(1)
def print_output(futures):
for future in futures:
output = future.result()
        if output is None:
continue
for line in output:
print(line)
if len(output) > 0:
print()
def print_header():
print(fmt.format("ip", "hostname", "status", "locator-led"))
def collect_output(ip, nx, modules, fans):
lines = list()
if not cfg.on:
lines.append(fmt.format(ip, nx.hostname, nx.chassis, "chassis"))
elif cfg.on and nx.chassis == "ON":
lines.append(fmt.format(ip, nx.hostname, nx.chassis, "chassis"))
for module in modules:
nx.module = module
if cfg.on and nx.module_status != "ON":
continue
lines.append(
fmt.format(ip, nx.hostname, nx.module_status, "module_{}".format(module))
)
for fan in fans:
nx.fan = fan
if cfg.on and nx.fan_status != "ON":
continue
lines.append(fmt.format(ip, nx.hostname, nx.fan_status, "fan_{}".format(fan)))
return lines
def worker(device, vault, modules, fans):
ip = get_device_mgmt_ip(nb, device)
nx = NxapiLocatorLedStatus(vault.nxos_username, vault.nxos_password, ip, log)
nx.nxapi_init(cfg)
nx.refresh()
return collect_output(ip, nx, modules, fans)
def cfg_to_list(cfg_list, desc):
_list = list()
if cfg_list == None:
return _list
for item in re.split(",", str(cfg_list)):
if item == None:
continue
try:
_list.append(int(item))
except:
log.error("Exiting. Expected int() for {}. Got {}".format(desc, cfg_list))
log.error("Usage examples:")
log.error(" --{} 3".format(desc))
log.error(" --{} 1,2,4".format(desc))
exit(1)
return _list
cfg = get_parser()
modules = cfg_to_list(cfg.module, "module")
fans = cfg_to_list(cfg.fan, "fan")
log = get_logger(script_name, cfg.loglevel, "DEBUG")
vault = get_vault(cfg.vault)
vault.fetch_data()
nb = netbox(vault)
devices = get_device_list()
fmt = "{:<15} {:<18} {:<6} {:<12}"
print_header()
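# one worker thread per device so the NXAPI queries run concurrently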
executor = ThreadPoolExecutor(max_workers=len(devices))
futures = list()
for device in devices:
args = [device, vault, modules, fans]
futures.append(executor.submit(worker, *args))
print_output(futures)
|
allenrobel/nxapi-netbox
|
scripts/locator_led_status.py
|
locator_led_status.py
|
py
| 5,240 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "nxapi_netbox.args.args_cookie.ArgsCookie",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "nxapi_netbox.args.args_nxapi_tools.ArgsNxapiTools",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "nxapi_netbox.netbox.netbox_session.get_device_mgmt_ip",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "nxapi_netbox.nxapi.nxapi_locator_led.NxapiLocatorLedStatus",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "re.split",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "nxapi_netbox.general.log.get_logger",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "nxapi_netbox.vault.vault.get_vault",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "nxapi_netbox.netbox.netbox_session.netbox",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "concurrent.futures.ThreadPoolExecutor",
"line_number": 169,
"usage_type": "call"
}
] |
21569497560
|
#!/usr/bin/env python
""" Script to:
Extract 3D images from a 4D image and then extract one selected slice
from each of these 3D images and combine them as a gif.
"""
# Author: Bishesh Khanal <[email protected]>
# Asclepios INRIA Sophia Antipolis
import subprocess
import sys
import argparse as ag
import bish_utils as bu
#import time as tm
def get_input_options():
''' Command line interface, get user input options.
'''
parser = ag.ArgumentParser()
parser.add_argument('in_img', help='Filename of the input 4D image')
parser.add_argument('out_gif', help='Filename of the output file')
parser.add_argument('total_tpts', help='Total number of time points to '
'extract from the input image', type=int)
parser.add_argument('slice_axis', help='Slice axis: 0,1 or 2')
parser.add_argument(dest='slice_num', help='slice number to be extracted')
parser.add_argument('resize', help='resize to maximum possible size in each'
' direction, e.g. 400x400')
parser.add_argument('-time_unit', help='unit of time for display. E.g yrs'
'\n If none, time info not overlayed over the video')
parser.add_argument(
'-time_step', help='time between tpt1 and tpt2. Used only when '
'time_unit is also used. Default 1.0', type=float)
parser.add_argument(
'delay', help='time delay between frames in milliseconds')
parser.add_argument(
'-rotate', help='If given overrides the default rotation used for '
'proper orientation of the slice.')
parser.add_argument(
'-crop', help='convert -crop ops: If given crops 2D slices before '
'combining. wxh+x+y')
ops = parser.parse_args()
return ops
def main():
'''
Extract 3D images from a 4D image and then extract one selected slice
from each of these 3D images and combine them as a gif.
'''
ops = get_input_options()
#split 4d image to get all the 3D images:
tmp3d_basename, file_ext = 'tmp3DImage', '.nii.gz'
cmd = 'ImageMath 4 %s%s TimeSeriesDisassemble %s' % (
tmp3d_basename, file_ext, ops.in_img)
bu.print_and_execute(cmd)
#ImageMath 4 tmp3DImage.nii.gz TimeSeriesDisassemble ops.in_img
#convert (from ImageMagick)
#Executables that must exist:
#ImageMath
extract_slice = "/home/bkhanal/works/tools/marcoSliceExtractor/myImgSliceExtractor"
axis = ops.slice_axis
num = 0
#print('number of time steps: %s \n' % (str(ops.total_tpts)))
while num < ops.total_tpts:
#outputs/results of the executables
index = str(num+100) #ImageMath extracted slice names start from 100.
tmp3DImage = '%s%s%s' % (tmp3d_basename, index, file_ext)
tmp2DImage = 'slice%s.png' % (index,)
cmd = '%s %s %s %s %s' % (
extract_slice, tmp3DImage, axis, ops.slice_num, tmp2DImage)
bu.print_and_execute(cmd, False)
# Rotate the image for proper orientation.
if ops.rotate is None:
cmd = 'convert -rotate 180 %s %s' % (tmp2DImage, tmp2DImage)
else:
cmd = 'convert -rotate %s %s %s' % (ops.rotate, tmp2DImage, tmp2DImage)
bu.print_and_execute(cmd, False)
if ops.crop:
cmd = 'convert %s -crop %s +repage %s' % (tmp2DImage, ops.crop, tmp2DImage)
bu.print_and_execute(cmd, False)
# Write time-point info
if ops.time_unit is not None:
if ops.time_step is not None:
tpt = float(num) * ops.time_step
else:
tpt = num
cmd = ('convert %s -gravity SouthWest -fill orange -pointsize 12 '
'-annotate +0+0 "%s %s" %s' % (
tmp2DImage, str(tpt), ops.time_unit, tmp2DImage))
bu.print_and_execute(cmd, False)
#Delete individual 3D files.
bu.print_and_execute('rm ' + tmp3DImage, False)
#Go to next file
num += 1
#Now make the animation and delete individual 2D slices:
cmd = 'convert slice1*.png -resize %s -set delay %s %s' % (
ops.resize, ops.delay, ops.out_gif)
bu.print_and_execute(cmd)
bu.print_and_execute('rm slice1*.png', False)
if __name__ == "__main__":
main()
|
Inria-Asclepios/simul-atrophy
|
scripts/extractSliceVideoFrom4d.py
|
extractSliceVideoFrom4d.py
|
py
| 4,284 |
python
|
en
|
code
| 7 |
github-code
|
6
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "bish_utils.print_and_execute",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "bish_utils.print_and_execute",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "bish_utils.print_and_execute",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "bish_utils.print_and_execute",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "bish_utils.print_and_execute",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "bish_utils.print_and_execute",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "bish_utils.print_and_execute",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "bish_utils.print_and_execute",
"line_number": 101,
"usage_type": "call"
}
] |
22329982080
|
import imp
import discord
from discord.ext import commands
import json
import os
from os import listdir
from os.path import isfile, join
from datetime import datetime
import subprocess
from discordLevelingSystem import DiscordLevelingSystem
import aiosqlite
def micsid(ctx):
return ctx.author.id == 481377376475938826 or ctx.author.id == 624076054969188363
def log(log):
now = datetime.now()
timern = now.strftime("%d/%m/%Y %H:%M:%S")
with open('./other/log.txt', 'a') as f:
f.write('\n')
f.write(f"{timern} | {log}")
cogs = []
for i in os.listdir("cogs/"):
if i == "__pycache__":
pass
else:
print(i[:-3])
class BotMakerCommands(commands.Cog):
def __init__(self, client):
self.client = client
@commands.command()
@commands.check(micsid)
async def logs(self, ctx):
file = discord.File("./other/log.txt")
await ctx.author.send(file=file)
@commands.command()
@commands.check(micsid)
async def msgserver(self, ctx, id:int, *, message):
for guild in self.client.guilds:
if guild.id == id:
return await guild.text_channels[0].send(message)
await ctx.send("guild not found")
@commands.command()
@commands.check(micsid)
async def reloadall(self, ctx):
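        # rediscover every cog file on disk and reload each extension (Leveling is skipped)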
lst = [f for f in listdir("cogs/") if isfile(join("cogs/", f))]
no_py = [s.replace('.py', '') for s in lst]
startup_extensions = ["cogs." + no_py for no_py in no_py]
startup_extensions.remove("cogs.Leveling")
try:
for cogs in startup_extensions:
self.client.reload_extension(cogs)
await ctx.send("All Reloaded")
except Exception as e:
print(e)
log(e)
@commands.command(hidden = True)
@commands.check(micsid)
async def pull(self, ctx):
gitstuff = subprocess.run(["git", "pull"], capture_output=True).stdout
await ctx.send(gitstuff.decode())
log(gitstuff.decode())
@commands.command(help="Dms all server owners")
@commands.check(micsid)
async def dm_owners(self,ctx,*, msg):
await ctx.send("Sending...")
log(f"DMing all owners with {msg}")
        # predict roughly how long the DM sweep will take (~0.1 min per guild)
        mins = len(self.client.guilds) * 0.1
await ctx.send(f"Estimated time: {mins} minutes")
owners = []
for server in self.client.guilds:
tosend = server.owner
owners.append(tosend)
owners = list(set(owners))
for i in owners:
try:
await i.send(msg)
except:
await ctx.send(f"Counld not send to {i}")
await ctx.send("Done")
@commands.command()
@commands.check(micsid)
async def ghoastping(self,ctx,*,member:discord.Member):
for i in ctx.guild.channels:
try:
x = await i.send(f"{member.mention}")
await x.delete()
except:
print(f"Can't send message in {i}")
@commands.command(hidden = True)
@commands.is_owner()
async def clearlog(self,ctx):
file = discord.File("./other/log.txt")
await ctx.author.send(file=file)
dirs = 'other/'
for f in os.listdir(dirs):
os.remove(os.path.join(dirs, f))
dirs = 'tempstorage/'
for f in os.listdir(dirs):
os.remove(os.path.join(dirs, f))
await ctx.send("Cleared")
        log("Cleared at " + datetime.now().strftime("%d/%m/%Y %H:%M:%S"))
@commands.command(hidden = True)
@commands.check(micsid)
async def status(self, ctx):
gitstuff = subprocess.run(["git", "status"], capture_output=True).stdout
await ctx.send(gitstuff.decode())
log(gitstuff.decode())
@commands.command()
@commands.check(micsid)
async def load(self, ctx, extension):
self.client.load_extension(f"cogs.{extension}")
embed = discord.Embed(
title='Load', description=f'{extension} successfully loaded', color=0xff00c8)
await ctx.send(embed=embed)
@commands.command()
@commands.check(micsid)
async def unload(self, ctx, extension):
self.client.unload_extension(f"cogs.{extension}")
await ctx.send(f"The module '{extension}' has been unloaded successfully!")
@commands.command()
@commands.is_owner()
async def change_status(self, ctx, *, status):
status = status.replace("[[servers]]", str(len(self.client.guilds)))
await self.client.change_presence(activity=discord.Game(name=status))
await ctx.send(f"Status changed to {status}")
@commands.command()
@commands.is_owner()
async def commandlookup(self, ctx, command):
#check if command exists
        if self.client.get_command(command) is None:
            await ctx.send("Command not found")
            return
        #find the cog
        cog = None
        for i in self.client.cogs:
            if self.client.get_command(command) in self.client.get_cog(i).get_commands():
                cog = i
        await ctx.send(f"Cog: {cog}\nCommand: {command}")
#when a command is used, it will be logged
@commands.Cog.listener()
async def on_command(self, ctx):
#check if file exists
if os.path.isfile(f"databases/command_usage.db"):
async with aiosqlite.connect("databases/command_usage.db") as db:
#check if command is in database
async with db.execute("SELECT * FROM command_usage WHERE command = ?", (ctx.command.name,)) as cursor:
data = await cursor.fetchall()
#if command is not in database
if len(data) == 0:
await db.execute("INSERT INTO command_usage VALUES (?, ?, ?)", (ctx.command.name, 1, datetime.now().strftime("%d/%m/%Y %H:%M:%S")))
await db.commit()
#if command is in database
else:
await db.execute("UPDATE command_usage SET times_used = ?, last_used = ? WHERE command = ?", (data[0][1] + 1, datetime.now().strftime("%d/%m/%Y %H:%M:%S"), ctx.command.name))
await db.commit()
else:
async with aiosqlite.connect("databases/command_usage.db") as db:
await db.execute("CREATE TABLE command_usage (command TEXT, times_used INTEGER, last_used TEXT)")
await db.commit()
@commands.command()
@commands.check(micsid)
async def commandusage(self, ctx, command):
if os.path.isfile(f"databases/command_usage.db"):
async with aiosqlite.connect("databases/command_usage.db") as db:
async with db.execute("SELECT * FROM command_usage WHERE command = ?", (command,)) as cursor:
data = await cursor.fetchall()
if len(data) == 0:
await ctx.send("Command not found")
else:
embed = discord.Embed(title = "Command Usage", description = f"Command: {data[0][0]}\nTimes used: {data[0][1]}\nLast used: {data[0][2]}", color = 0xff00c8)
await ctx.send(embed = embed)
else:
await ctx.send("Command not found")
@commands.command()
@commands.check(micsid)
async def commandusagelist(self, ctx):
if os.path.isfile(f"databases/command_usage.db"):
async with aiosqlite.connect("databases/command_usage.db") as db:
async with db.execute("SELECT * FROM command_usage") as cursor:
data = await cursor.fetchall()
if len(data) == 0:
await ctx.send("No commands found")
else:
embed = discord.Embed(title = "Command Usage", description = "Command: Times used: Last used:", color = 0xff00c8)
for i in data:
embed.description += f"\n{i[0]}: {i[1]}: {i[2]}"
await ctx.send(embed = embed)
else:
await ctx.send("No commands found")
@commands.command()
@commands.is_owner()
async def server_invite(self, ctx, *, server):
guild = self.client.get_guild(int(server))
if guild == None:
await ctx.send("Server not found")
return
invite = await guild.channels[0].create_invite()
await ctx.send(invite)
@commands.command()
@commands.is_owner()
async def server_look_up(self, ctx, *, server):
guild = self.client.get_guild(int(server))
if guild == None:
await ctx.send("Server not found")
return
embed = discord.Embed(title = guild.name, description = f"ID: {guild.id}", color = 0xff00c8)
embed.add_field(name = "Owner", value = f"{guild.owner.name}#{guild.owner.discriminator}")
embed.add_field(name = "Members", value = guild.member_count)
embed.add_field(name = "Channels", value = len(guild.channels))
embed.add_field(name = "Roles", value = len(guild.roles))
embed.add_field(name = "Created at", value = guild.created_at.strftime("%d/%m/%Y %H:%M:%S"))
embed.add_field(name = "Owner ID", value = guild.owner.id)
try:
embed.set_thumbnail(url = guild.icon.url)
except:
pass
await ctx.send(embed = embed)
def setup(client):
client.add_cog(BotMakerCommands(client))
|
micfun123/Simplex_bot
|
cogs/micsid.py
|
micsid.py
|
py
| 9,632 |
python
|
en
|
code
| 24 |
github-code
|
6
|
[
{
"api_name": "datetime.datetime.now",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "os.listdir",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Cog",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "discord.File",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.check",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.check",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "os.listdir",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.check",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "subprocess.run",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.check",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.check",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "discord.Member",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.check",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "discord.File",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 120,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 123,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.is_owner",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "subprocess.run",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.check",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "discord.Embed",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 134,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.check",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 135,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 142,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.check",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 143,
"usage_type": "name"
},
{
"api_name": "discord.Game",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 148,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.is_owner",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 149,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 155,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.is_owner",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 156,
"usage_type": "name"
},
{
"api_name": "os.path.isfile",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 174,
"usage_type": "attribute"
},
{
"api_name": "aiosqlite.connect",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 181,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 185,
"usage_type": "name"
},
{
"api_name": "aiosqlite.connect",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Cog.listener",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Cog",
"line_number": 171,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 171,
"usage_type": "name"
},
{
"api_name": "os.path.isfile",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 196,
"usage_type": "attribute"
},
{
"api_name": "aiosqlite.connect",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "discord.Embed",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 193,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.check",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 194,
"usage_type": "name"
},
{
"api_name": "os.path.isfile",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 211,
"usage_type": "attribute"
},
{
"api_name": "aiosqlite.connect",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "discord.Embed",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 208,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.check",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 209,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 227,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.is_owner",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 228,
"usage_type": "name"
},
{
"api_name": "discord.Embed",
"line_number": 244,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 237,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.is_owner",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 238,
"usage_type": "name"
}
] |
8201488763
|
from flask import render_template, redirect, url_for
from flask_login import login_user, logout_user, current_user
from . import home
from ..models import User, Account, Role, Course
from ..forms import LoginForm, RegisterForm
from sha_training_app import db
import datetime
@home.route('/')
def homepage():
courses = Course.query.limit(5)
return render_template('home/index.html', courses=courses)
@home.route('/courses')
def course_listing():
courses = Course.query.all()
return render_template('home/courses.html', courses=courses, standalone=True)
@home.route('/register', methods=['GET', 'POST'])
def register():
register_form = RegisterForm(csrf_enabled=True)
# The user has submitted the form, let's make sure it's valid
if register_form.validate_on_submit():
# Create a new user from the form data
user = User(
username=register_form.username.data,
email=register_form.email.data,
password=register_form.password.data
)
role = Role.query.filter_by(role_id=1).first()
account = Account(
role_id=role.role_id,
date_joined=datetime.datetime.now(),
first_name=register_form.first_name.data,
last_name=register_form.last_name.data
)
user.account = account
db.session.add(user)
db.session.add(account)
db.session.commit()
# At this point the user has been registered and should
# have been sent a confirmation email
return render_template('home/registration_success.html')
# Show the user the registration form
return render_template('home/register.html', form=register_form)
@home.route('/login', methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('user.account'))
login_form = LoginForm(csrf_enabled=True)
if login_form.validate_on_submit():
user = User.query.filter_by(email=login_form.email.data).first()
if user is not None and user.verify_password(login_form.password.data):
login_user(user)
return redirect(url_for('user.account'))
return render_template('home/login.html', form=login_form)
@home.route('/logout')
def logout():
logout_user() # This should kill the session
return redirect(url_for('home.homepage'))
|
ScottishHD/training_site
|
sha_training_app/_home/views.py
|
views.py
|
py
| 2,385 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "models.Course.query.limit",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "models.Course.query",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "models.Course",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "models.Course.query.all",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "models.Course.query",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "models.Course",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "forms.RegisterForm",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "models.User",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "models.Role.query.filter_by",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "models.Role.query",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "models.Role",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "models.Account",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "sha_training_app.db.session.add",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "sha_training_app.db.session",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "sha_training_app.db",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "sha_training_app.db.session.add",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "sha_training_app.db.session",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "sha_training_app.db",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "sha_training_app.db.session.commit",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "sha_training_app.db.session",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "sha_training_app.db",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "flask_login.current_user.is_authenticated",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "flask_login.current_user",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "forms.LoginForm",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "models.User.query.filter_by",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "models.User.query",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "models.User",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "flask_login.login_user",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "flask_login.logout_user",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 76,
"usage_type": "call"
}
] |
24133449429
|
#!/usr/bin/env python
import argparse
import csv
import logging
import math
import sys
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from pylab import rcParams
def plot_bar(data_fh, target, xlabel, ylabel, zlabel, title, x_label, y_label, x_order, y_order, fig_width, fig_height, fontsize, xlabel_rotation, category, colours, stacked, z_annot):
'''
xlabel: groups on x axis
ylabel: colours
'''
logging.info('starting...')
import matplotlib.style
matplotlib.style.use('seaborn')
included = total = 0
results = {}
xvals = set()
yvals = set()
max_zval = 0.0
categories = {}
for row in csv.DictReader(data_fh, delimiter='\t'):
try:
included += 1
xval = row[xlabel] # group axis value
yval = row[ylabel] # sub-group axis value
xvals.add(xval)
yvals.add(yval)
zval = float(row[zlabel])
max_zval = max(max_zval, zval)
xy = '{},{}'.format(xval, yval)
results[xy] = zval
logging.debug('Added %s = %f', xy, zval)
if category is not None:
categories[xy] = row[category]
except:
logging.debug('Failed to include %s', row)
total += 1
logging.info('finished reading %i of %i records with max_zval %.2f', included, total, max_zval)
if len(results) == 0:
    logging.warning('No data to plot')
return
if x_order is None:
xvals = sorted(list(xvals)) # groups
else:
xvals = x_order # groups
if y_order is None:
yvals = sorted(list(yvals)) # sub-groups
else:
yvals = y_order
logging.debug('xvals %s yvals %s', xvals, yvals)
#fig, ax = plt.subplots()
#fig_width = min(18, max(9, len(xvals) * len(yvals)))
fig = plt.figure(figsize=(fig_width, fig_height))
rcParams.update({'font.size': fontsize})
ax = fig.add_subplot(111)
width = fig_width / len(xvals) / len(yvals)
ind = np.arange(len(xvals)) * fig_width / len(xvals) # the x locations for the groups
logging.info('ind is %s, width is %f fig_width is %f', ind, width, fig_width)
bottom = None
for idx in range(len(yvals)): # each yval
if stacked:
offset = 0
else:
offset = idx * width * 0.9 - (len(yvals) - 1) * width / 2
vals = [results['{},{}'.format(x, yvals[idx])] for x in xvals] # each xval with that yval
if bottom is None:
bottom = [0] * len(vals)
logging.debug('adding values %s for %s at %s', vals, yvals[idx], ind + offset)
if category is None:
if stacked and bottom is not None:
rects = ax.bar(ind + offset, vals, width * 0.85, label=yvals[idx], bottom=bottom)
else:
rects = ax.bar(ind + offset, vals, width * 0.85, label=yvals[idx])
else:
rects = ax.bar(ind + offset, vals, width * 0.85)
for rect, val, b in zip(rects, xvals, bottom):
height = rect.get_height()
if z_annot is None:
if height < 0.01:
annot = ''
#annot = '{:.3e}'.format(height)
else:
annot = '{:.2f}'.format(height)
else:
annot = z_annot.format(height)
if stacked: # for stacked, put in centre of box
ax.annotate(annot,
xy=(rect.get_x() + rect.get_width() / 2, height / 2 + b),
xytext=(0, 3), # use 3 points offset
textcoords="offset points", # in both directions
ha='center', va='bottom')
else: # non-stacked, put at top of box
ax.annotate(annot,
xy=(rect.get_x() + rect.get_width() / 2, height),
xytext=(0, 3), # use 3 points offset
textcoords="offset points", # in both directions
ha='center', va='bottom')
if category is not None:
label = '{} {}'.format(categories['{},{}'.format(val, yvals[idx])], yvals[idx])
rect.set_label(label)
if colours is not None:
for colour in colours:
cat, col = colour.split('=')
if cat == label:
rect.set_color(col)
if bottom is None:
bottom = vals
else:
bottom = [x[0] + x[1] for x in zip(bottom, vals)]
logging.debug('vals is %s bottom is %s', vals, bottom)
# Add some text for labels, title and custom x-axis tick labels, etc.
if y_label is not None:
ax.set_ylabel(y_label)
if x_label is not None:
ax.set_xlabel(x_label)
ax.set_title(title)
ax.set_xticks(ind)
ax.set_xticklabels(xvals, rotation=xlabel_rotation)
#ax.legend(loc='upper right')
# place legend at right based on https://stackoverflow.com/questions/10101700/moving-matplotlib-legend-outside-of-the-axis-makes-it-cutoff-by-the-figure-box/10154763#10154763
handles, labels = ax.get_legend_handles_labels()
labels_seen = set()
labels_u = []
handles_u = []
for handle, label in sorted(zip(handles, labels), key=lambda pair: pair[1]):
if label in labels_seen:
continue
labels_seen.add(label)
labels_u.append(label)
handles_u.append(handle)
lgd = ax.legend(handles_u, labels_u, loc='upper left', bbox_to_anchor=(1.01,1.0), borderaxespad=0)
lgd.get_frame().set_edgecolor('#000000')
#fig = plt.figure(figsize=(figsize, 1 + int(figsize * len(yvals) / len(xvals))))
#ax = fig.add_subplot(111)
logging.info('done processing %i of %i', included, total)
plt.tight_layout()
plt.savefig(target)
matplotlib.pyplot.close('all')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Plot a bar chart')
parser.add_argument('--x', required=True, help='x column name')
parser.add_argument('--y', required=True, help='y column name')
parser.add_argument('--z', required=True, help='z column name')
parser.add_argument('--z_annot', required=False, help='format for values (default is :.2f)')
parser.add_argument('--category', required=False, help='additional category column')
parser.add_argument('--colours', required=False, nargs='*', help='category colours')
parser.add_argument('--title', required=False, help='z column name')
parser.add_argument('--y_label', required=False, help='label on y axis')
parser.add_argument('--x_label', required=False, help='label on x axis')
parser.add_argument('--x_order', required=False, nargs='*', help='order of x axis')
parser.add_argument('--y_order', required=False, nargs='*', help='order of y axis')
parser.add_argument('--stacked', action='store_true', help='stack categories')
parser.add_argument('--verbose', action='store_true', help='more logging')
parser.add_argument('--target', required=False, default='plot.png', help='plot filename')
parser.add_argument('--height', required=False, type=float, default=8, help='height of plot')
parser.add_argument('--width', required=False, type=float, default=12, help='width of plot')
parser.add_argument('--fontsize', required=False, type=float, default=8, help='font size')
parser.add_argument('--x_label_rotation', required=False, default='horizontal', help='label rotation')
args = parser.parse_args()
if args.verbose:
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.DEBUG)
else:
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.INFO)
plot_bar(sys.stdin, args.target, args.x, args.y, args.z, args.title, args.x_label, args.y_label, args.x_order, args.y_order, args.width, args.height, args.fontsize, args.x_label_rotation, args.category, args.colours, args.stacked, args.z_annot)
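# A minimal invocation sketch (assumed TSV column names and output path; the
# script reads tab-separated data from stdin):
#   cat counts.tsv | python bar.py --x sample --y category --z count \
#     --title 'Counts per sample' --target counts.png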
|
supernifty/plotme
|
plotme/bar.py
|
bar.py
|
py
| 7,407 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "matplotlib.use",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "matplotlib.style.use",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "matplotlib.style",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "csv.DictReader",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "logging.warn",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "pylab.rcParams.update",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "pylab.rcParams",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 166,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 167,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 168,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 192,
"usage_type": "attribute"
},
{
"api_name": "logging.basicConfig",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 194,
"usage_type": "attribute"
},
{
"api_name": "sys.stdin",
"line_number": 196,
"usage_type": "attribute"
}
] |
44083406715
|
# -*- coding: utf-8 -*-
## Add uid to data gathered from qMp nodes in GuifiSants
## http://dsg.ac.upc.edu/qmpsu/index.php
## meshmon-format.py
## (c) Llorenç Cerdà-Alabern, May 2020.
## debug: import pdb; pdb.set_trace()
import json
cache = {}
graph = []
tabs = {}
def find_node_by_address(d, k, v):
"""
find address v in d[*]['addresses'][k]
k: ether, inet6, inet, inet6ll
"""
if v in cache:
return cache[v]
else:
for n in d.values():
if 'addresses' in n and k in n['addresses']:
for a in n['addresses'][k]:
if v == a:
cache.update({v: n})
return n
return None
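# A minimal lookup sketch (assumed data: d maps node keys to dicts carrying an
# 'addresses' entry; the link-local address below is hypothetical):
#   n = find_node_by_address(d, 'inet6ll', 'fe80::ae1f:6bff:fe2c:1')
#   if n: print(n['id'])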
def add_ids_to_link(d, links):
"""
"""
for l in links:
if 'llocalIp' in l:
n = find_node_by_address(d, 'inet6ll', l['llocalIp'])
if n: l.update({'id': n['id']})
def add_iwdump_to_l(d, links, ifces, w):
"""
"""
for i in w.values():
for m in i.keys():
n = find_node_by_address(d, 'ether', m)
if n and 'id' in n:
for l in links:
if 'id' in l and l['id'] == n['id'] and l['viaDev'] in ifces \
and ifces[l['viaDev']] == 'wireless':
l.update({'iwdump': i[m]})
break
def get_interfaces(ifces):
"""
"""
res = {}
for i in ifces:
if 'devName' in i and 'type' in i:
res.update({i['devName']: i['type']})
return res
def add_links(d, ng, n):
"""
"""
i = get_interfaces(n['bmx6']['interfaces'])
ng.update({'interfaces': i})
l = n['bmx6']['links']
add_ids_to_link(d, l)
if 'iwdump' in n:
add_iwdump_to_l(d, l, i, n['iwdump'])
ng.update({'links': l})
def add_net_dev(ng, nd):
"""
"""
res = {}
for k,v in nd.items():
if k in ng['interfaces']: res.update({k: v})
if res: ng.update({'net_dev': res})
def build_graph(d):
"""
build a graph with the data gathered from the mesh in dict d
"""
global graph ; graph = [] # initialize
global cache ; cache = {} # initialize
for i in sorted(d.keys(), key=lambda k: d[k]['id']):
c = {}
for w in "loadavg cpu_info cpu_meminfo hostname uid id uptime processes cpu_stat brctl vmstat".split(' '):
if w in d[i]:
c.update({w: d[i][w]})
c.update({'ipv6': i})
graph.append(c)
if 'bmx6' in d[i] and 'interfaces' in d[i]['bmx6'] and 'links' in d[i]['bmx6']:
add_links(d, graph[-1], d[i])
if 'net_dev' in d[i]: add_net_dev(graph[-1], d[i]['net_dev'])
def si2f(x):
n = x.find('K')
if(n >= 0):
return float(x[:n]+'e3')
n = x.find('M')
if(n >= 0):
return float(x[:n]+'e6')
n = x.find('G')
if(n >= 0):
return float(x[:n]+'e9')
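# A minimal usage sketch; values without a K/M/G suffix fall through and
# return None:
#   si2f('257K')  # -> 257000.0
#   si2f('3M')    # -> 3000000.0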
def build_rt(d):
"""
build rt with the data gathered from the mesh in dict d
"""
global tabs # initialize
tabs = {}
num_nodes = len(d) ;
rt =[[None] * num_nodes for n in range(0,num_nodes)]
adj =[[0] * num_nodes for n in range(0,num_nodes)]
metric = [[None] * num_nodes for n in range(0,num_nodes)]
uid = [None] * num_nodes
for i in sorted(d.keys(), key=lambda k: d[k]['id']):
nid = d[i]['id']
uid[nid] = d[i]['uid']
if 'originators' in d[i]['bmx6']:
for o in d[i]['bmx6']['originators']:
if 'primaryIp' in o:
n = find_node_by_address(d, 'inet6', o['primaryIp'])
if n:
if 'viaIp' in o:
via = find_node_by_address(d, 'inet6ll', o['viaIp'])
if via:
rt[nid][n['id']] = via['id']
if n['id'] == via['id']: adj[nid][n['id']] = 1
if 'metric' in o:
metric[nid][n['id']] = si2f(o['metric'])
tabs.update({'uid': uid})
tabs.update({'rt': rt})
tabs.update({'adj': adj})
tabs.update({'metric': metric})
tabs.update({'out_degree': [sum(x) for x in adj]})
tabs.update({'in_degree': [sum(x) for x in zip(*adj)]})
def show(i):
""
""
print(json.dumps(graph[i], indent=2))
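# A minimal end-to-end sketch (assumed d: dict of gathered node data keyed by
# primary IPv6 address):
#   build_graph(d)   # fills the module-level `graph` list
#   build_rt(d)      # fills `tabs` with rt/adj/metric and degree tables
#   show(0)          # dump the first node as indented JSON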
# Local Variables:
# mode: python
# coding: utf-8
# python-indent-offset: 4
# python-indent-guess-indent-offset: t
# End:
|
llorenc/meshmon-parser
|
meshmon-format.py
|
meshmon-format.py
|
py
| 4,474 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "json.dumps",
"line_number": 143,
"usage_type": "call"
}
] |
19108144876
|
import colors
import info
from icon_path import icon_path
from tooltip import Tooltip
from scan_media_window import ScanMediaWindow
from ingest_window import IngestWindow
from open_window import OpenWindow
from info_window import InfoWindow
try:
import tkinter
except ImportError:
import Tkinter as tkinter
class MenuView():
"""Provides a frame containing munu-level control buttons.
Attributes:
frame(Frame): the containing frame for this view.
"""
def __init__(self, master, open_manager, scan_statistics_window, preferences):
"""Args:
master(a UI container): Parent.
          open_manager(OpenManager): Able to open a new dataset.
"""
# open manager
self._open_manager = open_manager
self._scan_statistics_window = scan_statistics_window
self._preferences = preferences
# make the containing frame
self.frame = tkinter.Frame(master)
# make the frame for the control buttons
button_frame = tkinter.Frame(self.frame, bg=colors.BACKGROUND)
button_frame.pack(side=tkinter.TOP, anchor="w")
# open button
self._open_icon = tkinter.PhotoImage(file=icon_path("open"))
open_button = tkinter.Button(button_frame,
image=self._open_icon, command=self._handle_open,
bg=colors.BACKGROUND,
activebackground=colors.ACTIVEBACKGROUND,
highlightthickness=0)
open_button.pack(side=tkinter.LEFT)
Tooltip(open_button, "Open scanned output")
# scan statistics button
self._scan_statistics_icon = tkinter.PhotoImage(file=icon_path("view_scan_statistics"))
scan_statistics_button = tkinter.Button(button_frame,
image=self._scan_statistics_icon,
command=self._handle_scan_statistics_window,
bg=colors.BACKGROUND,
activebackground=colors.ACTIVEBACKGROUND,
highlightthickness=0)
scan_statistics_button.pack(side=tkinter.LEFT, padx=(0,8))
Tooltip(scan_statistics_button, "Show scan statistics")
# ingest button
self._ingest_icon = tkinter.PhotoImage(file=icon_path("ingest"))
ingest_button = tkinter.Button(button_frame, image=self._ingest_icon,
command=self._handle_ingest,
bg=colors.BACKGROUND,
activebackground=colors.ACTIVEBACKGROUND,
highlightthickness=0)
ingest_button.pack(side=tkinter.LEFT)
Tooltip(ingest_button, "Ingest files into a\nnew hashdb database")
# scan button
self._scan_icon = tkinter.PhotoImage(file=icon_path("scan"))
scan_button = tkinter.Button(button_frame, image=self._scan_icon,
command=self._handle_scan,
bg=colors.BACKGROUND,
activebackground=colors.ACTIVEBACKGROUND,
highlightthickness=0)
scan_button.pack(side=tkinter.LEFT, padx=(0,8))
Tooltip(scan_button, "Scan a media image")
# info button
self._info_icon = tkinter.PhotoImage(file=icon_path(
"info"))
info_button = tkinter.Button(button_frame,
image=self._info_icon,
command=self._handle_info,
bg=colors.BACKGROUND,
activebackground=colors.ACTIVEBACKGROUND,
highlightthickness=0)
info_button.pack(side=tkinter.LEFT)
Tooltip(info_button, "About SectorScope %s" % info.VERSION)
def _handle_open(self):
OpenWindow(self.frame, self._open_manager)
def _handle_scan_statistics_window(self):
self._scan_statistics_window.show()
def _handle_ingest(self):
IngestWindow(self.frame)
# IngestWindow(self.frame, source_dir='/home/bdallen/KittyMaterial', hashdb_dir='/home/bdallen/Kitty/zzki.hdb')
def _handle_scan(self):
ScanMediaWindow(self.frame)
# ScanMediaWindow(self.frame, media='/home/bdallen/Kitty/jo-favorites-usb-2009-12-11.E01', hashdb_dir='/home/bdallen/Kitty/KittyMaterial.hdb', output_file='/home/bdallen/Kitty/zz_jo.json')
def _handle_info(self):
InfoWindow(self.frame)
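# A minimal construction sketch (assumed collaborators; a tkinter root plus
# the manager objects this view expects):
#   root = tkinter.Tk()
#   view = MenuView(root, open_manager, scan_statistics_window, preferences)
#   view.frame.pack()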
|
NPS-DEEP/SectorScope
|
python/menu_view.py
|
menu_view.py
|
py
| 4,423 |
python
|
en
|
code
| 11 |
github-code
|
6
|
[
{
"api_name": "Tkinter.Frame",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "Tkinter.Frame",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "colors.BACKGROUND",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "Tkinter.TOP",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "Tkinter.PhotoImage",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "icon_path.icon_path",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "Tkinter.Button",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "colors.BACKGROUND",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "colors.ACTIVEBACKGROUND",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "Tkinter.LEFT",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "tooltip.Tooltip",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "Tkinter.PhotoImage",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "icon_path.icon_path",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "Tkinter.Button",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "colors.BACKGROUND",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "colors.ACTIVEBACKGROUND",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "Tkinter.LEFT",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "tooltip.Tooltip",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "Tkinter.PhotoImage",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "icon_path.icon_path",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "Tkinter.Button",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "colors.BACKGROUND",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "colors.ACTIVEBACKGROUND",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "Tkinter.LEFT",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "tooltip.Tooltip",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "Tkinter.PhotoImage",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "icon_path.icon_path",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "Tkinter.Button",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "colors.BACKGROUND",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "colors.ACTIVEBACKGROUND",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "Tkinter.LEFT",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "tooltip.Tooltip",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "Tkinter.PhotoImage",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "icon_path.icon_path",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "Tkinter.Button",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "colors.BACKGROUND",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "colors.ACTIVEBACKGROUND",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "Tkinter.LEFT",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "tooltip.Tooltip",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "info.VERSION",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "open_window.OpenWindow",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "ingest_window.IngestWindow",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "scan_media_window.ScanMediaWindow",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "info_window.InfoWindow",
"line_number": 108,
"usage_type": "call"
}
] |
71765759867
|
"""
REPSVM Agresores v3, pydantic schemas
"""
from pydantic import BaseModel
from lib.schemas_base import OneBaseOut
class RepsvmAgresorOut(BaseModel):
"""Esquema para entregar agresores"""
id: int | None
distrito_id: int | None
distrito_clave: str | None
distrito_nombre: str | None
distrito_nombre_corto: str | None
consecutivo: int | None
delito_generico: str | None
delito_especifico: str | None
nombre: str | None
numero_causa: str | None
pena_impuesta: str | None
observaciones: str | None
sentencia_url: str | None
tipo_juzgado: str | None
tipo_sentencia: str | None
class Config:
"""SQLAlchemy config"""
orm_mode = True
class OneRepsvmAgresorOut(RepsvmAgresorOut, OneBaseOut):
"""Esquema para entregar un agresor"""
|
PJECZ/pjecz-plataforma-web-api-new
|
plataforma_web/v3/repsvm_agresores/schemas.py
|
schemas.py
|
py
| 824 |
python
|
es
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pydantic.BaseModel",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "lib.schemas_base.OneBaseOut",
"line_number": 34,
"usage_type": "name"
}
] |
7047537895
|
# import cv2
#
# videoCapture = cv2.VideoCapture("/home/haoyu/yuhao_video/a827.avi")
#
#
# # fps = videoCapture.get()
# # size = (int(videoCapture.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)),
# # int(videoCapture.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)))
# #
#
# # videoWriter = cv2.VideoWriter('./data/video_plane.avi',)
#
# print(111)
#
# success, frame = videoCapture.read()
# num=0
# while 1:
# # cv2.imshow("Oto Video", frame) #
# # cv2.waitKey(1000 / int(fps)) #
# # videoWriter.write(frame) #
# #
# cv2.imshow("imgs", frame) #
# cv2.waitKey(1) #
# # videoWriter.write(frame) #
# # if num%2==0:
# # cv2.imwrite('./imgs/{0}.jpg'.format(num), frame)
# num+=1
# success, frame = videoCapture.read() #
#
import tensorflow as tf
import matplotlib.pyplot as plt
import time
import PIL.Image as Image
import numpy as np
import os
label_to_colours = {0: [0, 0,0],
1: [128,0,0],
2: [ 0 ,28 ,0 ],
3: [128 ,128 ,0 ]
}
#
def class_to_img(input):
new_tensor = input[:, :, :, [0]]
# new_tensor=np.expand_dims(new_tensor,axis=-1)
image_rgb = np.repeat(new_tensor, 3, axis=-1)
for num in range(len(input)):
shape=np.shape(input[num])
for i in range(shape[0]):
for j in range(shape[1]):
cls_max=np.argmax(input[num][i][j] ,axis=0)
image_rgb[num][i][j]=label_to_colours[cls_max]
# print(cls_max)
return image_rgb
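# A minimal usage sketch (assumed input: per-pixel class scores shaped
# [batch, height, width, num_classes]):
#   rgb = class_to_img(scores)  # -> [batch, height, width, 3] colour masks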
# detector = Detector()0006
# path = "/home/haoyu/data_tracking_image_2/testing/image_02/0014"
path = "/home/llye/Desktop/imgcrop-ok/"#数据集合的目录
# path="../imgs22"
all_abs = []
for img_name in os.listdir(path):
abs_img = os.path.join(path, img_name)
all_abs.append(abs_img)
sort_abs_imgs = np.sort(all_abs)
print(sort_abs_imgs)
num = 0
globals_imgs_np=[]
for one_img in sort_abs_imgs:
with Image.open(one_img) as im:
num += 1
print(num)
        ################# resize to the network input size
image_resize = im.resize((128, 128))
im_np = np.array(image_resize)
globals_imgs_np.append(im_np)
f, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 8))
plt.ion()
with tf.Graph().as_default():
output_graph_def = tf.GraphDef()
# output_graph_path = "../pb/road_old.pb"
output_graph_path = "./lights_4cls.pb"
# output_graph_path = "./road_t_bn_5w.pb"
# output_graph_path = 'netsmodel/combined_modelok_pnet.pb'
    # location of the saved (frozen) graph file
with open(output_graph_path, "rb") as f:
output_graph_def.ParseFromString(f.read())
tf.import_graph_def(output_graph_def, name="")
# with tf.Session() as sess:
with tf.Session().as_default() as sess:
# print(a.eval())
# print(b.eval(session=sess))
sess.run(tf.global_variables_initializer())
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
input_x = sess.graph.get_tensor_by_name("Placeholder:0")
# output = sess.graph.get_tensor_by_name("generator/BatchNorm_16/FusedBatchNorm:0")
output = sess.graph.get_tensor_by_name("generator/add_10:0")
            # tensor name recorded when the graph was saved; ':0' selects the
            # output tensor of that op
# for im_np in globals_imgs_np:
# print(im_np)
# # plt.clf()
# a = time.time()
# # pre_img = sess.run(output, {input_x: [np.array(image) / 255 - 0.5]})
# pre_img = sess.run(output, {input_x: [im_np/255-0.5]})
#
#
# ccc = np.argmax(pre_img[0], axis=2)
# aaa = time.time()
#
# ddd=np.multiply(im_np[:,:,2], ccc)
# # image = im_np
# ax2.imshow(ddd.astype(np.uint8))
# ax1.imshow(im_np.astype(np.uint8))
# plt.pause(0.02)
# img1=ax1.imshow(im_np.astype(np.uint8))
# img2=ax2.imshow(im_np.astype(np.uint8))
num=0
for im_np in globals_imgs_np[0:]:
# print(im_np)
# plt.clf()
a = time.time()
# pre_img = sess.run(output, {input_x: [np.array(image) / 255 - 0.5]})
aa=time.time()
pre_img = sess.run(output, {input_x: [im_np/255-0.5]})
print(time.time()-aa)
# output.eval(session=sess,input_x: [im_np/255-0.5])
ccc = np.argmax(pre_img, axis=1)
aaa = time.time()
print(pre_img)
num+=1
if ccc==0:
print("...............红色......................................")
r_img=Image.fromarray(np.uint8(im_np))
r_img.save("/home/llye/Desktop/red/{0}.jpg".format(num))
if ccc==1:
print(".................................绿色.........................................")
r_img = Image.fromarray(np.uint8(im_np))
r_img.save("/home/llye/Desktop/green/{0}.jpg".format(num))
if ccc==2:
print("....................................................黄色......................................")
r_img = Image.fromarray(np.uint8(im_np))
r_img.save("/home/llye/Desktop/yellow/{0}.jpg".format(num))
if ccc == 3:
print("..........................................................................................")
r_img = Image.fromarray(np.uint8(im_np))
r_img.save("/home/llye/Desktop/other/{0}.jpg".format(num))
# ddd=np.multiply(im_np[:,:,2], ccc)
# image = im_np
# ax2.imshow(ddd.astype(np.uint8))
# ax1.imshow(im_np.astype(np.uint8))
# img1.set_data(im_np.astype(np.uint8))
# img2.set_data(ddd.astype(np.uint8))
# plt.pause(2)
plt.clf()
# import cv2
#
# cap = cv2.VideoCapture("./2222.mp4")
# print(cap)
#
#
# success, photo = cap.read()
# print(photo)
# while True:
# # cv2.waitKey(1) #
# #
# photo = cv2.resize(photo, (256, 540), fx=0.5, fy=0.5)
# # print(np.shape(photo))
# # aaa=pnet_detect(photo)
# # b, g, r = cv2.split(photo)
# # img = cv2.merge([r, g, b])
# # im = Image.fromarray(img, "RGB")
#
# # boxes = detector.detect(im)
# # for box in boxes:
# # x1 = int(box[0])
# # y1 = int(box[1])
# # x2 = int(box[2])
# # y2 = int(box[3])
# # w = x2 - x1
# # h = y2 - y1
# # cv2.rectangle(photo, (x1, y1), (x2, y2), (0, 0, 255), 1)
#
# cv2.imshow("capture", photo)
# success, photo = cap.read()
# if cv2.waitKey(100) & 0xFF == ord('q'):
# break
|
ylltest/myscripts-github
|
traffic_lights/new_pil_pd.py
|
new_pil_pd.py
|
py
| 6,966 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.repeat",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "numpy.sort",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ion",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "tensorflow.Graph",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "tensorflow.GraphDef",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "tensorflow.import_graph_def",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "tensorflow.Session",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "tensorflow.global_variables_initializer",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "tensorflow.train.Coordinator",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.train.start_queue_runners",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "PIL.Image.fromarray",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 148,
"usage_type": "name"
},
{
"api_name": "numpy.uint8",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "PIL.Image.fromarray",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 152,
"usage_type": "name"
},
{
"api_name": "numpy.uint8",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "PIL.Image.fromarray",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 156,
"usage_type": "name"
},
{
"api_name": "numpy.uint8",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "PIL.Image.fromarray",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 160,
"usage_type": "name"
},
{
"api_name": "numpy.uint8",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.clf",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 171,
"usage_type": "name"
}
] |
9304520532
|
from rest_framework.generics import GenericAPIView
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.permissions import BasePermission
from rest_framework import status
from game.serializers import GameSerializer, TileSerializer, NextMoveSerializer
from game.models import Tile, Game
from game.node import Node
from game.algorithm import Minimax
from game.heuristics import HeuristicSimpleTreat
from game.rules import GameRules
from game.analyzer import Analyzer
from game.internal_types import TileXY
class TilePermission(BasePermission):
def has_permission(self, request, view) -> bool:
serializer = TileSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
game = Game.objects.get(pk=serializer.data["game_id"])
player = serializer.data["player"]
node = Node.from_game(game, player)
return GameRules().check_open_threes(node, TileXY.from_dict(serializer.data))
class GameView(GenericAPIView):
serializer_class = GameSerializer
def post(self, request):
serializer = self.serializer_class(data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(serializer.data, status.HTTP_201_CREATED)
class TileView(GenericAPIView):
serializer_class = TileSerializer
permission_classes = (TilePermission,)
@staticmethod
def _delete_tiles_by_captures(game, player, captures):
for capture in captures:
Tile.objects.filter(game=game, x_coordinate=capture[0].x, y_coordinate=capture[0].y).delete()
Tile.objects.filter(game=game, x_coordinate=capture[1].x, y_coordinate=capture[1].y).delete()
if player == game.player_1:
game.captures_o += 1
game.save()
elif player == game.player_2:
game.captures_x += 1
game.save()
def post(self, request):
serializer = self.serializer_class(data=request.data)
serializer.is_valid(raise_exception=True)
tile = serializer.save()
game = tile.game
player = game.player_1 if tile.player == game.player_2 else game.player_2
node = Node.from_game(game=game, player=player)
captures = node.find_captures_to_delete(tile_xy=TileXY.from_serializer(tile))
node.update_from_captures(captures)
self._delete_tiles_by_captures(game, player, captures)
winner = GameRules().deeper_winner_check(node)
tiles = Tile.objects.filter(game=tile.game)
tiles_serializer = self.serializer_class(instance=tiles, many=True)
return Response(
{
"tiles": tiles_serializer.data,
"captures": {
'x': game.captures_x,
'o': game.captures_o,
},
"winner": winner,
},
status.HTTP_201_CREATED,
)
class NextMoveView(APIView):
serializer_class = NextMoveSerializer
    def get(self, request, game_id: int, player: str):  # TODO: validate that it is this user's turn
serializer = self.serializer_class(data={"game": game_id, "player": player})
serializer.is_valid(raise_exception=True)
game = Game.objects.get(pk=game_id)
Analyzer.refresh()
value, chosen_node = self._get_move(game, player)
# self._print_logs(value, chosen_node)
return Response(
{
'coordinates': chosen_node.new_move if chosen_node else (9, 9),
'time': Analyzer.get(Analyzer.ALL_TIME),
},
status.HTTP_200_OK
)
@Analyzer.update_time(Analyzer.ALL_TIME)
def _get_move(self, game, player):
node = Node.from_game(game, player)
minimax = Minimax(HeuristicSimpleTreat())
value, chosen_node = minimax.calculate_minimax(node, 2)
self._print_logs(value, node)
return value, chosen_node
@staticmethod
def _print_logs(value: float, node: Node):
if not node:
return
# node.print_children(0)
Analyzer.print_results()
print(value)
|
earlyche/gomoku
|
backend/game/views.py
|
views.py
|
py
| 4,210 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "rest_framework.permissions.BasePermission",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "game.serializers.TileSerializer",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "game.serializers",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "game.models.Game.objects.get",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "game.models.Game.objects",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "game.models.Game",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "game.node.Node.from_game",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "game.serializers",
"line_number": 24,
"usage_type": "argument"
},
{
"api_name": "game.node.Node",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "game.rules.GameRules",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "game.internal_types.TileXY.from_dict",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "game.internal_types.TileXY",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "rest_framework.generics.GenericAPIView",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "game.serializers.GameSerializer",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_201_CREATED",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "rest_framework.generics.GenericAPIView",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "game.serializers.TileSerializer",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "game.models.Tile.objects.filter",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "game.models.Tile.objects",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "game.models.Tile",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "game.serializers",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "game.models.Tile.objects.filter",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "game.models.Tile.objects",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "game.models.Tile",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "game.serializers",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "game.serializers.player_1",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "game.serializers",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "game.serializers.captures_o",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "game.serializers",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "game.serializers.save",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "game.serializers",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "game.serializers.player_2",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "game.serializers",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "game.serializers.captures_x",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "game.serializers",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "game.serializers.save",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "game.serializers",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "game.serializers",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "game.serializers.player_2",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "game.serializers",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "game.serializers.player_1",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "game.node.Node.from_game",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "game.node.Node",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "game.serializers",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "game.internal_types.TileXY.from_serializer",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "game.internal_types.TileXY",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "game.serializers",
"line_number": 67,
"usage_type": "argument"
},
{
"api_name": "game.rules.GameRules",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "game.models.Tile.objects.filter",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "game.models.Tile.objects",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "game.models.Tile",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "game.serializers.captures_x",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "game.serializers",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "game.serializers.captures_o",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "game.serializers",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "rest_framework.status.HTTP_201_CREATED",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "rest_framework.views.APIView",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "game.serializers.NextMoveSerializer",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "game.serializers",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "game.models.Game.objects.get",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "game.models.Game.objects",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "game.models.Game",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "game.analyzer.Analyzer.refresh",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "game.analyzer.Analyzer",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "game.serializers",
"line_number": 95,
"usage_type": "argument"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "game.analyzer.Analyzer.get",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "game.analyzer.Analyzer",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "game.analyzer.Analyzer.ALL_TIME",
"line_number": 101,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status.HTTP_200_OK",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "game.node.Node.from_game",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "game.serializers",
"line_number": 108,
"usage_type": "argument"
},
{
"api_name": "game.node.Node",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "game.algorithm.Minimax",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "game.heuristics.HeuristicSimpleTreat",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "game.analyzer.Analyzer.update_time",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "game.analyzer.Analyzer",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "game.analyzer.Analyzer.ALL_TIME",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "game.node.Node",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "game.analyzer.Analyzer.print_results",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "game.analyzer.Analyzer",
"line_number": 119,
"usage_type": "name"
}
] |
1826616432
|
import cv2
import matplotlib.pyplot as plt
import numpy as np
from skimage.measure import label
from skimage.io import imread, imshow
def read_image(url):
imagem = cv2.cvtColor(imread(url), cv2.COLOR_RGB2HSV)
return imagem
def apply_mask(imagem):
x = imagem.shape[0]
y = imagem.shape[1]
mask = np.zeros((x+1,y+1))
for i in range(0,x):
for j in range(0,y):
h,s,v = imagem[i,j,:]
if (0 <= h <= 20) and (25 <= s <= 180):
mask[i,j] = 1
return mask,x,y
def opening_closing(mask):
kernel = np.ones((18,18),np.uint8)
opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)
return closing
def label_image(closing,x,y):
label_image = label(closing)
flat_label_image = label_image.flatten()
    # number of labelled regions
num_regioes = max(flat_label_image)
regioes_dict = {}
    # ignore label 0 (the background)
for i in range(1,int(num_regioes)+1):
regioes_dict[str(i)] = len(np.where(flat_label_image == i)[0])
lista_chaves = list(regioes_dict.keys())
lista_valores = list(regioes_dict.values())
position = lista_valores.index(max(lista_valores))
maior_regiao_indice = lista_chaves[position]
area_imagem = x*y
area_regiao = regioes_dict[maior_regiao_indice]
porcentagem_ocupada = (area_regiao/area_imagem)*100
return num_regioes,porcentagem_ocupada
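# A minimal end-to-end sketch (assumed image path):
#   imagem = read_image('photo.jpg')
#   mask, x, y = apply_mask(imagem)
#   closing = opening_closing(mask)
#   num_regioes, porcentagem = label_image(closing, x, y)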
|
EricaFer/Nudity-Detection
|
utils/preprocessing.py
|
preprocessing.py
|
py
| 1,487 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "cv2.cvtColor",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "skimage.io.imread",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_RGB2HSV",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "cv2.morphologyEx",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "cv2.MORPH_OPEN",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "cv2.morphologyEx",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "cv2.MORPH_CLOSE",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "skimage.measure.label",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 48,
"usage_type": "call"
}
] |
43449602370
|
#!/usr/bin/env python3
import re
import json
import urllib.request
import pymysql.cursors
def ipToCountry(ip):
url = 'http://api.ipstack.com/' + ip + '?access_key=dfe38edcd4541577119d91e7053a584a'
data = urllib.request.urlopen(url).read().decode("utf-8")
json_data = json.loads(data)
if not json_data['country_name'] is None:
return json_data['country_name']
return 'none'
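# A minimal usage sketch (hypothetical IP; performs a live HTTP lookup against
# ipstack, so it needs network access and a valid access key):
#   ipToCountry('8.8.8.8')  # -> e.g. 'United States', or 'none' on a miss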
f = open('logs.txt', 'r')
#f = open('logs_lite.txt', 'r')
users = {}
product_categories = []
carts = {}
types_action = []
actions = []
users_cart_pay = []
users_products = {}
print("Processed rows:")
i = 1
for line in f:
date = re.search(r'\d{4}-\d{2}-\d{2}', line).group(0)
time = re.search(r'\d{2}:\d{2}:\d{2}', line).group(0)
action_name = re.search(r'\[\w{8}\]', line).group(0)[1:-1]
ip = re.search(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}', line).group(0)
buf = re.search(r'(ttom.com).+', line).group(0)
user_action = re.sub(r'(ttom.com/)', '', buf)
type_action = 'none'
action = {}
action['category'] = 'none'
if not ip in users:
users[ip] = ipToCountry(ip)
users_products[ip] = []
if not user_action or re.match(r'pay\?', user_action):
type_action = "other"
elif re.match(r'cart\?', user_action):
type_action = "cart"
buf = re.search(r'(cart_id=).+', user_action).group(0)
cart_id = re.sub(r'(cart_id=)', '', buf)
if not cart_id in carts:
carts[cart_id] = 0
user_cart_pay = {}
user_cart_pay['user_cart'] = users_products.pop(ip)
user_cart_pay['cart_id'] = cart_id
user_cart_pay['ip'] = ip
users_cart_pay.append(user_cart_pay)
elif re.match(r'success_pay_', user_action):
type_action = "success_pay"
buf = re.search(r'(success_pay_).+', user_action).group(0)
cart_id = re.sub(r'(success_pay_)', '', buf)[:-1]
carts[cart_id] = 1
    elif user_action.count('/') == 1:
type_action = "category"
category = user_action[:-1]
if not category in product_categories:
product_categories.append(category)
action['category'] = category
    elif user_action.count('/') == 2:
type_action = "product"
category = re.split(r'/', user_action)[0]
if not category in product_categories:
product_categories.append(category)
if not ip in users_products:
users_products[ip] = []
if not category in users_products[ip]:
users_products[ip].append(action_name)
action['category'] = category
if not type_action in types_action:
types_action.append(type_action)
action['date'] = date
action['time'] = time
action['ip'] = ip
action['type_action'] = type_action
action['name'] = action_name
actions.append(action)
print('Read row #: ' + format(i))
i = i + 1
f.close()
connection = pymysql.connect(host='localhost',
user='root',
password='',
db='logs',
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
try:
print("Table 'user': Adding data ...")
i = 1
with connection.cursor() as cursor:
for key, value in users.items() :
try:
sql = "INSERT INTO `user` (`ip`, `country`) VALUES (%s, %s)"
cursor.execute(sql, (key, value))
connection.commit()
print("Table 'user': Inserted row #: " + format(i))
i = i + 1
except pymysql.err.IntegrityError as err:
if 'Duplicate entry' in format(err) :
print("Warning: Duplicate: {}".format(err))
else :
raise pymysql.err.IntegrityError(err)
print("Table 'user': Success!")
print("Table 'product_category': Adding data...")
i = 1
with connection.cursor() as cursor:
for value in product_categories :
try:
sql = "INSERT INTO `product_category` (`name`) VALUES (%s)"
cursor.execute(sql, (value))
connection.commit()
print("Table 'product_category': Inserted row #: " + format(i))
i = i + 1
except pymysql.err.IntegrityError as err:
if 'Duplicate entry' in format(err) :
print("Warning: Duplicate: {}".format(err))
else :
raise pymysql.err.IntegrityError(err)
print("Table 'product_category': Success!")
print("Table 'action_type': Adding data...")
i = 1
with connection.cursor() as cursor:
for value in types_action :
try:
sql = "INSERT INTO `action_type` (`name`) VALUES (%s)"
cursor.execute(sql, (value))
connection.commit()
print("Table 'action_type': Inserted row #: " + format(i))
i = i + 1
except pymysql.err.IntegrityError as err:
if 'Duplicate entry' in format(err) :
print("Warning: Duplicate: {}".format(err))
else :
raise pymysql.err.IntegrityError(err)
print("Table 'action_type': Success!")
print("Table 'cart': Adding data...")
i = 1
with connection.cursor() as cursor:
for key, value in carts.items() :
try:
sql = "INSERT INTO `cart` (`id_cart`, `success_pay_flag`) VALUES (%s, %s)"
                if value == 1:
cursor.execute(sql, (key, '1'))
else :
cursor.execute(sql, (key, '0'))
connection.commit()
print("Table 'cart': Inserted row #: " + format(i))
i = i + 1
except pymysql.err.IntegrityError as err:
if 'Duplicate entry' in format(err) :
print("Warning: Duplicate: {}".format(err))
else :
raise pymysql.err.IntegrityError(err)
print("Table 'cart': Success!")
print("Table 'action': Adding data...")
i = 1
with connection.cursor() as cursor:
for action in actions :
sql = "SELECT `id` FROM `user` WHERE `ip`=%s"
cursor.execute(sql, (action['ip']))
_user = cursor.fetchone()['id']
sql = "SELECT `id` FROM `action_type` WHERE `name`=%s"
cursor.execute(sql, (action['type_action']))
_action_type = cursor.fetchone()['id']
_product_category = 'none'
            if action['category'] != 'none':
sql = "SELECT `id` FROM `product_category` WHERE `name`=%s"
cursor.execute(sql, (action['category']))
_product_category = cursor.fetchone()['id']
try:
                if _product_category != 'none':
sql = "INSERT INTO `action` (`datetime`, `user` , `action_type` , `product_category` , `name`) VALUES (%s, %s, %s, %s, %s)"
cursor.execute(sql, (action['date'] + ' ' + action['time'], _user, _action_type, _product_category, action['name']))
else :
sql = "INSERT INTO `action` (`datetime`, `user` , `action_type` , `name`) VALUES (%s, %s, %s, %s)"
cursor.execute(sql, (action['date'] + ' ' + action['time'], _user, _action_type, action['name']))
connection.commit()
print("Table 'action': Inserted row #: " + format(i))
i = i + 1
except pymysql.err.IntegrityError as err:
if 'Duplicate entry' in format(err) :
print("Warning: Duplicate: {}".format(err))
else :
raise pymysql.err.IntegrityError(err)
print("Table 'action': Success!")
print("Table 'cart_to_user': Adding data...")
i = 1
with connection.cursor() as cursor:
for value in users_cart_pay :
sql = "SELECT `id` FROM `user` WHERE `ip`=%s"
cursor.execute(sql, (value['ip']))
_user = cursor.fetchone()['id']
sql = "SELECT `id` FROM `cart` WHERE `id_cart`=%s"
cursor.execute(sql, (value['cart_id']))
_cart = cursor.fetchone()['id']
for action_name in value['user_cart'] :
sql = "SELECT `id` FROM `action` WHERE `name`=%s"
cursor.execute(sql, (action_name))
_action = cursor.fetchone()['id']
try:
sql = "INSERT INTO `cart_to_user` (`user`, `cart`, `action`) VALUES (%s, %s, %s)"
cursor.execute(sql, (_user, _cart, _action))
connection.commit()
print("Table 'cart_to_user': Inserted row #: " + format(i))
i = i + 1
except pymysql.err.IntegrityError as err:
if 'Duplicate entry' in format(err) :
print("Warning: Duplicate: {}".format(err))
else :
raise pymysql.err.IntegrityError(err)
print("Table 'cart_to_user': Success!")
finally:
connection.close()
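# Hedged note: the inserts above assume a MySQL database `logs` with tables
# `user`, `product_category`, `action_type`, `cart`, `action` and `cart_to_user`
# already created; no DDL is issued by this script.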
|
VadimAspirin/usml
|
back/log_mapper.py
|
log_mapper.py
|
py
| 9,005 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "urllib.request.request.urlopen",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "re.split",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "pymysql.cursors.connect",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "pymysql.cursors",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "pymysql.cursors.cursors",
"line_number": 107,
"usage_type": "attribute"
},
{
"api_name": "pymysql.cursors",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "pymysql.cursors.err",
"line_number": 121,
"usage_type": "attribute"
},
{
"api_name": "pymysql.cursors",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "pymysql.cursors.err.IntegrityError",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "pymysql.cursors.err",
"line_number": 125,
"usage_type": "attribute"
},
{
"api_name": "pymysql.cursors",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "pymysql.cursors.err",
"line_number": 138,
"usage_type": "attribute"
},
{
"api_name": "pymysql.cursors",
"line_number": 138,
"usage_type": "name"
},
{
"api_name": "pymysql.cursors.err.IntegrityError",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "pymysql.cursors.err",
"line_number": 142,
"usage_type": "attribute"
},
{
"api_name": "pymysql.cursors",
"line_number": 142,
"usage_type": "name"
},
{
"api_name": "pymysql.cursors.err",
"line_number": 155,
"usage_type": "attribute"
},
{
"api_name": "pymysql.cursors",
"line_number": 155,
"usage_type": "name"
},
{
"api_name": "pymysql.cursors.err.IntegrityError",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "pymysql.cursors.err",
"line_number": 159,
"usage_type": "attribute"
},
{
"api_name": "pymysql.cursors",
"line_number": 159,
"usage_type": "name"
},
{
"api_name": "pymysql.cursors.err",
"line_number": 175,
"usage_type": "attribute"
},
{
"api_name": "pymysql.cursors",
"line_number": 175,
"usage_type": "name"
},
{
"api_name": "pymysql.cursors.err.IntegrityError",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "pymysql.cursors.err",
"line_number": 179,
"usage_type": "attribute"
},
{
"api_name": "pymysql.cursors",
"line_number": 179,
"usage_type": "name"
},
{
"api_name": "pymysql.cursors.err",
"line_number": 210,
"usage_type": "attribute"
},
{
"api_name": "pymysql.cursors",
"line_number": 210,
"usage_type": "name"
},
{
"api_name": "pymysql.cursors.err.IntegrityError",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "pymysql.cursors.err",
"line_number": 214,
"usage_type": "attribute"
},
{
"api_name": "pymysql.cursors",
"line_number": 214,
"usage_type": "name"
},
{
"api_name": "pymysql.cursors.err",
"line_number": 240,
"usage_type": "attribute"
},
{
"api_name": "pymysql.cursors",
"line_number": 240,
"usage_type": "name"
},
{
"api_name": "pymysql.cursors.err.IntegrityError",
"line_number": 244,
"usage_type": "call"
},
{
"api_name": "pymysql.cursors.err",
"line_number": 244,
"usage_type": "attribute"
},
{
"api_name": "pymysql.cursors",
"line_number": 244,
"usage_type": "name"
}
] |
29572346979
|
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Literal
from transformers import AutoTokenizer
from partial_tagger.data.collators import TransformerCollator
from partial_tagger.encoders.transformer import (
TransformerModelEncoderFactory,
TransformerModelWithHeadEncoderFactory,
)
from partial_tagger.training import Trainer
if TYPE_CHECKING:
from partial_tagger.encoders.base import BaseEncoderFactory
def create_trainer(
model_name: str = "roberta-base",
dropout: float = 0.2,
tokenizer_args: dict[str, Any] | None = None,
encoder_type: Literal["default", "with_head"] = "default",
) -> Trainer:
"""Creates an instance of Trainer."""
encoder_factory: BaseEncoderFactory
if encoder_type == "default":
encoder_factory = TransformerModelEncoderFactory(model_name, dropout)
elif encoder_type == "with_head":
encoder_factory = TransformerModelWithHeadEncoderFactory(model_name, dropout)
else:
raise ValueError(f"{encoder_type} is not supported.")
collator = TransformerCollator(
AutoTokenizer.from_pretrained(model_name), tokenizer_args
)
return Trainer(collator=collator, encoder_factory=encoder_factory)
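
# Hedged usage sketch (arguments shown are the function's own parameters/defaults):
#     trainer = create_trainer("roberta-base", dropout=0.2, encoder_type="with_head")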
|
yasufumy/pytorch-partial-tagger
|
src/partial_tagger/utils.py
|
utils.py
|
py
| 1,230 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "typing.Literal",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "partial_tagger.encoders.base.BaseEncoderFactory",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "partial_tagger.encoders.transformer.TransformerModelEncoderFactory",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "partial_tagger.encoders.transformer.TransformerModelWithHeadEncoderFactory",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "partial_tagger.data.collators.TransformerCollator",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "transformers.AutoTokenizer.from_pretrained",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "transformers.AutoTokenizer",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "partial_tagger.training.Trainer",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "partial_tagger.training.Trainer",
"line_number": 23,
"usage_type": "name"
}
] |
70205052348
|
import os
import argparse
import sys
import warnings
from pathlib import Path
warnings.filterwarnings('ignore')
import torch
import torchvision as tv
import pytorch_lightning as pl
import webdataset as wds
from resnet_sagemaker.models import ResNet
from resnet_sagemaker.callbacks import PlSageMakerLogger, ProfilerCallback
import torch.distributed as dist
from apex.parallel import DistributedDataParallel as DDP
local_rank = int(os.environ.get("LOCAL_RANK", 0))
world_size = int(os.environ.get("WORLD_SIZE", 1))
torch.cuda.set_device(local_rank)
if world_size>1:
dist.init_process_group(
backend="nccl", init_method="env://",
)
def parse_args():
cmdline = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
cmdline.add_argument('--train_file_dir', default='/opt/ml/input/data/train/',
help="""Path to dataset in WebDataset format.""")
cmdline.add_argument('--validation_file_dir', default='/opt/ml/input/data/validation/',
help="""Path to dataset in WebDataset format.""")
cmdline.add_argument('--max_epochs', default=20, type=int,
help="""Number of epochs.""")
cmdline.add_argument('--num_classes', default=1000, type=int,
help="""Number of classes.""")
cmdline.add_argument('--resnet_version', default=50, type=int,
help="""Resnet version.""")
cmdline.add_argument('-lr', '--learning_rate', default=1e-2, type=float,
help="""Base learning rate.""")
cmdline.add_argument('-b', '--batch_size', default=128, type=int,
help="""Size of each minibatch per GPU""")
cmdline.add_argument('--warmup_epochs', default=1, type=int,
help="""Number of epochs for learning rate warmup""")
cmdline.add_argument('--mixup_alpha', default=0.1, type=float,
help="""Extent of convex combination for training mixup""")
cmdline.add_argument('--optimizer', default='adamw', type=str,
help="""Optimizer type""")
cmdline.add_argument('--amp_backend', default='apex', type=str,
help="""Mixed precision backend""")
cmdline.add_argument('--amp_level', default='O2', type=str,
help="""Mixed precision level""")
cmdline.add_argument('--precision', default=16, type=int,
help="""Floating point precision""")
cmdline.add_argument('--profiler_start', default=128, type=int,
help="""Profiler start step""")
cmdline.add_argument('--profiler_steps', default=32, type=int,
help="""Profiler steps""")
cmdline.add_argument('--dataloader_workers', default=4, type=int,
help="""Number of data loaders""")
cmdline.add_argument('--profiler_type', default='smppy', type=str,
help="""Profiler type""")
return cmdline
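
# Hedged launch sketch (assumes torchrun-style env vars LOCAL_RANK / WORLD_SIZE,
# which the code above reads):
#     torchrun --nproc_per_node=8 train.py --batch_size 128 --max_epochs 20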
def main(ARGS):
train_s3_loc = 'pipe:aws s3 cp {0}train_{{{1:04d}..{2:04d}}}.tar -'.format(ARGS.train_file_dir, 0, 2047)
val_s3_loc = 'pipe:aws s3 cp {0}val_{{{1:04d}..{2:04d}}}.tar -'.format(ARGS.validation_file_dir, 0, 127)
model_params = {'num_classes': ARGS.num_classes,
'resnet_version': ARGS.resnet_version,
'train_path': train_s3_loc,
'val_path': val_s3_loc,
'optimizer': ARGS.optimizer,
'lr': ARGS.learning_rate,
'batch_size': ARGS.batch_size,
'dataloader_workers': ARGS.dataloader_workers,
'max_epochs': ARGS.max_epochs,
'warmup_epochs': ARGS.warmup_epochs,
'mixup_alpha': ARGS.mixup_alpha,
}
trainer_params = {'gpus': [int(os.environ.get("LOCAL_RANK", 0))],
'max_epochs': ARGS.max_epochs,
'amp_backend': ARGS.amp_backend,
'amp_level': ARGS.amp_level,
'precision': ARGS.precision,
'progress_bar_refresh_rate': 0,
'logger': pl.loggers.TensorBoardLogger('logs/'),
'callbacks': [PlSageMakerLogger(),
ProfilerCallback(start_step=ARGS.profiler_start,
num_steps=ARGS.profiler_steps,
output_dir='logs/profiling/',
profiler_type=ARGS.profiler_type)]
}
model = ResNet(**model_params)
trainer = pl.Trainer(**trainer_params)
trainer.fit(model)
if __name__=='__main__':
cmdline = parse_args()
ARGS, unknown_args = cmdline.parse_known_args()
main(ARGS)
|
johnbensnyder/resnet-sagemaker
|
pytorch/train.py
|
train.py
|
py
| 4,937 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "warnings.filterwarnings",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "torch.cuda.set_device",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "torch.distributed.init_process_group",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torch.distributed",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentDefaultsHelpFormatter",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "pytorch_lightning.loggers.TensorBoardLogger",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "pytorch_lightning.loggers",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "resnet_sagemaker.callbacks.PlSageMakerLogger",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "resnet_sagemaker.callbacks.ProfilerCallback",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "resnet_sagemaker.models.ResNet",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "pytorch_lightning.Trainer",
"line_number": 100,
"usage_type": "call"
}
] |
11946226959
|
import sys, os, urllib.request, urllib.error, urllib.parse, logging, pwd
import subprocess, site, cgi, datetime, threading, copy, json
import uuid, time, re
from html import escape # ***MUST COME before `from lxml import html`!***
from collections import defaultdict, OrderedDict
from lxml import html
from lxml.html import builder
from http.cookies import SimpleCookie
logging.basicConfig(
level=logging.DEBUG if __debug__ else logging.INFO,
format='%(asctime)s:%(levelname)s:%(name)s:%(message)s')
LOCK = threading.Lock()
try: # command-line testing won't have module available
import uwsgi
#logging.debug('uwsgi: %s', dir(uwsgi))
except ImportError:
uwsgi = type('uwsgi', (), {'opt': {}}) # object with empty opt attribute
uwsgi.lock = LOCK.acquire
uwsgi.unlock = LOCK.release
#logging.debug('uwsgi.opt: %s', repr(uwsgi.opt))
#logging.debug('sys.argv: %s', sys.argv) # only shows [uwsgi]
# 2017-12-28 set `chdir` option in pyturn.uwsgi so now PWD should be correct
#logging.debug('current working directory: %s', os.path.abspath('.')) # was '/'
# so we can see that sys.argv is useless for uwsgi operation
THISDIR = os.path.dirname(uwsgi.opt.get('wsgi-file', b'').decode())
if THISDIR and os.getcwd() != THISDIR:
logging.warning('having to chdir from %s to %s', os.getcwd(), THISDIR)
os.chdir(THISDIR)
else:
logging.warning('THISDIR: %s, os.getcwd(): %s', THISDIR, os.getcwd())
APPDIR = (uwsgi.opt.get('check_static', b'').decode() or
os.path.join(THISDIR, 'html'))
MIMETYPES = {'png': 'image/png', 'ico': 'image/x-icon', 'jpg': 'image/jpeg',
'jpeg': 'image/jpeg',}
DATA = {
'groups': {}, # active groups
'finished': {}, # inactive groups (for "Report" page)
}
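# Hedged shape note: each DATA['groups'][name] ends up holding the creation POST
# fields plus 'participants' and 'talksession' sub-dicts (see handle_post/countdown).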
HTTPSESSIONS = {} # data like username, linked with session keys, goes here
EXPECTED_ERRORS = (
NotImplementedError,
ValueError,
KeyError,
IndexError,
SystemError,
)
PARSED = html.parse(os.path.join(APPDIR, 'index.html')).getroot()
PAGE = html.tostring(PARSED.getroottree())
DEBUG = ['all'] # populate from querystring
# create translation table of illegal characters for groupnames
# ":" is used in this program for internal purposes, so disallow that
# "/" cannot be allowed because we create a filename from groupname
# otherwise, mostly being permissive
ILLEGAL = str.maketrans(dict.fromkeys('''([{:/'"}])'''))
def debug(category, *args):
'''
log debug code only for given category
reduces log size and allows for granular approach to debugging
'''
if not __debug__:
return
elif category in DEBUG:
logging.debug(*args)
def findpath(env):
'''
locate directory where files are stored, and requested file
side effect: splits off querystring and stores its debug values in DEBUG
NOTE: DEBUG is a global and as such will be affected by any client adding
`debug=` args to his querystring. so the net result in debugging will be
the union of what all the clients request.
'''
start = APPDIR
parsed = urllib.parse.urlparse(
urllib.parse.unquote(env.get('REQUEST_URI', '')))
if parsed.query:
query = urllib.parse.parse_qs(parsed.query or '')
DEBUG[:] = list(set(DEBUG) | set(query.get('debug', [])))
debug('all', 'findpath: start: %s' % start)
path = urllib.parse.unquote(env.get('HTTP_PATH', ''))
#debug('all', 'path, attempt 1: %s', path)
path = path or parsed.path
#debug('all', 'path, attempt 2: %s', path)
path = (path or '/').lstrip('/')
debug('all', 'findpath: should not be None at this point: "%s"', path)
return start, path
def loadpage(path, data=None):
'''
input template and populate the HTML with data array
eventually client-side JavaScript will perform many of these functions.
'''
data = data or DATA
parsed = html.fromstring(PAGE)
postdict = data.get('postdict', {})
debug('load', 'loadpage: postdict: %s', postdict)
set_values(parsed, postdict,
['username', 'groupname', 'httpsession_key', 'joined'])
if 'groups' in data:
groups = populate_grouplist(parsed, data)
else:
groups = []
debug('load', 'loadpage: groups: %s', groups)
# only show load indicator if no path specified;
# get rid of meta refresh if path has already been chosen
if path == '':
debug('load', 'showing load indicator')
hide_except('loading', parsed)
return html.tostring(parsed).decode()
else:
for tag in parsed.xpath('//meta[@http-equiv="refresh"]'):
tag.getparent().remove(tag)
if 'text' in postdict:
message = builder.PRE(postdict['text'])
parsed.xpath('//div[@id="error-text"]')[0].append(message)
debug('load', 'showing error page')
hide_except('error', parsed)
elif postdict.get('joined'):
debug('join', 'found "joined": %s', data['postdict'])
group = sanitize(postdict['groupname'])
if not group in groups:
if not group in data['finished']:
debug('all', 'nonexistent group, showing joinform again')
hide_except('joinform', parsed)
else:
create_report(parsed, group, data)
debug('all', 'showing report page')
hide_except('report', parsed)
else:
groupdata = data['groups'][group]
speaker = select_speaker(group, data)
userdata = groupdata['participants'][postdict['username']]
remaining = groupdata['talksession']['remaining']
set_text(parsed, ['talksession-speaker'],
['Current speaker is %s' % speaker if speaker else
'Waiting for next speaker'])
set_text(parsed, ['talksession-time'], [formatseconds(remaining)])
debug('talk', 'userdata[request]: %.6f', userdata['request'])
buttonvalue = 'Cancel request' if userdata['request'] else 'My Turn'
debug('talk', 'setting buttonvalue to %s', buttonvalue)
set_button(parsed, ['myturn-button'], [buttonvalue])
debug('talk', 'showing talk page')
hide_except('talksession', parsed)
elif (postdict.get('submit') == 'Join' and postdict.get('username') and
postdict.get('group', '') == ''):
# some browsers won't return `group` in postdict at all if
# selected element is empty (as it is by default in this case)
debug('join', 'showing groupform after joinform')
hide_except('groupform', parsed)
else:
debug('load', 'showing joinform by default')
hide_except('joinform', parsed)
return html.tostring(parsed).decode()
def create_report(parsed=None, group=None, data=None, **formatting):
'''
show participants with the amount of time each spoke
>>> parsed = html.fromstring("""
... <div id="report-body" class="body">
... <div id="report-wrapper" class="pagewrapper top">
... <div id="report-box" class="box">
... <table>
... <tr><th>Name</th><th>Elapsed Time</th></tr>
... <tr><td>(none)</td><td>00:00:00</td></tr>
... </table>
... </div><!-- box -->
... </div><!-- pagewrapper -->
... </div><!-- body -->""")
>>> data = json.loads("""{"finished": {"test": {"groupname": "test",
... "participants": {"jc": {"spoke": 48.5}, "Ed": {"spoke": 3.25}}}}}""")
>>> formatting = {'pretty_print': True, 'with_tail': False}
>>> print(create_report(parsed, 'test', data, **formatting).decode('utf8'))
<div id="report-body" class="body">
<div id="report-wrapper" class="pagewrapper top">
<div id="report-box" class="box">
<table>
<tr>
<th>Name</th>
<th>Elapsed Time</th>
</tr>
<tr>
<td>jc</td>
<td>00:00:48</td>
</tr>
<tr>
<td>Ed</td>
<td>00:00:03</td>
</tr>
</table>
</div>
<!-- box -->
</div>
<!-- pagewrapper -->
</div>
<BLANKLINE>
'''
parsed = parsed if parsed is not None else copy.deepcopy(PARSED)
data = data or DATA
body_div = parsed.xpath('//*[@id="report-body"]')[0]
rows = body_div.xpath('.//table/tr')
debug('report', 'create_report: rows: %s', rows)
template = rows[1]
table = template.getparent()
table.remove(template)
try:
participants = data['finished'][group]['participants']
except KeyError as nosuchgroup:
logging.warning('No such group %s', nosuchgroup)
participants = {}
speakers = sorted(participants, key=lambda u: -participants[u]['spoke'])
columns = template.xpath('./td')
debug('report', 'create_report: speakers: %s', speakers)
for speaker in speakers:
debug('report', 'adding speaker "%s" to report', speaker)
columns[0].text = speaker
columns[1].text = formatseconds(participants[speaker]['spoke'])
debug('report', 'template now: %s', html.tostring(template))
table.append(html.fromstring(html.tostring(template)))
debug('report', 'table now: %s', html.tostring(table))
return html.tostring(body_div, **formatting)
def set_text(parsed, idlist, values):
'''
pre-set page text
'''
debug('all', 'setting values of %s from %s', idlist, values)
for index in range(len(idlist)):
elementid = idlist[index]
value = values[index]
element = parsed.xpath('//*[@id="%s"]' % elementid)[0]
debug('all', 'before: %s', html.tostring(element))
element.text = value
debug('all', 'after: %s', html.tostring(element))
def set_button(parsed, idlist, values):
'''
modify button values
>>> content = html.fromstring('<div><input id="test" value="Test"></div>')
>>> set_button(content, ['test'], ['new value'])
>>> content.xpath('//*[@id="test"]')[0].get('value')
'new value'
'''
for index in range(len(idlist)):
elementid = idlist[index]
value = values[index]
element = parsed.xpath('//*[@id="%s"]' % elementid)[0]
debug('buttons', 'before: %s', html.tostring(element))
element.set('value', value)
debug('buttons', 'after: %s', html.tostring(element))
def set_values(parsed, postdict, fieldlist):
'''
pre-set form input values from postdict
'''
debug('hidden', 'setting values of %s from %s', fieldlist, postdict)
for fieldname in fieldlist:
value = postdict.get(fieldname, '')
if not value:
debug('hidden', 'skipping %s, no value found', fieldname)
continue
elements = parsed.xpath('//input[@name="%s"]' % fieldname)
for element in elements:
debug('hidden', 'before: %s', html.tostring(element))
element.set('value', value)
debug('hidden', 'after: %s', html.tostring(element))
def populate_grouplist(parsed=None, data=None, formatted='list', **options):
'''
fill in 'select' element with options for each available group
if `formatted` is 'list', just return list of groups, oldest first
>>> options = {'pretty_print': True, 'with_tail': False}
>>> data = {'groups': {'test': {'timestamp': 0}, 'again': {'timestamp': 1}}}
>>> print(populate_grouplist(None, data, 'element', **options))
<select id="group-select" name="group" data-contents=":test:again">
<option value="">(Create new group)</option>
<option value="test">test</option>
<option value="again" selected>again</option></select>
<BLANKLINE>
>>> data['groups']['test']['timestamp'] = 2
>>> populate_grouplist(None, data)
['again', 'test']
'''
# sorting a dict gives you a list of keys
data = data or DATA
session_key = data.get('httpsession_key', None)
session = HTTPSESSIONS.get(session_key, {})
added_group = session.get('added_group', None)
parsed = parsed if parsed is not None else html.fromstring(PAGE)
groups = sorted(data['groups'],
key=lambda g: data['groups'][g]['timestamp'])
contents = ':'.join([''] + groups)
grouplist = parsed.xpath('//select[@name="group"]')[0]
debug('grouplist', 'populate_grouplist: %s', grouplist)
for group in groups:
newgroup = builder.OPTION(group, value=group)
grouplist.append(newgroup)
# make newest group the "selected" one
# except for someone who just created a group, mark *that* one selected
for group in grouplist.getchildren():
try:
del group.attrib['selected']
except KeyError:
pass
try:
grouplist[grouplist.index(added_group)].set('selected', 'selected')
except (KeyError, ValueError, IndexError, TypeError):
grouplist[-1].set('selected', 'selected')
grouplist.set("data-contents", contents)
if formatted == 'list':
return groups
else:
return html.tostring(grouplist, **options).decode()
def hide_except(keep, tree):
'''
set "display: none" for all sections of the page we don't want to see
'''
for page in tree.xpath('//div[@class="body"]'):
if not page.get('id').startswith(keep):
page.set('style', 'display: none')
elif 'style' in page.attrib:
del page.attrib['style']
def data_merge(data, cookie):
'''
anything missing in data['postdict'] gets set from cookie if found
'''
if cookie:
if not data['postdict'].get('username'):
logging.debug('data_merge: setting username from cookie')
data['postdict']['username'] = cookie['username'].value
else:
logging.debug('data_merge: found username already in postdict')
if not data['postdict'].get('http_sessionkey'):
logging.debug('data_merge: setting session key from cookie')
data['postdict']['http_sessionkey'] = cookie['sessionid'].value
else:
logging.debug('data_merge: session key already in postdict')
else:
logging.debug('data_merge: cookie: %r, postdict: %s',
cookie, data.get('postdict'))
def server(env=None, start_response=None):
'''
primary server process, sends page with current groups list
'''
status_code, mimetype, page = '500 Server error', 'text/html', '(Unknown)'
start, path = findpath(env)
cookie, data = handle_post(env)
logging.debug('server: cookie: %s', cookie)
data_merge(data, cookie) # set any missing data from cookie
debug('all', 'server: data: %s', data)
if path in ('groups',):
page = populate_grouplist(None, data, formatted='element')
status_code = '200 OK'
elif path.startswith('report/'):
group = path.split('/')[1]
page = create_report(group=group).decode('utf8')
status_code = '200 OK'
elif path.startswith('groups/'):
group = path.split('/')[1]
try:
page = json.dumps(data['groups'][group])
except KeyError as groupname:
debug('all', 'group %s does not exist in %s', groupname, data)
page = '{}'
status_code = '200 OK'
elif path in ('', 'noscript', 'app'):
page = loadpage(path, data)
status_code = '200 OK'
elif path == 'status':
page = escape(json.dumps(data))
status_code = '200 OK'
else:
try:
page, mimetype = render(os.path.join(start, path))
status_code = '200 OK'
except (IOError, OSError) as filenotfound:
status_code = '404 File not found'
page = '<h1>No such page: %s</h1>' % str(filenotfound)
headers = [('Content-type', mimetype)]
if cookie is not None:
logging.debug('setting cookie headers %r', cookie.output())
headers.extend(cookie_headers(cookie))
start_response(status_code, headers)
debug('all', 'page: %s', page[:128])
return [page.encode('utf8')]
def cookie_headers(cookie):
'''
make list of tuples for cookie values
>>> cookie = SimpleCookie()
>>> cookie['test'] = 'this'
>>> cookie['test']['path'] = '/'
>>> cookie_headers(cookie)
[('Set-Cookie', 'test=this; Path=/')]
'''
cookies = cookie.output().split('\r\n')
return [tuple(re.compile(': ').split(c, 1)) for c in cookies]
def handle_post(env):
'''
process the form submission and return data structures
note what dict(parse_qsl(formdata)) does:
>>> from urllib.parse import parse_qsl
>>> parse_qsl('a=b&b=c&a=d&a=e')
[('a', 'b'), ('b', 'c'), ('a', 'd'), ('a', 'e')]
>>> OrderedDict(_)
OrderedDict([('a', 'e'), ('b', 'c')])
>>>
so only use it where you know that no key will have more than
one value.
parse_qs will instead return a dict of lists.
'''
uwsgi.lock() # lock access to DATA global
worker = getattr(uwsgi, 'worker_id', lambda *args: None)()
DATA['handler'] = (worker, env.get('uwsgi.core'))
timestamp = datetime.datetime.utcnow().timestamp()
cookie = SimpleCookie(env['HTTP_COOKIE']) if 'HTTP_COOKIE' in env else None
try:
if env.get('REQUEST_METHOD') != 'POST':
DATA['postdict'] = {}
return cookie, copy.deepcopy(DATA)
form = cgi.FieldStorage(fp=env['wsgi.input'], environ=env)
DATA['postdict'] = postdict = {k: form.getfirst(k) for k in form.keys()}
debug('all', 'handle_post: %s, postdict: %s', form, postdict)
# [groupname, total, turn] and submit=Submit if group creation
# [username, group] and submit=Join if joining a group
postdict['timestamp'] = timestamp
if not postdict.get('httpsession_key'):
postdict['httpsession_key'] = uuid.uuid4().hex
debug('sessions', 'set httpsession_key = %s',
postdict['httpsession_key'])
try:
buttonvalue = postdict['submit']
except KeyError:
raise ValueError('No "submit" button found')
cookie = update_httpsession(postdict)
if buttonvalue == 'Join':
# username being added to group
# don't allow if name already in group
groups = DATA['groups']
debug('join', 'processing Join: %s', postdict)
username = postdict.get('username', '')
group = sanitize(postdict.get('group', ''))
if not username:
raise ValueError('Name field cannot be empty')
elif group in groups:
postdict['groupname'] = group
if username in groups[group]['participants']:
raise ValueError('"%s" is already a member of %s' % (
username, group))
groups[group]['participants'][username] = defaultdict(
float, # for `speaking` and `spoke` times
{'timestamp': timestamp, 'requests': []}
)
postdict['joined'] = '%s:%s' % (username, group)
if 'talksession' not in groups[group]:
groups[group]['talksession'] = {
'start': timestamp,
'speaker': None,
'tick': 0,
}
counter = threading.Thread(
target=countdown,
name=group,
args=(group,))
counter.daemon = True # leave no zombies on exit
counter.start()
# else group not in groups, no problem, return to add group form
return cookie, copy.deepcopy(DATA)
elif buttonvalue == 'Submit':
# groupname, total (time), turn (time) being added to groups
# don't allow if groupname already being used
groups = DATA['groups']
group = postdict['groupname'] = sanitize(postdict['groupname'])
if not group in groups:
groups[group] = postdict
groups[group]['participants'] = {}
return cookie, copy.deepcopy(DATA)
else:
raise ValueError((
'Group {group[groupname]} already exists with total time '
'{group[total]} minutes and turn time '
'{group[turn]} seconds').format(group=groups[group]))
elif buttonvalue == 'OK':
# affirming receipt of error message or Help screen
return cookie, copy.deepcopy(DATA)
elif buttonvalue == 'Help':
raise UserWarning('Help requested')
elif buttonvalue == 'My Turn':
# attempting to speak in ongoing session
# this can be reached either by normal HTML form submission
# or by XHR from JavaScript on client side
debug('button', 'My Turn button pressed, env: %s', env)
groups = DATA['groups']
group = sanitize(postdict['groupname'])
username = postdict['username']
try:
userdata = groups[group]['participants'][username]
if not userdata['request']:
debug('button', "userdata: setting %s's request to %.6f",
username, timestamp)
userdata['request'] = timestamp
userdata['requests'].append([timestamp, None])
else:
logging.warning('ignoring newer request %.6f, '
'keeping %.6f', userdata['request'],
timestamp)
except KeyError:
raise SystemError('Group %s is no longer active' % group)
return cookie, copy.deepcopy(DATA)
elif buttonvalue == 'Cancel request':
debug('button', 'My Turn button released')
groups = DATA['groups']
group = sanitize(postdict['groupname'])
username = postdict['username']
try:
userdata = groups[group]['participants'][username]
if userdata['request']:
userdata['request'] = None
userdata['requests'][-1][1] = timestamp
else:
logging.error('no speaking request found for %s', username)
except KeyError:
raise SystemError('Group %s is no longer active' % group)
return cookie, copy.deepcopy(DATA)
elif buttonvalue == 'Check status':
return cookie, copy.deepcopy(DATA)
else:
raise ValueError('Unknown form submitted')
except UserWarning as request:
if str(request) == 'Help requested':
debug('all', 'displaying help screen')
DATA['postdict']['text'] = read(os.path.join(THISDIR, 'README.md'))
return cookie, copy.deepcopy(DATA)
except EXPECTED_ERRORS as failed:
debug('all', 'displaying error: "%r"', failed)
DATA['postdict']['text'] = repr(failed)
return cookie, copy.deepcopy(DATA)
finally:
uwsgi.unlock()
def most_eligible_speaker(group, data=None):
'''
participant who first requested to speak who has spoken least
>>> data = {
... 'groups': {
... 'test': {
... 'participants': {
... 'alice': {'spoke': 3, 'request': '2017-10-01T14:21:37.024529'},
... 'bob': {'spoke': 2, 'request': '2017-10-01T14:21:37.024531'},
... 'chuck': {'spoke': 3, 'request': '2017-10-01T14:21:37.024530'}}}}}
>>> most_eligible_speaker('test', data)
'bob'
>>> data = {
... 'groups': {
... 'test': {
... 'participants': {
... 'alice': {'spoke': 2, 'request': '2017-10-01T14:21:37.024531'},
... 'bob': {'spoke': 2, 'request': '2017-10-01T14:21:37.024531'},
... 'chuck': {'spoke': 2, 'request': '2017-10-01T14:21:37.024530'}}}}}
>>> most_eligible_speaker('test', data)
'chuck'
'''
data = data or DATA
groupdata = data['groups'][group]
people = groupdata['participants']
waiting = filter(lambda p: people[p]['request'], people)
speaker_pool = sorted(waiting, key=lambda p:
(people[p]['spoke'], people[p]['request']))
return (speaker_pool or [None])[0]
def select_speaker(group, data=None):
'''
let current speaker finish his turn before considering most eligible
SIDE EFFECTS:
when `turn` time is up or speaker voluntarily relinquishes turn:
sets speaker's `speaking` count to zero in data dict
sets speaker to new speaker
NOTE: not using uwsgi.lock for this, shouldn't be necessary. no
possible race conditions are known at time of coding (jc).
'''
data = data or DATA
groupdata = data['groups'][group]
talksession = groupdata['talksession']
turntime = float(groupdata['turn'])
if talksession['speaker']:
speaker = groupdata['participants'][talksession['speaker']]
if speaker['speaking'] >= turntime or not speaker['request']:
speaker['speaking'] = 0
talksession['speaker'] = most_eligible_speaker(group, data)
else:
talksession['speaker'] = most_eligible_speaker(group, data)
return talksession['speaker']
def sanitize(name):
'''
can't count on someone entering, say, '../../../.hidden/evil' as groupname
in addition to ILLEGAL characters, also strip leading '.' and '-',
the first hiding the file from normal listing, the second making removal
difficult because it looks like an option to rm, so one needs to
`rm -- -evilfile`.
>>> sanitize('../../../.-hidden/::evil')
'hiddenevil'
>>> sanitize(None)
'''
return name.translate(ILLEGAL).lstrip('-.') if name is not None else None
def countdown(group, data=None):
'''
expire the talksession after `minutes`
currently only using uwsgi.lock() when moving group to `finished`.
may need to reevaluate that (jc).
>>> now = datetime.datetime.utcnow().timestamp()
>>> data = {'finished': {}, 'groups': {
... 'test': {
... 'total': '.001',
... 'talksession': {'start': now, 'speaker': None},
... 'participants': {'nobody': {'requests': [[0.1, 0.2]]}},
... }}}
>>> countdown('test', data)
'''
data = data or DATA
groups = data['groups']
sleeptime = .25 # seconds
try:
minutes = float(groups[group]['total'])
groups[group]['talksession']['remaining'] = minutes * 60
ending = (datetime.datetime.fromtimestamp(
groups[group]['talksession']['start']) +
datetime.timedelta(minutes=minutes)).timestamp()
debug('countdown', 'countdown ending: %.6f', ending)
while True:
time.sleep(sleeptime)
now = datetime.datetime.utcnow().timestamp()
debug('countdown', 'countdown now: %.6f', now)
if now > ending:
debug('countdown', 'countdown ended at %.6f', now)
break
speaker = select_speaker(group, data)
debug('countdown', 'countdown: speaker: %s', speaker)
if speaker:
speakerdata = groups[group]['participants'][speaker]
speakerdata['speaking'] += sleeptime
speakerdata['spoke'] += sleeptime
groups[group]['talksession']['remaining'] -= sleeptime
groups[group]['talksession']['tick'] += 1
# should we uwsgi.lock() here in case group is currently being updated?
# if so, need uwsgi.unlock() in `finally` clause
data['finished'][group] = data['groups'].pop(group)
# now save the report of clicks, not same as report of time spoken
reportdir = os.path.join('statistics', group)
reportname = os.path.join(reportdir, '%.6f.json' % now)
try:
participants = data['finished'][group]['participants']
except KeyError:
logging.error("No such key 'participants' in %s",
data['finished'][group])
return
os.makedirs(reportdir, exist_ok=True)
report = open(reportname, 'w')
report.write(json.dumps([{speaker: participants[speaker]['requests']}
for speaker in participants],
indent=4))
report.close()
except KeyError as error:
logging.error('countdown: was group "%s" removed? KeyError: %s',
group, error, exc_info=True)
logging.info('data: %s', data)
def update_httpsession(postdict):
'''
simple implementation of user (http) sessions
this is for keeping state between client and server, this is *not*
the same as discussion (talk) sessions!
another thread should go through and remove expired httpsessions
'''
# FIXME: this session mechanism can only be somewhat secure with https
# FIXME: a thread needs to remove old httpsessions to save memory
timestamp = postdict['timestamp']
cookie = None
if 'httpsession_key' in postdict and postdict['httpsession_key']:
session_key = postdict['httpsession_key']
# only bother storing session once a username has been entered
if postdict.get('username', None):
username = postdict['username']
newgroup = sanitize(postdict.get('group', None))
if session_key in HTTPSESSIONS:
if HTTPSESSIONS[session_key]['username'] != username:
logging.warning(
'changing session username from "%s" to "%s"',
HTTPSESSIONS[session_key]['username'],
username)
if newgroup:
HTTPSESSIONS[session_key]['added_group'] = newgroup
HTTPSESSIONS[session_key]['updated'] = timestamp
else:
HTTPSESSIONS[session_key] = {
'timestamp': timestamp,
'updated': timestamp,
'added_group': None,
'username': username}
cookie = SimpleCookie()
cookie['sessionid'] = session_key
cookie['sessionid']['path'] = '/'
logging.debug('cookie: %s', cookie)
cookie['username'] = username
cookie['username']['path'] = '/'
logging.debug('cookie: %s', cookie)
else:
debug('sessions',
'no username yet associated with session %s', session_key)
else:
logging.warning('no httpsession_key in POST')
return cookie
def render(pagename, standalone=True):
'''
Return content with Content-type header
'''
debug('render', 'render(%s, %s) called', pagename, standalone)
if pagename.endswith('.html'):
debug('render', 'rendering static HTML content')
return (read(pagename), 'text/html')
elif not pagename.endswith(('.png', '.ico', '.jpg', '.jpeg')):
# assume plain text
logging.warning('app is serving %s instead of nginx', pagename)
return (read(pagename), 'text/plain')
elif standalone:
logging.warning('app is serving %s instead of nginx', pagename)
        return (read(pagename),
                MIMETYPES.get(os.path.splitext(pagename)[1].lstrip('.'),
                              'text/plain'))
else:
logging.error('not standalone, and no match for filetype')
raise OSError('File not found: %s' % pagename)
def read(filename):
'''
Return contents of a file
'''
debug('read', 'read: returning contents of %s', filename)
with open(filename) as infile:
data = infile.read()
debug('read', 'data: %s', data[:128])
return data
def formatseconds(seconds):
'''
return rounded-up seconds count as HH:MM:SS
https://stackoverflow.com/a/31946730/493161
>>> formatseconds(666.50001)
'00:11:07'
'''
return '{:0>8}'.format(str(datetime.timedelta(seconds=round(seconds))))
if __name__ == '__main__':
print(server(os.environ, lambda *args: None))
|
jcomeauictx/pyturn
|
myturn.py
|
myturn.py
|
py
| 32,136 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "logging.basicConfig",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "logging.INFO",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "threading.Lock",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "uwsgi.lock",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "uwsgi.unlock",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "uwsgi.opt.get",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "uwsgi.opt",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "uwsgi.opt.get",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "uwsgi.opt",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "lxml.html.parse",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "lxml.html",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "lxml.html.tostring",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "lxml.html",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "logging.debug",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "urllib.request.parse.urlparse",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "urllib.request.parse",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "urllib.request.parse.unquote",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "urllib.request.parse",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "urllib.request.parse.parse_qs",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "urllib.request.parse",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "urllib.request.parse.unquote",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "urllib.request.parse",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "lxml.html.fromstring",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "lxml.html",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "lxml.html.tostring",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "lxml.html",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "lxml.html.builder.PRE",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "lxml.html.builder",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "lxml.html.tostring",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "lxml.html",
"line_number": 158,
"usage_type": "name"
},
{
"api_name": "copy.deepcopy",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "lxml.html.tostring",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "lxml.html",
"line_number": 223,
"usage_type": "name"
},
{
"api_name": "lxml.html.fromstring",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "lxml.html",
"line_number": 224,
"usage_type": "name"
},
{
"api_name": "lxml.html.tostring",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "lxml.html.tostring",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "lxml.html",
"line_number": 225,
"usage_type": "name"
},
{
"api_name": "lxml.html.tostring",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "lxml.html",
"line_number": 226,
"usage_type": "name"
},
{
"api_name": "lxml.html.tostring",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "lxml.html",
"line_number": 237,
"usage_type": "name"
},
{
"api_name": "lxml.html.tostring",
"line_number": 239,
"usage_type": "call"
},
{
"api_name": "lxml.html",
"line_number": 239,
"usage_type": "name"
},
{
"api_name": "lxml.html.tostring",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "lxml.html",
"line_number": 254,
"usage_type": "name"
},
{
"api_name": "lxml.html.tostring",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "lxml.html",
"line_number": 256,
"usage_type": "name"
},
{
"api_name": "lxml.html.tostring",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "lxml.html",
"line_number": 270,
"usage_type": "name"
},
{
"api_name": "lxml.html.tostring",
"line_number": 272,
"usage_type": "call"
},
{
"api_name": "lxml.html",
"line_number": 272,
"usage_type": "name"
},
{
"api_name": "lxml.html.fromstring",
"line_number": 297,
"usage_type": "call"
},
{
"api_name": "lxml.html",
"line_number": 297,
"usage_type": "name"
},
{
"api_name": "lxml.html.builder.OPTION",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "lxml.html.builder",
"line_number": 304,
"usage_type": "name"
},
{
"api_name": "lxml.html.tostring",
"line_number": 321,
"usage_type": "call"
},
{
"api_name": "lxml.html",
"line_number": 321,
"usage_type": "name"
},
{
"api_name": "logging.debug",
"line_number": 339,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 342,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 344,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 347,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 349,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 359,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 372,
"usage_type": "call"
},
{
"api_name": "html.escape",
"line_number": 381,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 381,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 385,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 385,
"usage_type": "attribute"
},
{
"api_name": "logging.debug",
"line_number": 392,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 409,
"usage_type": "call"
},
{
"api_name": "uwsgi.lock",
"line_number": 429,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.utcnow",
"line_number": 432,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 432,
"usage_type": "attribute"
},
{
"api_name": "http.cookies.SimpleCookie",
"line_number": 433,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 437,
"usage_type": "call"
},
{
"api_name": "cgi.FieldStorage",
"line_number": 438,
"usage_type": "call"
},
{
"api_name": "uuid.uuid4",
"line_number": 445,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 467,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 478,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 485,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 494,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 502,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 521,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 526,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_number": 538,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 541,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 543,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 549,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 549,
"usage_type": "attribute"
},
{
"api_name": "copy.deepcopy",
"line_number": 550,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 554,
"usage_type": "call"
},
{
"api_name": "uwsgi.unlock",
"line_number": 556,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.fromtimestamp",
"line_number": 651,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 651,
"usage_type": "attribute"
},
{
"api_name": "datetime.timedelta",
"line_number": 653,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 656,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.utcnow",
"line_number": 657,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 657,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 674,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 674,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 675,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 675,
"usage_type": "attribute"
},
{
"api_name": "logging.error",
"line_number": 679,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 682,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 684,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_number": 689,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 691,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 714,
"usage_type": "call"
},
{
"api_name": "http.cookies.SimpleCookie",
"line_number": 727,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 730,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 733,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 738,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 751,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 754,
"usage_type": "call"
},
{
"api_name": "os.path.splitext",
"line_number": 756,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 756,
"usage_type": "attribute"
},
{
"api_name": "logging.error",
"line_number": 758,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 780,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 783,
"usage_type": "attribute"
}
] |
6742136861
|
import numpy as np
import cv2
cap = cv2.VideoCapture(0)
faceCascade = cv2.CascadeClassifier("face.xml")
while(True):
# Capture frame-by-frame
ret, frame = cap.read()
# Our operations on the frame come here
    gray = frame  # note: no grayscale conversion is applied; the color frame is used as-is
# find face
# Detect faces in the image
faces = faceCascade.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(30, 30),
flags = cv2.CASCADE_SCALE_IMAGE
)
#print ('Found {0} faces!'.format(len(faces)))
# Draw a rectangle around the faces
for (x, y, w, h) in faces:
cv2.rectangle(gray, (x, y), (x+w, y+h), (0, 255, 0), 2)
# Display the resulting frame
cv2.imshow('frame',gray)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
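
# Hedged note: "face.xml" is assumed to be an OpenCV Haar cascade such as
# haarcascade_frontalface_default.xml (shipped with OpenCV under cv2.data.haarcascades).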
|
khanab85/FaceDetectors
|
start.py
|
start.py
|
py
| 866 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "cv2.VideoCapture",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "cv2.CascadeClassifier",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.CASCADE_SCALE_IMAGE",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "cv2.rectangle",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 41,
"usage_type": "call"
}
] |
111612310
|
import json
f = open('dados.json')
Dados = json.load(f)
Dados = [x for x in Dados if x['valor'] > 0]
menor = float("inf")
# lowest daily revenue
for x in Dados:
atual = x['valor']
if(menor > atual):
menor = atual
print('O menor valor de faturamento ocorrido em um dia do mês foi de ', menor)
# highest daily revenue
maior = 0.0
for x in Dados:
atual = x['valor']
if(maior < atual):
maior = atual
print('O maior valor de faturamento ocorrido em um dia do mês foi de ', maior)
# number of days with revenue above the monthly average
totalValor = 0.0
for x in Dados:
atual = x['valor']
totalValor = totalValor + float(atual)
media = totalValor/30
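# NOTE (assumption): the mean divides by 30 calendar days; dividing by len(Dados)
# would instead average only over the days that actually had revenue.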
dias = 0
for x in Dados:
atual = x['valor']
if(atual > media):
dias = dias + 1
print('O número de dias no mês em que o valor do faturamento diário superou à média mensal foi de ', dias)
f.close()
|
CaioPyro/Target_Sistemas
|
Faturamento/main.py
|
main.py
|
py
| 890 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "json.load",
"line_number": 5,
"usage_type": "call"
}
] |
73966289468
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import requests
import json
# NMPA (National Medical Products Administration) portal: http://scxk.nmpa.gov.cn:81/xk/
if __name__ == "__main__":
url = 'http://scxk.nmpa.gov.cn:81/xk/itownet/portalAction.do?method=getXkzsList'
user_agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.163 Safari/535.1'
headers = {
'User-Agent': user_agent
}
    data = {
        "on": "true",
        "page": "1",
        "pageSize": "15",
        "productName": "",
        "conditionType": "1",
        "applyname": "",
        "applysn": "",
    }
json_ids = requests.post(url=url,headers=headers,data=data).json()
print(json_ids)
    all_data_list = [] # stores the detail record of every company
    id_list = [] # stores the company IDs
for dic in json_ids['list']:
id_list.append(dic['ID'])
    print(id_list) # the IDs fetched in bulk
    # fetch the detail page of each company
    print()
post_url = 'http://scxk.nmpa.gov.cn:81/xk/itownet/portalAction.do?method=getXkzsById'
for id in id_list:
data = {
'id':id
}
        detail_json = requests.post(url=post_url,headers=headers,data=data).json()
print(detail_json,"--------")
all_data_list.append(detail_json)
    # persist the results to disk
    fp = open('../alldata.json', 'w', encoding='utf-8')
    json.dump(all_data_list,fp=fp,ensure_ascii=False)
    fp.close()
print("over")
|
xjuun/Note
|
Python/爬虫/code/request基础/06.requests之药监总局相关数据爬取.py
|
06.requests之药监总局相关数据爬取.py
|
py
| 1,414 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "requests.post",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 41,
"usage_type": "call"
}
] |
39556012979
|
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 12 10:37:33 2018
@author: Gerardo Cervantes
"""
import xml.etree.cElementTree as ET
from src.coordinates import Coordinates
from src.hotkeys import Hotkeys
class SharedPreferences():
COORDINATES_TAG = 'coordinates'
SPLIT_TAG = 'split_key'
RESET_TAG = 'reset_key'
ROUTE_TAG = 'route'
def write_preferences(self, file_name, coordinates, route_name, hotkeys):
xml_str = self.create_xml(coordinates, route_name, hotkeys)
with open(file_name, "wb") as f:
f.write(xml_str)
def create_xml(self, coordinates, route_name, hotkeys):
root = ET.Element("root")
ET.SubElement(root, self.ROUTE_TAG).text = self.to_valid_xml_str(route_name)
ET.SubElement(root, self.SPLIT_TAG).text = self.to_valid_xml_str(hotkeys.get_split_key())
ET.SubElement(root, self.RESET_TAG).text = self.to_valid_xml_str(hotkeys.get_reset_key())
xml_coordinates = ET.SubElement(root, self.COORDINATES_TAG)
x, y, width, height = coordinates.get_coordinates()
ET.SubElement(xml_coordinates, "x").text = str(x)
ET.SubElement(xml_coordinates, "y").text = str(y)
ET.SubElement(xml_coordinates, "width").text = str(width)
ET.SubElement(xml_coordinates, "height").text = str(height)
xml_str = ET.tostring(root, encoding='utf8', method='xml')
return xml_str
def to_valid_xml_str(self, text):
if text == '':
return ' '
return text
def parse_xml(self, file_name):
try:
tree = ET.parse(file_name)
except FileNotFoundError:
return None, None, None
root = tree.getroot()
route_name = root.find(self.ROUTE_TAG).text
split_key = root.find(self.SPLIT_TAG).text
reset_key = root.find(self.RESET_TAG).text
coordinates_xml = root.find(self.COORDINATES_TAG)
x = coordinates_xml.find("x").text
y = coordinates_xml.find("y").text
width = coordinates_xml.find("width").text
height = coordinates_xml.find("height").text
coordinates = Coordinates()
coordinates.set_coordinates(x, y, width, height)
hotkeys = Hotkeys()
hotkeys.set_split_key(split_key)
hotkeys.set_reset_key(reset_key)
return coordinates, route_name, hotkeys
if __name__ == "__main__":
shared_prefs = SharedPreferences()
coordinates = Coordinates()
coordinates.set_coordinates(20, 25, 50, 30)
hotkeys = Hotkeys()
file_name = 'example_pref_file.zd'
shared_prefs.write_preferences(file_name, coordinates, '', hotkeys)
coordinates, route_name, hotkeys = shared_prefs.parse_xml(file_name)
print(coordinates)
print(hotkeys.get_split_key())
print(hotkeys.get_reset_key())
print(route_name)
# xml_str = shared_prefs.create_xml(coordinates, 'Home', 'Other', 'Route')
# shared_prefs.xml_print(xml_str)
|
gcervantes8/Star-Classifier-For-Mario-64
|
src/shared_preferences.py
|
shared_preferences.py
|
py
| 3,050 |
python
|
en
|
code
| 8 |
github-code
|
6
|
[
{
"api_name": "xml.etree.cElementTree.Element",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "xml.etree.cElementTree",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "xml.etree.cElementTree.SubElement",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "xml.etree.cElementTree",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "xml.etree.cElementTree.SubElement",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "xml.etree.cElementTree",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "xml.etree.cElementTree.SubElement",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "xml.etree.cElementTree",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "xml.etree.cElementTree.SubElement",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "xml.etree.cElementTree",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "xml.etree.cElementTree.SubElement",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "xml.etree.cElementTree",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "xml.etree.cElementTree.SubElement",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "xml.etree.cElementTree",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "xml.etree.cElementTree.SubElement",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "xml.etree.cElementTree",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "xml.etree.cElementTree.SubElement",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "xml.etree.cElementTree",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "xml.etree.cElementTree.tostring",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "xml.etree.cElementTree",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "xml.etree.cElementTree.parse",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "xml.etree.cElementTree",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "src.coordinates.Coordinates",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "src.hotkeys.Hotkeys",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "src.coordinates.Coordinates",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "src.hotkeys.Hotkeys",
"line_number": 78,
"usage_type": "call"
}
] |
36721611160
|
import torch
from torch import nn
from modules import ConvSC, Inception
# Generates the stride pattern (alternating 1 and 2) for the encoder/decoder stacks.
def stride_generator(N, reverse=False):
strides = [1, 2]*10
if reverse: return list(reversed(strides[:N]))
else: return strides[:N]
# Generate N_S strides, then stack ConvSC blocks accordingly to
# build an Encoder module of depth N_S.
class Encoder(nn.Module):
def __init__(self,C_in, C_hid, N_S):
super(Encoder,self).__init__()
strides = stride_generator(N_S)
self.enc = nn.Sequential(
ConvSC(C_in, C_hid, stride=strides[0]),
*[ConvSC(C_hid, C_hid, stride=s) for s in strides[1:]]
)
def forward(self,x):
enc1 = self.enc[0](x)
latent = enc1
for i in range(1,len(self.enc)):
latent = self.enc[i](latent)
return latent,enc1
# The Decoder maps the C_hid channels received from Mid_Xnet back to C_out
# channels, where C_out equals the original C. It upsamples gradually at a
# constant C_hid width, and at the last layer concatenates the spatial
# features extracted by the encoder (the skip connection).
class Decoder(nn.Module):
def __init__(self,C_hid, C_out, N_S):
super(Decoder,self).__init__()
strides = stride_generator(N_S, reverse=True)
self.dec = nn.Sequential(
*[ConvSC(C_hid, C_hid, stride=s, transpose=True) for s in strides[:-1]],
ConvSC(2*C_hid, C_hid, stride=strides[-1], transpose=True)
)
self.readout = nn.Conv2d(C_hid, C_out, 1)
def forward(self, hid, enc1=None):
for i in range(0,len(self.dec)-1):
hid = self.dec[i](hid)
Y = self.dec[-1](torch.cat([hid, enc1], dim=1))
Y = self.readout(Y)
return Y
# The central inception network acting as the translator. It also has encoder
# and decoder halves; skip connections inject the codings extracted by the
# encoder into the decoder, so that temporal information is learned.
class Mid_Xnet(nn.Module):
def __init__(self, channel_in, channel_hid, N_T, incep_ker = [3,5,7,11], groups=8):
super(Mid_Xnet, self).__init__()
self.N_T = N_T
enc_layers = [Inception(channel_in, channel_hid//2, channel_hid, incep_ker= incep_ker, groups=groups)]
for i in range(1, N_T-1):
enc_layers.append(Inception(channel_hid, channel_hid//2, channel_hid, incep_ker= incep_ker, groups=groups))
enc_layers.append(Inception(channel_hid, channel_hid//2, channel_hid, incep_ker= incep_ker, groups=groups))
dec_layers = [Inception(channel_hid, channel_hid//2, channel_hid, incep_ker= incep_ker, groups=groups)]
for i in range(1, N_T-1):
dec_layers.append(Inception(2*channel_hid, channel_hid//2, channel_hid, incep_ker= incep_ker, groups=groups))
dec_layers.append(Inception(2*channel_hid, channel_hid//2, channel_in, incep_ker= incep_ker, groups=groups))
self.enc = nn.Sequential(*enc_layers)
self.dec = nn.Sequential(*dec_layers)
def forward(self, x):
B, T, C, H, W = x.shape
x = x.reshape(B, T*C, H, W)
# encoder
skips = []
z = x
for i in range(self.N_T):
z = self.enc[i](z)
if i < self.N_T - 1:
skips.append(z)
# decoder
z = self.dec[0](z)
for i in range(1, self.N_T):
z = self.dec[i](torch.cat([z, skips[-i]], dim=1))
y = z.reshape(B, T, C, H, W)
return y
class SimVP(nn.Module):
def __init__(self, shape_in, hid_S=16, hid_T=256, N_S=4, N_T=8, incep_ker=[3,5,7,11], groups=8):
super(SimVP, self).__init__()
T, C, H, W = shape_in
self.enc = Encoder(C, hid_S, N_S)
self.hid = Mid_Xnet(T*hid_S, hid_T, N_T, incep_ker, groups)
self.dec = Decoder(hid_S, C, N_S)
def forward(self, x_raw):
B, T, C, H, W = x_raw.shape
x = x_raw.view(B*T, C, H, W)
embed, skip = self.enc(x)
_, C_, H_, W_ = embed.shape
z = embed.view(B, T, C_, H_, W_)
hid = self.hid(z)
hid = hid.reshape(B*T, C_, H_, W_)
Y = self.dec(hid, skip)
Y = Y.reshape(B, T, C, H, W)
return Y
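# Minimal shape smoke test (my sketch; the sizes are hypothetical and ConvSC
# is assumed to halve/double the spatial size at stride 2, as in the SimVP
# reference code). With N_S=4 the encoder downsamples H and W by 4 and the
# decoder restores them, so the output must match the input shape.
def _smoke_test():
    model = SimVP(shape_in=(10, 1, 64, 64))
    x = torch.randn(2, 10, 1, 64, 64)
    assert model(x).shape == x.shape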
|
J-PARK11/Video_Prediction_using_SimVP
|
model.py
|
model.py
|
py
| 4,304 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "torch.nn.Module",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "modules.ConvSC",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "modules.ConvSC",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "modules.ConvSC",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "modules.ConvSC",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "torch.cat",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "modules.Inception",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "modules.Inception",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "modules.Inception",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "modules.Inception",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "modules.Inception",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "modules.Inception",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "torch.cat",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 92,
"usage_type": "name"
}
] |
20538043919
|
"""
You are given two non-empty linked lists representing two non-negative integers. The digits are stored in reverse order, and each of their nodes contains a single digit. Add the two numbers and return the sum as a linked list.
You may assume the two numbers do not contain any leading zero, except the number 0 itself.
"""
"""
Time complexity: O(max(n, m))
Space complexity: O(1), excluding the output list
"""
from typing import Optional
# Definition for singly-linked list.
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
def addTwoNumbers(
self, l1: Optional[ListNode], l2: Optional[ListNode]
) -> Optional[ListNode]:
# Create a dummy head and a tail pointer for the result linked list
dummyHead = ListNode(0)
tail = dummyHead
carry = 0 # Initialize the carry to 0
while l1 or l2 or carry != 0:
# Get the current digits of l1 and l2 (or 0 if one of them is None)
digit1 = l1.val if l1 else 0
digit2 = l2.val if l2 else 0
# Calculate the sum of the digits and the carry
_sum = digit1 + digit2 + carry
digit = _sum % 10
carry = _sum // 10
# Create a new node with the calculated digit
newNode = ListNode(digit)
# Append the new node to the result linked list
tail.next = newNode
tail = tail.next
# Move to the next nodes in l1 and l2 (if available)
l1 = l1.next if l1 else None
l2 = l2.next if l2 else None
# Get the result linked list starting from the node after the dummy head
result = dummyHead.next
# Remove the reference to the rest of the linked list
dummyHead.next = None
return result # Return the result linked list
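# Hedged usage sketch (the helpers below are mine, not part of the solution):
# 342 + 465 = 807, with digits stored in reverse order.
def _demo():
    def build(digits):
        # digits are given in node order, i.e. least-significant first
        head = None
        for d in reversed(digits):
            head = ListNode(d, head)
        return head
    def to_list(node):
        out = []
        while node:
            out.append(node.val)
            node = node.next
        return out
    result = Solution().addTwoNumbers(build([2, 4, 3]), build([5, 6, 4]))
    return to_list(result)  # -> [7, 0, 8]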
|
Amit258012/100daysofcode
|
Day14/add_two_numbers_linked_list.py
|
add_two_numbers_linked_list.py
|
py
| 1,871 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "typing.Optional",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 24,
"usage_type": "name"
}
] |
42442738126
|
from django.shortcuts import render
from .models import Twit,Company
import datetime
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views import View
from django.http import HttpResponse,HttpResponseRedirect,Http404
import jdatetime
from django.db.models import Q # new
# Create your views here.
def signal_week(mdate):
week = []
    print('signal_week:', mdate, type(mdate))
    for i in range(5):
        nextday = mdate+datetime.timedelta(days=i+1)
        jnextday = jdatetime.date.fromgregorian(date=nextday)
        print('next day (jalali):', jnextday)
query = Twit.objects.filter(created_on__year=str(nextday.year)).filter(created_on__month=str(nextday.month)).filter(created_on__day=str(nextday.day))
if query:
week.append({
'date' : jnextday,
'len' : len(query)
})
if not week :
for i in range (5):
PerivousDay = mdate+datetime.timedelta(days=-i-1)
JPerivousDay = jdatetime.date.fromgregorian(date=PerivousDay)
query = Twit.objects.filter(created_on__year=str(PerivousDay.year)).filter(created_on__month=str(PerivousDay.month)).filter(created_on__day=str(PerivousDay.day))
if query:
week.append({
'date' : JPerivousDay,
'len' : len(query)
})
print(week)
return week
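# Illustrative return value of signal_week (shape only; the dates are made
# up): [{'date': jdatetime.date(1399, 7, 21), 'len': 12}, ...]; one entry
# per day that actually has twits, looking five days ahead, or five days
# back as a fallback when the week ahead is empty.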
def index (request):
# test = Twit.objects.filter(id =1)
print(datetime.date.today())
# twits = Twit.objects.filter(created_on__date=datetime.date.today())
today = datetime.datetime.today()
# twits = Twit.objects.filter(created_on__year=today.year, created_on__month=today.month, created_on__day=today.day)
twits = Twit.objects.filter(created_on__year=today.year, created_on__month=today.month, created_on__day=today.day,\
status=1,avaiable=True,company__status=1)
if len(twits) == 0:
# twits = Twit.objects.all().order_by('-created_on')[:20]
twits = Twit.objects.filter(status=1,avaiable=True,company__status=1).order_by('-created_on')[:20]
companeis = Company.objects.filter(status=1)
print('twits : ',len(twits))
return render(request, 'home.html',{
'twits':twits,
'companeis': companeis,
'jdate' : jdatetime.date.today(),
'week' : signal_week(today)
})
class UnavailableTiwtView(LoginRequiredMixin,View):
def get_object(self, pk):
try:
return Twit.objects.get(pk=pk)
except Twit.DoesNotExist:
raise Http404
def get(self,request,pk) :
print("get pk : ",pk)
twit = self.get_object(pk)
twit.status =0
twit.save()
return HttpResponseRedirect('/')
def post(self,request,pk):
pass
class CompanyDetailView(View):
def get(self,request):
pass
class Search(View):
def get(self,request):
return HttpResponseRedirect('/')
def post(self,request):
today = datetime.datetime.today()
if request.POST.get('search'):
search_text = request.POST.get('search')
data = Twit.objects.filter(
Q(description__contains=search_text)|
Q(company__name__contains=search_text)|
Q(category__name__contains=search_text)).filter(status=1,avaiable=True,company__status=1)
companeis = Company.objects.filter(status=1)
print('******************twits***************** : ',len(data))
return render(request, 'search.html',{
'twits':data,
'companeis': companeis,
'jdate' : jdatetime.date.today(),
'search_text': search_text,
'week' : signal_week(today)
})
else:
return HttpResponseRedirect('/')
class SearchByDate(View):
def post(self,request):
pass
    def get(self,request,year,month,day):
        try:
            date = jdatetime.date(int(year),int(month),int(day))
            mdate = date.togregorian()
            year,month,day = str(mdate).split('-')
            week = signal_week(mdate)
        except Exception:
            # NB: the original returned the redirect from an `except` block but
            # then overrode it with a `return` inside `finally`, which also
            # raised NameError for `week`/`date` on bad input.
            print("error when converting date: " + str(year) + "-" + str(month) + "-" + str(day))
            return HttpResponseRedirect('/')
        data = Twit.objects.filter(created_on__year=str(year)).filter(created_on__month=str(month)).filter(created_on__day=str(day))
        companeis = Company.objects.filter(status=1)
        print('******************twits***************** : ',len(data))
        return render(request, 'search.html',{
            'twits':data,
            'companeis': companeis,
            'jdate' : date,
            'search_text': date,
            'week' : week,
        })
|
mhsharifi96/sursiz_ir
|
backend/bors/views.py
|
views.py
|
py
| 5,221 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "datetime.timedelta",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "jdatetime.date.fromgregorian",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "jdatetime.date",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "models.Twit.objects.filter",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "models.Twit.objects",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "models.Twit",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "datetime.timedelta",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "jdatetime.date.fromgregorian",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "jdatetime.date",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "models.Twit.objects.filter",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "models.Twit.objects",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "models.Twit",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "datetime.date.today",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.today",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "models.Twit.objects.filter",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "models.Twit.objects",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "models.Twit",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "models.Twit.objects.filter",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "models.Twit.objects",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "models.Twit",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "models.Company.objects.filter",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "models.Company.objects",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "models.Company",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "jdatetime.date.today",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "jdatetime.date",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "django.views.View",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "models.Twit.objects.get",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "models.Twit.objects",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "models.Twit",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "models.Twit.DoesNotExist",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "models.Twit",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "django.http.Http404",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "django.views.View",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "django.views.View",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.today",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "models.Twit.objects.filter",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "models.Twit.objects",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "models.Twit",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "django.db.models.Q",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "django.db.models.Q",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "django.db.models.Q",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "models.Company.objects.filter",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "models.Company.objects",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "models.Company",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "jdatetime.date.today",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "jdatetime.date",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "django.views.View",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "jdatetime.date",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "models.Twit.objects.filter",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "models.Twit.objects",
"line_number": 132,
"usage_type": "attribute"
},
{
"api_name": "models.Twit",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "models.Company.objects.filter",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "models.Company.objects",
"line_number": 133,
"usage_type": "attribute"
},
{
"api_name": "models.Company",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 142,
"usage_type": "call"
}
] |
14870918627
|
import logging
def initLogging():
    # Use simple logging in this file.
    # See whether I can separate logging in this program from my library.
logging.basicConfig(filename='test_logging.log',level=logging.DEBUG,
format='%(asctime)s %(message)s', datefmt='%Y%m%d-%H%M%S')
logger = logging.getLogger('root')
    # This sets up the default logger.
# The default level for this logging system is info
# a = logging
a = logger
a.info('Hello world')
a.error('This is an error')
a.warning('This is a warning')
a.debug('Debug information')
# initLogging()
# Make sure the log information from tenon would not contaminate here
tenonpath = '..'
import sys; sys.path.append(tenonpath)
import tenon
tenon.run(__file__, '../demo.blend')
if tenon.inblender():
tenon.render.write('demo.png')
tenon.logging.info('Write image to demo.png')
logging.info('The execution is completed')
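# Aside (my reading, not asserted by the test): the library-friendly pattern
# for the separation probed above is a module-level logger plus a
# NullHandler, e.g.:
def library_logger_sketch():
    log = logging.getLogger(__name__)      # child of the root config above
    log.addHandler(logging.NullHandler())  # keeps the library quiet by default
    return log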
|
qiuwch/tenon
|
test/test_logging.py
|
test_logging.py
|
py
| 934 |
python
|
en
|
code
| 9 |
github-code
|
6
|
[
{
"api_name": "logging.basicConfig",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sys.path.append",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "tenon.run",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "tenon.inblender",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "tenon.render.write",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "tenon.render",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "tenon.logging.info",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "tenon.logging",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "logging.info",
"line_number": 32,
"usage_type": "call"
}
] |
20493833703
|
import json
d1 = {
    'Pessoa 1': {
        'nome': 'Luiz Augusto',
        'idade': 25,
    },
    'Pessoa 2': {
        'nome': 'Adriano Santos',
        'idade': 30,
    },
}
print()
print(d1,'\n')
d1_json = json.dumps(d1, indent=True)
with open('arquivo.json', 'w+') as file:
file.write(d1_json)
print(d1_json)
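# Round-trip sketch (mine; assumes the 'arquivo.json' written above):
def ler_de_volta():
    with open('arquivo.json', encoding='utf-8') as file:
        return json.load(file)  # same nested dict structure as d1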
|
Adriano1976/Curso-de-Python
|
Secao03-Programacao-Procedural/Aula087-Arquivos-Criar-ler-escrever-e-apagar/main.py
|
main.py
|
py
| 329 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "json.dumps",
"line_number": 17,
"usage_type": "call"
}
] |
18405151031
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 12 17:56:03 2020
@author: mints
"""
import logging
import itertools
import joblib
import warnings
import numpy as np
from astropy.utils.exceptions import AstropyUserWarning
from pandas.core.common import SettingWithCopyWarning
from semiphore_public.cuda.cudaprocessor import CudaProcessor
from semiphore_public.utils import interpolate
warnings.filterwarnings('ignore', category=AstropyUserWarning, append=True)
warnings.filterwarnings('ignore', category=SettingWithCopyWarning, append=True)
def distance(w1, w2, sed1, sed2, err1, err2):
"""Calculate distance between two SED templates
Args:
w1 (float): weight of the first SED
w2 (float): weight of the second SED
sed1 (float[]): magnitudes of the first SED
sed2 (float[]): magnitudes of the second SED
err1 (float[]): width of the first SED
err2 (float[]): width of the second SED
Returns:
"Distance"
"""
d = (w1 * (sed1 - sed2)**2 / (err1**2 + 1e-2),
w2 * (sed1 - sed2)**2 / (err2**2 + 1e-2))
return np.sum(np.sqrt(d))
def get_order(w1, w2, sed1, sed2, err1, err2):
"""Reorder SEDs. Here all parameters are arrays along the redshift.
Args:
w1 (float[]): weight of the first SED
w2 (float[]): weight of the second SED
sed1 (float[][]): magnitudes of the first SED
sed2 (float[][]): magnitudes of the second SED
err1 (float[][]): width of the first SED
err2 (float[][]): width of the second SED
    Returns:
        tuple: the permutation of the second SED's indices that minimizes
            the total pairwise distance to the first SED.
"""
nn = len(w1)
d = np.zeros((nn, nn))
for i in range(nn):
for j in range(nn):
d[i, j] = distance(w1[i], w2[j],
sed1[i], sed2[j],
err1[i], err2[j])
smin = np.inf
tOk = None
for t in itertools.permutations(np.arange(nn, dtype=int), nn):
s = 0
for i in range(nn):
s += d[i, t[i]]
if s < smin:
smin = s
tOk = t
return tOk
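# Equivalent assignment via the Hungarian algorithm (my sketch; assumes SciPy
# were an acceptable dependency here). The brute force above is O(nn!) while
# linear_sum_assignment solves the same minimization in polynomial time.
def get_order_hungarian(w1, w2, sed1, sed2, err1, err2):
    from scipy.optimize import linear_sum_assignment
    nn = len(w1)
    d = np.zeros((nn, nn))
    for i in range(nn):
        for j in range(nn):
            d[i, j] = distance(w1[i], w2[j], sed1[i], sed2[j], err1[i], err2[j])
    _, cols = linear_sum_assignment(d)  # column assigned to each row i
    return tuple(cols)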
if __name__ == '__main__':
    import argparse
logger = logging.getLogger("FIT")
logger.setLevel(logging.DEBUG)
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
parser = argparse.ArgumentParser(description="""
Perform a full CUDA-based SED-PhotoZ fit.
""", formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-i', '--input', type=str, default=None,
help='Input filename')
parser.add_argument('-c', '--catalogs', type=str,
default=None, required=True,
help='Input catalogs to use (comma separated)')
parser.add_argument('-o', '--output', type=str, default=None,
help='Output filename')
parser.add_argument('-n', '--nsed', type=int, default=1,
help='Number of SEDs to fit')
parser.add_argument('-V', '--verbose', action="store_true",
default=False,
help='Be verbose')
args = parser.parse_args()
processor = CudaProcessor(args.catalogs.split(','), args.nsed)
results = []
sizes = []
logger.info("Load data from %s", args.catalogs)
processor.load_data(filename=args.input)
z_len = len(processor.z)
is_ok = []
izs = []
# Forward run
for z0, mags, errs in processor.iterate_data(size=1000):
logger.info("Forward run, redshift=%.2f", processor.z[int(z0)])
if len(results) > 0:
output = processor.run_on_data(mags, errs,
custom_params=results[-1][0])
else:
output = processor.run_on_data(mags, errs)
if output is not None:
res, size, _ = output
if res[1] >= processor.MAX_ITERATIONS * args.nsed:
                logger.warning(f'Iteration count exceeded for z nr {z0}')
is_ok.append(False)
else:
is_ok.append(True)
results.append(res)
sizes.append(size)
izs.append(z0)
# Backward run:
for ii in range(len(izs)-2, 0, -1):
if not is_ok[ii + 1] or not is_ok[ii]:
continue
old_norm = results[ii][2] / sizes[ii]
if results[ii + 1][2] / sizes[ii + 1] > old_norm:
logger.info("Backward run, redshift=%.2f",
processor.z[int(izs[ii])])
mags, errs = processor.get_data_for_zs(izs[ii])
output = processor.run_on_data(mags, errs,
custom_params=results[ii+1][0])
if output is not None:
res, size, _ = output
if res[2] / size >= results[ii][2] / sizes[ii]:
logger.debug(f'...new l_norm={res[2] / size} is better')
results[ii] = res
sizes[ii] = size
else:
logger.debug(f'...new l_norm={res[2] / size} is lower, rejecting')
iz_min = int(np.ceil(np.min(izs)))
iz_max = int(np.ceil(np.max(izs)))
izs = processor.z[0] + np.array(izs) * 0.02
sed_shape = (z_len, processor.n_seds, len(processor.columns))
output = {'z': processor.z,
'names': processor.names,
'weights': np.zeros((z_len, processor.n_seds)),
'sed': np.zeros(sed_shape),
'err': np.zeros(sed_shape),
'l_values': np.zeros(len(izs)),
'iterations': np.zeros(len(izs)),
'sizes': sizes,
}
w = np.array([results[ii][0][0] for ii in range(len(results))])
sed = np.array([results[ii][0][1] for ii in range(len(results))])
err = np.array([results[ii][0][2] for ii in range(len(results))])
output['iterations'] = np.array([results[ii][1]
for ii in range(len(results))])
output['l_values'] = np.array([results[ii][2]
for ii in range(len(results))])
ind = np.argsort(w)
logger.info("Reordering...")
# Reordering
output['weights00'] = w
output['sed00'] = sed
output['err00'] = err
w_order = [w[0]]
sed_order = [sed[0]]
err_order = [err[0]]
for i in range(0, len(w)-1):
new_order = list(get_order(w_order[i], w[i+1],
sed_order[i], sed[i+1],
err_order[i], err[i+1]))
w_order.append(w[i + 1][new_order])
sed_order.append(sed[i + 1][new_order])
err_order.append(err[i + 1][new_order])
logger.info("Interpolating...")
# Interpolation
output['weights0'] = w_order
output['sed0'] = sed_order
output['err0'] = err_order
output['weights'] = interpolate.curve_processor(izs, np.array(w_order),
processor.z, is_log=True)
output['sed'] = interpolate.curve_processor(izs, np.array(sed_order),
processor.z, is_log=False)
output['err'] = interpolate.curve_processor(izs, np.array(err_order),
processor.z,
is_log=True, bounded=True)
output['weights'] = output['weights'] / \
output['weights'].sum(axis=1)[:, np.newaxis]
output['z_base'] = izs
output['input_file'] = args.input
if args.output is None:
names = '_'.join(processor.names)
outname = f'../calibrations/seds/{names}_{processor.n_seds}seds.joblib'
else:
outname = args.output
logger.info('Saving calibration to %s', outname)
joblib.dump(output, outname)
logger.info("Finished")
|
minzastro/semiphore_public
|
fit/complete_fit.py
|
complete_fit.py
|
py
| 7,837 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "warnings.filterwarnings",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "astropy.utils.exceptions.AstropyUserWarning",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "warnings.filterwarnings",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pandas.core.common.SettingWithCopyWarning",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "numpy.sum",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "numpy.inf",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "itertools.permutations",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "logging.basicConfig",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "argparse.RawDescriptionHelpFormatter",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "semiphore_public.cuda.cudaprocessor.CudaProcessor",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "numpy.ceil",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "numpy.min",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "numpy.ceil",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "numpy.argsort",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "semiphore_public.utils.interpolate.curve_processor",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "semiphore_public.utils.interpolate",
"line_number": 188,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "semiphore_public.utils.interpolate.curve_processor",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "semiphore_public.utils.interpolate",
"line_number": 190,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "semiphore_public.utils.interpolate.curve_processor",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "semiphore_public.utils.interpolate",
"line_number": 192,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "numpy.newaxis",
"line_number": 197,
"usage_type": "attribute"
},
{
"api_name": "joblib.dump",
"line_number": 207,
"usage_type": "call"
}
] |
71969151547
|
from django.contrib import admin
from django.urls import path, re_path
from . import views
urlpatterns = [
path('',views.index,name="words-index"), #index homePage
path('words/',views.index,name="words-index"),#index homePage
path('random/',views.get_random,name="random"), #Random word
path('words/<str:word_name>/', views.detail, name='words-detail'), # detail page
path('words/add/<str:word_name>', views.add_word, name="words-add_word_details"),# add word page
path('add/', views.add_word, name="words-add_word_details"),
path('about/',views.about,name="about-page"), #about page
path('contact/',views.contact,name="contact"), #contact page
    path('tag/',views.all_tags,name="all-tags-page"), # handles the case where an empty tag is entered
path('tag/<str:str_Tag>',views.tag_page,name="tag-detail-page"), #Tag page for words of a certain tag
path('tagList/', views.all_tags, name="all-tags-page"), #page where all tags are displayed
    path('words/votes/<str:slug>/<str:direction>/',views.vote, name="vote"), # this view manages votes
]
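# Hedged usage sketch (comments only; the resolved URLs are illustrative):
# from django.urls import reverse
# reverse('words-detail', kwargs={'word_name': 'lexeme'})  # -> '/words/lexeme/'
# reverse('tag-detail-page', args=['nouns'])               # -> '/tag/nouns'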
|
gystr/words
|
words/urls.py
|
urls.py
|
py
| 1,067 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 17,
"usage_type": "call"
}
] |
26038693036
|
from __future__ import annotations
import logging
import os
import re
import textwrap
from collections import defaultdict
from dataclasses import dataclass
from pants.backend.codegen.protobuf.protoc import Protoc
from pants.backend.codegen.protobuf.target_types import (
AllProtobufTargets,
ProtobufGrpcToggleField,
ProtobufSourceField,
ProtobufSourcesGeneratorTarget,
ProtobufSourceTarget,
)
from pants.backend.go import target_type_rules
from pants.backend.go.dependency_inference import (
GoImportPathsMappingAddressSet,
GoModuleImportPathsMapping,
GoModuleImportPathsMappings,
GoModuleImportPathsMappingsHook,
)
from pants.backend.go.target_type_rules import GoImportPathMappingRequest
from pants.backend.go.target_types import GoOwningGoModAddressField, GoPackageSourcesField
from pants.backend.go.util_rules import (
assembly,
build_pkg,
build_pkg_target,
first_party_pkg,
go_mod,
link,
sdk,
third_party_pkg,
)
from pants.backend.go.util_rules.build_opts import GoBuildOptions
from pants.backend.go.util_rules.build_pkg import (
BuildGoPackageRequest,
FallibleBuildGoPackageRequest,
)
from pants.backend.go.util_rules.build_pkg_target import (
BuildGoPackageTargetRequest,
GoCodegenBuildRequest,
)
from pants.backend.go.util_rules.first_party_pkg import FallibleFirstPartyPkgAnalysis
from pants.backend.go.util_rules.go_mod import OwningGoMod, OwningGoModRequest
from pants.backend.go.util_rules.pkg_analyzer import PackageAnalyzerSetup
from pants.backend.go.util_rules.sdk import GoSdkProcess
from pants.backend.python.util_rules import pex
from pants.build_graph.address import Address
from pants.core.util_rules.external_tool import DownloadedExternalTool, ExternalToolRequest
from pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest
from pants.core.util_rules.stripped_source_files import StrippedSourceFiles
from pants.engine.fs import (
AddPrefix,
CreateDigest,
Digest,
DigestContents,
Directory,
FileContent,
MergeDigests,
RemovePrefix,
Snapshot,
)
from pants.engine.internals.native_engine import EMPTY_DIGEST
from pants.engine.internals.selectors import Get, MultiGet
from pants.engine.platform import Platform
from pants.engine.process import FallibleProcessResult, Process, ProcessResult
from pants.engine.rules import collect_rules, rule
from pants.engine.target import (
GeneratedSources,
GenerateSourcesRequest,
HydratedSources,
HydrateSourcesRequest,
SourcesPaths,
SourcesPathsRequest,
TransitiveTargets,
TransitiveTargetsRequest,
)
from pants.engine.unions import UnionRule
from pants.source.source_root import (
SourceRoot,
SourceRootRequest,
SourceRootsRequest,
SourceRootsResult,
)
from pants.util.dirutil import group_by_dir
from pants.util.frozendict import FrozenDict
from pants.util.logging import LogLevel
from pants.util.strutil import softwrap
_logger = logging.getLogger(__name__)
class GoCodegenBuildProtobufRequest(GoCodegenBuildRequest):
generate_from = ProtobufSourceField
class GenerateGoFromProtobufRequest(GenerateSourcesRequest):
input = ProtobufSourceField
output = GoPackageSourcesField
@dataclass(frozen=True)
class _SetupGoProtocPlugin:
digest: Digest
_QUOTE_CHAR = r"(?:'|\")"
_IMPORT_PATH_RE = re.compile(rf"^\s*option\s+go_package\s+=\s+{_QUOTE_CHAR}(.*){_QUOTE_CHAR};")
def parse_go_package_option(content_raw: bytes) -> str | None:
content = content_raw.decode()
for line in content.splitlines():
m = _IMPORT_PATH_RE.match(line)
if m:
return m.group(1)
return None
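# Illustrative example (mine): a directive such as
#   option go_package = "github.com/example/project/gen";
# yields "github.com/example/project/gen"; a file without the option yields None.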
class ProtobufGoModuleImportPathsMappingsHook(GoModuleImportPathsMappingsHook):
pass
@rule(desc="Map import paths for all Go Protobuf targets.", level=LogLevel.DEBUG)
async def map_import_paths_of_all_go_protobuf_targets(
_request: ProtobufGoModuleImportPathsMappingsHook,
all_protobuf_targets: AllProtobufTargets,
) -> GoModuleImportPathsMappings:
sources = await MultiGet(
Get(
HydratedSources,
HydrateSourcesRequest(
tgt[ProtobufSourceField],
for_sources_types=(ProtobufSourceField,),
enable_codegen=True,
),
)
for tgt in all_protobuf_targets
)
all_contents = await MultiGet(
Get(DigestContents, Digest, source.snapshot.digest) for source in sources
)
go_protobuf_mapping_metadata = []
owning_go_mod_gets = []
for tgt, contents in zip(all_protobuf_targets, all_contents):
if not contents:
continue
if len(contents) > 1:
raise AssertionError(
f"Protobuf target `{tgt.address}` mapped to more than one source file."
)
import_path = parse_go_package_option(contents[0].content)
if not import_path:
continue
owning_go_mod_gets.append(Get(OwningGoMod, OwningGoModRequest(tgt.address)))
go_protobuf_mapping_metadata.append((import_path, tgt.address))
owning_go_mod_targets = await MultiGet(owning_go_mod_gets)
import_paths_by_module: dict[Address, dict[str, set[Address]]] = defaultdict(
lambda: defaultdict(set)
)
for owning_go_mod, (import_path, address) in zip(
owning_go_mod_targets, go_protobuf_mapping_metadata
):
import_paths_by_module[owning_go_mod.address][import_path].add(address)
return GoModuleImportPathsMappings(
FrozenDict(
{
go_mod_addr: GoModuleImportPathsMapping(
mapping=FrozenDict(
{
import_path: GoImportPathsMappingAddressSet(
addresses=tuple(sorted(addresses)), infer_all=True
)
for import_path, addresses in import_path_mapping.items()
}
),
address_to_import_path=FrozenDict(
{
address: import_path
for import_path, addresses in import_path_mapping.items()
for address in addresses
}
),
)
for go_mod_addr, import_path_mapping in import_paths_by_module.items()
}
)
)
@dataclass(frozen=True)
class _SetupGoProtobufPackageBuildRequest:
"""Request type used to trigger setup of a BuildGoPackageRequest for entire generated Go
Protobuf package.
This type is separate so that a build of the full package can be cached no matter which one of
its component source files was requested. This occurs because a request to build any one of the
source files will be converted into this type and then built.
"""
addresses: tuple[Address, ...]
import_path: str
build_opts: GoBuildOptions
@rule
async def setup_full_package_build_request(
request: _SetupGoProtobufPackageBuildRequest,
protoc: Protoc,
go_protoc_plugin: _SetupGoProtocPlugin,
analyzer: PackageAnalyzerSetup,
platform: Platform,
) -> FallibleBuildGoPackageRequest:
output_dir = "_generated_files"
protoc_relpath = "__protoc"
protoc_go_plugin_relpath = "__protoc_gen_go"
transitive_targets, downloaded_protoc_binary, empty_output_dir = await MultiGet(
Get(TransitiveTargets, TransitiveTargetsRequest(request.addresses)),
Get(DownloadedExternalTool, ExternalToolRequest, protoc.get_request(platform)),
Get(Digest, CreateDigest([Directory(output_dir)])),
)
go_mod_addr = await Get(OwningGoMod, OwningGoModRequest(transitive_targets.roots[0].address))
package_mapping = await Get(
GoModuleImportPathsMapping, GoImportPathMappingRequest(go_mod_addr.address)
)
all_sources = await Get(
SourceFiles,
SourceFilesRequest(
sources_fields=(
tgt[ProtobufSourceField]
for tgt in transitive_targets.closure
if tgt.has_field(ProtobufSourceField)
),
for_sources_types=(ProtobufSourceField,),
enable_codegen=True,
),
)
source_roots, input_digest = await MultiGet(
Get(SourceRootsResult, SourceRootsRequest, SourceRootsRequest.for_files(all_sources.files)),
Get(Digest, MergeDigests([all_sources.snapshot.digest, empty_output_dir])),
)
source_root_paths = sorted({sr.path for sr in source_roots.path_to_root.values()})
pkg_sources = await MultiGet(
Get(SourcesPaths, SourcesPathsRequest(tgt[ProtobufSourceField]))
for tgt in transitive_targets.roots
)
pkg_files = sorted({f for ps in pkg_sources for f in ps.files})
maybe_grpc_plugin_args = []
if any(tgt.get(ProtobufGrpcToggleField).value for tgt in transitive_targets.roots):
maybe_grpc_plugin_args = [
f"--go-grpc_out={output_dir}",
"--go-grpc_opt=paths=source_relative",
]
gen_result = await Get(
FallibleProcessResult,
Process(
argv=[
os.path.join(protoc_relpath, downloaded_protoc_binary.exe),
f"--plugin=go={os.path.join('.', protoc_go_plugin_relpath, 'protoc-gen-go')}",
f"--plugin=go-grpc={os.path.join('.', protoc_go_plugin_relpath, 'protoc-gen-go-grpc')}",
f"--go_out={output_dir}",
"--go_opt=paths=source_relative",
*(f"--proto_path={source_root}" for source_root in source_root_paths),
*maybe_grpc_plugin_args,
*pkg_files,
],
# Note: Necessary or else --plugin option needs absolute path.
env={"PATH": protoc_go_plugin_relpath},
input_digest=input_digest,
immutable_input_digests={
protoc_relpath: downloaded_protoc_binary.digest,
protoc_go_plugin_relpath: go_protoc_plugin.digest,
},
description=f"Generating Go sources from {request.import_path}.",
level=LogLevel.DEBUG,
output_directories=(output_dir,),
),
)
if gen_result.exit_code != 0:
return FallibleBuildGoPackageRequest(
request=None,
import_path=request.import_path,
exit_code=gen_result.exit_code,
stderr=gen_result.stderr.decode(),
)
# Ensure that the generated files are in a single package directory.
gen_sources = await Get(Snapshot, Digest, gen_result.output_digest)
files_by_dir = group_by_dir(gen_sources.files)
if len(files_by_dir) != 1:
return FallibleBuildGoPackageRequest(
request=None,
import_path=request.import_path,
exit_code=1,
stderr=textwrap.dedent(
f"""
Expected Go files generated from Protobuf sources to be output to a single directory.
- import path: {request.import_path}
- protobuf files: {', '.join(pkg_files)}
"""
).strip(),
)
gen_dir = list(files_by_dir.keys())[0]
# Analyze the generated sources.
input_digest = await Get(Digest, MergeDigests([gen_sources.digest, analyzer.digest]))
result = await Get(
FallibleProcessResult,
Process(
(analyzer.path, gen_dir),
input_digest=input_digest,
description=f"Determine metadata for generated Go package for {request.import_path}",
level=LogLevel.DEBUG,
env={"CGO_ENABLED": "0"}, # protobuf files should not have cgo!
),
)
# Parse the metadata from the analysis.
fallible_analysis = FallibleFirstPartyPkgAnalysis.from_process_result(
result,
dir_path=gen_dir,
import_path=request.import_path,
minimum_go_version="",
description_of_source=f"Go package generated from protobuf targets `{', '.join(str(addr) for addr in request.addresses)}`",
)
if not fallible_analysis.analysis:
return FallibleBuildGoPackageRequest(
request=None,
import_path=request.import_path,
exit_code=fallible_analysis.exit_code,
stderr=fallible_analysis.stderr,
)
analysis = fallible_analysis.analysis
# Obtain build requests for third-party dependencies.
# TODO: Consider how to merge this code with existing dependency inference code.
dep_build_request_addrs: set[Address] = set()
for dep_import_path in (*analysis.imports, *analysis.test_imports, *analysis.xtest_imports):
# Infer dependencies on other Go packages.
candidate_addresses = package_mapping.mapping.get(dep_import_path)
if candidate_addresses:
# TODO: Use explicit dependencies to disambiguate? This should never happen with Go backend though.
if candidate_addresses.infer_all:
dep_build_request_addrs.update(candidate_addresses.addresses)
else:
if len(candidate_addresses.addresses) > 1:
return FallibleBuildGoPackageRequest(
request=None,
import_path=request.import_path,
exit_code=result.exit_code,
stderr=textwrap.dedent(
f"""
Multiple addresses match import of `{dep_import_path}`.
addresses: {', '.join(str(a) for a in candidate_addresses.addresses)}
"""
).strip(),
)
dep_build_request_addrs.update(candidate_addresses.addresses)
dep_build_requests = await MultiGet(
Get(BuildGoPackageRequest, BuildGoPackageTargetRequest(addr, build_opts=request.build_opts))
for addr in sorted(dep_build_request_addrs)
)
return FallibleBuildGoPackageRequest(
request=BuildGoPackageRequest(
import_path=request.import_path,
pkg_name=analysis.name,
digest=gen_sources.digest,
dir_path=analysis.dir_path,
go_files=analysis.go_files,
s_files=analysis.s_files,
direct_dependencies=dep_build_requests,
minimum_go_version=analysis.minimum_go_version,
build_opts=request.build_opts,
),
import_path=request.import_path,
)
@rule
async def setup_build_go_package_request_for_protobuf(
request: GoCodegenBuildProtobufRequest,
) -> FallibleBuildGoPackageRequest:
# Hydrate the protobuf source to parse for the Go import path.
sources = await Get(HydratedSources, HydrateSourcesRequest(request.target[ProtobufSourceField]))
sources_content = await Get(DigestContents, Digest, sources.snapshot.digest)
assert len(sources_content) == 1
import_path = parse_go_package_option(sources_content[0].content)
if not import_path:
return FallibleBuildGoPackageRequest(
request=None,
import_path="",
exit_code=1,
stderr=f"No import path was set in Protobuf file via `option go_package` directive for {request.target.address}.",
)
go_mod_addr = await Get(OwningGoMod, OwningGoModRequest(request.target.address))
package_mapping = await Get(
GoModuleImportPathsMapping, GoImportPathMappingRequest(go_mod_addr.address)
)
# Request the full build of the package. This indirection is necessary so that requests for two or more
# Protobuf files in the same Go package result in a single cacheable rule invocation.
protobuf_target_addrs_set_for_import_path = package_mapping.mapping.get(import_path)
if not protobuf_target_addrs_set_for_import_path:
return FallibleBuildGoPackageRequest(
request=None,
import_path=import_path,
exit_code=1,
stderr=softwrap(
f"""
No Protobuf files exists for import path `{import_path}`.
Consider whether the import path was set correctly via the `option go_package` directive.
"""
),
)
return await Get(
FallibleBuildGoPackageRequest,
_SetupGoProtobufPackageBuildRequest(
addresses=protobuf_target_addrs_set_for_import_path.addresses,
import_path=import_path,
build_opts=request.build_opts,
),
)
@rule(desc="Generate Go source files from Protobuf", level=LogLevel.DEBUG)
async def generate_go_from_protobuf(
request: GenerateGoFromProtobufRequest,
protoc: Protoc,
go_protoc_plugin: _SetupGoProtocPlugin,
platform: Platform,
) -> GeneratedSources:
output_dir = "_generated_files"
protoc_relpath = "__protoc"
protoc_go_plugin_relpath = "__protoc_gen_go"
downloaded_protoc_binary, empty_output_dir, transitive_targets = await MultiGet(
Get(DownloadedExternalTool, ExternalToolRequest, protoc.get_request(platform)),
Get(Digest, CreateDigest([Directory(output_dir)])),
Get(TransitiveTargets, TransitiveTargetsRequest([request.protocol_target.address])),
)
# NB: By stripping the source roots, we avoid having to set the value `--proto_path`
# for Protobuf imports to be discoverable.
all_sources_stripped, target_sources_stripped = await MultiGet(
Get(
StrippedSourceFiles,
SourceFilesRequest(
tgt[ProtobufSourceField]
for tgt in transitive_targets.closure
if tgt.has_field(ProtobufSourceField)
),
),
Get(
StrippedSourceFiles, SourceFilesRequest([request.protocol_target[ProtobufSourceField]])
),
)
input_digest = await Get(
Digest, MergeDigests([all_sources_stripped.snapshot.digest, empty_output_dir])
)
maybe_grpc_plugin_args = []
if request.protocol_target.get(ProtobufGrpcToggleField).value:
maybe_grpc_plugin_args = [
f"--go-grpc_out={output_dir}",
"--go-grpc_opt=paths=source_relative",
]
result = await Get(
ProcessResult,
Process(
argv=[
os.path.join(protoc_relpath, downloaded_protoc_binary.exe),
f"--plugin=go={os.path.join('.', protoc_go_plugin_relpath, 'protoc-gen-go')}",
f"--plugin=go-grpc={os.path.join('.', protoc_go_plugin_relpath, 'protoc-gen-go-grpc')}",
f"--go_out={output_dir}",
"--go_opt=paths=source_relative",
*maybe_grpc_plugin_args,
*target_sources_stripped.snapshot.files,
],
# Note: Necessary or else --plugin option needs absolute path.
env={"PATH": protoc_go_plugin_relpath},
input_digest=input_digest,
immutable_input_digests={
protoc_relpath: downloaded_protoc_binary.digest,
protoc_go_plugin_relpath: go_protoc_plugin.digest,
},
description=f"Generating Go sources from {request.protocol_target.address}.",
level=LogLevel.DEBUG,
output_directories=(output_dir,),
),
)
normalized_digest, source_root = await MultiGet(
Get(Digest, RemovePrefix(result.output_digest, output_dir)),
Get(SourceRoot, SourceRootRequest, SourceRootRequest.for_target(request.protocol_target)),
)
source_root_restored = (
await Get(Snapshot, AddPrefix(normalized_digest, source_root.path))
if source_root.path != "."
else await Get(Snapshot, Digest, normalized_digest)
)
return GeneratedSources(source_root_restored)
# Note: The versions of the Go protoc and gRPC plugins are hard coded in the following go.mod. To update,
# copy the following go.mod and go.sum contents to go.mod and go.sum files in a new directory. Then update the
# versions and run `go mod download all`. Copy the go.mod and go.sum contents back into these constants,
# making sure to replace tabs with `\t`.
GO_PROTOBUF_GO_MOD = """\
module org.pantsbuild.backend.go.protobuf
go 1.17
require (
\tgoogle.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0
\tgoogle.golang.org/protobuf v1.27.1
)
require (
\tgithub.com/golang/protobuf v1.5.0 // indirect
\tgithub.com/google/go-cmp v0.5.5 // indirect
\tgolang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 // indirect
)
"""
GO_PROTOBUF_GO_SUM = """\
github.com/golang/protobuf v1.5.0 h1:LUVKkCeviFUMKqHa4tXIIij/lbhnMbP7Fn5wKdKkRh4=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/grpc v1.2.0 h1:v8eFdETH8nqZHQ9x+0f2PLuU6W7zo5PFZuVEwH5126Y=
google.golang.org/grpc v1.2.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0 h1:TLkBREm4nIsEcexnCjgQd5GQWaHcqMzwQV0TX9pq8S0=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0/go.mod h1:DNq5QpG7LJqD2AamLZ7zvKE0DEpVl2BSEVjFycAAjRY=
google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
"""
@rule
async def setup_go_protoc_plugin() -> _SetupGoProtocPlugin:
go_mod_digest = await Get(
Digest,
CreateDigest(
[
FileContent("go.mod", GO_PROTOBUF_GO_MOD.encode()),
FileContent("go.sum", GO_PROTOBUF_GO_SUM.encode()),
]
),
)
download_sources_result = await Get(
ProcessResult,
GoSdkProcess(
["mod", "download", "all"],
input_digest=go_mod_digest,
output_directories=("gopath",),
description="Download Go `protoc` plugin sources.",
allow_downloads=True,
),
)
go_plugin_build_result, go_grpc_plugin_build_result = await MultiGet(
Get(
ProcessResult,
GoSdkProcess(
["install", "google.golang.org/protobuf/cmd/[email protected]"],
input_digest=download_sources_result.output_digest,
output_files=["gopath/bin/protoc-gen-go"],
description="Build Go protobuf plugin for `protoc`.",
),
),
Get(
ProcessResult,
GoSdkProcess(
[
"install",
"google.golang.org/grpc/cmd/[email protected]",
],
input_digest=download_sources_result.output_digest,
output_files=["gopath/bin/protoc-gen-go-grpc"],
description="Build Go gRPC protobuf plugin for `protoc`.",
),
),
)
if go_plugin_build_result.output_digest == EMPTY_DIGEST:
raise AssertionError(
f"Failed to build protoc-gen-go:\n"
f"stdout:\n{go_plugin_build_result.stdout.decode()}\n\n"
f"stderr:\n{go_plugin_build_result.stderr.decode()}"
)
if go_grpc_plugin_build_result.output_digest == EMPTY_DIGEST:
raise AssertionError(
f"Failed to build protoc-gen-go-grpc:\n"
f"stdout:\n{go_grpc_plugin_build_result.stdout.decode()}\n\n"
f"stderr:\n{go_grpc_plugin_build_result.stderr.decode()}"
)
merged_output_digests = await Get(
Digest,
MergeDigests(
[go_plugin_build_result.output_digest, go_grpc_plugin_build_result.output_digest]
),
)
plugin_digest = await Get(Digest, RemovePrefix(merged_output_digests, "gopath/bin"))
return _SetupGoProtocPlugin(plugin_digest)
def rules():
return (
*collect_rules(),
UnionRule(GenerateSourcesRequest, GenerateGoFromProtobufRequest),
UnionRule(GoCodegenBuildRequest, GoCodegenBuildProtobufRequest),
UnionRule(GoModuleImportPathsMappingsHook, ProtobufGoModuleImportPathsMappingsHook),
ProtobufSourcesGeneratorTarget.register_plugin_field(GoOwningGoModAddressField),
ProtobufSourceTarget.register_plugin_field(GoOwningGoModAddressField),
# Rules needed for this to pass src/python/pants/init/load_backends_integration_test.py:
*assembly.rules(),
*build_pkg.rules(),
*build_pkg_target.rules(),
*first_party_pkg.rules(),
*go_mod.rules(),
*link.rules(),
*sdk.rules(),
*target_type_rules.rules(),
*third_party_pkg.rules(),
*pex.rules(),
)
|
pantsbuild/pants
|
src/python/pants/backend/codegen/protobuf/go/rules.py
|
rules.py
|
py
| 25,015 |
python
|
en
|
code
| 2,896 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "pants.backend.go.util_rules.build_pkg_target.GoCodegenBuildRequest",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "pants.backend.codegen.protobuf.target_types.ProtobufSourceField",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "pants.engine.target.GenerateSourcesRequest",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "pants.backend.codegen.protobuf.target_types.ProtobufSourceField",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "pants.backend.go.target_types.GoPackageSourcesField",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "pants.engine.fs.Digest",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "pants.backend.go.dependency_inference.GoModuleImportPathsMappingsHook",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "pants.backend.codegen.protobuf.target_types.AllProtobufTargets",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "pants.engine.internals.selectors.MultiGet",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "pants.engine.internals.selectors.Get",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "pants.engine.target.HydratedSources",
"line_number": 134,
"usage_type": "argument"
},
{
"api_name": "pants.engine.target.HydrateSourcesRequest",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "pants.backend.codegen.protobuf.target_types.ProtobufSourceField",
"line_number": 136,
"usage_type": "name"
},
{
"api_name": "pants.backend.codegen.protobuf.target_types.ProtobufSourceField",
"line_number": 137,
"usage_type": "name"
},
{
"api_name": "pants.engine.internals.selectors.MultiGet",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "pants.engine.internals.selectors.Get",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "pants.engine.fs.DigestContents",
"line_number": 145,
"usage_type": "argument"
},
{
"api_name": "pants.engine.fs.Digest",
"line_number": 145,
"usage_type": "argument"
},
{
"api_name": "pants.engine.internals.selectors.Get",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "pants.backend.go.util_rules.go_mod.OwningGoMod",
"line_number": 162,
"usage_type": "argument"
},
{
"api_name": "pants.backend.go.util_rules.go_mod.OwningGoModRequest",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "pants.engine.internals.selectors.MultiGet",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "pants.build_graph.address.Address",
"line_number": 167,
"usage_type": "name"
},
{
"api_name": "collections.defaultdict",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "pants.backend.go.dependency_inference.GoModuleImportPathsMappings",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "pants.util.frozendict.FrozenDict",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "pants.backend.go.dependency_inference.GoModuleImportPathsMapping",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "pants.util.frozendict.FrozenDict",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "pants.backend.go.dependency_inference.GoImportPathsMappingAddressSet",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "pants.util.frozendict.FrozenDict",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "pants.engine.rules.rule",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "pants.util.logging.LogLevel.DEBUG",
"line_number": 127,
"usage_type": "attribute"
},
{
"api_name": "pants.util.logging.LogLevel",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "pants.backend.go.dependency_inference.GoModuleImportPathsMappings",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "pants.build_graph.address.Address",
"line_number": 212,
"usage_type": "name"
},
{
"api_name": "pants.backend.go.util_rules.build_opts.GoBuildOptions",
"line_number": 214,
"usage_type": "name"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "pants.backend.codegen.protobuf.protoc.Protoc",
"line_number": 220,
"usage_type": "name"
},
{
"api_name": "pants.backend.go.util_rules.pkg_analyzer.PackageAnalyzerSetup",
"line_number": 222,
"usage_type": "name"
},
{
"api_name": "pants.engine.platform.Platform",
"line_number": 223,
"usage_type": "name"
},
{
"api_name": "pants.engine.internals.selectors.MultiGet",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "pants.engine.internals.selectors.Get",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "pants.engine.target.TransitiveTargets",
"line_number": 230,
"usage_type": "argument"
},
{
"api_name": "pants.engine.target.TransitiveTargetsRequest",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "pants.engine.internals.selectors.Get",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "pants.core.util_rules.external_tool.DownloadedExternalTool",
"line_number": 231,
"usage_type": "argument"
},
{
"api_name": "pants.core.util_rules.external_tool.ExternalToolRequest",
"line_number": 231,
"usage_type": "argument"
},
{
"api_name": "pants.engine.internals.selectors.Get",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "pants.engine.fs.Digest",
"line_number": 232,
"usage_type": "argument"
},
{
"api_name": "pants.engine.fs.CreateDigest",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "pants.engine.fs.Directory",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "pants.engine.internals.selectors.Get",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "pants.backend.go.util_rules.go_mod.OwningGoMod",
"line_number": 235,
"usage_type": "argument"
},
{
"api_name": "pants.backend.go.util_rules.go_mod.OwningGoModRequest",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "pants.engine.internals.selectors.Get",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "pants.backend.go.dependency_inference.GoModuleImportPathsMapping",
"line_number": 237,
"usage_type": "argument"
},
{
"api_name": "pants.backend.go.target_type_rules.GoImportPathMappingRequest",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "pants.engine.internals.selectors.Get",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "pants.core.util_rules.source_files.SourceFiles",
"line_number": 241,
"usage_type": "argument"
},
{
"api_name": "pants.core.util_rules.source_files.SourceFilesRequest",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "pants.backend.codegen.protobuf.target_types.ProtobufSourceField",
"line_number": 244,
"usage_type": "name"
},
{
"api_name": "pants.backend.codegen.protobuf.target_types.ProtobufSourceField",
"line_number": 246,
"usage_type": "argument"
},
{
"api_name": "pants.backend.codegen.protobuf.target_types.ProtobufSourceField",
"line_number": 248,
"usage_type": "name"
},
{
"api_name": "pants.engine.internals.selectors.MultiGet",
"line_number": 252,
"usage_type": "call"
},
{
"api_name": "pants.engine.internals.selectors.Get",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "pants.source.source_root.SourceRootsResult",
"line_number": 253,
"usage_type": "argument"
},
{
"api_name": "pants.source.source_root.SourceRootsRequest",
"line_number": 253,
"usage_type": "argument"
},
{
"api_name": "pants.source.source_root.SourceRootsRequest.for_files",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "pants.engine.internals.selectors.Get",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "pants.engine.fs.Digest",
"line_number": 254,
"usage_type": "argument"
},
{
"api_name": "pants.engine.fs.MergeDigests",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "pants.engine.internals.selectors.MultiGet",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "pants.engine.internals.selectors.Get",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "pants.engine.target.SourcesPaths",
"line_number": 260,
"usage_type": "argument"
},
{
"api_name": "pants.engine.target.SourcesPathsRequest",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "pants.backend.codegen.protobuf.target_types.ProtobufSourceField",
"line_number": 260,
"usage_type": "name"
},
{
"api_name": "pants.backend.codegen.protobuf.target_types.ProtobufGrpcToggleField",
"line_number": 266,
"usage_type": "argument"
},
{
"api_name": "pants.engine.internals.selectors.Get",
"line_number": 272,
"usage_type": "call"
},
{
"api_name": "pants.engine.process.FallibleProcessResult",
"line_number": 273,
"usage_type": "argument"
},
{
"api_name": "pants.engine.process.Process",
"line_number": 274,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 276,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 276,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 277,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 277,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 278,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 278,
"usage_type": "attribute"
},
{
"api_name": "pants.util.logging.LogLevel.DEBUG",
"line_number": 293,
"usage_type": "attribute"
},
{
"api_name": "pants.util.logging.LogLevel",
"line_number": 293,
"usage_type": "name"
},
{
"api_name": "pants.backend.go.util_rules.build_pkg.FallibleBuildGoPackageRequest",
"line_number": 298,
"usage_type": "call"
},
{
"api_name": "pants.engine.internals.selectors.Get",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "pants.engine.fs.Snapshot",
"line_number": 306,
"usage_type": "argument"
},
{
"api_name": "pants.engine.fs.Digest",
"line_number": 306,
"usage_type": "argument"
},
{
"api_name": "pants.util.dirutil.group_by_dir",
"line_number": 307,
"usage_type": "call"
},
{
"api_name": "pants.backend.go.util_rules.build_pkg.FallibleBuildGoPackageRequest",
"line_number": 309,
"usage_type": "call"
},
{
"api_name": "textwrap.dedent",
"line_number": 313,
"usage_type": "call"
},
{
"api_name": "pants.engine.internals.selectors.Get",
"line_number": 324,
"usage_type": "call"
},
{
"api_name": "pants.engine.fs.Digest",
"line_number": 324,
"usage_type": "argument"
},
{
"api_name": "pants.engine.fs.MergeDigests",
"line_number": 324,
"usage_type": "call"
},
{
"api_name": "pants.engine.internals.selectors.Get",
"line_number": 325,
"usage_type": "call"
},
{
"api_name": "pants.engine.process.FallibleProcessResult",
"line_number": 326,
"usage_type": "argument"
},
{
"api_name": "pants.engine.process.Process",
"line_number": 327,
"usage_type": "call"
},
{
"api_name": "pants.util.logging.LogLevel.DEBUG",
"line_number": 331,
"usage_type": "attribute"
},
{
"api_name": "pants.util.logging.LogLevel",
"line_number": 331,
"usage_type": "name"
},
{
"api_name": "pants.backend.go.util_rules.first_party_pkg.FallibleFirstPartyPkgAnalysis.from_process_result",
"line_number": 337,
"usage_type": "call"
},
{
"api_name": "pants.backend.go.util_rules.first_party_pkg.FallibleFirstPartyPkgAnalysis",
"line_number": 337,
"usage_type": "name"
},
{
"api_name": "pants.backend.go.util_rules.build_pkg.FallibleBuildGoPackageRequest",
"line_number": 345,
"usage_type": "call"
},
{
"api_name": "pants.build_graph.address.Address",
"line_number": 355,
"usage_type": "name"
},
{
"api_name": "pants.backend.go.util_rules.build_pkg.FallibleBuildGoPackageRequest",
"line_number": 365,
"usage_type": "call"
},
{
"api_name": "textwrap.dedent",
"line_number": 369,
"usage_type": "call"
},
{
"api_name": "pants.engine.internals.selectors.MultiGet",
"line_number": 379,
"usage_type": "call"
},
{
"api_name": "pants.engine.internals.selectors.Get",
"line_number": 380,
"usage_type": "call"
},
{
"api_name": "pants.backend.go.util_rules.build_pkg.BuildGoPackageRequest",
"line_number": 380,
"usage_type": "argument"
},
{
"api_name": "pants.backend.go.util_rules.build_pkg_target.BuildGoPackageTargetRequest",
"line_number": 380,
"usage_type": "call"
},
{
"api_name": "pants.backend.go.util_rules.build_pkg.FallibleBuildGoPackageRequest",
"line_number": 384,
"usage_type": "call"
},
{
"api_name": "pants.backend.go.util_rules.build_pkg.BuildGoPackageRequest",
"line_number": 385,
"usage_type": "call"
},
{
"api_name": "pants.engine.rules.rule",
"line_number": 217,
"usage_type": "name"
},
{
"api_name": "pants.backend.go.util_rules.build_pkg.FallibleBuildGoPackageRequest",
"line_number": 224,
"usage_type": "name"
},
{
"api_name": "pants.engine.internals.selectors.Get",
"line_number": 405,
"usage_type": "call"
},
{
"api_name": "pants.engine.target.HydratedSources",
"line_number": 405,
"usage_type": "argument"
},
{
"api_name": "pants.engine.target.HydrateSourcesRequest",
"line_number": 405,
"usage_type": "call"
},
{
"api_name": "pants.backend.codegen.protobuf.target_types.ProtobufSourceField",
"line_number": 405,
"usage_type": "name"
},
{
"api_name": "pants.engine.internals.selectors.Get",
"line_number": 406,
"usage_type": "call"
},
{
"api_name": "pants.engine.fs.DigestContents",
"line_number": 406,
"usage_type": "argument"
},
{
"api_name": "pants.engine.fs.Digest",
"line_number": 406,
"usage_type": "argument"
},
{
"api_name": "pants.backend.go.util_rules.build_pkg.FallibleBuildGoPackageRequest",
"line_number": 410,
"usage_type": "call"
},
{
"api_name": "pants.engine.internals.selectors.Get",
"line_number": 417,
"usage_type": "call"
},
{
"api_name": "pants.backend.go.util_rules.go_mod.OwningGoMod",
"line_number": 417,
"usage_type": "argument"
},
{
"api_name": "pants.backend.go.util_rules.go_mod.OwningGoModRequest",
"line_number": 417,
"usage_type": "call"
},
{
"api_name": "pants.engine.internals.selectors.Get",
"line_number": 418,
"usage_type": "call"
},
{
"api_name": "pants.backend.go.dependency_inference.GoModuleImportPathsMapping",
"line_number": 419,
"usage_type": "argument"
},
{
"api_name": "pants.backend.go.target_type_rules.GoImportPathMappingRequest",
"line_number": 419,
"usage_type": "call"
},
{
"api_name": "pants.backend.go.util_rules.build_pkg.FallibleBuildGoPackageRequest",
"line_number": 426,
"usage_type": "call"
},
{
"api_name": "pants.util.strutil.softwrap",
"line_number": 430,
"usage_type": "call"
},
{
"api_name": "pants.engine.internals.selectors.Get",
"line_number": 438,
"usage_type": "call"
},
{
"api_name": "pants.backend.go.util_rules.build_pkg.FallibleBuildGoPackageRequest",
"line_number": 439,
"usage_type": "argument"
},
{
"api_name": "pants.engine.rules.rule",
"line_number": 400,
"usage_type": "name"
},
{
"api_name": "pants.backend.go.util_rules.build_pkg.FallibleBuildGoPackageRequest",
"line_number": 403,
"usage_type": "name"
},
{
"api_name": "pants.backend.codegen.protobuf.protoc.Protoc",
"line_number": 451,
"usage_type": "name"
},
{
"api_name": "pants.engine.platform.Platform",
"line_number": 453,
"usage_type": "name"
},
{
"api_name": "pants.engine.internals.selectors.MultiGet",
"line_number": 459,
"usage_type": "call"
},
{
"api_name": "pants.engine.internals.selectors.Get",
"line_number": 460,
"usage_type": "call"
},
{
"api_name": "pants.core.util_rules.external_tool.DownloadedExternalTool",
"line_number": 460,
"usage_type": "argument"
},
{
"api_name": "pants.core.util_rules.external_tool.ExternalToolRequest",
"line_number": 460,
"usage_type": "argument"
},
{
"api_name": "pants.engine.internals.selectors.Get",
"line_number": 461,
"usage_type": "call"
},
{
"api_name": "pants.engine.fs.Digest",
"line_number": 461,
"usage_type": "argument"
},
{
"api_name": "pants.engine.fs.CreateDigest",
"line_number": 461,
"usage_type": "call"
},
{
"api_name": "pants.engine.fs.Directory",
"line_number": 461,
"usage_type": "call"
},
{
"api_name": "pants.engine.internals.selectors.Get",
"line_number": 462,
"usage_type": "call"
},
{
"api_name": "pants.engine.target.TransitiveTargets",
"line_number": 462,
"usage_type": "argument"
},
{
"api_name": "pants.engine.target.TransitiveTargetsRequest",
"line_number": 462,
"usage_type": "call"
},
{
"api_name": "pants.engine.internals.selectors.MultiGet",
"line_number": 467,
"usage_type": "call"
},
{
"api_name": "pants.engine.internals.selectors.Get",
"line_number": 468,
"usage_type": "call"
},
{
"api_name": "pants.core.util_rules.stripped_source_files.StrippedSourceFiles",
"line_number": 469,
"usage_type": "argument"
},
{
"api_name": "pants.core.util_rules.source_files.SourceFilesRequest",
"line_number": 470,
"usage_type": "call"
},
{
"api_name": "pants.backend.codegen.protobuf.target_types.ProtobufSourceField",
"line_number": 471,
"usage_type": "name"
},
{
"api_name": "pants.backend.codegen.protobuf.target_types.ProtobufSourceField",
"line_number": 473,
"usage_type": "argument"
},
{
"api_name": "pants.engine.internals.selectors.Get",
"line_number": 476,
"usage_type": "call"
},
{
"api_name": "pants.core.util_rules.stripped_source_files.StrippedSourceFiles",
"line_number": 477,
"usage_type": "argument"
},
{
"api_name": "pants.core.util_rules.source_files.SourceFilesRequest",
"line_number": 477,
"usage_type": "call"
},
{
"api_name": "pants.backend.codegen.protobuf.target_types.ProtobufSourceField",
"line_number": 477,
"usage_type": "name"
},
{
"api_name": "pants.engine.internals.selectors.Get",
"line_number": 481,
"usage_type": "call"
},
{
"api_name": "pants.engine.fs.Digest",
"line_number": 482,
"usage_type": "argument"
},
{
"api_name": "pants.engine.fs.MergeDigests",
"line_number": 482,
"usage_type": "call"
},
{
"api_name": "pants.backend.codegen.protobuf.target_types.ProtobufGrpcToggleField",
"line_number": 486,
"usage_type": "argument"
},
{
"api_name": "pants.engine.internals.selectors.Get",
"line_number": 492,
"usage_type": "call"
},
{
"api_name": "pants.engine.process.ProcessResult",
"line_number": 493,
"usage_type": "argument"
},
{
"api_name": "pants.engine.process.Process",
"line_number": 494,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 496,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 496,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 497,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 497,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 498,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 498,
"usage_type": "attribute"
},
{
"api_name": "pants.util.logging.LogLevel.DEBUG",
"line_number": 512,
"usage_type": "attribute"
},
{
"api_name": "pants.util.logging.LogLevel",
"line_number": 512,
"usage_type": "name"
},
{
"api_name": "pants.engine.internals.selectors.MultiGet",
"line_number": 517,
"usage_type": "call"
},
{
"api_name": "pants.engine.internals.selectors.Get",
"line_number": 518,
"usage_type": "call"
},
{
"api_name": "pants.engine.fs.Digest",
"line_number": 518,
"usage_type": "argument"
},
{
"api_name": "pants.engine.fs.RemovePrefix",
"line_number": 518,
"usage_type": "call"
},
{
"api_name": "pants.engine.internals.selectors.Get",
"line_number": 519,
"usage_type": "call"
},
{
"api_name": "pants.source.source_root.SourceRoot",
"line_number": 519,
"usage_type": "argument"
},
{
"api_name": "pants.source.source_root.SourceRootRequest",
"line_number": 519,
"usage_type": "argument"
},
{
"api_name": "pants.source.source_root.SourceRootRequest.for_target",
"line_number": 519,
"usage_type": "call"
},
{
"api_name": "pants.engine.internals.selectors.Get",
"line_number": 523,
"usage_type": "call"
},
{
"api_name": "pants.engine.fs.Snapshot",
"line_number": 523,
"usage_type": "argument"
},
{
"api_name": "pants.engine.fs.AddPrefix",
"line_number": 523,
"usage_type": "call"
},
{
"api_name": "pants.engine.internals.selectors.Get",
"line_number": 525,
"usage_type": "call"
},
{
"api_name": "pants.engine.fs.Snapshot",
"line_number": 525,
"usage_type": "argument"
},
{
"api_name": "pants.engine.fs.Digest",
"line_number": 525,
"usage_type": "argument"
},
{
"api_name": "pants.engine.target.GeneratedSources",
"line_number": 527,
"usage_type": "call"
},
{
"api_name": "pants.engine.rules.rule",
"line_number": 448,
"usage_type": "call"
},
{
"api_name": "pants.util.logging.LogLevel.DEBUG",
"line_number": 448,
"usage_type": "attribute"
},
{
"api_name": "pants.util.logging.LogLevel",
"line_number": 448,
"usage_type": "name"
},
{
"api_name": "pants.engine.target.GeneratedSources",
"line_number": 454,
"usage_type": "name"
},
{
"api_name": "pants.engine.internals.selectors.Get",
"line_number": 570,
"usage_type": "call"
},
{
"api_name": "pants.engine.fs.Digest",
"line_number": 571,
"usage_type": "argument"
},
{
"api_name": "pants.engine.fs.CreateDigest",
"line_number": 572,
"usage_type": "call"
},
{
"api_name": "pants.engine.fs.FileContent",
"line_number": 574,
"usage_type": "call"
},
{
"api_name": "pants.engine.fs.FileContent",
"line_number": 575,
"usage_type": "call"
},
{
"api_name": "pants.engine.internals.selectors.Get",
"line_number": 580,
"usage_type": "call"
},
{
"api_name": "pants.engine.process.ProcessResult",
"line_number": 581,
"usage_type": "argument"
},
{
"api_name": "pants.backend.go.util_rules.sdk.GoSdkProcess",
"line_number": 582,
"usage_type": "call"
},
{
"api_name": "pants.engine.internals.selectors.MultiGet",
"line_number": 591,
"usage_type": "call"
},
{
"api_name": "pants.engine.internals.selectors.Get",
"line_number": 592,
"usage_type": "call"
},
{
"api_name": "pants.engine.process.ProcessResult",
"line_number": 593,
"usage_type": "argument"
},
{
"api_name": "pants.backend.go.util_rules.sdk.GoSdkProcess",
"line_number": 594,
"usage_type": "call"
},
{
"api_name": "pants.engine.internals.selectors.Get",
"line_number": 601,
"usage_type": "call"
},
{
"api_name": "pants.engine.process.ProcessResult",
"line_number": 602,
"usage_type": "argument"
},
{
"api_name": "pants.backend.go.util_rules.sdk.GoSdkProcess",
"line_number": 603,
"usage_type": "call"
},
{
"api_name": "pants.engine.internals.native_engine.EMPTY_DIGEST",
"line_number": 614,
"usage_type": "name"
},
{
"api_name": "pants.engine.internals.native_engine.EMPTY_DIGEST",
"line_number": 620,
"usage_type": "name"
},
{
"api_name": "pants.engine.internals.selectors.Get",
"line_number": 627,
"usage_type": "call"
},
{
"api_name": "pants.engine.fs.Digest",
"line_number": 628,
"usage_type": "argument"
},
{
"api_name": "pants.engine.fs.MergeDigests",
"line_number": 629,
"usage_type": "call"
},
{
"api_name": "pants.engine.internals.selectors.Get",
"line_number": 633,
"usage_type": "call"
},
{
"api_name": "pants.engine.fs.Digest",
"line_number": 633,
"usage_type": "argument"
},
{
"api_name": "pants.engine.fs.RemovePrefix",
"line_number": 633,
"usage_type": "call"
},
{
"api_name": "pants.engine.rules.rule",
"line_number": 568,
"usage_type": "name"
},
{
"api_name": "pants.engine.rules.collect_rules",
"line_number": 639,
"usage_type": "call"
},
{
"api_name": "pants.engine.unions.UnionRule",
"line_number": 640,
"usage_type": "call"
},
{
"api_name": "pants.engine.target.GenerateSourcesRequest",
"line_number": 640,
"usage_type": "argument"
},
{
"api_name": "pants.engine.unions.UnionRule",
"line_number": 641,
"usage_type": "call"
},
{
"api_name": "pants.backend.go.util_rules.build_pkg_target.GoCodegenBuildRequest",
"line_number": 641,
"usage_type": "argument"
},
{
"api_name": "pants.engine.unions.UnionRule",
"line_number": 642,
"usage_type": "call"
},
{
"api_name": "pants.backend.go.dependency_inference.GoModuleImportPathsMappingsHook",
"line_number": 642,
"usage_type": "argument"
},
{
"api_name": "pants.backend.codegen.protobuf.target_types.ProtobufSourcesGeneratorTarget.register_plugin_field",
"line_number": 643,
"usage_type": "call"
},
{
"api_name": "pants.backend.go.target_types.GoOwningGoModAddressField",
"line_number": 643,
"usage_type": "argument"
},
{
"api_name": "pants.backend.codegen.protobuf.target_types.ProtobufSourcesGeneratorTarget",
"line_number": 643,
"usage_type": "name"
},
{
"api_name": "pants.backend.codegen.protobuf.target_types.ProtobufSourceTarget.register_plugin_field",
"line_number": 644,
"usage_type": "call"
},
{
"api_name": "pants.backend.go.target_types.GoOwningGoModAddressField",
"line_number": 644,
"usage_type": "argument"
},
{
"api_name": "pants.backend.codegen.protobuf.target_types.ProtobufSourceTarget",
"line_number": 644,
"usage_type": "name"
},
{
"api_name": "pants.backend.go.util_rules.assembly.rules",
"line_number": 646,
"usage_type": "call"
},
{
"api_name": "pants.backend.go.util_rules.assembly",
"line_number": 646,
"usage_type": "name"
},
{
"api_name": "pants.backend.go.util_rules.build_pkg.rules",
"line_number": 647,
"usage_type": "call"
},
{
"api_name": "pants.backend.go.util_rules.build_pkg",
"line_number": 647,
"usage_type": "name"
},
{
"api_name": "pants.backend.go.util_rules.build_pkg_target.rules",
"line_number": 648,
"usage_type": "call"
},
{
"api_name": "pants.backend.go.util_rules.build_pkg_target",
"line_number": 648,
"usage_type": "name"
},
{
"api_name": "pants.backend.go.util_rules.first_party_pkg.rules",
"line_number": 649,
"usage_type": "call"
},
{
"api_name": "pants.backend.go.util_rules.first_party_pkg",
"line_number": 649,
"usage_type": "name"
},
{
"api_name": "pants.backend.go.util_rules.go_mod.rules",
"line_number": 650,
"usage_type": "call"
},
{
"api_name": "pants.backend.go.util_rules.go_mod",
"line_number": 650,
"usage_type": "name"
},
{
"api_name": "pants.backend.go.util_rules.link.rules",
"line_number": 651,
"usage_type": "call"
},
{
"api_name": "pants.backend.go.util_rules.link",
"line_number": 651,
"usage_type": "name"
},
{
"api_name": "pants.backend.go.util_rules.sdk.rules",
"line_number": 652,
"usage_type": "call"
},
{
"api_name": "pants.backend.go.util_rules.sdk",
"line_number": 652,
"usage_type": "name"
},
{
"api_name": "pants.backend.go.target_type_rules.rules",
"line_number": 653,
"usage_type": "call"
},
{
"api_name": "pants.backend.go.target_type_rules",
"line_number": 653,
"usage_type": "name"
},
{
"api_name": "pants.backend.go.util_rules.third_party_pkg.rules",
"line_number": 654,
"usage_type": "call"
},
{
"api_name": "pants.backend.go.util_rules.third_party_pkg",
"line_number": 654,
"usage_type": "name"
},
{
"api_name": "pants.backend.python.util_rules.pex.rules",
"line_number": 655,
"usage_type": "call"
},
{
"api_name": "pants.backend.python.util_rules.pex",
"line_number": 655,
"usage_type": "name"
}
] |
33850641451
|
# Problem 0002: save the 200 generated activation codes in a MySQL relational database
import random, string
import pymysql
def get_string(num, length=10):
codes = []
chars = string.ascii_uppercase + string.digits
for i in range(num):
one_code = random.sample(chars, length)
codes.append(''.join(one_code))
return codes
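# For example, get_string(2) might return ['K7Q2M9XCJ1', 'D4T8B0WELS'] (values are
# illustrative): each code has 10 unique characters, since random.sample draws
# without replacement.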
def save_code_mysql():
try:
        conn = pymysql.connect(host='localhost', user='root', password='123456', charset='utf8')  # pymysql expects lowercase charset names
cur = conn.cursor()
except BaseException as e:
print(e)
else:
try:
cur.execute("CREATE DATABASE IF NOT EXISTS code_mysql")
cur.execute("USE code_mysql")
cur.execute("CREATE TABLE IF NOT EXISTS codes (id INT AUTO_INCREMENT PRIMARY KEY, code VARCHAR(32))")
codes = get_string(200)
for code in codes:
cur.execute("INSERT INTO codes(code) values(%s)", (code))
conn.commit()
cur.execute("SELECT * FROM codes")
result = cur.fetchall()
for i in result:
print(i)
except BaseException as e:
print(e)
finally:
cur.close()
conn.close()
if __name__ == '__main__':
save_code_mysql()
|
akenYu/learnpy
|
showme/02/savemysql.py
|
savemysql.py
|
py
| 1,070 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "string.ascii_uppercase",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "string.digits",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "random.sample",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pymysql.connect",
"line_number": 18,
"usage_type": "call"
}
] |
35374859965
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 8 22:42:38 2022
@author: sanggupark
"""
import numpy as np
import traci
from dataclasses import dataclass
import math
from shapely.geometry import LineString, Point
from SimpleMath import create_vehicle_shape
@dataclass(init = True)
class Object_sensed:
ID: str
xpos: float
ypos: float
vel: float
angle: float
width: float
length: float
acc_max: float = 4.0
dec_max: float = 7.0
dec_min: float = 2.0
response: float = 0.2
blinker: int = 0
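# A hypothetical instantiation, to illustrate the positional field order (all
# values made up): Object_sensed("veh0", 10.0, 5.0, 12.3, 90.0, 1.8, 4.5)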
def lidar_sensing(ego, veh_other):
xpos = ego.xpos
ypos = ego.ypos
length = ego.length
rad = np.radians(ego.angle)
p_tail = Point([xpos-(length)*math.sin(rad),
ypos-(length)*math.cos(rad)])
# FOV control
if "LF" in ego.behavior:
angles = np.linspace(rad+(ego.sensor.fov/4), rad-(ego.sensor.fov/4), 100)
elif "LC_R" in ego.behavior:
angles = np.linspace(rad+(ego.sensor.fov/2.0), rad-(ego.sensor.fov/2.0), 100)
elif "LC_L" in ego.behavior:
angles = np.linspace(rad+(ego.sensor.fov/2.0), rad-(ego.sensor.fov/2.0), 100)
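    # np.linspace fans 100 ray angles around the heading: a narrower cone
    # (±fov/4) while lane-following, a wider one (±fov/2) during lane changes.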
distance = ego.sensor.radius * 1.00
lines = []
for angle in angles:
line = LineString([[p_tail.x, p_tail.y], [p_tail.x + distance*math.sin(angle), p_tail.y + distance*math.cos(angle)]])
lines.append(line)
vehicles_sensed = []
follower = traci.vehicle.getFollower(ego.ID, dist=5.0)[0]
""" LIDAR Sensing """
for veh in veh_other:
is_detected = False
poly_veh = create_vehicle_shape(veh)
if veh.ID != ego.ID:
for line in lines:
is_detected = poly_veh.intersects(line)
if is_detected:
break
if is_detected and not (veh.ID == follower):
vehicles_sensed.append(veh)
return vehicles_sensed
def blinker_sensing(ego, vehicles_sensed):
""" Blinker Sensing """
for veh in vehicles_sensed:
blinker = traci.vehicle.getSignals(veh.ID)
        # If LF
if blinker == 0:
veh.blinker = 0
        # If LC_R
elif blinker == 1:
veh.blinker = -1
        # If LC_L
elif blinker == 2:
veh.blinker = 1
return vehicles_sensed
def update_prev_info(ego, vehicles_sensed):
""" Update Old info """
for veh in vehicles_sensed:
if 'auv' in veh.ID:
object_add = Object_sensed(veh.ID, veh.xpos, veh.ypos, veh.vel, veh.angle, veh.width, veh.length, blinker=veh.blinker)
elif 'huv' in veh.ID:
blinker = traci.vehicle.getSignals(veh.ID)
            if blinker == 1:
                blinker = -1
            elif blinker == 2:  # SUMO signal bit 2 = left blinker, mirroring blinker_sensing() above
                blinker = 1
            else:
                blinker = 0
            object_add = Object_sensed(veh.ID, veh.xpos, veh.ypos, veh.vel, veh.angle, veh.width, veh.length, blinker=blinker)  # use the mapped value instead of re-querying and negating the raw signal
if len(ego.objects_sensed):
flag = False
for obj in ego.objects_sensed:
if obj.ID == object_add.ID:
# replacement due to overlaps
ego.objects_sensed[np.where(ego.objects_sensed==obj)] = object_add
flag = True
if not flag:
# add if no overlaps
ego.objects_sensed = np.append(ego.objects_sensed, object_add)
else:
# if the list is empty
ego.objects_sensed = np.append(ego.objects_sensed, object_add)
return
|
sanggu-park/blaft_simulation
|
Sensing.py
|
Sensing.py
|
py
| 3,640 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "dataclasses.dataclass",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.radians",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "shapely.geometry.Point",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "math.sin",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "math.cos",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "shapely.geometry.LineString",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "math.sin",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "math.cos",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "traci.vehicle.getFollower",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "traci.vehicle",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "SimpleMath.create_vehicle_shape",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "traci.vehicle.getSignals",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "traci.vehicle",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "traci.vehicle.getSignals",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "traci.vehicle",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "traci.vehicle.getSignals",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "traci.vehicle",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "numpy.where",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 122,
"usage_type": "call"
}
] |
71674830588
|
import genanki
import functools
import os
TRUE_FALSE_MODEL_ID = 1803127777
@functools.lru_cache()
def load_true_false_model():
data = {}
for fname in ['fields.json', 'templates.yaml', 'cards.css']:
path = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'true_false_model',
fname)
with open(path) as f:
data[fname] = f.read()
return genanki.Model(
TRUE_FALSE_MODEL_ID,
'Anatomy True False',
fields=data['fields.json'],
templates=data['templates.yaml'],
css=data['cards.css'],
)
class AnatomyTrueFalseNote(genanki.Note):
def __init__(self, *args, **kwargs):
super().__init__(load_true_false_model(), *args, **kwargs)
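# Usage sketch -- the field names/values come from fields.json (not shown here),
# so this example is a placeholder only:
#   note = AnatomyTrueFalseNote(fields=['The femur is the longest bone.', 'True'])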
MULTIPLE_CHOICE_MODEL_ID = 1803127778
@functools.lru_cache()
def load_multiple_choice_model():
data = {}
for fname in ['fields.json', 'templates.yaml', 'cards.css']:
path = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'multiple_choice_model',
fname)
with open(path) as f:
data[fname] = f.read()
return genanki.Model(
MULTIPLE_CHOICE_MODEL_ID,
'Anatomy Multiple Choice',
fields=data['fields.json'],
templates=data['templates.yaml'],
css=data['cards.css'],
)
class AnatomyMultipleChoiceNote(genanki.Note):
def __init__(self, *args, **kwargs):
super().__init__(load_multiple_choice_model(), *args, **kwargs)
|
kerrickstaley/anatomyquestions
|
note.py
|
note.py
|
py
| 1,414 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.path.join",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "genanki.Model",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "functools.lru_cache",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "genanki.Note",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "genanki.Model",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "functools.lru_cache",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "genanki.Note",
"line_number": 50,
"usage_type": "attribute"
}
] |
70167475709
|
#!/usr/bin/env python
from setuptools import setup, Extension
import os
from os import popen
from os.path import dirname, join
class lazy_cythonize(list):
def __init__(self, callback):
self._list = None
self.callback = callback
def c_list(self):
if self._list is None:
self._list = self.callback()
return self._list
def __iter__(self):
return iter(self.c_list())
def __getitem__(self, ii):
return self.c_list()[ii]
def __len__(self):
return len(self.c_list())
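# lazy_cythonize defers its callback (extensions(), which imports Cython) until
# ext_modules is first iterated or len()'d -- the classic recipe for not requiring
# Cython at setup.py import time.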
# for CWB 2.2
#extra_libs = []
# for CWB >= 3.0
extra_libs = ['pcre', 'glib-2.0']
if 'CWB_DIR' in os.environ:
cqp_dir = os.environ['CWB_DIR']
else:
cqp_location = popen('which cqp').read().rstrip()
cqp_dir = dirname(cqp_location)
def extensions():
try:
from Cython.Build import cythonize
incdirs = ['src', join(cqp_dir, 'include')]
except ImportError:
cythonize = lambda x: x
incdirs = []
ext_modules = [Extension('CWB.CL', ['src/CWB/CL.pyx'],
include_dirs=incdirs,
library_dirs=[join(cqp_dir, 'lib')],
libraries=['cl'] + extra_libs)]
return cythonize(ext_modules)
def read(fname):
return open(fname).read()
setup(
name='cwb-python',
description='CQP and CL interfaces for Python',
author='Yannick Versley / Jorg Asmussen',
version='0.2.1',
author_email='[email protected]',
url='https://bitbucket.org/yannick/cwb-python',
ext_modules=lazy_cythonize(extensions),
py_modules=['PyCQP_interface'],
packages=['CWB', 'CWB.tools'],
long_description=read('README'),
entry_points={
'console_scripts': [
'cqp2conll = CWB.tools.cqp2conll:main',
'cqp_bitext = CWB.tools.make_bitext:main',
'cqp_vocab = CWB.tools.cqp2vocab:cqp2vocab_main'
]},
install_requires=['setuptools>=17', 'cython>=0.19', 'six'],
package_dir={'': 'py_src'})
|
bogdanbabych/paralex4cfields
|
tests/cwb-python/setup.py
|
setup.py
|
py
| 2,041 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.environ",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "os.popen",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "Cython.Build.cythonize",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "setuptools.Extension",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "Cython.Build.cythonize",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "setuptools.setup",
"line_number": 56,
"usage_type": "call"
}
] |
3990110801
|
from functions import *
from create_directory import *
from Crypto.Cipher import AES
import os
import shutil
import time
import sys  # sys.exit() is called below; don't rely on the star-imports to provide it
home = user_home()
if os.path.exists(home + "DataShareSecure") == False:
print("\nNous vous prions de lire le fichier \"Readme.txt\" et de suivre ces consignes.\n")
sys.exit()
print("BIENVENUE DANS CE PROGRAMME DE DECHIFFREMENT DE FICHIERS\n")
print("######### BON À SAVOIR ##########\n")
print("Vous exécutez ce programme stipule que:\n\n"
"1- Vous avez pris connaissance du fonctionnement de DataShareSecure grâce au \"Readme.txt\" \n"
"2- Vous avez exécuté le programme \"Public_Key_Manage.py\" au moins une fois et disposer donc d'une "
"paire de clés\n"
"3- Vous désirez déchiffrer des fichiers que vous avez reçus d'un correspondant\n")
print("Si vous ne remplissez pas toutes les conditions du \"BON À SAVOIR\", je vous invite à fermer ce programme.\n"
"Et à prendre le temps de remplir ces conditions.\n")
choix = input("Remplissez-vous les conditions sus-cités ? (O)ui ou (N)on : ")
if choix == 'O' or choix == 'o':
print("\nBien. Nous pouvons donc continuer\n")
vide_directory(home + "DataShareSecure/Encrypted")
vide_directory(home + "DataShareSecure/Decrypted")
os.chdir(home + "DataShareSecure/Received")
path = home + 'DataShareSecure/Received/key_used'
with open(path, "r") as file:
key_encrypted = file.read()
key = dechiffrer(key_encrypted)
buffer_size = 65536 # 64kb
########## MOVE FILE ############
print("######## DECHIFFREMENT DES FICHIERS ET VERIFIVATION DES SIGNATURES ######## \n")
file_dir = []
file = [f for f in os.listdir(home + "DataShareSecure/Received") if os.path.isfile(f)]
for f in file:
if ".dss" in f:
shutil.copy(f, home + "DataShareSecure/Encrypted")
elif ".asc" in f:
shutil.copy(f, home + "DataShareSecure/Decrypted")
########## DECRYPT ###############
print("\n############# DECHIFFREMENT DES FICHIERS REÇUES ############\n")
os.chdir(home + "DataShareSecure/Encrypted")
files_dir = []
files = [f for f in os.listdir(home + "DataShareSecure/Encrypted") if os.path.isfile(f)]
for f in files:
files_dir.append(f)
for x in files_dir:
with open(home + "DataShareSecure/Encrypted/" + x, "rb") as f:
f.seek(0)
path = home + 'DataShareSecure/Decrypted/' + x
output_file = open(path[:-4], "wb")
iv = f.read(16)
cipher_encrypt = AES.new(key, AES.MODE_CFB, iv=iv)
buffer = f.read(buffer_size)
while len(buffer) > 0:
decrypted_bytes = cipher_encrypt.decrypt(buffer)
output_file.write(decrypted_bytes)
buffer = f.read(buffer_size)
print("Vos fichiers déchiffrés sont enregistrés dans le repertoire \"Decrypted\". \n")
########## VERIFY SIGNATURE ###############
print("\n############ VERIFICATION DES FICHERS REÇUES #################\n")
os.chdir(home + "DataShareSecure/Decrypted/")
files_dir = []
files = [f for f in os.listdir(home + "DataShareSecure/Decrypted/") if os.path.isfile(f)]
for f in files:
if ".asc" in f:
files_dir.append(f)
for x in files_dir:
with open(home + "DataShareSecure/Decrypted/" + x, "rb") as f:
file = x[:-4]
verified = gpg.verify_file(f, file)
print(file + " : ", verified.status + "")
print("\nNOUS VOICI À LA FIN\n")
|
Su1M01/DataShareSecure
|
Receiver.py
|
Receiver.py
|
py
| 3,616 |
python
|
fr
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.path.exists",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "os.chdir",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "shutil.copy",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "shutil.copy",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "Crypto.Cipher.AES.new",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "Crypto.Cipher.AES",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "Crypto.Cipher.AES.MODE_CFB",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "os.chdir",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 94,
"usage_type": "attribute"
}
] |
26061079286
|
import nltk
from nltk.tokenize import *
import numpy as np
#--------------------------------------------------------
alphabet = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
punctuation = ['?',',','!','.',':',';']
char_count= [0] * len(alphabet)
punctuation_count = [0] * len(punctuation)
#--------------------------------------------------------
# PART OF SPEECH STUFF
#--------------------------------------------------------
#part of speech ratios + lexical variety
# - determiners
# - prepositions
# - pronouns
# - modal auxiliary-verbs -> CAN, COULD, WILL, WOULD
# - adverbs
# - coord-conjuctions
# - nouns
# - proper-nouns
# - adjectives
# - verbs
# - lexical variety = nouns + proper_nouns + adjectives + verbs + adverbs
pronouns_list = ['PRP', 'PRP$', 'WP', 'WP$']
adverbs_list = ['RB' ,'RBR', 'RBS', 'WRB']
adjectives_list = ['JJ', 'JJR', 'JJS']
verbs_list = ['VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ']
pos_ratios = [0] * 11
avg_sentence_length = 0
avg_word_length = 0
total_words = 0
#--------------------------------------------------------
def main():
np.set_printoptions(suppress=True)
features = []
text = open("training_set\Abraham Lincoln\Abraham Lincoln___Lincoln Letters.txt").read()
#total useful char
t_u_c = total_useful_char(text)
    total_punctuation = count_punctuation(text)
total_words = len(word_tokenize(text))
#FEATURES 1 - 26
letters_frequency(text, t_u_c)
#FEATURES 27 - 32
    punctuation_frequency(text, total_punctuation)
    #FEATURES 33 - 43
part_of_speech_ratios(text, total_words)
#FEATURES 44 - 45
avg_sentence_length = average_sentence_length(text)
avg_word_length = average_word_length(text)
features.extend(char_count)
features.extend(punctuation_count)
features.extend(pos_ratios)
features.append(avg_sentence_length)
features.append(avg_word_length)
features.append(total_words)
features = np.array(features).reshape(-1,1)
print("\n\n FEATURES final array: \n", features)
print(features.shape)
def average_word_length(text):
words = word_tokenize(text)
sum = 0
for word in words:
sum += len(word)
return sum/len(words)
def average_sentence_length(text):
sentences = sent_tokenize(text)
sum = 0
for sentence in sentences:
sum += len(word_tokenize(sentence))
return sum/len(sentences)
def count_punctuation(text):
return text.count('?') + text.count(',') + text.count('!') + text.count('.') + text.count(':') + text.count(';')
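# e.g. count_punctuation("Hi, there!") == 2  (one comma, one exclamation mark)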
def total_useful_char(text):
return len(text) - text.count(" ") - text.count("\n")
def letters_frequency(text, tChar):
for char in text.lower():
if char in alphabet:
char_count[alphabet.index(char)] += 1
    for i in range(len(char_count)):  # iterate by index; list.index(value) misbehaves when two letters share a count
        char_count[i] /= tChar
def punctuation_frequency(text, total_punctuation):
for char in text:
if char in punctuation:
punctuation_count[punctuation.index(char)] += 1
    for i in range(len(punctuation_count)):  # same index-based fix as letters_frequency
        punctuation_count[i] /= total_punctuation
def part_of_speech_ratios(text, total_words):
words = word_tokenize(text)
tagged_words = nltk.pos_tag(words)
# lexical variety = nouns + proper_nouns + adjectives + verbs + adverbs
for tagged_word in tagged_words:
is_a_pronoun = [pronoun for pronoun in pronouns_list if(pronoun in tagged_word)]
is_a_adverb = [adverb for adverb in adverbs_list if(adverb in tagged_word)]
is_a_adjective = [adjective for adjective in adjectives_list if(adjective in tagged_word)]
is_a_verb = [verb for verb in verbs_list if(verb in tagged_word)]
if 'DT' in tagged_word:
pos_ratios[0] += 1
elif 'IN' in tagged_word:
pos_ratios[1] += 1
elif is_a_pronoun:
pos_ratios[2] += 1
elif 'MD' in tagged_word:
pos_ratios[3] += 1
elif is_a_adverb:
pos_ratios[4] += 1
pos_ratios[10] += 1
elif 'CC' in tagged_word:
pos_ratios[5] += 1
elif ('NN' in tagged_word or 'NNS' in tagged_word):
pos_ratios[6] += 1
pos_ratios[10] += 1
elif ('NNP' in tagged_word or 'NNPS' in tagged_word):
pos_ratios[7] += 1
pos_ratios[10] += 1
elif is_a_adjective:
pos_ratios[8] += 1
pos_ratios[10] += 1
elif is_a_verb:
pos_ratios[9] += 1
pos_ratios[10] += 1
    for i in range(len(pos_ratios)):  # index-based normalization, as above
        pos_ratios[i] /= total_words
if __name__ == '__main__':
main()
|
andresOchoaHernandez/AuthorshipRecognition
|
PythonPrototype/extract_features.py
|
extract_features.py
|
py
| 4,777 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.set_printoptions",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "nltk.pos_tag",
"line_number": 129,
"usage_type": "call"
}
] |
71842718268
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable #autograd oops
import torch.optim as optim
# core code of TRADES
# n.b. where does the author use the l2-norm? (l2_norm below appears unused in this file)
def squared_l2_norm(x):
flattened = x.view(x.unsqueeze(0).shape[0], -1)
return (flattened ** 2).sum(1)
def l2_norm(x):
return squared_l2_norm(x).sqrt()
# core function for TRADES calculating traded_loss
def trades_loss(model,
x_natural,
y,
optimizer,
step_size=0.003,
epsilon=0.031,
perturb_steps=10,
beta=1.0, # the coeff of second term
distance='l_inf'):
# define KL-loss for inner maximization https://pytorch.org/docs/stable/generated/torch.nn.KLDivLoss.html?highlight=kldivloss#torch.nn.KLDivLoss
# If the field size_average is set to False, the losses are instead summed for each minibatch.
    criterion_kl = nn.KLDivLoss(size_average=False)  # note: newer torch spells this reduction='sum'
    # how to use the loss: f_loss(*args)(input) <- two parentheses
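    # e.g. (mirrors the actual call further down; tensor names are illustrative):
    #   loss_kl = criterion_kl(F.log_softmax(model(x_adv), dim=1),
    #                          F.softmax(model(x_natural), dim=1))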
#eval() for BN and Dropout
model.eval()
# feed x_natural here into the loss as a batch
batch_size = len(x_natural)
# generate adversarial example
# initiate an x_adv for skipping the concave
x_adv = x_natural.detach() + 0.001 * torch.randn(x_natural.shape).cpu().detach()
# detach() tensor won't give it grad calculations anymore.
if distance == 'l_inf': # L-infinity ball # no random start here
for _ in range(perturb_steps): # FGSM_k
x_adv.requires_grad_() # start from x_adv
with torch.enable_grad(): # enable_grad vs no_grad
# For the maximization problem, using torch.nn.KLDivLoss and cross entropy is equivalent because they differ by a constant additive term.
loss_kl = criterion_kl(F.log_softmax(model(x_adv), dim=1),
F.softmax(model(x_natural), dim=1)) # why first term log while second term origin: because in the loss_criteria, there is no "log_target = True"
grad = torch.autograd.grad(loss_kl, [x_adv])[0] # Computes and returns the sum of gradients of outputs w.r.t. the inputs.
x_adv = x_adv.detach() + step_size * torch.sign(grad.detach())
            # project back onto the L-infinity epsilon-ball around x_natural
x_adv = torch.min(torch.max(x_adv, x_natural - epsilon), x_natural + epsilon)
            # clamp to the valid image range [0, 1]
x_adv = torch.clamp(x_adv, 0.0, 1.0)
elif distance == 'l_2':# L_2 we will come back later about l_2....not commented yet
delta = 0.001 * torch.randn(x_natural.shape).cpu().detach()
delta = Variable(delta.data, requires_grad=True)
# Setup optimizers
optimizer_delta = optim.SGD([delta], lr=epsilon / perturb_steps * 2)
for _ in range(perturb_steps):
adv = x_natural + delta
# optimize
optimizer_delta.zero_grad()
with torch.enable_grad():
loss = (-1) * criterion_kl(F.log_softmax(model(adv), dim=1),
F.softmax(model(x_natural), dim=1))
loss.backward()
# renorming gradient
grad_norms = delta.grad.view(batch_size, -1).norm(p=2, dim=1)
delta.grad.div_(grad_norms.view(-1, 1, 1, 1))
# avoid nan or inf if gradient is 0
if (grad_norms == 0).any():
delta.grad[grad_norms == 0] = torch.randn_like(delta.grad[grad_norms == 0])
optimizer_delta.step()
# projection
delta.data.add_(x_natural)
delta.data.clamp_(0, 1).sub_(x_natural)
delta.data.renorm_(p=2, dim=0, maxnorm=epsilon)
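            # the three lines above project delta back into the valid image box
            # and onto the epsilon L2 ball (renorm_ caps each sample's 2-norm)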
x_adv = Variable(x_natural + delta, requires_grad=False)
# not implemented for other losses
else:
x_adv = torch.clamp(x_adv, 0.0, 1.0)
# adding two losses: L(fx,y) , L(fx',fx)
model.train()
x_adv = Variable(torch.clamp(x_adv, 0.0, 1.0), requires_grad=False)
# not the main part, code related only
    # zero gradient again, zero_grad -> loss_back -> update?
optimizer.zero_grad()
# calculate robust loss
logits = model(x_natural) # pred of fx
loss_natural = F.cross_entropy(logits, y) # loss of fx,y
# loss of fx' fx
loss_robust = (1.0 / batch_size) * criterion_kl(F.log_softmax(model(x_adv), dim=1),
F.softmax(model(x_natural), dim=1))
loss = loss_natural + beta * loss_robust
return loss
|
yaoyugua/TRADES
|
TRADES-master/trades.py
|
trades.py
|
py
| 4,661 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "torch.nn.KLDivLoss",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "torch.randn",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "torch.enable_grad",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.log_softmax",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.softmax",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "torch.autograd.grad",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "torch.autograd",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "torch.sign",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "torch.min",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "torch.max",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "torch.clamp",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "torch.randn",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "torch.optim.SGD",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "torch.enable_grad",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.log_softmax",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.softmax",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "torch.randn_like",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "torch.clamp",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "torch.clamp",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.cross_entropy",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.log_softmax",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.softmax",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 117,
"usage_type": "name"
}
] |
25066459905
|
from django.contrib.auth import mixins
from oauth2_provider.contrib.rest_framework import (
OAuth2Authentication as BaseOAuth2Authentication,
)
from purplship.server.core.authentication import (
JWTAuthentication,
TokenAuthentication,
get_request_org,
)
class OAuth2Authentication(BaseOAuth2Authentication):
def authenticate(self, request):
auth = super().authenticate(request)
if auth is not None:
user, _ = auth
request.org = get_request_org(request, user)
return auth
class AccessMixin(mixins.AccessMixin):
"""Verify that the current user is authenticated."""
def dispatch(self, request, *args, **kwargs):
try:
auth = (
OAuth2Authentication().authenticate(request)
or JWTAuthentication().authenticate(request)
or TokenAuthentication().authenticate(request)
)
if auth is not None:
user, *_ = auth
request.user = user
finally:
return super().dispatch(request, *args, **kwargs)
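# Usage sketch (view and template names are hypothetical):
#   class DashboardView(AccessMixin, TemplateView):
#       template_name = "dashboard.html"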
|
danh91/purplship
|
insiders/server/iam/purplship/server/iam/authentication.py
|
authentication.py
|
py
| 1,109 |
python
|
en
|
code
| null |
github-code
|
6
|
[
{
"api_name": "oauth2_provider.contrib.rest_framework.OAuth2Authentication",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "purplship.server.core.authentication.get_request_org",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.mixins.AccessMixin",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.auth.mixins",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "purplship.server.core.authentication.JWTAuthentication",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "purplship.server.core.authentication.TokenAuthentication",
"line_number": 33,
"usage_type": "call"
}
] |
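A minimal usage sketch for the AccessMixin in the record above, assuming a standard Django class-based view; the view class and template name are hypothetical:
from django.views.generic import TemplateView
class DashboardView(AccessMixin, TemplateView):
    # dispatch() tries OAuth2, then JWT, then token authentication and
    # attaches the resolved user to the request before rendering.
    template_name = "dashboard.html"  # hypothetical template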
45897381316
|
import os
from PIL import Image
class ImageUpscaler:
def __init__(self, image_path, scale_factor):
self.image_path = image_path
self.scale_factor = scale_factor
def upscale_image(self, image_file):
# Open the image
image = Image.open(image_file)
# Calculate the new dimensions
width, height = image.size
new_width = int(width * self.scale_factor)
new_height = int(height * self.scale_factor)
# Resize the image
upscaled_image = image.resize((new_width, new_height), Image.BICUBIC)
# Save the upscaled image
upscaled_folder = os.path.join(self.image_path, 'upscaled')
os.makedirs(upscaled_folder, exist_ok=True)
file_name = os.path.splitext(os.path.basename(image_file))[0]
save_path = os.path.join(upscaled_folder, f'{file_name}_upscaled.png')
upscaled_image.save(save_path)
# print(f"Upscaled image saved: {save_path}")
def upscale_images_in_directory(self):
# Get a list of all image files in the directory
image_files = [
os.path.join(self.image_path, file_name)
for file_name in os.listdir(self.image_path)
if file_name.endswith(('.jpg', '.jpeg', '.png'))
]
for image_file in image_files:
self.upscale_image(image_file)
if __name__ == '__main__':
directory_path = '../private_keys'
scale_factor = 4 # Increase the dimensions by a factor of 4
upscaler = ImageUpscaler(directory_path, scale_factor)
upscaler.upscale_images_in_directory()
|
huju-tub/visual-cryptography-generator
|
classes/image_upscaler.py
|
image_upscaler.py
|
py
| 1,598 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "PIL.Image.open",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "PIL.Image.BICUBIC",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path.splitext",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "os.path.basename",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 36,
"usage_type": "call"
}
] |
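The record above resizes with Image.BICUBIC; on Pillow >= 9.1 the enum spelling Image.Resampling.BICUBIC is preferred, though the bare alias still works on current releases. A version-tolerant sketch:
from PIL import Image
try:
    BICUBIC = Image.Resampling.BICUBIC  # Pillow >= 9.1
except AttributeError:
    BICUBIC = Image.BICUBIC  # older Pillow releases
Usage is then image.resize((new_width, new_height), BICUBIC), exactly as in the class above.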
8469582185
|
from calculations import get_standings, get_current_track_data
from utils import get_player_name, get_telegram_name
from plots import timedelta_to_string
def info_about_current_weeks_ladder_changes(old_data, new_data):
new_data = get_current_track_data(new_data)
new_data = new_data[new_data["Origin"] == "Player"]
new_ladder = get_standings(new_data)
current_track = new_data["track_id"].unique()[0]
old_data = old_data[old_data["track_id"] == current_track]
old_data = old_data[old_data["Origin"] == "Player"]
old_ladder = get_standings(old_data)
player_overlap = list(set(new_ladder.index) & set(old_ladder.index))
new_ladder = new_ladder.loc[new_ladder.index.isin(player_overlap)]
old_ladder = old_ladder.loc[old_ladder.index.isin(player_overlap)]
changes = new_ladder.index != old_ladder.index
new_ladder = new_ladder[changes].reset_index().reset_index().set_index("Player")
old_ladder = old_ladder[changes].reset_index().reset_index().set_index("Player")
new_ladder["index_change"] = new_ladder["index"] - old_ladder["index"]
messages = []
for player in new_ladder.index.values:
overtakes = new_ladder.loc[player, "index_change"]
if not overtakes > 0:
continue
index = new_ladder.loc[player, "index"]
overtook = new_ladder[(new_ladder["index"] >= index-overtakes) & (new_ladder["index"] < index)].index.values
have_scored = old_data.loc[old_data["Origin"] == "Player", "Player"].unique()
overtook = ", ".join([get_telegram_name(p) for p in overtook if p in have_scored])
new_record = new_data.groupby(["Player", "track_id"])["Time"].min().loc[player, current_track]
messages.append(
f"{get_player_name(player)} scored a {timedelta_to_string(new_record)} and overtook {overtook}."
)
return messages
def info_about_new_times(old_data, new_data):
messages = []
new_entries_index = new_data[~new_data.isin(old_data)].dropna(how="all").index
new_entries = new_data.loc[new_entries_index]
for row_index, entry in new_entries.iterrows():
player_name = entry["Player"]
new_record = timedelta_to_string(entry["Time"])
track = entry["Track"]
message = f"{track}: {get_player_name(player_name)} scored a new record of {new_record}!"
messages.append(message)
return messages
|
Excidion/trackmania_nations_challenge_bot
|
messages.py
|
messages.py
|
py
| 2,399 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "calculations.get_current_track_data",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "calculations.get_standings",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "calculations.get_standings",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "utils.get_telegram_name",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "utils.get_player_name",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "plots.timedelta_to_string",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "plots.timedelta_to_string",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "utils.get_player_name",
"line_number": 47,
"usage_type": "call"
}
] |
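A toy illustration of the rank-comparison idea behind the ladder messages above; the player names and positions are made up, and the repo's get_standings/get_current_track_data helpers are assumed to produce standings ranked best-first:
import pandas as pd
old_rank = pd.Series({"ana": 0, "ben": 1, "cal": 2})  # position, best = 0
new_rank = pd.Series({"ben": 0, "ana": 1, "cal": 2})  # after ben's new record
gained = old_rank - new_rank                          # positive = moved up
overtakers = gained[gained > 0].index.tolist()        # -> ["ben"]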
30357818911
|
from pyface.qt import QtCore, QtGui, is_qt4
from pyface.image_resource import ImageResource
from pyface.timer.api import do_later
from pyface.ui_traits import Image
from traits.api import (
Any,
Bool,
Button,
Dict,
Event,
List,
HasTraits,
Instance,
Int,
Property,
Str,
cached_property,
observe,
)
from traitsui.api import (
EnumEditor,
InstanceEditor,
Group,
Item,
Label,
ObjectColumn,
TableColumn,
TableFilter,
UI,
View,
default_handler,
spring,
)
from traitsui.editors.table_editor import (
BaseTableEditor,
ReversedList,
customize_filter,
)
from traitsui.ui_traits import SequenceTypes
from .editor import Editor
from .table_model import TableModel, SortFilterTableModel
if is_qt4:
def set_qheader_section_resize_mode(header):
return header.setResizeMode
else:
def set_qheader_section_resize_mode(header):
return header.setSectionResizeMode
class TableEditor(Editor, BaseTableEditor):
"""Editor that presents data in a table. Optionally, tables can have
a set of filters that reduce the set of data displayed, according to
their criteria.
"""
# -------------------------------------------------------------------------
# Trait definitions:
# -------------------------------------------------------------------------
#: The table view control associated with the editor:
table_view = Any()
def _table_view_default(self):
return TableView(editor=self)
#: A wrapper around the source model which provides filtering and sorting:
model = Instance(SortFilterTableModel)
def _model_default(self):
return SortFilterTableModel(editor=self)
#: The table model associated with the editor:
source_model = Instance(TableModel)
def _source_model_default(self):
return TableModel(editor=self)
#: The set of columns currently defined on the editor:
columns = List(TableColumn)
#: The currently selected row(s), column(s), or cell(s).
selected = Any()
#: The current selected row
selected_row = Property(Any, observe="selected")
selected_indices = Property(Any, observe="selected")
#: Current filter object (should be a TableFilter or callable or None):
filter = Any()
#: The indices of the table items currently passing the table filter:
filtered_indices = List(Int)
#: Current filter summary message
filter_summary = Str("All items")
#: Update the filtered contents.
update_filter = Event()
#: The event fired when a cell is clicked on:
click = Event()
#: The event fired when a cell is double-clicked on:
dclick = Event()
#: The Traits UI associated with the table editor toolbar:
toolbar_ui = Instance(UI)
#: The index of the row that was last right clicked on its vertical header
header_row = Int()
#: Whether to auto-size the columns or not.
auto_size = Bool(False)
#: Dictionary mapping image names to QIcons
images = Dict()
#: Dictionary mapping ImageResource objects to QIcons
image_resources = Dict()
#: An image being converted:
image = Image
def init(self, parent):
"""Finishes initializing the editor by creating the underlying toolkit
widget."""
factory = self.factory
self.filter = factory.filter
columns = factory.columns[:]
if (len(columns) == 0) and (len(self.value) > 0):
columns = [
ObjectColumn(name=name)
for name in self.value[0].editable_traits()
]
self.columns = columns
if factory.table_view_factory is not None:
self.table_view = factory.table_view_factory(editor=self)
if factory.source_model_factory is not None:
self.source_model = factory.source_model_factory(editor=self)
if factory.model_factory is not None:
self.model = factory.model_factory(editor=self)
# Create the table view and model
self.model.setDynamicSortFilter(True)
self.model.setSourceModel(self.source_model)
self.table_view.setModel(self.model)
# When sorting is enabled, the first column is initially displayed with
# the triangle indicating it is the sort index, even though no sorting
# has actually been done. Sort here for UI/model consistency.
if self.factory.sortable and not self.factory.reorderable:
self.model.sort(0, QtCore.Qt.SortOrder.AscendingOrder)
# Connect to the mode specific selection handler and select the first
# row/column/cell. Do this before creating the edit_view to make sure
# that it has a valid item to use when constructing its view.
smodel = self.table_view.selectionModel()
mode_slot = getattr(self, "_on_%s_selection" % factory.selection_mode)
smodel.selectionChanged.connect(mode_slot)
self.table_view.setCurrentIndex(self.model.index(0, 0))
# Create the toolbar if necessary
if factory.show_toolbar and len(factory.filters) > 0:
main_view = QtGui.QWidget()
layout = QtGui.QVBoxLayout(main_view)
layout.setContentsMargins(0, 0, 0, 0)
self.toolbar_ui = self.edit_traits(
parent=parent,
kind="subpanel",
view=View(
Group(
Item("filter{View}", editor=factory._filter_editor),
Item("filter_summary{Results}", style="readonly"),
spring,
orientation="horizontal",
),
resizable=True,
),
)
self.toolbar_ui.parent = self.ui
layout.addWidget(self.toolbar_ui.control)
layout.addWidget(self.table_view)
else:
main_view = self.table_view
# Create auxiliary editor and encompassing splitter if necessary
mode = factory.selection_mode
if (factory.edit_view == " ") or mode not in {"row", "rows"}:
self.control = main_view
else:
if factory.orientation == "horizontal":
self.control = QtGui.QSplitter(QtCore.Qt.Orientation.Horizontal)
else:
self.control = QtGui.QSplitter(QtCore.Qt.Orientation.Vertical)
self.control.setSizePolicy(
QtGui.QSizePolicy.Policy.Expanding, QtGui.QSizePolicy.Policy.Expanding
)
self.control.addWidget(main_view)
self.control.setStretchFactor(0, 2)
# Create the row editor below the table view
editor = InstanceEditor(view=factory.edit_view, kind="subpanel")
self._ui = self.edit_traits(
parent=self.control,
kind="subpanel",
view=View(
Item(
"selected_row",
style="custom",
editor=editor,
show_label=False,
resizable=True,
width=factory.edit_view_width,
height=factory.edit_view_height,
),
resizable=True,
handler=factory.edit_view_handler,
),
)
self._ui.parent = self.ui
self.control.addWidget(self._ui.control)
self.control.setStretchFactor(1, 1)
# Connect to the click and double click handlers
self.table_view.clicked.connect(self._on_click)
self.table_view.doubleClicked.connect(self._on_dclick)
# Make sure we listen for 'items' changes as well as complete list
# replacements
self.context_object.on_trait_change(
self.update_editor, self.extended_name + "_items", dispatch="ui"
)
# Listen for changes to traits on the objects in the list
self.context_object.on_trait_change(
self.refresh_editor, self.extended_name + ".-", dispatch="ui"
)
# Listen for changes on column definitions
self.on_trait_change(self._update_columns, "columns", dispatch="ui")
self.on_trait_change(
self._update_columns, "columns_items", dispatch="ui"
)
# Set up the required externally synchronized traits
is_list = mode in ("rows", "columns", "cells")
self.sync_value(factory.click, "click", "to")
self.sync_value(factory.dclick, "dclick", "to")
self.sync_value(factory.columns_name, "columns", is_list=True)
self.sync_value(factory.selected, "selected", is_list=is_list)
self.sync_value(
factory.selected_indices, "selected_indices", is_list=is_list
)
self.sync_value(factory.filter_name, "filter", "from")
self.sync_value(factory.filtered_indices, "filtered_indices", "to")
self.sync_value(factory.update_filter_name, "update_filter", "from")
self.auto_size = self.factory.auto_size
# Initialize the ItemDelegates for each column
self._update_columns()
def dispose(self):
"""Disposes of the contents of an editor."""
self.model.beginResetModel()
self.model.endResetModel()
# Make sure that the auxiliary UIs are properly disposed
if self.toolbar_ui is not None:
self.toolbar_ui.dispose()
if self._ui is not None:
self._ui.dispose()
# Remove listener for 'items' changes on object trait
self.context_object.on_trait_change(
self.update_editor, self.extended_name + "_items", remove=True
)
# Remove listener for changes to traits on the objects in the list
self.context_object.on_trait_change(
self.refresh_editor, self.extended_name + ".-", remove=True
)
# Remove listeners for column definition changes
self.on_trait_change(self._update_columns, "columns", remove=True)
self.on_trait_change(
self._update_columns, "columns_items", remove=True
)
super().dispose()
def update_editor(self):
"""Updates the editor when the object trait changes externally to the
editor."""
if self._no_notify:
return
self.table_view.setUpdatesEnabled(False)
try:
filtering = (
len(self.factory.filters) > 0 or self.filter is not None
)
if filtering:
self._update_filtering()
# invalidate the model, but do not reset it. Resetting the model
# may cause problems if the selection sync'ed traits are being used
# externally to manage the selections
self.model.invalidate()
self.table_view.resizeColumnsToContents()
if self.auto_size:
self.table_view.resizeRowsToContents()
finally:
self.table_view.setUpdatesEnabled(True)
def restore_prefs(self, prefs):
"""Restores any saved user preference information associated with the
editor.
"""
header = self.table_view.horizontalHeader()
if header is not None and "column_state" in prefs:
header.restoreState(prefs["column_state"])
def save_prefs(self):
"""Returns any user preference information associated with the editor."""
prefs = {}
header = self.table_view.horizontalHeader()
if header is not None:
prefs["column_state"] = header.saveState().data()
return prefs
def refresh_editor(self):
"""Requests that the underlying table widget to redraw itself."""
self.table_view.viewport().update()
def create_new_row(self):
"""Creates a new row object using the provided factory."""
factory = self.factory
kw = factory.row_factory_kw.copy()
if "__table_editor__" in kw:
kw["__table_editor__"] = self
return self.ui.evaluate(
factory.row_factory, *factory.row_factory_args, **kw
)
def items(self):
"""Returns the raw list of model objects."""
items = self.value
if not isinstance(items, SequenceTypes):
items = [items]
if self.factory and self.factory.reverse:
items = ReversedList(items)
return items
def callx(self, func, *args, **kw):
"""Call a function without notifying the underlying table view or
model."""
old = self._no_notify
self._no_notify = True
try:
func(*args, **kw)
finally:
self._no_notify = old
def setx(self, **keywords):
"""Set one or more attributes without notifying the underlying table
view or model."""
old = self._no_notify
self._no_notify = True
try:
for name, value in keywords.items():
setattr(self, name, value)
finally:
self._no_notify = old
def set_selection(self, objects=[], notify=True):
"""Sets the current selection to a set of specified objects."""
if not isinstance(objects, list):
objects = [objects]
mode = self.factory.selection_mode
indexes = []
flags = QtGui.QItemSelectionModel.SelectionFlag.ClearAndSelect
# In the case of row or column selection, we need a dummy value for the
# other dimension that has not been filtered.
source_index = self.model.mapToSource(self.model.index(0, 0))
source_row, source_column = source_index.row(), source_index.column()
# Selection mode is 'row' or 'rows'
if mode.startswith("row"):
flags |= QtGui.QItemSelectionModel.SelectionFlag.Rows
items = self.items()
for obj in objects:
try:
row = items.index(obj)
except ValueError:
continue
indexes.append(self.source_model.index(row, source_column))
# Selection mode is 'column' or 'columns'
elif mode.startswith("column"):
flags |= QtGui.QItemSelectionModel.SelectionFlag.Columns
for name in objects:
column = self._column_index_from_name(name)
if column != -1:
indexes.append(self.source_model.index(source_row, column))
# Selection mode is 'cell' or 'cells'
else:
items = self.items()
for obj, name in objects:
try:
row = items.index(obj)
except ValueError:
continue
column = self._column_index_from_name(name)
if column != -1:
indexes.append(self.source_model.index(row, column))
# Perform the selection so that only one signal is emitted
selection = QtGui.QItemSelection()
smodel = self.table_view.selectionModel()
if smodel is None:
# guard against selection during tear-down
return
for index in indexes:
index = self.model.mapFromSource(index)
if index.isValid():
smodel.setCurrentIndex(
index, QtGui.QItemSelectionModel.SelectionFlag.NoUpdate
)
selection.select(index, index)
smodel.blockSignals(not notify)
try:
if len(selection.indexes()):
smodel.clear()
smodel.select(selection, flags)
else:
smodel.clear()
finally:
smodel.blockSignals(False)
self.refresh_editor()
# -------------------------------------------------------------------------
# Private methods:
# -------------------------------------------------------------------------
def _column_index_from_name(self, name):
"""Returns the index of the column with the given name or -1 if no
column exists with that name."""
for i, column in enumerate(self.columns):
if name == column.name:
return i
return -1
def _customize_filters(self, filter):
"""Allows the user to customize the current set of table filters."""
filter_editor = TableFilterEditor(editor=self)
ui = filter_editor.edit_traits(parent=self.control)
if ui.result:
self.factory.filters = filter_editor.templates
self.filter = filter_editor.selected_filter
else:
self.setx(filter=filter)
def _update_filtering(self):
"""Update the filter summary and the filtered indices."""
items = self.items()
num_items = len(items)
f = self.filter
if f is None:
self._filtered_cache = None
self.filtered_indices = list(range(num_items))
self.filter_summary = "All %i items" % num_items
else:
if not callable(f):
f = f.filter
self._filtered_cache = fc = [f(item) for item in items]
self.filtered_indices = fi = [i for i, ok in enumerate(fc) if ok]
self.filter_summary = "%i of %i items" % (len(fi), num_items)
def _add_image(self, image_resource):
"""Adds a new image to the image map."""
image = image_resource.create_icon()
self.image_resources[image_resource] = image
self.images[image_resource.name] = image
return image
def _get_image(self, image):
"""Converts a user specified image to a QIcon."""
if isinstance(image, str):
self.image = image
image = self.image
if isinstance(image, ImageResource):
result = self.image_resources.get(image)
if result is not None:
return result
return self._add_image(image)
return self.images.get(image)
def _create_empty_menu(self):
"""Create a QMenu to display in empty space below the rows.
Returns a QMenu or None if no menu to display.
"""
if not self.factory.editable or self.factory.row_factory is None:
return None
empty_menu = QtGui.QMenu(self.table_view)
action = empty_menu.addAction("Add new item")
action.triggered.connect(self._on_context_append)
return empty_menu
def _create_header_menu(self):
"""Create a QMenu to display in the vertical header.
Returns a QMenu or None if no menu to display.
"""
header_menu = QtGui.QMenu(self.table_view)
if self.factory.editable:
if self.factory.row_factory is not None:
action = header_menu.addAction("Insert new item")
action.triggered.connect(self._on_context_insert)
if self.factory.deletable:
action = header_menu.addAction("Delete item")
action.triggered.connect(self._on_context_remove)
if self.factory.reorderable:
show_up = (self.header_row > 0)
show_down = (self.header_row < self.model.rowCount() - 1)
if not header_menu.isEmpty() and (show_up or show_down):
header_menu.addSeparator()
if show_up:
header_menu_up = header_menu.addAction("Move item up")
header_menu_up.triggered.connect(self._on_context_move_up)
if show_down:
header_menu_down = header_menu.addAction("Move item down")
header_menu_down.triggered.connect(self._on_context_move_down)
if header_menu.isEmpty():
return None
else:
return header_menu
# -- Trait Property getters/setters ---------------------------------------
@cached_property
def _get_selected_row(self):
"""Gets the selected row, or the first row if multiple rows are
selected."""
mode = self.factory.selection_mode
if mode.startswith("column"):
return None
elif mode == "row":
return self.selected
try:
if mode == "rows":
return self.selected[0]
elif mode == "cell":
return self.selected[0]
elif mode == "cells":
return self.selected[0][0]
except IndexError:
return None
@cached_property
def _get_selected_indices(self):
"""Gets the row,column indices which match the selected trait"""
selection_items = self.table_view.selectionModel().selection()
indices = self.model.mapSelectionToSource(selection_items).indexes()
if self.factory.selection_mode.startswith("row"):
indices = sorted(set(index.row() for index in indices))
elif self.factory.selection_mode.startswith("column"):
indices = sorted(set(index.column() for index in indices))
else:
indices = [(index.row(), index.column()) for index in indices]
if self.factory.selection_mode in {"rows", "columns", "cells"}:
return indices
elif len(indices) > 0:
return indices[0]
else:
return -1
def _set_selected_indices(self, indices):
if not isinstance(indices, list):
indices = [indices]
selected = []
if self.factory.selection_mode.startswith("row"):
for row in indices:
selected.append(self.value[row])
elif self.factory.selection_mode.startswith("column"):
for col in indices:
selected.append(self.columns[col].name)
else:
for row, col in indices:
selected.append((self.value[row], self.columns[col].name))
self.selected = selected
self.set_selection(self.selected, False)
# -- Trait Change Handlers ------------------------------------------------
def _filter_changed(self, old_filter, new_filter):
"""Handles the current filter being changed."""
if not self._no_notify:
if new_filter is customize_filter:
do_later(self._customize_filters, old_filter)
else:
self._update_filtering()
self.model.invalidate()
self.set_selection(self.selected)
def _update_columns(self):
"""Handle the column list being changed."""
self.table_view.setItemDelegate(TableDelegate(self.table_view))
for i, column in enumerate(self.columns):
if column.renderer:
self.table_view.setItemDelegateForColumn(i, column.renderer)
self.model.invalidate()
self.table_view.resizeColumnsToContents()
if self.auto_size:
self.table_view.resizeRowsToContents()
def _selected_changed(self, new):
"""Handle the selected row/column/cell being changed externally."""
if not self._no_notify:
self.set_selection(self.selected, notify=False)
def _update_filter_changed(self):
"""The filter has changed internally."""
self._filter_changed(self.filter, self.filter)
# -- Event Handlers -------------------------------------------------------
def _on_row_selection(self, added, removed):
"""Handle the row selection being changed."""
items = self.items()
indexes = self.table_view.selectionModel().selectedRows()
if len(indexes):
index = self.model.mapToSource(indexes[0])
selected = items[index.row()]
else:
selected = None
self.setx(selected=selected)
self.ui.evaluate(self.factory.on_select, self.selected)
def _on_rows_selection(self, added, removed):
"""Handle the rows selection being changed."""
items = self.items()
indexes = self.table_view.selectionModel().selectedRows()
selected = [
items[self.model.mapToSource(index).row()] for index in indexes
]
self.setx(selected=selected)
self.ui.evaluate(self.factory.on_select, self.selected)
def _on_column_selection(self, added, removed):
"""Handle the column selection being changed."""
indexes = self.table_view.selectionModel().selectedColumns()
if len(indexes):
index = self.model.mapToSource(indexes[0])
selected = self.columns[index.column()].name
else:
selected = ""
self.setx(selected=selected)
self.ui.evaluate(self.factory.on_select, self.selected)
def _on_columns_selection(self, added, removed):
"""Handle the columns selection being changed."""
indexes = self.table_view.selectionModel().selectedColumns()
selected = [
self.columns[self.model.mapToSource(index).column()].name
for index in indexes
]
self.setx(selected=selected)
self.ui.evaluate(self.factory.on_select, self.selected)
def _on_cell_selection(self, added, removed):
"""Handle the cell selection being changed."""
items = self.items()
indexes = self.table_view.selectionModel().selectedIndexes()
if len(indexes):
index = self.model.mapToSource(indexes[0])
obj = items[index.row()]
column_name = self.columns[index.column()].name
else:
obj = None
column_name = ""
selected = (obj, column_name)
self.setx(selected=selected)
self.ui.evaluate(self.factory.on_select, self.selected)
def _on_cells_selection(self, added, removed):
"""Handle the cells selection being changed."""
items = self.items()
indexes = self.table_view.selectionModel().selectedIndexes()
selected = []
for index in indexes:
index = self.model.mapToSource(index)
obj = items[index.row()]
column_name = self.columns[index.column()].name
selected.append((obj, column_name))
self.setx(selected=selected)
self.ui.evaluate(self.factory.on_select, self.selected)
def _on_click(self, index):
"""Handle a cell being clicked."""
index = self.model.mapToSource(index)
column = self.columns[index.column()]
obj = self.items()[index.row()]
# Fire the same event on the editor after mapping it to a model object
# and column name:
self.click = (obj, column)
# Invoke the column's click handler:
column.on_click(obj)
def _on_dclick(self, index):
"""Handle a cell being double clicked."""
index = self.model.mapToSource(index)
column = self.columns[index.column()]
obj = self.items()[index.row()]
# Fire the same event on the editor after mapping it to a model object
# and column name:
self.dclick = (obj, column)
# Invoke the column's double-click handler:
column.on_dclick(obj)
def _on_context_insert(self):
"""Handle 'insert item' being selected from the header context menu."""
self.model.insertRow(self.header_row)
def _on_context_append(self):
"""Handle 'add item' being selected from the empty space context
menu."""
self.model.insertRow(self.model.rowCount())
def _on_context_remove(self):
"""Handle 'remove item' being selected from the header context menu."""
self.model.removeRow(self.header_row)
def _on_context_move_up(self):
"""Handle 'move up' being selected from the header context menu."""
self.model.moveRow(self.header_row, self.header_row - 1)
def _on_context_move_down(self):
"""Handle 'move down' being selected from the header context menu."""
self.model.moveRow(self.header_row, self.header_row + 1)
# Define the SimpleEditor class.
SimpleEditor = TableEditor
# Define the ReadonlyEditor class.
ReadonlyEditor = TableEditor
# -------------------------------------------------------------------------
# Qt widgets that have been configured to behave as expected by Traits UI:
# -------------------------------------------------------------------------
class TableDelegate(QtGui.QStyledItemDelegate):
"""A QStyledItemDelegate which fetches Traits UI editors."""
def createEditor(self, parent, option, index):
"""Reimplemented to return the editor for a given index."""
model = index.model()
index = model.mapToSource(index)
table_editor = model._editor
column = table_editor.columns[index.column()]
obj = table_editor.items()[index.row()]
factory = column.get_editor(obj)
style = column.get_style(obj)
if factory is None:
return None
target, name = column.target_name(obj)
handler = default_handler()
if table_editor.ui.context is None:
ui = UI(handler=handler)
else:
context = table_editor.ui.context.copy()
context["table_editor_object"] = context["object"]
context["object"] = target
ui = UI(handler=handler, context=context)
# Create and initialize the editor
factory_method = getattr(factory, style + "_editor")
editor = factory_method(ui, target, name, "", parent)
editor.prepare(parent)
control = editor.control
control.setParent(parent)
# Required for QMouseEvents to propagate to the widget
control.setFocusPolicy(QtCore.Qt.FocusPolicy.StrongFocus)
# The table view's background will shine through unless the editor
# paints its own background
control.setAutoFillBackground(True)
# Make sure that editors are disposed of correctly
# will be disposed in closeEditor of the TableView
control._editor = editor
return control
def updateEditorGeometry(self, editor, option, index):
"""Update the editor's geometry."""
editor.setGeometry(option.rect)
def paint(self, painter, option, index):
self.initStyleOption(option, index)
if (option.state & QtGui.QStyle.StateFlag.State_Selected) and (
option.state & QtGui.QStyle.StateFlag.State_Active
):
factory = self.parent()._editor.factory
if factory.selection_bg_color is not None:
option.palette.setColor(
QtGui.QPalette.ColorRole.Highlight, factory.selection_bg_color_
)
if factory.selection_color is not None:
option.palette.setColor(
QtGui.QPalette.ColorRole.HighlightedText, factory.selection_color_
)
QtGui.QApplication.style().drawControl(
QtGui.QStyle.ControlElement.CE_ItemViewItem, option, painter, None
)
class TableView(QtGui.QTableView):
"""A QTableView configured to behave as expected by TraitsUI."""
_SELECTION_MAP = {
"row": (
QtGui.QAbstractItemView.SelectionBehavior.SelectRows,
QtGui.QAbstractItemView.SelectionMode.SingleSelection,
),
"rows": (
QtGui.QAbstractItemView.SelectionBehavior.SelectRows,
QtGui.QAbstractItemView.SelectionMode.ExtendedSelection,
),
"column": (
QtGui.QAbstractItemView.SelectionBehavior.SelectColumns,
QtGui.QAbstractItemView.SelectionMode.SingleSelection,
),
"columns": (
QtGui.QAbstractItemView.SelectionBehavior.SelectColumns,
QtGui.QAbstractItemView.SelectionMode.ExtendedSelection,
),
"cell": (
QtGui.QAbstractItemView.SelectionBehavior.SelectItems,
QtGui.QAbstractItemView.SelectionMode.SingleSelection,
),
"cells": (
QtGui.QAbstractItemView.SelectionBehavior.SelectItems,
QtGui.QAbstractItemView.SelectionMode.ExtendedSelection,
),
}
def __init__(self, editor):
"""Initialise the object."""
QtGui.QTableView.__init__(self)
self._initial_size = False
self._editor = editor
factory = editor.factory
# Configure the grid lines.
self.setShowGrid(factory.show_lines)
# Configure the selection behaviour.
self.setCornerButtonEnabled(False)
behav, mode = self._SELECTION_MAP[factory.selection_mode]
self.setSelectionBehavior(behav)
self.setSelectionMode(mode)
# Configure the editing behavior.
triggers = (
QtGui.QAbstractItemView.EditTrigger.DoubleClicked
| QtGui.QAbstractItemView.EditTrigger.SelectedClicked
)
if factory.edit_on_first_click and not factory.reorderable:
triggers |= QtGui.QAbstractItemView.EditTrigger.CurrentChanged
self.setEditTriggers(triggers)
# Configure the reordering and sorting behavior.
self.setDragEnabled(True)
self.viewport().setAcceptDrops(True)
self.setDropIndicatorShown(True)
if factory.reorderable:
self.setDragDropMode(QtGui.QAbstractItemView.DragDropMode.InternalMove)
if factory.sortable:
self.setSortingEnabled(True)
if factory._qt_stylesheet is not None:
self.setStyleSheet(factory._qt_stylesheet)
self.resizeColumnsToContents()
def setModel(self, model):
super().setModel(model)
self._update_header_sizing()
def contextMenuEvent(self, event):
"""Reimplemented to create context menus for cells and empty space."""
# Determine the logical indices of the cell where the click occurred
hheader, vheader = self.horizontalHeader(), self.verticalHeader()
position = event.globalPos()
row = vheader.logicalIndexAt(vheader.mapFromGlobal(position))
column = hheader.logicalIndexAt(hheader.mapFromGlobal(position))
# Map the logical row index to a real index for the source model
model = self.model()
row = model.mapToSource(model.index(row, 0)).row()
# Show a context menu for empty space at bottom of table...
editor = self._editor
if row == -1:
empty_menu = editor._create_empty_menu()
if empty_menu is not None:
event.accept()
empty_menu.exec_(position)
# ...or show a context menu for a cell.
elif column != -1:
obj = editor.items()[row]
column = editor.columns[column]
menu_manager = column.get_menu(obj)
if menu_manager is None:
menu_manager = editor.factory.menu
if menu_manager is not None:
event.accept()
selected = editor.selected
if not isinstance(selected, SequenceTypes):
selected = [selected]
if obj not in selected:
selected = [obj]
editor.set_menu_context(selected, obj, column)
menu = menu_manager.create_menu(self, controller=editor)
menu.exec_(position)
def eventFilter(self, obj, event):
"""Reimplemented to create context menu for the vertical header."""
vheader = self.verticalHeader()
if obj is vheader and event.type() == QtCore.QEvent.Type.ContextMenu:
position = event.globalPos()
editor = self._editor
row = vheader.logicalIndexAt(event.pos().y())
if row == -1:
empty_menu = editor._create_empty_menu()
if empty_menu is not None:
event.accept()
empty_menu.exec_(position)
else:
editor.header_row = row
header_menu = editor._create_header_menu()
if header_menu is not None:
event.accept()
header_menu.exec_(position)
return True
else:
return QtGui.QTableView.eventFilter(self, obj, event)
def resizeEvent(self, event):
"""Reimplemented to size the table columns when the size of the table
changes. Because the layout algorithm requires that the available
space be known, we have to wait until the UI that contains this table
gives it its initial size."""
QtGui.QTableView.resizeEvent(self, event)
if self._editor.auto_size:
self.resizeColumnsToContents()
self.resizeRowsToContents()
else:
parent = self.parent()
if (
not self._initial_size
and parent
and (self.isVisible() or isinstance(parent, QtGui.QMainWindow))
):
self._initial_size = True
if self._editor.auto_size:
self.resizeColumnsToContents()
self.resizeRowsToContents()
def sizeHint(self):
"""Reimplemented to define a better size hint for the width of the
TableEditor."""
size_hint = QtGui.QTableView.sizeHint(self)
# This method is sometimes called by Qt after the editor has been
# disposed but before this control has been deleted:
if self._editor.factory is None:
return size_hint
width = self.style().pixelMetric(
QtGui.QStyle.PixelMetric.PM_ScrollBarExtent, QtGui.QStyleOptionHeader(), self
)
for column in range(len(self._editor.columns)):
width += self.sizeHintForColumn(column)
size_hint.setWidth(width)
return size_hint
def sizeHintForColumn(self, column_index):
"""Reimplemented to support absolute width specification via
TableColumns and to improve the metric for autosizing columns."""
editor = self._editor
column = editor.columns[column_index]
requested_width = column.get_width()
# Autosize based on column contents and label width. Qt's default
# implementation of this function handles the content; we handle the label.
if requested_width < 1:
base_width = QtGui.QTableView.sizeHintForColumn(self, column_index)
# Determine what font to use in the calculation
font = column.get_text_font(None)
if font is None:
font = self.font()
font.setBold(True)
else:
font = QtGui.QFont(font)
# Determine the width of the column label
text = column.get_label()
# QFontMetrics.width() is deprecated and Qt docs suggest using
# horizontalAdvance() instead, but it is only available since Qt 5.11
if QtCore.__version_info__ >= (5, 11):
width = QtGui.QFontMetrics(font).horizontalAdvance(text)
else:
width = QtGui.QFontMetrics(font).width(text)
# Add margin to the calculated width as appropriate
style = self.style()
option = QtGui.QStyleOptionHeader()
width += (
style.pixelMetric(
QtGui.QStyle.PixelMetric.PM_HeaderGripMargin, option, self
)
* 2
)
if editor.factory.sortable and not editor.factory.reorderable:
# Add size of sort indicator
width += style.pixelMetric(
QtGui.QStyle.PixelMetric.PM_HeaderMarkSize, option, self
)
# Add distance between sort indicator and text
width += style.pixelMetric(
QtGui.QStyle.PixelMetric.PM_HeaderMargin, option, self
)
return max(base_width, width)
# Or else set width absolutely
else:
return requested_width
def resizeColumnsToContents(self):
"""Support proportional column width specifications."""
# TODO: The proportional size specification approach found in the
# TableColumns is not entirely compatible with the ability to
# specify the resize_mode. Namely, there are combinations of
# specifications that are redundant, and others which are
# contradictory. Rework this method so that the various values
# for **width** have a well-defined, sensible meaning for each
# of the possible values of resize_mode.
editor = self._editor
available_space = self.viewport().width()
hheader = self.horizontalHeader()
# Compute sizes for columns with absolute or no size requests
proportional = []
for column_index in range(len(editor.columns)):
column = editor.columns[column_index]
requested_width = column.get_width()
if (
column.resize_mode in ("interactive", "stretch")
and 0 < requested_width <= 1.0
):
proportional.append((column_index, requested_width))
elif (
column.resize_mode == "interactive"
and requested_width < 0
and self._initial_size
):
# Keep previous size if initial sizing has been done
available_space -= hheader.sectionSize(column_index)
else:
base_width = hheader.sectionSizeHint(column_index)
width = max(base_width, self.sizeHintForColumn(column_index))
hheader.resizeSection(column_index, width)
available_space -= width
# Now use the remaining space for columns with proportional width
# requests
for column_index, percent in proportional:
base_width = hheader.sectionSizeHint(column_index)
width = max(base_width, int(percent * available_space))
hheader.resizeSection(column_index, width)
def closeEditor(self, control, hint):
# dispose traits editor associated with control if any
editor = getattr(control, "_editor", None)
if editor is not None:
editor.dispose()
delattr(control, "_editor")
return super().closeEditor(control, hint)
def _update_header_sizing(self):
"""Header section sizing can be done only after a valid model is set.
Otherwise it results in a segfault with Qt5.
"""
editor = self._editor
factory = editor.factory
# Configure the row headings.
vheader = self.verticalHeader()
set_resize_mode = set_qheader_section_resize_mode(vheader)
insertable = factory.row_factory is not None
if (
factory.editable and (insertable or factory.deletable)
) or factory.reorderable:
vheader.installEventFilter(self)
set_resize_mode(QtGui.QHeaderView.ResizeMode.ResizeToContents)
elif not factory.show_row_labels:
vheader.hide()
if factory.row_height > 0:
vheader.setDefaultSectionSize(factory.row_height)
self.setAlternatingRowColors(factory.alternate_bg_color)
self.setHorizontalScrollMode(QtGui.QAbstractItemView.ScrollMode.ScrollPerPixel)
# Configure the column headings.
# We detect if there are any stretchy sections at all; if not, then
# we make the last non-fixed-size column stretchy.
hheader = self.horizontalHeader()
set_resize_mode = set_qheader_section_resize_mode(hheader)
resize_mode_map = dict(
interactive=QtGui.QHeaderView.ResizeMode.Interactive,
fixed=QtGui.QHeaderView.ResizeMode.Fixed,
stretch=QtGui.QHeaderView.ResizeMode.Stretch,
resize_to_contents=QtGui.QHeaderView.ResizeMode.ResizeToContents,
)
stretchable_columns = []
for i, column in enumerate(editor.columns):
set_resize_mode(i, resize_mode_map[column.resize_mode])
if column.resize_mode in ("stretch", "interactive"):
stretchable_columns.append(i)
if not stretchable_columns:
# Use the behavior from before the "resize_mode" trait was added
# to TableColumn
hheader.setStretchLastSection(True)
else:
# hheader.setSectionResizeMode(
# stretchable_columns[-1], QtGui.QHeaderView.ResizeMode.Stretch)
hheader.setStretchLastSection(False)
if factory.show_column_labels:
hheader.setHighlightSections(False)
else:
hheader.hide()
# -------------------------------------------------------------------------
# Editor for configuring the filters available to a TableEditor:
# -------------------------------------------------------------------------
class TableFilterEditor(HasTraits):
"""An editor that manages table filters."""
# -------------------------------------------------------------------------
# Trait definitions:
# -------------------------------------------------------------------------
#: TableEditor this editor is associated with
editor = Instance(TableEditor)
#: The list of filters
filters = List(TableFilter)
#: The list of available templates from which filters can be created
templates = Property(List(TableFilter), observe="filters")
#: The currently selected filter template
selected_template = Instance(TableFilter)
#: The currently selected filter
selected_filter = Instance(TableFilter, allow_none=True)
#: The view to use for the current filter
selected_filter_view = Property(observe="selected_filter")
#: Buttons for add/removing filters
add_button = Button("New")
remove_button = Button("Delete")
# The default view for this editor
view = View(
Group(
Group(
Group(
Item("add_button", enabled_when="selected_template"),
Item(
"remove_button",
enabled_when="len(templates) > 1 and "
"selected_filter is not None",
),
orientation="horizontal",
show_labels=False,
),
Label("Base filter for new filters:"),
Item("selected_template", editor=EnumEditor(name="templates")),
Item(
"selected_filter",
style="custom",
editor=EnumEditor(name="filters", mode="list"),
),
show_labels=False,
),
Item(
"selected_filter",
width=0.75,
style="custom",
editor=InstanceEditor(view_name="selected_filter_view"),
),
id="TableFilterEditorSplit",
show_labels=False,
layout="split",
orientation="horizontal",
),
id="traitsui.qt.table_editor.TableFilterEditor",
buttons=["OK", "Cancel"],
kind="livemodal",
resizable=True,
width=800,
height=400,
title="Customize filters",
)
# -------------------------------------------------------------------------
# Private methods:
# -------------------------------------------------------------------------
# -- Trait Property getter/setters ----------------------------------------
@cached_property
def _get_selected_filter_view(self):
view = None
if self.selected_filter:
model = self.editor.model
index = model.mapToSource(model.index(0, 0))
if index.isValid():
obj = self.editor.items()[index.row()]
else:
obj = None
view = self.selected_filter.edit_view(obj)
return view
@cached_property
def _get_templates(self):
templates = [f for f in self.editor.factory.filters if f.template]
templates.extend(self.filters)
return templates
# -- Trait Change Handlers ------------------------------------------------
def _editor_changed(self):
self.filters = [
f.clone_traits()
for f in self.editor.factory.filters
if not f.template
]
self.selected_template = self.templates[0]
@observe('add_button')
def _create_and_select_new_filter(self, event):
"""Create a new filter based on the selected template and select it."""
new_filter = self.selected_template.clone_traits()
new_filter.template = False
new_filter.name = new_filter._name = "New filter"
self.filters.append(new_filter)
self.selected_filter = new_filter
@observe("remove_button")
def _delete_selected_filter(self, event):
"""Delete the currently selected filter."""
if self.selected_template == self.selected_filter:
self.selected_template = self.templates[0]
index = self.filters.index(self.selected_filter)
del self.filters[index]
if index < len(self.filters):
self.selected_filter = self.filters[index]
else:
self.selected_filter = None
@observe("selected_filter:name")
def _update_filter_list(self, event):
"""A hack to make the EnumEditor watching the list of filters refresh
its text when the name of the selected filter changes.
"""
filters = self.filters
self.filters = []
self.filters = filters
|
enthought/traitsui
|
traitsui/qt/table_editor.py
|
table_editor.py
|
py
| 49,857 |
python
|
en
|
code
| 290 |
github-code
|
6
|
[
{
"api_name": "pyface.qt.is_qt4",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "editor.Editor",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "traitsui.editors.table_editor.BaseTableEditor",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "traits.api.Any",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "traits.api.Instance",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "table_model.SortFilterTableModel",
"line_number": 74,
"usage_type": "argument"
},
{
"api_name": "table_model.SortFilterTableModel",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "traits.api.Instance",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "table_model.TableModel",
"line_number": 80,
"usage_type": "argument"
},
{
"api_name": "table_model.TableModel",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "traits.api.List",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "traitsui.api.TableColumn",
"line_number": 86,
"usage_type": "argument"
},
{
"api_name": "traits.api.Any",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "traits.api.Property",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "traits.api.Any",
"line_number": 92,
"usage_type": "argument"
},
{
"api_name": "traits.api.Property",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "traits.api.Any",
"line_number": 94,
"usage_type": "argument"
},
{
"api_name": "traits.api.Any",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "traits.api.List",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "traits.api.Int",
"line_number": 100,
"usage_type": "argument"
},
{
"api_name": "traits.api.Str",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "traits.api.Event",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "traits.api.Event",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "traits.api.Event",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "traits.api.Instance",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "traitsui.api.UI",
"line_number": 115,
"usage_type": "argument"
},
{
"api_name": "traits.api.Int",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "traits.api.Bool",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "traits.api.Dict",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "traits.api.Dict",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "pyface.ui_traits.Image",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "traitsui.api.ObjectColumn",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "pyface.qt.QtCore.Qt",
"line_number": 163,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtCore",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QWidget",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 175,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QVBoxLayout",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 176,
"usage_type": "name"
},
{
"api_name": "traitsui.api.View",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "traitsui.api.Group",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "traitsui.api.spring",
"line_number": 185,
"usage_type": "argument"
},
{
"api_name": "traitsui.api.Item",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "traitsui.api.Item",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "pyface.qt.QtGui.QSplitter",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 203,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtCore.Qt",
"line_number": 203,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtCore",
"line_number": 203,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QSplitter",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 205,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtCore.Qt",
"line_number": 205,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtCore",
"line_number": 205,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QSizePolicy",
"line_number": 207,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 207,
"usage_type": "name"
},
{
"api_name": "traitsui.api.InstanceEditor",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "traitsui.api.View",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "traitsui.api.Item",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "traitsui.ui_traits.SequenceTypes",
"line_number": 367,
"usage_type": "argument"
},
{
"api_name": "traitsui.editors.table_editor.ReversedList",
"line_number": 371,
"usage_type": "call"
},
{
"api_name": "pyface.qt.QtGui.QItemSelectionModel",
"line_number": 406,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 406,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QItemSelectionModel",
"line_number": 415,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 415,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QItemSelectionModel",
"line_number": 426,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 426,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QItemSelection",
"line_number": 445,
"usage_type": "call"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 445,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QItemSelectionModel",
"line_number": 454,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 454,
"usage_type": "name"
},
{
"api_name": "pyface.image_resource.ImageResource",
"line_number": 527,
"usage_type": "argument"
},
{
"api_name": "pyface.qt.QtGui.QMenu",
"line_number": 543,
"usage_type": "call"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 543,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QMenu",
"line_number": 553,
"usage_type": "call"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 553,
"usage_type": "name"
},
{
"api_name": "traits.api.cached_property",
"line_number": 580,
"usage_type": "name"
},
{
"api_name": "traits.api.cached_property",
"line_number": 602,
"usage_type": "name"
},
{
"api_name": "traitsui.editors.table_editor.customize_filter",
"line_number": 644,
"usage_type": "name"
},
{
"api_name": "pyface.timer.api.do_later",
"line_number": 645,
"usage_type": "call"
},
{
"api_name": "pyface.qt.QtGui.QStyledItemDelegate",
"line_number": 824,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 824,
"usage_type": "name"
},
{
"api_name": "traitsui.api.default_handler",
"line_number": 842,
"usage_type": "call"
},
{
"api_name": "traitsui.api.UI",
"line_number": 844,
"usage_type": "call"
},
{
"api_name": "traitsui.api.UI",
"line_number": 849,
"usage_type": "call"
},
{
"api_name": "editor.prepare",
"line_number": 854,
"usage_type": "call"
},
{
"api_name": "editor.control",
"line_number": 855,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtCore.Qt",
"line_number": 859,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtCore",
"line_number": 859,
"usage_type": "name"
},
{
"api_name": "editor.setGeometry",
"line_number": 872,
"usage_type": "call"
},
{
"api_name": "pyface.qt.QtGui.QStyle",
"line_number": 877,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 877,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QStyle",
"line_number": 878,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 878,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QPalette",
"line_number": 883,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 883,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QPalette",
"line_number": 887,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 887,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QApplication.style",
"line_number": 890,
"usage_type": "call"
},
{
"api_name": "pyface.qt.QtGui.QApplication",
"line_number": 890,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 890,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QStyle",
"line_number": 891,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 891,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QTableView",
"line_number": 895,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 895,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QAbstractItemView",
"line_number": 900,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 900,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QAbstractItemView",
"line_number": 901,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 901,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QAbstractItemView",
"line_number": 904,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 904,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QAbstractItemView",
"line_number": 905,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 905,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QAbstractItemView",
"line_number": 908,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 908,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QAbstractItemView",
"line_number": 909,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 909,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QAbstractItemView",
"line_number": 912,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 912,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QAbstractItemView",
"line_number": 913,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 913,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QAbstractItemView",
"line_number": 916,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 916,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QAbstractItemView",
"line_number": 917,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 917,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QAbstractItemView",
"line_number": 920,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 920,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QAbstractItemView",
"line_number": 921,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 921,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QTableView.__init__",
"line_number": 928,
"usage_type": "call"
},
{
"api_name": "pyface.qt.QtGui.QTableView",
"line_number": 928,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 928,
"usage_type": "name"
},
{
"api_name": "editor.factory",
"line_number": 932,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui.QAbstractItemView",
"line_number": 945,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 945,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QAbstractItemView",
"line_number": 946,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 946,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QAbstractItemView",
"line_number": 949,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 949,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QAbstractItemView",
"line_number": 958,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 958,
"usage_type": "name"
},
{
"api_name": "editor._create_empty_menu",
"line_number": 987,
"usage_type": "call"
},
{
"api_name": "editor.items",
"line_number": 994,
"usage_type": "call"
},
{
"api_name": "editor.columns",
"line_number": 995,
"usage_type": "attribute"
},
{
"api_name": "editor.factory",
"line_number": 998,
"usage_type": "attribute"
},
{
"api_name": "editor.selected",
"line_number": 1001,
"usage_type": "attribute"
},
{
"api_name": "traitsui.ui_traits.SequenceTypes",
"line_number": 1002,
"usage_type": "argument"
},
{
"api_name": "editor.set_menu_context",
"line_number": 1006,
"usage_type": "call"
},
{
"api_name": "pyface.qt.QtCore.QEvent",
"line_number": 1014,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtCore",
"line_number": 1014,
"usage_type": "name"
},
{
"api_name": "editor._create_empty_menu",
"line_number": 1019,
"usage_type": "call"
},
{
"api_name": "editor.header_row",
"line_number": 1024,
"usage_type": "attribute"
},
{
"api_name": "editor._create_header_menu",
"line_number": 1025,
"usage_type": "call"
},
{
"api_name": "pyface.qt.QtGui.QTableView.eventFilter",
"line_number": 1032,
"usage_type": "call"
},
{
"api_name": "pyface.qt.QtGui.QTableView",
"line_number": 1032,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 1032,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QTableView.resizeEvent",
"line_number": 1040,
"usage_type": "call"
},
{
"api_name": "pyface.qt.QtGui.QTableView",
"line_number": 1040,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 1040,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QMainWindow",
"line_number": 1051,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 1051,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QTableView.sizeHint",
"line_number": 1062,
"usage_type": "call"
},
{
"api_name": "pyface.qt.QtGui.QTableView",
"line_number": 1062,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 1062,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QStyle",
"line_number": 1070,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 1070,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QStyleOptionHeader",
"line_number": 1070,
"usage_type": "call"
},
{
"api_name": "editor.columns",
"line_number": 1082,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui.QTableView.sizeHintForColumn",
"line_number": 1088,
"usage_type": "call"
},
{
"api_name": "pyface.qt.QtGui.QTableView",
"line_number": 1088,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 1088,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QFont",
"line_number": 1096,
"usage_type": "call"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 1096,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtCore.__version_info__",
"line_number": 1102,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtCore",
"line_number": 1102,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QFontMetrics",
"line_number": 1103,
"usage_type": "call"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 1103,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QFontMetrics",
"line_number": 1105,
"usage_type": "call"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 1105,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QStyleOptionHeader",
"line_number": 1109,
"usage_type": "call"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 1109,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QStyle",
"line_number": 1112,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 1112,
"usage_type": "name"
},
{
"api_name": "editor.factory",
"line_number": 1116,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui.QStyle",
"line_number": 1119,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 1119,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QStyle",
"line_number": 1123,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 1123,
"usage_type": "name"
},
{
"api_name": "editor.columns",
"line_number": 1148,
"usage_type": "attribute"
},
{
"api_name": "editor.columns",
"line_number": 1149,
"usage_type": "attribute"
},
{
"api_name": "editor.dispose",
"line_number": 1180,
"usage_type": "call"
},
{
"api_name": "editor.factory",
"line_number": 1190,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui.QHeaderView",
"line_number": 1199,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 1199,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QAbstractItemView",
"line_number": 1205,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 1205,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QHeaderView",
"line_number": 1212,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 1212,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QHeaderView",
"line_number": 1213,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 1213,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QHeaderView",
"line_number": 1214,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 1214,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QHeaderView",
"line_number": 1215,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 1215,
"usage_type": "name"
},
{
"api_name": "editor.columns",
"line_number": 1218,
"usage_type": "attribute"
},
{
"api_name": "traits.api.HasTraits",
"line_number": 1241,
"usage_type": "name"
},
{
"api_name": "traits.api.Instance",
"line_number": 1249,
"usage_type": "call"
},
{
"api_name": "traits.api.List",
"line_number": 1252,
"usage_type": "call"
},
{
"api_name": "traitsui.api.TableFilter",
"line_number": 1252,
"usage_type": "argument"
},
{
"api_name": "traits.api.Property",
"line_number": 1255,
"usage_type": "call"
},
{
"api_name": "traits.api.List",
"line_number": 1255,
"usage_type": "call"
},
{
"api_name": "traitsui.api.TableFilter",
"line_number": 1255,
"usage_type": "argument"
},
{
"api_name": "traits.api.Instance",
"line_number": 1258,
"usage_type": "call"
},
{
"api_name": "traitsui.api.TableFilter",
"line_number": 1258,
"usage_type": "argument"
},
{
"api_name": "traits.api.Instance",
"line_number": 1261,
"usage_type": "call"
},
{
"api_name": "traitsui.api.TableFilter",
"line_number": 1261,
"usage_type": "argument"
},
{
"api_name": "traits.api.Property",
"line_number": 1264,
"usage_type": "call"
},
{
"api_name": "traits.api.Button",
"line_number": 1267,
"usage_type": "call"
},
{
"api_name": "traits.api.Button",
"line_number": 1268,
"usage_type": "call"
},
{
"api_name": "traitsui.api.View",
"line_number": 1271,
"usage_type": "call"
},
{
"api_name": "traitsui.api.Group",
"line_number": 1272,
"usage_type": "call"
},
{
"api_name": "traitsui.api.Group",
"line_number": 1273,
"usage_type": "call"
},
{
"api_name": "traitsui.api.Group",
"line_number": 1274,
"usage_type": "call"
},
{
"api_name": "traitsui.api.Item",
"line_number": 1275,
"usage_type": "call"
},
{
"api_name": "traitsui.api.Item",
"line_number": 1276,
"usage_type": "call"
},
{
"api_name": "traitsui.api.Label",
"line_number": 1284,
"usage_type": "call"
},
{
"api_name": "traitsui.api.Item",
"line_number": 1285,
"usage_type": "call"
},
{
"api_name": "traitsui.api.EnumEditor",
"line_number": 1285,
"usage_type": "call"
},
{
"api_name": "traitsui.api.Item",
"line_number": 1286,
"usage_type": "call"
},
{
"api_name": "traitsui.api.EnumEditor",
"line_number": 1289,
"usage_type": "call"
},
{
"api_name": "traitsui.api.Item",
"line_number": 1293,
"usage_type": "call"
},
{
"api_name": "traitsui.api.InstanceEditor",
"line_number": 1297,
"usage_type": "call"
},
{
"api_name": "traits.api.cached_property",
"line_number": 1319,
"usage_type": "name"
},
{
"api_name": "traits.api.cached_property",
"line_number": 1332,
"usage_type": "name"
},
{
"api_name": "traits.api.observe",
"line_number": 1348,
"usage_type": "call"
},
{
"api_name": "traits.api.observe",
"line_number": 1357,
"usage_type": "call"
},
{
"api_name": "traits.api.observe",
"line_number": 1370,
"usage_type": "call"
}
] |
17270713471
|
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_regression
from mpl_toolkits import mplot3d
# Training phase/ Training the LR model/ Find optimal weights
def fit(X, y):
"""
X: Feature matrix: (n_samples, n_features)
y: y_true: (n_samples,1)
Returns: weights
weights: optimal weights (n_features, 1)
"""
X = X.copy()
ones_column = np.ones((len(X),1))
X = np.concatenate([ones_column, X], axis=1)
w = np.linalg.inv(X.T.dot(X)).dot(X.T).dot(y)
return w
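# Editorial sketch (added for clarity, not part of the original file): fit()
# solves argmin_w ||Xw - y||^2 via the normal equations w = (X^T X)^{-1} X^T y.
# Explicitly inverting X^T X can be numerically unstable; an equivalent and
# more robust variant solves the linear system instead:
def fit_stable(X, y):
    """Same least-squares fit as fit(), same input shapes assumed."""
    X = np.concatenate([np.ones((len(X), 1)), X], axis=1)
    # solve (X^T X) w = X^T y without forming the explicit inverse
    return np.linalg.solve(X.T.dot(X), X.T.dot(y))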
# prediction
def predict(X, w):
"""
X: Feature matrix: (n_samples, n_features)
w: weight vector: (n_features, 1)
Returns:
y: y_pred = X.w (n_samples,1)
"""
X = X.copy()
ones_column = np.ones((len(X),1))
X = np.concatenate([ones_column, X], axis=1)
return X.dot(w)
# r_squared
def r_squared(ytrue, ypred):
e_method = ((ytrue-ypred)**2).sum() # sum of squares of residuals
e_baseline = ((ytrue-ytrue.mean())**2).sum() # total sum of squares
return 1 - e_method/e_baseline
# loss function
def loss(ytrue, ypred):
return ((ytrue-ypred)**2).sum()
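# Illustrative sanity check (editorial sketch, left as comments so the script's
# behaviour is unchanged): on noiseless data the fit should recover the
# generating coefficients and r_squared should be ~1, e.g.
#   X_chk, y_chk = make_regression(n_samples=50, n_features=2, noise=0.0, random_state=0)
#   assert r_squared(y_chk, predict(X_chk, fit(X_chk, y_chk))) > 0.999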
X, y, coeff = make_regression(n_samples=100, n_features=2, coef=True, noise=0.5, bias=3, random_state=70)
# print(X.shape, y.shape)
# Train the model/ learn the optimal weights
w = fit(X, y)
####################################################
fig = plt.figure(figsize=(8,8))
ax = plt.axes(projection='3d')
ax.scatter(X[:,0], X[:,1], y, c=y, cmap='seismic')
f1 = np.linspace(X[:,0].min(), X[:,0].max(), 50)
f2 = np.linspace(X[:,1].min(), X[:,1].max(), 50)
f1, f2 = np.meshgrid(f1, f2)
# prediction plane
X_ = np.concatenate([f1.reshape(-1,1), f2.reshape(-1,1)], axis=1)
pred = predict(X_, w).reshape(f1.shape)
ax.plot_surface(f1, f2, pred, alpha=0.5, cmap='seismic')
ax.set_xlabel("Feature 1")
ax.set_ylabel("Feature 2")
ax.set_zlabel("Output (y)")
plt.show()
|
princeyyadav/CB-DS-LV-May21
|
DS/S13-linear-regression/viz.py
|
viz.py
|
py
| 1,905 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.ones",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.inv",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "numpy.ones",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "sklearn.datasets.make_regression",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axes",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "numpy.linspace",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "numpy.meshgrid",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 73,
"usage_type": "name"
}
] |
485113359
|
import pytest
from graph_pkg.edit_cost.edit_cost_proteins_tu import EditCostProteinsTU
from graph_pkg.graph.label.label_node_proteins_tu import LabelNodeProteinsTU
from graph_pkg.graph.node import Node
@pytest.mark.parametrize('coord1, e_cost, expected',
[
((1,), (1., 1., 1., 1., 'dirac'), 1.),
((0,), (1., 1., 1., 1., 'dirac'), 1.),
((2,), (1., 1., 1., 1., 'dirac'), 1.),
((0,), (11., 1., 1., 1., 'dirac'), 11.),
((0,), (1., 1.9, 1.9, 1.9, 'dirac'), 1.),
])
def test_dirac_proteins_tu_add_node(coord1, e_cost, expected):
node0 = Node(0, LabelNodeProteinsTU(*coord1))
edit_cost = EditCostProteinsTU(*e_cost)
result = edit_cost.cost_insert_node(node0)
assert result == expected
@pytest.mark.parametrize('coord1, e_cost, expected',
[
((1,), (1., 1., 1., 1., 'dirac'), 1.),
((0,), (1., 1., 1., 1., 'dirac'), 1.),
((1,), (16., 12., 18., 17., 'dirac'), 12.),
])
def test_dirac_proteins_tu_delete_node(coord1, e_cost, expected):
node0 = Node(0, LabelNodeProteinsTU(*coord1))
edit_cost = EditCostProteinsTU(*e_cost)
result = edit_cost.cost_delete_node(node0)
assert result == expected
@pytest.mark.parametrize('coord1, coord2, e_cost, expected',
[
((1,), (1,), (1., 1., 1., 1., 'dirac'), 0.),
((0,), (1,), (1., 1., 1., 1., 'dirac'), 2.),
((1,), (0,), (1., 1., 1., 1., 'dirac'), 2.),
((1,), (2,), (3., 2., 2.5, 1., 'dirac'), 5.),
])
def test_dirac_proteins_tu_substitution(coord1, coord2, e_cost, expected):
node0 = Node(0, LabelNodeProteinsTU(*coord1))
node1 = Node(1, LabelNodeProteinsTU(*coord2))
edit_cost = EditCostProteinsTU(*e_cost)
result = edit_cost.cost_substitute_node(node0, node1)
assert result == expected
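# Editorial note: the expected values above are consistent with a dirac cost
# model in which substituting two nodes costs cost_insert + cost_delete when
# their labels differ and 0 when they match (an inference from these tests,
# not taken from the graph_pkg source). A minimal sketch of that rule:
def dirac_substitution_cost(label1, label2, c_insert, c_delete):
    """Hypothetical helper mirroring the behaviour the tests appear to check."""
    return 0.0 if label1 == label2 else c_insert + c_delete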
|
CheshireCat12/graph_project
|
tests/unit_edit_cost/test_edit_cost_proteins_tu.py
|
test_edit_cost_proteins_tu.py
|
py
| 2,168 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "graph_pkg.graph.node.Node",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "graph_pkg.graph.label.label_node_proteins_tu.LabelNodeProteinsTU",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "graph_pkg.edit_cost.edit_cost_proteins_tu.EditCostProteinsTU",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "graph_pkg.graph.node.Node",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "graph_pkg.graph.label.label_node_proteins_tu.LabelNodeProteinsTU",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "graph_pkg.edit_cost.edit_cost_proteins_tu.EditCostProteinsTU",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "graph_pkg.graph.node.Node",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "graph_pkg.graph.label.label_node_proteins_tu.LabelNodeProteinsTU",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "graph_pkg.graph.node.Node",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "graph_pkg.graph.label.label_node_proteins_tu.LabelNodeProteinsTU",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "graph_pkg.edit_cost.edit_cost_proteins_tu.EditCostProteinsTU",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 42,
"usage_type": "attribute"
}
] |
27016970830
|
from otree.api import (
models, widgets, BaseConstants, BaseSubsession, BaseGroup, BasePlayer,
Currency as c, currency_range
)
from django.utils.translation import ugettext_lazy as _
from GameFeb19_intro.models import add_currency, add_tokens, TRNSL_ERR_MSG, translated_languages
import csv
import random
author = 'Tatiana Mayskaya'
doc = """
Cognitive Reflection Test & IQ Test & GRE-based Test :: whatever counts as cognitive test
"""
class Constants(BaseConstants):
name_in_url = 'GameFeb19_questions_cognitive'
players_per_group = None
# this is done only to count the number of questions in the quiz
# (assuming Russian and English versions have the same number)
with open('GameFeb19_questions_cognitive/cognitive_en.csv') as file:
questions = list(csv.DictReader(file))
num_rounds = len(questions)
class Subsession(BaseSubsession):
def creating_session(self):
assert self.session.config['language'] in translated_languages, TRNSL_ERR_MSG
if self.round_number == 1:
if self.session.config['language'] == 'en':
with open('GameFeb19_questions_cognitive/cognitive_en.csv', encoding='utf-8-sig') as test_file:
self.session.vars['test_file_list'] = list(csv.DictReader(test_file))
else:
with open('GameFeb19_questions_cognitive/cognitive_ru.csv', encoding='utf-8-sig') as test_file:
self.session.vars['test_file_list'] = list(csv.DictReader(test_file))
for p in self.get_players():
p.random_questions()
self.session.vars['num_questions_CT'] = Constants.num_rounds
for p in self.get_players():
question_data = p.current_question()
p.question_id = question_data['id']
p.question = question_data['question']
p.solution = int(question_data['solution'])
if int(question_data['n_choices']) == 0:
p.solution_text = question_data['solution']
else:
p.solution_text = question_data['choice{}'.format(p.solution)]
p.participant.vars['questions_CT'] = []
def vars_for_admin_report(self):
players = []
for p in self.get_players():
players.append((p.participant.label, p.question, p.submitted_answer_text, p.solution_text,
p.get_is_correct_display()))
return {'players': players}
class Group(BaseGroup):
pass
class Player(BasePlayer):
question_id = models.IntegerField()
question = models.StringField()
solution = models.IntegerField()
solution_text = models.StringField()
submitted_answer = models.IntegerField()
submitted_answer_options = models.IntegerField(widget=widgets.RadioSelect)
submitted_answer_text = models.StringField()
is_correct = models.BooleanField(initial=False, choices=[[True, _('Yes')], [False, _('No')]])
def random_questions(self):
randomized_questions = random.sample(range(1, Constants.num_rounds + 1, 1), Constants.num_rounds)
self.participant.vars['questions_order_CT'] = randomized_questions
def current_question(self):
num = self.participant.vars['questions_order_CT'][self.round_number - 1]
return self.session.vars['test_file_list'][num - 1]
def check_correct(self):
question_data = self.current_question()
if int(question_data['n_choices']) > 0:
self.submitted_answer = self.submitted_answer_options
self.is_correct = (self.submitted_answer == self.solution)
if int(question_data['n_choices']) == 0:
self.submitted_answer_text = str(self.submitted_answer)
else:
self.submitted_answer_text = question_data['choice{}'.format(self.submitted_answer)]
self.participant.vars['questions_CT'].append(
(self.round_number, self.question, self.submitted_answer_text, self.solution_text,
self.get_is_correct_display()))
if self.is_correct:
self.payoff = self.session.vars['rate_CT']
def set_payoffs(self):
self.participant.vars['questions_correct_CT'] = sum([int(p.is_correct) for p in self.in_all_rounds()])
self.participant.vars['payment_formula'] = \
self.participant.vars['payment_formula'] + \
' + ' + str(self.participant.vars['questions_correct_CT']) + '*' + \
add_currency(self.session.config['currency_used'],
self.session.vars['rate_CT'] * self.session.config['real_world_currency_per_point'])
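# Editorial note (illustrative, not part of the app): random.sample draws
# without replacement, so each participant gets a full permutation of the
# question indices, e.g. random.sample(range(1, 6), 5) might return
# [3, 1, 5, 2, 4]; round k of the app then shows question
# questions_order_CT[k - 1].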
|
TatianaMayskaya/oTree
|
GameFeb19_questions_cognitive/models.py
|
models.py
|
py
| 4,609 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "otree.api.BaseConstants",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "csv.DictReader",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "otree.api.BaseSubsession",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "GameFeb19_intro.models.translated_languages",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "GameFeb19_intro.models.TRNSL_ERR_MSG",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "csv.DictReader",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "csv.DictReader",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "otree.api.BaseGroup",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "otree.api.BasePlayer",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "otree.api.models.IntegerField",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "otree.api.models",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "otree.api.models.StringField",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "otree.api.models",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "otree.api.models.IntegerField",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "otree.api.models",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "otree.api.models.StringField",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "otree.api.models",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "otree.api.models.IntegerField",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "otree.api.models",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "otree.api.models.IntegerField",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "otree.api.models",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "otree.api.widgets.RadioSelect",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "otree.api.widgets",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "otree.api.models.StringField",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "otree.api.models",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "otree.api.models.BooleanField",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "otree.api.models",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "random.sample",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "GameFeb19_intro.models.add_currency",
"line_number": 106,
"usage_type": "call"
}
] |
9963863734
|
import numpy as np
import scipy.sparse as sp
import torch
import time
import random
from utils.tool import read_data, write_dic, dictionary, normalize, sparse_mx_to_torch_sparse_tensor
def encoding_test(test_graph_path, test_fact_path, train_dataset = "fb237_v1"):
"""load test-graph and test-facts, and do the encoding on the test-graph"""
t_start = time.time()
path = "data"
#these two paths are for loading
relation_dic_path = "{}/{}/train/relation-dic.txt".format(path, train_dataset)
type_dic_path = "{}/{}/train/type-dic.txt".format(path, train_dataset)
test_graph_triples = read_data(test_graph_path)
test_fact_triples_with_label = read_data(test_fact_path)
#load relation dic and type dic generated by training
f_relation_dic = open(relation_dic_path)
relations = []
for line in f_relation_dic:
relation_new = line.strip().split("\t")[1]
relations.append(relation_new)
f_type_dic = open(type_dic_path)
types = []
for line in f_type_dic:
type_new = line.strip().split("\t")[1]
types.append(type_new)
relation_set = set(relations)
all_triples_with_label = test_graph_triples + test_fact_triples_with_label
test_graph_real_triples = []
test_graph_type_triples = []
for triple in test_graph_triples:
if triple[1] != "<http://www.w3.org/1999/02/22-rdf-syntax-ns#type>":
test_graph_real_triples.append(triple)
else:
test_graph_type_triples.append(triple)
test_fact_real_triples_with_label = []
test_fact_type_triples_with_label = []
for triple in test_fact_triples_with_label:
if triple[1] != "<http://www.w3.org/1999/02/22-rdf-syntax-ns#type>":
test_fact_real_triples_with_label.append(triple)
else:
test_fact_type_triples_with_label.append(triple)
all_real_triples_with_label = []
all_type_triples_with_label = []
constant_set = set()
for triple in all_triples_with_label:
if triple[1] != "<http://www.w3.org/1999/02/22-rdf-syntax-ns#type>":
constant_set.add(triple[0])
constant_set.add(triple[2])
all_real_triples_with_label.append(triple)
else:
constant_set.add(triple[0])
all_type_triples_with_label.append(triple)
constants = list(constant_set)
constant2index = dictionary(constants)
relation2index = dictionary(relations)
type2index = dictionary(types)
#print("time:",time.time()-t_start)
#generate list of pairs for encoding
pairs = []
pair_set = set()
for triple in all_real_triples_with_label:
sub_idx = constant2index[triple[0]]
obj_idx = constant2index[triple[2]]
if sub_idx < obj_idx:
if (sub_idx, obj_idx) not in pair_set:
pair_set.add((sub_idx, obj_idx))
pairs.append((sub_idx, obj_idx))
if sub_idx > obj_idx:
if (obj_idx, sub_idx) not in pair_set:
pair_set.add((obj_idx, sub_idx))
pairs.append((obj_idx, sub_idx))
for constant_idx in range(len(constants)):
pairs.append((constant_idx, constant_idx))
pair_set.add((constant_idx, constant_idx))
pair2index = dictionary(pairs)
s_time = time.time()
#collect related pairs for each constant
pairs_for_constant = dict([(i,set()) for i in range(len(constants))])
p_idx = 0
for pair in pairs:
p_idx = pair2index[pair]
c1 = pair[0]
c2 = pair[1]
pairs_for_constant[c1].add(p_idx)
pairs_for_constant[c2].add(p_idx)
#collect neighbors for each pair node
pneighbors_for_pair = dict([(i,set()) for i in range(len(pairs))])
for c_idx in range(len(constants)):
pairs_c = set(pairs_for_constant[c_idx])
# pair and n_pair share a common constant (the one indexed by c_idx)
for pair in pairs_c:
for n_pair in pairs_c:
if pair != n_pair:
pneighbors_for_pair[pair].add(n_pair)
#generate edge list
edges = []
for i in range(len(pairs)):
pneighbors = pneighbors_for_pair[i]
for pneighbor in pneighbors:
edges.append([i, pneighbor])
edges.append([pneighbor, i])
#print("Finished generating edges", time.time() - s_time)
#generate a normalized adjencency matrix (strategy for GCN)
#print(edges)
edges = np.array(edges)
adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])), shape=(len(pairs), len(pairs)), dtype=np.float32)
adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
adj = normalize(adj + sp.eye(adj.shape[0]))
adj = sparse_mx_to_torch_sparse_tensor(adj)
del edges
#print("Total time for adj: {:.4f}s".format(time.time() - s_time))
#print("Start to generate features, labels, and masks")
def initialize(test_graph_real_triples, test_graph_type_triples, test_fact_real_triples_with_label, test_fact_type_triples_with_label):
labels = torch.zeros(len(pairs), len(types) + 2*len(relations))
masks = torch.zeros(len(pairs), len(types) + 2*len(relations))
features = torch.zeros(len(pairs), len(types) + 2*len(relations))
#labels and masks are generated for all triples in test-facts (pos&neg)
for triple in test_fact_type_triples_with_label:
cons = triple[0]
typ = triple[2]
label = triple[3]
pair_idx= pair2index[(constant2index[cons], constant2index[cons])]
typ_idx = type2index[typ]
if label == "1":
labels[pair_idx][typ_idx] = 1
elif label == "0":
labels[pair_idx][typ_idx] = 0
masks[pair_idx][typ_idx] = 1
for triple in test_fact_real_triples_with_label:
sub = triple[0]
rel = triple[1]
obj = triple[2]
label = triple[3]
sub_idx = constant2index[sub]
rel_idx = relation2index[rel]
obj_idx = constant2index[obj]
try:
pair_idx = pair2index[(sub_idx, obj_idx)]
except KeyError:
pair_idx = pair2index[(obj_idx, sub_idx)]
rel_idx = rel_idx + len(relations)
if label == "1":
labels[pair_idx][len(types) + rel_idx] = 1
elif label == "0":
labels[pair_idx][len(types) + rel_idx] = 0
masks[pair_idx][len(types) + rel_idx] = 1
#features are generated for all triples in test-graph (pos&neg)
for triple in test_graph_type_triples:
cons = triple[0]
typ = triple[2]
pair_idx= pair2index[(constant2index[cons], constant2index[cons])]
typ_idx = type2index[typ]
features[pair_idx][typ_idx] = 1
for triple in test_graph_real_triples:
sub = triple[0]
rel = triple[1]
obj = triple[2]
sub_idx = constant2index[sub]
rel_idx = relation2index[rel]
obj_idx = constant2index[obj]
try:
pair_idx = pair2index[(sub_idx, obj_idx)]
except KeyError:
pair_idx = pair2index[(obj_idx, sub_idx)]
rel_idx = rel_idx + len(relations)
features[pair_idx][len(types) + rel_idx] = 1
features.requires_grad = True
labels.requires_grad = False
return features, labels, masks
features, labels, masks = initialize(test_graph_real_triples, test_graph_type_triples, test_fact_real_triples_with_label, test_fact_type_triples_with_label)
num_type = len(types)
num_relation = len(relations)
def triple2index(triple_now):
sub_idx = constant2index[triple_now[0]]
try:
relation_idx = relation2index[triple_now[1]]
except KeyError:
pair_idx = pair2index[(sub_idx, sub_idx)]
dim_idx = type2index[triple_now[2]]
return pair_idx, dim_idx
obj_idx = constant2index[triple_now[2]]
if (sub_idx, obj_idx) in pair_set:
pair_idx = pair2index[(sub_idx, obj_idx)]
dim_idx = len(types) + relation_idx
elif (obj_idx, sub_idx) in pair_set:
pair_idx = pair2index[(obj_idx, sub_idx)]
dim_idx = len(types) + len(relations) + relation_idx
else:
print(triple_now, sub_idx, relation_idx, obj_idx)
raise KeyError("no pair found for triple: {}".format(triple_now))
return pair_idx, dim_idx
hits_true = []
for triple in test_fact_triples_with_label:
if triple[-1] == "1":
hits_true.append(triple2index(triple))
#print("Finished generation")
#print("Total time elapsed for encoding: {:.4f}s".format(time.time() - t_start))
return adj, features, labels, masks, num_type, num_relation, constants, relations, types, pairs, hits_true
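# Example usage (editorial sketch; the file paths below are hypothetical):
#   (adj, features, labels, masks, num_type, num_relation, constants,
#    relations, types, pairs, hits_true) = encoding_test(
#       "data/fb237_v1/test/test-graph.txt",
#       "data/fb237_v1/test/test-fact.txt",
#       train_dataset="fb237_v1")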
|
shuwen-liu-ox/INDIGO
|
utils/utils_test_pattern.py
|
utils_test_pattern.py
|
py
| 9,170 |
python
|
en
|
code
| 22 |
github-code
|
6
|
[
{
"api_name": "time.time",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "utils.tool.read_data",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "utils.tool.read_data",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "utils.tool.dictionary",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "utils.tool.dictionary",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "utils.tool.dictionary",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "utils.tool.dictionary",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "scipy.sparse.coo_matrix",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "scipy.sparse",
"line_number": 147,
"usage_type": "name"
},
{
"api_name": "numpy.ones",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 147,
"usage_type": "attribute"
},
{
"api_name": "utils.tool.normalize",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "scipy.sparse.eye",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "scipy.sparse",
"line_number": 150,
"usage_type": "name"
},
{
"api_name": "utils.tool.sparse_mx_to_torch_sparse_tensor",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 161,
"usage_type": "call"
}
] |
43077970864
|
from typing import Any, Callable, Dict, Optional, Type, Union
from fugue.execution.execution_engine import ExecutionEngine, SQLEngine
from fugue.execution.native_execution_engine import NativeExecutionEngine
from triad.utils.convert import to_instance
from triad import assert_or_throw, ParamDict
class _ExecutionEngineFactory(object):
def __init__(self):
self._funcs: Dict[str, Callable] = {}
self._type_funcs: Dict[Type, Callable] = {}
self._sql_funcs: Dict[str, Callable] = {}
self.register_default(lambda conf, **kwargs: NativeExecutionEngine(conf=conf))
self.register_default_sql_engine(lambda engine, **kwargs: engine.sql_engine)
def register(
self, name_or_type: Union[str, Type], func: Callable, on_dup="overwrite"
) -> None:
if isinstance(name_or_type, str):
self._register(self._funcs, name=name_or_type, func=func, on_dup=on_dup)
else:
self._register(
self._type_funcs, name=name_or_type, func=func, on_dup=on_dup
)
def register_default(self, func: Callable, on_dup="overwrite") -> None:
self.register("", func, on_dup)
def register_sql_engine(
self, name: str, func: Callable, on_dup="overwrite"
) -> None:
self._register(self._sql_funcs, name=name, func=func, on_dup=on_dup)
def register_default_sql_engine(self, func: Callable, on_dup="overwrite") -> None:
self.register_sql_engine("", func, on_dup)
def make(
self, engine: Any = None, conf: Any = None, **kwargs: Any
) -> ExecutionEngine:
if isinstance(engine, tuple):
execution_engine = self.make_execution_engine(
engine[0], conf=conf, **kwargs
)
sql_engine = self.make_sql_engine(engine[1], execution_engine)
execution_engine.set_sql_engine(sql_engine)
return execution_engine
else:
return self.make((engine, None), conf=conf, **kwargs)
def make_execution_engine(
self, engine: Any = None, conf: Any = None, **kwargs: Any
) -> ExecutionEngine:
# Applying this function to an ExecutionEngine instance makes
# sure the compile conf is a superset of conf
# TODO: it's a mess here, can we make the logic more intuitive?
def make_engine(engine: Any) -> ExecutionEngine:
if isinstance(engine, str) and engine in self._funcs:
return self._funcs[engine](conf, **kwargs)
for k, f in self._type_funcs.items():
if isinstance(engine, k):
return f(engine, conf, **kwargs)
if isinstance(engine, ExecutionEngine):
if conf is not None:
engine.compile_conf.update(conf)
engine.compile_conf.update(kwargs)
return engine
return to_instance(
engine, ExecutionEngine, kwargs=dict(conf=conf, **kwargs)
)
result = make_engine(engine or "")
result.compile_conf.update(result.conf, on_dup=ParamDict.IGNORE)
result.compile_conf.update(conf, on_dup=ParamDict.OVERWRITE)
result.compile_conf.update(kwargs, on_dup=ParamDict.OVERWRITE)
return result
def make_sql_engine(
self,
engine: Any = None,
execution_engine: Optional[ExecutionEngine] = None,
**kwargs: Any,
) -> SQLEngine:
if engine is None:
engine = ""
if isinstance(engine, str) and engine in self._sql_funcs:
return self._sql_funcs[engine](execution_engine, **kwargs)
if isinstance(engine, SQLEngine):
assert_or_throw(
execution_engine is None and len(kwargs) == 0,
lambda: ValueError(
f"{engine} is an instance, can't take arguments "
f"execution_engine={execution_engine}, kwargs={kwargs}"
),
)
return engine
return to_instance(
engine, SQLEngine, kwargs=dict(execution_engine=execution_engine, **kwargs)
)
def _register(
self,
callables: Dict[Any, Callable],
name: Any,
func: Callable,
on_dup="overwrite",
) -> None:
if name not in callables:
callables[name] = func
return
if on_dup in ["raise", "throw"]:
raise KeyError(f"{name} is already registered")
if on_dup == "overwrite":
callables[name] = func
return
if on_dup == "ignore":
return
raise ValueError(on_dup)
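# Editorial sketch of the on_dup semantics implemented by _register above
# (comments only, not part of the original module; other_func is a placeholder):
#   f = _ExecutionEngineFactory()
#   f.register("my", lambda conf, **kwargs: NativeExecutionEngine(conf=conf))
#   f.register("my", other_func, on_dup="ignore")  # keeps the first binding
#   f.register("my", other_func, on_dup="throw")   # raises KeyError
#   f.register("my", other_func)                   # default "overwrite" replaces it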
_EXECUTION_ENGINE_FACTORY = _ExecutionEngineFactory()
def register_execution_engine(
name_or_type: Union[str, Type], func: Callable, on_dup="overwrite"
) -> None:
"""Register :class:`~fugue.execution.execution_engine.ExecutionEngine` with
a given name.
:param name_or_type: alias of the execution engine, or type of an object that
can be converted to an execution engine
:param func: a callable taking |ParamsLikeObject| and ``**kwargs`` and returning an
:class:`~fugue.execution.execution_engine.ExecutionEngine` instance
:param on_dup: action on duplicated ``name``. It can be "overwrite", "ignore"
(not overwriting) or "throw" (throw exception), defaults to "overwrite".
:raises KeyError: if ``on_dup`` is ``throw`` and the ``name`` already exists
.. admonition:: Examples
Alias registration examples:
.. code-block:: python
# create a new engine with name my (overwrites if existed)
register_execution_engine("my", lambda conf: MyExecutionEngine(conf))
# 0
make_execution_engine("my")
make_execution_engine("my", {"myconfig":"value})
# 1
with FugueWorkflow("my") as dag:
dag.create([[0]],"a:int").show()
# 2
dag = FugueWorkflow()
dag.create([[0]],"a:int").show()
dag.run("my", {"myconfig":"value})
# 3
fsql('''
CREATE [[0]] SCHEMA a:int
PRINT
''').run("my")
Type registration examples:
.. code-block:: python
from pyspark.sql import SparkSession
from fugue_spark import SparkExecutionEngine
from fugue_sql import fsql
register_execution_engine(
SparkSession,
lambda session, conf: SparkExecutionEngine(session, conf))
spark_session = SparkSession.builder.getOrCreate()
fsql('''
CREATE [[0]] SCHEMA a:int
PRINT
''').run(spark_session)
"""
_EXECUTION_ENGINE_FACTORY.register(name_or_type, func, on_dup)
def register_default_execution_engine(func: Callable, on_dup="overwrite") -> None:
"""Register :class:`~fugue.execution.execution_engine.ExecutionEngine` as the
default engine.
:param func: a callable taking |ParamsLikeObject| and ``**kwargs`` and returning an
:class:`~fugue.execution.execution_engine.ExecutionEngine` instance
:param on_dup: action on duplicated ``name``. It can be "overwrite", "ignore"
(not overwriting) or "throw" (throw exception), defaults to "overwrite".
:raises KeyError: if ``on_dup`` is ``throw`` and the ``name`` already exists
.. admonition:: Examples
.. code-block:: python
# create a new engine with name my (overwrites if existed)
register_default_execution_engine(lambda conf: MyExecutionEngine(conf))
# the following examples will use MyExecutionEngine
# 0
make_execution_engine()
make_execution_engine(None, {"myconfig":"value"})
# 1
with FugueWorkflow() as dag:
dag.create([[0]],"a:int").show()
# 2
dag = FugueWorkflow()
dag.create([[0]],"a:int").show()
dag.run(None, {"myconfig":"value"})
# 3
fsql('''
CREATE [[0]] SCHEMA a:int
PRINT
''').run("", {"myconfig":"value})
"""
_EXECUTION_ENGINE_FACTORY.register_default(func, on_dup)
def register_sql_engine(name: str, func: Callable, on_dup="overwrite") -> None:
"""Register :class:`~fugue.execution.execution_engine.SQLEngine` with
a given name.
:param name: name of the SQL engine
:param func: a callable taking
:class:`~fugue.execution.execution_engine.ExecutionEngine`
and ``**kwargs`` and returning a
:class:`~fugue.execution.execution_engine.SQLEngine` instance
:param on_dup: action on duplicated ``name``. It can be "overwrite", "ignore"
(not overwriting) or "throw" (throw exception), defaults to "overwrite".
:raises KeyError: if ``on_dup`` is ``throw`` and the ``name`` already exists
.. admonition:: Examples
.. code-block:: python
# create a new engine with name my (overwrites if existed)
register_sql_engine("mysql", lambda engine: MySQLEngine(engine))
# create execution engine with MySQLEngine as the default
make_execution_engine(("", "mysql"))
# create DaskExecutionEngine with MySQLEngine as the default
make_execution_engine(("dask", "mysql"))
# default execution engine + MySQLEngine
with FugueWorkflow(("","mysql")) as dag:
dag.create([[0]],"a:int").show()
"""
_EXECUTION_ENGINE_FACTORY.register_sql_engine(name, func, on_dup)
def register_default_sql_engine(func: Callable, on_dup="overwrite") -> None:
"""Register :class:`~fugue.execution.execution_engine.SQLEngine` as the
default engine
:param func: a callable taking
:class:`~fugue.execution.execution_engine.ExecutionEngine`
and ``**kwargs`` and returning a
:class:`~fugue.execution.execution_engine.SQLEngine` instance
:param on_dup: action on duplicated ``name``. It can be "overwrite", "ignore"
(not overwriting) or "throw" (throw exception), defaults to "overwrite".
:raises KeyError: if ``on_dup`` is ``throw`` and the ``name`` already exists
.. note::
You should be careful to use this function, because when you set a custom
SQL engine as default, all execution engines you create will use this SQL
engine unless you are explicit. For example if you set the default SQL engine
to be a Spark specific one, then if you start a NativeExecutionEngine, it will
try to use it and will throw exceptions.
So it's always a better idea to use ``register_sql_engine`` instead
.. admonition:: Examples
.. code-block:: python
# create a new engine with name my (overwrites if existed)
register_default_sql_engine(lambda engine: MySQLEngine(engine))
# create NativeExecutionEngine with MySQLEngine as the default
make_execution_engine()
# create SparkExecutionEngine with MySQLEngine instead of SparkSQLEngine
make_execution_engine("spark")
# NativeExecutionEngine with MySQLEngine
with FugueWorkflow() as dag:
dag.create([[0]],"a:int").show()
"""
_EXECUTION_ENGINE_FACTORY.register_default_sql_engine(func, on_dup)
def make_execution_engine(
engine: Any = None, conf: Any = None, **kwargs: Any
) -> ExecutionEngine:
"""Create :class:`~fugue.execution.execution_engine.ExecutionEngine`
with specified ``engine``
:param engine: it can be empty string or null (use the default execution
engine), a string (use the registered execution engine), an
:class:`~fugue.execution.execution_engine.ExecutionEngine` type, or
the :class:`~fugue.execution.execution_engine.ExecutionEngine` instance
, or a tuple of two values where the first value represents execution
engine and the second value represents the sql engine (you can use ``None``
for either of them to use the default one), defaults to None
:param conf: |ParamsLikeObject|, defaults to None
:param kwargs: additional parameters to initialize the execution engine
:return: the :class:`~fugue.execution.execution_engine.ExecutionEngine`
instance
.. admonition:: Examples
.. code-block:: python
register_default_execution_engine(lambda conf: E1(conf))
register_execution_engine("e2", lambda conf, **kwargs: E2(conf, **kwargs))
register_sql_engine("s", lambda conf: S2(conf))
# E1 + E1.default_sql_engine
make_execution_engine()
# E2 + E2.default_sql_engine
make_execution_engine("e2")
# E1 + S2
make_execution_engine((None, "s"))
# E2(conf, a=1, b=2) + S2
make_execution_engine(("e2", "s"), conf, a=1, b=2)
# SparkExecutionEngine + SparkSQLEngine
make_execution_engine(SparkExecutionEngine)
make_execution_engine(SparkExecutionEngine(spark_session, conf))
# SparkExecutionEngine + S2
make_execution_engine((SparkExecutionEngine, "s"))
"""
import fugue._utils.register # pylint: disable=W0611 # noqa: F401
return _EXECUTION_ENGINE_FACTORY.make(engine, conf, **kwargs)
def make_sql_engine(
engine: Any = None,
execution_engine: Optional[ExecutionEngine] = None,
**kwargs: Any,
) -> SQLEngine:
"""Create :class:`~fugue.execution.execution_engine.SQLEngine`
with specified ``engine``
:param engine: it can be empty string or null (use the default SQL
engine), a string (use the registered SQL engine), an
:class:`~fugue.execution.execution_engine.SQLEngine` type, or
the :class:`~fugue.execution.execution_engine.SQLEngine` instance
(you can use ``None`` to use the default one), defaults to None
:param execution_engine: the
:class:`~fugue.execution.execution_engine.ExecutionEngine` instance
to create
the :class:`~fugue.execution.execution_engine.SQLEngine`. Normally you
should always provide this value.
:param kwargs: additional parameters to initialize the sql engine
:return: the :class:`~fugue.execution.execution_engine.SQLEngine`
instance
.. note::
For users, you normally don't need to call this function directly.
Use ``make_execution_engine`` instead
.. admonition:: Examples
.. code-block:: python
register_default_sql_engine(lambda conf: S1(conf))
register_sql_engine("s2", lambda conf: S2(conf))
engine = NativeExecutionEngine()
# S1(engine)
make_sql_engine(None, engine)
# S1(engine, a=1)
make_sql_engine(None, engine, a=1)
# S2(engine)
make_sql_engine("s2", engine)
# SqliteEngine(engine)
make_sql_engine(SqliteEngine)
"""
import fugue._utils.register # pylint: disable=W0611 # noqa: F401
return _EXECUTION_ENGINE_FACTORY.make_sql_engine(engine, execution_engine, **kwargs)
|
ofili/Wrangle-and-Analyze-Data
|
venv/Lib/site-packages/fugue/execution/factory.py
|
factory.py
|
py
| 15,192 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "typing.Dict",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "typing.Type",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "fugue.execution.native_execution_engine.NativeExecutionEngine",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "typing.Union",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "typing.Type",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "fugue.execution.execution_engine.ExecutionEngine",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "fugue.execution.execution_engine.ExecutionEngine",
"line_number": 64,
"usage_type": "argument"
},
{
"api_name": "triad.utils.convert.to_instance",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "fugue.execution.execution_engine.ExecutionEngine",
"line_number": 70,
"usage_type": "argument"
},
{
"api_name": "fugue.execution.execution_engine.ExecutionEngine",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "triad.ParamDict.IGNORE",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "triad.ParamDict",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "triad.ParamDict.OVERWRITE",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "triad.ParamDict",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "triad.ParamDict.OVERWRITE",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "triad.ParamDict",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "fugue.execution.execution_engine.ExecutionEngine",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "fugue.execution.execution_engine.ExecutionEngine",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "fugue.execution.execution_engine.SQLEngine",
"line_number": 89,
"usage_type": "argument"
},
{
"api_name": "triad.assert_or_throw",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "triad.utils.convert.to_instance",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "fugue.execution.execution_engine.SQLEngine",
"line_number": 99,
"usage_type": "argument"
},
{
"api_name": "fugue.execution.execution_engine.SQLEngine",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "typing.Type",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 189,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 231,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 265,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 309,
"usage_type": "name"
},
{
"api_name": "fugue.execution.execution_engine.ExecutionEngine",
"line_number": 310,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 361,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 362,
"usage_type": "name"
},
{
"api_name": "fugue.execution.execution_engine.ExecutionEngine",
"line_number": 362,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 363,
"usage_type": "name"
},
{
"api_name": "fugue.execution.execution_engine.SQLEngine",
"line_number": 364,
"usage_type": "name"
}
] |
17609260691
|
# encoding: utf-8
import badgrlog
import datetime
from django.utils import timezone
from rest_framework import permissions
from rest_framework.response import Response
from rest_framework import serializers
from rest_framework import status
from backpack.models import BackpackCollection, BackpackBadgeShare, BackpackCollectionShare
from backpack.serializers_v1 import CollectionSerializerV1, LocalBadgeInstanceUploadSerializerV1
from backpack.serializers_v2 import BackpackAssertionSerializerV2, BackpackCollectionSerializerV2, \
BackpackImportSerializerV2, BackpackAssertionAcceptanceSerializerV2
from entity.api import BaseEntityListView, BaseEntityDetailView
from issuer.models import BadgeInstance
from issuer.permissions import AuditedModelOwner, VerifiedEmailMatchesRecipientIdentifier, BadgrOAuthTokenHasScope
from issuer.public_api import ImagePropertyDetailView
from apispec_drf.decorators import apispec_list_operation, apispec_post_operation, apispec_get_operation, \
apispec_delete_operation, apispec_put_operation, apispec_operation
from mainsite.permissions import AuthenticatedWithVerifiedIdentifier
logger = badgrlog.BadgrLogger()
_TRUE_VALUES = ['true', 't', 'on', 'yes', 'y', '1', 1, 1.0, True]
_FALSE_VALUES = ['false', 'f', 'off', 'no', 'n', '0', 0, 0.0, False]
def _scrub_boolean(boolean_str, default=None):
if boolean_str in _TRUE_VALUES:
return True
if boolean_str in _FALSE_VALUES:
return False
return default
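# Editorial note: _scrub_boolean maps common truthy/falsy spellings based on
# the lists above, e.g. _scrub_boolean("1") -> True, _scrub_boolean("off") ->
# False, while unrecognised values fall back to the supplied default
# (_scrub_boolean("maybe") -> None).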
class BackpackAssertionList(BaseEntityListView):
model = BadgeInstance
v1_serializer_class = LocalBadgeInstanceUploadSerializerV1
v2_serializer_class = BackpackAssertionSerializerV2
create_event = badgrlog.BadgeUploaded
permission_classes = (AuthenticatedWithVerifiedIdentifier, VerifiedEmailMatchesRecipientIdentifier, BadgrOAuthTokenHasScope)
http_method_names = ('get', 'post')
valid_scopes = {
'get': ['r:backpack', 'rw:backpack'],
'post': ['rw:backpack'],
}
include_defaults = {
'include_expired': {'v1': 'true', 'v2': 'false'},
'include_revoked': {'v1': 'false', 'v2': 'false'},
'include_pending': {'v1': 'false', 'v2': 'false'},
}
def get_objects(self, request, **kwargs):
version = kwargs.get('version', 'v1')
include_expired = request.query_params.get(
'include_expired', self.include_defaults['include_expired'][version]
).lower() in ['1', 'true']
include_revoked = request.query_params.get(
'include_revoked', self.include_defaults['include_revoked'][version]
).lower() in ['1', 'true']
include_pending = request.query_params.get(
'include_pending', self.include_defaults['include_pending'][version]
).lower() in ['1', 'true']
def badge_filter(b):
if ((b.acceptance == BadgeInstance.ACCEPTANCE_REJECTED) or
(not include_expired and b.expires_at is not None and b.expires_at < timezone.now()) or
(not include_revoked and b.revoked) or
(not include_pending and b.pending)):
return False
return True
return list(filter(badge_filter, self.request.user.cached_badgeinstances()))
@apispec_list_operation('Assertion',
summary="Get a list of Assertions in authenticated user's backpack ",
tags=['Backpack']
)
def get(self, request, **kwargs):
mykwargs = kwargs.copy()
mykwargs['expands'] = []
expands = request.GET.getlist('expand', [])
if 'badgeclass' in expands:
mykwargs['expands'].append('badgeclass')
if 'issuer' in expands:
mykwargs['expands'].append('issuer')
return super(BackpackAssertionList, self).get(request, **mykwargs)
@apispec_post_operation('Assertion',
summary="Upload a new Assertion to the backpack",
tags=['Backpack']
)
def post(self, request, **kwargs):
if kwargs.get('version', 'v1') == 'v1':
try:
return super(BackpackAssertionList, self).post(request, **kwargs)
except serializers.ValidationError as e:
self.log_not_created(e)
raise e
raise NotImplementedError("use BackpackImportBadge.post instead")
def log_not_created(self, error):
request = self.request
user = request.user
image_data = ''
user_entity_id = ''
error_name = ''
error_result = ''
if request.data.get('image', None) is not None:
image_data = request.data.get('image', '')[:1024]
if user is not None:
user_entity_id = user.entity_id
if len(error.detail) == 1:
# grab the single error
e = error.detail[0]
error_name = e.get('name', '')
error_result = e.get('result', '')
invalid_badge_upload_report = badgrlog.InvalidBadgeUploadReport(image_data, user_entity_id, error_name, error_result)
logger.event(badgrlog.InvalidBadgeUploaded(invalid_badge_upload_report))
def get_context_data(self, **kwargs):
context = super(BackpackAssertionList, self).get_context_data(**kwargs)
context['format'] = self.request.query_params.get('json_format', 'v1') # for /v1/earner/badges compat
return context
class BackpackAssertionDetail(BaseEntityDetailView):
model = BadgeInstance
v1_serializer_class = LocalBadgeInstanceUploadSerializerV1
v2_serializer_class = BackpackAssertionSerializerV2
permission_classes = (AuthenticatedWithVerifiedIdentifier, VerifiedEmailMatchesRecipientIdentifier, BadgrOAuthTokenHasScope)
http_method_names = ('get', 'delete', 'put')
valid_scopes = {
'get': ['r:backpack', 'rw:backpack'],
'put': ['rw:backpack'],
'delete': ['rw:backpack'],
}
def get_context_data(self, **kwargs):
context = super(BackpackAssertionDetail, self).get_context_data(**kwargs)
context['format'] = self.request.query_params.get('json_format', 'v1') # for /v1/earner/badges compat
return context
@apispec_get_operation('BackpackAssertion',
summary="Get detail on an Assertion in the user's Backpack",
tags=['Backpack']
)
def get(self, request, **kwargs):
mykwargs = kwargs.copy()
mykwargs['expands'] = []
expands = request.GET.getlist('expand', [])
if 'badgeclass' in expands:
mykwargs['expands'].append('badgeclass')
if 'issuer' in expands:
mykwargs['expands'].append('issuer')
return super(BackpackAssertionDetail, self).get(request, **mykwargs)
@apispec_delete_operation('BackpackAssertion',
summary='Remove an assertion from the backpack',
tags=['Backpack']
)
def delete(self, request, **kwargs):
obj = self.get_object(request, **kwargs)
related_collections = list(BackpackCollection.objects.filter(backpackcollectionbadgeinstance__badgeinstance=obj))
if obj.source_url is None:
obj.acceptance = BadgeInstance.ACCEPTANCE_REJECTED
obj.save()
else:
obj.delete()
for collection in related_collections:
collection.save()
request.user.save()
return Response(status=status.HTTP_204_NO_CONTENT)
@apispec_put_operation('BackpackAssertion',
summary="Update acceptance of an Assertion in the user's Backpack",
tags=['Backpack']
)
def put(self, request, **kwargs):
fields_whitelist = ('acceptance',)
data = {k: v for k, v in list(request.data.items()) if k in fields_whitelist}
obj = self.get_object(request, **kwargs)
if not self.has_object_permissions(request, obj):
return Response(status=status.HTTP_404_NOT_FOUND)
context = self.get_context_data(**kwargs)
update_serializer = BackpackAssertionAcceptanceSerializerV2(obj, data, context=context)
update_serializer.is_valid(raise_exception=True)
update_serializer.save(updated_by=request.user)
main_serializer_class = self.get_serializer_class()
serializer = main_serializer_class(update_serializer.instance, context=context)
return Response(serializer.data)
class BackpackAssertionDetailImage(ImagePropertyDetailView, BadgrOAuthTokenHasScope):
model = BadgeInstance
prop = 'image'
valid_scopes = ['r:backpack', 'rw:backpack']
class BackpackCollectionList(BaseEntityListView):
model = BackpackCollection
v1_serializer_class = CollectionSerializerV1
v2_serializer_class = BackpackCollectionSerializerV2
permission_classes = (AuthenticatedWithVerifiedIdentifier, AuditedModelOwner, BadgrOAuthTokenHasScope)
valid_scopes = {
'get': ['r:backpack', 'rw:backpack'],
'post': ['rw:backpack'],
}
def get_objects(self, request, **kwargs):
return self.request.user.cached_backpackcollections()
@apispec_get_operation('Collection',
summary='Get a list of Collections',
tags=['Backpack']
)
def get(self, request, **kwargs):
return super(BackpackCollectionList, self).get(request, **kwargs)
@apispec_post_operation('Collection',
summary='Create a new Collection',
tags=['Backpack']
)
def post(self, request, **kwargs):
return super(BackpackCollectionList, self).post(request, **kwargs)
class BackpackCollectionDetail(BaseEntityDetailView):
model = BackpackCollection
v1_serializer_class = CollectionSerializerV1
v2_serializer_class = BackpackCollectionSerializerV2
permission_classes = (AuthenticatedWithVerifiedIdentifier, AuditedModelOwner, BadgrOAuthTokenHasScope)
valid_scopes = {
'get': ['r:backpack', 'rw:backpack'],
'post': ['rw:backpack'],
'put': ['rw:backpack'],
'delete': ['rw:backpack']
}
@apispec_get_operation('Collection',
summary='Get a single Collection',
tags=['Backpack']
)
def get(self, request, **kwargs):
return super(BackpackCollectionDetail, self).get(request, **kwargs)
@apispec_put_operation('Collection',
summary='Update a Collection',
tags=['Backpack']
)
def put(self, request, **kwargs):
return super(BackpackCollectionDetail, self).put(request, **kwargs)
@apispec_delete_operation('Collection',
summary='Delete a collection',
tags=['Backpack']
)
def delete(self, request, **kwargs):
return super(BackpackCollectionDetail, self).delete(request, **kwargs)
class BackpackImportBadge(BaseEntityListView):
v2_serializer_class = BackpackImportSerializerV2
permission_classes = (AuthenticatedWithVerifiedIdentifier, BadgrOAuthTokenHasScope,)
http_method_names = ('post',)
valid_scopes = ['rw:backpack']
@apispec_operation(
summary="Import a new Assertion to the backpack",
tags=['Backpack'],
parameters=[
{
"in": "body",
"name": "body",
"required": True,
"schema": {
"type": "object",
"properties": {
"url": {
"type": "string",
"format": "url",
"description": "URL to an OpenBadge compliant badge",
'required': False
},
"image": {
'type': "string",
'format': "data:image/png;base64",
'description': "base64 encoded Baked OpenBadge image",
'required': False
},
"assertion": {
'type': "json",
'description': "OpenBadge compliant json",
'required': False
},
}
},
}
]
)
def post(self, request, **kwargs):
context = self.get_context_data(**kwargs)
serializer_class = self.get_serializer_class()
serializer = serializer_class(data=request.data, context=context)
serializer.is_valid(raise_exception=True)
new_instance = serializer.save(created_by=request.user)
self.log_create(new_instance)
response_serializer = BackpackAssertionSerializerV2(new_instance, context=context)
return Response(response_serializer.data, status=status.HTTP_201_CREATED)
class ShareBackpackAssertion(BaseEntityDetailView):
model = BadgeInstance
permission_classes = (permissions.AllowAny,) # this is AllowAny to support tracking sharing links in emails
http_method_names = ('get',)
allow_any_unauthenticated_access = True
def get(self, request, **kwargs):
"""
        Share a single badge to a supported share provider
---
parameters:
- name: provider
description: The identifier of the provider to use. Supports 'facebook', 'linkedin'
required: true
type: string
paramType: query
"""
redirect = _scrub_boolean(request.query_params.get('redirect', "1"))
provider = request.query_params.get('provider')
if not provider:
return Response({'error': "unspecified share provider"}, status=status.HTTP_400_BAD_REQUEST)
provider = provider.lower()
source = request.query_params.get('source', 'unknown')
badge = self.get_object(request, **kwargs)
if not badge:
return Response(status=status.HTTP_404_NOT_FOUND)
include_identifier = _scrub_boolean(request.query_params.get('include_identifier', False))
share = BackpackBadgeShare(provider=provider, badgeinstance=badge, source=source)
share_url = share.get_share_url(provider, include_identifier=include_identifier)
if not share_url:
return Response({'error': "invalid share provider"}, status=status.HTTP_400_BAD_REQUEST)
share.save()
logger.event(badgrlog.BadgeSharedEvent(badge, provider, datetime.datetime.now(), source))
if redirect:
headers = {'Location': share_url}
return Response(status=status.HTTP_302_FOUND, headers=headers)
else:
return Response({'url': share_url})
class ShareBackpackCollection(BaseEntityDetailView):
model = BackpackCollection
permission_classes = (permissions.AllowAny,) # this is AllowAny to support tracking sharing links in emails
http_method_names = ('get',)
def get(self, request, **kwargs):
"""
Share a collection to a supported share provider
---
parameters:
- name: provider
description: The identifier of the provider to use. Supports 'facebook', 'linkedin'
required: true
type: string
paramType: query
"""
redirect = _scrub_boolean(request.query_params.get('redirect', "1"))
provider = request.query_params.get('provider')
if not provider:
return Response({'error': "unspecified share provider"}, status=status.HTTP_400_BAD_REQUEST)
provider = provider.lower()
source = request.query_params.get('source', 'unknown')
collection = self.get_object(request, **kwargs)
if not collection:
return Response(status=status.HTTP_404_NOT_FOUND)
share = BackpackCollectionShare(provider=provider, collection=collection, source=source)
share_url = share.get_share_url(provider, title=collection.name, summary=collection.description)
if not share_url:
return Response({'error': "invalid share provider"}, status=status.HTTP_400_BAD_REQUEST)
share.save()
if redirect:
headers = {'Location': share_url}
return Response(status=status.HTTP_302_FOUND, headers=headers)
else:
return Response({'url': share_url})
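# Illustrative sketch (added; not part of the original module): how a client
# might call the import endpoint served by BackpackImportBadge above. The
# host, route, token and assertion URL are hypothetical.
#
# import requests
# requests.post(
#     'https://badgr.example.org/v2/backpack/import',
#     json={'url': 'https://issuer.example.org/public/assertions/abc123'},
#     headers={'Authorization': 'Bearer <token with rw:backpack scope>'},
# )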
|
reedu-reengineering-education/badgr-server
|
apps/backpack/api.py
|
api.py
|
py
| 16,711 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "badgrlog.BadgrLogger",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "entity.api.BaseEntityListView",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "issuer.models.BadgeInstance",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "backpack.serializers_v1.LocalBadgeInstanceUploadSerializerV1",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "backpack.serializers_v2.BackpackAssertionSerializerV2",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "badgrlog.BadgeUploaded",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "mainsite.permissions.AuthenticatedWithVerifiedIdentifier",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "issuer.permissions.VerifiedEmailMatchesRecipientIdentifier",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "issuer.permissions.BadgrOAuthTokenHasScope",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "issuer.models.BadgeInstance.ACCEPTANCE_REJECTED",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "issuer.models.BadgeInstance",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "django.utils.timezone.now",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "apispec_drf.decorators.apispec_list_operation",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "rest_framework.serializers.ValidationError",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "apispec_drf.decorators.apispec_post_operation",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "badgrlog.InvalidBadgeUploadReport",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "badgrlog.InvalidBadgeUploaded",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "entity.api.BaseEntityDetailView",
"line_number": 134,
"usage_type": "name"
},
{
"api_name": "issuer.models.BadgeInstance",
"line_number": 135,
"usage_type": "name"
},
{
"api_name": "backpack.serializers_v1.LocalBadgeInstanceUploadSerializerV1",
"line_number": 136,
"usage_type": "name"
},
{
"api_name": "backpack.serializers_v2.BackpackAssertionSerializerV2",
"line_number": 137,
"usage_type": "name"
},
{
"api_name": "mainsite.permissions.AuthenticatedWithVerifiedIdentifier",
"line_number": 138,
"usage_type": "name"
},
{
"api_name": "issuer.permissions.VerifiedEmailMatchesRecipientIdentifier",
"line_number": 138,
"usage_type": "name"
},
{
"api_name": "issuer.permissions.BadgrOAuthTokenHasScope",
"line_number": 138,
"usage_type": "name"
},
{
"api_name": "apispec_drf.decorators.apispec_get_operation",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "backpack.models.BackpackCollection.objects.filter",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "backpack.models.BackpackCollection.objects",
"line_number": 173,
"usage_type": "attribute"
},
{
"api_name": "backpack.models.BackpackCollection",
"line_number": 173,
"usage_type": "name"
},
{
"api_name": "issuer.models.BadgeInstance.ACCEPTANCE_REJECTED",
"line_number": 176,
"usage_type": "attribute"
},
{
"api_name": "issuer.models.BadgeInstance",
"line_number": 176,
"usage_type": "name"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_204_NO_CONTENT",
"line_number": 185,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 185,
"usage_type": "name"
},
{
"api_name": "apispec_drf.decorators.apispec_delete_operation",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_404_NOT_FOUND",
"line_number": 197,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 197,
"usage_type": "name"
},
{
"api_name": "backpack.serializers_v2.BackpackAssertionAcceptanceSerializerV2",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "apispec_drf.decorators.apispec_put_operation",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "issuer.public_api.ImagePropertyDetailView",
"line_number": 211,
"usage_type": "name"
},
{
"api_name": "issuer.permissions.BadgrOAuthTokenHasScope",
"line_number": 211,
"usage_type": "name"
},
{
"api_name": "issuer.models.BadgeInstance",
"line_number": 212,
"usage_type": "name"
},
{
"api_name": "entity.api.BaseEntityListView",
"line_number": 217,
"usage_type": "name"
},
{
"api_name": "backpack.models.BackpackCollection",
"line_number": 218,
"usage_type": "name"
},
{
"api_name": "backpack.serializers_v1.CollectionSerializerV1",
"line_number": 219,
"usage_type": "name"
},
{
"api_name": "backpack.serializers_v2.BackpackCollectionSerializerV2",
"line_number": 220,
"usage_type": "name"
},
{
"api_name": "mainsite.permissions.AuthenticatedWithVerifiedIdentifier",
"line_number": 221,
"usage_type": "name"
},
{
"api_name": "issuer.permissions.AuditedModelOwner",
"line_number": 221,
"usage_type": "name"
},
{
"api_name": "issuer.permissions.BadgrOAuthTokenHasScope",
"line_number": 221,
"usage_type": "name"
},
{
"api_name": "apispec_drf.decorators.apispec_get_operation",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "apispec_drf.decorators.apispec_post_operation",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "entity.api.BaseEntityDetailView",
"line_number": 245,
"usage_type": "name"
},
{
"api_name": "backpack.models.BackpackCollection",
"line_number": 246,
"usage_type": "name"
},
{
"api_name": "backpack.serializers_v1.CollectionSerializerV1",
"line_number": 247,
"usage_type": "name"
},
{
"api_name": "backpack.serializers_v2.BackpackCollectionSerializerV2",
"line_number": 248,
"usage_type": "name"
},
{
"api_name": "mainsite.permissions.AuthenticatedWithVerifiedIdentifier",
"line_number": 249,
"usage_type": "name"
},
{
"api_name": "issuer.permissions.AuditedModelOwner",
"line_number": 249,
"usage_type": "name"
},
{
"api_name": "issuer.permissions.BadgrOAuthTokenHasScope",
"line_number": 249,
"usage_type": "name"
},
{
"api_name": "apispec_drf.decorators.apispec_get_operation",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "apispec_drf.decorators.apispec_put_operation",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "apispec_drf.decorators.apispec_delete_operation",
"line_number": 271,
"usage_type": "call"
},
{
"api_name": "entity.api.BaseEntityListView",
"line_number": 279,
"usage_type": "name"
},
{
"api_name": "backpack.serializers_v2.BackpackImportSerializerV2",
"line_number": 280,
"usage_type": "name"
},
{
"api_name": "mainsite.permissions.AuthenticatedWithVerifiedIdentifier",
"line_number": 281,
"usage_type": "name"
},
{
"api_name": "issuer.permissions.BadgrOAuthTokenHasScope",
"line_number": 281,
"usage_type": "name"
},
{
"api_name": "backpack.serializers_v2.BackpackAssertionSerializerV2",
"line_number": 326,
"usage_type": "call"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 327,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_201_CREATED",
"line_number": 327,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 327,
"usage_type": "name"
},
{
"api_name": "apispec_drf.decorators.apispec_operation",
"line_number": 285,
"usage_type": "call"
},
{
"api_name": "entity.api.BaseEntityDetailView",
"line_number": 330,
"usage_type": "name"
},
{
"api_name": "issuer.models.BadgeInstance",
"line_number": 331,
"usage_type": "name"
},
{
"api_name": "rest_framework.permissions.AllowAny",
"line_number": 332,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.permissions",
"line_number": 332,
"usage_type": "name"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 351,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST",
"line_number": 351,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 351,
"usage_type": "name"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 358,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_404_NOT_FOUND",
"line_number": 358,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 358,
"usage_type": "name"
},
{
"api_name": "backpack.models.BackpackBadgeShare",
"line_number": 362,
"usage_type": "call"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 365,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST",
"line_number": 365,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 365,
"usage_type": "name"
},
{
"api_name": "badgrlog.BadgeSharedEvent",
"line_number": 368,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 368,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 368,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 372,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_302_FOUND",
"line_number": 372,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 372,
"usage_type": "name"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 374,
"usage_type": "call"
},
{
"api_name": "entity.api.BaseEntityDetailView",
"line_number": 377,
"usage_type": "name"
},
{
"api_name": "backpack.models.BackpackCollection",
"line_number": 378,
"usage_type": "name"
},
{
"api_name": "rest_framework.permissions.AllowAny",
"line_number": 379,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.permissions",
"line_number": 379,
"usage_type": "name"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 397,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST",
"line_number": 397,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 397,
"usage_type": "name"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 404,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_404_NOT_FOUND",
"line_number": 404,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 404,
"usage_type": "name"
},
{
"api_name": "backpack.models.BackpackCollectionShare",
"line_number": 406,
"usage_type": "call"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 409,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST",
"line_number": 409,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 409,
"usage_type": "name"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 415,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_302_FOUND",
"line_number": 415,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 415,
"usage_type": "name"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 417,
"usage_type": "call"
}
] |
22195221109
|
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import numpy as np
from btk import btk
import os
import matplotlib
import matplotlib.pyplot as plt
from tkinter import *
from tkinter.messagebox import *
from tkinter import ttk
# Label = strike / off ; context = right / left
def filtreExtremum(extrem, originalData):
if 0 in extrem:
extrem = extrem[1:]
if len(originalData)-1 in extrem:
extrem = extrem[:-1]
return extrem
# Goal : find all the local maxima
# In : a vector of size nx1
# Out : the x positions of the local maxima (not their y values)
def maxLocal(a):
    TFarray = np.array(np.r_[True, a[1:] > a[:-1]] & np.r_[a[:-1] > a[1:], True]) # Fill a vector with only False, except where a value is greater than both its right and left neighbours (then put True)
    indMax = np.ravel( np.where( TFarray == True ) ) # Get the indices where the True values are
indMax = filtreExtremum(indMax, a)
return indMax
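# Illustrative example (added; not in the original): expected behaviour of
# maxLocal on a small vector, with boundary extrema filtered out above.
# >>> maxLocal(np.array([0, 2, 1, 3, 1]))
# array([1, 3])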
# Function in progress, not yet working
def semiMaxLocal(a):
TFarray = np.array(np.r_[True, a[1:] > a[:-1]] & np.r_[a[:-1] == a[1:], True])
indSemiMax = np.where( TFarray == True )
return indSemiMax
def findMinMin(data, Min):
minMin = minLocal(data[Min])
return Min[minMin]
# Same as maxLocal, but for finding the local minima
def minLocal(a):
TFarray = np.array(np.r_[True, a[1:] < a[:-1]] & np.r_[a[:-1] < a[1:], True])
indMin = np.ravel( np.where( TFarray == True ) )
indMin = filtreExtremum(indMin, a)
return indMin
# Clean the arrays of all local extrema that are too close to each other (hyper-local extrema)
def cleanMinMax(indMin, indMax):
for i in indMax:
for j in np.flip(indMin,0):
if(np.abs(i-j)<7):
indMax = np.extract(indMax!=i,indMax)
indMin = np.extract(indMin!=j,indMin)
break
return indMin, indMax
# Dict with the labels as keys, and an integer as value
# e.g. : DicoLabels = {"LSHO" = 0, "RSHO" = 1, "RANK" = 2, ...}
# Function never used so far
def dicLab(metadata):
point_labels = metadata.FindChild("POINT").value().FindChild("LABELS").value().GetInfo().ToString()
dicoLabels = {}
index = 0
for lab in point_labels:
dicoLabels[lab] = index
index += 1
return dicoLabels
# Plot the events in the figure "ax", as vertical lines
# In : acq, which contains the events ; ax, where the vertical lines will be plotted
# Out : the new figure, where the lines have been plotted
def plotEvent(acq, ax):
    n_events = acq.GetEventNumber() # Get the number of events, so we can iterate over them
    for numevent in range(n_events): # Iterate over the event indices
        event = acq.GetEvent(numevent) # Get an event from its index
        event_frame = event.GetFrame() # Get the frame where the event occurs
        context = event.GetContext() # Get the context (e.g. Left or Right)
        label = event.GetLabel() # Get the label (e.g. : Foot_Strike_GS)
        if context == 'Left': # Check whether it is the left foot
            if label == 'Foot_Strike_GS': # Check whether the foot hits the ground
                leftLineStrike = ax.axvline(x = event_frame, color='r', label='Left - Strike', linestyle='--') # Plot in red, with dashes
                # ax.legend([leftLineStrike], 'Left - Strike')
            elif label == 'Foot_Off_GS': # Check whether the foot leaves the ground
                leftLineOff = ax.axvline(x = event_frame, color='r', label='Left - Off', linestyle='-.') # Plot in red, with dashes and dots
        if context == 'Right': # Check whether it is the right foot
            if label == 'Foot_Strike_GS': # Check whether the foot hits the ground
                rightLineStrike = ax.axvline(x = event_frame, color='g', label='Right - Strike', linestyle='--') # Plot in green, with dashes
            elif label == 'Foot_Off_GS': # Check whether the foot leaves the ground
                rightLineOff = ax.axvline(x = event_frame, color='g', label='Right - Off', linestyle='-.') # Plot in green, with dashes and dots
    # Add the legend
    # IF THERE IS AN ERROR, REMOVE THIS LINE
    # ax.legend((leftLineOff, rightLineStrike, rightLineOff), ('Left - Off', 'Right - Strike', 'Right - Off'))
return ax
# Select the elements of files that have an event matching the given label and context
# Return a training set (3/4) and a testing set (1/4) made up of these elements.
def selectWithExistingEvent(files, lab, cont):
eventfiles = []
for acq in files:
        n_events = acq.GetEventNumber() # Get the number of events, so we can iterate over them
        for numevent in range(n_events): # Iterate over the event indices
            event = acq.GetEvent(numevent) # Get an event from its index
            if event.GetLabel() == lab and event.GetContext()==cont: # Check whether it is the label we are looking for
eventfiles.append(acq)
break
test = np.random.choice(eventfiles, (len(eventfiles)//4), replace = False).tolist()
train = list(set(eventfiles)-set(test))
return train, test
# Goal : retrieve the data
# In : path of the data (Warning : the path starts from where this file is)
# Out : the data
def initial(pathFile):
reader = btk.btkAcquisitionFileReader()
reader.SetFilename(pathFile)
reader.Update()
acq = reader.GetOutput()
return acq
def save(acq, pathFile):
writer = btk.btkAcquisitionFileWriter()
writer.SetInput(acq)
writer.SetFilename(pathFile)
writer.Update()
def allFiles(path):
files = []
    # To find all the .c3d files
for r, d, f in os.walk(path):
for file in f:
if '.c3d' in file:
files.append(initial(os.path.join(r, file)))
return files
# Goal : get some info about the frames of "acq"
# In : the acq data
# Out : number of frames, number of the first frame, number of the last frame
def frameData(acq):
# get some parameters
n_frames = acq.GetPointFrameNumber() # give the number of frames
first_frame = acq.GetFirstFrame()
last_frame = acq.GetLastFrame()
return n_frames, first_frame, last_frame
# Goal : create a new event
# A new event is characterised by a label, a context, and a frame number
# In : the "acq" data, a label, a context, and a frame
def addEvent(acq, label, context, frameNumber):
    newEvent = btk.btkEvent() # Create a new empty event
    newEvent.SetLabel(label) # Set a label
    newEvent.SetContext(context) # Set a context
    newEvent.SetFrame(frameNumber) # Set the position, the frame
    acq.AppendEvent(newEvent) # Add the event to all the other events
# Goal : equivalent to print('obj = ', obj)
# Not necessary for the project
def printName(obj, namespace):
nom = [name for name in namespace if namespace[name] is obj]
print(nom[0],' = ', obj)
# Goal : get all the info about an event
# In : the "acq" data, and the event number
# Out : the event, the label, the context, and the frame number
def eventInfo(acq, numEvent):
event = acq.GetEvent(0) # extract the first event of the aquisition
label = event.GetLabel() # return a string representing the Label
context = event.GetContext() # return a string representing the Context
frame = event.GetFrame() # return the frame as an integer
return event, label, context, frame
# Goal : find the event closest to a given position (frame)
# In : the "data", the set of all events (AllEvents), the label and context we are looking for, and the position from which we search
# Out : the event, and the corresponding frame
def closestEvent(data, AllEvents, label=0, context=0, start=1):
if (label == 0) and (context == 0):
return AllEvents.GetItem(0), AllEvents.GetItem(0).GetFrame()
    eventVIP = [] # Array that will contain all the events matching the requested label and context
    numberEvent = AllEvents.GetItemNumber() # Total number of events
    for num in range(numberEvent): # Look at all the events
        event = AllEvents.GetItem(num) # Get an event
        if (event.GetContext() == context) and (event.GetLabel() == label): # Check whether the context and label match
            eventVIP.append(event) # Add the event
    if len(eventVIP) == 0: # If no matching event was found, stop
        return 0, 0
    dist = 1000 # Initialise a very large distance, which will decrease
    even = eventVIP[0] # Start with the first event
    for event in eventVIP: # Iterate over the events
        if np.abs(event.GetFrame() - start) < dist: # Compare the distance between the starting position and a matching event
            dist = np.abs(event.GetFrame() - start) # Store the new distance
            even = event # Store the new event
return even, even.GetFrame()
# Goal : find the extremum closest to a starting position
# In : starting position "start", the indices (x positions) of the extrema (the mins or the maxes)
# Out : x position of the extremum, the distance from the starting point (start), and the index in the array of mins or maxes
def closestExtrem(start, indExtrem): # Return the position of the extremum relative to the frame start
    AllDistance = indExtrem - start # Subtraction of a scalar from a vector: the index positions minus the starting position (start)
    absDist = np.abs(AllDistance) # Take the absolute value
    indexMinimDist = np.argmin(absDist) # Get the index of the minimal distance
    positionExtrem = indExtrem[indexMinimDist] # Get the x position of the extremum
    distance = AllDistance[indexMinimDist] # Get the distance (without the absolute value)
return positionExtrem, distance, indexMinimDist
def plotPosi(acq, position, axe, ax, event=0):
dicoAxe = {"x" : 0, "y" : 1, "z" : 2}
data = np.array(acq.GetPoint(position).GetValues()[:, dicoAxe[axe]])
n_frames, first_frame, last_frame = frameData(acq)
Min, Max = minLocal(data), maxLocal(data)
    Min, Max = cleanMinMax(Min, Max) # used to clean overly local extrema
# Plot part
ax.plot(np.array(range(first_frame, last_frame + 1)), data, 'k')
ax.plot(Min, data[Min], 'o b')
ax.plot(Max, data[Max], 'o', color='purple')
ax = plotEvent(acq, ax)
if (event != 0):
        print('Starting position :', event.GetFrame())
positionExtrem, distance, indexMinimDist = closestExtrem(event.GetFrame(), Max)
ax.plot(positionExtrem, data[positionExtrem], 'o g')
print('Position :', positionExtrem)
plt.title(" Position = {} - axis = {}".format(position, axe))
# ax.show(block = False)
return ax
def simple(files, posiCombo, axeCombo , buttonCombo, fileCombo):
posiCombo['values'] = ['LFHD', 'RFHD', 'LBHD', 'RBHD', 'C7', 'T10', 'STRN', 'CLAV', 'RBAK', 'LSHO', 'LELB', 'LWRA', 'LWRB', 'RSHO', 'RELB', 'RWRA', 'RWRB', 'LASI', 'RASI', 'LPSI', 'RPSI', 'LTHI', 'RTHI', 'LKNE', 'RKNE', 'LTIB', 'RTIB', 'LANK', 'RANK', 'LHEE', 'RHEE', 'RTOE', 'LTOE']
posiCombo.current(0)
buttonCombo["text"] = "PLOT"
buttonCombo["command"] = lambda: onePlot(files, posiCombo, axeCombo, fileCombo )
def double(files, posiCombo, axeCombo , buttonCombo, fileCombo):
posiCombo['values'] = ["FHD", "BHD", "SHO", "ELB", "WRA", "WRB", "ASI", "PSI", "THI", "KNE", "TIB", "ANK", "HEE", "TOE"]
posiCombo.current(0)
buttonCombo["text"] = "PLOT x2"
buttonCombo["command"] = lambda: twoPlot(files, posiCombo, axeCombo, fileCombo )
def onePlot (files, posiCombo, axeCombo, fileCombo ):
    acq = files[int(fileCombo.get())] # see the chapter on events
n_frames, first_frame, last_frame = frameData(acq)
plt.figure(figsize=(9,7))
guiPlot = plt.subplot()
guiPlot = plotPosi(acq, posiCombo.get(), axeCombo.get(), guiPlot)
plt.show(block=False)
def twoPlot(files, posiCombo, axeCombo, fileCombo ): # see the chapter on events
acq = files[int(fileCombo.get())]
n_frames, first_frame, last_frame = frameData(acq)
dr = 'R' + posiCombo.get()
ga = 'L' + posiCombo.get()
plt.figure(figsize=(9,7))
guiPlot = plt.subplot(2,1,1)
guiPlot = plotPosi(acq, dr, axeCombo.get(), guiPlot)
guiPlot = plt.subplot(2,1,2)
guiPlot = plotPosi(acq, ga, axeCombo.get(), guiPlot)
plt.show(block=False)
def GUIplot(files):
acq = files[0]
metadata = acq.GetMetaData()
point_labels = list(metadata.FindChild("POINT").value().FindChild("LABELS").value().GetInfo().ToString())
win = Tk()
win.title("BTK Project")
# win.geometry("500x100")
ttk.Label(win, text="Choix du capteur").grid(column=1, row=0)
posiCombo = ttk.Combobox(win, values=point_labels)
posiCombo.grid(column=1, row=1)
ttk.Label(win, text="Choix de l'axe").grid(column=2, row=0)
axeCombo = ttk.Combobox(win, values=["x", "y", "z"])
axeCombo.grid(column=2, row=1)
ttk.Label(win, text="Choix du fichier").grid(column=0, row=0)
fileCombo = ttk.Combobox(win, values=list(range(len(files))))
fileCombo.grid(column=0, row=1)
posiCombo.current(newindex=28)
axeCombo.current(2)
fileCombo.current(0)
buttonCombo = Button (win, text="PLOT", command= lambda: onePlot(files, posiCombo, axeCombo, fileCombo ))
buttonCombo.grid(column=3, row=1)
v = IntVar()
# v.set(1)
R1 = Radiobutton(win, text="Plot unique", variable=v, value=1, command= lambda: simple(files, posiCombo, axeCombo , buttonCombo, fileCombo))
R1.grid(column=0, row=2)
R2 = Radiobutton(win, text="Double Plot", variable=v, value=2, command= lambda: double(files, posiCombo, axeCombo , buttonCombo, fileCombo))
R2.grid(column=1, row=2)
v.set(1)
win.mainloop()
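# Illustrative sketch (added; not part of the original file): minimal use of
# the helpers above, assuming a folder "Data/" containing .c3d acquisitions.
#
# files = allFiles('Data/')
# train, test = selectWithExistingEvent(files, 'Foot_Strike_GS', 'Left')
# GUIplot(files)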
|
staufga0/DM_Project
|
Source/file.py
|
file.py
|
py
| 14,754 |
python
|
fr
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.array",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.r_",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "numpy.ravel",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.r_",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "numpy.where",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "numpy.r_",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "numpy.ravel",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "numpy.flip",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "numpy.extract",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "numpy.extract",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "numpy.random.choice",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 118,
"usage_type": "attribute"
},
{
"api_name": "btk.btk.btkAcquisitionFileReader",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "btk.btk",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "btk.btk.btkAcquisitionFileWriter",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "btk.btk",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "os.walk",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 146,
"usage_type": "attribute"
},
{
"api_name": "btk.btk.btkEvent",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "btk.btk",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "numpy.abs",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "numpy.argmin",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 241,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 260,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 261,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 263,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 263,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 270,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 271,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 271,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 273,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 273,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 275,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 275,
"usage_type": "name"
},
{
"api_name": "tkinter.ttk.Label",
"line_number": 287,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk",
"line_number": 287,
"usage_type": "name"
},
{
"api_name": "tkinter.ttk.Combobox",
"line_number": 288,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk",
"line_number": 288,
"usage_type": "name"
},
{
"api_name": "tkinter.ttk.Label",
"line_number": 290,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk",
"line_number": 290,
"usage_type": "name"
},
{
"api_name": "tkinter.ttk.Combobox",
"line_number": 291,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk",
"line_number": 291,
"usage_type": "name"
},
{
"api_name": "tkinter.ttk.Label",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk",
"line_number": 293,
"usage_type": "name"
},
{
"api_name": "tkinter.ttk.Combobox",
"line_number": 294,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk",
"line_number": 294,
"usage_type": "name"
}
] |
21095535591
|
import torch
import torch.nn as nn
import numpy as np
from torch.nn.functional import upsample, interpolate
from Spa_downs import *
import torch.nn.functional as F
from torch.autograd import Variable
import argparse
from torch.nn import init
import scipy.io as sio
import os
import random
class ReshapeTo2D(nn.Module):
def __init__(self):
super(ReshapeTo2D, self).__init__()
def forward(self,x):
return torch.reshape(x, (x.shape[0], x.shape[1], x.shape[2]*x.shape[3]))
class ReshapeTo3D(nn.Module):
def __init__(self):
super(ReshapeTo3D, self).__init__()
def forward(self,x):
return torch.reshape(x, (x.shape[0], x.shape[1], int(np.sqrt(x.shape[2])), int(np.sqrt(x.shape[2]))))
class TransDimen(nn.Module):
def __init__(self):
super(TransDimen, self).__init__()
def forward(self,x):
return torch.Tensor.permute(x,[0,2,1])
def channel_crop(data, position, length):
assert data.size(1) >= position + length, 'the cropped channel out of size.'
return data[:, position: position + length, :, :]
def ins (list_, data, index):
list_start = list_[:index]
list_start = [ Variable(i, requires_grad=False).type(torch.cuda.FloatTensor) for i in list_start]
data = [Variable(data, requires_grad=False).type(torch.cuda.FloatTensor)]
list_end = list_[index:]
list_end = [ Variable(i, requires_grad=False).type(torch.cuda.FloatTensor) for i in list_end]
return list_start + data + list_end
def to_gpu(data):
return Variable(data, requires_grad=False).type(torch.cuda.FloatTensor)
class L_Dspec(nn.Module):
def __init__(self,in_channel,out_channel,P_init):
super(L_Dspec, self).__init__()
self.in_channle = in_channel
self.out_channel = out_channel
self.P = nn.Parameter(P_init)
def forward(self,input):
S = input.shape
out = torch.reshape(input,[S[0],S[1],S[2]*S[3]])
out = torch.matmul(self.P,out)
return torch.reshape(out,[S[0],self.out_channel,S[2],S[3]])
def add_wgn(x, snr):
P_signal=torch.sum(x.abs()**2)
P_noise = P_signal/10**(snr/10.0)
sigma = torch.sqrt(P_noise/x.numel())
noise = torch.randn(x.shape).type(torch.cuda.FloatTensor)
return x + sigma * noise
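# Illustrative check (added; not in the original): the noise power injected by
# add_wgn is about P_signal / 10**(snr/10), i.e. roughly 1% at snr=20 dB.
# x = torch.ones(8, 8).cuda()
# y = add_wgn(x, 20.0)
# print(((y - x) ** 2).sum() / (x.abs() ** 2).sum())  # ~0.01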
def tensor_copy(x):
return x.clone()
def parse_arg():
parser = argparse.ArgumentParser()
parser.add_argument('--model' , default='MSDANet', help='MSDANet')
parser.add_argument('--fusion' , default='Concate', help='Concate')
parser.add_argument('--lr' , default=1e-4, type=float, help='learning rate for optimizer')
parser.add_argument('--batch_size', default=16, type=int, help='batch size for training')
parser.add_argument('--factor' , default=8, type=int, help='scale factor. 4/8/16')
parser.add_argument('--dataset' , default='Houston', help='Houston/PaviaU/dc/PaviaC')
parser.add_argument('--patch_size', default=64, type=int, help='patch size of training')
parser.add_argument('--stride' , default=32, type=int, help='stride of training')
parser.add_argument('--pan' , action='store_true', help='pan_sharpening or MSI + HSI')
parser.add_argument('--mem_load' , action='store_true', help='load the all dataset into memory or disk')
parser.add_argument('--phase' , default='train', help='train/test')
parser.add_argument('--noise' , action='store_true', help='wheater to add noise to LR_HSI and HR_MSI')
return parser.parse_args()
def weights_init_kaiming(m):
classname = m.__class__.__name__
if classname.find('Conv2d') != -1:
init.kaiming_normal(m.weight.data)
def split(full_list,shuffle=False,ratio=0.2):
n_total = len(full_list)
offset = int(n_total * ratio)
if n_total==0 or offset<1:
return [],full_list
random.seed(4)
if shuffle:
random.shuffle(full_list)
sublist_1 = full_list[:offset]
sublist_2 = full_list[offset:]
return sublist_1,sublist_2
def all_data_in(Path='Data/Houston/', datasets='Houston', Train_image_num=10):
names = get_img_name(Path=Path, datasets=datasets)
allData = []
for i in range(Train_image_num):
Data = sio.loadmat(os.path.join(Path, names[i])+'.mat')
HSI = Data['hsi']
HSI = HSI.transpose((2, 0, 1))
allData.append(HSI)
return allData
dataset_dict = dict(
PaviaC = [10, 5, 300, 8000, 102, 1, (55, 41, 12)], ### [train_img_num, val_img_num, stop epoch, max_value, band_number, RGB]
PaviaU = [10, 5, 300, 8000, 103, 1, (46, 27, 10)],
Houston = [3, 2, 300, 65535, 144, 1, (65, 51, 22)],
dc = [11, 5, 300, 65535, 191, 4, (51, 35, 21)],
)
def get_img_name(Path='Data/Houston/', datasets='Houston'):
names_PaviaC_list = [
'PaviaC_01', 'PaviaC_02', 'PaviaC_03', 'PaviaC_04', 'PaviaC_05', 'PaviaC_06',
'PaviaC_07', 'PaviaC_08', 'PaviaC_09', 'PaviaC_10', 'PaviaC_11', 'PaviaC_12',
'PaviaC_13', 'PaviaC_14', 'PaviaC_15'
]
names_Houston_list = [
'Houston_01', 'Houston_02', 'Houston_03', 'Houston_04', 'Houston_05'
]
names_dc_list = [
'dc_01', 'dc_02', 'dc_03', 'dc_04',
'dc_05', 'dc_06', 'dc_07', 'dc_08',
'dc_09', 'dc_10', 'dc_11', 'dc_12',
'dc_13', 'dc_14', 'dc_15', 'dc_16',
]
names_PaviaU_list = [
'PaviaU_01', 'PaviaU_02', 'PaviaU_03', 'PaviaU_04', 'PaviaU_05', 'PaviaU_06',
'PaviaU_07', 'PaviaU_08', 'PaviaU_09', 'PaviaU_10', 'PaviaU_11', 'PaviaU_12',
'PaviaU_13', 'PaviaU_14', 'PaviaU_15'
]
names_Houston, names_Houston_valid = split(names_Houston_list, shuffle=True, ratio=0.6)
names_dc, names_dc_valid = split(names_dc_list, shuffle=True, ratio=0.7)
names_PaviaU, names_PaviaU_valid = split(names_PaviaU_list, shuffle=True, ratio=0.67)
names_PaviaC, names_PaviaC_valid = split(names_PaviaC_list, shuffle=True, ratio=0.67)
if datasets == 'PaviaC':
names = names_PaviaC
elif datasets == 'PaviaC_val':
names = names_PaviaC_valid
elif datasets == 'PaviaU':
names = names_PaviaU
elif datasets == 'PaviaU_val':
names = names_PaviaU_valid
elif datasets == 'Houston':
names = names_Houston
elif datasets == 'Houston_val':
names = names_Houston_valid
elif datasets == 'dc':
names = names_dc
elif datasets == 'dc_val':
names = names_dc_valid
    else:
        raise ValueError('wrong dataset name')
return names
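# Illustrative sketch (added; not part of the original file): split() seeds
# random with 4 before shuffling, so the train/val partitions returned by
# get_img_name are reproducible across runs, e.g. for Houston:
#
# train_names = get_img_name(Path='Data/Houston/', datasets='Houston')      # 3 scenes
# val_names = get_img_name(Path='Data/Houston/', datasets='Houston_val')    # 2 scenes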
|
pyguan88/MDA-Net
|
function.py
|
function.py
|
py
| 6,689 |
python
|
en
|
code
| 8 |
github-code
|
6
|
[
{
"api_name": "torch.nn.Module",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "torch.reshape",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "torch.reshape",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "torch.Tensor.permute",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Module",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "torch.nn.Parameter",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "torch.reshape",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "torch.matmul",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "torch.reshape",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "torch.sum",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "torch.sqrt",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "torch.randn",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "torch.nn.init.kaiming_normal",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "torch.nn.init",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "random.seed",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "random.shuffle",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "scipy.io.loadmat",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "scipy.io",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 121,
"usage_type": "attribute"
}
] |
18834111771
|
# -*- coding: utf-8 -*-
# Author:sen
# Date:9090/3/94 10:48
from typing import List
from heapq import *
class Solution:
def majorityElement(self, nums: List[int]) -> int:
from collections import Counter
counter = Counter(nums)
        for item in counter.items(): # item: (element, count)
if item[1] > (len(nums) / 2.0):
return item[0]
class Solution2:
def majorityElement(self, nums: List[int]) -> int:
        # Without using the built-in Counter
counter = {}
for num in nums:
counter[num] = counter.get(num, 0) + 1
for item in counter.items():
if item[1] > (len(nums) / 2.0):
return item[0]
if __name__ == '__main__':
nums = [9,9,8,8,8,9,9]
so = Solution()
print(so.majorityElement(nums))
so = Solution2()
print(so.majorityElement(nums))
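# Illustrative alternative (added; not in the original): the Boyer-Moore
# voting algorithm solves the same problem in O(n) time and O(1) space,
# assuming a majority element (count > n/2) is guaranteed to exist.
class Solution3:
    def majorityElement(self, nums: List[int]) -> int:
        candidate, count = None, 0
        for num in nums:
            if count == 0:
                candidate = num
            count += 1 if num == candidate else -1
        return candidate
# Usage sketch: Solution3().majorityElement([9,9,8,8,8,9,9]) -> 9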
|
PandoraLS/CodingInterview
|
ProgrammingOJ/LeetCode_python/169_多数元素.py
|
169_多数元素.py
|
py
| 892 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "typing.List",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "collections.Counter",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "{'Counter': 'collections.Counter'}",
"line_number": 27,
"usage_type": "call"
}
] |
39201428004
|
import cv2
import time
import os
import HandTrackingModule as htm
from dronekit import connect, VehicleMode, LocationGlobalRelative, APIException
import time
import socket
import math
import argparse
from pymavlink import mavutil
from time import sleep
import numpy as np
###################################################################################
def connectMyCopter():
parser=argparse.ArgumentParser(description='commands')
parser.add_argument('--connect', default='127.0.0.1:14550')
args=parser.parse_args()
connection_string=args.connect
baud_rate=921600
vehicle=connect(connection_string, baud=baud_rate, wait_ready=True)
return vehicle
###################################################################################
# Function to arm and takeoff
def arm_and_takeoff(TargetAltitude):
# Switch vehicle to Guided Mode
vehicle.mode = VehicleMode("GUIDED")
while vehicle.mode!="GUIDED":
print("Waiting for guided mode")
time.sleep(1)
# Arming the Vehicle
vehicle.armed = True
while vehicle.armed == False:
print("Waiting for the vehicle to be armed")
time.sleep(1)
vehicle.simple_takeoff(TargetAltitude)
while True:
print("Current Altitude: %d" , vehicle.location.global_relative_frame.alt)
if vehicle.location.global_relative_frame.alt >= TargetAltitude*.95:
break
time.sleep(1)
print("Target Altitude reached")
return None
##################################################################
#-- Define the function for sending mavlink velocity command in body frame
def set_velocity_body(vehicle, vx, vy, vz):
""" Remember: vz is positive downward!!!
http://ardupilot.org/dev/docs/copter-commands-in-guided-mode.html
Bitmask to indicate which dimensions should be ignored by the vehicle
(a value of 0b0000000000000000 or 0b0000001000000000 indicates that
none of the setpoint dimensions should be ignored). Mapping:
bit 1: x, bit 2: y, bit 3: z,
bit 4: vx, bit 5: vy, bit 6: vz,
    bit 7: ax, bit 8: ay, bit 9: az
"""
msg = vehicle.message_factory.set_position_target_local_ned_encode(
0,
0, 0,
mavutil.mavlink.MAV_FRAME_BODY_NED,
0b0000111111000111, #-- BITMASK -> Consider only the velocities
0, 0, 0, #-- POSITION
vx, vy, vz, #-- VELOCITY
0, 0, 0, #-- ACCELERATIONS
0, 0)
vehicle.send_mavlink(msg)
vehicle.flush()
###################################################################
vehicle = connectMyCopter()
wCam, hCam = 640, 480
deadZone = 100
pTime = 0
cap = cv2.VideoCapture(0)
cap.set(3, wCam)
cap.set(4, hCam)
detector = htm.handDetector(detectionCon=0.8, maxHands=1)
x = [300, 245, 200, 170, 145, 130, 112, 103, 93, 87, 80, 75, 70, 67, 62, 59, 57]
y = [20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95, 100]
coff = np.polyfit(x, y, 2) # y = AX^2 + BX + C
c = []
i = 0
tipIds = [4, 8, 12, 16, 20]
while True:
success, img = cap.read()
img = detector.findHands(img)
lmList = detector.findPosition(img, draw=False)
#print(lmList)
if len(lmList) !=0:
fingers = []
        # Thumb. Here the x value of the thumb tip is compared with the x value of the mid-thumb joint
if lmList[tipIds[0]][1] > lmList[tipIds[0] - 1][1]:
fingers.append(1)
else:
fingers.append(0)
# Other Fingers
for id in range(1,5):
if lmList[tipIds[id]][2] < lmList[tipIds[id]-2][2]:
fingers.append(1)
else:
fingers.append(0)
#print(sum(fingers))
x1, y1 = lmList[5][1], lmList[5][2]
x2, y2 = lmList[17][1], lmList[17][2]
cx, cy = (x1 + x2) // 2, (y1 + y2) // 2
ty = lmList[4][2]
#print(cx, cy)
cv2.circle(img, (cx, cy), 5, (255, 0, 255), cv2.FILLED)
#length = int(math.hypot(x2 - x1, y2 - y1))
#A, B, C = coff
#distanceCM = A*length**2 + B*length + C
#print(distanceCM)
if sum(fingers) == 0:
print(" Arm and Takeoff ")
arm_and_takeoff(2)
if sum(fingers) == 5:
if ((cx < int(wCam/2) + deadZone) and (cx > int(wCam/2) - deadZone)):
print("Hold Position")
set_velocity_body(vehicle, 0, 0, 0)
if (cx < int(wCam/2) - deadZone):
print("Moving Right")
set_velocity_body(vehicle, 0, 0.5, 0)
if (cx > int(wCam/2) + deadZone):
print("Moving Left")
set_velocity_body(vehicle, 0, -0.5, 0)
if sum(fingers) == 1:
if ((ty < int(hCam/2) + deadZone) and (ty > int(hCam/2) - deadZone)):
print("Hold Position")
set_velocity_body(vehicle, 0, 0, 0)
if (ty < int(hCam/2) - deadZone):
print("Moving Up")
set_velocity_body(vehicle, 0, 0, -1)
if (ty > int(hCam/2) + deadZone):
print("Moving Down")
set_velocity_body(vehicle, 0, 0, 1)
#if sum(fingers) == 5:
# c.append(cx)
# if len(c)!=0:
# for i in range(len(c)):
# difference = c[i]-c[i-1]
#print(difference)
# if difference > 0:
# print("Moving Left")
# set_velocity_body(vehicle, 0, -3, 0)
# elif difference < 0:
# print("Moving Right")
# set_velocity_body(vehicle, 0, 3, 0)
# elif difference == 0:
# print("Hold Position")
# set_velocity_body(vehicle, 0, 0, 0)
#
#print(" Moving Right ")
#set_velocity_body(vehicle, distanceCM*0.05, 0, 0)
cTime = time.time()
fps = 1 / (cTime - pTime)
pTime = cTime
cv2.putText(img, f'FPS: {int(fps)}', (40, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 0, 0), 3)
cv2.imshow("Image", img)
cv2.waitKey(1)
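# Gesture-to-command summary (added; not in the original): the loop above maps
# 0 raised fingers -> arm and take off to 2 m
# 5 raised fingers -> hold / move right / move left, depending on where the
#                     palm centre cx sits relative to the horizontal dead zone
# 1 raised finger  -> hold / climb / descend, depending on where the thumb
#                     tip ty sits relative to the vertical dead zone
# Note that with MAV_FRAME_BODY_NED a negative vz (e.g. -1) means climbing.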
|
2ashishmohan/Hand-Gesture-Controlled-Quadcopter-UAV
|
HandTrackingDemo.py
|
HandTrackingDemo.py
|
py
| 6,355 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "dronekit.connect",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "dronekit.VehicleMode",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "pymavlink.mavutil.mavlink",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "pymavlink.mavutil",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "HandTrackingModule.handDetector",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "numpy.polyfit",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "cv2.circle",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "cv2.FILLED",
"line_number": 126,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "cv2.putText",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "cv2.FONT_HERSHEY_COMPLEX",
"line_number": 193,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 195,
"usage_type": "call"
}
] |
22248410521
|
"""
Very simple HTTP server in python for logging requests
Usage::
./server.py [<port>]
"""
from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib import parse
import os
import logging
class S(BaseHTTPRequestHandler):
def _set_response(self):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
def do_POST(self):
content_length = int(self.headers['Content-Length']) # <--- Gets the size of data
post_data = self.rfile.read(content_length) # <--- Gets the data itself
# logging.info("POST request,\nPath: %s\nHeaders:\n%s\n\nBody:\n%s\n",
# str(self.path), str(self.headers), post_data.decode('utf-8'))
standard = post_data.decode("utf-8")
dump = parse.parse_qs(standard)
if "type" in dump.keys():
if dump["type"][0] == "vote":
writeVoteToFile(dump["titleID"][0])
if dump["type"][0] == "chat":
writeChatToFile(dump)
self._set_response()
def run(server_class=HTTPServer, handler_class=S, port=3000):
# logging.basicConfig(level=logging.INFO)
server_address = ('', port)
httpd = server_class(server_address, handler_class)
# logging.info('Starting httpd...\n')
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
httpd.server_close()
# logging.info('Stopping httpd...\n')
# Safety operations
def doesIDExist(titleID):
with open("/resources/text/titleinfo.txt", "r") as f:
for line in f:
if line[8:16] == titleID[8:16]:
return True
return False
# Saving operations
def writeVoteToFile(titleID):
if doesIDExist(titleID):
with open("/resources/text/vote.txt", "a") as f:
f.write(titleID + "\r")
os.system("echo \"$(tail -n 200 /resources/text/vote.txt)\" > /resources/text/vote.txt")
else:
print("Could not write vote for: " + titleID)
def writeChatToFile(details):
with open("/resources/text/msg.txt", "a") as f:
f.write(details["author"][0] + ";;" + details["time"][0] + ";;" + details["message"][0] +"\r")
os.system("echo \"$(tail -n 200 /resources/text/msg.txt)\" > /resources/text/msg.txt")
# Main function
if __name__ == '__main__':
from sys import argv
if len(argv) == 2:
run(port=int(argv[1]))
else:
run()
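# Illustrative sketch (added; not in the original): the handler expects
# URL-encoded form bodies; a hypothetical client could POST, e.g.:
# import requests
# requests.post('http://localhost:3000/',
#               data={'type': 'vote', 'titleID': '<16-char title id>'})
# requests.post('http://localhost:3000/',
#               data={'type': 'chat', 'author': 'bob', 'time': '12:00', 'message': 'hi'})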
|
jacquesCedric/newWWP-server
|
listener/Listener.py
|
Listener.py
|
py
| 2,426 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "http.server.BaseHTTPRequestHandler",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "urllib.parse.parse_qs",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "urllib.parse",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "http.server.HTTPServer",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "os.system",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 76,
"usage_type": "argument"
},
{
"api_name": "sys.argv",
"line_number": 77,
"usage_type": "name"
}
] |
8692787672
|
from strong.models import Project, Images
from rest_framework import serializers
class ImagesSerializers(serializers.HyperlinkedModelSerializer):
project_id = serializers.PrimaryKeyRelatedField(queryset=Project.objects.all(),source='project.id')
class Meta:
model = Images
fields = ('project_id', 'image')
    def create(self, validated_data):
        # Note: field names assume the Images model exposes `project` and
        # `image` fields, matching the serializer fields above.
        image = Images.objects.create(project=validated_data['project']['id'], image=validated_data['image'])
        return image
class ProjectSerializers(serializers.ModelSerializer):
images = ImagesSerializers(many=True, read_only=True)
class Meta:
model = Project
fields = ('type_of_project', 'description', 'images')
# fields = "__all__"
|
urielcookies/RESTFUL_API
|
strong/serializers.py
|
serializers.py
|
py
| 753 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "rest_framework.serializers.HyperlinkedModelSerializer",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "rest_framework.serializers.PrimaryKeyRelatedField",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "rest_framework.serializers",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "strong.models.Project.objects.all",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "strong.models.Project.objects",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "strong.models.Project",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "strong.models.Images",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "strong.models.Images.objects.create",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "strong.models.Images.objects",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "strong.models.Images",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "rest_framework.serializers.ModelSerializer",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "strong.models.Project",
"line_number": 17,
"usage_type": "name"
}
] |
72579481788
|
from flask import Flask, request, redirect, url_for
from flask_jsonpify import jsonify
from flask import render_template
from flask import abort
from flask import Response
from flask_api import status
import json
from flaskext.mysql import MySQL
import pandas as pd
import requests
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
import base64
import io
app = Flask(__name__)
mysql = MySQL()
# MySQL configurations
app.config['MYSQL_DATABASE_USER'] = 'root'
app.config['MYSQL_DATABASE_PASSWORD'] = ''
app.config['MYSQL_DATABASE_DB'] = 'cloud'
app.config['MYSQL_DATABASE_HOST'] = 'localhost'
mysql.init_app(app)
db = mysql.connect()
cursor = db.cursor()
@app.route('/')
def homepage():
return render_template('forecast.html')
@app.route('/historical/', methods=['GET','POST']) #lists all the dates
def historical():
if(request.method=='GET'):
dates_list = []
cursor.execute("select DATE from dailyweather")
query=cursor.fetchall()
my_hist = [i[0] for i in query]
for item in my_hist:
a = {"DATE":str(item)}
dates_list.append(a)
js = json.dumps(dates_list)
return js, 200
else:
l=request.get_json()
d=l['DATE']
tmax=l['TMAX']
tmin=l['TMIN']
obj = {}
cursor.execute("select DATE from dailyweather")
q=cursor.fetchall()
list=[i[0] for i in q]
x=0
for item in list:
if(int(d)==item):
x=1
if(x==1):
cursor.execute("update dailyweather set TMAX=%f, TMIN=%f where DATE=%d" %(float(tmax),float(tmin),int(d)))
else:
cursor.execute("insert into dailyweather values(%d,%f,%f)" % (int(d),float(tmax),float(tmin)))
db.commit()
obj={"DATE":str(d)}
return jsonify(obj), 201
@app.route('/historical/<string:DATE>', methods=['GET']) #gets the weather info of a particular day
def get_info(DATE):
obj = {}
l=[]
cursor.execute("select DATE,TMAX,TMIN from dailyweather where DATE =%d" % int(DATE))
q=cursor.fetchall()
if(len(q)>0):
for i in range(3):
l.append(q[0][i])
obj = {
"DATE": str(l[0]),
"TMAX": l[1],
"TMIN": l[2]
}
return jsonify(obj), 200
else:
return '', 404
@app.route('/historical/<int:DATE>', methods=['DELETE'])
def del_info(DATE):
obj={}
l=[]
cursor.execute("select DATE,TMAX,TMIN from dailyweather where DATE=%d" % int(DATE))
query=cursor.fetchall()
cursor.execute("delete from dailyweather where DATE=%d" % int(DATE))
db.commit()
if(len(query)>0):
for i in range(3):
l.append(str(query[0][i]))
obj = {
"DATE": l[0],
"TMAX": l[1],
"TMIN": l[2]
}
return jsonify(obj), 200
else:
return '', 204
@app.route('/forecast/<DATE>', methods=['GET']) #forecasts weather info of the next 7days
def forecast(DATE):
lst_dates = []
lst_obj = []
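    # Build the next 7 dates as YYYYMMDD strings; serve stored observations when
    # available, otherwise fall back to a pseudo-forecast drawn with SQL RAND().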
current_date = pd.to_datetime(DATE,format='%Y%m%d')
stop_date = current_date+timedelta(days=7)
while current_date<stop_date:
lst_dates.append(str(pd.to_datetime(current_date)).split(' ')[0].replace("-",""))
current_date = current_date+timedelta(days=1)
for curr_date in lst_dates:
cursor.execute("select DATE,TMAX,TMIN from dailyweather where DATE =%d" % int(curr_date))
query=cursor.fetchall()
if (len(query) > 0):
obj = {
"DATE": curr_date,
"TMAX": query[0][1],
"TMIN": query[0][2]
}
lst_obj.append(obj)
else:
cursor.execute("select ROUND(RAND()*(80-75+1),1)+75")
q=cursor.fetchall()
cursor.execute("select ROUND(RAND()*(50-45+1),1)+45")
q1=cursor.fetchall()
obj = {
"DATE": curr_date,
"TMAX": q[0][0],
"TMIN": q1[0][0]
}
lst_obj.append(obj)
return jsonify(lst_obj), 200
if __name__ == '__main__':
app.run(host='0.0.0.0',debug=True,port=80)
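# Example requests against this API (a sketch, not part of the original file):
#   GET    /historical/                                      -> all stored dates
#   POST   /historical/  {"DATE": "20200101", "TMAX": 78.0, "TMIN": 48.0}
#   GET    /historical/20200101                              -> one day's record
#   DELETE /historical/20200101                              -> remove a record
#   GET    /forecast/20200101                                -> 7-day forecast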
|
cotraak/weather-app-flask
|
app.py
|
app.py
|
py
| 4,277 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "flaskext.mysql.MySQL",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "flask.request.get_json",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "flask_jsonpify.jsonify",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "flask_jsonpify.jsonify",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "flask_jsonpify.jsonify",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "flask_jsonpify.jsonify",
"line_number": 134,
"usage_type": "call"
}
] |
31211286041
|
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 28 07:07:43 2015
@author: RAHUL JAIN
"""
from xml.dom.minidom import parse
import xml.dom.minidom
DOMTree = xml.dom.minidom.parse("chemsep1.xml")
compounds = DOMTree.documentElement
compound = compounds.getElementsByTagName("compound")
i = 1
for comp in compound:
compName = comp.getElementsByTagName("CompoundID")[0].getAttribute("value")
CompName = compName.replace(" ","")
CompName = CompName.replace("-","")
CompName = CompName.replace(",","")
CompName = CompName.replace("1","One")
CompName = CompName.replace("2","Two")
CompName = CompName.replace("3","Three")
CompName = CompName.replace("4","Four")
CompName = CompName.replace("5","Five")
CriticalTemp = comp.getElementsByTagName("CriticalTemperature")[0].getAttribute("value")
CriticalPres = comp.getElementsByTagName("CriticalPressure")[0].getAttribute("value")
CriticalVol = comp.getElementsByTagName("CriticalVolume")[0].getAttribute("value")
CriticalComp = comp.getElementsByTagName("CriticalCompressibility")[0].getAttribute("value")
try:
NormalBoilPoint = comp.getElementsByTagName("NormalBoilingPointTemperature")[0].getAttribute("value")
except IndexError:
NormalBoilPoint = "0"
try:
NormalMeltingPoint = comp.getElementsByTagName("NormalMeltingPointTemperature")[0].getAttribute("value")
except IndexError:
NormalMeltingPoint = "0"
try:
TripPntTemp = comp.getElementsByTagName("TriplePointTemperature")[0].getAttribute("value")
except IndexError:
TripPntTemp = "0"
try:
TripPntPres = comp.getElementsByTagName("TriplePointPressure")[0].getAttribute("value")
except IndexError:
TripPntPres = "0"
MolWt = comp.getElementsByTagName("MolecularWeight")[0].getAttribute("value")
try:
LiqVolAtBoilPnt = comp.getElementsByTagName("LiquidVolumeAtNormalBoilingPoint")[0].getAttribute("value")
except IndexError:
LiqVolAtBoilPnt = "0"
try:
AcenFactor = comp.getElementsByTagName("AcentricityFactor")[0].getAttribute("value")
except IndexError:
AcenFactor = "0"
try:
SolParam = comp.getElementsByTagName("SolubilityParameter")[0].getAttribute("value")
except IndexError:
SolParam = "0"
try:
DipoleMoment = comp.getElementsByTagName("DipoleMoment")[0].getAttribute("value")
except IndexError:
DipoleMoment = "0"
try:
IGHF = comp.getElementsByTagName("HeatOfFormation")[0].getAttribute("value")
except IndexError:
IGHF = "0"
try:
GEF = comp.getElementsByTagName("GibbsEnergyOfFormation")[0].getAttribute("value")
except IndexError:
GEF = "0"
try:
AbsEntropy = comp.getElementsByTagName("AbsEntropy")[0].getAttribute("value")
except IndexError:
AbsEntropy = "0"
try:
HeatFusionMeltPnt = comp.getElementsByTagName("HeatOfFusionAtMeltingPoint")[0].getAttribute("value")
except IndexError:
HeatFusionMeltPnt = "0"
try:
HOC = comp.getElementsByTagName("HeatOfCombustion")[0].getAttribute("value")
except IndexError:
HOC = "0"
try:
UniquacR = comp.getElementsByTagName("UniquacR")[0].getAttribute("value")
except IndexError:
UniquacR = "0"
try:
UniquacQ = comp.getElementsByTagName("UniquacQ")[0].getAttribute("value")
except IndexError:
UniquacQ = "0"
try:
RacketParam = comp.getElementsByTagName("RacketParameter")[0].getAttribute("value")
except IndexError:
RacketParam = "0"
try:
LiqDen = comp.getElementsByTagName("LiquidDensity")[0]
LiqDenEqn = LiqDen.getElementsByTagName("eqno")[0].getAttribute("value")
A=LiqDen.getElementsByTagName("A")[0].getAttribute("value")
B=LiqDen.getElementsByTagName("B")[0].getAttribute("value")
C=LiqDen.getElementsByTagName("C")[0].getAttribute("value")
D=LiqDen.getElementsByTagName("D")[0].getAttribute("value")
try:
E=LiqDen.getElementsByTagName("E")[0].getAttribute("value")
except IndexError:
E = "0"
except IndexError:
LiqDenEqn = "0"
A = "0"
B = "0"
C = "0"
D = "0"
E = "0"
try:
VapPres = comp.getElementsByTagName("VaporPressure")[0]
VapPresEqn = VapPres.getElementsByTagName("eqno")[0].getAttribute("value")
VA=VapPres.getElementsByTagName("A")[0].getAttribute("value")
VB=VapPres.getElementsByTagName("B")[0].getAttribute("value")
VC=VapPres.getElementsByTagName("C")[0].getAttribute("value")
try:
VD=VapPres.getElementsByTagName("D")[0].getAttribute("value")
except IndexError:
VD = "0"
try:
VE=VapPres.getElementsByTagName("E")[0].getAttribute("value")
except IndexError:
VE = "0"
except IndexError:
VapPresEqn = "0"
VA = "0"
VB = "0"
VC = "0"
VD = "0"
VE = "0"
try:
LiqCp = comp.getElementsByTagName("LiquidHeatCapacityCp")[0]
LiqCpEqn = LiqCp.getElementsByTagName("eqno")[0].getAttribute("value")
LCpA=LiqCp.getElementsByTagName("A")[0].getAttribute("value")
LCpB=LiqCp.getElementsByTagName("B")[0].getAttribute("value")
LCpC=LiqCp.getElementsByTagName("C")[0].getAttribute("value")
try:
LCpD=LiqCp.getElementsByTagName("D")[0].getAttribute("value")
except IndexError:
LCpD = "0"
try:
LCpE=LiqCp.getElementsByTagName("E")[0].getAttribute("value")
except IndexError:
LCpE = "0"
except IndexError:
LiqCpEqn = "0"
LCpA = "0"
LCpB = "0"
LCpC = "0"
LCpD = "0"
LCpE = "0"
try:
HOV = comp.getElementsByTagName("HeatOfVaporization")[0]
HOVEqn = HOV.getElementsByTagName("eqno")[0].getAttribute("value")
HOVA=HOV.getElementsByTagName("A")[0].getAttribute("value")
HOVB=HOV.getElementsByTagName("B")[0].getAttribute("value")
HOVC=HOV.getElementsByTagName("C")[0].getAttribute("value")
try:
HOVD=HOV.getElementsByTagName("D")[0].getAttribute("value")
except IndexError:
HOVD = "0"
try:
HOVE=HOV.getElementsByTagName("E")[0].getAttribute("value")
except IndexError:
HOVE = "0"
except IndexError:
HOVEqn = "0"
HOVA = "0"
HOVB = "0"
HOVC = "0"
HOVD = "0"
HOVE = "0"
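    # Standard-state enthalpy: if the normal boiling point is above 298.15 K the
    # compound is liquid at standard conditions, so the heat of vaporization at
    # 298.15 K (the Watson-style correlation evaluated below) is subtracted from
    # the ideal-gas heat of formation before being written out as SH.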
if (float(NormalBoilPoint) > 298.15 ):
HA = float(HOVA)
HB = float(HOVB)
HC = float(HOVC)
HD = float(HOVD)
HE = float(HOVE)
Tr = 298.15/float(CriticalTemp)
SHOV = HA*(pow((1-Tr),(HB + HC*Tr + HD*pow(Tr,2) + HE*pow(Tr,3))))
AbsEnthalpy = float(IGHF) - SHOV
else:
AbsEnthalpy = float(IGHF)
SH = str(AbsEnthalpy)
try:
VapCp = comp.getElementsByTagName("IdealGasHeatCapacityCp")[0]
VapCpEqn = VapCp.getElementsByTagName("eqno")[0].getAttribute("value")
VCpA=VapCp.getElementsByTagName("A")[0].getAttribute("value")
VCpB=VapCp.getElementsByTagName("B")[0].getAttribute("value")
VCpC=VapCp.getElementsByTagName("C")[0].getAttribute("value")
try:
VCpD=VapCp.getElementsByTagName("D")[0].getAttribute("value")
except IndexError:
VCpD = "0"
try:
VCpE=VapCp.getElementsByTagName("E")[0].getAttribute("value")
except IndexError:
VCpE = "0"
except IndexError:
VapCpEqn = "0"
VCpA = "0"
VCpB = "0"
VCpC = "0"
VCpD = "0"
VCpE = "0"
try:
LiqVis = comp.getElementsByTagName("LiquidViscosity")[0]
LiqVisEqn = LiqVis.getElementsByTagName("eqno")[0].getAttribute("value")
LiqVisA=LiqVis.getElementsByTagName("A")[0].getAttribute("value")
LiqVisB=LiqVis.getElementsByTagName("B")[0].getAttribute("value")
LiqVisC=LiqVis.getElementsByTagName("C")[0].getAttribute("value")
try:
LiqVisD=LiqVis.getElementsByTagName("D")[0].getAttribute("value")
except IndexError:
LiqVisD = "0"
try:
LiqVisE=LiqVis.getElementsByTagName("E")[0].getAttribute("value")
except IndexError:
LiqVisE = "0"
except IndexError:
LiqVisEqn = "0"
LiqVisA = "0"
LiqVisB = "0"
LiqVisC = "0"
LiqVisD = "0"
LiqVisE = "0"
try:
VapVis = comp.getElementsByTagName("VaporViscosity")[0]
VapVisEqn = VapVis.getElementsByTagName("eqno")[0].getAttribute("value")
VapVisA=VapVis.getElementsByTagName("A")[0].getAttribute("value")
VapVisB=VapVis.getElementsByTagName("B")[0].getAttribute("value")
VapVisC=VapVis.getElementsByTagName("C")[0].getAttribute("value")
try:
VapVisD=VapVis.getElementsByTagName("D")[0].getAttribute("value")
except IndexError:
VapVisD = "0"
try:
VapVisE=VapVis.getElementsByTagName("E")[0].getAttribute("value")
except IndexError:
VapVisE = "0"
except IndexError:
VapVisEqn = "0"
VapVisA = "0"
VapVisB = "0"
VapVisC = "0"
VapVisD = "0"
VapVisE = "0"
try:
LiqK = comp.getElementsByTagName("LiquidThermalConductivity")[0]
LiqKEqn = LiqK.getElementsByTagName("eqno")[0].getAttribute("value")
LiqKA=LiqK.getElementsByTagName("A")[0].getAttribute("value")
LiqKB=LiqK.getElementsByTagName("B")[0].getAttribute("value")
LiqKC=LiqK.getElementsByTagName("C")[0].getAttribute("value")
try:
LiqKD=LiqK.getElementsByTagName("D")[0].getAttribute("value")
except IndexError:
LiqKD = "0"
try:
LiqKE=LiqK.getElementsByTagName("E")[0].getAttribute("value")
except IndexError:
LiqKE = "0"
except IndexError:
LiqKEqn = "0"
LiqKA = "0"
LiqKB = "0"
LiqKC = "0"
LiqKD = "0"
LiqKE = "0"
try:
VapK = comp.getElementsByTagName("VaporThermalConductivity")[0]
VapKEqn = VapK.getElementsByTagName("eqno")[0].getAttribute("value")
VapKA=VapK.getElementsByTagName("A")[0].getAttribute("value")
VapKB=VapK.getElementsByTagName("B")[0].getAttribute("value")
VapKC=VapK.getElementsByTagName("C")[0].getAttribute("value")
try:
VapKD=VapK.getElementsByTagName("D")[0].getAttribute("value")
except IndexError:
VapKD = "0"
try:
VapKE=VapK.getElementsByTagName("E")[0].getAttribute("value")
except IndexError:
VapKE = "0"
except IndexError:
VapKEqn = "0"
VapKA = "0"
VapKB = "0"
VapKC = "0"
VapKD = "0"
VapKE = "0"
f = open('File5.txt','a')
f.write('model '+CompName)
f.write('\n')
f.write('extends General_Properties(')
f.write('\n')
f.write('SN ' + '=' + str(i) +',')
f.write('\n')
f.write('name' + '=' + '"'+ CompName + '",')
f.write('\n')
f.write('Tc ' + '=' + CriticalTemp + ',')
f.write('\n')
f.write('Pc ' + '=' + CriticalPres + ',')
f.write('\n')
f.write('Vc ' + '=' + CriticalVol + ',')
f.write('\n')
f.write('Cc ' + '=' + CriticalComp + ',')
f.write('\n')
f.write('Tb ' + '=' + NormalBoilPoint + ',')
f.write('\n')
f.write('Tm ' + '=' + NormalMeltingPoint + ',')
f.write('\n')
f.write('TT ' + '=' + TripPntTemp + ',')
f.write('\n')
f.write('TP ' + '=' + TripPntPres + ',')
f.write('\n')
f.write('MW ' + '=' + MolWt + ',')
f.write('\n')
f.write('LVB ' + '=' + LiqVolAtBoilPnt + ',')
f.write('\n')
f.write('AF ' + '=' + AcenFactor + ',')
f.write('\n')
f.write('SP ' + '=' + SolParam + ',')
f.write('\n')
f.write('DM ' + '=' + DipoleMoment + ',')
f.write('\n')
f.write('SH ' + '=' + SH + ',')
f.write('\n')
f.write('IGHF ' + '=' + IGHF + ',')
f.write('\n')
f.write('GEF ' + '=' + GEF + ',')
f.write('\n')
f.write('AS ' + '=' + AbsEntropy + ',')
f.write('\n')
f.write('HFMP ' + '=' + HeatFusionMeltPnt + ',')
f.write('\n')
f.write('HOC ' + '=' + HOC + ',')
f.write('\n')
f.write('LiqDen = {'+LiqDenEqn+","+A+","+B+","+C+","+D+","+E+'},')
f.write('\n')
f.write('VP = {'+VapPresEqn+","+VA+","+VB+","+VC+","+VD+","+VE+'},')
f.write('\n')
f.write('LiqCp = {'+LiqCpEqn+","+LCpA+","+LCpB+","+LCpC+","+LCpD+","+LCpE+'},')
f.write('\n')
f.write('HOV = {'+HOVEqn+","+HOVA+","+HOVB+","+HOVC+","+HOVD+","+HOVE+'},')
f.write('\n')
f.write('VapCp = {'+VapCpEqn+","+VCpA+","+VCpB+","+VCpC+","+VCpD+","+VCpE+'},')
f.write('\n')
f.write('LiqVis = {'+LiqVisEqn+","+LiqVisA+","+LiqVisB+","+LiqVisC+","+LiqVisD+","+LiqVisE+'},')
f.write('\n')
f.write('VapVis = {'+VapVisEqn+","+VapVisA+","+VapVisB+","+VapVisC+","+VapVisD+","+VapVisE+'},')
f.write('\n')
f.write('LiqK = {'+LiqKEqn+","+LiqKA+","+LiqKB+","+LiqKC+","+LiqKD+","+LiqKE+'},')
f.write('\n')
f.write('VapK = {'+VapKEqn+","+VapKA+","+VapKB+","+VapKC+","+VapKD+","+VapKE+'},')
f.write('\n')
f.write('Racketparam = '+RacketParam +',')
f.write('\n')
f.write('UniquacR = '+ UniquacR + ',')
f.write('\n')
f.write('UniquacQ = '+ UniquacQ + ');')
f.write('\n')
f.write('end '+CompName+';')
f.write('\n')
f.write('\n')
# f.write('function Psat')
# f.write('\n')
# f.write('input Real T;')
# f.write('\n')
# f.write('output Real Vp;')
# f.write('\n')
# f.write('algorithm')
# f.write('\n')
# f.write('Vp := exp(VP[2] + VP[3] / T + VP[4] * log(T) + VP[5] * T ^ VP[6]);')
# f.write('\n')
# f.write('end Psat;')
# f.write('\n')
# f.write('\n')
#
# f.write('function LCp')
# f.write('\n')
# f.write('input Real T;')
# f.write('\n')
# f.write('output Real Cp;')
# f.write('\n')
# f.write('algorithm')
# f.write('\n')
# f.write('Cp := (LiqCp[2] + exp(LiqCp[3] / T + LiqCp[4] + LiqCp[5] * T + LiqCp[6] * T ^ 2)) / 1000;')
# f.write('\n')
# f.write('end LCp;')
# f.write('\n')
# f.write('\n')
#
# f.write('function HV')
# f.write('\n')
# f.write('input Real T;')
# f.write('\n')
# f.write('output Real Hv;')
# f.write('\n')
# f.write('protected')
# f.write('\n')
# f.write('Real Tr = T / Tc;')
# f.write('\n')
# f.write('algorithm')
# f.write('\n')
# f.write('Hv := HOV[2] * (1 - Tr) ^ (HOV[3] + HOV[4] * Tr + HOV[5] * Tr ^ 2 + HOV[6] * Tr ^ 3) / 1000;')
# f.write('\n')
# f.write('end HV;')
# f.write('\n')
# f.write('\n')
#
# f.write('function HLiq')
# f.write('\n')
# f.write('input Real T;')
# f.write('\n')
# f.write('output Real Ent;')
# f.write('\n')
# f.write('protected')
# f.write('\n')
# f.write('Real Temp = 298.15;')
# f.write('\n')
# f.write('algorithm')
# f.write('\n')
# f.write('Ent := 0;')
# f.write('\n')
# f.write('while Temp < T loop')
# f.write('\n')
# f.write('Ent := Ent + LCp(Temp) * 1;')
# f.write('\n')
# f.write('Temp := Temp + 1;')
# f.write('\n')
# f.write('end while;')
# f.write('\n')
# f.write('Ent := SH / 1000 + Ent;')
# f.write('\n')
# f.write('end HLiq;')
# f.write('\n')
# f.write('\n')
#
# f.write('function HVap')
# f.write('\n')
# f.write('input Real T;')
# f.write('\n')
# f.write('output Real Ent;')
# f.write('\n')
# f.write('algorithm')
# f.write('\n')
# f.write('Ent := HLiq(T) + HV(T);')
# f.write('\n')
# f.write('end HVap;')
# f.write('\n')
i = i + 1
f.close()
|
RahulJain7/Openmodelica-Thermodynamic-Engine
|
PythonFiles/getComp.py
|
getComp.py
|
py
| 15,689 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "xml.dom.minidom.dom.minidom.parse",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "xml.dom.minidom.dom",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "xml.dom.minidom",
"line_number": 10,
"usage_type": "name"
}
] |
16480507143
|
"""Original from https://github.com/yhenon/pytorch-retinanet"""
import torch
import torch.nn as nn
import numpy as np
import skimage.io
import skimage.transform
import skimage.color
import skimage
from PIL import Image
def compute_dim(dim, padding, kernel_size, stride):
    """Output size of a convolution along one dimension."""
    return np.floor((dim + 2*padding - kernel_size) / stride) + 1
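# e.g. compute_dim(224, padding=3, kernel_size=7, stride=2) == 112.0, the output
# size of ResNet's first 7x7/stride-2 convolution on a 224x224 input.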
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class BBoxTransform(nn.Module):
def __init__(self, mean=None, std=None):
super(BBoxTransform, self).__init__()
if mean is None:
self.mean = torch.from_numpy(np.array([0, 0, 0, 0]).astype(np.float32)).cuda()
else:
self.mean = mean
if std is None:
self.std = torch.from_numpy(np.array([0.1, 0.1, 0.2, 0.2]).astype(np.float32)).cuda()
else:
self.std = std
def forward(self, boxes, deltas):
widths = boxes[:, :, 2] - boxes[:, :, 0]
heights = boxes[:, :, 3] - boxes[:, :, 1]
ctr_x = boxes[:, :, 0] + 0.5 * widths
ctr_y = boxes[:, :, 1] + 0.5 * heights
dx = deltas[:, :, 0] * self.std[0] + self.mean[0]
dy = deltas[:, :, 1] * self.std[1] + self.mean[1]
dw = deltas[:, :, 2] * self.std[2] + self.mean[2]
dh = deltas[:, :, 3] * self.std[3] + self.mean[3]
pred_ctr_x = ctr_x + dx * widths
pred_ctr_y = ctr_y + dy * heights
pred_w = torch.exp(dw) * widths
pred_h = torch.exp(dh) * heights
pred_boxes_x1 = pred_ctr_x - 0.5 * pred_w
pred_boxes_y1 = pred_ctr_y - 0.5 * pred_h
pred_boxes_x2 = pred_ctr_x + 0.5 * pred_w
pred_boxes_y2 = pred_ctr_y + 0.5 * pred_h
pred_boxes = torch.stack([pred_boxes_x1, pred_boxes_y1, pred_boxes_x2, pred_boxes_y2], dim=2)
return pred_boxes
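# A minimal decoding sketch (not part of the original file): with zero deltas,
# BBoxTransform maps every anchor back onto itself. mean/std are passed
# explicitly so the example runs on CPU (the defaults above call .cuda()).
if __name__ == "__main__":
    bt = BBoxTransform(mean=torch.zeros(4), std=torch.ones(4))
    anchors = torch.tensor([[[10.0, 10.0, 50.0, 90.0]]])  # shape (batch, num_boxes, 4)
    deltas = torch.zeros(1, 1, 4)                          # no shift, no rescale
    print(bt(anchors, deltas))                             # -> the same box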
class ClipBoxes(nn.Module):
def __init__(self, width=None, height=None):
super(ClipBoxes, self).__init__()
def forward(self, boxes, img):
batch_size, num_channels, height, width = img.shape
boxes[:, :, 0] = torch.clamp(boxes[:, :, 0], min=0)
boxes[:, :, 1] = torch.clamp(boxes[:, :, 1], min=0)
boxes[:, :, 2] = torch.clamp(boxes[:, :, 2], max=width)
boxes[:, :, 3] = torch.clamp(boxes[:, :, 3], max=height)
return boxes
class Resizer(object):
"""Convert ndarrays in sample to Tensors."""
def __call__(self, image, annots, min_side=608, max_side=1024):
image = np.array(image)
annots = np.array([[*annot['bbox'], annot['category_id']] for annot in annots])
rows, cols, cns = image.shape
smallest_side = min(rows, cols)
# rescale the image so the smallest side is min_side
scale = min_side / smallest_side
# check if the largest side is now greater than max_side, which can happen
# when images have a large aspect ratio
largest_side = max(rows, cols)
if largest_side * scale > max_side:
scale = max_side / largest_side
# resize the image with the computed scale
image = skimage.transform.resize(image, (int(round(rows * scale)), int(round((cols * scale)))))
rows, cols, cns = image.shape
pad_w = 32 - rows % 32
pad_h = 32 - cols % 32
new_image = np.zeros((rows + pad_w, cols + pad_h, cns)).astype(np.float32)
new_image[:rows, :cols, :] = image.astype(np.float32)
        annots[:, :4] = annots[:, :4] * scale  # scale the bbox coords (cols 0-3), not the category id
return Image.fromarray(np.uint8(new_image)), torch.from_numpy(annots), scale
class Augmenter(object):
"""Convert ndarrays in sample to Tensors."""
def __call__(self, sample, flip_x=0.5):
if np.random.rand() < flip_x:
image, annots = sample['img'], sample['annot']
image = image[:, ::-1, :]
rows, cols, channels = image.shape
x1 = annots[:, 0].copy()
x2 = annots[:, 2].copy()
x_tmp = x1.copy()
annots[:, 0] = cols - x2
annots[:, 2] = cols - x_tmp
sample = {'img': image, 'annot': annots}
return sample
class Normalizer(object):
def __init__(self):
self.mean = np.array([[[0.485, 0.456, 0.406]]])
self.std = np.array([[[0.229, 0.224, 0.225]]])
def __call__(self, sample):
image, annots = sample['img'], sample['annot']
return {'img': ((image.astype(np.float32) - self.mean) / self.std), 'annot': annots}
class UnNormalizer(object):
def __init__(self, mean=None, std=None):
        if mean is None:
self.mean = [0.485, 0.456, 0.406]
else:
self.mean = mean
        if std is None:
self.std = [0.229, 0.224, 0.225]
else:
self.std = std
def __call__(self, tensor):
"""
Args:
tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
Returns:
Tensor: Normalized image.
"""
for t, m, s in zip(tensor, self.mean, self.std):
t.mul_(s).add_(m)
return tensor
|
sebastiani/pytorch-attention-augmented-convolution
|
utils/utils.py
|
utils.py
|
py
| 7,137 |
python
|
en
|
code
| 18 |
github-code
|
6
|
[
{
"api_name": "numpy.floor",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "torch.from_numpy",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "torch.from_numpy",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "torch.exp",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "torch.exp",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "torch.stack",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 135,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 135,
"usage_type": "name"
},
{
"api_name": "torch.clamp",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "torch.clamp",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "torch.clamp",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "torch.clamp",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "skimage.transform.resize",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "skimage.transform",
"line_number": 174,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 180,
"usage_type": "attribute"
},
{
"api_name": "numpy.float32",
"line_number": 181,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.fromarray",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 186,
"usage_type": "name"
},
{
"api_name": "numpy.uint8",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "numpy.random.rand",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 193,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 221,
"usage_type": "attribute"
}
] |
37021992627
|
from django.core.exceptions import ObjectDoesNotExist
from django.http import StreamingHttpResponse, HttpResponse
from rest_framework.response import Response
from .models import AudioSlice, Audio
from .serializers import AudioSerializer, AudioSliceSerializer
from rest_framework.decorators import api_view
from rest_framework.decorators import parser_classes
from rest_framework.parsers import MultiPartParser
from .utils import utils as ut
from audio.dbmanager.redis_dao import *
from audio.services.preprocessor import AudioPreprocessor
from audio.dbmanager.youtube_handler import *
import re
import time
range_re = re.compile(r'bytes\s*=\s*(\d+)\s*-\s*(\d*)', re.I)
file_count = 0
"""
timeline
t0 : when the user sends an audio request on page 1
t1 : when the user selects an interval of the audio on page 2
"""
"""
t0 > CELERY RESULT BACKEND
record after running preprocessing for the user's audio request --> in the view function
"""
# preprocessor = AudioPreprocessor()
# # task_id is the audio's id
# # audio_id = uuid.uuid4()  # if it is seen for the first time; otherwise fetch it from the database
# AudioPreprocessor().preprocess.apply_async((3, 56), task_id="hiek", expires=datetime.now() + timedelta(days=1))
"""
t1 > USER INFO RECORD : (audio <----> choreo <----> product) Inter-server communication
KEY "a30gk3" <-- uuid.uuid4()
VAL (HSET)
    { audio_id : e317fce       <-- received from the client
      start : 13               <-- to be computed by audio_handler
      end : 31                 <-- to be computed by audio_handler
      progress : 0.0 }         <-- percentage of progress so far
"""
"""
t1 > SIMILARITY : (audio <----> choreo) Inter-server communication
KEY e317fce-14                  <-- song interval id
VAL [ "af3g0s39_13 : 89", "ldf9a8i_4 : 90", "fk02j3bu_9 : 99", ... ]  <-- list of elements mapping song-interval ids to scores
"""
"""
t1 > AMPLITUDE : (audio <----> choreo) Inter-server communication
KEY e317fce-14                  <-- song interval id
VAL [ 7 2 9 8 6 ]               <-- list of scores
"""
"""
===================================================================================================================
"""
# def upload_file(request):
# if request.method == 'POST':
# form = UploadFileForm(request.POST, request.FILES)
# if form.is_valid():
# instance = ModelWithFileField(file_field=request.FILES['file'])
# instance.save()
# return HttpResponseRedirect('/success/url/')
# else:
# form = UploadFileForm()
# return render(request, 'upload.html', {'form': form})
@api_view(['POST'])
@parser_classes([MultiPartParser])
def meta(request):
data = MultiPartParser.parse(request)
print(data)
res = write_from_meta()
return Response(AudioSerializer(Audio.objects.all(), many=True).data)
@api_view(['POST'])
async def youtube_url(request):
download_url = request.data.get("download_url")
try:
        # if it already exists
return Response(AudioSerializer(Audio.objects.get(download_url=download_url)).data)
except ObjectDoesNotExist:
try:
print(f"started at {time.strftime('%X')}")
_id, _title, _duration = await write_from_link(download_url)
audio = Audio(audio_id=_id, title=_title, download_url=download_url, duration=_duration)
audio.save()
serializer = AudioSerializer(audio)
            # this is what corresponds to the celery tasks
AudioPreprocessor(audio=audio).preprocess()
            # find the file and send it back along with its info
return Response(serializer.data)
except:
print("===========download failure=============")
return Response("cannot open file.", status=400)
# response = StreamingHttpResponse(streaming_content=request.FILES["audio_file"])
# response['Content-Disposition'] = f'attachment; filename="{request.data["audio_file"]}"'
# return response
@api_view(['POST'])
@parser_classes([MultiPartParser])
# @renderer_classes([MultiPartRenderer])
def file(request):
"""
:param request:
    :return: audio_id and the file in streaming form
"""
ext = request.data.get("ext")
global file_count
filename = "up" + str(file_count)
if ext != "wav":
ut.get_console_output(
'ffmpeg -n -i "{}/{}.{}" "{}/{}.wav"'.format("../../media/ORG", filename, ext, "../../media/WAV",
filename))
    # save the file right away - store in the volume
file_count += 1
response = StreamingHttpResponse(streaming_content=request.data["audio_file"])
response['Content-Disposition'] = f'attachment; filename="{request.data["audio_file"]}"'
return response
@api_view(['POST'])
def skeletal_after_interval(request):
"""
:param request: audio_id, start_sec, end_sec
:return:
"""
audio_id = request.data.get('audio_id')
user_start_sec = request.data['start_sec']
user_end_sec = request.data['end_sec']
UserRedisHandler.set_user_info(audio_id, user_start_sec, user_end_sec)
if bool(AudioSlice.objects.filter(audio_slice_id__contains=audio_id)):
start_arr = AudioSlice.objects.values_list('start_sec', flat=True)
start_audio_slice_id = AudioSlice.objects.get(
start_sec=ut.find_nearest(start_arr, user_start_sec)).only('audio_slice_id')
end_audio_slice_id = request.data.get('audio_id') + AudioSlice.objects.get(
start_sec=ut.find_nearest(start_arr, user_end_sec)).only('audio_slice_id').split("_")[1]
else:
audio_handler = AudioPreprocessor(Audio.objects.get(audio_id=audio_id))
audio_handler.preprocess()
start_audio_slice_id = audio_handler.get_slice_id(ut.find_nearest(audio_handler.beat_track, user_start_sec))
end_audio_slice_id = audio_handler.get_slice_id(ut.find_nearest(audio_handler.beat_track, user_end_sec))
    interval_number = int(end_audio_slice_id.split("_")[1]) - int(start_audio_slice_id.split("_")[1])
# Task 1. Similarity process & get into redis
# smlr_app = Celery('redis_dao', backend=cc.result_smlr_backend, broker=cc.broker_smlr_url)
    # smlr_app.config_from_object('celery_config')  -- probably not strictly necessary
    # somehow wire in the part Hyerin wrote here..
# cluster_smlr.apply_async(filter_kmeans_labels, filter_feat, 0, 6))
# Task 2. Amplitude process & get into redis
# ampl_app = Celery(backend=cc.result_ampl_backend, broker=cc.broker_ampl_url)
# get_amplitude.apply_async((3, 56), task_id=audio_id, expires=datetime.now() + timedelta(days=1))
return Response(
AudioSliceSerializer(start_audio_slice_id=start_audio_slice_id, end_audio_slice_id=end_audio_slice_id,
interval_number=interval_number).data)
# app = Celery('redis_dao', backend=cc.result_backend, broker=cc.broker_url)
# app.config_from_object('celery_config')
# def youtube(request):
# # task_id is the audio's id
# audio_id = uuid.uuid4()  # if it is new; otherwise fetch it from the database
# preprocess.apply_async((3, 56), task_id=audio_id, expires=datetime.now() + timedelta(days=1))
# def serve(request):
# return FileResponse(open(request.data.get('music'), 'rb'))
@api_view(['POST'])
def get_music(request):
with open(request.data.get('music'), 'rb') as f:
        # set the required response headers
        return set_audio_response('audio file path, be sure to include the .wav extension', "audio file id (youtube id)", "wav",
                                  "audio file duration as a float")
def set_audio_response(audio_src, audio_id, ext, duration):
response = HttpResponse(open(audio_src, "rb"))
response["Access-Control-Allow-Origin"] = "*"
response['Content-Type'] = "application/octet-stream"
    response['Content-Disposition'] = f'attachment; filename="{audio_id}.{ext}"'  # so that formats other than wav can be sent too
response['audio_id'] = audio_id
response['duration'] = duration
return response
# data = {
# "audio_id": "dfsdff",
# "interval_number": 14,
# "music": open(request.data.get('music'), 'rb')
# }
# return HttpResponse(data)
# response = HttpResponse(content=open(request.data.get('music'), 'rb'))
# response['Content-Type'] = 'application/json'
# return FileResponse(open(request.data.get('music'), 'rb'))
|
Choleor/choleor-audio-reboot
|
audio/views_old.py
|
views_old.py
|
py
| 8,450 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "re.compile",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "re.I",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.parsers.MultiPartParser.parse",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "rest_framework.parsers.MultiPartParser",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "serializers.AudioSerializer",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "models.Audio.objects.all",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "models.Audio.objects",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "models.Audio",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "rest_framework.decorators.api_view",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "rest_framework.decorators.parser_classes",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "rest_framework.parsers.MultiPartParser",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "serializers.AudioSerializer",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "models.Audio.objects.get",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "models.Audio.objects",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "models.Audio",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "django.core.exceptions.ObjectDoesNotExist",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "time.strftime",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "audio.dbmanager.redis_dao",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "models.Audio",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "audio.dbmanager.redis_dao.save",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "audio.dbmanager.redis_dao",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "serializers.AudioSerializer",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "audio.dbmanager.redis_dao",
"line_number": 95,
"usage_type": "argument"
},
{
"api_name": "audio.services.preprocessor.AudioPreprocessor",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "audio.dbmanager.redis_dao",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "rest_framework.decorators.api_view",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "utils.utils.get_console_output",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "utils.utils",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "django.http.StreamingHttpResponse",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "rest_framework.decorators.api_view",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "rest_framework.decorators.parser_classes",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "rest_framework.parsers.MultiPartParser",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "models.AudioSlice.objects.filter",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "models.AudioSlice.objects",
"line_number": 145,
"usage_type": "attribute"
},
{
"api_name": "models.AudioSlice",
"line_number": 145,
"usage_type": "name"
},
{
"api_name": "models.AudioSlice.objects.values_list",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "models.AudioSlice.objects",
"line_number": 146,
"usage_type": "attribute"
},
{
"api_name": "models.AudioSlice",
"line_number": 146,
"usage_type": "name"
},
{
"api_name": "models.AudioSlice.objects.get",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "models.AudioSlice.objects",
"line_number": 147,
"usage_type": "attribute"
},
{
"api_name": "models.AudioSlice",
"line_number": 147,
"usage_type": "name"
},
{
"api_name": "utils.utils.find_nearest",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "utils.utils",
"line_number": 148,
"usage_type": "name"
},
{
"api_name": "models.AudioSlice.objects.get",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "models.AudioSlice.objects",
"line_number": 149,
"usage_type": "attribute"
},
{
"api_name": "models.AudioSlice",
"line_number": 149,
"usage_type": "name"
},
{
"api_name": "utils.utils.find_nearest",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "utils.utils",
"line_number": 150,
"usage_type": "name"
},
{
"api_name": "audio.services.preprocessor.AudioPreprocessor",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "models.Audio.objects.get",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "models.Audio.objects",
"line_number": 153,
"usage_type": "attribute"
},
{
"api_name": "models.Audio",
"line_number": 153,
"usage_type": "name"
},
{
"api_name": "utils.utils.find_nearest",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "utils.utils",
"line_number": 155,
"usage_type": "name"
},
{
"api_name": "utils.utils.find_nearest",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "utils.utils",
"line_number": 156,
"usage_type": "name"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "serializers.AudioSliceSerializer",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "rest_framework.decorators.api_view",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "rest_framework.decorators.api_view",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 197,
"usage_type": "call"
}
] |
30282625342
|
from dataclasses import dataclass, replace
from typing import Any
from uuid import UUID
from topics.domain.repositories.topic_repository import TopicRepository
from topics.domain.usecases.base import Usecase
@dataclass(kw_only=True)
class UpdateTopicRequest:
id: UUID
content: str | None = None
discussed: bool | None = None
@dataclass(kw_only=True)
class UpdateTopicUsecase(Usecase[UpdateTopicRequest, None]):
topic_repository: TopicRepository
def handle(self, request: UpdateTopicRequest) -> None:
topic = self.topic_repository.get(request.id)
updated_fields: dict[str, Any] = {}
if request.content is not None:
updated_fields["content"] = request.content
if request.discussed is not None:
updated_fields["discussed"] = request.discussed
self.topic_repository.update(replace(topic, **updated_fields))
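# A minimal usage sketch (an assumption, not part of the original file);
# `repo` stands for any TopicRepository implementation:
#   usecase = UpdateTopicUsecase(topic_repository=repo)
#   usecase.handle(UpdateTopicRequest(id=topic_id, discussed=True))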
|
cbenavid/topics
|
src/topics/domain/usecases/topic/update_topic.py
|
update_topic.py
|
py
| 893 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "uuid.UUID",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "topics.domain.usecases.base.Usecase",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "topics.domain.repositories.topic_repository.TopicRepository",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "dataclasses.replace",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 16,
"usage_type": "call"
}
] |
30503616911
|
# Author: Ron Jones
# Date Created: 7-3-17
# Date Last Modified: 7-4-17
# Purpose: Check CDS Overlay Excel Sheet with Master Data Sheet
# Status: Working perfectly with MDS and CDS_Overlay_Final2.xlsx as of July 4, 2017
'''Note: The "compare dicts function iterates through every
correct combination of entries from the overlay and data files to check
for any discrepancies, then checks every entry from the overlay against
the data to see if there are any entire records erroneously absent from
the MDS. For more detailed instructions, check FM_Overlay_Script, the
structure is basically the same'''
# Import openpyxl module to allow python to access data from Excel documents
import openpyxl as xl, sys
def main():
# Pull data from workbooks
data = xl.load_workbook(sys.argv[1])
overlay = xl.load_workbook(sys.argv[2])
# Pull worksheets from workbooks
data_sheet = data.get_sheet_by_name('Data')
overlay_sheet = overlay.get_sheet_by_name('Table 1')
# Open output file (validation comments) for writing
comments = open('Classified_Information_Comments', 'w')
#Write heading to output file
comments.write("Inconsistencies:" + "\n" + "\n")
# Open empty dictionary for overlay info
overlay_dict = {}
# Open empty dictionary for master info
data_dict = {}
populate_overlay_dict(overlay_sheet, overlay_dict)
populate_data_dict(data_sheet, data_dict)
compare_dicts(data_dict, overlay_dict, comments)
def populate_overlay_dict(sheet, inp_dict):
titles = ['CONTROL', 'CLASSIFIED INFORMATION OVERLAY']
for i in range(60, 157):
if not sheet.cell(row=i, column=1).value in titles:
inp_dict[sheet.cell(row=i, column=1).value] = sheet.cell(row=i, column=2).value
#print("Overlay dictionary: ", inp_dict)
def populate_data_dict(worksheet, inp):
for i in range(4, worksheet.max_row + 1):
if not worksheet.cell(row=i, column=3).value in inp:
inp[worksheet.cell(row=i, column=3).value] = [worksheet.cell(row=i, column=50).value]
else:
inp[worksheet.cell(row=i, column=3).value].append(worksheet.cell(row=i, column=50).value)
#print("Data Dict: ", inp)
def compare_dicts(data, overlay, outfile):
switch = 0
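    # switch acts as a discrepancy flag: 0 = everything matched, 1 = at least one issue found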
#For loop to check for incorrect/missing entries
for key in data:
for key2 in overlay:
if key == key2:
for elt in data[key]:
if elt == overlay[key2]:
#Can uncomment for visual evidence that loop executed
#print("Data validated " + str(key) + " " + str(key2))
continue
else:
outfile.write("Discrepancy with control " + str(key) + "\n" + "\n")
switch = 1
break
continue
#For loop to check for missing records
for key2 in overlay:
if not key2 in data:
outfile.write(((str(key2) + " should include a " + str(overlay[key2]) + " in the overlay column of MDS, but the record itself does not exist" + "\n" + "\n")))
switch = 1
if switch == 0:
print("No discrepancies found")
else:
print("There were some discrepancies. Check 'Classified_Information_Comments for more information")
main()
|
NISTBoard/data_validation
|
Classified_Info_Script.py
|
Classified_Info_Script.py
|
py
| 3,365 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "openpyxl.load_workbook",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "openpyxl.load_workbook",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 21,
"usage_type": "attribute"
}
] |
22514895256
|
#!/usr/bin/env python3
from fastapi import APIRouter, Body, Request, Response, HTTPException, status
from bson.objectid import ObjectId
from typing import List
from lib.mongo import insert_one, find_one, find_many, update_one
from models.prescription import Prescription, PrescriptionUpdate
router = APIRouter()
coll = "prescription"
@router.get("/{nss}", response_description="Get all prescriptions for a patient", status_code=status.HTTP_200_OK,
response_model=List[Prescription])
def find_precriptions(request: Request, nss: str):
find_criteria = {"nss": nss}
return find_many(request, find_criteria, coll)
@router.post("/", response_description="Create a new prescription", status_code=status.HTTP_201_CREATED,
response_model=Prescription)
def create_prescription(request: Request, prescription: PrescriptionUpdate = Body(...)):
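    # insert the new prescription, then read it back so the response carries the generated _id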
inserted = insert_one(request, prescription, coll)
return find_one(request, {'_id': inserted.inserted_id}, coll)
@router.post("/associate_checkup",
response_description="Links a checkup to the 'checkup' field for a prescription",
status_code=status.HTTP_200_OK,
response_model=Prescription)
def associate_checkup_with_prescription(request: Request, data=Body(...)):
print(data)
prescription_find_criteria = {"_id": ObjectId(data['prescription_id'])}
update_one(request, prescription_find_criteria, {
"$set": {
"consulta": data['checkup_id']
}
}, coll)
return find_one(request, prescription_find_criteria, coll)
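# A minimal mounting sketch (an assumption, not part of the original file):
#   from fastapi import FastAPI
#   from fastapi.testclient import TestClient
#   app = FastAPI()
#   app.include_router(router, prefix="/prescription")
#   client = TestClient(app)
#   client.get("/prescription/1234567890")  # -> prescriptions for that nss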
|
Serlych/national-medical-record
|
routes/prescription.py
|
prescription.py
|
py
| 1,582 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "fastapi.APIRouter",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "fastapi.Request",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "lib.mongo.find_many",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "fastapi.status.HTTP_200_OK",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "fastapi.status",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "models.prescription.Prescription",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "fastapi.Request",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "models.prescription.PrescriptionUpdate",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "fastapi.Body",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "lib.mongo.insert_one",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "lib.mongo.find_one",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "fastapi.status.HTTP_201_CREATED",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "fastapi.status",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "models.prescription.Prescription",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "fastapi.Request",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "fastapi.Body",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "bson.objectid.ObjectId",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "lib.mongo.update_one",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "lib.mongo.find_one",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "fastapi.status.HTTP_200_OK",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "fastapi.status",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "models.prescription.Prescription",
"line_number": 31,
"usage_type": "name"
}
] |