| column | type | values |
|---|---|---|
| seq_id | string | lengths 4-11 |
| text | string | lengths 113-2.92M |
| repo_name | string | lengths 4-125, nullable |
| sub_path | string | lengths 3-214 |
| file_name | string | lengths 3-160 |
| file_ext | string | 18 classes |
| file_size_in_byte | int64 | 113-2.92M |
| program_lang | string | 1 class |
| lang | string | 93 classes |
| doc_type | string | 1 class |
| stars | int64 | 0-179k, nullable |
| dataset | string | 3 classes |
| pt | string | 78 classes |

Each record below is laid out as: seq_id | text (file contents) | repo_name | sub_path | file_name | file_ext | file_size_in_byte | program_lang | lang | doc_type | stars | dataset | pt |
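A minimal sketch of how rows with this schema could be loaded and filtered, assuming the table is published as a Hugging Face `datasets` dataset; the dataset path below is a placeholder, not the real repository id:

from datasets import load_dataset

# Placeholder dataset path; swap in the actual repository id.
ds = load_dataset("example/python-code-corpus", split="train")

# Keep small, starred, English-annotated Python files (stars may be null).
subset = ds.filter(
    lambda row: row["program_lang"] == "python"
    and row["lang"] == "en"
    and row["file_size_in_byte"] < 10_000
    and (row["stars"] or 0) > 0
)
print(len(subset), subset[0]["repo_name"], subset[0]["file_name"])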
70497094234
|
# coding=utf-8
import json
import os
from study.day12.file_path_manager import FilePathManager
jsondata = '''
{
"Uin":0,
"UserName":"@c482d142bc698bc3971d9f8c26335c5c",
"NickName":"小帅b",
"HeadImgUrl":"/cgi-bin/mmwebwx-bin/webwxgeticon?seq=500080&username=@c482d142bc698bc3971d9f8c26335c5c&skey=@crypt_b0f5e54e_b80a5e6dffebd14896dc9c72049712bf",
"DisplayName":"赵敏",
"ChatRoomId":0,
"KeyWord":"che",
"EncryChatRoomId":"",
"IsOwner":0
}
'''
def json2dict(json_data):
"""将json字符串转化为python的字典对象"""
return json.loads(json_data)
def read2json(file_name):
"""读取json文件,并转换为字典/列表"""
with open(file_name, "r", encoding="utf-8") as fp:
dict = json.load(fp)
print(dict)
return dict
def writer2json(file_name, dict):
"""将字典对象保存为json字符串"""
# 删除旧文件
if file_name in os.listdir():
os.remove(file_name)
# dumps()默认中文为ascii编码格式,ensure_ascii默认为Ture
# 禁用ascii编码格式,返回的Unicode字符串,方便使用
json_str = json.dumps(dict, ensure_ascii=False)
with open(file_name, "wb") as fp:
fp.write(json_str.encode('utf-8'))
if __name__ == "__main__":
    # Convert the JSON string into a Python dict object
my_friend = json2dict(jsondata)
print("{} : {}".format(my_friend["NickName"], my_friend["DisplayName"]))
file_manager = FilePathManager()
file_manager.mkdir("/json/")
file_name = "./json/my_friend.json"
    # Save the dict object as a JSON file
writer2json(file_name, my_friend)
    # Read the JSON file and convert it into a dict / list
my_friend = read2json(file_name)
print("{} : {}".format(my_friend["NickName"], my_friend["DisplayName"]))
| Youngfellows/PythonStudy | study/day12/02_json_str_to_dict.py | 02_json_str_to_dict.py | py | 1,833 | python | en | code | 0 | github-code | 50 |
26193919219
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('esg_leipzig_homepage_2015', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='event',
name='css_class_name',
field=models.CharField(choices=[('event-default', 'Grau'), ('event-primary', 'Dunkelblau'), ('event-success', 'Grün'), ('event-info', 'Hellblau'), ('event-warning', 'Gelb'), ('event-danger', 'Rot')], help_text='Die Farben entsprechen den Farben für Buttons, Labels usw. bei Twitter Bootstrap.', default='event-primary', verbose_name='Farbe', max_length=255),
preserve_default=True,
),
]
| ESG-Leipzig/Homepage-2015 | esg_leipzig_homepage_2015/migrations/0002_event_css_class_name.py | 0002_event_css_class_name.py | py | 771 | python | en | code | 0 | github-code | 50 |
74960190875
|
from sklearn.preprocessing import MinMaxScaler
import pandas as pd
import numpy as np
import torch
def remove_max(x):
    # Replace the largest value with the median, reporting which index was changed.
    idx = x.argmax()
    print(idx, ":", x[idx], "=>", np.median(x))
    x[idx] = np.median(x)
    return x
def groupby_datapoint(df,
gb='YYYYMMDD',
target='Qty'):
df_ts = df.groupby([gb])[target].sum().sort_index()
df_ts = df_ts.reset_index()
#df_ts[gb] = pd.to_datetime(df_ts[gb], format='%Y%m%d')
df_ts[gb] = df_ts[gb].astype(int)
df_ts.set_index([gb], inplace=True)
df_ts.sort_index(inplace=True)
return df_ts
def windowed_dataset(y, input_window = 5, output_window = 1, stride = 1, num_features = 1):
'''
create a windowed dataset
: param y: time series feature (array)
: param input_window: number of y samples to give model
: param output_window: number of future y samples to predict
    : param stride: spacing between windows
: param num_features: number of features (i.e., 1 for us, but we could have multiple features)
: return X, Y: arrays with correct dimensions for LSTM
: (i.e., [input/output window size # examples, # features])
'''
L = y.shape[0]
num_samples = (L - input_window - output_window) // stride + 1
X = np.zeros([input_window, num_samples, num_features])
Y = np.zeros([output_window, num_samples, num_features])
for ff in np.arange(num_features):
for ii in np.arange(num_samples):
start_x = stride * ii
end_x = start_x + input_window
X[:, ii, ff] = y[start_x:end_x, ff]
start_y = stride * ii + input_window
end_y = start_y + output_window
Y[:, ii, ff] = y[start_y:end_y, ff]
return X, Y
def numpy_to_torch(x):
return torch.from_numpy(x).type(torch.Tensor)
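# Quick illustrative usage sketch of windowed_dataset: 20 time steps, window of
# 5 inputs -> 1 output, stride 1, a single feature.
if __name__ == "__main__":
    y = np.arange(20, dtype=float).reshape(-1, 1)   # shape (L, num_features)
    X, Y = windowed_dataset(y, input_window=5, output_window=1, stride=1, num_features=1)
    print(X.shape, Y.shape)          # (5, 15, 1) and (1, 15, 1)
    print(numpy_to_torch(X).shape)   # torch.Size([5, 15, 1])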
| CentralPark-gichan/AI_hub | 수요예측 모델[한국타이어]/preprocessing.py | preprocessing.py | py | 1,934 | python | en | code | 3 | github-code | 50 |
71498263194
|
def binarySearch(values, k):
    values.sort()
    print(values)
    top = len(values) - 1
    bottom = 0
    while bottom <= top:
        mid = (top + bottom) // 2
        if values[mid] == k:
            print(str(k) + " found at index: " + str(mid))
            return True
        elif values[mid] < k:
            bottom = mid + 1
        else:
            top = mid - 1
    return "Not found"
values = [1,2,3,4,5,6,7]
binarySearch(values, 3)
| timlaroche/PyCLRS | binarysearch.py | binarysearch.py | py | 429 | python | en | code | 0 | github-code | 50 |
2348556791
|
import numpy as np
from batch import Batch
import scipy.io
class Perceptron():
def __init__(self, X, Y):
no_weights = X.shape[1]
self.weights = np.random.uniform(0,size=[no_weights])
self.tresh = 0
self.X = X
self.Y = Y
self.l_rate = 0.025
self.train()
def predict(self,X):
Y = []
for x in X:
Y.append(np.sign(np.dot(self.weights.T,x)))
return np.squeeze(Y)
def descent(self,x,y):
y_hat = np.dot(self.weights.T,x)
error = y-y_hat
gradient = (self.l_rate/2) * error * x
return gradient
def train(self,batch_size=50):
for ix in range(300):
gradients = []
for b in range(batch_size):
rx = np.random.randint(self.X.shape[0])
x = self.X[rx,:]
y = self.Y[rx]
gradient = self.descent(x,y)
gradients.append(gradient)
gradients = np.sum(np.array(gradients),0)
self.weights += (1/batch_size) * np.squeeze(np.array([gradients]))
#
#mat = scipy.io.loadmat('trainingData.mat')
#
#data = mat['U']
#targets = np.squeeze(mat['v'])
#
#
#p = Perceptron(data.shape[1])
#
#p.train(data,targets,1)
#
#r = [np.sign(p.predict(data[ix,:]))==np.sign(targets[ix]) for ix in range(data.shape[0])]
#
#print('correct classified: {0}'.format(np.mean(r)))
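# Illustrative usage sketch with synthetic, linearly separable data (the commented-out
# demo above expects 'trainingData.mat', which may not be available).
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X = rng.randn(200, 3)
    Y = np.sign(X @ np.array([1.5, -2.0, 0.5]))
    p = Perceptron(X, Y)  # training happens in the constructor
    accuracy = np.mean(p.predict(X) == Y)
    print('correctly classified: {0}'.format(accuracy))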
| NilusvanEdel/EnsembleMethods | decision_tree_constantin/perceptron.py | perceptron.py | py | 1,202 | python | en | code | 2 | github-code | 50 |
18863045142
|
import pathlib
import os
import subprocess
import json
from functools import lru_cache
class CuratedAppService:
DIST_CURATED_APPS_FOLDER = "libboutique/curated_apps/dist"
SCRIPT_CURATED_APPS_FOLDER = "libboutique/curated_apps/scripts"
CURATED_APPS_APPLICATION_INDEX = "libboutique/curated_apps/dist/applications-en.json"
def __init__(self):
self._curated_apps_json = None
def build_index(self, *_, **__):
if self._curated_apps_json is None:
command = ["sh", self._generate_path_to_build_index()]
if subprocess.check_output(command, stderr=subprocess.PIPE):
with open(self.CURATED_APPS_APPLICATION_INDEX, "r") as f:
self._curated_apps_json = json.load(f)
@lru_cache(maxsize=1)
def _generate_path_to_build_index(self):
return os.path.join(os.path.join(str(pathlib.Path.cwd()), self.SCRIPT_CURATED_APPS_FOLDER), "build.sh")
| ubuntu-mate/python3-libboutique | libboutique/services/curated_app_service.py | curated_app_service.py | py | 857 | python | en | code | 6 | github-code | 50 |
40232711881
|
"""__author__ = 唐宏进 """
if __name__ == '__main__':
def fun1():
for x in range(10):
return x
# 0 < class 'int'> < class 'function' >
print(fun1(),type(fun1()),type(fun1))
    # 1. The yield keyword
    """
    As soon as a function contains the yield keyword, it becomes a generator.
    a. Calling a function that contains yield no longer returns a value; instead it
       produces a generator object, and that generator object holds the function body.
    b. The function body only runs when next() pulls data from the generator: execution
       proceeds up to a yield, the value after yield is returned as the generator's data,
       and the stopping point is remembered, so the next call to next() resumes from
       where the previous one left off.
    """
def fun2():
print('abc')
for x in range(10):
yield x
print('aaa')
    # Note: as long as a function contains yield, the call result is a generator, whether or not the yield is ever reached
def fun3(x):
print('abc')
if x > 10:
yield 100
return 20
# print(fun2(), type(fun2()), type(fun2))
    # fun2() here is a generator
gen = fun2()
print(next(gen))
print(next(gen))
print(next(gen))
gen2 = fun3(1)
print(gen2)
    # Exercise: write a generator that produces the Fibonacci sequence
def fibo():
a = 0
b = 1
while True:
yield b
a,b = b,a+b
gen3 = fibo()
for _ in range(10):
print(next(gen3))
    # Objects produced by generators and generator expressions are iterators
    # Convert a list into an iterator object
iter1 = iter([1,2,3,4])
print(iter1)
print(next(iter1))
print(next(iter1))
for item in iter1:
print(item)
| M0use7/python | day10-函数和文件操作/04-生成器.py | 04-生成器.py | py | 1,782 | python | zh | code | 0 | github-code | 50 |
7273497639
|
"""
Schema aplicatiei:
oferim o locatie si vrem sa vedem cate grade sunt acolo acum, si eventual alte date
vrem sa trimitem aceste informatii o data pe ora la un cont de telegram
"""
# import the module
import python_weather
#pip install python-weather
import asyncio
import telegram
#pip install python-telegram-bot - -upgrade
token_telegram = '6003491369:AAH2kovznvZqCNKWTlIzehnVzhFsDkkcqH8'
# from time import sleep
# import os
"""i_1 = ia-mi datele de pe un site #astepte
i_2 = print(1+3)
i_3 = ia datele de pe site_2
#in mod clasic instructiunile se executa pe rand = sincroniza.
#Cu asyncio se pot executa intr-un mod ASyncronizat, aprox pararalel"""
async def getweather(cityx, idt):
# declare the client. the measuring unit used defaults to the metric system (celcius, km/h, etc.)
async with python_weather.Client(unit=python_weather.METRIC) as client:
# fetch a weather forecast from a city
weather = await client.get(cityx)
# returns the current day's forecast temperature (int)
text = f"In {cityx} temperatura actuala este {weather.current.temperature} grade Celsius"
await telegram_send_message(chat_idx = idt, text_x = text)
# get the weather forecast for a few days
# for forecast in weather.forecasts:
# print(forecast)
#
# # hourly forecasts
# for hourly in forecast.hourly:
# print(f' --> {hourly!r}')
async def telegram_send_message(chat_idx, text_x):
bot = telegram.Bot(token_telegram)
async with bot:
await bot.send_message(text = text_x, chat_id = chat_idx)
chat_id_con = 1307289323
# the program will run, then wait 60*60 seconds, i.e. it runs once every hour.
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
# on MacOS, comment out the line above
asyncio.run(getweather(cityx = "Bucharest ", idt= chat_id_con))
# to uninstall, use pip uninstall <library_name>
| constantinus345/sda_47_con | weather_app.py | weather_app.py | py | 1,969 | python | en | code | 0 | github-code | 50 |
28402807345
|
import logging.config
from random import choice
#
from conf import *
logging.config.dictConfig(LOG_CONFIG)
logger = logging.getLogger('Utils')
def dict_to_format_string(dct: dict) -> str:
"""Переводит словарь в строку, которую можно отправить в виде сообщения"""
res = ''
for key, val in dct.items():
res += key + '\n\n' + '\n'.join(['• ' + day for day in val]) + '\n\n'
return res
def generate_greeting():
"""Генерирует приветствие из возможных вариантов, прописанных в конфиге"""
what = choice(WHAT)
which = choice(WHICH)
who = choice(WHO)
return f'{what}, {which} {who}!'
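# Illustrative usage sketch (the real WHAT/WHICH/WHO variants come from conf):
if __name__ == '__main__':
    sample = {'Monday': ['stand-up 10:00', 'review 15:00'], 'Tuesday': ['demo 12:00']}
    print(dict_to_format_string(sample))
    print(generate_greeting())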
| kosumosuSpb/tgbot_sun_it_people | utils.py | utils.py | py | 747 | python | ru | code | 0 | github-code | 50 |
24497008116
|
from django.conf.urls import include, url
from . import views
from django.conf import settings
from django.conf.urls.static import static
media_root = getattr(settings, 'MEDIA_ROOT', '/media')
app_name = 'polls'
urlpatterns = [
url(r'^$',views.test),
url(r'^vocabtest/$', views.vocabindex),
url(r'vocabtest/result/$',views.vocabresult),
url(r'compretest/$', views.compreindex),
url(r'compretest/result/$',views.compreresult),
url(r'mathtest/$',views.mathindex),
url(r'mathtest/result/$',views.mathresult),
url(r'^studentstd/$', views.std),
url(r'compositiontest/$', views.Compositionindex),
url(r'compositiontest/result/$', views.Compositionresult),
url(r'dict/$',views.dict),
url(r'dict/result/$',views.dictresult),
url(r'count/$',views.count),
url(r'ml/$',views.detect),
url(r'user/$',views.user),
url(r'^resource/$', views.resource, name='urlname')
#(r'^user/(?P<username>\w{0,50})/$', views.profile_page,),
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| ameyashirke13/Ld_detect | polls/urls.py | urls.py | py | 1,053 | python | en | code | 0 | github-code | 50 |
18093083869
|
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
import time
from graphviz import Graph
import argparse
g = Graph(format='png')
parser = argparse.ArgumentParser()
parser.add_argument("-p", "--primary", help="primary artist")
parser.add_argument("-s", "--secondary", help="secondary artist")
args = parser.parse_args()
print(args)
primary = args.primary
secondary = args.secondary
spotify = spotipy.Spotify(client_credentials_manager=SpotifyClientCredentials())
results1 = spotify.search(q='artist:' + primary, type='artist')
items1 = results1['artists']['items']
if len(items1) > 0:
artist1 = items1[0]
print(artist1['name'], artist1['images'][0]['url'])
results2 = spotify.search(q='artist:' + secondary, type='artist')
items2 = results2['artists']['items']
if len(items2) > 0:
artist2 = items2[0]
print(artist2['name'], artist2['images'][0]['url'])
relation_list = []
step = 1
with g.subgraph(name=f"STEP: {step}") as c:
c.attr(color='blue')
c.node(artist1["name"], fontcolor='red')
artist_ids = [artist1["id"]]
last_name = artist1["name"]
flag = False
readed = []
while flag == False:
with g.subgraph(name=f"STEP: {step}") as cfor:
cfor.attr(color='blue')
for artist_id in artist_ids:
last_name = spotify.artist(artist_id=artist_id)["name"]
artist_id_tmp = []
relatetions = spotify.artist_related_artists(artist_id=artist_id)
time.sleep(0.5)
for relation in relatetions["artists"]:
print(step, relation["name"], relation["id"])
if relation["id"] == artist2["id"]:
print("!!!!!!TRUE!!!!!")
flag = True
cfor.node(relation["name"], fontcolor="red")
cfor .edge(last_name, relation["name"])
break
else:
cfor.node(relation["name"])
cfor.edge(last_name, relation["name"])
flag = False
# relation_list.append(relation)
# relation_list = list(set (relation_list))
if relation["id"] in readed:
readed.append(relation["id"])
readed = list(set(readed))
continue
artist_id_tmp.append(relation["id"])
artist_id_tmp = list(set(artist_id_tmp))
readed.append(relation["id"])
readed = list(set(readed))
artist_ids = artist_id_tmp
if flag == True:
break
step += 1
if step >= 6:
break
g.render('./graph', view=True)
| mi-ki-ri/six-step | app.py | app.py | py | 2,693 | python | en | code | 0 | github-code | 50 |
12650428269
|
import pandas as pd
from rdkit.Chem.Descriptors import fr_benzene # type: ignore
def fr_benzene_1000_heavy_atoms_count(mol):
return 1000 * fr_benzene(mol) / mol.GetNumHeavyAtoms()
# From https://github.com/rinikerlab/molecular_time_series/blob/55eb420ab0319fbb18cc00fe62a872ac568ad7f5/ga_lib_3.py#L323
DEFAULT_SIMPD_DESCRIPTORS = pd.DataFrame(
[
{
"name": "SA_Score",
"function": "datamol.descriptors.sas",
"target_delta_value": 0.10 * 2.8,
},
{
"name": "HeavyAtomCount",
"function": "datamol.descriptors.n_heavy_atoms",
"target_delta_value": 0.1 * 31,
},
{
"name": "TPSA",
"function": "datamol.descriptors.tpsa",
"target_delta_value": 0.15 * 88.0,
},
{
"name": "fr_benzene/1000 HeavyAtoms",
"function": "splito.simpd.descriptors.fr_benzene_1000_heavy_atoms_count",
"target_delta_value": -0.2 * 0.44,
},
]
)
| datamol-io/splito | splito/simpd/descriptors.py | descriptors.py | py | 1,034 | python | en | code | 5 | github-code | 50 |
1532402723
|
import argparse
import os
import glob
import ot
from sklearn.manifold import TSNE
import matplotlib.patheffects as PathEffects
import seaborn as sns
import numpy
from tqdm import tqdm
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, TruncatedSVD
from torch.utils.data import DataLoader
from generate import gen_testloss, gen_training_accuracy
import train_func as tf
import utils
import scipy
def get_cost_matrix(feature1, feature2, metric= "cosine"):
C = scipy.spatial.distance.cdist(feature1, feature2, metric=metric)
return C
def prolong_batch(batch_vector1, batch_vector2):
gap = len(batch_vector1) - len(batch_vector2)
assert gap >= 0
if gap == 0:
return batch_vector1, batch_vector2
rest = abs(gap)
step = len(batch_vector2)
extend_list = [batch_vector2]
while rest > step:
extend_list.append(batch_vector2)
rest = rest - step
copied_samples = batch_vector2[-abs(rest):]
extend_list.append(copied_samples)
batch_vector2 = np.concatenate(extend_list, axis=0)
return batch_vector1, batch_vector2
def align_batch(batch_vector1, batch_vector2):
gap = len(batch_vector1) - len(batch_vector2)
if gap >= 0:
aligned_batch_vector1, aligned_batch_vector2 = prolong_batch(batch_vector1, batch_vector2)
else:
aligned_batch_vector1, aligned_batch_vector2 = prolong_batch(batch_vector2, batch_vector1)
return aligned_batch_vector1, aligned_batch_vector2
def compare_feature(features_before, labels_before, features_after, labels_after, num_classes=4):
num_sample = len(features_before)
features_sort_before, labels_sort_before = utils.sort_dataset(features_before.numpy(), labels_before.numpy(),
num_classes=num_classes, stack=False)
features_sort_after, labels_sort_after = utils.sort_dataset(features_after.numpy(), labels_after.numpy(),
num_classes=num_classes, stack=False)
outer_scores = []
inner_scores = []
before_distr = []
after_distr = []
for class_feature_before, class_feature_after in zip(features_sort_before, features_sort_after):
# inner class score
_, s_b, _ = np.linalg.svd(class_feature_before)
_, s_a, _ = np.linalg.svd(class_feature_after)
inner_scores.append(np.log(numpy.prod(s_a[:10])/numpy.prod(s_b[:10])))
# between-class score
class_num_before = len(class_feature_before)
class_num_after = len(class_feature_after)
aug = 2.0 * (class_num_after > class_num_before) - 1.0
minor = 2.0 * (class_num_before < num_sample/num_classes) - 1.0
before_distr.append(class_num_before/num_sample)
after_distr.append(class_num_after/num_sample)
class_num_samples = max(len(class_feature_before), len(class_feature_after))
aligned_class_feature_before, aligned_class_feature_after = \
align_batch(class_feature_before, class_feature_after)
cost_matrix = get_cost_matrix(aligned_class_feature_before, aligned_class_feature_after)
a = numpy.ones(class_num_samples) / class_num_samples
b = numpy.ones(class_num_samples) / class_num_samples
Wd = ot.emd2(a, b, cost_matrix)
outer_scores.append(minor*aug*Wd)
return inner_scores, outer_scores, before_distr, after_distr
def tsne_vis(feature_origin, pre_labels_origin, feature_before, pre_labels_before, feature_after, pre_labels_after, \
path="tsne_figs"):
num_samples = len(feature_origin)
assert len(feature_origin) == len(feature_before) == len(feature_after)
all_features = np.concatenate([feature_origin, feature_before, feature_after], axis=0)
    all_labels = np.concatenate([pre_labels_origin, pre_labels_before, pre_labels_after], axis=0)
tsne = TSNE(n_components=2, verbose=1, perplexity=10, n_iter=300, metric="cosine")
tsne_res_all = tsne.fit_transform(all_features)
tsne_res_before_mix = tsne_res_all[: 2*num_samples]
tsne_res_after_mix = tsne_res_all[-2*num_samples:]
pre_labels_before_mix = all_labels[: 2*num_samples]
pre_labels_after_mix = all_labels[-2*num_samples:]
tsne_res_org = tsne_res_all[: num_samples]
tsne_res_before = tsne_res_all[num_samples: 2*num_samples]
tsne_res_after = tsne_res_all[-num_samples:]
tsne_list = [tsne_res_org, tsne_res_before, tsne_res_after, tsne_res_before_mix, tsne_res_after_mix]
pre_labels_list = [pre_labels_origin, pre_labels_before, pre_labels_after, pre_labels_before_mix, pre_labels_after_mix]
classes = [i for i in range(4)]
name_list = ["tsne_res_org", "tsne_res_before", "tsne_res_after", "tsne_res_before_mix", "tsne_res_after_mix"]
for i, (tsne_res, pre_labels) in enumerate(zip(tsne_list, pre_labels_list)):
plt.figure(figsize=(10, 10))
for label in classes:
indices = pre_labels == label
plt.scatter(tsne_res[indices, 0], tsne_res[indices, 1], label=str(label))
plt.legend()
plt.title(f"{name_list[i]}")
plt.savefig(f"{path}/{name_list[i]}.png")
plt.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Ploting')
parser.add_argument('--model_dir', type=str, help='base directory for saving PyTorch model.')
parser.add_argument('--before', type=str, help='before augmentation dir')
parser.add_argument('--after', type=str, help='after augmentation dir')
parser.add_argument('--epoch', type=int, default=None, help='which epoch for evaluation')
args = parser.parse_args()
params = utils.load_params(args.model_dir)
net, epoch = tf.load_checkpoint(args.model_dir, args.epoch, eval_=True)
transforms = tf.load_transforms('test')
origin = tf.load_trainset("compare", transforms, file_path=args.before, max_imgNum=1000)
trainloader_origin = DataLoader(origin, batch_size=64, num_workers=4)
features_origin, labels_origin = tf.get_features(net, trainloader_origin)
before = tf.load_trainset("compare", transforms, file_path=args.before, max_imgNum=1000)
trainloader_before = DataLoader(before, batch_size=64, num_workers=4)
features_before, labels_before = tf.get_features(net, trainloader_before)
after = tf.load_trainset("compare", transforms, file_path=args.after, max_imgNum=1000)
trainloader_after = DataLoader(after, batch_size=64, num_workers=4)
features_after, labels_after = tf.get_features(net, trainloader_after)
inner_score_org, outer_score_org, distr_org, distr_before = compare_feature(features_origin, labels_origin, features_before, labels_before,
num_classes=before.num_classes)
inner_score_sty, outer_score_sty, distr_org, distr_after = compare_feature(features_origin, labels_origin, features_after, labels_after, \
num_classes=before.num_classes)
import csv
file_name = "feature_compare_res.csv"
with open(file_name, 'w', newline='') as csv_file:
csv_writer = csv.writer(csv_file)
names = ["class", "inner_org", "inner_sty", "inter_org", "inter_sty", "distr_org", "distr_before", "distr_after"]
csv_writer.writerow(names),
for i in range(before.num_classes):
data = [i, inner_score_org[i], inner_score_sty[i], outer_score_org[i], outer_score_sty[i], \
distr_org[i], distr_before[i], distr_after[i]]
csv_writer.writerow(data)
print(f"class{i} | inner_org: {inner_score_org[i]} | inner_sty: {inner_score_sty[i]}")
print(f"class{i} | inter_org: {outer_score_org[i]} | inter_sty: {outer_score_sty[i]}")
print(f"class{i} | before_distr: {distr_before[i]} | after_distr: {distr_after[i]}")
tsne_vis(features_origin, labels_origin, features_before, labels_before, features_after, labels_after)
# tsne_vis(features_after, labels_after, "after.png")
| Fusang-Wang/mcr2 | compare.py | compare.py | py | 7,976 | python | en | code | null | github-code | 50 |
4353031890
|
def display_diagonal_elements(matrix):
diagonal = []
non_diagonal = []
upper_diagonal = []
lower_diagonal = []
rows = len(matrix)
cols = len(matrix[0])
for i in range(rows):
for j in range(cols):
if i == j:
diagonal.append(matrix[i][j])
else:
non_diagonal.append(matrix[i][j])
if i < j:
upper_diagonal.append(matrix[i][j])
elif i > j:
lower_diagonal.append(matrix[i][j])
print("Diagonal elements:", diagonal)
print("Non-diagonal elements:", non_diagonal)
print("Upper diagonal elements:", upper_diagonal)
print("Lower diagonal elements:", lower_diagonal)
matrix = []
rows = int(input("Enter the number of rows: "))
cols = int(input("Enter the number of columns: "))
print("Enter the matrix elements:")
for i in range(rows):
row = []
for j in range(cols):
element = int(input("Enter element at position ({}, {}): ".format(i, j)))
row.append(element)
matrix.append(row)
display_diagonal_elements(matrix)
| SAIKRISHNA239/EZ-TS-2 | diagional elements.py | diagional elements.py | py | 1,127 | python | en | code | 0 | github-code | 50 |
10503617394
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#uncomment the plt.show() function to display the charts.
df = pd.read_csv('test1.csv')
null_val=df.isnull().sum(axis = 0)
print(null_val)
null_val.plot(kind="barh")
plt.tight_layout()
plt.show()
df_tmp = df['What is your gender?']
all_males = df_tmp.value_counts()[0]
all_females = df_tmp.value_counts()[1]
all_unk = df_tmp.value_counts()[2]
male=0
female=0
ukn=0
for index, row in df.iterrows():
if(pd.isnull(row['When is your birthday (date)?'])):
if row['What is your gender?'] == "male":
male+=1
elif row['What is your gender?'] == "female":
female+=1
else:
ukn+=1
x = np.array(["all_males", "null_birthday_males", "all_females", "null_birthday_females"])
y = np.array([all_males, male, all_females, female])
data=[['all_males',all_males], ['null_birthday_males',male], ['all_females',all_females], ['null_birthday_females',female]]
df_gender_nulldate = pd.DataFrame(data,columns = ['Gender', 'Counts'])
#print(df_gender_nulldate)
plt.bar(x, y)
#plt.show()
| sfikouris/DM | task1/A1/plots.py | plots.py | py | 1,089 | python | en | code | 0 | github-code | 50 |
11963146837
|
# Definition for singly-linked list.
from typing import Optional


class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next


class Solution:
def deleteDuplicates(self, head: Optional[ListNode]) -> Optional[ListNode]:
node = head
while node and node.next:
temp = node.next
while temp and node.val == temp.val:
prev = temp
temp = temp.next
del prev
node.next = temp
node = node.next
return head
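# Illustrative local test harness using the ListNode definition above.
if __name__ == "__main__":
    head = ListNode(1, ListNode(1, ListNode(2)))
    head = Solution().deleteDuplicates(head)
    values = []
    while head:
        values.append(head.val)
        head = head.next
    print(values)  # expected: [1, 2]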
| duressa-feyissa/A2SV_Programming | 0083-remove-duplicates-from-sorted-list/0083-remove-duplicates-from-sorted-list.py | 0083-remove-duplicates-from-sorted-list.py | py | 569 | python | en | code | 0 | github-code | 50 |
18207164077
|
#!/usr/bin/env python
##
# ____ _ _ _ _ _
# | _ \ / \ | | | | | / \
# | |_) / _ \| | | | | / _ \
# | __/ ___ \ |_| | |___ / ___ \
# |_| /_/ \_\___/|_____/_/ \_\
#
#
# Personal
# Artificial
# Unintelligent
# Life
# Assistant
#
##
from paula.core import system
from paula.core import outputs
from . import sleep_conf as conf
def go_to_sleep_mode(seconds):
if seconds == 0:
if not conf.DEBUG:
cmd = "pm-suspend"
system.call_silently(cmd, sudo=True)
else:
outputs.print_debug("going to sleep indefinitely")
else:
cmd = "sudo rtcwake --mode mem "
if conf.DEBUG:
cmd += "--dry-run "
cmd += "--seconds " + str(seconds)
if not conf.DEBUG:
system.call_silently(cmd, sudo=True)
else:
system.call(cmd, sudo=True)
| NorfairKing/PAULA-proof-of-concept | paula/sleep/sleep.py | sleep.py | py | 901 | python | en | code | 5 | github-code | 50 |
7365275059
|
from models import Model
from models.ajaxcomment import Ajaxcomment
from models.comment import Comment
from models.user import User
class AjaxWeibo(Model):
"""
    Weibo (microblog) post model
"""
def __init__(self, form, user_id=-1):
super().__init__(form)
self.content = form.get('content', '')
self.user_id = form.get('user_id', user_id)
@classmethod
def add(cls, form, user_id):
form['user_id'] = user_id
w = cls.new(form)
return w
@classmethod
def delete_with_comments(cls, user_id):
cs = Ajaxcomment.find_all(weibo_id=user_id)
for c in cs:
Ajaxcomment.delete(c.id)
return cls.delete(user_id)
@classmethod
def update(cls, form):
weibo_id = int(form.get('id'))
w = cls.find(weibo_id)
valid_names = [
'content',
]
for key in form:
if key in valid_names:
setattr(w, key, form[key])
w.save()
return w
| Jeffreve/socket_web | models/ajaxweibo.py | ajaxweibo.py | py | 1,046 | python | en | code | 0 | github-code | 50 |
20390876489
|
""" Handles auth to Okta and returns SAML assertion """
# pylint: disable=C0325,R0912,C1801
# Incorporates flow auth code taken from https://github.com/Nike-Inc/gimme-aws-creds
import sys
import time
import requests
import re
from codecs import decode
from urllib.parse import parse_qs
from urllib.parse import urlparse
from bs4 import BeautifulSoup as bs
try:
from u2flib_host import u2f, exc
from u2flib_host.constants import APDU_WRONG_DATA
U2F_ALLOWED = True
except ImportError:
U2F_ALLOWED = False
try:
input = raw_input
except NameError:
pass
class OktaAuth():
""" Handles auth to Okta and returns SAML assertion """
def __init__(self, okta_profile, verbose, logger, totp_token, okta_auth_config):
self.okta_profile = okta_profile
self.totp_token = totp_token
self.logger = logger
self.factor = ""
self.verbose = verbose
self._verify_ssl_certs = True
self._preferred_mfa_type = None
self._mfa_code = None
self.https_base_url = "https://%s" % okta_auth_config.base_url_for(okta_profile)
self.username = okta_auth_config.username_for(okta_profile)
self.password = okta_auth_config.password_for(okta_profile)
self.factor = okta_auth_config.factor_for(okta_profile)
self.app_link = okta_auth_config.app_link_for(okta_profile)
self.okta_auth_config = okta_auth_config
self.session = None
self.session_token = ""
self.session_id = ""
def primary_auth(self):
""" Performs primary auth against Okta """
auth_data = {
"username": self.username,
"password": self.password
}
self.session = requests.Session()
resp = self.session.post(self.https_base_url + '/api/v1/authn', json=auth_data)
resp_json = resp.json()
self.cookies = resp.cookies
if 'status' in resp_json:
if resp_json['status'] == 'MFA_REQUIRED':
factors_list = resp_json['_embedded']['factors']
state_token = resp_json['stateToken']
session_token = self.verify_mfa(factors_list, state_token)
elif resp_json['status'] == 'SUCCESS':
session_token = resp_json['sessionToken']
elif resp_json['status'] == 'MFA_ENROLL':
self.logger.warning("""MFA not enrolled. Cannot continue.
Please enroll an MFA factor in the Okta Web UI first!""")
exit(2)
elif resp.status_code != 200:
self.logger.error(resp_json['errorSummary'])
exit(1)
else:
self.logger.error(resp_json)
exit(1)
return session_token
def verify_mfa(self, factors_list, state_token):
""" Performs MFA auth against Okta """
supported_factor_types = ["token:software:totp", "push"]
if U2F_ALLOWED:
supported_factor_types.append("u2f")
supported_factors = []
for factor in factors_list:
if factor['factorType'] in supported_factor_types:
supported_factors.append(factor)
else:
self.logger.error("Unsupported factorType: %s" %
(factor['factorType'],))
supported_factors = sorted(supported_factors,
key=lambda factor: (
factor['provider'],
factor['factorType']))
if len(supported_factors) == 1:
session_token = self.verify_single_factor(
supported_factors[0], state_token)
elif len(supported_factors) > 0:
if not self.factor:
print("Registered MFA factors:")
for index, factor in enumerate(supported_factors):
factor_type = factor['factorType']
factor_provider = factor['provider']
if factor_provider == "GOOGLE":
factor_name = "Google Authenticator"
elif factor_provider == "OKTA":
if factor_type == "push":
factor_name = "Okta Verify - Push"
else:
factor_name = "Okta Verify"
elif factor_provider == "FIDO":
factor_name = "u2f"
else:
factor_name = "Unsupported factor type: %s" % factor_provider
if self.factor:
if self.factor == factor_provider:
factor_choice = index
self.logger.info("Using pre-selected factor choice \
from ~/.okta-aws")
break
else:
print("%d: %s" % (index + 1, factor_name))
if not self.factor:
factor_choice = int(input('Please select the MFA factor: ')) - 1
self.logger.info("Performing secondary authentication using: %s" %
supported_factors[factor_choice]['provider'])
session_token = self.verify_single_factor(supported_factors[factor_choice],
state_token)
else:
print("MFA required, but no supported factors enrolled! Exiting.")
exit(1)
return session_token
def verify_single_factor(self, factor, state_token):
""" Verifies a single MFA factor """
req_data = {
"stateToken": state_token
}
self.logger.debug(factor)
if factor['factorType'] == 'token:software:totp':
if self.totp_token:
self.logger.debug("Using TOTP token from command line arg")
req_data['answer'] = self.totp_token
else:
req_data['answer'] = input('Enter MFA token: ')
post_url = factor['_links']['verify']['href']
resp = requests.post(post_url, json=req_data)
resp_json = resp.json()
if 'status' in resp_json:
if resp_json['status'] == "SUCCESS":
return resp_json['sessionToken']
elif resp_json['status'] == "MFA_CHALLENGE" and factor['factorType'] !='u2f':
print("Waiting for push verification...")
while True:
resp = requests.post(
resp_json['_links']['next']['href'], json=req_data)
resp_json = resp.json()
if resp_json['status'] == 'SUCCESS':
return resp_json['sessionToken']
elif resp_json['factorResult'] == 'TIMEOUT':
print("Verification timed out")
exit(1)
elif resp_json['factorResult'] == 'REJECTED':
print("Verification was rejected")
exit(1)
else:
time.sleep(0.5)
if factor['factorType'] == 'u2f':
devices = u2f.list_devices()
if len(devices) == 0:
self.logger.warning("No U2F device found")
exit(1)
challenge = dict()
challenge['appId'] = resp_json['_embedded']['factor']['profile']['appId']
challenge['version'] = resp_json['_embedded']['factor']['profile']['version']
challenge['keyHandle'] = resp_json['_embedded']['factor']['profile']['credentialId']
challenge['challenge'] = resp_json['_embedded']['factor']['_embedded']['challenge']['nonce']
print("Please touch your U2F device...")
auth_response = None
while not auth_response:
for device in devices:
with device as dev:
try:
auth_response = u2f.authenticate(dev, challenge, resp_json['_embedded']['factor']['profile']['appId'] )
req_data.update(auth_response)
resp = requests.post(resp_json['_links']['next']['href'], json=req_data)
resp_json = resp.json()
if resp_json['status'] == 'SUCCESS':
return resp_json['sessionToken']
elif resp_json['factorResult'] == 'TIMEOUT':
self.logger.warning("Verification timed out")
exit(1)
elif resp_json['factorResult'] == 'REJECTED':
self.logger.warning("Verification was rejected")
exit(1)
except exc.APDUError as e:
if e.code == APDU_WRONG_DATA:
devices.remove(device)
time.sleep(0.1)
elif resp.status_code != 200:
self.logger.error(resp_json['errorSummary'])
exit(1)
else:
self.logger.error(resp_json)
exit(1)
return None
def get_session(self, session_token):
""" Gets a session cookie from a session token """
data = {"sessionToken": session_token}
resp = self.session.post(
self.https_base_url + '/api/v1/sessions', json=data).json()
return resp['id']
def get_apps(self, session_id):
""" Gets apps for the user """
sid = "sid=%s" % session_id
headers = {'Cookie': sid}
resp = self.session.get(
self.https_base_url + '/api/v1/users/me/appLinks',
headers=headers).json()
aws_apps = []
for app in resp:
# Hack to allow for more than one appName pattern
if app['appName'] == "amazon_aws" or \
app['appName'] == "another_pattern" or \
app['appName'] == "and_another_pattern":
aws_apps.append(app)
if not aws_apps:
self.logger.error("No AWS apps are available for your user. \
Exiting.")
sys.exit(1)
aws_apps = sorted(aws_apps, key=lambda app: app['sortOrder'])
app_choice = 0 if len(aws_apps) == 1 else None
if app_choice is None:
print("Available apps:")
for index, app in enumerate(aws_apps):
app_name = app['label']
print("%d: %s" % (index + 1, app_name))
app_choice = int(input('Please select AWS app: ')) - 1
self.logger.debug("Selected app: %s" % aws_apps[app_choice]['label'])
return aws_apps[app_choice]['label'], aws_apps[app_choice]['linkUrl']
def get_simple_assertion(self, html):
soup = bs(html.text, "html.parser")
for input_tag in soup.find_all('input'):
if input_tag.get('name') == 'SAMLResponse':
return input_tag.get('value')
return None
def get_mfa_assertion(self, html):
soup = bs(html.text, "html.parser")
if hasattr(soup.title, 'string') and re.match(".* - Extra Verification$", soup.title.string):
state_token = decode(re.search(r"var stateToken = '(.*)';", html.text).group(1), "unicode-escape")
else:
self.logger.error("No Extra Verification")
return None
self.session.cookies['oktaStateToken'] = state_token
self.session.cookies['mp_Account Settings__c'] = '0'
self.session.cookies['Okta_Verify_Autopush_2012557501'] = 'true'
self.session.cookies['Okta_Verify_Autopush_-610254449'] = 'true'
api_response = self.stepup_auth(self.https_base_url + '/api/v1/authn', state_token)
resp = self.session.get(self.app_link)
return self.get_saml_assertion(resp)
def get_saml_assertion(self, html):
""" Returns the SAML assertion from HTML """
assertion = self.get_simple_assertion(html) or self.get_mfa_assertion(html)
if not assertion:
self.logger.error("SAML assertion not valid: " + assertion)
exit(-1)
return assertion
def stepup_auth(self, embed_link, state_token=None):
""" Login to Okta using the Step-up authentication flow"""
flow_state = self._get_initial_flow_state(embed_link, state_token)
while flow_state.get('apiResponse').get('status') != 'SUCCESS':
flow_state = self._next_login_step(
flow_state.get('stateToken'), flow_state.get('apiResponse'))
return flow_state['apiResponse']
def _next_login_step(self, state_token, login_data):
""" decide what the next step in the login process is"""
if 'errorCode' in login_data:
self.logger.error("LOGIN ERROR: {} | Error Code: {}".format(login_data['errorSummary'], login_data['errorCode']))
exit(2)
status = login_data['status']
if status == 'UNAUTHENTICATED':
self.logger.error("You are not authenticated -- please try to log in again")
exit(2)
elif status == 'LOCKED_OUT':
self.logger.error("Your Okta access has been locked out due to failed login attempts.")
exit(2)
elif status == 'MFA_ENROLL':
self.logger.error("You must enroll in MFA before using this tool.")
exit(2)
elif status == 'MFA_REQUIRED':
return self._login_multi_factor(state_token, login_data)
elif status == 'MFA_CHALLENGE':
if 'factorResult' in login_data and login_data['factorResult'] == 'WAITING':
return self._check_push_result(state_token, login_data)
else:
return self._login_input_mfa_challenge(state_token, login_data['_links']['next']['href'])
else:
raise RuntimeError('Unknown login status: ' + status)
def _get_initial_flow_state(self, embed_link, state_token=None):
""" Starts the authentication flow with Okta"""
if state_token is None:
response = self.session.get(
embed_link, allow_redirects=False)
url_parse_results = urlparse(response.headers['Location'])
state_token = parse_qs(url_parse_results.query)['stateToken'][0]
response = self.session.post(
self.https_base_url + '/api/v1/authn',
json={'stateToken': state_token},
headers=self._get_headers(),
verify=self._verify_ssl_certs
)
return {'stateToken': state_token, 'apiResponse': response.json()}
def _get_headers(self):
return {
'User-Agent': 'Okta-awscli/0.0.1',
'Accept': 'application/json',
'Content-Type': 'application/json'
}
def get_assertion(self):
""" Main method to get SAML assertion from Okta """
self.session_token = self.primary_auth()
self.session_id = self.get_session(self.session_token)
if not self.app_link:
app_name, app_link = self.get_apps(self.session_id)
self.okta_auth_config.save_chosen_app_link_for_profile(self.okta_profile, app_link)
else:
app_name = None
app_link = self.app_link
self.session.cookies['sid'] = self.session_id
resp = self.session.get(app_link)
assertion = self.get_saml_assertion(resp)
return app_name, assertion
def _login_send_sms(self, state_token, factor):
""" Send SMS message for second factor authentication"""
response = self.session.post(
factor['_links']['verify']['href'],
json={'stateToken': state_token},
headers=self._get_headers(),
verify=self._verify_ssl_certs
)
self.logger.info("A verification code has been sent to " + factor['profile']['phoneNumber'])
response_data = response.json()
if 'stateToken' in response_data:
return {'stateToken': response_data['stateToken'], 'apiResponse': response_data}
if 'sessionToken' in response_data:
return {'stateToken': None, 'sessionToken': response_data['sessionToken'], 'apiResponse': response_data}
def _login_send_call(self, state_token, factor):
""" Send Voice call for second factor authentication"""
response = self.session.post(
factor['_links']['verify']['href'],
json={'stateToken': state_token},
headers=self._get_headers(),
verify=self._verify_ssl_certs
)
self.logger.info("You should soon receive a phone call at " + factor['profile']['phoneNumber'])
response_data = response.json()
if 'stateToken' in response_data:
return {'stateToken': response_data['stateToken'], 'apiResponse': response_data}
if 'sessionToken' in response_data:
return {'stateToken': None, 'sessionToken': response_data['sessionToken'], 'apiResponse': response_data}
def _login_send_push(self, state_token, factor):
""" Send 'push' for the Okta Verify mobile app """
response = self.session.post(
factor['_links']['verify']['href'],
json={'stateToken': state_token},
headers=self._get_headers(),
verify=self._verify_ssl_certs
)
self.logger.info("Okta Verify push sent...")
response_data = response.json()
if 'stateToken' in response_data:
return {'stateToken': response_data['stateToken'], 'apiResponse': response_data}
if 'sessionToken' in response_data:
return {'stateToken': None, 'sessionToken': response_data['sessionToken'], 'apiResponse': response_data}
def _login_multi_factor(self, state_token, login_data):
""" handle multi-factor authentication with Okta"""
factor = self._choose_factor(login_data['_embedded']['factors'])
if factor['factorType'] == 'sms':
return self._login_send_sms(state_token, factor)
elif factor['factorType'] == 'call':
return self._login_send_call(state_token, factor)
elif factor['factorType'] == 'token:software:totp':
return self._login_input_mfa_challenge(state_token, factor['_links']['verify']['href'])
elif factor['factorType'] == 'token':
return self._login_input_mfa_challenge(state_token, factor['_links']['verify']['href'])
elif factor['factorType'] == 'push':
return self._login_send_push(state_token, factor)
def _login_input_mfa_challenge(self, state_token, next_url):
""" Submit verification code for SMS or TOTP authentication methods"""
        pass_code = self._mfa_code
if pass_code is None:
pass_code = input("Enter verification code: ")
response = self.session.post(
next_url,
json={'stateToken': state_token, 'passCode': pass_code},
headers=self._get_headers(),
verify=self._verify_ssl_certs
)
response_data = response.json()
if 'status' in response_data and response_data['status'] == 'SUCCESS':
if 'stateToken' in response_data:
return {'stateToken': response_data['stateToken'], 'apiResponse': response_data}
if 'sessionToken' in response_data:
return {'stateToken': None, 'sessionToken': response_data['sessionToken'], 'apiResponse': response_data}
else:
return {'stateToken': None, 'sessionToken': None, 'apiResponse': response_data}
def _check_push_result(self, state_token, login_data):
""" Check Okta API to see if the push request has been responded to"""
time.sleep(1)
response = self.session.post(
login_data['_links']['next']['href'],
json={'stateToken': state_token},
headers=self._get_headers(),
verify=self._verify_ssl_certs
)
response_data = response.json()
if 'stateToken' in response_data:
return {'stateToken': response_data['stateToken'], 'apiResponse': response_data}
if 'sessionToken' in response_data:
return {'stateToken': None, 'sessionToken': response_data['sessionToken'], 'apiResponse': response_data}
def _choose_factor(self, factors):
""" gets a list of available authentication factors and
asks the user to select the factor they want to use """
print("Multi-factor Authentication required.")
# filter the factor list down to just the types specified in preferred_mfa_type
if self._preferred_mfa_type is not None:
factors = list(filter(lambda item: item['factorType'] == self._preferred_mfa_type, factors))
if len(factors) == 1:
factor_name = self._build_factor_name(factors[0])
            self.logger.info("%s selected", factor_name)
selection = 0
else:
print("Pick a factor:")
# print out the factors and let the user select
for i, factor in enumerate(factors):
factor_name = self._build_factor_name(factor)
                if factor_name != "":
print('[ %d ] %s' % (i, factor_name))
selection = input("Selection: ")
# make sure the choice is valid
if int(selection) > len(factors):
self.logger.error("You made an invalid selection")
exit(1)
return factors[int(selection)]
@staticmethod
def _build_factor_name(factor):
""" Build the display name for a MFA factor based on the factor type"""
if factor['factorType'] == 'push':
return "Okta Verify App: " + factor['profile']['deviceType'] + ": " + factor['profile']['name']
elif factor['factorType'] == 'sms':
return factor['factorType'] + ": " + factor['profile']['phoneNumber']
elif factor['factorType'] == 'call':
return factor['factorType'] + ": " + factor['profile']['phoneNumber']
elif factor['factorType'] == 'token:software:totp':
return factor['factorType'] + "( " + factor['provider'] + " ) : " + factor['profile']['credentialId']
elif factor['factorType'] == 'token':
return factor['factorType'] + ": " + factor['profile']['credentialId']
else:
return ("Unknown MFA type: " + factor['factorType'])
| seajoshc/dockerfiles | terrata/hacked_okta_auth.py | hacked_okta_auth.py | py | 22,599 | python | en | code | 0 | github-code | 50 |
44566172798
|
import pytest
import os.path
try:
import pyarrow as pa
arrow_version = pa.__version__
except ImportError as msg:
print('Failed to import pyarrow: {}'.format(msg))
pa = None
arrow_version = None
import numpy as np
from librmm_cffi import librmm as rmm
from cudf.comm.gpuarrow import GpuArrowReader
from cudf.dataframe import Series, DataFrame
from cudf.tests.utils import assert_eq
def read_data():
import pandas as pd
basedir = os.path.dirname(__file__)
datapath = os.path.join(basedir, 'data', 'ipums.pkl')
try:
df = pd.read_pickle(datapath)
except Exception as excpr:
if type(excpr).__name__ == 'FileNotFoundError':
pytest.skip('.pkl file is not found')
else:
print(type(excpr).__name__)
names = []
arrays = []
for k in df.columns:
arrays.append(pa.Array.from_pandas(df[k]))
names.append(k)
batch = pa.RecordBatch.from_arrays(arrays, names)
schema = batch.schema.serialize().to_pybytes()
schema = np.ndarray(shape=len(schema), dtype=np.byte,
buffer=bytearray(schema))
data = batch.serialize().to_pybytes()
data = np.ndarray(shape=len(data), dtype=np.byte,
buffer=bytearray(data))
darr = rmm.to_device(data)
return df, schema, darr
@pytest.mark.skipif(arrow_version is None,
reason='need compatible pyarrow to generate test data')
def test_fillna():
_, schema, darr = read_data()
gar = GpuArrowReader(schema, darr)
masked_col = gar[8]
assert masked_col.null_count
sr = Series.from_masked_array(data=masked_col.data, mask=masked_col.null,
null_count=masked_col.null_count)
dense = sr.fillna(123)
np.testing.assert_equal(123, dense.to_array())
assert len(dense) == len(sr)
assert dense.null_count == 0
def test_to_dense_array():
data = np.random.random(8)
mask = np.asarray([0b11010110], dtype=np.byte)
sr = Series.from_masked_array(data=data, mask=mask, null_count=3)
assert sr.null_count > 0
assert sr.null_count != len(sr)
filled = sr.to_array(fillna='pandas')
dense = sr.to_array()
assert dense.size < filled.size
assert filled.size == len(sr)
@pytest.mark.skipif(arrow_version is None,
reason='need compatible pyarrow to generate test data')
def test_reading_arrow_sparse_data():
pdf, schema, darr = read_data()
gar = GpuArrowReader(schema, darr)
gdf = DataFrame(gar.to_dict().items())
assert_eq(pdf, gdf)
| yongsheng268/rapidsai-cudf | python/cudf/tests/test_sparse_df.py | test_sparse_df.py | py | 2,576 | python | en | code | 0 | github-code | 50 |
26045700219
|
import sys
import requests
import json
url = "http://k.episte.co/keywords/similars"
def get_synonyms(infile, outfile, n):
with open(infile, 'r') as jsonfile:
data = json.load(jsonfile)
output = {}
for k in data['keywords']:
r = requests.get(url, params={'positive': k, 'size': n})
output[k] = r.json()
with open(outfile, 'w+') as out:
json.dump(output,out)
if __name__ == "__main__":
n = sys.argv[3]
infile = sys.argv[1]
outfile = sys.argv[2]
get_synonyms(infile, outfile, n)
| ESHackathon/keyword_synonyms | keywords.py | keywords.py | py | 545 | python | en | code | 1 | github-code | 50 |
17047373269
|
import json
import trustlab.lab.config as config
import time
from asgiref.sync import sync_to_async
from rest_framework.renderers import JSONRenderer
from trustlab.consumers.chunk_consumer import ChunkAsyncJsonWebsocketConsumer
from trustlab.models import *
from trustlab.serializers.scenario_serializer import ScenarioSerializer
from trustlab.lab.director import Director
from trustlab.lab.connectors.mongo_db_connector import MongoDbConnector
from trustlab.serializers.scenario_file_reader import ScenarioReader
from trustlab.lab.config import MONGODB_URI
class LabConsumer(ChunkAsyncJsonWebsocketConsumer):
"""
LabConsumer class, with sequential process logic of the Director in its receive_json method.
It is therewith the main interface between User Agent and Director.
"""
async def connect(self):
await self.accept()
async def disconnect(self, close_code):
if self.changed_evaluation_status:
config.EVALUATION_SCRIPT_RUNS = False
async def receive_json(self, content, **kwargs):
if content['type'] == 'get_scenarios':
try:
scenarios_message = {'type': 'scenarios'}
# ScenarioFactory throws AssertionError if no predefined scenarios could be loaded
scenario_factory = ScenarioFactory(lazy_load=True)
scenarios_message['scenario_groups'] = JSONRenderer().render(
scenario_factory.get_scenarios_in_categories()).decode('utf-8')
# for manipulation of scenarios via JS, send them also as JSON
scenario_serializer = ScenarioSerializer(scenario_factory.scenarios, many=True)
scenarios_message["scenarios"] = JSONRenderer().render(scenario_serializer.data).decode('utf-8')
await self.send_json(scenarios_message)
except AssertionError:
await self.send_json({'type': 'error', 'message': 'No predefined scenarios could be loaded!'})
elif content['type'] == 'run_scenario':
if 'agents' in content['scenario'].keys():
try:
Scenario.correct_number_types(content['scenario'])
except (ModuleNotFoundError, SyntaxError) as error:
await self.send_json({
'message': f'Scenario Description Error: {str(error)}',
'type': 'error'
})
return
serializer = ScenarioSerializer(data=content['scenario'])
if serializer.is_valid():
director = Director(content['scenario']['name'])
try:
if config.TIME_MEASURE:
reader_start_timer = time.time()
reader_read = False
if 'scenario_reset' in content and content['scenario_reset']:
await self.mongodb_connector.reset_scenario(content['scenario']['name'])
scenario_exists = await self.mongodb_connector.scenario_exists(content['scenario']['name'])
if not scenario_exists:
scenario_name_handler = ScenarioFileNames()
files_for_names = scenario_name_handler.get_files_for_names()
reader = ScenarioReader(content['scenario']['name'],
files_for_names[content['scenario']['name']], self.mongodb_connector)
await reader.read()
if config.TIME_MEASURE:
reader_read = True
if config.TIME_MEASURE:
reader_end_timer = time.time()
# noinspection PyUnboundLocalVariable
reader_time = reader_end_timer - reader_start_timer if reader_read else -1
await sync_to_async(config.write_scenario_status)(director.scenario_run_id,
f"Database-Preparation took {reader_time} s")
except (ValueError, AttributeError, TypeError, ModuleNotFoundError, SyntaxError) as error:
await self.send_json({
'message': f'Scenario Error: {str(error)}',
'type': 'error'
})
return
# if scenario_factory.scenario_exists(scenario.name):
# try:
# scenario = scenario_factory.get_scenario(scenario.name)
# except RuntimeError as error:
# await self.send_json({
# 'message': f'Scenario Load Error: {str(error)}',
# 'type': 'error'
# })
# return
# # TODO: implement what happens if scenario is updated
# else:
# if len(scenario.agents) == 0:
# await self.send_json({
# 'message': f'Scenario Error: Scenario transmitted is empty and not known.',
# 'type': 'error'
# })
# return
# # TODO: implement save for new scenario as currently it won't be saved due to name only load
# scenario_factory.scenarios.append(scenario)
if 'scenario_only_read' in content and content['scenario_only_read']:
await self.send_json({
'scenario_run_id': director.scenario_run_id,
'type': 'read_only_done'
})
return
try:
async with config.PREPARE_SCENARIO_SEMAPHORE:
if config.TIME_MEASURE:
preparation_start_timer = time.time()
supervisor_amount = await director.prepare_scenario()
await self.send_json({
'scenario_run_id': director.scenario_run_id,
'type': "scenario_run_id"
})
if config.TIME_MEASURE:
preparation_end_timer = time.time()
preparation_time = preparation_end_timer - preparation_start_timer
await sync_to_async(config.write_scenario_status)(director.scenario_run_id,
f"Preparation took {preparation_time} s")
execution_start_timer = time.time()
trust_log, trust_log_dict, agent_trust_logs, agent_trust_logs_dict = await director.run_scenario()
if config.TIME_MEASURE:
execution_end_timer = time.time()
# noinspection PyUnboundLocalVariable
execution_time = execution_end_timer - execution_start_timer
await sync_to_async(config.write_scenario_status)(director.scenario_run_id,
f"Execution took {execution_time} s")
cleanup_start_timer = time.time()
await director.end_scenario()
await self.mongodb_connector.cleanup(content['scenario']['name'], director.scenario_run_id)
if config.TIME_MEASURE:
cleanup_end_timer = time.time()
# noinspection PyUnboundLocalVariable
cleanup_time = cleanup_end_timer - cleanup_start_timer
await sync_to_async(config.write_scenario_status)(director.scenario_run_id,
f"CleanUp took {cleanup_time} s")
for agent in agent_trust_logs:
agent_trust_logs[agent] = "".join(agent_trust_logs[agent])
log_message = {
'agents_log': json.dumps(agent_trust_logs),
'agents_log_dict': json.dumps(agent_trust_logs_dict),
'trust_log': "".join(trust_log),
'trust_log_dict': json.dumps(trust_log_dict),
'scenario_run_id': director.scenario_run_id,
'scenario_name': content['scenario']['name'],
'supervisor_amount': supervisor_amount,
'type': "scenario_results"
}
if config.TIME_MEASURE:
# noinspection PyUnboundLocalVariable
atlas_times = {
'preparation_time': preparation_time,
'execution_time': execution_time,
'cleanup_time': cleanup_time
}
if reader_read:
# noinspection PyUnboundLocalVariable
atlas_times['reader_time'] = reader_time
log_message['atlas_times'] = atlas_times
result_factory = ResultFactory()
scenario_result = result_factory.get_result(director.scenario_run_id)
scenario_result.atlas_times = atlas_times
result_factory.save_dict_log_result(scenario_result)
if self.copy_result_pys:
result_factory = ResultFactory()
result_factory.copy_result_pys(director.scenario_run_id)
if 'is_evaluator' in content and content['is_evaluator']:
await self.send_json({
'scenario_run_id': director.scenario_run_id,
'scenario_name': content['scenario']['name'],
'supervisor_amount': supervisor_amount,
'type': "scenario_results"
})
else:
await self.send_json(log_message)
except Exception as exception:
await self.send_json({
'message': str(exception),
'type': 'error'
})
else:
await self.send(text_data=json.dumps({
'message': serializer.errors,
'type': 'error'
}))
elif content['type'] == 'get_scenario_results':
result_factory = ResultFactory()
current_id = content['scenario_run_id']
if config.validate_scenario_run_id(current_id):
try:
scenario_result = result_factory.get_result(current_id)
result_message = {
'agents_log': json.dumps(scenario_result.agent_trust_logs),
'agents_log_dict': json.dumps(scenario_result.agent_trust_logs_dict),
'trust_log': "".join(scenario_result.trust_log),
'trust_log_dict': json.dumps(scenario_result.trust_log_dict),
'scenario_run_id': scenario_result.scenario_run_id,
'scenario_name': scenario_result.scenario_name,
'supervisor_amount': scenario_result.supervisor_amount,
'type': "scenario_results"
}
if config.TIME_MEASURE:
result_message['atlas_times'] = scenario_result.atlas_times
await self.send_json(result_message)
except OSError as exception:
await self.send_json({
'message': "Scenario Result not found",
'exception': str(exception),
'type': 'scenario_result_error'
})
else:
await self.send_json({
'message': "Scenario Run ID is not valid",
'type': 'scenario_result_error'
})
elif content['type'] == 'register_eval_run' or content['type'] == 'lock_webUI':
if content['type'] == 'register_eval_run':
# only memorize the eval run if the webUI is not already registered
self.changed_evaluation_status = not config.EVALUATION_SCRIPT_RUNS
self.copy_result_pys = True
config.EVALUATION_SCRIPT_RUNS = True
await self.send_json({
'message': 'Locked WebUI',
'type': content['type']
})
elif content['type'] == 'unregister_eval_run' or content['type'] == 'unlock_webUI':
config.EVALUATION_SCRIPT_RUNS = False
self.changed_evaluation_status = False
await self.send_json({
'message': 'Unlocked WebUI',
'type': content['type']
})
elif content['type'] == 'end_socket':
await self.send_json(content)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.changed_evaluation_status = False
self.copy_result_pys = False
self.mongodb_connector = MongoDbConnector(MONGODB_URI)
| zaman365/travos_trust_model | trustlab/consumers/lab_consumer.py | lab_consumer.py | py | 13,452 | python | en | code | 0 | github-code | 50 |
20206520026
|
from view.commands.command import Command
class SearchByID(Command):
def __init__(self, presenter, view):
"""
Поиск заметки по идентификатору
:param presenter: презентер
:param view: представление
"""
super().__init__(presenter, view)
super().set_description("Найти по ID")
def execute(self):
        # reads the numeric note id, passes it to the presenter, prints the
        # returned note and handles invalid input
try:
note_id = int(input('Введите ID заметки: '))
note = self.presenter.search_by_id(note_id)
print(note)
except ValueError:
print('Некорректное значение, необходимо ввести число')
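
# Illustrative sketch (added for illustration, not part of the original file):
# how this command might be driven, assuming a hypothetical presenter object
# that exposes search_by_id(note_id); the view argument is not used by
# execute() above, so any placeholder would do.
# cmd = SearchByID(presenter=my_presenter, view=my_view)
# cmd.execute()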
|
bunny-nun/git-advance-hw
|
view/commands/search/search_by_id.py
|
search_by_id.py
|
py
| 1,003 |
python
|
ru
|
code
| 0 |
github-code
|
50
|
70071774237
|
def main():
data = readFile(input().strip())
if data is None:return -1
lst = data["list"]
n = data["n"]
subs = getSubLists(lst,n)
domins = [getDominant(sub) for sub in subs]
result = getResult(domins,len(lst))
if writer("result.txt",result)==-1:return -1
def writer(outFile,output):
    try:
        # write the counts space-separated (mirrors the input format)
        text = " ".join(map(str, output))
        with open(outFile,"w") as file:
            print(text)
            file.write(text)
    except Exception:
        return -1
def getResult(domins,l):
count = {}
for i in domins:
try:
count[i]+=1
except:
count[i]=1
keys = count.keys()
return [count[i+1] if (i+1) in keys else 0 for i in range(l)]
def getDominant(sub):
count = {}
for i in sub:
try:
count[i]+=1
except:
count[i]=1
maxFreq = max(count.values())
dominant = min([key for key,val in count.items() if val==maxFreq])
return dominant
def getSubLists(lst,n):
l = len(lst)
subs = [lst[i:i+n] for i in range(l-n+1)]
return subs
def readFile(inpFile):
try:
with open(inpFile) as file:
lst = list(map(int,file.readline().rstrip().split()))
n = int(file.readline().rstrip())
l = len(lst)
if not 0<n<l:
raise Exception
except Exception as e:
print(e)
return
return {"list":lst,"n":n}
main()
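
# Worked example (added for illustration, not part of the original submission):
# for an input file containing the list "1 2 2 3 1" and n = 2, the sublists are
# [1,2], [2,2], [2,3], [3,1]; their dominants (most frequent value, ties broken
# by the smaller value) are 1, 2, 2, 1, so result.txt ends up as "2 2 0 0 0":
# the values 1 and 2 are each dominant in two sublists, 3/4/5 in none.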
|
samitha278/UoM-Labs
|
Programming Assignment 2/uom 2015 pp2/2015-PA2-A3-1 - D.List/D.List.py
|
D.List.py
|
py
| 1,476 |
python
|
en
|
code
| 0 |
github-code
|
50
|
9658908858
|
import pathlib
import diffhtml
import flask
from flask import request
from markupsafe import Markup
app = flask.Flask(
'Diff-HTML Demo',
template_folder=pathlib.Path(__file__).parent.joinpath('templates'),
)
DEFAULT_A = """
I am the very model of a modern Major-General,
I've information vegetable, animal, and mineral,
I know the kings of England, and I quote the fights historical,
From Marathon to Waterloo, in order categorical.
"""
DEFAULT_B = """
I am the very model of an anime individual,
I've information on comical, unusual, and moe girl,
I know the girls from galgames, and I quote the lines all chuunibyo,
From Neo Eva to SAO, down to the very last detail.
"""
@app.route('/ndiff', methods=['GET', 'POST'])
def ndiff():
a = request.form.get('a', DEFAULT_A)
b = request.form.get('b', DEFAULT_B)
try:
cutoff = float(request.form.get('cutoff', 0.6))
except ValueError:
cutoff = 0.6
context = {
'result': None,
'cutoff': cutoff,
'input': {'a': a, 'b': b},
}
if request.method == 'POST':
context['result'] = Markup('<br>').join(diffhtml.ndiff(
a.splitlines(), b.splitlines(), cutoff=cutoff,
))
return flask.render_template('ndiff.html', **context)
@app.route('/')
def home():
return flask.redirect(flask.url_for('ndiff'))
if __name__ == '__main__':
app.run()
|
uranusjr/diffhtml
|
demo/app.py
|
app.py
|
py
| 1,395 |
python
|
en
|
code
| 23 |
github-code
|
50
|
9604369230
|
"""Spezielle Tags für templates."""
from django.conf import settings
from django.utils.module_loading import import_module
from django import template
from operator import itemgetter
from Startseite.views import sysstatus
from webpack_loader import utils
from webpack_loader.exceptions import WebpackBundleLookupError
from django.utils.safestring import mark_safe
import json
register = template.Library()
@register.filter(name='genus')
def genus(value, arg):
"""Genus > {{ value|genus:"maskulin,feminin,neutrum" }} Als Value wird 'm' oder 'f' erwartet sonst wird das Neutrum verwendet."""
if value == 'm':
return arg.split(',')[0]
if value == 'f':
return arg.split(',')[1]
return arg.split(',')[2]
@register.filter(name='formatDuration')
def formatDuration(value):
"""Formatiert "DurationField" > {{ value|formatDuration }} -> 00:00:00.000000 ."""
total_seconds = int(value.total_seconds())
hours = total_seconds // 3600
minutes = (total_seconds % 3600) // 60
seconds = (total_seconds % 60) + value.total_seconds() - total_seconds
return '{:02d}:{:02d}:{:09.6f}'.format(hours, minutes, seconds)
@register.filter(name='toJson')
def toJson(value):
"""Verwandelt Wert in Json > {{ value|toJson }} ."""
return json.dumps(value)
@register.filter
def comma2dot(value):
"""Komma zu Punkt."""
return str(value).replace(",", ".")
@register.filter(name='kategorienListeFilterFX')
def kategorienListeFilterFX(value):
"""Für kategorienListeFilter."""
from django.apps import apps
import re
try:
amodel = apps.get_model(value['app'], value['table'])
aRet = []
for aentry in amodel.objects.all():
def repl(m):
try:
return str(getattr(aentry, m.group(1)))
except:
return '!err'
aRet.append({'title': re.sub(r"!(\w+)", repl, value['title']), 'val': re.sub(r"!(\w+)", repl, value['val'])})
return aRet
except Exception as e:
print(e)
return
@register.assignment_tag(takes_context=True)
def navbarMaker(context):
"""Erstellt Navigation."""
anavbar = []
    for value in settings.INSTALLED_APPS:  # Walk through all installed apps and look for a navbar.py.
try:
anavbar.extend(import_module("%s.navbar" % value).navbar(context.request))
except ImportError:
pass
return sorted(anavbar, key=itemgetter('sort'))
@register.assignment_tag(takes_context=True)
def getSysStatus(context):
"""Systemstatus."""
asysstatus = sysstatus(context.request)
asysstatus['json'] = json.dumps(asysstatus)
return asysstatus
@register.assignment_tag
def to_list(*args):
"""Wandelt Aufzählung in Liste um."""
return args
@register.assignment_tag
def add_to_list(alist, add):
"""Fügt Wert zu Liste."""
if alist:
return alist + [add]
else:
return [add]
@register.simple_tag
def getFeldVal(alist, val):
"""Liest Wert aus einer Liste von Dictonarys aus."""
if alist:
for aDict in alist:
if 'name' in aDict and aDict['name'] == val:
if 'value' in aDict:
return aDict['value']
else:
return
return
@register.simple_tag
def obj_getattr(aobj, val):
"""Für kategorienListeFXData."""
def pObj_getattr(aobj, val):
        if 'all()' in val:  # The "all()" part is experimental! Untested!
bAll, pAll = val.split('all()', 1)
if pAll[:2] == '__':
pAll = pAll[2:]
if bAll[-2:] == '__':
bAll = bAll[:-2]
aData = []
for aktObj in pObj_getattr(aobj, bAll).all():
aData.append(pObj_getattr(aktObj, pAll))
return aData
if '__' in val:
avals = val.split('__')
else:
avals = [val]
for aval in avals:
if '()' in aval:
aobj = getattr(aobj, aval[:-2])()
else:
aobj = getattr(aobj, aval)
return aobj
try:
return pObj_getattr(aobj, val)
except Exception as e:
# print(e)
# print(aobj)
# print(dir(aobj))
return ''
@register.filter
def get_item(dictionary, key):
"""Wert aus Dictionary auslesen."""
return dictionary.get(key)
@register.simple_tag(takes_context=True)
def render(context, value):
"""Inline Template rendern."""
return template.engines['django'].from_string(value).render(context)
# settings value
@register.simple_tag
def settings_value(name):
"""Einstellung auslesen."""
if name in getattr(settings, 'ALLOWED_SETTINGS_IN_TEMPLATES', ''):
return getattr(settings, name, '')
return ''
@register.simple_tag
def render_bundle(bundle_name, extension=None, config='DEFAULT', attrs=''):
"""Webpack loader."""
try:
tags = utils.get_as_tags(bundle_name, extension=extension, config=config, attrs=attrs)
except WebpackBundleLookupError as e:
        return ''
return mark_safe('\n'.join(tags))
@register.filter
def subtract(value, arg):
return value - arg
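
# Illustrative usage sketch (added for illustration, not part of the original
# file): inside a Django template these helpers would typically be loaded via
# the templatetag module name and applied as filters, e.g.
#
#   {% load dioeTags %}
#   {{ person.genus|genus:"der,die,das" }}
#   {{ run.dauer|formatDuration }}
#   {{ some_dict|get_item:"key" }}
#
# The variable names above are made up for illustration only.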
|
german-in-austria/dioeDB
|
app/Startseite/templatetags/dioeTags.py
|
dioeTags.py
|
py
| 4,802 |
python
|
en
|
code
| 2 |
github-code
|
50
|
12632545151
|
from django.contrib import admin
from django.urls import path, include
from Insta.views import (PostListView, PostDetailView, PostCreateView, PostUpdateView,
PostDeleteView, UserProfile, EditProfile, ExploreView,
SignupView, addLike, addComment, toggleFollow)
urlpatterns = [
#path('admin/', admin.site.urls),
#path('/', include('Insta.urls')),
path('', PostListView.as_view(), name = 'home'),
path('post/<int:pk>', PostDetailView.as_view(), name = 'post'),
path('make_post/', PostCreateView.as_view(), name = 'make_post'),
path('update_post/<int:pk>/', PostUpdateView.as_view(), name = 'edit_post'),
path('delete_post/<int:pk>/', PostDeleteView.as_view(), name = 'delete_post'),
path('auth/signup', SignupView.as_view(), name='signup'),
path('user/<int:pk>', UserProfile.as_view(), name = 'profile'),
path('edit_profile/<int:pk>', EditProfile.as_view(), name = 'edit_profile'),
path('like', addLike, name = 'addLike'),
path('comment', addComment, name = 'addComment'),
path('togglefollow', toggleFollow, name = 'togglefollow'),
path('explore', ExploreView.as_view(), name='explore'),
]
|
clareli9/InstagramDemo
|
Insta/urls.py
|
urls.py
|
py
| 1,194 |
python
|
en
|
code
| 0 |
github-code
|
50
|
20877344117
|
import open3d as o3d
from os.path import join, exists
from os import makedirs, listdir
import numpy as np
import cv2
from utils import *
from pcd2mesh import pcd2mesh
from sklearn.neighbors import NearestNeighbors
depth_nbrs = None
rgb_nbrs = None
def depth_to_colormapjet(depth):
depth_color = depth.copy()
min_d, max_d = np.min(depth_color), np.max(depth_color)
depth_color = depth_color * 255. / (max_d - min_d)
depth_color = np.uint8(depth_color)
depth_color = cv2.applyColorMap(depth_color, cv2.COLORMAP_JET)
return depth_color
def load_depth_and_cam(dir_depth, poses, timings, timestamp, K_parameters_depth):
global depth_nbrs
if not depth_nbrs:
depth_nbrs = NearestNeighbors(n_neighbors=1, algorithm='ball_tree').fit(timings[:, 1].reshape(-1, 1))
_, frame_number_depth = depth_nbrs.kneighbors(np.array(float(timestamp) + 0 * (10 ** 4)).reshape(-1, 1))
frame_number_depth = frame_number_depth[0][0]
filename_depth = join(dir_depth, '{:06d}.png'.format(frame_number_depth))
print(f"loading depth image {filename_depth}")
depth = load_depth(filename_depth)
M_depth = poses[frame_number_depth, 1:].reshape(4, 4).copy()
K_depth = K_parameters_depth[:9].reshape(3, 3) # intrinsics
# M_depth[:3, 3] *= 1000
M_depth = np.dot(axis_transform, np.linalg.inv(M_depth))
cam_depth = {}
cam_depth['K_dist'] = K_depth
cam_depth['M_dist'] = M_depth
return depth, cam_depth
def load_rgb_and_cam(dir_rgb, poses_rgb, timing_rgb, time_stamp, K_parameters_rgb):
global rgb_nbrs
if not rgb_nbrs:
rgb_nbrs = NearestNeighbors(n_neighbors=1, algorithm='ball_tree').fit(
timing_rgb[:, 1].reshape(-1, 1))
_, frame_number_rgb = rgb_nbrs.kneighbors(
np.array(float(time_stamp) + 0 * (10 ** 4)).reshape(-1, 1))
frame_number_rgb = frame_number_rgb[0][0]
filename_rgb = join(dir_rgb, '{:06d}.png'.format(frame_number_rgb))
print(f"loading rgb image {filename_rgb}")
rgb = cv2.imread(filename_rgb)
K_color = K_parameters_rgb[:9].reshape(3, 3)
M_color = poses_rgb[frame_number_rgb, 1:].reshape(4, 4).copy()
cam_rgb = {}
cam_rgb['M_origin'] = np.dot(axis_transform, np.linalg.inv(M_color))
# M_color[:3, 3] *= 1000
M_color = np.dot(axis_transform, np.linalg.inv(M_color))
cam_rgb['K_color'] = K_color
cam_rgb['M_color'] = M_color
return rgb, cam_rgb
def create_point_cloud_from_depth(depth, cam_depth, remove_outlier=True, remove_close_to_cam=300):
'''
output: point cloud in the depth frame
'''
K_depth = cam_depth['K_dist']
img2d_converted = depthConversion(depth, K_depth[0][0], K_depth[0][2], K_depth[1][2]) # point depth to plane depth, basically, undistortion
# img2d_converted_color = depth_to_colormapjet(img2d_converted) # plane depth color map jet
# cv2.imshow('img2d_converted_color', img2d_converted_color)
points = generatepointcloud(img2d_converted, K_depth[0][0], K_depth[1][1], K_depth[0][2], K_depth[1][2]) # in the depth coor
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(points)
if remove_outlier:
pcd, _ = pcd.remove_radius_outlier(nb_points=10, radius=50)
pcd, _ = pcd.remove_statistical_outlier(nb_neighbors=20,std_ratio=4.0)
if remove_close_to_cam > 0:
center = np.array([0, 0, 0])
R = np.eye(3)
extent = np.array([remove_close_to_cam, remove_close_to_cam, remove_close_to_cam])
bb = o3d.geometry.OrientedBoundingBox(center, R, extent)
close_points_indices = bb.get_point_indices_within_bounding_box(pcd.points)
pcd = pcd.select_by_index(close_points_indices, invert=True) #select outside points
return pcd
def stitch_pcd(source, target, transformation):
source.transform(transformation)
# o3d.visualization.draw_geometries([source, target])
return source + target
def main():
dir_seq = '../AnnaTrain'
dir_depth = join(dir_seq, 'Depth')
dir_rgb = join(dir_seq, 'Video')
####### loading from dataset ######
# Depth
poses_depth = np.loadtxt(join(dir_depth, 'Pose.txt'))
timing_depth = np.loadtxt(join(dir_depth, 'Timing.txt'))
K_parameters_depth = np.loadtxt(join(dir_depth, 'Intrinsics.txt'))
dist_coeffs = np.array(K_parameters_depth[9:14]).astype('float32')
w_depth, h_depth = [int(x) for x in K_parameters_depth[-2:]]
# RGB
poses_rgb = np.loadtxt(join(dir_rgb, 'Pose.txt'))
timing_rgb = np.loadtxt(join(dir_rgb, 'Timing.txt'))
K_parameters_rgb = np.loadtxt(join(dir_rgb, 'Intrinsics.txt'))
w_color, h_color = [int(x) for x in K_parameters_rgb[-2:]]
###### prepare for color map optimization ######
# we use a continuous frames of videos as input for Color Map Optimization
# initization of containers
debug_mode = True
start_frame = 0 # the first frame of video
total_depth_frames = 1 # total number of frames to be processed
rgbd_images = [] # container for rgbd images
camera_parameters = [] # container for camera intrinsic and extrinsic parameters
whole_pcd = None # Collection of while point clouds
pcd_list = []
################ select images ###################
for frame_number_depth in range(start_frame, start_frame+total_depth_frames):
time_stamp = timing_rgb[frame_number_depth, 1]
# find the nearest depth frame
depth, cam_depth_calib = load_depth_and_cam(dir_depth,
poses_depth,
timing_depth,
time_stamp,
K_parameters_depth)
K_depth = cam_depth_calib['K_dist']
depth_undistort = cv2.undistort(depth, K_depth, dist_coeffs, None, K_depth)
if debug_mode:
# visualize depth & undistorted depth
cv2.imshow('depth_undistort', depth_to_colormapjet(depth_undistort))
cv2.imshow('depth', depth_to_colormapjet(depth))
# find the nearest rgb frame
rgb, cam_rgb_calib = load_rgb_and_cam(dir_rgb,
poses_rgb,
timing_rgb,
time_stamp,
K_parameters_rgb)
# build and store point cloud
# NOTE: pcd_colored in the world frame, pcd still in the depth frame
pcd = create_point_cloud_from_depth(depth_undistort, cam_depth_calib, remove_outlier=True, remove_close_to_cam=1500) # depth frame
# pcd_colored = get_colored_pcd(pcd, rgb, cam_rgb_calib, cam_depth_calib)
# o3d.visualization.draw_geometries([pcd_colored])
# Aligned Depth to be the same size with rgb image
depth_aligned = map_depth_to_rgb(pcd, rgb, cam_rgb_calib, cam_depth_calib, reference='rgb')
if debug_mode:
# visualize aligned depth
cv2.imshow('aligned depth', depth_to_colormapjet(depth_aligned))
# store RGBD of this frame
depth = o3d.geometry.Image(depth_aligned.astype(np.uint16))
color = o3d.geometry.Image(rgb[:,:,::-1].astype(np.uint8))
rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(color, depth, depth_trunc=1000, convert_rgb_to_intensity=False)
rgbd_images.append(rgbd_image)
if debug_mode:
cv2.imshow('rgb_color', rgb)
cv2.imshow('rgb_depth', depth_to_colormapjet(np.array(rgbd_image.depth)))
# store camera Intrinsic and Extrinsic parameters
height, width = rgb.shape[:2]
fx, fy, cx, cy = [cam_rgb_calib['K_color'][0,0], cam_rgb_calib['K_color'][1,1], cam_rgb_calib['K_color'][0,2], cam_rgb_calib['K_color'][1,2]]
intrinsic = o3d.camera.PinholeCameraIntrinsic(width, height, fx, fy, cx, cy)
extrinsic = cam_rgb_calib['M_color']
camera = o3d.camera.PinholeCameraParameters()
camera.intrinsic = intrinsic
camera.extrinsic = extrinsic
camera_parameters.append(camera)
# Stitch point clouds
# use RGBD to generate new pcd, which can be correctly rendered in color_map_optimization
pcd = o3d.geometry.PointCloud.create_from_rgbd_image(rgbd_image,intrinsic)
# transform pcd from depth frame to the world frame
pcd.transform(np.linalg.inv(cam_rgb_calib['M_color']))
# # remove outliers (may need further parameter tuning)
# pcd, _ = pcd.remove_radius_outlier(nb_points=200, radius=0.04)
# pcd, _ = pcd.remove_statistical_outlier(nb_neighbors=200,std_ratio=4.0)
if debug_mode:
# pcd_down = pcd.voxel_down_sample(voxel_size=0.005)
# if not exists('./outputs'): makedirs('./outputs')
# pcd_path = "outputs/frame" + str(frame_number_depth) + ".pcd"
# o3d.io.write_point_cloud(pcd_path, pcd_down)
o3d.visualization.draw_geometries([pcd])
# pcd_list.append(pcd)
if whole_pcd is None: whole_pcd = pcd
else: whole_pcd += pcd
#################### ICP stitching ###########################
# if whole_pcd is None:
# whole_pcd = pcd
# else:
# pcd.estimate_normals()
# whole_pcd.estimate_normals()
# result_icp = o3d.pipelines.registration.registration_icp(pcd, whole_pcd, 0.1, np.identity(4),
# o3d.pipelines.registration.TransformationEstimationPointToPlane())
# whole_pcd = stitch_pcd(pcd, whole_pcd, result_icp.transformation)
#################### downsample and remove outliers ###########################
# whole_pcd = whole_pcd.select_by_index(range(0,len(whole_pcd.points),10))
# whole_pcd, _ = whole_pcd.remove_statistical_outlier(nb_neighbors=50,std_ratio=8.0)
# visualize and store the whole scene
o3d.visualization.draw_geometries([whole_pcd])
pcd_down = whole_pcd.voxel_down_sample(voxel_size=0.005)
if not exists('./outputs'): makedirs('./outputs')
write_path = f"outputs/scene{start_frame}-{start_frame+total_depth_frames}"
o3d.io.write_point_cloud(write_path+".pcd", pcd_down)
pcd2mesh(write_path+".pcd")
# RUN color map optimization
camera_traj = o3d.camera.PinholeCameraTrajectory()
camera_traj.parameters = camera_parameters
mesh = o3d.io.read_triangle_mesh(write_path+".ply")
with o3d.utility.VerbosityContextManager(
o3d.utility.VerbosityLevel.Debug) as cm:
mesh, camera_traj = o3d.pipelines.color_map.run_rigid_optimizer(
mesh, rgbd_images, camera_traj,
o3d.pipelines.color_map.RigidOptimizerOption(
maximum_iteration=100,
maximum_allowable_depth=2.5 * 1000000,
depth_threshold_for_visibility_check=0.03,
depth_threshold_for_discontinuity_check=0.1*1000000
))
print("show with cmop")
o3d.visualization.draw_geometries([mesh], mesh_show_back_face=True)
# store optimized mesh
o3d.io.write_triangle_mesh(f"{write_path}_cmop.ply", mesh)
if __name__ == "__main__":
main()
|
Ribosome-rbx/Color_Map_Optimization
|
color_map_optimization.py
|
color_map_optimization.py
|
py
| 11,580 |
python
|
en
|
code
| 1 |
github-code
|
50
|
30413103525
|
# https://zhuanlan.zhihu.com/p/38163970
import numpy as np
from matplotlib import pyplot as plt
fig = plt.figure()  # define a new 3D coordinate axes
ax1 = plt.axes(projection='3d')
x1 = np.arange(-5,5,0.5)
x2 = np.arange(-5,5,0.5)
x1, x2 = np.meshgrid(x1, x2)
Z = x1**2 - 2*x1+1+x2**2+4*x2+4
g1 = 10-x1-10*x2
g2 = 10*x1-x2-10
ax1.plot_surface(x1,x2,Z,cmap='rainbow')
ax1.plot_surface(x1,x2,g1,cmap='rainbow')
ax1.plot_surface(x1,x2,g2,cmap='rainbow')
ax1.contour(x1,x2,Z, zdir='z',offset=-2,cmap='rainbow')  # contour plot; offset needs to be set to the minimum of Z
plt.show()
|
shao1chuan/regression
|
机器学习/svm/kkt plot.py
|
kkt plot.py
|
py
| 572 |
python
|
en
|
code
| 1 |
github-code
|
50
|
9397338642
|
from torch.utils.data.dataset import Dataset
import pandas as pd
import numpy as np
import torch
import SimpleITK as sitk
import random
import sys
sys.path.append('../')
import utils as dmutils
class ContinuousDataset(Dataset):
"""
Base class for a dataset for training on a continuous data stream
"""
def init(self, datasetfile, transition_phase_after, order, seed):
"""
Initialization for a continuous streaming dataset.
:param datasetfile (str): filepath to the dataset csv
:param transition_phase_after (float): fraction of images for each scanner after data from the next appears in stream
:param order (list): order of scanners in data stream
:param seed (int): seed to ensure reproducibility
"""
df = pd.read_csv(datasetfile)
assert (set(['train']).issubset(df.split.unique()))
np.random.seed(seed)
res_dfs = list()
for r in order:
res_df = df.loc[df.scanner == r]
res_df = res_df.loc[res_df.split == 'train']
res_df = res_df.sample(frac=1, random_state=seed)
res_dfs.append(res_df.reset_index(drop=True))
combds = None
new_idx = 0
for j in range(len(res_dfs) - 1):
old = res_dfs[j]
new = res_dfs[j + 1]
old_end = int((len(old) - new_idx) * transition_phase_after) + new_idx
if combds is None:
combds = old.iloc[:old_end]
else:
combds = combds.append(old.iloc[new_idx + 1:old_end])
old_idx = old_end
old_max = len(old) - 1
new_idx = 0
i = 0
while old_idx <= old_max and (i / ((old_max - old_end) * 2) < 1):
take_newclass = np.random.binomial(1, min(i / ((old_max - old_end) * 2), 1))
if take_newclass:
combds = combds.append(new.iloc[new_idx])
new_idx += 1
else:
combds = combds.append(old.iloc[old_idx])
old_idx += 1
i += 1
combds = combds.append(old.iloc[old_idx:])
combds = combds.append(new.iloc[new_idx:])
combds.reset_index(inplace=True, drop=True)
self.df = combds
def __len__(self):
return len(self.df)
class LIDCContinuous(ContinuousDataset):
def __init__(self, datasetfile, transition_phase_after=.8, order=['ges', 'geb', 'sie', 'time_siemens'], seed=None, cropped_to=(288, 288)):
super(ContinuousDataset, self).__init__()
self.init(datasetfile, transition_phase_after, order, seed)
self.cropped_to = cropped_to
self.df_multiplenodules = pd.read_csv('/project/catinous/lungnodules_allnodules.csv')
def load_image(self, path, shiftx_aug=0, shifty_aug=0):
dcm = sitk.ReadImage(path)
img = sitk.GetArrayFromImage(dcm)
if self.cropped_to is not None:
w = img.shape[1]
s1 = int((w - self.cropped_to[0]) / 2)
e1 = int(s1 + self.cropped_to[0])
h = img.shape[2]
s2 = int((h - self.cropped_to[1]) / 2)
e2 = int(s2 + self.cropped_to[1])
img = img[:, s1 + shiftx_aug:e1 + shiftx_aug, s2 + shifty_aug:e2 + shifty_aug]
img = dmutils.intensity_window(img, low=-1024, high=1024)
img = dmutils.norm01(img)
# return img[None, :, :]
return np.tile(img, [3, 1, 1])
def load_annotation(self, elem, shiftx_aug=0, shifty_aug=0):
dcm = sitk.ReadImage(elem.image)
x = elem.x1
y = elem.y1
x2 = elem.x2
y2 = elem.y2
if self.cropped_to is not None:
x -= (dcm.GetSize()[0] - self.cropped_to[0]) / 2
y -= (dcm.GetSize()[1] - self.cropped_to[1]) / 2
x2 -= (dcm.GetSize()[0] - self.cropped_to[0]) / 2
y2 -= (dcm.GetSize()[1] - self.cropped_to[1]) / 2
y -= shiftx_aug
x -= shifty_aug
y2 -= shiftx_aug
x2 -= shifty_aug
xs = []
x2s = []
ys = []
y2s = []
for i, row in self.df_multiplenodules.loc[self.df_multiplenodules.image == elem.image].iterrows():
x1_new = row.x1 - shifty_aug
x2_new = row.x2 - shifty_aug
y1_new = row.y1 - shiftx_aug
y2_new = row.y2 - shiftx_aug
if x1_new > 0 and x1_new < self.cropped_to[0] and y1_new > 0 and y1_new < self.cropped_to[1]:
xs.append(x1_new)
x2s.append(x2_new)
ys.append(y1_new)
y2s.append(y2_new)
if xs == []:
box = np.zeros((1, 4))
box[0, 0] = x
box[0, 1] = y
box[0, 2] = x2
box[0, 3] = y2
else:
box = np.zeros((len(xs) + 1, 4))
box[0, 0] = x
box[0, 1] = y
box[0, 2] = x2
box[0, 3] = y2
for j, x in enumerate(xs):
box[j + 1, 0] = x
box[j + 1, 1] = ys[j]
box[j + 1, 2] = x2s[j]
box[j + 1, 3] = y2s[j]
return box
def __getitem__(self, index):
elem = self.df.iloc[index]
if self.cropped_to is None:
shiftx_aug = 0
shifty_aug = 0
else:
shiftx_aug = random.randint(-20, 20)
shifty_aug = random.randint(-20, 20)
img = self.load_image(elem.image, shiftx_aug, shifty_aug)
annotation = self.load_annotation(elem, shiftx_aug, shifty_aug)
target = {}
target['boxes'] = torch.as_tensor(annotation, dtype=torch.float32)
target['labels'] = torch.as_tensor([elem.bin_malignancy + 1] * len(annotation), dtype=torch.int64)
target['image_id'] = torch.tensor([index] * len(annotation))
target['area'] = torch.as_tensor(
((annotation[:, 3] - annotation[:, 1]) * (annotation[:, 2] - annotation[:, 0])))
target['iscrowd'] = torch.zeros((len(annotation)), dtype=torch.int64)
return torch.as_tensor(img, dtype=torch.float32), target, elem.scanner, elem.image
class CardiacContinuous(ContinuousDataset):
def __init__(self, datasetfile, transition_phase_after=.8, order=['Siemens', 'GE', 'Philips', 'Canon'], seed=None):
super(ContinuousDataset, self).__init__()
self.init(datasetfile, transition_phase_after, order, seed)
self.outsize = (240, 196)
def load_image(self, elem):
img = np.load(elem.slicepath)
mask = np.load(elem.slicepath[:-4] + '_gt.npy')
if img.shape != self.outsize:
img = dmutils.crop_center_or_pad(img, self.outsize[0], self.outsize[1])
mask = dmutils.crop_center_or_pad(mask, self.outsize[0], self.outsize[1])
return img[None, :, :], mask
def __getitem__(self, index):
elem = self.df.iloc[index]
img, mask = self.load_image(elem)
return torch.as_tensor(img, dtype=torch.float32), torch.as_tensor(mask,
dtype=torch.long), elem.scanner, elem.slicepath
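
# Illustrative usage sketch (added for illustration, not part of the original
# module): wrapping one of the continuous datasets in a PyTorch DataLoader.
# The csv path is an assumption; the real dataset files are project-specific.
if __name__ == "__main__":
    from torch.utils.data import DataLoader

    ds = CardiacContinuous('cardiac_datasetfile.csv', transition_phase_after=0.8, seed=42)
    loader = DataLoader(ds, batch_size=4, shuffle=False)  # keep the stream order
    img, mask, scanner, slicepath = next(iter(loader))
    print(img.shape, mask.shape, scanner, slicepath)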
|
cirmuw/dynamicmemory
|
dataset/ContinuousDataset.py
|
ContinuousDataset.py
|
py
| 7,186 |
python
|
en
|
code
| 13 |
github-code
|
50
|
16198159058
|
import my_queue
import time
import random
def simulate_line(till_show, max_time):
pq = my_queue.Queue()
tix_sold = []
for i in range(100):
pq.enqueue("person" + str(i))
t_end = time.time() + till_show
now = time.time()
while now < t_end and not pq.is_empty():
now = time.time()
r = random.randint(0, max_time)
time.sleep(r)
person = pq.dequeue()
print(person)
tix_sold.append(person)
return tix_sold
sold = simulate_line(5, 1)
print(sold)
|
98shimpei/python_test
|
ticket.py
|
ticket.py
|
py
| 527 |
python
|
en
|
code
| 0 |
github-code
|
50
|
28720445234
|
# import tkinter as tk
#
# root = tk.Tk()
# root.title('Languages')
# root.geometry('500x300')
#
# v = tk.IntVar()
# v.set(1)
#
#
# def show_val():
# print(v.get())
#
#
# languages = [(1, "JAVA"), (2, "Python"), (3, "c#"), (4, "Javascript")]
# tk.Label(root, text="""Choose Language you like most""", justify=tk.LEFT, padx=20).pack()
#
# for val, language in languages:
# tk.Radiobutton(root, text=languages, value=val, variable=v, command=show_val, padx=20).pack(anchor=tk.W)
# root.mainloop()
import tkinter as tk
root = tk.Tk()
v = tk.IntVar()
v.set(1) # initializing the choice, i.e. Python
languages = [
("Python", 1),
("Perl", 2),
("Java", 3),
("C++", 4),
("C", 5)
]
def ShowChoice():
print(v.get())
tk.Label(root,
text="""Choose your favourite
programming language:""",
justify=tk.LEFT,
padx=20).pack()
for language, val in languages:
tk.Radiobutton(root,
text=language,
padx=20,
variable=v,
command=ShowChoice,
value=val).pack(anchor=tk.W)
root.mainloop()
|
Yash-barot25/functions-timemodlues-ranges
|
TkinterHandsOn/Demo8.py
|
Demo8.py
|
py
| 1,199 |
python
|
en
|
code
| 0 |
github-code
|
50
|
17406595834
|
import re
import pprint
def get_new_loc(op, val, loc, acc):
"""updates accumulator and finds next line location"""
if op == "jmp":
loc += int(val)
elif op == "acc":
acc += int(val)
loc += 1
else:
loc += 1
return loc, acc
def flip_op(op):
"""flips a jmp operation to nop and vice versa"""
if op == "jmp":
return "nop"
elif op == "nop":
return "jmp"
else:
return op
def execute(loc, locs, acc, data, debug=False):
"""executes the commands in data"""
while loc != (len(data) - 1):
locs.append(loc)
op, val = data[loc]
loc, acc = get_new_loc(op, val, loc, acc)
if loc in locs:
if not debug:
print(f"Infinite loop happens to {locs[-2]} to {loc}")
print(f"Value of accumulator before infinite loop: {acc}")
return (False, acc)
print(f"Value of accumulator at completion: {acc}")
return (True, acc)
def debug(loc, locs, acc, data):
"""debugs the data to find infinite loop root cause"""
while True:
op, val = data[loc]
new_op = flip_op(op)
if new_op in ["nop", "jmp"]:
tmp = data.copy()
tmp[loc][0] = new_op
res = execute(loc, locs, acc, data, True)
if res[0]:
break
loc, acc = get_new_loc(op, val, loc, acc)
print(f"Bug located at: {loc}")
with open("data/day8.txt", "r") as f:
data = f.readlines()
data = [list(re.findall(r"^([a-z]+) ([\+\-]\d+)$", x)[0]) for x in data]
locs = []
loc = 0
acc = 0
count = 0
mode = input("What mode to run boot code? 'normal' or debug ").lower()
if mode == "normal":
res = execute(loc, locs, acc, data)
elif mode == "debug":
debug(loc, locs, acc, data)
else:
raise TypeError("Incorrect input, enter either 'normal' or 'debug'")
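
# Worked example (added for illustration, not part of the original solution):
# for a three-line input file
#   acc +3
#   jmp -1
#   acc +7
# 'normal' mode visits line 0, then line 1 whose "jmp -1" returns to the already
# visited line 0, so an infinite loop is reported with an accumulator value of 3.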
|
JackNelson/advent-of-code
|
2020/day8.py
|
day8.py
|
py
| 1,996 |
python
|
en
|
code
| 0 |
github-code
|
50
|
36856186258
|
import string
import json
import pickle
import itertools
import collections
import numpy as np
import math
TRAIN_PATH = "data/yelp_reviews_train.json"
TEST_PATH = "data/yelp_reviews_test.json"
DEV_PATH = "data/yelp_reviews_dev.json"
STOPWORD_LIST = "data/stopword.list"
TOP_K_FOR_FEATURE = 2000
NUM_FEATURE = 5
def preprocess_text(text, stopwords):
# process sentence
text = text.lower()
text = text.translate(str.maketrans('', '', string.punctuation))
text = text.translate(str.maketrans('', '', string.digits))
remain_list = [w for w in text.split() if w not in stopwords and w.isalpha()]
return list(filter(None, remain_list))
def stars_to_mtx(star_list):
# convert rating into a matrix of column 5
star_ndarray = np.asarray(star_list).T
# need to subtract one so as to align with the index
star_mtx = np.eye(NUM_FEATURE)[star_ndarray-1]
return star_mtx
def preprocess_train_into_dic(data, stopwords):
# train_data.pickle
data_dic = {'user_id':[], 'stars':[], 'text':[]}
for d in data:
data_dic["user_id"].append(d['user_id'])
data_dic["stars"].append(d['stars'])
processed_text = preprocess_text(d['text'], stopwords)
data_dic["text"].append(processed_text)
# save to pickle for further access
with open('train_data.pickle', 'wb') as pkl:
pickle.dump(data_dic, pkl, protocol=pickle.HIGHEST_PROTOCOL)
return data_dic
def preprocess_into_dic(data, filename, stopwords):
# test_data/dev_data.pickle
data_dic = {'user_id':[], 'text':[]}
for d in data:
data_dic["user_id"].append(d['user_id'])
processed_text = preprocess_text(d['text'], stopwords)
data_dic["text"].append(processed_text)
# save to pickle for further access
with open(filename+'.pickle', 'wb') as pkl:
pickle.dump(data_dic, pkl, protocol=pickle.HIGHEST_PROTOCOL)
return data_dic
def get_data_stats(data_dic):
# get CTF for token while printing out the stats
tokens = itertools.chain.from_iterable(data_dic['text'])
token_counter = collections.Counter(tokens)
star_counter = collections.Counter(data_dic['stars'])
print(token_counter.most_common(9))
print(star_counter)
total_star = len(data_dic['stars'])
for k in dict(star_counter):
print("Star {} has percentage: {}%".format(k, 100*star_counter[k]/total_star))
# lastly save the corpus top 2000 word into pickle for ctf access
with open('top'+str(TOP_K_FOR_FEATURE)+'CTFtoken.pickle', 'wb') as pkl:
pickle.dump(dict(token_counter.most_common(TOP_K_FOR_FEATURE)), pkl, protocol=pickle.HIGHEST_PROTOCOL)
def get_token_df(data_dic, if_save=True, shift=0):
# get DF frequency into dictionary, shift allows the window to slide from the top common word
# to some later word with more information
df_dic = {}
for text in data_dic['text']:
unique_tkn = set(text)
for tkn in unique_tkn:
if tkn in df_dic:
df_dic[tkn]+=1
else:
df_dic[tkn]=1
sorted_df_dic = sorted(df_dic.items(), key=lambda t: t[1], reverse=True)
top_df_dic = dict(sorted_df_dic[shift:TOP_K_FOR_FEATURE+shift])
if if_save:
with open('top'+str(TOP_K_FOR_FEATURE+shift)+'DFtoken.pickle', 'wb') as pkl:
pickle.dump(top_df_dic, pkl, protocol=pickle.HIGHEST_PROTOCOL)
return dict(sorted_df_dic)
def get_token_tfidf(data_dic):
# get the tf*idf value for token, define idf as log((N+1)/df)
tokens = itertools.chain.from_iterable(data_dic['text'])
token_counter = collections.Counter(tokens)
df_dic = get_token_df(data_dic, False)
N = len(data_dic['text'])
tfidf_dic = {}
for t, df in df_dic.items():
idf = math.log((N+1)/df)
tfidf = idf*token_counter[t]
tfidf_dic[t] = tfidf
sorted_tfidf_dic = sorted(tfidf_dic.items(), key=lambda t: t[1], reverse=True)
top_tfidf_dic = dict(sorted_tfidf_dic[:TOP_K_FOR_FEATURE])
with open('top' + str(TOP_K_FOR_FEATURE) + 'TFIDFtoken.pickle', 'wb') as pkl:
pickle.dump(top_tfidf_dic, pkl, protocol=pickle.HIGHEST_PROTOCOL)
def get_token_idf(data_dic):
# saving the idf value for all tokens, not limited to most frequent words
df_dic = get_token_df(data_dic, False)
N = len(data_dic['text'])
idf_dic = {}
for t, df in df_dic.items():
idf = math.log((N+1)/df)
idf_dic[t] = idf
with open('allIDFs.pickle', 'wb') as pkl_idf:
pickle.dump(idf_dic, pkl_idf, protocol=pickle.HIGHEST_PROTOCOL)
def run_preprocessing():
# aggregate function for data preprocessing
# this function should only run once and all pickle files would be saved
train_data = [json.loads(line) for line in open(TRAIN_PATH, 'r')]
test_data = [json.loads(line) for line in open(TEST_PATH, 'r')]
dev_data = [json.loads(line) for line in open(DEV_PATH, 'r')]
with open(STOPWORD_LIST) as r:
stopwords = r.read().split()
print("Loading Train Data")
train_dic = preprocess_train_into_dic(train_data, stopwords)
print("Loading Dev Data")
dev_dic = preprocess_into_dic(dev_data, "dev_data", stopwords)
print("Loading Test Data")
test_dic = preprocess_into_dic(test_data, "test_data", stopwords)
# for train_data only, create token files for feature creation
get_data_stats(train_dic)
_ = get_token_df(train_dic)
get_token_idf(train_dic)
# get_token_tfidf(train_dic)
def svm_preprocessing(feature_type, data_type):
# specific preprocessing for SVM as the function takes in data of different format
with open('{}_feature_{}.pickle'.format(feature_type, data_type), 'rb') as r:
data = pickle.load(r)
writer = open("SVM_{}_{}.txt".format(feature_type, data_type), "w")
print("Writing {} for SVM {} data".format(feature_type, data_type))
for f in range(data.shape[0]):
writer.write("0 ")
for num, feature in zip(data[f].data, data[f].nonzero()[1]):
writer.write("{}:{} ".format(feature+1, num))
writer.write("\n")
writer.close()
def svm_train_preprocessing(feature_type):
# specific preprocessing for SVM as the function takes in data of different format
# for training data, need different handling on labels
with open('{}_feature_train.pickle'.format(feature_type), 'rb') as r:
data = pickle.load(r)
with open('train_data.pickle', 'rb') as data_train:
train_dic = pickle.load(data_train)
stars = train_dic['stars']
writer = open("SVM_{}_train.txt".format(feature_type), "w")
for f in range(data.shape[0]):
writer.write("{} ".format(stars[f]))
for num, feature in zip(data[f].data, data[f].nonzero()[1]):
writer.write("{}:{} ".format(feature+1, num))
writer.write("\n")
writer.close()
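
# Illustrative sketch (added for illustration, not part of the original script):
# the intended call order, assuming the yelp json files and the stopword list
# referenced above are present. idf is defined as log((N+1)/df), so with N=1000
# reviews and a token appearing in df=10 of them, idf = log(1001/10) ~= 4.61.
if __name__ == "__main__":
    run_preprocessing()  # writes the *_data.pickle and token pickles
    # The '<feature>_feature_<split>.pickle' inputs of the SVM helpers are built
    # elsewhere in the project, so the feature name below is an assumption.
    # svm_train_preprocessing('DF')
    # svm_preprocessing('DF', 'dev')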
|
sharonwx54/TextMining
|
NetflixReview/data_preprocess.py
|
data_preprocess.py
|
py
| 6,863 |
python
|
en
|
code
| 0 |
github-code
|
50
|
22312550501
|
# -*- coding: utf-8 -*-
from rest_framework import relations
from rest_framework.test import APIRequestFactory
from ralph.api.serializers import ReversedChoiceField
from ralph.api.tests.api import (
Car,
CarSerializer,
CarViewSet,
ManufacturerSerializer2,
ManufacturerViewSet
)
from ralph.api.viewsets import RalphAPIViewSet
from ralph.tests import RalphTestCase
class ViewsetWithoutRalphPermission(RalphAPIViewSet):
permission_classes = []
class ViewsetWithoutPermissionsForObjectFilter(RalphAPIViewSet):
filter_backends = []
class TestRalphViewset(RalphTestCase):
def setUp(self):
super().setUp()
self.request_factory = APIRequestFactory()
def test_should_raise_attributeerror_when_ralph_permission_missing(self):
with self.assertRaises(AttributeError):
ViewsetWithoutRalphPermission()
def test_should_raise_attributeerror_when_permission_for_object_filter_missing(self): # noqa
with self.assertRaises(AttributeError):
ViewsetWithoutPermissionsForObjectFilter()
def test_get_serializer_class_should_return_base_when_safe_request(self):
request = self.request_factory.get('/')
cvs = CarViewSet()
cvs.request = request
self.assertEqual(cvs.get_serializer_class(), CarSerializer)
def test_get_serializer_class_should_return_dynamic_when_not_safe_request(self): # noqa
request = self.request_factory.post('/')
cvs = CarViewSet()
cvs.request = request
serializer_class = cvs.get_serializer_class()
self.assertEqual(serializer_class.__name__, 'CarSaveSerializer')
self.assertEqual(serializer_class.Meta.model, Car)
self.assertEqual(serializer_class.Meta.depth, 0)
self.assertEqual(
serializer_class.serializer_choice_field, ReversedChoiceField
)
self.assertEqual(
serializer_class.serializer_related_field,
relations.PrimaryKeyRelatedField
)
def test_get_serializer_class_should_return_defined_when_not_safe_request_and_save_serializer_class_defined(self): # noqa
request = self.request_factory.patch('/')
mvs = ManufacturerViewSet()
mvs.request = request
self.assertEqual(mvs.get_serializer_class(), ManufacturerSerializer2)
class TestAdminSearchFieldsMixin(RalphTestCase):
def test_get_filter_fields_from_admin(self):
cvs = CarViewSet()
self.assertEqual(
cvs.filter_fields, ['manufacturer__name', 'year']
)
|
0x24bin/ralph
|
src/ralph/api/tests/test_viewsets.py
|
test_viewsets.py
|
py
| 2,558 |
python
|
en
|
code
| null |
github-code
|
50
|
17438254361
|
count=0
total=0
while True:
n= int(input("Enter an integer (-1 to exit): "))
if n==-1:
break
count+=1
total+=n
print(f"The sum of {count} number(s) is {total}.")
|
HiMAIayas/SIIT_Lab
|
GTS123 (Intro To ComProg)/lab7 (while loop)/lab7_9.py
|
lab7_9.py
|
py
| 190 |
python
|
en
|
code
| 0 |
github-code
|
50
|
30098709852
|
from lxml import etree
class XPATH_CONTEXT (object):
def __init__ (self, file_path, ns_prefix = None):
self.doc = etree.parse (file_path)
if ns_prefix:
self.nsmap = {ns_prefix : self.doc.getroot().nsmap [None]}
else:
self.nsmap = None
def attribute (self, xpath):
attrib_list = self.node_list (xpath)
if len (attrib_list):
return attrib_list [0]
else:
return None
def text (self, xpath):
attrib_list = self.node_list (xpath)
if len (attrib_list):
return attrib_list [0].text
else:
return None
def node_list (self, xpath):
return self.doc.xpath (xpath, namespaces = self.nsmap)
def int_value (self, xpath):
result = self.doc.xpath (xpath, namespaces = self.nsmap)
if isinstance (result, float):
return int (result)
else:
return None
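
# Illustrative usage sketch (added for illustration, not part of the original
# module); 'project.xml' is a hypothetical file whose root element declares a
# default namespace, and the prefix 'p' is only a local alias for it.
if __name__ == "__main__":
    ctx = XPATH_CONTEXT ('project.xml', ns_prefix = 'p')
    print (ctx.text ('/p:project/p:name'))
    print (ctx.attribute ('/p:project/@version'))
    print (ctx.int_value ('count(/p:project/p:module)'))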
|
finnianr/Eiffel-Loop-safe
|
tool/python-support/eiffel_loop/xml/xpath.py
|
xpath.py
|
py
| 794 |
python
|
en
|
code
| 1 |
github-code
|
50
|
6351621109
|
import machine
from time import sleep
from ssd1306 import SSD1306_I2C
class Pantalla:
def __init__(self, pin1=5, pin2=4):
'''Init display'''
i2c = machine.I2C(scl=machine.Pin(pin1), sda=machine.Pin(pin2))
self.oled = SSD1306_I2C(128, 32, i2c)
self.clear()
def clear(self, color=0):
        '''Clears the screen contents; color can be 0 (black) or 1 (white)'''
self.oled.fill(color)
self.pos = 0
def texto(self, texto):
inicio = 0
self.oled.text(texto, inicio, self.pos)
self.oled.show()
self.pos += 10
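
# Illustrative usage sketch (added for illustration, not part of the original
# module); it only runs on a MicroPython board with an SSD1306 display wired to
# the default pins used above.
if __name__ == "__main__":
    p = Pantalla()
    p.texto('hola')
    p.texto('mundo')  # each call draws 10 px lower
    sleep(2)
    p.clear()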
|
katmai1/microesp
|
pantalla.py
|
pantalla.py
|
py
| 609 |
python
|
es
|
code
| 0 |
github-code
|
50
|
18243852987
|
budget = float(input())
statist = int(input())
clothes_number = float(input())
decor = 0.1 * budget
if statist >= 150:
clothes_price = clothes_number * 0.9 * statist
else:
clothes_price = clothes_number * statist
outcome = clothes_price + decor
needed = outcome - budget
have = budget - outcome
if outcome > budget:
print(f' Not enough money!')
print(f' Wingard needs {needed:.2f} leva more.')
if budget >= outcome:
print(f' Action!')
print(f' Wingard starts filming with {have:.2f} leva left.')
|
gajev/programming_basics
|
Conditional_statements_lab/Godzilla_vs_kong.py
|
Godzilla_vs_kong.py
|
py
| 551 |
python
|
en
|
code
| 0 |
github-code
|
50
|
1169656225
|
import molsysmt as msm
import os
import shutil
from pathlib import Path
data_dir = Path('../../../data')
# Purge
files_to_be_purged = [
'pdb/5zmz.pdb',
'mmtf/5zmz.mmtf',
]
for filename in files_to_be_purged:
filepath = Path(data_dir, filename)
if os.path.isfile(filepath):
os.remove(filepath)
# Make
msm.convert('pdb_id:5zmz', to_form='5zmz.pdb')
shutil.move('5zmz.pdb', Path(data_dir, 'pdb/5zmz.pdb'))
msm.convert('pdb_id:5zmz', to_form='5zmz.mmtf')
shutil.move('5zmz.mmtf', Path(data_dir, 'mmtf/5zmz.mmtf'))
print('DONE')
|
uibcdf/MolSysMT
|
molsysmt/systems/make/5zmz.py
|
5zmz.py
|
py
| 573 |
python
|
en
|
code
| 11 |
github-code
|
50
|
73735252956
|
# -*- coding: utf-8 -*-
import scrapy
from scrapy.selector import Selector
from scrapy.http import Request
from douban.items import HuanQiuChina, DaoMuBiJi
class Douban(scrapy.Spider):
name = 'huanqiu_china'
start_urls = ['http://china.huanqiu.com/']
def parse(self, response):
media_xpath = '/html/body/div[4]/div[1]/div[7]/div[1]/ul/li/a'
title_xpath = './dl/dt/h3/text()'
content_xpath = './@href'
medias = response.xpath(media_xpath)
for media in medias:
try:
title = media.xpath(title_xpath).extract()[0]
content = media.xpath(content_xpath).extract()[0]
item = HuanQiuChina()
item['title'] = title
item['content'] = content
print(item)
yield item
except:
continue
class DaoMuBiJiSpider(scrapy.Spider):
name = 'daomubiji'
start_urls = ['http://www.daomubiji.com/dao-mu-bi-ji-%s' % num for num in range(1, 8)]
def parse(self, response):
selector = Selector(response)
book = selector.xpath('/html/body/div[1]/div/h1/text()').extract()[0]
chaptors = selector.xpath('/html/body/section/div[2]/div/article/a')
for chaptor in chaptors:
bookTitle, chaptorNum, chaptorName = chaptor.xpath('./text()').extract()[0].split(' ')
url = chaptor.xpath('./@href').extract()[0]
item = DaoMuBiJi()
item['book'] = book
item['bookTitle'] = bookTitle
item['chaptorNum'] = chaptorNum
item['chaptorName'] = chaptorName
item['url'] = url
yield Request(url, callback=self.parseContent, meta={'item': item})
def parseContent(self, response):
selector = Selector(response)
item = response.meta['item']
html = selector.xpath('/html/body/section/div[1]/div/article').extract()[0]
item['content'] = html
yield item
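
# Illustrative note (added for illustration, not part of the original file):
# these spiders are normally started from the Scrapy project root, e.g.
#   scrapy crawl daomubiji -o chapters.json
#   scrapy crawl huanqiu_china
# The output file name above is just an example.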
|
zhannglei/douban
|
douban/spiders/spider.py
|
spider.py
|
py
| 2,060 |
python
|
en
|
code
| 0 |
github-code
|
50
|
13139475765
|
__author__ = 'jwilliams'
import os
import lib
#user defined vars
path = 'L:\\cbt_video_published\\content\\';
lesson = 'EL-2-Current'
code = 'FM' #setting code will check an entire product, set to 'HU-', 'IFR-', 'EG-' ect..
dir = path + lesson + "\\"
os.chdir(dir)
if code:
result = lib.check_product(code)
else:
print('checking the lesson ' + dir + '....')
result = lib.check_lesson()
print("\nNOT USED:")
for file in result:
print(file)
|
GibsonStudio/pythonProjects
|
find_unused_in_html/main.py
|
main.py
|
py
| 479 |
python
|
en
|
code
| 0 |
github-code
|
50
|
12041162217
|
import sys
import datetime
windowName = "SpaceX Booster Use/Reuse Beholder"
chartName = "SpaceX Core History"
spaceXCreationDate = datetime.date(2002, 5, 6)
colors = {
'EXPENDED': '#bbbbbb',
'HOP': '#d9ff5e',
'OCEAN': '#053fff',
'RTLS': '#06b700',
'ASDS': '#56b9f7',
'RUD': '#ff0000',
'REUSE': '#b3e6ff',
'DAYS': '#3a84fc',
'HEAVY': 'none',
}
legend = [
{'icons': [[colors['EXPENDED'], 'black', 'o']], 'label': "No landing"},
{'icons': [[colors['HOP'], 'black', 'o']], 'label': "Grasshopper hop"},
{'icons': [[colors['OCEAN'], 'black', 'o']], 'label': "Ocean landing"},
{'icons': [[colors['RTLS'], 'black', 'o']], 'label': "RTLS landing"},
{'icons': [[colors['ASDS'], 'black', 'o']], 'label': "ASDS landing"},
{'icons': [['none', 'red', 'o']], 'label': "Failed landing"},
{'icons': [[colors['RUD'], 'red', 'x']], 'label': "RUD"},
{'icons': [['none', 'black', '^']], 'label': "Dragon"},
{'icons': [['none', 'black', '*']], 'label': "Starlink"},
{'icons': [[colors['REUSE'], 'none', 'none']], 'label': "Reuse"},
{'icons': [[colors['HEAVY'], 'none', 'black']], 'label': "Falcon Heavy"},
]
if len(sys.argv) != 2 or str.lower(sys.argv[1]) not in {'all', 'f1', 'f9', 'ft'}:
print("Usage : py sburb.py [mode] with mode = all, F1, F9 or FT (case-insensitive)")
exit()
mode = str.lower(sys.argv[1])
def getCoreNumbers():
return [core.number for core in cores]
def getActiveCoreNumbers():
return [core.number for core in cores if core.active]
def getFirstCores():
firstCores = []
for core in cores:
if core.version not in [firstCore.version for firstCore in firstCores]:
firstCores.append(core)
return firstCores
def getLaunchesByCore(core):
return [launch.date for launch in launches if core in launch.cores]
def pickLaunchColor(launch, landingIndex):
location = launch.landings[landingIndex][0]
if (launch.hop == True):
color = colors['HOP']
elif (location in {"No attempt"}):
color = colors['EXPENDED']
elif (location in {"Ocean"}):
color = colors['OCEAN']
elif (location in {"Original JRTI", "JRTI", "OCISLY"}):
color = colors['ASDS']
elif (location in {"Land (parachutes)", "LZ-1", "LZ-2", "LZ-4"}):
color = colors['RTLS']
else:
color = 'black'
return color
|
rinoldm/SBURB
|
data.py
|
data.py
|
py
| 2,314 |
python
|
en
|
code
| 7 |
github-code
|
50
|
2472224163
|
# Import the dependencies.
import configparser
from datetime import datetime
from uuid import uuid4
from pathlib import Path
import os
# Import client library classes.
from influxdb_client import Authorization, InfluxDBClient, Permission, PermissionResource, Point, WriteOptions
from influxdb_client.client.authorizations_api import AuthorizationsApi
from influxdb_client.client.bucket_api import BucketsApi
from influxdb_client.client.query_api import QueryApi
from influxdb_client.client.write_api import SYNCHRONOUS
from influxdb_client.domain.dialect import Dialect
from .sensor import SensorImit
# Get the configuration key-value pairs.
path = Path(__file__)
ROOT_DIR = path.parent.absolute()
config_path = os.path.join(ROOT_DIR, "config.ini")
config = configparser.ConfigParser()
config.read(config_path)
def createauthorization(device_id) -> Authorization:
influxdb_client = InfluxDBClient(url=config.get('APP', 'INFLUX_URL'),
token=config.get('APP', 'INFLUX_TOKEN'),
org=config.get('APP', 'INFLUX_ORG'))
authorization_api = AuthorizationsApi(influxdb_client)
# get bucket_id from bucket
buckets_api = BucketsApi(influxdb_client)
buckets = buckets_api.find_bucket_by_name(config.get('APP', 'INFLUX_BUCKET')) # function returns only 1 bucket
bucket_id = buckets.id
org_id = buckets.org_id
desc_prefix = f'IoTCenterDevice: {device_id}'
org_resource = PermissionResource(org_id=org_id, id=bucket_id, type="buckets")
#print(org_resource)
read = Permission(action="read", resource=org_resource)
write = Permission(action="write", resource=org_resource)
permissions = [write, read]
#print('org_id', org_id)
#print('permissions', permissions)
#print('desc_prefix', desc_prefix)
authorization = Authorization(org_id=org_id, permissions=permissions, description=desc_prefix)
print(authorization)
request = authorization_api.create_authorization(authorization)
return request
def create_device(device_id=None):
config = configparser.ConfigParser()
config.read('config.ini')
influxdb_client = InfluxDBClient(url=config.get('APP', 'INFLUX_URL'),
token=config.get('APP', 'INFLUX_TOKEN'),
org=config.get('APP', 'INFLUX_ORG'))
if device_id is None:
device_id = str(uuid4())
write_api = influxdb_client.write_api(write_options=SYNCHRONOUS)
point = Point('deviceauth') \
.tag("deviceId", device_id) \
.field('key', f'fake_auth_id_{device_id}') \
.field('token', f'fake_auth_token_{device_id}')
client_response = write_api.write(bucket=config.get('APP', 'INFLUX_BUCKET_AUTH'), record=point)
# write() returns None on success
if client_response is None:
return device_id
# Return None on failure
return None
def get_device(device_id=None) -> {}:
influxdb_client = InfluxDBClient(url=config.get('APP', 'INFLUX_URL'),
token=config.get('APP', 'INFLUX_TOKEN'),
org=config.get('APP', 'INFLUX_ORG'))
# Queries must be formatted with single and double quotes correctly
query_api = QueryApi(influxdb_client)
device_filter = ''
if device_id:
device_id = str(device_id)
device_filter = f'r.deviceId == "{device_id}" and r._field != "token"'
else:
device_filter = f'r._field != "token"'
flux_query = f'from(bucket: "{config.get("APP", "INFLUX_BUCKET_AUTH")}") ' \
f'|> range(start: 0) ' \
f'|> filter(fn: (r) => r._measurement == "deviceauth" and {device_filter}) ' \
f'|> last()'
response = query_api.query(flux_query)
result = []
for table in response:
for record in table.records:
try:
'updatedAt' in record
except KeyError:
record['updatedAt'] = record.get_time()
record[record.get_field()] = record.get_value()
result.append(record.values)
return result
def write_measurements(device_id):
influxdb_client = InfluxDBClient(url=config.get('APP', 'INFLUX_URL'),
token=config.get('APP', 'INFLUX_TOKEN'),
org=config.get('APP', 'INFLUX_ORG'))
write_api = influxdb_client.write_api(write_options=SYNCHRONOUS)
virtual_device = SensorImit()
coord = virtual_device.geo()
point = Point("environment") \
.tag("device", device_id) \
.tag("TemperatureSensor", "virtual_bme280") \
.tag("HumiditySensor", "virtual_bme280") \
.tag("PressureSensor", "virtual_bme280") \
.field("Temperature", virtual_device.generate_measurement()) \
.field("Humidity", virtual_device.generate_measurement()) \
.field("Pressure", virtual_device.generate_measurement()) \
.field("Lat", coord['latitude']) \
.field("Lon", coord['longitude']) \
.time(datetime.utcnow())
print(f"Writing: {point.to_line_protocol()}")
client_response = write_api.write(bucket=config.get('APP', 'INFLUX_BUCKET'), record=point)
# write() returns None on success
if client_response is None:
# TODO Maybe also return the data that was written
return device_id
# Return None on failure
return None
def get_measurements(query):
influxdb_client = InfluxDBClient(url=config.get('APP', 'INFLUX_URL'),
token=config.get('APP', 'INFLUX_TOKEN'),
org=config.get('APP', 'INFLUX_ORG'))
query_api = QueryApi(influxdb_client)
result = query_api.query_csv(query,
dialect=Dialect(
header=True,
delimiter=",",
comment_prefix="#",
annotations=['group', 'datatype', 'default'],
date_time_format="RFC3339"))
response = []
for row in result:
#response += (',').join(row) + ('\n')
response.append(row)
return response
#@app.route('/api/devices/<string:device_id>', methods=['GET', 'POST'])
def api_get_device(device_id):
if request.method == "OPTIONS": # CORS preflight
return _build_cors_preflight_response()
return _corsify_actual_response(jsonify(devices.get_device(device_id)))
# take the DB query from here
# device_filter = f'r.device == "testdevice"'
# query = f'from(bucket: "{config.get("APP", "INFLUX_BUCKET")}") ' \
# f'|> range(start: 0, stop: now()) ' \
# f'|> filter(fn: (r) => {device_filter} and r._field == "Temperature")' \
# '|> sort(columns: ["_time"]) '
# # #r._measurement == "environment" and
# data = get_measurements(query)
# build the lists of measurement values and timestamps
# time_index = data[3].index('_time')
# meas_index = data[3].index('_value')
# time = list()
# meas = list()
# for l in data:
# print(l)
# for i in range(len(data[4:-1])):
# time.append(data[4+i][time_index])
# meas.append(data[4+i][meas_index])
# print(time)
# print(meas)
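
# Illustrative sketch (added for illustration, not part of the original module):
# a possible end-to-end run from elsewhere in the package, assuming config.ini
# points at a reachable InfluxDB instance with the buckets used above.
# device_id = create_device()
# if device_id:
#     write_measurements(device_id)
#     print(get_device(device_id))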
|
olegtemirbulatov/sensors
|
sensorslist/devices.py
|
devices.py
|
py
| 7,382 |
python
|
en
|
code
| 0 |
github-code
|
50
|
23999549107
|
bl_info = {
"name": "Bizualizer",
"description": "Create a simple vizualizer for audio",
"author": "doakey3",
"version": (1, 0, 3),
"blender": (2, 7, 8),
"wiki_url": "https://github.com/doakey3/Bizualizer",
"tracker_url": "https://github.com/doakey3/Bizualizer/issues",
"category": "Animation",
"location": "Properties > Scene"}
import bpy
import math
import ntpath
import time
import sys
class BizualizerUI(bpy.types.Panel):
bl_space_type = "PROPERTIES"
bl_region_type = "WINDOW"
bl_label = "Bizualizer"
bl_context = "scene"
def draw(self, context):
layout = self.layout
scene = bpy.context.scene
row = layout.row()
row.prop(scene,'bz_audiofile',icon="SOUND")
row = layout.row()
row.prop(scene,'bz_audio_channel')
row = layout.row()
row.operator("sequencerextra.bz_audio_to_sequencer",
icon="SEQ_SEQUENCER")
row.operator("sequencerextra.bz_audio_remove",icon="CANCEL")
row = layout.row()
row.prop(scene,'bz_bar_count')
row.prop(scene,'bz_bar_width')
row = layout.row()
row.prop(scene,'bz_amplitude')
row.prop(scene,'bz_spacing')
row = layout.row()
split = row.split()
col_a = split.column(align=True)
col_a.prop(scene,'bz_use_radial')
col_b = split.column(align=True)
col_b.prop(scene,'bz_radius')
if scene.bz_use_radial:
col_b.enabled = True
else:
col_b.enabled = False
row = layout.row()
row.operator("object.bz_generate",icon="RADIO")
class AudioToVSE(bpy.types.Operator):
bl_idname = "sequencerextra.bz_audio_to_sequencer"
bl_label = "Add Audio to VSE"
bl_description = "Adds the audio file to the VSE"
@classmethod
def poll(self, context):
scene = context.scene
if scene.bz_audiofile == '':
return False
else:
return True
def execute(self, context):
scene = context.scene
audiofile = bpy.path.abspath(scene.bz_audiofile)
name = ntpath.basename(audiofile)
chan = scene.bz_audio_channel
start = 1
if not scene.sequence_editor:
scene.sequence_editor_create()
sound_strip = scene.sequence_editor.sequences.new_sound(
'bz_' + name,audiofile,chan,start)
return {'FINISHED'}
class RemoveBZAudio(bpy.types.Operator):
bl_idname = "sequencerextra.bz_audio_remove"
bl_label = "Remove Audio"
bl_description = "Adds the audio file to the VSE"
@classmethod
def poll(self, context):
scene = context.scene
if scene.bz_audiofile == '':
return False
else:
return True
def execute(self, context):
scene = context.scene
audiofile = bpy.path.abspath(scene.bz_audiofile)
name = ntpath.basename(audiofile)
all_strips = list(sorted(
bpy.context.scene.sequence_editor.sequences_all,
key=lambda x: x.frame_start))
bpy.ops.sequencer.select_all(action='DESELECT')
count = 0
for strip in all_strips:
if strip.name.startswith('bz_' + name):
strip.select = True
bpy.ops.sequencer.delete()
return {'FINISHED'}
class GenerateVizualizer(bpy.types.Operator):
bl_idname = "object.bz_generate"
bl_label = "(re)Generate Vizualizer"
bl_description = "Generates visualizer bars and animation"
@classmethod
def poll(self, context):
scene = context.scene
if scene.bz_audiofile == '':
return False
else:
return True
def execute(self, context):
scene = context.scene
scene.frame_current = 1
bar_count = scene.bz_bar_count
bar_width = scene.bz_bar_width
amplitude = scene.bz_amplitude
spacing = scene.bz_spacing
radius = scene.bz_radius
audiofile = bpy.path.abspath(scene.bz_audiofile)
digits = str(len(str(bar_count)))
noteStep = 120.0/bar_count
a = 2**(1.0/12.0)
l = 0.0
h = 16.0
bpy.ops.object.select_all(action='DESELECT')
#Remove any visualizer bars in the scene
count = 0
while count < len(scene.objects):
if scene.objects[count].name.startswith('bz_bar'):
scene.objects[count].select = True
bpy.ops.object.delete()
else:
count += 1
wm = context.window_manager
wm.progress_begin(0, 100.0)
context.area.type = 'GRAPH_EDITOR'
for i in range(0, bar_count):
            # Add a plane with its origin at the center
name = 'bz_bar' + (("%0" + digits + "d") % i)
mesh = bpy.data.meshes.new(name)
bar = bpy.data.objects.new(name,mesh)
scene.objects.link(bar)
bar.select = True
scene.objects.active = bar
verts = [(-1,2,0), (1,2,0), (1,0,0), (-1,0,0)]
faces = [(3,2,1,0)]
mesh.from_pydata(verts,[],faces)
mesh.update()
loc = [0.0, 0.0, 0.0]
#If radial, rotate the bar around an angle
if scene.bz_use_radial:
angle = -2 * i * math.pi/bar_count
bar.rotation_euler[2] = angle
loc[0] = -math.sin(angle) * radius
loc[1] = math.cos(angle) * radius
else:
loc[0] = (i * spacing) - ((bar_count * spacing)/2)
#Set the bar's current location
bar.location = (loc[0], loc[1], loc[2])
#Scale the plane on x and y axis
bar.scale.x = bar_width
bar.scale.y = amplitude
bpy.ops.object.transform_apply(
location=False, rotation=False, scale=True)
#Insert a scaling keyframe and lock the x and z axis
bpy.ops.anim.keyframe_insert_menu(type='Scaling')
bar.animation_data.action.fcurves[0].lock = True
bar.animation_data.action.fcurves[2].lock = True
l = h
h = l*(a**noteStep)
bpy.ops.graph.sound_bake(
filepath=audiofile, low=(l),high=(h))
active = bpy.context.active_object
active.animation_data.action.fcurves[1].lock = True
bar.select = False
progress = 100 * (i/bar_count)
wm.progress_update(progress)
update_progress("Generating Vizualizer", progress/100.0)
wm.progress_end()
update_progress("Generating Vizualizer", 1)
context.area.type = 'PROPERTIES'
scene.objects.active = None
return {'FINISHED'}
def update_progress(job_title, progress):
length = 20 # modify this to change the length
block = int(round(length*progress))
msg = "\r{0}: [{1}] {2}%".format(job_title,
"#"*block + "-"*(length-block), round(progress*100, 2))
if progress >= 1: msg += " DONE\r\n"
sys.stdout.write(msg)
sys.stdout.flush()
def initprop():
bpy.types.Scene.bz_audiofile = bpy.props.StringProperty(
name = "Audio File",
description = "Define path of the audio file",
subtype = 'FILE_PATH',
)
bpy.types.Scene.bz_audio_channel = bpy.props.IntProperty(
name = "Audio Channel",
description="Channel where audio will be added",
default=1,
min=1)
bpy.types.Scene.bz_bar_count = bpy.props.IntProperty(
name = "Bar Count",
description="The number of bars to make",
default=64,
min=1)
bpy.types.Scene.bz_bar_width = bpy.props.FloatProperty(
name = "Bar Width",
description="The width of the bars",
default=0.8,
min=0)
bpy.types.Scene.bz_amplitude = bpy.props.FloatProperty(
name = "Amplitude",
description="Amplitude of visualizer bars",
default=24.0,
min=0)
bpy.types.Scene.bz_use_radial = bpy.props.BoolProperty(
name = "Use Radial",
description="Use a circular vizualizer",
default=False)
bpy.types.Scene.bz_radius = bpy.props.FloatProperty(
name = "Radius",
description="Radius of the radial vizualizer",
default=20,
min=0)
bpy.types.Scene.bz_spacing = bpy.props.FloatProperty(
name = "Spacing",
description="Spacing between bars",
default=2.25,
min=0)
def register():
bpy.utils.register_class(BizualizerUI)
bpy.utils.register_class(AudioToVSE)
bpy.utils.register_class(GenerateVizualizer)
bpy.utils.register_class(RemoveBZAudio)
initprop()
def unregister():
bpy.utils.unregister_class(BizualizerUI)
bpy.utils.unregister_class(AudioToVSE)
bpy.utils.unregister_class(RemoveBZAudio)
bpy.utils.unregister_class(GenerateVizualizer)
del bpy.types.Scene.bz_audiofile
del bpy.types.Scene.bz_bar_count
del bpy.types.Scene.bz_bar_width
del bpy.types.Scene.bz_amplitude
del bpy.types.Scene.bz_spacing
del bpy.types.Scene.bz_use_radial
del bpy.types.Scene.bz_radius
|
JT-a/blenderpython279
|
scripts/addons_extern/bizualizer.py
|
bizualizer.py
|
py
| 9,526 |
python
|
en
|
code
| 5 |
github-code
|
50
|
3723862412
|
import random
import time
class SF():
def regiun(self):
        '''Generate the first six digits of the ID number (region code)'''
        #the list holds the six-digit region prefixes of a few areas
first_list = ['362402','362421','362422','362423','362424','362425','362426','362427','362428','362429','362430','362432','110100','110101','110102','110103','110104','110105','110106','110107','110108','110109','110111']
first = random.choice(first_list)
return first
def year(self):
        '''Generate the birth year'''
        now = time.strftime('%Y')
        #1948 is the year the first-generation ID card was introduced; now-18 filters out birth years of people under 18
        second = random.randint(1948,int(now)-18)
        age = int(now) - second
return second
def month(self):
        '''Generate the month'''
        three = random.randint(1,12)
        #pad months below 10 with a leading 0
if three < 10:
three = '0' + str(three)
return three
else:
return three
def day(self):
        '''Generate the day'''
        four = random.randint(1,31)
        # pad days below 10 with a leading 0
if four < 10:
four = '0' + str(four)
return four
else:
return four
def randoms(self):
        '''Generate the last four digits of the ID number'''
        # zero-pad the sequence number so it is always four characters long
        # (boundary conditions fixed: the original ranges skipped 10 and 100)
        five = random.randint(1,9999)
        if five < 10:
            five = '000' + str(five)
            return five
        elif five < 100:
            five = '00' + str(five)
            return five
        elif five < 1000:
            five = '0' + str(five)
            return five
        else:
            return five
    #random ID card number (the last four digits are purely random, so no valid check digit is produced)
def sf(self):
first = self.regiun()
second = self.year()
three = self.month()
four = self.day()
last = self.randoms()
IDcard = str(first)+str(second)+str(three)+str(four)+str(last)
return IDcard
    #random name
def name(self):
xing = '赵钱孙李周吴郑王冯陈褚卫蒋沈韩杨朱秦尤许何吕施张孔曹严华金魏陶姜'
ming = '豫章故郡洪都新府星分翼轸地接衡庐襟三江而带五湖'
        X = random.choice(xing)  # randomly pick one surname character from xing
        M = ''.join(random.choice(ming) for i in range(2))  # pick two given-name characters from ming
n = X+M
return n
    #random mobile phone number
def phone(self):
headList = ["130", "131", "132", "133", "134", "135", "136", "137", "138", "139",
"147", "150", "151", "152", "153", "155", "156", "157", "158", "159",
"186", "187", "188", "189"]
return (random.choice(headList) + "".join(random.choice("0123456789") for i in range(8)))
    #random bank card number
def bankcard(self):
card_id = '62'
for i in range(17):
ran = str(random.randint(0, 9))
card_id += ran
return card_id
    #random e-mail address
def get_email(self):
email_suf = random.choice(
['@163.com', '@qq.com', '@126.com', '@sina.com', '@sina.cn', '@soho.com', '@yeah.com'])
phone = self.phone()
email = phone + email_suf
return email
if __name__ == '__main__':
a = SF()
print(a.get_email())
|
zguo0601/drg
|
common/sf_xm.py
|
sf_xm.py
|
py
| 3,303 |
python
|
en
|
code
| 0 |
github-code
|
50
|
15485978789
|
import json
import pytest
from gviz_data_table.table import Table
valid_schema = (
{'id':'age', 'type':int, 'label':'Age'},
{'id':'name', 'type':str, 'label':'Name'}
)
schema_missing_id = (
{'type':int},
{'name':'age', 'type':int}
)
bob = (18, 'Bob')
sally = (20, 'Sally')
def test_conditional():
import sys
if sys.version_info < (2, 7):
with pytest.raises(ImportError):
from collections import OrderedDict
def test_constructor():
table = Table()
assert list(table.schema.keys()) == []
assert table.rows == []
def test_invalid_options():
table = Table()
with pytest.raises(ValueError):
table.options = 1
with pytest.raises(ValueError):
table.options = [1, 2, 3]
def test_options():
table = Table()
table.options = dict(bar='baz')
assert table.options == {'bar':'baz'}
def test_missing_id():
with pytest.raises(TypeError):
Table(schema_missing_id)
def test_duplicate_column():
table = Table(valid_schema)
with pytest.raises(ValueError):
table.add_column('age', int)
def test_add_column():
table = Table()
table.add_column(**valid_schema[0])
table.add_column(**valid_schema[1])
assert table.schema['age'].id == "age"
assert table.schema['name'].type == str
with pytest.raises(TypeError):
table.add_column('height')
def test_add_column_with_existing_data():
table = Table(valid_schema)
table.append(bob)
with pytest.raises(ValueError):
table.add_column('size', str)
def test_insert_row_no_columns():
table = Table()
with pytest.raises(ValueError):
table.append(('Bob', ))
def test_insert_row():
table = Table(valid_schema)
table.append(bob)
row = table.rows.pop()
assert row['age'].value == 18
assert row['name'].value == 'Bob'
def test_with_label():
table = Table(valid_schema)
table.append(bob)
rows = table.rows
row = rows.pop()
assert row['name'].label is None
harry = (17, ('Harry', 'Big Man'))
table.append(harry)
row = rows.pop()
assert row['age'].value == 17
assert row['name'].value == 'Harry'
assert row['name'].label == 'Big Man'
def test_cell_options():
table = Table(valid_schema)
jack = [17, ('Jack', 'Beanstalk', dict(key='value'))]
table.append(jack)
row = table.rows.pop()
assert row['name'].options == {'key':'value'}
kate = [26, dict(value='Kate', options={'hair':'long'})]
table.append(kate)
row = table.rows.pop()
assert row['name'].value == 'Kate'
assert row['name'].label == None
assert row['name'].options == {'hair':'long'}
def test_insert_rows():
table = Table(valid_schema)
table.extend([bob, (20, 'Sally')])
rows = table.rows
row = rows.pop()
assert row['name'].value == 'Sally'
row = rows.pop()
assert row['age'].value == 18
def test_invalid_row():
table = Table(valid_schema)
with pytest.raises(ValueError):
table.append([1, 2, 3])
def test_dictionary_interface():
table = Table(options={'foo':'bar'})
expected = dict(rows=[], cols=[], p={'foo':'bar'})
assert dict(table) == expected
def test_encode():
table = Table()
expected = {"rows": [], "cols": []}
result = table.encode()
assert json.loads(result) == expected
def test_source():
table = Table()
google = DummyGoogleObject()
google.visualization = DummyGoogleObject()
google.visualization.Query = DummyGoogleObject()
result = eval(table.source())
expected = {"status": "OK", "table": {"rows": [], "cols": []}, "reqId": 0, "version": 0.6}
assert result == expected
class DummyGoogleObject(object):
"""
Just allows namespaces to mimic the Google API
"""
def setResponse(self, arg):
return arg
|
GoogleCloudPlatform/hellodashboard
|
gviz_data_table/tests/test_table.py
|
test_table.py
|
py
| 3,832 |
python
|
en
|
code
| 11 |
github-code
|
50
|
23230063846
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 16 15:16:29 2019
@author: Andy
"""
import matplotlib.pyplot as plt
import numpy as np
im_path = '..\\..\\EXPERIMENTS\\Italy\\data\\adsa\\20190701_1k2f_60c_volume_ref.bmp'
area = 5.19/3.6 * 7.58/3.6 # mm^2
im = plt.imread(im_path)
im_bw = im[:,:,0] == 0
plt.imshow(im_bw*255)
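# Estimate the drop volume as a solid of revolution: each image row is treated as a
# 1-pixel-thick circular disc whose diameter equals the number of dark pixels in that row
# (this assumes the drop is axisymmetric about the vertical axis).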
vol_pix = 0
for r in range(im_bw.shape[0]):
w_pix = sum(im_bw[r,:])
vol_pix += np.pi*(w_pix/2)**2
w_im_mm = 7.58/3.6 # width of the image in mm measured in powerpoint
w_im_pix = im.shape[1] # width of image in pixels
pix2mm = w_im_mm / w_im_pix
vol_uL = vol_pix*pix2mm**3
print('volume of drop is %f uL.' % vol_uL)
# compute drop area
num_white = np.sum(im_bw)
num_tot = im_bw.shape[0] * im_bw.shape[1]
num_black = (num_tot - num_white)
drop_area = area*num_black/num_tot
print("drop area = %f mm^2" % drop_area)
|
andylitalo/g-adsa
|
src/util_clicking/compute_area.py
|
compute_area.py
|
py
| 852 |
python
|
en
|
code
| 0 |
github-code
|
50
|
27005882350
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.api as sm
from numpy import exp
from arch import arch_model
from sklearn.metrics import mean_squared_error
from statsmodels.tsa.arima.model import ARIMA
from scipy import stats
from datetime import datetime
import warnings
import yfinance as yf
warnings.filterwarnings("ignore")
def LB(ts, lags=[12], g=0):
result = sm.stats.diagnostic.acorr_ljungbox(ts, lags=lags, model_df=g, return_df=True)
return result.iloc[-1]['lb_pvalue']
def LBdf(ts, lags=[12], g=0):
result = sm.stats.diagnostic.acorr_ljungbox(ts, lags=lags, model_df=g, return_df=True)
return result
def ACF(ts, lags=24, title='ACF'):
sm.graphics.tsa.plot_acf(ts, auto_ylims=True, lags=lags, title=title)
def PACF(ts, lags=24, title='PACF'):
sm.graphics.tsa.plot_pacf(ts, auto_ylims=True, lags=lags, title=title)
def ADFT(ts, alpha=0.05, l=12, caption='The time series'):
from statsmodels.tsa.stattools import adfuller
result = adfuller(ts, maxlag=l)
print('*************************')
print('ADF Test on stationarity')
print(f'ADF Statistic: {result[0]}')
print(f'p-value: {result[1]}')
if result[1] <= alpha:
print(f'{caption} is stationary.')
else:
print(f'{caption} is not stationary.')
return
def check_series(rt, alpha=0.05, caption=None):
ADFT(rt)
LB = sm.stats.diagnostic.acorr_ljungbox(rt, lags=[12,24], return_df=True)
print('*************************')
print(f'Ljung-Box Test on Serial Correlation - {caption}')
print(LB)
if (LB['lb_pvalue']>alpha).sum() > 0:
print('H0 is not rejected.')
print('There is no serial correlation.')
else:
print('H0 is rejected')
print('There is serial correlation.')
at = rt-np.mean(rt)
print('*************************')
print(f'Ljung-Box Test on ARCH effect - {caption}')
LB2 = sm.stats.diagnostic.acorr_ljungbox(at**2, lags=[12,24], return_df=True)
print(LB2)
if (LB2['lb_pvalue']>alpha).sum() > 0:
print('H0 is not rejected.')
print('There is no ARCH effect.')
else:
print('H0 is rejected')
print('There is ARCH effect.')
return
def arma_grid_aic(X, mp=5, d=0, mq=5, caption='rt'):
target = []
for k in range(d+1):
for i in range(mp):
for j in range(mq):
print(f'Calculating...p={i},d={k},q={j}')
model = ARIMA(X, order=(i,k,j))
model_fit = model.fit()
QM = LB(model_fit.resid, g=i+j)
if QM > 0.05:
target.append([(i,k,j), model_fit.aic, model_fit.bic])
tdf = pd.DataFrame(target, columns=['order','aic','bic']).sort_values(by='aic')
try:
print(f'{tdf.order.iloc[0]} is the best combination for {caption}')
return tdf, tdf.order.iloc[0]
except:
print('Cannot find appropriate model')
return tdf, 'N/A'
df = yf.Ticker('0005.hk').history(period='max')
# dfw = yf.Ticker('0005.hk').history(period='max', interval='1wk').dropna()
# dfm = yf.Ticker('0005.hk').history(period='max', interval='1mo').dropna()
df = df[df.Volume!=0].dropna()
# df['R_Open'] = df['Open'].pct_change()
df['R_High'] = df['High'].pct_change()
df['R_Low'] = df['Low'].pct_change()
df['R_Close'] = df['Close'].pct_change()
# df['r_Open'] = np.log(df['R_Open']+1)
df['r_High'] = np.log(df['R_High']+1)
df['r_Low'] = np.log(df['R_Low']+1)
df['r_Close'] = np.log(df['R_Close']+1)
n = len(df)
c = df.index.get_loc('2016').start
train_high, test_high = df['r_High'][1:c], df['r_High'][c:]
order_high = (6,0,0)
mh = ARIMA(train_high, order=order_high).fit()
print(mh.summary())
print(LB(mh.resid, [10], g=sum(order_high)))
print(LB(mh.resid**2, [10], g=sum(order_high)))
at_h = mh.resid
mh_garch = arch_model(at_h, mean='Zero', vol='GARCH', p=1, q=1, dist='Normal').fit()
print(mh_garch.summary())
print(LB(mh_garch.std_resid, [20]))
print(LB(mh_garch.std_resid**2, [20]))
train_low, test_low = df['r_Low'][1:c], df['r_Low'][c:]
order_low = (2,0,0)
ml = ARIMA(train_low, order=order_low).fit()
print(ml.summary())
print(LB(ml.resid, [10], g=sum(order_low)))
print(LB(ml.resid**2, [10], g=sum(order_low)))
at_l = ml.resid
ml_garch = arch_model(at_l, mean='Zero', vol='GARCH', p=1, q=1, dist='Normal').fit()
print(ml_garch.summary())
print(LB(ml_garch.std_resid, [20]))
print(LB(ml_garch.std_resid**2, [20]))
train_close, test_close = df['r_Close'][1:c], df['r_Close'][c:]
order_close = ([1,4,9,11],0,0)
mc = ARIMA(train_close, order=order_close).fit()
print(mc.summary())
print(LB(mc.resid, [10], g=4))
print(LB(mc.resid**2, [10], g=4))
at_c = mc.resid
mc_garch = arch_model(at_c, mean='Zero', vol='GARCH', p=1, q=1, dist='Normal').fit()
print(mc_garch.summary())
print(LB(mc_garch.std_resid, [20]))
print(LB(mc_garch.std_resid**2, [20]))
rh = []
rh_se = []
rl = []
rl_se = []
rc = []
rc_se = []
c1 = c
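# Expanding-window, one-step-ahead forecasts: for every test day the ARIMA mean model and a
# zero-mean GARCH(1,1) on its residuals are refit on all data up to the previous day, and the
# predicted log-return and conditional standard deviation are stored for High, Low and Close.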
while c1 < n:
# predicting High
print(f'Forecasting {df.index[c1]}')
dh = df['r_High'][1:c1]
model_high = ARIMA(dh, order=order_high).fit()
rh.append(model_high.get_forecast(1).predicted_mean.iloc[0])
model_high_garch = arch_model(model_high.resid, mean='Zero', vol='GARCH', p=1, q=1, dist='Normal').fit(update_freq=0, disp='off')
rh_se.append(np.sqrt(model_high_garch.forecast().variance['h.1'].iloc[-1]))
# predicting Low
dl = df['r_Low'][1:c1]
model_low = ARIMA(dl, order=order_low).fit()
rl.append(model_low.get_forecast(1).predicted_mean.iloc[0])
model_low_garch = arch_model(model_low.resid, mean='Zero', vol='GARCH', p=1, q=1, dist='Normal').fit(update_freq=0, disp='off')
rl_se.append(np.sqrt(model_low_garch.forecast().variance['h.1'].iloc[-1]))
# predicting Close
dc = df['r_Close'][1:c1]
model_close = ARIMA(dc, order=order_close).fit()
rc.append(model_close.get_forecast(1).predicted_mean.iloc[0])
model_close_garch = arch_model(model_close.resid, mean='Zero', vol='GARCH', p=1, q=1, dist='Normal').fit(update_freq=0, disp='off')
rc_se.append(np.sqrt(model_close_garch.forecast().variance['h.1'].iloc[-1]))
c1 += 1
df_sim = df[df.index.get_loc('2016').start:]
df_sim['rhat_High'] = rh
df_sim['rhat_High_se'] = rh_se
df_sim['rhat_Low'] = rl
df_sim['rhat_Low_se'] = rl_se
df_sim['rhat_Close'] = rc
df_sim['rhat_Close_se'] = rc_se
df_sim = pd.concat([df_sim, df.loc['2015-12-31'].to_frame().T]).sort_index()
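# Turn the log-return forecasts back into price forecasts: Phat_t = P_{t-1} * exp(rhat_t) for each
# of High, Low and Close, with approximate 95% bands from exp(rhat_t +/- 1.96 * rhat_se_t).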
df_sim['Phat_High'] = df_sim['High'].shift(1)*np.exp(df_sim['rhat_High'])
df_sim['Phat_Low'] = df_sim['Low'].shift(1)*np.exp(df_sim['rhat_Low'])
df_sim['Phat_Close'] = df_sim['Close'].shift(1)*np.exp(df_sim['rhat_Close'])
df_sim['Phat_High_upper'] = df_sim['High'].shift(1)*np.exp(df_sim['rhat_High']+1.96*df_sim['rhat_High_se'])
df_sim['Phat_High_lower'] = df_sim['High'].shift(1)*np.exp(df_sim['rhat_High']-1.96*df_sim['rhat_High_se'])
df_sim['Phat_Low_upper'] = df_sim['Low'].shift(1)*np.exp(df_sim['rhat_Low']+1.96*df_sim['rhat_Low_se'])
df_sim['Phat_Low_lower'] = df_sim['Low'].shift(1)*np.exp(df_sim['rhat_Low']-1.96*df_sim['rhat_Low_se'])
df_sim['Phat_Close_upper'] = df_sim['Close'].shift(1)*np.exp(df_sim['rhat_Close']+1.96*df_sim['rhat_Close_se'])
df_sim['Phat_Close_lower'] = df_sim['Close'].shift(1)*np.exp(df_sim['rhat_Close']-1.96*df_sim['rhat_Close_se'])
d = df_sim[1:]
P_High_percent = sum((d.High<=d.Phat_High_upper) & (d.High>=d.Phat_High_lower))/len(d)*100
P_Low_percent = sum((d.Low<=d.Phat_Low_upper) & (d.Low>=d.Phat_Low_lower))/len(d)*100
P_Close_percent = sum((d.Close<=d.Phat_Close_upper) & (d.Close>=d.Phat_Close_lower))/len(d)*100
# Strategy A
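# Order is +1 (buy) / -1 (sell) from the sign of the shifted close-return forecast; the order is a
# limit at the previous close, counted as filled only if the day's Low (buy) or High (sell) reaches
# that price; Cash, Position and Balance then track the cumulative result marked at the daily close.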
sa = df_sim[['High','Low','Close','r_Close','rhat_Close']][1:]
sa['Order'] = ((sa['rhat_Close']>0).astype(int)-(sa['rhat_Close']<0).astype(int)).shift(1)
sa['Order_Price']=sa['Close'].shift(1)
sa['Order_Success']=0
bdf = sa[sa.Order==1]
sa.loc[sa.Order==1, 'Order_Success'] = (bdf['Order_Price']>=bdf['Low']).astype(int)
sdf = sa[sa.Order==-1]
sa.loc[sa.Order==-1, 'Order_Success'] = (sdf['Order_Price']<=sdf['High']).astype(int)
sa['Trade_Price']=0
sa.loc[sa.Order_Success==1, 'Trade_Price'] = sa.loc[sa.Order_Success==1, ['High','Order_Price']].min(axis=1)
sa['Transaction'] = (-sa.Order*sa.Trade_Price).fillna(0)
sa['Cash'] = sa.Transaction.cumsum().fillna(0)
sa['Position'] = (sa.Order*sa.Order_Success).cumsum().fillna(0)
sa['Balance'] = sa.Cash+sa.Position*sa.Close
# fig, ax = plt.subplots()
# ax3 = ax.twinx()
# sa.Close.plot(ax=ax, style='b-')
# sa.Balance.plot(ax=ax3, style='r-')
# # Plotting Pt
# Pt.plot(title='Stock price over time', ax=ax[0])
# pt = np.log(Pt)
# Rt = Pt.pct_change().dropna()
# Rt.plot(title='Return over time', ax=ax[1])
# rt = pt.diff().dropna()
# rt.plot(title='r(t) over time', ax=ax[2])
# Plotting
# ra_HO = df['High']/df['Open']
# ra_LO = df['Low']/df['Open']
# ra_CO = df['Close']/df['Open']
# check_series(ra_HO[c1:c], caption='Ratio High/Open')
# check_series(ra_LO[c1:c], caption='Ratio Low/Open')
# check_series(ra_CO[c1:c], caption='Ratio Close/Open')
# ACF(ra_HO.iloc[:c])
# PACF(ra_HO.iloc[:c])
# ACF(ra_HO.diff().dropna().iloc[:c])
# PACF(ra_HO.diff().dropna().iloc[:c])
# ACF(ra_LO.iloc[:c])
# PACF(ra_LO.iloc[:c])
# ACF(ra_LO.diff().dropna().iloc[:c])
# PACF(ra_LO.diff().dropna().iloc[:c])
# ACF(ra_CO.iloc[:c])
# PACF(ra_CO.iloc[:c])
# model_rho = ARIMA(ra_HO, order=(0,1,1)).fit()
# # model_rho = ARIMA(ra_HO, order=(1,1,3)).fit()
# # LB(model_rho.resid, g=4)
# LB(model_rho.resid, g=1)
# LB(model_rho.resid**2)
# model_rlo = ARIMA(ra_LO, order=([1,3,4],1,1)).fit()
# LB(model_rlo.resid, g=4)
# LB(model_rlo.resid**2)
# model_rco = ARIMA(ra_CO[:c], order=([1,4,5,9,11],0,0)).fit()
# LB(model_rco.resid, g=5)
# LB(model_rco.resid**2)
# check_series(df['r_Open'][1:], caption='r_Open')
# check_series(df['r_High'][1:], caption='r_High')
# check_series(df['r_Low'][1:], caption='r_Low')
# check_series(df['r_Close'][1:], caption='r_Close')
# best_high = arma_grid_aic(df['r_High'][1:c], 7, 7, caption='r_High')
# best_low = arma_grid_aic(df['r_Low'][1:c], 7, 7, caption='r_Low')
# best_open = arma_grid_aic(df['r_Open'][1:c], 7, 7, caption='r_Open')
# best_close = arma_grid_aic(df['r_Close'][1:c], 7, 7, caption='r_Close')
# model_BH = ARIMA(df['r_High'][1:c], order=best_high).fit()
# LB(model_BH.resid, [15], g=sum(best_high))
# LB(model_BH.resid**2, [15], g=sum(best_high))
# model_BL = ARIMA(df['r_Low'][1:c], order=best_low).fit()
# LB(model_BL.resid, [15], g=sum(best_low))
# LB(model_BL.resid**2, [15], g=sum(best_low))
# model_BO = ARIMA(df['r_Open'][1:c], order=best_open).fit()
# LB(model_BO.resid, [15], g=sum(best_open))
# LB(model_BO.resid**2, [15], g=sum(best_open))
# model_BC = ARIMA(df['r_Close'][1:c], order=best_close).fit()
# LB(model_BC.resid, [15], g=sum(best_close))
# rh = []
# rh_se = []
# rl = []
# rl_se = []
# c1 = c
# while c1 < n:
# # predicting High
# print(f'Forecasing {df.index[c]}')
# dh = ra_HO[1:c1]
# model_high = ARIMA(dh, order=(0,1,1)).fit()
# rh.append(model_high.get_forecast(1).predicted_mean.iloc[0])
# rh_se.append(model_high.get_forecast(1).se_mean.iloc[0])
# # p_high_hat = df['High'][c-1]*exp(r_high_hat)
# # ph.append(p_high_hat)
# # predicting Low
# dl = ra_LO[1:c1]
# model_low = ARIMA(dl, order=(0,1,1)).fit()
# rl.append(model_low.get_forecast(1).predicted_mean.iloc[0])
# rl_se.append(model_low.get_forecast(1).se_mean.iloc[0])
# # p_low_hat = df['Low'][c-1]*exp(r_low_hat)
# # pl.append(p_low_hat)
# c += 1
# df_sim = df[df.index.get_loc('2016').start:]
# df_sim['Predicted_raHO'] = rh
# df_sim['Predicted_raHO_se'] = rh_se
# df_sim['Predicted_raLO'] = rl
# df_sim['Predicted_raLO_se'] = rl_se
# df_sim = pd.concat([df_sim, df.loc['2015-12-31'].to_frame().T]).sort_index()
|
cwk0507/MSDM
|
MSDM5053/Project/Working/HSBC (2).py
|
HSBC (2).py
|
py
| 12,097 |
python
|
en
|
code
| 0 |
github-code
|
50
|
40991595889
|
"""
Calculate the z-score
"""
__author__ = "Matt DeSaix"
# libraries
import numpy as np
from WGSassign import zscore_cy
def AD_summary(L, AD, i, n_threshold, single_read_threshold):
AD_GL_dict = {}
for s in np.arange(AD.shape[0]):
# The key is the allele depth combination
key = tuple([AD[s,2*i], AD[s,2*i+1]])
if key not in AD_GL_dict.keys():
AD_GL_dict[key] = [[L[s,2*i], L[s,2*i+1], (1 - L[s,2*i] - L[s,2*i+1])]]
else:
AD_GL_dict[key].append([L[s,2*i], L[s,2*i+1], (1 - L[s,2*i] - L[s,2*i+1])])
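  # AD_GL_dict groups the per-site genotype likelihoods by their (ref depth, alt depth) pair;
  # AD_summary_dict keeps, for each pair, the number of sites and the mean likelihood vector.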
AD_summary_dict = {}
for key, value in AD_GL_dict.items():
AD_summary_dict[key] = [len(value), np.mean(AD_GL_dict[key], axis = 0)]
AD_summary_array = np.empty((len(AD_summary_dict.keys()), 4), dtype = np.int32)
for j in range(len(list(AD_summary_dict.keys()))):
key = list(AD_summary_dict.keys())[j]
a1 = list(list(AD_summary_dict.keys())[j])[0]
a2 = list(list(AD_summary_dict.keys())[j])[1]
n_loci = AD_summary_dict[key][0]
AD_summary_array[j,:] = [a1, a2, a1+a2, n_loci]
if single_read_threshold:
AD_filtered = AD_summary_array[AD_summary_array[:,2] == 1]
else:
AD_filtered = AD_summary_array[(AD_summary_array[:,3] > n_threshold) & (AD_summary_array[:,2] != 0)]
assert( AD_filtered.shape[0] != 0), "No loci were kept! Too stringent filtering?"
assert( AD_filtered.shape[0] != 1), "Not enough loci were kept! Too stringent filtering?"
dict_sum = AD_filtered[:,0] + AD_filtered[:,1]
dl, dl_counts = np.unique(dict_sum, return_counts=True)
dl_keep = dl[dl < dl_counts]
AD_array = AD_filtered[np.in1d(AD_filtered[:,2], dl_keep)]
return AD_summary_dict, AD_array
def get_L_keep(L, AD, AD_summary_dict, AD_array, i):
L_keep = np.empty(AD.shape[0], dtype = np.int32)
for s in np.arange(AD.shape[0]):
Ar = AD[s,2*i]
Aa = AD[s,2*i+1]
observed_A = len(np.argwhere((AD_array[:,0] == Ar) & (AD_array[:,1] == Aa)))
if observed_A != 1:
L_keep[s] = 0
else:
key = tuple([AD[s,2*i], AD[s,2*i+1]])
max_id = np.argwhere(AD_summary_dict[key][1] == np.max(AD_summary_dict[key][1]))[0][0]
L_ind_full = [L[s,2*i], L[s,2*i+1], (1 - L[s,2*i] - L[s,2*i+1])]
if np.abs(AD_summary_dict[key][1][max_id] - L_ind_full[max_id]) > 0.01:
L_keep[s] = 0
else:
L_keep[s] = 1
L_keep_final = np.argwhere(L_keep == 1).reshape(-1).astype(np.int32)
loci_kept = L_keep_final.shape[0]
return L_keep_final, loci_kept
def get_factorials(AD_array, AD_summary_dict, e):
AD_factorial = np.zeros((AD_array.shape[0], 3), dtype = np.float32)
AD_like = np.zeros((AD_array.shape[0], 3), dtype = np.float32)
AD_index = np.zeros((np.max(AD_array[:,0])+1, np.max(AD_array[:,1])+1), dtype = np.int32)
for i in range(AD_factorial.shape[0]):
Ar = AD_array[i,0]
Aa = AD_array[i,1]
ad_index = np.argwhere((AD_array[:,0] == Ar) & (AD_array[:,1] == Aa))[0][0]
AD_index[Ar, Aa] = ad_index
Dl = Aa + Ar
ad_factorial = np.math.factorial(Dl) / (np.math.factorial(Aa)*np.math.factorial(Ar))
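    # Read-level likelihoods of observing (Ar ref, Aa alt) reads under each genotype with error rate e:
    # hom-ref ~ (1-e)^Ar * e^Aa, het ~ 0.5^Dl, hom-alt ~ (1-e)^Aa * e^Ar, each times the binomial coefficient.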
P_r_a0 = ad_factorial*((1.0-e)**Ar)*(e**Aa)
P_r_a1 = ad_factorial*((0.5)**Dl)
P_r_a2 = ad_factorial*((1.0-e)**Aa)*(e**Ar)
AD_factorial[i,:] = [P_r_a0, P_r_a1, P_r_a2]
    AD_like[i, :] = AD_summary_dict[tuple([Ar, Aa])][1]
return AD_factorial, AD_like, AD_index
def get_expected_W_l(L, L_keep, A, AD, AD_array, AD_factorial, AD_like, AD_index, t, i):
W_l_obs_array = np.zeros(L_keep.shape[0], dtype = np.float32)
W_l_array = np.zeros(L_keep.shape[0], dtype = np.float32)
zscore_cy.expected_W_l(L, L_keep, A, AD, AD_array, AD_factorial, AD_like, AD_index, t, i, W_l_obs_array, W_l_array)
# for s_index in range(L_keep.shape[0]):
# s = L_keep[s_index]
# A_sk = A[s,k]
# P_gl = [(1-A_sk)*(1-A_sk), 2*A_sk*(1-A_sk), A_sk*A_sk]
# f_gl = [L[s,2*i] * P_gl[0],L[s,2*i+1] * P_gl[1],(1-L[s,2*i]-L[s,2*i+1]) * P_gl[2]]
# f_gl_log = np.log(f_gl[0] + f_gl[1] + f_gl[2])
# W_l_obs_list[s_index] = f_gl_log
# # Getting the total depth
# Dl = AD[s,2*i] + AD[s,2*i+1]
# for Aa in np.arange(Dl+1):
# Ar = Dl - Aa
# ad_index = np.argwhere((AD_array[:,0] == Ar) & (AD_array[:,1] == Aa))[0][0]
# W_l[s_index] = W_l[s_index] + f_gl_log * P_gl[0] * AD_factorial[ad_index,0] * AD_like[ad_index,0]
# W_l[s_index] = W_l[s_index] + f_gl_log * P_gl[1] * AD_factorial[ad_index,1] * AD_like[ad_index,1]
# W_l[s_index] = W_l[s_index] + f_gl_log * P_gl[2] * AD_factorial[ad_index,2] * AD_like[ad_index,2]
W_l_obs = np.sum(W_l_obs_array, dtype=np.float32)
return W_l_obs, W_l_array
def get_var_W_l(L, L_keep, A, AD, AD_array, AD_factorial, AD_like, AD_index, W_l_array, t, i):
var_W_l_array = np.zeros(L_keep.shape[0], dtype = np.float32)
zscore_cy.variance_W_l(L, L_keep, A, AD, AD_array, AD_factorial, AD_like, AD_index, t, i, var_W_l_array, W_l_array)
# for s_index in range(L_keep.shape[0]):
# s = L_keep[s_index]
# A_sk = A[s,k]
# P_gl = [(1-A_sk)*(1-A_sk), 2*A_sk*(1-A_sk), A_sk*A_sk]
# f_gl = [L[s,2*i] * P_gl[0],L[s,2*i+1] * P_gl[1],(1-L[s,2*i]-L[s,2*i+1]) * P_gl[2]]
# f_gl_log = np.log(f_gl[0] + f_gl[1] + f_gl[2])
# # Getting the total depth
# Dl = AD[s,2*i] + AD[s,2*i+1]
# for Aa in np.arange(Dl+1):
# Ar = Dl - Aa
# ad_index = np.argwhere((AD_array[:,0] == Ar) & (AD_array[:,1] == Aa))[0][0]
# var_W_l[s_index] = var_W_l[s_index] + (W_l[s_index]-f_gl_log)**2 * P_gl[0] * AD_factorial[ad_index,0] * AD_like[ad_index,0]
# var_W_l[s_index] = var_W_l[s_index] + (W_l[s_index]-f_gl_log)**2 * P_gl[1] * AD_factorial[ad_index,1] * AD_like[ad_index,1]
# var_W_l[s_index] = var_W_l[s_index] + (W_l[s_index]-f_gl_log)**2 * P_gl[2] * AD_factorial[ad_index,2] * AD_like[ad_index,2]
return var_W_l_array
|
mgdesaix/WGSassign
|
WGSassign/zscore.py
|
zscore.py
|
py
| 5,775 |
python
|
en
|
code
| 3 |
github-code
|
50
|
31612041978
|
# The program encrypts and decrypts hard-coded messages
# AES encrypts 16-byte blocks, so the plaintext must be padded (dopelnienie_do16 function)
from Crypto.Cipher import AES
import codecs
def dopelnienie_do16(zmienna):
result = zmienna + (16-len(zmienna))*']'
return result
key = b"Sixteen byte key"
cipher = AES.new(key)
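# NOTE: with the legacy PyCrypto API used here, AES.new(key) without an explicit mode falls back to
# ECB; newer PyCryptodome requires a mode argument. Fine for this demo, not for real data.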
text1 = "dupa"
text2 = "siemanko"
text3 = "jacie"
text1 = dopelnienie_do16(text1)
text2 = dopelnienie_do16(text2)
text3 = dopelnienie_do16(text3)
szyfr1 = cipher.encrypt(text1)
szyfr2 = cipher.encrypt(text2)
szyfr3 = cipher.encrypt(text3)
print(szyfr1)
print(szyfr2)
print(szyfr3)
#we use the same key for decryption (symmetric-key algorithm)
plain1 = cipher.decrypt(szyfr1)
plain2 = cipher.decrypt(szyfr2)
plain3 = cipher.decrypt(szyfr3)
print(plain1)
print(plain2)
print(plain3)
|
KBITSecurity/Notes
|
Programming/Python/UsingScripts/AES.py
|
AES.py
|
py
| 828 |
python
|
pl
|
code
| 0 |
github-code
|
50
|
40108172990
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("TEST")
process.a = cms.ESSource("PoolDBESSource",
DBParameters = cms.PSet(
messageLevel = cms.untracked.int32(0),
authenticationPath = cms.untracked.string('.')
),
toGet = cms.VPSet(cms.PSet(
record = cms.string('PedestalsRcd'),
tag = cms.string('mytest'),
label = cms.untracked.string('lab3d')
), cms.PSet(
record = cms.string('PedestalsRcd'),
tag = cms.string('pedtag'),
label = cms.untracked.string('PEDPED')
), cms.PSet(
record = cms.string('anotherPedestalsRcd'),
tag = cms.string('anothermytest'),
label = cms.untracked.string('Three')
)),
connect = cms.string('sqlite_file:test.db')
)
process.source = cms.Source("EmptyIOVSource",
lastValue = cms.uint64(3),
timetype = cms.string('runnumber'),
firstValue = cms.uint64(1),
interval = cms.uint64(1)
)
process.prod = cms.EDAnalyzer("PedestalsByLabelAnalyzer")
process.p = cms.Path(process.prod)
|
cms-sw/cmssw
|
CondCore/ESSources/test/python/print_ped_bylabel_cfg.py
|
print_ped_bylabel_cfg.py
|
py
| 1,051 |
python
|
en
|
code
| 985 |
github-code
|
50
|
72770423834
|
import itertools
#given a list of numbers and a number k,
#print out the sum of any two numbers in the list = k
num = [10,15,3,7]
mylst = list(itertools.combinations(num,r=2))
print (mylst)
def to_k(lst, k):
    found = False
    for num1, num2 in lst:
        if num1 + num2 == k:
            print(f" found: {num1} + {num2} = {k}")
            found = True
    if not found:
        print(None)
if __name__ =="__main__":
print ("calling main")
to_k(mylst,17)
|
JieCMarshall/mylearning
|
Data_Alg_Exer/iterations.py
|
iterations.py
|
py
| 432 |
python
|
en
|
code
| 0 |
github-code
|
50
|
73221936154
|
import os
import torch
import torch.nn as nn
from transformers import Trainer
from typing import Optional
from transformers import AutoTokenizer
from video_chatgpt.model import VideoChatGPTLlamaForCausalLM
# from peft import PeftModel
from video_chatgpt.constants import *
def unwrap_model(model: nn.Module) -> nn.Module:
"""
Recursively unwraps a model from potential containers (as used in distributed training).
Args:
model (`torch.nn.Module`): The model to unwrap.
"""
# since there could be multiple levels of wrapping, unwrap recursively
if hasattr(model, "module"):
return unwrap_model(model.module)
else:
return model
class VideoChatGPTTrainer(Trainer):
def _save(self, output_dir: Optional[str] = None, state_dict=None):
if getattr(self.args, 'tune_mm_mlp_adapter', False):
# Save the model
_state_dict = state_dict
if _state_dict is None:
# Only save the model itself if we are using distributed training
model_to_save = unwrap_model(self.model)
_state_dict = model_to_save.state_dict()
## NEW CODE ##
try:
model = model_to_save
except:
model = self.model
# model.save_pretrained("50salads_finetuned_videochatgpt")
# tokenizer = AutoTokenizer.from_pretrained('mmaaz60/LLaVA-7B-Lightening-v1-1')
# model = VideoChatGPTLlamaForCausalLM.from_pretrained('mmaaz60/LLaVA-7B-Lightening-v1-1', low_cpu_mem_usage = True, torch_dtype =torch.float16)
# mm_use_vid_start_end = True
# # Add tokens to tokenizer
# tokenizer.add_tokens([DEFAULT_VIDEO_PATCH_TOKEN], special_tokens=True)
# if mm_use_vid_start_end:
# tokenizer.add_tokens([DEFAULT_VID_START_TOKEN, DEFAULT_VID_END_TOKEN], special_tokens=True)
# # Resize token embeddings of the model
# model.resize_token_embeddings(len(tokenizer))
# model = PeftModel.from_pretrained(model, '50salads_finetuned_videochatgpt')
# model = model.merge_and_unload()
# os.system('rm -r 50salads_finetuned_videochatgpt')
# model.save_pretrained("50salads_finetuned_videochatgpt")
## NEW CODE ##
model.save_pretrained("breakfast_finetuned_videochatgpt")
weight_to_save = {}
keys_to_match = ['positional_encodings', 'embed_tokens', 'norm', 'input_layernorm', 'post_attention_layernorm']
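            # NOTE: the key filter below is commented out, so every tensor in the state dict is
            # copied into weight_to_save, not just the layers listed in keys_to_match.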
for k, v in _state_dict.items():
# if any(key_match in k for key_match in keys_to_match):
weight_to_save[k] = v
current_folder = output_dir.split('/')[-1]
parent_folder = os.path.dirname(output_dir)
if current_folder.startswith('checkpoint-'):
mm_projector_folder = os.path.join(parent_folder, "model")
os.makedirs(mm_projector_folder, exist_ok=True)
torch.save(weight_to_save, os.path.join(mm_projector_folder, f'{current_folder}.bin'))
else:
torch.save(weight_to_save, os.path.join(output_dir, f'model.bin'), )
# super(VideoChatGPTTrainer, self)._save(output_dir, state_dict)
|
Muhammad4hmed/VideoLlama
|
video_chatgpt/train/llava_trainer.py
|
llava_trainer.py
|
py
| 3,312 |
python
|
en
|
code
| 0 |
github-code
|
50
|
34749491204
|
def dask_setup(worker):
import os
from cachetools import LRUCache
def get_classads():
fname = os.getenv("_CONDOR_JOB_AD")
if not fname:
return {}
d = {}
with open(fname) as fh:
for line in fh:
if "=" not in line:
continue
k, v = line.split("=", 1)
d[k.strip()] = v.strip().lstrip('"').strip('"')
return d
worker.classads = get_classads()
worker.tree_cache = LRUCache(75)
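    # Cache up to 75 opened trees per worker (presumably uproot/ROOT trees, keyed elsewhere in the
    # job); the metric registered below exposes the current cache size on the Dask dashboard.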
def numtreescached_metric(worker):
if hasattr(worker,"tree_cache"):
return len(list(worker.tree_cache.keys()))
return 0
worker.metrics["numtreescached"] = numtreescached_metric
try:
# Load some imports initially
import coffea.processor
import coffea.executor
except:
pass
|
mhl0116/cscbkg
|
cachepreload.py
|
cachepreload.py
|
py
| 875 |
python
|
en
|
code
| 0 |
github-code
|
50
|
5100305823
|
import os
def split_file(file_path, output_dir, chunk_size=50*1024*1024):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
file_name = os.path.basename(file_path)
file_size = os.path.getsize(file_path)
    num_chunks = (file_size + chunk_size - 1) // chunk_size  # ceiling division avoids a trailing empty chunk
with open(file_path, 'rb') as infile:
for i in range(num_chunks):
chunk_file_name = f"{file_name}.part{i+1}.txt"
chunk_file_path = os.path.join(output_dir, chunk_file_name)
with open(chunk_file_path, 'wb') as outfile:
outfile.write(infile.read(chunk_size))
print(f"Created chunk: {chunk_file_path}")
if __name__ == "__main__":
large_file_path = "../wikipedia-dump/cs-articles-utf8/cs-articles.txt"
output_directory = "export"
split_file(large_file_path, output_directory)
|
benhmoore/reduce-llm
|
preprocessing/seperator.py
|
seperator.py
|
py
| 843 |
python
|
en
|
code
| 0 |
github-code
|
50
|
74368632154
|
#coding=utf-8
# Command-line tool: batch conversion of tex files
import sys
import texf_topng
import os
def texf_topng_batch(filedir, den="200"):
if not os.path.isdir(filedir):
print("Need a dir with only tex files")
return
absfiledir = os.path.abspath(filedir)
os.chdir(absfiledir)
for root, dirs, files in os.walk(absfiledir):
for name in files:
try:
texf_topng.tex_framed_topng(name, den)
except Exception as e:
print(e)
argn = len(sys.argv)
if argn == 1:
print("Need a dir with only tex files.")
elif argn == 2:
texf_topng_batch(sys.argv[1])
elif argn == 3:
texf_topng_batch(sys.argv[1], sys.argv[2])
else:
print("Too many arguments.")
|
IshmaelHeathcliff/find-inline-formulae
|
ttpb.py
|
ttpb.py
|
py
| 744 |
python
|
en
|
code
| 0 |
github-code
|
50
|
38020566206
|
def busquedaLineal(lista,encontrar):
isInList = False
for elemento in lista:
if elemento == encontrar:
isInList = True
return isInList
listaEntrada = [2,12,34,5,11,59,4,3,1]
valorEncontrar = int(input('ingrese un número : '))
listaEntrada.sort()
print(busquedaLineal(listaEntrada, valorEncontrar))
import busquedabinaria as bi
import time
import random as rd
listaEntrada = []
for i in range (9990000):
listaEntrada.append(rd.randint(1,100000))
encontrar = 59
listaEntrada.sort()
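# The list is sorted once up front because binary search requires sorted input; the timings below
# compare a full linear scan against busquedabinaria's binary search on ~10 million elements.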
inicio = time.time()
busquedaLineal(listaEntrada, encontrar)
deltaLineal = time.time() - inicio
inicio = time.time()
bi.busquedaBinaria(listaEntrada, encontrar)
deltaBinario = time.time() - inicio
print(deltaBinario,deltaLineal)
|
weincoder/algoritmos202101
|
clases/algoritmos/busquedaLineal.py
|
busquedaLineal.py
|
py
| 747 |
python
|
es
|
code
| 1 |
github-code
|
50
|
19831926647
|
__author__ = 'Alessio'
from project import app
import logging.handlers
from logging import FileHandler, Formatter
from datetime import *
class Logging(logging.FileHandler):
@classmethod
def __init__(self,user_connect_=None):
self.user_connect = user_connect_
        self.application = app
@classmethod
def create_login_info(self,user,time,url):
self.logging = FileHandler('icollect_info.log')
self.logging.setLevel(logging.DEBUG)
self.logging.setFormatter(Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
self.application.logger.addHandler(self.logging)
create_dict_to_loggin_info = dict({'user_connect':user,'time':time,'url':url})
self.application.logger.info('Info LogIn' + ":" + str(create_dict_to_loggin_info))
@classmethod
def create_logout_info(self,user,time):
self.logging = FileHandler('icollect_info.log')
self.logging.setLevel(logging.DEBUG)
self.logging.setFormatter(Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
self.application.logger.addHandler(self.logging)
create_dict_to_loggin_info = dict({'user_connect':user,'time':time})
self.application.logger.info('Info Logout' + ":" + str(create_dict_to_loggin_info))
if __name__ == '__main__':
pass
|
Sirbin/icollectweb
|
project/Loggin_Debug.py
|
Loggin_Debug.py
|
py
| 1,347 |
python
|
en
|
code
| 0 |
github-code
|
50
|
24975343920
|
import numpy as np
class resultsStats:
def __init__(self, collisionTable,lastHops ,numNodes,seedList,time,seed = 256,threshold = 0.9):
self.threshold = threshold
self.seed = seed
self.time = sum(time[0:self.seed])/len(time[0:self.seed])
self.maxHop = max(lastHops[0:self.seed])
if(type(collisionTable) == list):
self.collsionTable = self.tansform_to_dic(collisionTable)
else:
self.collsionTable = self.lowerBoundDiameter(collisionTable)
self.completeSeedList = seedList
self.numNodes = numNodes
self.totalCouples = self.totalCouples()
self.couplesPercentage = self.totalCouplesPercentage()
self.avgDistance = self.avgDistance()
self.lowerBoundDiameter = len(self.collsionTable.keys())-1
self.effectiveDiameter = self.effectiveDiameter()
def tansform_to_dic(self,m):
new_matrix = [[m[j][i] for j in range(len(m))] for i in range(len(m[0]))]
dic = {}
for i in range(0,len(new_matrix)):
dic[str(i)] = new_matrix[i][:]
return(dic)
def lowerBoundDiameter(self,collisionTable):
newCollisionTable = {}
for hop in range(0,self.maxHop+1):
newCollisionTable[str(hop)] = collisionTable[str(hop)]
return(newCollisionTable)
def totalCouples(self):
overallCollsions = self.collsionTable[str(self.maxHop)][0:self.seed]
numTotalCollisions = sum(overallCollsions)
totalCouplesReachable = numTotalCollisions * self.numNodes / self.seed
return (totalCouplesReachable)
def totalCouplesPercentage(self):
couplesPercentage = self.totalCouples * self.threshold
return (couplesPercentage)
def avgDistance(self):
sumAvg = 0
for hop in range(0,self.maxHop+1):
collisions = self.collsionTable[str(hop)][0:self.seed]
totalCollisions = sum(collisions)
if(hop != 0):
previousHop = hop - 1
previousCollisions = self.collsionTable[str(previousHop)][0:self.seed]
previousTotalCollisions = sum(previousCollisions)
couplesReachable = totalCollisions * self.numNodes / self.seed
previousCouplesReachable = previousTotalCollisions * self.numNodes / self.seed
couplesReachableForHop = couplesReachable - previousCouplesReachable
sumAvg += hop*couplesReachableForHop
return(sumAvg/self.totalCouples)
def interpolate(self,y0,y1,y):
if(y1-y0 == 0):
return(0)
return (y - y0) / (y1 - y0)
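    # Effective diameter: the (linearly interpolated) hop count at which the estimated number of
    # reachable pairs first covers `threshold` (default 90%) of all reachable pairs.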
def effectiveDiameter(self):
if(len(self.collsionTable.values())==0):
return(0)
d = 1
numCollisions = sum(self.collsionTable[str(self.lowerBoundDiameter)][0:self.seed])
while(sum(self.collsionTable[str(d)][0:self.seed])/numCollisions < self.threshold):
d += 1
collisionsD = sum(self.collsionTable[str(d)][0:self.seed])
previousCollisionsD = sum(self.collsionTable[str(d - 1)][0:self.seed])
couplesD = collisionsD * self.numNodes / self.seed
previousCouplesD = previousCollisionsD * self.numNodes / self.seed
interpolation = self.interpolate(previousCouplesD,couplesD,self.couplesPercentage)
result = (d - 1) + interpolation
if (result < 0):
result = 0
return result
def printStats(self):
print("----------- STATS -----------")
print("Seed number ", self.seed)
print("Avg distance %.20f" %self.avgDistance)
print("Total couples %.20f" %self.totalCouples)
print("Total couples percentage %.20f" %self.couplesPercentage)
print("Lowerbound diameter ", self.lowerBoundDiameter)
print("Effective diameter %.20f" %self.effectiveDiameter)
print("-----------------------------")
def get_stats(self,additionalInfo = None):
if(additionalInfo!= None):
additionalInfo['avg_distance'] = self.avgDistance
additionalInfo['total_couples'] = self.totalCouples
additionalInfo['total_couples_perc'] = self.couplesPercentage
additionalInfo['lower_bound'] = self.lowerBoundDiameter
additionalInfo['effective_diameter'] = self.effectiveDiameter
additionalInfo['treshold'] = self.threshold
additionalInfo['num_seed'] = self.seed
additionalInfo['time'] = self.time
#additionalInfo['seedsList'] = self.transform_seedlist(self.completeSeedList)
else:
additionalInfo ={
'avg_distance':self.avgDistance,
'total_couples': self.totalCouples,
'total_couples_perc':self.couplesPercentage,
'lower_bound': self.lowerBoundDiameter,
'effective_diameter':self.effectiveDiameter,
'treshold':self.threshold,
'num_seed':self.seed,
'time': self.time
#'seedsList':self.transform_seedlist(self.completeSeedList)
}
return(additionalInfo)
def transform_seedlist(self,seedListString):
seedList = []
for seed in seedListString.split(","):
if(seed != ""):
seedList.append(seed)
newSeedList = ""
for seed in seedList[0:self.seed]:
newSeedList += seed
newSeedList += ","
return(newSeedList)
|
BigDataLaboratory/MHSE
|
analyze_results/src/objects/stats.py
|
stats.py
|
py
| 5,525 |
python
|
en
|
code
| 4 |
github-code
|
50
|
33026470883
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author: Aelaig COURNEZ - Flora GUES - Yann LAMBRECHTS - Romain SAFRAN
# Imported libraries
import numpy as np
import pandas
import struct
import glob
import os
import time
import datetime as dt
from pyproj import Proj, transform
#--------------------------------------------------------------------------------#
#                      READING AND SAVING EA400 DATA                              #
#   Case where the positioning data are recorded in the NME0 datagrams.           #
#                                                                                  #
#   This script reads files produced by EA400 acquisitions, decodes them          #
#   and saves them as h5 files.                                                    #
#   To use it, set the folder containing the .raw and .out files through the      #
#   input variable dir_path (main), as well as the output folder out_path         #
#   where the decoded data should be saved.                                        #
#--------------------------------------------------------------------------------#
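# Example use (illustrative only -- the paths, channel list, dates and thresholds below are
# hypothetical and must be adapted to your own survey):
#   dir_path = "D:/EA400/raw/"
#   out_path = "D:/EA400/h5/"
#   files = sorted(glob.glob(dir_path + "*.raw"))
#   survey_date = dt.datetime(year=2020, month=10, day=7)
#   runDECODEandSAVE_RAWfiles(files, out_path, channels=[1, 2], survey_date=survey_date,
#                             range_detection=[5, 100], depth_max_toSave=120)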
#-------- FUNCTIONS DECODING THE DIFFERENT DATAGRAM TYPES --------
def decode_CON0(data):
"""
    Decode a CON0 datagram.
    Parameters
    ----------
    data : string
        CON0 datagram - portion of the binary file
    Returns
    -------
    decode_data : dictionary
        dictionary containing all the acquisition parameters
"""
trame = data[:4]
if trame == b'CON0':
decode_data={}
decode_data["DateTime"] = struct.unpack('<Q', data[4:4+8])[0]
decode_data["SurveyName"] = struct.unpack('<128s', data[12:12+128])[0].decode('ascii').strip('\x00')
decode_data["TransectName"] = struct.unpack('<128s', data[140:140+128])[0].decode('ascii').strip('\x00')
decode_data["SounderName"] = struct.unpack('<128s', data[168:168+128])[0].decode('ascii').strip('\x00')
decode_data["Spare"] = struct.unpack('128s', data[296:296+128])[0].decode('ascii').strip('\x00')
# ecart de 100 bits...(?)
decode_data["TransducerCount"] = struct.unpack('<I', data[524:524+4])[0]
# transducer 1 = 38kHz
decode_data["ChannelId_38"] = struct.unpack('128s', data[528:528+128])[0].decode('ascii').strip('\x00')
decode_data["BeamType_38"] = struct.unpack('<l', data[656:656+4])[0]
decode_data["Frequency_38"] = struct.unpack('<f', data[660:660+4])[0]
decode_data["Gain_38"] = struct.unpack('<f', data[664:664+4])[0]
decode_data["EquivalentBeamAngle_38"] = struct.unpack('<f', data[668:668+4])[0]
# transducer 2 = 200kHz
decode_data["ChannelId_200"] = struct.unpack('128s', data[848:848+128])[0].decode('ascii').strip('\x00')
decode_data["BeamType_200"] = struct.unpack('<l', data[976:976+4])[0]
decode_data["Frequency_200"] = struct.unpack('<f', data[980:980+4])[0]
decode_data["Gain_200"] = struct.unpack('<f', data[984:984+4])[0]
decode_data["EquivalentBeamAngle_200"] = struct.unpack('<f', data[988:988+4])[0]
return decode_data
def decode_RAW0(data):
"""
    Decode a RAW0 datagram.
    Parameters
    ----------
    data : string
        RAW0 datagram - portion of the binary file
    Returns
    -------
    decode_data : dictionary
        dictionary containing all the data associated with one ping/measurement
"""
trame = data[:4]
if trame == b'RAW0':
decode_data={}
decode_data["DateTime"] = struct.unpack('<Q', data[4:4+8])[0]
decode_data["Channel"] = struct.unpack('<h', data[12:12+2])[0]
decode_data["Mode"] = struct.unpack('<h', data[14:14+2])[0]
decode_data["TransducerDepth"] = struct.unpack('<f', data[16:16+4])[0]
decode_data["Frequency"] = struct.unpack('<f', data[20:20+4])[0]
decode_data["TransmitPower"] = struct.unpack('<f', data[24:24+4])[0]
decode_data["PulseLength"] = struct.unpack('<f', data[28:28+4])[0]
decode_data["BandWidth"] = struct.unpack('<f', data[32:32+4])[0]
decode_data["SampleInterval"] = struct.unpack('<f', data[36:36+4])[0]
decode_data["SoundVelocity"] = struct.unpack('<f', data[40:40+4])[0]
decode_data["AbsorptionCoefficient"] = struct.unpack('<f', data[44:44+4])[0]
decode_data["Heave"] = struct.unpack('<f', data[48:48+4])[0]
decode_data["Tx_Roll"] = struct.unpack('<f', data[52:52+4])[0]
decode_data["Tx_Pitch"] = struct.unpack('<f', data[56:56+4])[0]
decode_data["Temperature"] = struct.unpack('<f', data[60:60+4])[0]
decode_data["Spare1"] = struct.unpack('<h', data[64:64+2])[0]
decode_data["Spare2"] = struct.unpack('<h', data[66:66+2])[0]
decode_data["Rx_Roll"] = struct.unpack('<f', data[68:68+4])[0]
decode_data["Rx_Pitch"] = struct.unpack('<f', data[72:72+4])[0]
decode_data["Offset"] = struct.unpack('<l', data[76:76+4])[0]
decode_data["Count"] = struct.unpack('<l', data[80:80+4])[0]
decode_data["Power"] = []
for i in range(decode_data["Count"]):
decode_data["Power"].append(struct.unpack('<h', data[84+i*2:84+i*2+2])[0]*10*np.log10(2)/256)
return decode_data
def decode_NME0(data):
"""
    Decode a NME0 datagram.
    /!\ returns None if line_[0]!='$GPGGA'
    Parameters
    ----------
    data : string
        NME0 datagram - portion of the binary file
    Returns
    -------
    data_traj : list of float
        positioning data [time of day in seconds, latitude, longitude, altitude]
"""
trame = data[:4]
if trame == b'NME0':
trames = data[12:].decode('ascii').split('\n')
for line_ in trames:
line_=line_.split(',')
if line_[0]=='$GPGGA':
sec_txt = line_[1].split(".")[0]
secondes = int(sec_txt[-2:]) + int(sec_txt[-4:-2])*60 + int(sec_txt[:-4])*3600
lat=float(line_[2])
lat=lat//100+(lat-lat//100*100)/60
if line_[3]=='S':
lat=-lat
lon=float(line_[4])
lon = lon // 100 + (lon - lon // 100*100) / 60
if line_[5]=='W':
lon=-lon
data_traj = [secondes, lat, lon, float(line_[9])+float(line_[11])]
return data_traj
def decode_DEP0(data):
"""
    Decode a DEP0 datagram.
    Parameters
    ----------
    data : string
        DEP0 datagram - portion of the binary file
    Returns
    -------
    decode_data : dictionary
        dictionary containing the useful data: bottom detection and the BS computed by Kongsberg
"""
trame = data[:4]
if trame == b'DEP0':
decode_data={}
decode_data["DateTime"] = struct.unpack('<Q', data[4:4+8])[0]
decode_data["NbChannel"] = struct.unpack('<I', data[12:12+4])[0]
# transducer 1 = 38kHz
decode_data["Depth_38"] = struct.unpack('<f', data[16:16+4])[0]
decode_data["BS_38"] = struct.unpack('<f', data[20:20+4])[0]
decode_data["Param2_38"] = struct.unpack('<f', data[24:24+4])[0]
# transducer 2 = 200kHz
decode_data["Depth_200"] = struct.unpack('<f', data[28:28+4])[0]
decode_data["BS_200"] = struct.unpack('<f', data[32:32+4])[0]
decode_data["Param2_200"] = struct.unpack('<f', data[36:36+4])[0]
return decode_data
#--------------- FUNCTIONS READING THE DATA ------------------
def read_RAWfile(f,line,channel,survey_date,range_detection,depth_max_toSave):
"""
    Read a .raw file and store the data in dictionaries.
    Parameters
    ----------
    f : .raw file
        .raw file produced by the EA400 and opened with : open(filepath, 'rb')
    line : string
        identifier of the data file, here the line name, e.g. : 'L0006'
    channel : int
        channel to read (compared to each ping's channel), e.g. : 1 or 2 with {1:38kHz ; 2:200kHz}
    survey_date : datetime
        date (day) on which the survey was carried out, e.g. : dt.datetime(year=2020,month=10,day=7,hour = 0,minute = 0,second = 0)
    range_detection : list of int
        depth interval used for the bottom detection, e.g. : [5,100]
    depth_max_toSave : int
        depth threshold for saving the data, samples deeper than this threshold are not saved
    Returns
    -------
    d_param : dictionary
        dictionary containing all the acquisition parameters for one survey line
    d_power : dictionary
        dictionary containing all the acoustic data measured along one line
    d_traj : dictionary
        dictionary containing all the positioning data collected along one line
"""
#-------------Definition des variables -----------
nb_con , nb_tag , nb_nme , nb_raw , nb_svp , nb_dep = 0,0,0,0,0,0 # compteurs de trames
ping=0 # compteur de ping
# Sauvegarde des donnees NME0
Trajectoire_time = [] # liste des temps trajectoire
Trajectoire_lat,Trajectoire_lon,Trajectoire_z=[],[],[] # listes des latitudes, longitudes et altitudes
# Origine des dates
origine_1601 = dt.datetime(year=1601,month=1,day=1,hour = 0,minute = 0,second = 0)
# Dictionnaires a remplir
d_power = {}
d_param = {}
d_traj = {}
#--------------------------------------------------
data = f.read(4 * 1) # Debut de la lecture des donnees
while len(data)==4 : # Tant qu'il y a toujours des donnees a lire
# on lit la longueur de la trame precisee au debut
lengths, = struct.unpack('<l', data)
# on isole la trame dans data
data = f.read(lengths*1)
trame = data[:4]
if trame == b'CON0':
nb_con+=1
# on decode la trame CON0
decoded_CON0 = decode_CON0(data)
elif trame == b'TAG0':
nb_tag+=1
elif trame == b'NME0':
nb_nme+=1
# on decode la trame NME0
decoded_NME0 = decode_NME0(data)
if decoded_NME0 != None:
Trajectoire_time.append(survey_date + dt.timedelta(seconds = decoded_NME0[0]))
Trajectoire_lat.append(decoded_NME0[1])
Trajectoire_lon.append(decoded_NME0[2])
Trajectoire_z.append(decoded_NME0[3])
elif trame == b'SVP0':
nb_svp+=1
elif trame == b'DEP0':
nb_dep+=1
elif trame == b'RAW0':
nb_raw+=1
# on decode la trame RAW0
decoded_RAW0 = decode_RAW0(data)
# Variables
            dateTime_ms = (decoded_RAW0["DateTime"]) // 10 # DateTime is in 100 ns units (FILETIME since 1601-01-01); // 10 converts it to microseconds
sample_int = decoded_RAW0["SampleInterval"]
sound_vel = decoded_RAW0["SoundVelocity"]
power = decoded_RAW0["Power"]
# Conversion des profondeurs en indices
i_max_save = round( 2*depth_max_toSave / (sample_int * sound_vel)) # i_borne_prof
i_min_detect = round( 2*range_detection[0] / (sample_int * sound_vel))
i_max_detect = round( 2*range_detection[1] / (sample_int * sound_vel))
if decoded_RAW0["Channel"]==channel: # si la trame est dans la frequence choisie
if len(power)!=0:
ping += 1
# Detection du fond = max de puissance dans l'intervalle range_detection
save_power_list = power[:i_max_save+1] # puissance a sauvegarder
detect_power_list = power[i_min_detect:i_max_detect+1] # puissance pour detection du fond
max_power = max(detect_power_list) # maximum de puissance dans detect_power_list
i_max_power = i_min_detect + detect_power_list.index(max_power) # indice du max
prof_max_power = i_max_power * sample_int * sound_vel / 2 # profondeur du max
# - - - Stockage des donnees dans les dictionnaires d_param et d_power - - - #
# extraction de Power dans un dictionnaire dont la clef est le nom de la ligne
# sous forme de dataframe dont l'index est le numero du ping
if line not in d_power: # lorsqu'on traite un nouveau fichier, on crée un nouveau DataFrame dans d_power
# Initialisation du DataFrame pour d_param, definition des variables
d_param[line] = pandas.DataFrame( columns= ['SurveyName','TransectName','SounderName','TransducerCount',
'Frequency_38','Gain_38','EquivalentBeamAngle_38',
'Frequency_200','Gain_200','EquivalentBeamAngle_200',
'Channel','Frequency',
'SampleInterval','SoundVelocity','PulseLength',
'BandWidth','AbsorptionCoefficient','Count',
'Mode','DepthMaxSave','DepthMinDetect','DepthMaxDetect'])
# Enregistrement des metadonnees dans d_param
d_param[line].loc['param'] = [decoded_CON0['SurveyName'],decoded_CON0['TransectName'],decoded_CON0['SounderName'],decoded_CON0['TransducerCount'],
decoded_CON0['Frequency_38'],decoded_CON0['Gain_38'],decoded_CON0['EquivalentBeamAngle_38'],
decoded_CON0['Frequency_200'],decoded_CON0['Gain_200'],decoded_CON0['EquivalentBeamAngle_200'],
channel,decoded_RAW0["Frequency"],
decoded_RAW0['SampleInterval'],decoded_RAW0['SoundVelocity'],decoded_RAW0['PulseLength'],
decoded_RAW0['BandWidth'],decoded_RAW0['AbsorptionCoefficient'],decoded_RAW0['Count'],
decoded_RAW0['Mode'],depth_max_toSave,range_detection[0],range_detection[1]]
# Initialisation du DataFrame pour d_power, definition des variables
d_power[line] = pandas.DataFrame( columns= ['DateTime','Power','PowerDetectInterval','PowerMax','Depth',
'TransmitPower','Mode','TransducerDepth',
'Heave','Tx_Roll','Tx_Pitch','Spare1','Spare2',
'Rx_Roll','Rx_Pitch','Offset'])
# Enregistrement des donnees dans d_power
d_power[line].loc[ping,'DateTime'] = origine_1601 + dt.timedelta(microseconds = dateTime_ms)
d_power[line].loc[ping,'Power'] = save_power_list
d_power[line].loc[ping,'PowerDetectInterval'] = detect_power_list
d_power[line].loc[ping,'PowerMax'] = max_power
d_power[line].loc[ping,'Depth'] = prof_max_power
d_power[line].loc[ping,'TransmitPower'] = decoded_RAW0['TransmitPower']
d_power[line].loc[ping,'Mode'] = decoded_RAW0['Mode']
d_power[line].loc[ping,'TransducerDepth'] = decoded_RAW0['TransducerDepth']
d_power[line].loc[ping,'Heave'] = decoded_RAW0['Heave']
d_power[line].loc[ping,'Tx_Roll'] = decoded_RAW0['Tx_Roll']
d_power[line].loc[ping,'Tx_Pitch'] = decoded_RAW0['Tx_Pitch']
d_power[line].loc[ping,'Spare1'] = decoded_RAW0['Spare1']
d_power[line].loc[ping,'Spare2'] = decoded_RAW0['Spare2']
d_power[line].loc[ping,'Rx_Roll'] = decoded_RAW0['Rx_Roll']
d_power[line].loc[ping,'Rx_Pitch'] = decoded_RAW0['Rx_Pitch']
d_power[line].loc[ping,'Offset'] = decoded_RAW0['Offset']
# - - - end Stockage - - - #
data = f.read(4 * 1) # Poursuite de la lecture
# on lit la longueur de la trame precisee à la fin
lengthf, = struct.unpack('<l', data)
if lengthf != lengths: # on verifie que l'identifiant est le même au début et à la fin
raise Exception('Length problem')
data = f.read(4 * 1) # Poursuite de la lecture
#----------------- FIN BOUCLE WHILE -----------------
# # - - - Stockage des metadonnees et des donnees de positionnement dans d_traj - - - #
if line not in d_traj:
# Initialisation du DataFrame pour d_traj, definition des variables
d_traj[line] = pandas.DataFrame( columns= ['DateTime','lon','lat','X','Y','z'])
# - Conversion des coordonnees de WGS84 en Lambert93 - #
outProj = Proj('epsg:2154')
inProj = Proj('epsg:4326')
lon, lat = np.array(Trajectoire_lon), np.array(Trajectoire_lat)
X_L93,Y_L93 = transform(inProj,outProj,lat,lon)
# Enregistrement des donnees dans d_traj
d_traj[line]['DateTime'] = Trajectoire_time
d_traj[line]['lon'] = Trajectoire_lon # Sauvegarde des coordonnees en WGS84
d_traj[line]['lat'] = Trajectoire_lat
d_traj[line]['X'] = X_L93 # Sauvegarde des coordonnees en Lambert93
d_traj[line]['Y'] = Y_L93
d_traj[line]['z'] = Trajectoire_z
# # - - - end - - - #
# Nombre de trames au total
nb_tot = nb_con + nb_tag + nb_nme + nb_raw + nb_svp + nb_dep
# Affiche le nom du fichier traite et le nombre de trames qu'il contient
print('\nFichier : ',f.name)
print('nb_tot : ',nb_tot,'\nnb_con : ' ,nb_con, '\nnb_tag : ',nb_tag , '\nnb_nme : ',nb_nme , '\nnb_raw : ',nb_raw ,'\nnb_svp : ', nb_svp,'\nnb_dep : ',nb_dep)
return d_param,d_power,d_traj
def read_OUTfile(f,line):
"""
    Read a .out file and store the relevant data in dictionaries.
    Parameters
    ----------
    f : .out file
        .out file produced by the EA400 and opened with : open(filepath, 'rb')
    line : string
        identifier of the data file, here the line name, e.g. : 'L0006'
    Returns
    -------
    d_out : dictionary
        dictionary containing the data found in the DEP0 datagrams: bottom detection and the BS value computed by Kongsberg
"""
#-------------Definition des variables -----------
nb_con , nb_tag , nb_nme , nb_raw , nb_svp , nb_dep = 0,0,0,0,0,0 # compteurs de trames
# Initialisation des listes
L_time , L_depth38, L_depth200, L_BS38, L_BS200 = [],[],[],[],[]
# Origine des dates
origine_1601 = dt.datetime(year=1601,month=1,day=1,hour = 0,minute = 0,second = 0)
# Dictionnaire a remplir
d_out = {}
#-------------------------------------------------
data = f.read(4 * 1) # Debut de la lecture des donnees
while len(data)==4 : # Tant qu'il y a toujours des donnees a lire
# on lit la longueur de la trame precisee au debut
lengths, = struct.unpack('<l', data)
# on isole la trame dans data
data = f.read(lengths*1)
trame = data[:4]
if trame == b'CON0':
nb_con+=1
elif trame == b'TAG0':
nb_tag+=1
elif trame == b'NME0':
nb_nme+=1
decode_NME0(data)
elif trame == b'RAW0':
nb_raw+=1
elif trame == b'SVP0':
nb_svp+=1
elif trame == b'DEP0':
nb_dep+=1
# on decode la trame DEP0
decoded_DEP0 = decode_DEP0(data)
            time_ms = (decoded_DEP0['DateTime']) //10 # DateTime is in 100 ns units (FILETIME since 1601-01-01); // 10 converts it to microseconds
L_time.append(origine_1601 + dt.timedelta(microseconds = time_ms))
L_depth38.append(decoded_DEP0['Depth_38'])
L_depth200.append(decoded_DEP0['Depth_200'])
L_BS38.append(decoded_DEP0['BS_38'])
L_BS200.append(decoded_DEP0['BS_200'])
data = f.read(4 * 1) # Poursuite de la lecture
# on lit la longueur de la trame precisee à la fin
lengthf, = struct.unpack('<l', data)
if lengthf != lengths: # on vérifie que l'identifiant est le même au début et à la fin
raise Exception('Length problem')
data = f.read(4 * 1) # Poursuite de la lecture
#----------------- FIN BOUCLE WHILE -----------------
if line not in d_out:
d_out[line] = pandas.DataFrame( columns= ['DateTime','Depth_38','Depth_200','BS_38','BS_200'])
# Enregistrement des donnees dans d_out
d_out[line]['DateTime'] = L_time
d_out[line]['Depth_38'] = L_depth38
d_out[line]['Depth_200'] = L_depth200
d_out[line]['BS_38'] = L_BS38
d_out[line]['BS_200'] = L_BS200
    # Total number of datagrams
nb_tot = nb_con + nb_tag + nb_nme + nb_raw + nb_svp + nb_dep
    # Print the name of the processed file and the number of datagrams it contains
print('\nFichier : ',f.name)
print('nb_tot : ',nb_tot,'\nnb_con : ' ,nb_con, '\nnb_tag : ',nb_tag , '\nnb_nme : ',nb_nme , '\nnb_raw : ',nb_raw ,'\nnb_svp : ', nb_svp,'\nnb_dep : ',nb_dep)
return d_out
def check_FileExists(filepath):
"""
Cette fonction permet de verifier qu'aucun fichier n'est ecrase involontairement lors d'une sauvegarde.
Parametres
----------
filepath : string
Chemin vers le fichier qu'on souhaite sauvegarde
Sorties
-------
Save : boolean
False si un fichier existe donc on desactive la sauvegarde
True si ce fichier n'existe pas, sauvegarde possible
"""
    Save = True # enable saving
    if os.path.exists(filepath) : # make sure we do not overwrite an existing file
print('Vous allez ecraser le fichier '+filepath+',\nvoulez vous le remplacer par celui que vous generez maintenant ?')
print('Appuyez sur [y] ou [n], et tapez entrer dans la console.')
action_doublon = input('->')
if action_doublon == 'n':
            Save = False # disable saving
if action_doublon == 'y':
Save = True
return Save
def save_RAWbdd(outpath,channel,d_param,d_power,d_traj):
"""
Cette fonction permet de sauvegarder les donnees des fichiers .RAW rassemblees dans les dictionnaires dans des fichiers h5.
Parametres
----------
outpath : string
chemin vers le dossier de sortie
channel : int
canal a sauvegarder, par ex : 1->38kHz et 2->200kHz
d_param : dictionary
dictionnaire comprenant tous les parametres d'acquisition respectifs a une ligne de leve
d_power : dictionary
dictionnaire comprenant l'ensemble des donnees acoustiques mesurees pendant une ligne
d_traj : dictionary
dictionnaire comprenant l'ensemble des donnees de positionnement recueillies pendant une ligne
"""
    # Take the frequency into account
    if channel ==1: str_freq = '_38kHz'
    else : str_freq = '_200kHz'
    for ligne in d_power: # loop over the processed files/lines
        fic_h5_data = ligne +str_freq+ '_data.h5' # name of the output file gathering the data
        # Check that no existing file gets overwritten
        Save = check_FileExists(outpath + fic_h5_data)
        if Save : # save
            # Create an h5 output file
store = pandas.HDFStore(outpath + fic_h5_data)
store['data'] = d_power[ligne]
store['trajectoire'] = d_traj[ligne]
store['param'] = d_param[ligne]
store.close()
return None
def runDECODEandSAVE_RAWfiles(filesRAW_to_read,out_path,channels,survey_date,range_detection,depth_max_toSave):
"""
Cette fonction permet d'executer la lecture et la sauvegarde des fichiers .RAW dans des fichiers h5.
Parametres
----------
filesRAW_to_read : list of string
liste des chemins vers les fichiers .raw a lire
out_path : string
chemin vers le repertoire de sortie
channels : list of int
liste des canaux que l'on souhaite lire, par ex : [1,2] [1], ou [2] avec {1:38kHz ; 2:200kHz}
survey_date : datetime
date a laquelle a ete realisee le leve (jour), par ex : dt.datetime(year=2020,month=10,day=7,hour = 0,minute = 0,second = 0)
range_detection : list of int
intervalle de profondeur utilise pour la detection du fond, par ex : [5,100]
depth_max_toSave : int
profondeur seuil pour la sauvegarde des donnees, ne pas sauvegarder si la profondeur est superieure au seuil
"""
    for channel in channels : # loop over the channels
        for file in filesRAW_to_read : # loop over the .raw data files
            line = os.path.basename(file)[:5]
            print('-> Lecture du fichier RAW, Ligne : '+line+' Canal : '+str(channel))
            # Open the file
            f = open(file, 'rb')
            # read the file and fill the dictionaries:
            d_power,d_param,d_traj = read_RAWfile(f,line,channel,survey_date,range_detection,depth_max_toSave)
            print('Lecture achevee')
            print('-> Sauvegarde du fichier RAW, Ligne : '+line+' Canal : '+str(channel))
            # Save the data to h5 files
save_RAWbdd(out_path,channel,d_param,d_power,d_traj)
print('Sauvegarde achevee')
return None
def runDECODEandSAVE_OUTfiles(filesOUT_to_read,out_path):
"""
Cette fonction permet d'executer la lecture et la sauvegarde des fichiers .out dans des fichiers h5.
Parametres
----------
filesOUT_to_read : list of string
list des chemins vers les fichiers a lire
out_path : string
chemin vers le repertoire de sortie
"""
    for file in filesOUT_to_read : # loop over the .out files to read
        line = os.path.basename(file)[:5]
        print('-> Lecture du fichier OUT, Ligne : '+line)
        # Open the file
        fic = open(file, 'rb')
        # read the file and fill the dictionary:
        d_out = read_OUTfile(fic,line)
        print('Lecture achevee')
        print('-> Sauvegarde du fichier OUT, Ligne : '+line)
        out_file = out_path + line + '_ref.txt'
        # Check that no existing file gets overwritten
        if check_FileExists(out_file):
            # Create a metadata file
d_out[line].to_csv(out_file ,sep = ' ', index=False)
print('Sauvegarde achevee')
return None
if __name__ == '__main__':
    t_start = time.time() # start of the computation timer
    # Paths to the data directories
    dir_path = './data/' # input directory ->>> TO BE SPECIFIED
    filesRAW_to_read = glob.glob(dir_path+'*.raw') # all the RAW files to process
    filesOUT_to_read = glob.glob(dir_path+'*.out') # all the OUT files to process
    out_path = './fic_h5/' # output directory ->>> TO BE SPECIFIED
    # Variables
    # Survey date /!\ to change: 07/10/2020 or 23/10/2020 ->>> TO BE SPECIFIED
    survey_date = dt.datetime(year=2020,month=10,day=7,hour = 0,minute = 0,second = 0)
    channels = [1,2] # channels to read {1:38kHz ; 2:200kHz} ->>> TO BE SPECIFIED
    range_detection = [5,100] # depth interval for the bottom search (in m)
    depth_max_toSave = 100 # maximum depth saved in the h5 files (in m)
    # Read and save the .RAW files
    runDECODEandSAVE_RAWfiles(filesRAW_to_read,out_path,channels,survey_date,range_detection,depth_max_toSave)
    # Read and save the .OUT files
    runDECODEandSAVE_OUTfiles(filesOUT_to_read,out_path)
    # Print the computation time
    t_end = time.time() # end of the computation timer
print('\nCalculation time : ',round((t_end-t_start)/60),'min')
|
cournez-ensta/Projet_Classification_2021
|
Scripts_Python/decode_and_save_EA400data_with_NME0.py
|
decode_and_save_EA400data_with_NME0.py
|
py
| 29,607 |
python
|
fr
|
code
| 0 |
github-code
|
50
|
22417742951
|
from alumina.config.userModel import AluminaUserModel
from django.shortcuts import render
from ATUJobPortal.config.authentication import Authentication
from django.http.response import HttpResponseRedirect
def aluminaApplicationController(request):
auth = Authentication(request)
msg = None
errorMessage = None
userDetails = None
sort = None
noApplication = True
if auth.authMap["authorize"]:
userId = auth.authMap["userId"]
userDetails = AluminaUserModel.userModel(userId)
print(userDetails)
else:
return HttpResponseRedirect("/account/logout")
if request.method == "GET":
sort = request.GET.get("sort")
if sort is None:
sort = "pending"
# checking for no application
for application in userDetails.get("allApplicationsList"):
if application.get("status") == sort:
noApplication = False
break
return render(request,
'aluminaApplications.html',
{"heading": "Alumina All Application | ATU Job Portal",
"auth": auth.authMap,
"msg": msg,
"userDetails": userDetails,
"errorMessage": errorMessage,
"sort": sort,
"noApplication": noApplication,
"tabName": "application"})
|
tmsoft96/ATUJobPortal
|
alumina/controllers/aluminaApplicationController.py
|
aluminaApplicationController.py
|
py
| 1,402 |
python
|
en
|
code
| 1 |
github-code
|
50
|
15142269500
|
import cv2 as cv
import numpy as np
image_width = 480
image_height = 640
def sort_points(points):
points = points.reshape((4, 2))
points_new = np.zeros((4, 1, 2), dtype=np.int32)
_sum = points.sum(1)
_diff = np.diff(points, axis=1)
points_new[0] = points[np.argmin(_sum)]
points_new[3] = points[np.argmax(_sum)]
points_new[1] = points[np.argmin(_diff)]
points_new[2] = points[np.argmax(_diff)]
return points_new
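# Note (added explanation, not in the original script): sort_points orders the four corners
# with the coordinate sum (smallest = top-left, largest = bottom-right) and the coordinate
# difference y - x (smallest = top-right, largest = bottom-left), which matches the
# destination order [TL, TR, BL, BR] expected by apply_warp below.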
def apply_warp(image, points):
points = sort_points(points)
pts1 = np.float32(points)
pts2 = np.float32([[0, 0], [image_width, 0], [0, image_height], [image_width, image_height]])
matrix = cv.getPerspectiveTransform(pts1, pts2)
output = cv.warpPerspective(image, matrix, (image_width, image_height))
crop = output[20:output.shape[0]-20, 20:output.shape[1]-20]
crop = cv.resize(crop, (image_width, image_height))
return crop
def get_contours(image):
biggest_contour = np.array([])
max_area = 0
contours, _ = cv.findContours(image, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE)
for contour in contours:
area = cv.contourArea(contour)
if area > 5000:
perimeter = cv.arcLength(contour, True)
approx = cv.approxPolyDP(contour, 0.02*perimeter, True)
if area > max_area and len(approx) == 4:
biggest_contour = approx
max_area = area
cv.drawContours(img_contour, biggest_contour, -1, (0, 255, 0), 20)
return biggest_contour
def process(image):
gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
blur = cv.GaussianBlur(gray, (5, 5), 1)
canny = cv.Canny(blur, 200, 200)
kernel = np.ones((5, 5), dtype='uint8')
dilated = cv.dilate(canny, kernel, iterations=2)
eroded = cv.erode(dilated, kernel, iterations=1)
return eroded
cap = cv.VideoCapture(0)
while True:
_, img = cap.read()
    img = cv.resize(img, (image_width, image_height))  # cv.resize returns a new image; keep the result
img_contour = img.copy()
mod_img = process(img)
largest_contour = get_contours(mod_img)
warped_image = np.zeros_like(img)
if len(largest_contour) > 0:
warped_image = apply_warp(img, largest_contour)
cv.imshow('result', warped_image)
cv.imshow('flow', img_contour)
if cv.waitKey(1) & 0xFF == ord('q'):
break
|
chaos-6666/cv-projects
|
doc_scan.py
|
doc_scan.py
|
py
| 2,298 |
python
|
en
|
code
| 0 |
github-code
|
50
|
25953188511
|
import keras
from keras.models import load_model
from agent.agent import Agent
from functions import *
import sys
if len(sys.argv) != 4:
print('Usage: python evaluate.py [stock] [model] [window_size]')
exit()
stock_name, model_name, window_size = sys.argv[1], sys.argv[2], int(sys.argv[3])
agent = Agent(window_size, 10, True, model_name)
data = getStockDataVec(stock_name)[-600000:]
l = len(data) - 1
state = getState(data, 0, window_size + 1)
total_profit = 0
agent.inventory = []
for t in range(l):
action = agent.choose_action(state)
# sit
next_state = getState(data, t + 1, window_size + 1)
profit = 0
reward = 0
if 1 == action: # buy
agent.inventory.append(data[t])
print(f'Buy: {formatPrice(data[t])}')
elif 2 == action and len(agent.inventory): # sell
for bought_price in agent.inventory:
profit += data[t] - bought_price
agent.inventory = []
reward = max(profit, 0)
total_profit += profit
print(f'Sell: {formatPrice(data[t])} | Profit: {formatPrice(profit)}')
done = True if t == l - 1 else False
agent.store_transition(state, action, reward, next_state)
state = next_state
if done:
print('-'*50)
print(f'Total Profit: {formatPrice(total_profit)}')
print('-'*50)
|
goho302jo03/nn_model
|
q-trader/evaluate.py
|
evaluate.py
|
py
| 1,327 |
python
|
en
|
code
| 0 |
github-code
|
50
|
8147050964
|
from PIL import Image, ImageDraw
import math
import random
import numpy as np
import matplotlib.pyplot as plt
random_seed = 123456789
random.seed(random_seed)
def to_rgb(image):
rgb = image.convert("RGB")
width = image.width
height = image.height
matrix = np.zeros((width, height, 3))
for x in range(width):
for y in range(height):
matrix[x, y, :] = rgb.getpixel((x, y))
return matrix
def to_gray(rgb):
return np.dot(rgb[...,:3], [0.2989, 0.5870, 0.1140])
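# Note (added): the weights used above are the standard ITU-R BT.601 luma coefficients
# commonly used to convert RGB to grayscale.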
def add_noise(matrix, noise_level):
noise = np.random.randn(matrix.shape[0], matrix.shape[1]) * noise_level
matrix = matrix + noise
matrix[matrix > 255] = 255
matrix[matrix < 0] = 0
return matrix
def to_binary(matrix, threshold):
result = np.array(matrix)
result[matrix > threshold] = 1
result[matrix <= threshold] = -1
return result
def sigmoid(x):
return 1.0 / (1.0 + np.exp(-x))
def gibbs_sampling(input_, num_iters = 5, J = 1):
DX = [-1, 1, 0, 0]
DY = [0, 0, -1, 1]
X = np.zeros(input_.shape)
X[:, :] = input_[:, :]
width = X.shape[0]
height = X.shape[1]
for iter in range(num_iters):
X_next = np.zeros(X.shape)
for x in range(width):
for y in range(height):
agree = 0
disagree = 0
for i in range(len(DX)):
x_ = x + DX[i]
y_ = y + DY[i]
if x_ >= 0 and x_ < width and y_ >= 0 and y_ < height:
if X[x_][y_] == X[x][y]:
agree += 1
else:
disagree += 1
eta = X[x][y] * (agree - disagree)
prob = sigmoid(2 * J * eta)
sample = np.random.rand()
if sample <= prob:
X_next[x][y] = 1
else:
X_next[x][y] = -1
X = X_next
return X
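# Note (added explanation, not in the original script): for each pixel, eta equals the sum
# of its neighbouring spins (X[x][y] * (agree - disagree)), so sigmoid(2*J*eta) is the
# full-conditional probability that the pixel takes the value +1 under an Ising prior with
# coupling J, and the pixel is resampled from that Bernoulli distribution on every sweep.
# The noisy observation only enters through the initialisation, i.e. this samples from the
# smoothing prior rather than from a posterior with an explicit likelihood term.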
image = Image.open("jim_simons.jpg").convert('LA')
matrix = to_gray(to_rgb(image))
noised_matrix = add_noise(matrix, noise_level = 25)
X = to_binary(noised_matrix, threshold = 50)
X_gibbs = gibbs_sampling(X, num_iters = 5, J = 1)
plt.subplot(1, 3, 1)
plt.imshow(matrix.transpose())
plt.title('Original')
plt.subplot(1, 3, 2)
plt.imshow(X.transpose() * 255)
plt.title("White noised")
plt.subplot(1, 3, 3)
plt.imshow(X_gibbs.transpose() * 255)
plt.title("Gibbs sampling on 2D Ising model")
plt.show()
gibbs_1 = gibbs_sampling(X, num_iters = 1, J = 1)
gibbs_3 = gibbs_sampling(X, num_iters = 3, J = 1)
gibbs_5 = gibbs_sampling(X, num_iters = 5, J = 1)
plt.subplot(1, 4, 1)
plt.imshow(X.transpose() * 255)
plt.title("White noised")
plt.subplot(1, 4, 2)
plt.imshow(gibbs_1.transpose() * 255)
plt.title('Gibbs sampling (1 iteration)')
plt.subplot(1, 4, 3)
plt.imshow(gibbs_3.transpose() * 255)
plt.title("Gibbs sampling (3 iterations)")
plt.subplot(1, 4, 4)
plt.imshow(gibbs_5.transpose() * 255)
plt.title("Gibbs sampling (5 iterations)")
plt.show()
|
HyTruongSon/Dirichlet_Process
|
ising_gibbs.py
|
ising_gibbs.py
|
py
| 2,661 |
python
|
en
|
code
| 0 |
github-code
|
50
|
28612570847
|
import os
from tkinter import *
import tkinter.messagebox as tkMessageBox
from idlelib.container import Container, TabbedContainer, ProxyContainer
class FileList:
# N.B. this import overridden in PyShellFileList.
from idlelib.EditorWindow import EditorWindow
def __init__(self, root):
self.root = root
self.dict = {}
self.inversedict = {}
self.containers = {}
self.tab_container = None
self.using_tabs = True
self.vars = {} # For EditorWindow.getrawvar (shared Tcl variables)
def open(self, filename, action=None):
assert filename
filename = self.canonize(filename)
if os.path.isdir(filename):
# This can happen when bad filename is passed on command line:
tkMessageBox.showerror(
"File Error",
"%r is a directory." % (filename,),
master=self.root)
return None
key = os.path.normcase(filename)
if key in self.dict:
edit = self.dict[key]
edit.wakeup()
return edit
if action:
# Don't create window, perform 'action', e.g. open in same window
return action(filename)
else:
edit = self.EditorWindow(self, filename, key)
if edit.good_load:
return edit
else:
edit._close()
return None
def new_container(self, own_window=False):
"Return a new Container for a component"
if self.using_tabs and not own_window:
if self.tab_container is None:
self.tab_container = TabbedContainer(self)
return ProxyContainer(self.tab_container)
else:
return Container(self)
def register_editor_window(self, win, key=None):
self.inversedict[win] = key
if key:
self.dict[key] = win
def already_open(self, filename):
assert filename
filename = self.canonize(filename)
if not os.path.isdir(filename):
key = os.path.normcase(filename)
return key in self.dict
return False
def gotofileline(self, filename, lineno=None):
edit = self.open(filename)
if edit is not None and lineno is not None:
edit.gotoline(lineno)
def new(self, filename=None):
return self.EditorWindow(self, filename)
def new_topwindow(self, event=None):
self.tab_container = None
return self.new()
def close_all_callback(self, *args, **kwds):
for edit in list(self.inversedict):
reply = edit.close()
if reply == "cancel":
break
return "break"
def keep_running(self):
"Application should stay running while any editors are open"
return len(self.inversedict) > 0
def unregister_maybe_terminate(self, edit):
try:
key = self.inversedict[edit]
except KeyError:
print("Don't know this EditorWindow object. (close)")
return
if key:
del self.dict[key]
del self.inversedict[edit]
if not self.inversedict:
self.root.quit()
def filename_changed_edit(self, edit):
edit.saved_change_hook()
try:
key = self.inversedict[edit]
except KeyError:
print("Don't know this EditorWindow object. (rename)")
return
filename = edit.io.filename
if not filename:
if key:
del self.dict[key]
self.inversedict[edit] = None
return
filename = self.canonize(filename)
newkey = os.path.normcase(filename)
if newkey == key:
return
if newkey in self.dict:
conflict = self.dict[newkey]
self.inversedict[conflict] = None
tkMessageBox.showerror(
"Name Conflict",
"You now have multiple edit windows open for %r" % (filename,),
master=self.root)
self.dict[newkey] = edit
self.inversedict[edit] = newkey
if key:
try:
del self.dict[key]
except KeyError:
pass
self.root.after_idle(self.filenames_changed)
# note: replacement for WindowList.add
def add_container(self, container):
container.w.after_idle(self.filenames_changed)
self.containers[str(container)] = container
# note: replacement for WindowList.delete
def delete_container(self, container):
if self.tab_container == container:
self.tab_container = None
try:
del self.containers[str(container)]
except KeyError:
# Sometimes, destroy() is called twice
pass
self.filenames_changed()
# note: replaces callbacks from WindowList; whereas those needed to be
# explicitly registered for and unregistered from, here we just send
# the notice to every component
def filenames_changed(self):
"Callback when one or more filenames changed"
for w in self.inversedict.keys():
w.filenames_changed()
def add_windows_to_menu(self, menu):
list = []
for key in self.containers:
container = self.containers[key]
try:
title = container.get_title()
except TclError:
continue
list.append((title, key, container))
list.sort()
for title, key, container in list:
if container.component is not None:
menu.add_command(label=title,
command=container.component.wakeup)
def configuration_will_change(self):
"Callback from configuration dialog before settings are applied."
for w in self.inversedict.keys():
w.configuration_will_change()
def configuration_changed(self):
"Callback from configuration dialog after settings are applied."
for w in self.inversedict.keys():
w.configuration_changed()
def apply_breakpoints(self, applycmd):
"Callback from debugger asking each editor to apply it's breakpoints"
for w in self.inversedict.keys():
try: # only PyShellEditorWindow will support this callback
w.apply_breakpoints(applycmd)
except Exception:
pass
def rebuild_recent_files_menu(self, rf_list):
"Called when all editors need to rebuild their recent files menus"
for w in self.inversedict.keys():
w.rebuild_recent_files_menu(rf_list)
def canonize(self, filename):
if not os.path.isabs(filename):
try:
pwd = os.getcwd()
except OSError:
pass
else:
filename = os.path.join(pwd, filename)
return os.path.normpath(filename)
def _test():
from idlelib.EditorWindow import fixwordbreaks
import sys
root = Tk()
fixwordbreaks(root)
root.withdraw()
flist = FileList(root)
if sys.argv[1:]:
for filename in sys.argv[1:]:
flist.open(filename)
else:
flist.new()
if flist.inversedict:
root.mainloop()
if __name__ == '__main__':
_test()
|
roseman/idle
|
FileList.py
|
FileList.py
|
py
| 7,409 |
python
|
en
|
code
| 48 |
github-code
|
50
|
36603213242
|
import unittest
from unittest import mock
import pandas as pd
from exabel_data_sdk.client.api.data_classes.entity import Entity
from exabel_data_sdk.client.api.data_classes.entity_type import EntityType
from exabel_data_sdk.client.api.entity_api import EntityApi
from exabel_data_sdk.client.client_config import ClientConfig
from exabel_data_sdk.stubs.exabel.api.data.v1.all_pb2 import SearchEntitiesResponse, SearchTerm
from exabel_data_sdk.util.resource_name_normalization import (
_assert_no_collision,
_validate_mic_ticker,
get_namespace_from_resource_identifier,
normalize_resource_name,
to_entity_resource_names,
)
class TestResourceNameNormalization(unittest.TestCase):
def test_basic(self):
self.assertEqual("abc_Inc_", normalize_resource_name("abc, Inc."))
self.assertEqual("abcXYZ0189-_", normalize_resource_name("abcXYZ0189-_"))
self.assertEqual("_l_", normalize_resource_name("-Øl?."))
long_name = "".join(str(i) for i in range(50))
self.assertEqual(long_name[:64], normalize_resource_name(long_name))
self.assertRaises(ValueError, normalize_resource_name, "")
def test_entity_type_uppercase_existing_mapping(self):
data = pd.Series(["abc, Inc.", "abcXYZ0189-_", "", "-Øl?."], name="BRAND")
expected = pd.Series(
[
"entityTypes/BRAND/entities/acme.abc_Inc_",
"entityTypes/BRAND/entities/acme.abcXYZ0189-_",
None,
"entityTypes/BRAND/entities/acme._l_",
],
name="entity",
)
entity_api = mock.create_autospec(EntityApi(ClientConfig(api_key="123")))
entity_api.get_entity_type_iterator.side_effect = self._list_entity_types_uppercase
result = to_entity_resource_names(entity_api, data, namespace="acme").names
pd.testing.assert_series_equal(expected, result)
def test_entity_type_uppercase_not_existing_mapping(self):
data = pd.Series(["abc, Inc.", "abcXYZ0189-_", "", "-Øl?."], name="BRAND")
expected = pd.Series(
[
"entityTypes/brand/entities/acme.abc_Inc_",
"entityTypes/brand/entities/acme.abcXYZ0189-_",
None,
"entityTypes/brand/entities/acme._l_",
],
name="entity",
)
entity_api = mock.create_autospec(EntityApi(ClientConfig(api_key="123")))
entity_api.get_entity_type_iterator.side_effect = self._list_entity_types
result = to_entity_resource_names(entity_api, data, namespace="acme").names
pd.testing.assert_series_equal(expected, result)
def test_global_entity_type_mapping(self):
data = pd.Series(["I_DE", "I_US", "", "abcXYZ0189"], name="country")
expected = pd.Series(
[
"entityTypes/country/entities/I_DE",
"entityTypes/country/entities/I_US",
None,
"entityTypes/country/entities/abcXYZ0189",
],
name="entity",
)
entity_api = mock.create_autospec(EntityApi(ClientConfig(api_key="123")))
entity_api.get_entity_type_iterator.side_effect = self._list_entity_types
result = to_entity_resource_names(entity_api, data, namespace="acme").names
pd.testing.assert_series_equal(expected, result)
def test_validate_mic_ticker(self):
self.assertTrue(_validate_mic_ticker("A:A"))
self.assertFalse(_validate_mic_ticker(":A"))
self.assertFalse(_validate_mic_ticker("A:"))
self.assertFalse(_validate_mic_ticker("A:A:A"))
self.assertFalse(_validate_mic_ticker("A::A"))
self.assertFalse(_validate_mic_ticker("A"))
def test_isin_mapping(self):
data = pd.Series(["US87612E1064", "DE000A1EWWW0", "US87612E1064"], name="isin")
expected = pd.Series(
[
"entityTypes/company/entities/target_inc",
"entityTypes/company/entities/adidas_ag",
"entityTypes/company/entities/target_inc",
],
name="entity",
)
search_terms = [
SearchTerm(field="isin", query="US87612E1064"),
SearchTerm(field="isin", query="DE000A1EWWW0"),
]
entity_api = mock.create_autospec(EntityApi(ClientConfig(api_key="123")))
entity_api.search.entities_by_terms.return_value = [
SearchEntitiesResponse.SearchResult(
terms=[search_terms[0]],
entities=[
Entity("entityTypes/company/entities/target_inc", "Target, Inc.").to_proto(),
],
),
SearchEntitiesResponse.SearchResult(
terms=[search_terms[1]],
entities=[
Entity("entityTypes/company/entities/adidas_ag", "Adidas Ag").to_proto(),
],
),
]
result = to_entity_resource_names(entity_api, data, namespace="acme").names
call_args_list = entity_api.search.entities_by_terms.call_args_list
self.assertEqual(1, len(call_args_list))
self.assertEqual(
"entityTypes/company",
call_args_list[0][1]["entity_type"],
"Arguments not as expected",
)
self.assertSequenceEqual(
search_terms,
call_args_list[0][1]["terms"],
"Arguments not as expected",
)
pd.testing.assert_series_equal(expected, result)
def test_isin_mapping_with_entity_mapping(self):
data = pd.Series(["US87612E1064", "do_not_search_for"], name="isin")
entity_mapping = {
"isin": {"do_not_search_for": "entityTypes/company/entities/was_not_searched_for"}
}
expected = pd.Series(
[
"entityTypes/company/entities/target_inc",
"entityTypes/company/entities/was_not_searched_for",
],
name="entity",
)
search_terms = [
SearchTerm(field="isin", query="US87612E1064"),
]
entity_api = mock.create_autospec(EntityApi(ClientConfig(api_key="123")))
entity_api.search.entities_by_terms.return_value = [
SearchEntitiesResponse.SearchResult(
terms=[search_terms[0]],
entities=[
Entity("entityTypes/company/entities/target_inc", "Target, Inc.").to_proto(),
],
),
]
result = to_entity_resource_names(
entity_api, data, namespace="acme", entity_mapping=entity_mapping
).names
call_args_list = entity_api.search.entities_by_terms.call_args_list
self.assertEqual(1, len(call_args_list))
self.assertEqual(
"entityTypes/company",
call_args_list[0][1]["entity_type"],
"Arguments not as expected",
)
self.assertSequenceEqual(
search_terms,
call_args_list[0][1]["terms"],
"Arguments not as expected",
)
pd.testing.assert_series_equal(expected, result)
def test_entity_mapping(self):
company_data = pd.Series(["TGT US", "ADI GE"], name="bloomberg_ticker")
brand_data = pd.Series(
[
"should_be_mapped_not_normalized",
"should be mapped not normalized",
"should#be#mapped#not#normalized",
],
name="brand",
)
entity_mapping = {
"bloomberg_ticker": {
"TGT US": "entityTypes/company/entities/target_inc",
"ADI GE": "entityTypes/company/entities/adidas_ag",
},
"brand": {
"should_be_mapped_not_normalized": "entityTypes/company/entities/brand",
"should be mapped not normalized": "entityTypes/company/entities/other_brand",
"should#be#mapped#not#normalized": "entityTypes/company/entities/another_brand",
},
}
expected_companies = pd.Series(
["entityTypes/company/entities/target_inc", "entityTypes/company/entities/adidas_ag"],
name="entity",
)
expected_brands = pd.Series(
[
"entityTypes/company/entities/brand",
"entityTypes/company/entities/other_brand",
"entityTypes/company/entities/another_brand",
],
name="entity",
)
entity_api = mock.create_autospec(EntityApi(ClientConfig(api_key="123")))
entity_api.get_entity_type_iterator.side_effect = self._list_entity_types
company_result = to_entity_resource_names(
entity_api, company_data, namespace="acme", entity_mapping=entity_mapping
).names
self.assertFalse(entity_api.search_for_entities.called)
pd.testing.assert_series_equal(expected_companies, company_result)
brand_result = to_entity_resource_names(
entity_api, brand_data, namespace="acme", entity_mapping=entity_mapping
).names
pd.testing.assert_series_equal(expected_brands, brand_result)
def test_micticker_mapping(self):
# Note that "NO?COLON" and "TOO:MANY:COLONS" are illegal mic:ticker identifiers,
# since any legal identifier must contain exactly one colon.
# The to_entity_resource_names function will print a warning for such illegal identifiers,
# and they will not result in any searches towards the Exabel API.
mic_tickers = [
"XOSL:TEL",
"XNAS:AAPL",
"NO?COLON",
"TOO:MANY:COLONS",
"XOSL:ORK",
"MANY:HITS",
"NO:HITS",
]
data = pd.Series(
mic_tickers,
name="mic:ticker",
)
expected = pd.Series(
[
"entityTypes/company/entities/telenor_asa",
"entityTypes/company/entities/apple_inc",
None,
None,
"entityTypes/company/entities/orkla_asa",
None,
None,
],
name="entity",
)
search_terms = []
for mic, ticker in map(lambda x: x.split(":"), mic_tickers[:2] + mic_tickers[4:]):
search_terms.append(SearchTerm(field="mic", query=mic))
search_terms.append(SearchTerm(field="ticker", query=ticker))
entity_api = mock.create_autospec(EntityApi(ClientConfig(api_key="123")))
entity_api.search.entities_by_terms.return_value = [
SearchEntitiesResponse.SearchResult(
terms=[search_terms[0]],
entities=[
Entity("entityTypes/company/entities/telenor_asa", "Telenor ASA").to_proto(),
],
),
SearchEntitiesResponse.SearchResult(
terms=[search_terms[1]],
entities=[
Entity("entityTypes/company/entities/apple_inc", "Apple, Inc.").to_proto(),
],
),
SearchEntitiesResponse.SearchResult(
terms=[search_terms[4]],
entities=[
Entity("entityTypes/company/entities/orkla_asa", "Orkla ASA").to_proto(),
],
),
# Result for "MANY:HITS"
SearchEntitiesResponse.SearchResult(
terms=[search_terms[5]],
entities=[
Entity("entityTypes/company/entities/orkla_asa", "Orkla ASA").to_proto(),
Entity("entityTypes/company/entities/telenor_asa", "Telenor ASA").to_proto(),
],
),
# Result for "NO:HITS"
SearchEntitiesResponse.SearchResult(
terms=[search_terms[6]],
entities=[],
),
]
result = to_entity_resource_names(entity_api, data, namespace="acme").names
pd.testing.assert_series_equal(expected, result)
# Check that the expected searches were performed
call_args_list = entity_api.search.entities_by_terms.call_args_list
self.assertEqual(1, len(call_args_list))
self.assertEqual(
"entityTypes/company",
call_args_list[0][1]["entity_type"],
"Arguments not as expected",
)
self.assertSequenceEqual(
search_terms,
call_args_list[0][1]["terms"],
"Arguments not as expected",
)
def test_name_collision(self):
bad_mapping = {"Abc!": "Abc_", "Abcd": "Abcd", "Abc?": "Abc_"}
self.assertRaises(ValueError, _assert_no_collision, bad_mapping)
good_mapping = {"Abc!": "Abc_1", "Abcd": "Abcd", "Abc?": "Abc_2"}
_assert_no_collision(good_mapping)
def test_mapping_with_duplicated_entity_identifiers_should_not_fail(self):
search_terms = [
SearchTerm(field="bloomberg_ticker", query="TGT US"),
SearchTerm(field="bloomberg_ticker", query="TGT UK"),
]
entity_api = mock.create_autospec(EntityApi(ClientConfig(api_key="123")))
entity_api.search.entities_by_terms.return_value = [
SearchEntitiesResponse.SearchResult(
terms=[search_terms[0]],
entities=[
Entity("entityTypes/company/entities/F_000C7F-E", "Target, Inc.").to_proto(),
],
),
SearchEntitiesResponse.SearchResult(
terms=[search_terms[1]],
entities=[
Entity("entityTypes/company/entities/F_000C7F-E", "Target, Inc.").to_proto(),
],
),
]
def _list_entity_types(self):
return iter(
[
EntityType("entityTypes/brand", "", "", False),
EntityType("entityTypes/country", "", "", True),
]
)
def _list_entity_types_uppercase(self):
return iter(
[
EntityType("entityTypes/BRAND", "", "", False),
]
)
def test_preserve_namespace(self):
mock_entity_api = mock.create_autospec(EntityApi)
identifiers = pd.Series(
[
"ns.entity",
"otherns.entity",
"global_entity",
],
name="global_entity_type",
)
normalized = to_entity_resource_names(
mock_entity_api,
identifiers,
namespace="ns",
check_entity_types=False,
preserve_namespace=True,
).names
expected = pd.Series(
[
"entityTypes/global_entity_type/entities/ns.entity",
"entityTypes/global_entity_type/entities/otherns.entity",
"entityTypes/global_entity_type/entities/global_entity",
],
name="entity",
)
pd.testing.assert_series_equal(expected, normalized)
def test_preserve_namespace__infer_namespace(self):
mock_entity_api = mock.create_autospec(EntityApi)
identifiers = pd.Series(
[
"ns.entity",
"other_entity",
],
name="ns.entity_type",
)
normalized = to_entity_resource_names(
mock_entity_api,
identifiers,
namespace="ns",
check_entity_types=False,
preserve_namespace=True,
).names
expected = pd.Series(
[
"entityTypes/ns.entity_type/entities/ns.entity",
"entityTypes/ns.entity_type/entities/ns.other_entity",
],
name="entity",
)
pd.testing.assert_series_equal(expected, normalized)
def test_get_namespace_from_entity_type_name(self):
self.assertEqual("ns", get_namespace_from_resource_identifier("ns.entity_type"))
self.assertEqual(None, get_namespace_from_resource_identifier("entity_type"))
def test_get_namespace_from_entity_type_name__invalid_name_should_fail(self):
self.assertRaises(
ValueError,
get_namespace_from_resource_identifier,
"ns.ns2.entity_type",
)
|
Exabel/python-sdk
|
exabel_data_sdk/tests/util/test_resource_name_normalization.py
|
test_resource_name_normalization.py
|
py
| 16,285 |
python
|
en
|
code
| 5 |
github-code
|
50
|
6994254119
|
import numpy as np
import pandas as pd
from sklearn import linear_model
class PolynomialRegression:
def __init__(self,degree, Data):
self.degree = degree
self.regr = linear_model.LinearRegression()
self.dataset = pd.read_csv(Data, sep=" ")
self.y = self.dataset.iloc[:,-1:].values
XValues = self.dataset.iloc[:,0:-1].values
newX = XValues.copy()
for i in range(2, self.degree + 1):
newX = np.concatenate((newX, XValues**i), axis = 1)
self.X = newX
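        # self.X now holds the polynomial design matrix [X, X**2, ..., X**degree] column-wise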
def train_Model(self):
self.regr.fit(self.X, self.y)
def predict(self, x):
        newX = x.copy()  # first block of features is x itself, matching the training design matrix
for i in range(2, self.degree + 1):
newX = np.concatenate((newX, x**i), axis = 1)
return self.regr.predict(newX)
def score(self):
return self.regr.score(self.X, self.y)
data = 'Data.txt'
maxScore = -1
bestDegree = 0
for i in range(1, 15):
model = PolynomialRegression(i, data)
model.train_Model()
score = model.score()
if(score > maxScore):
maxScore = score
bestDegree = i
print(bestDegree)
print(maxScore)
model = PolynomialRegression(bestDegree, data)
model.train_Model()
out = ""
out = out + str(model.regr.intercept_[0])
aux = 0
for d in range(1, bestDegree + 1):
for i in range(0, 15):
out = out + " + " + str(model.regr.coef_[0][aux]) + " * Math.Pow(sensorData[" + str(i) + "], " + str(d) + ")"
aux += 1
f = open("model.txt", "w")
f.write(out)
f.close()
|
JuanBC9/UnityCar-AI-
|
Assets/Scripts/PolynomialRegression.py
|
PolynomialRegression.py
|
py
| 1,521 |
python
|
en
|
code
| 0 |
github-code
|
50
|
39980500210
|
import cv2 as cv
import os
import numpy as np
import matplotlib.pyplot as plt
import sys
from pathlib import Path
from src.body_measure import draw_slice_data
from src.util import preprocess_image
import argparse
if __name__ == '__main__':
ap = argparse.ArgumentParser()
ap.add_argument("-f", "--front_img", required=True, help="front image path")
ap.add_argument("-s", "--side_img", required=True, help="side image path")
ap.add_argument("-d", "--data", required=True, help="all measurement data including slices, contour, width, height")
args = vars(ap.parse_args())
path_f = args['front_img']
path_s = args['side_img']
data_path = args['data']
img_f = cv.imread(path_f)
if img_f is None:
print('front image does not exist', file=sys.stderr)
exit()
img_s = cv.imread(path_s)
if img_s is None:
print('side image does not exist', file=sys.stderr)
exit()
print(path_f)
print(path_s)
img_f = preprocess_image(img_f)
img_s = preprocess_image(img_s)
data = np.load(data_path)
contour_f = data.item().get('contour_f')
contour_s = data.item().get('contour_s')
segments_f = data.item().get('landmark_segment_f')
segments_s = data.item().get('landmark_segment_s')
seg_dst_f = data.item().get('landmark_segment_dst_f')
seg_dst_s = data.item().get('landmark_segment_dst_s')
measurements = data.item().get('measurement')
segments_height = data.item().get('landmark_segment_height')
if contour_f is None or contour_s is None or segments_f is None or segments_s is None:
print('missing measurement data', file=sys.stderr)
exit()
draw_slice_data(img_f, contour_f, segments_f)
draw_slice_data(img_s, contour_s, segments_s)
print('length of segment in front and side image\n')
for id, width in seg_dst_f.items():
#if id in ['Height', 'CollarBust', 'CollarWaist', 'InsideLeg']:
# continue
if id in seg_dst_s:
depth = seg_dst_s[id]
else:
depth = -1
print("landmark segment id = {0:30} : width = {1:20}, depth = {2:20}".format(id, width, depth))
print('\n\n')
print('segment relative height\n')
for id, val in segments_height.items():
print("relative height value of segment {0:30} = {1:20}".format(id, val))
print('\n\n')
print('body measurements in height unit\n')
for id, val in measurements.items():
print("measurement type = {0:30} : value = {1:20}".format(id, val))
#for id, distance in measure_f.items():
# if id in ['CollarBust', 'CollarWaist', 'InsideLeg']:
# print("slice id = {0:30} : distance = {1:20}".format(id, distance))
plt.subplot(121)
plt.imshow(img_f[:,:,::-1])
plt.subplot(122)
plt.imshow(img_s[:,:,::-1])
plt.show()
|
amitbend/body_measure
|
src/viz_measurement_result.py
|
viz_measurement_result.py
|
py
| 2,863 |
python
|
en
|
code
| 3 |
github-code
|
50
|
1465408384
|
import heapq
import sys
input = sys.stdin.readline
print = sys.stdout.write
v, e = map(int, input().split())
a = int(input())
graph = {i: [] for i in range(1, v + 1)}
for _ in range(e):
i, j, w = map(int, input().split())
graph[i].append((j, w))
dis = {k: float('inf') for k in graph.keys()}
dis[a] = 0
hq = [(dis[a], a)]
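# Note (added): this is Dijkstra with "lazy deletion": stale heap entries whose popped
# distance exceeds the best known distance are skipped instead of being decreased in
# place, giving O(E log E) time without a decrease-key operation.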
while hq:
cur_dis, cur_x = heapq.heappop(hq)
if cur_dis > dis[cur_x]:
continue
for i, w in graph[cur_x]:
new_dis = cur_dis + w
if dis[i] > new_dis:
dis[i] = new_dis
heapq.heappush(hq, (new_dis, i))
for i in range(1, v + 1):
if dis[i] == float('inf'):
print('INF\n')
continue
print(str(dis[i]) + '\n')
|
kkg5/algorithm
|
step/shortest_path/1753_최단경로.py
|
1753_최단경로.py
|
py
| 723 |
python
|
en
|
code
| 0 |
github-code
|
50
|
74937154396
|
from django.urls import path, include
from rest_framework import routers
from . import views
router = routers.DefaultRouter()
router.register(r'tests', views.TestView)
urlpatterns = [
path('', views.index, name='index'),
path('random', views.random, name='random'),
path('router/', include(router.urls))
]
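# Note (added): DefaultRouter generates the list/detail routes for TestView under
# router/tests/ and also exposes a browsable API root view at router/.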
|
filipweidemann/testing-heroku-deployment
|
blog/urls.py
|
urls.py
|
py
| 320 |
python
|
en
|
code
| 0 |
github-code
|
50
|
40237107262
|
from ..parse_json import parse
from pathlib import Path
def more_about_health(make_intimate_comfortable, user_dont_know):
msg_id = "about_health.0"
path = (
Path("consultation_bot/bot/user_dont_know/data/")
/ "data_more_about_health.json"
)
DEFAULT_ACTIONS = {
"default-1": (make_intimate_comfortable,),
"default-2": (more_about_health, *(make_intimate_comfortable, user_dont_know)),
}
parse(path, msg_id, DEFAULT_ACTIONS)
|
mary-zh555/Consultation_bot
|
consultation_bot/bot/user_dont_know/more_about_health.py
|
more_about_health.py
|
py
| 484 |
python
|
en
|
code
| 0 |
github-code
|
50
|
17553389056
|
class Points:
def __init__(self,x,y):
self.x = x
self.y = y
    def getDistance(self,other): # distance formula between two points
return ((self.x - other.x) ** 2+(self.y - other.y) ** 2)**0.5
def type_triangle(self,p2,p3):
self_p2 = self.getDistance(p2)
self_p3 = self.getDistance(p3)
p2_p3 = p2.getDistance(p3)
        # sort the three sides so that p2_p3 is the largest; the triangle inequality can then rule out non-triangles directly
if self_p2 > self_p3:
self_p2,self_p3 = self_p3,self_p2
if self_p3 > p2_p3:
self_p3,p2_p3 = p2_p3,self_p3
if self_p2 + self_p3 <= p2_p3 or p2_p3 - self_p2 >=self_p3:
return print("不是三角形")
elif self_p2 ** 2 + self_p3 ** 2 < p2_p3 ** 2:
return print("锐角三角形")
elif self_p2 ** 2 + self_p3 ** 2 == p2_p3 ** 2:
return print("直角三角形")
elif self_p2 ** 2 + self_p3 ** 2 > p2_p3 ** 2:
return print("钝角三角形")
if __name__ == '__main__':
pt1 = Points(2,3)
pt2 = Points(4,5)
pt3 = Points(0,0)
dis12 = pt1.getDistance(pt2)
dis13 = pt1.getDistance(pt3)
dis23 = pt2.getDistance(pt3)
print(dis12,dis13,dis23)
pt1.type_triangle(pt2,pt3)
|
HongwuQz/PythonHmwk
|
BigData/6.2/BigData6.2.py
|
BigData6.2.py
|
py
| 1,294 |
python
|
en
|
code
| 1 |
github-code
|
50
|
40699599039
|
curr = 0
maxcals = 0
data = []
def process_input(line):
global maxcals, curr, data
if len(line) >0:
curr += int(line)
else:
data.append(curr)
curr = 0
def load_data(filename):
global data, curr
with open (filename, 'r') as file:
lines = file.readlines()
for line in lines:
process_input(line.strip())
data.append(curr)
def partA():
global data
print("Part A")
print(max(data))
def partB():
global data
print("Part B")
    data.sort()  # sort in place; avoid shadowing the built-in sorted
print(sum(data[-3:]))
def main():
inputfile = "./input01a.txt"
#inputfile = "./sample.txt"
load_data(inputfile)
partA()
partB()
main()
|
likwidoxigen/PythonExercises
|
AdventOfCode/2022/01/01.py
|
01.py
|
py
| 716 |
python
|
en
|
code
| 0 |
github-code
|
50
|
10261374824
|
from tkinter import*
from timeit import default_timer
fenêtre = Tk()
can=Canvas(fenêtre,bg="white",height=100,width=150)
can.pack()
def chronomètre():
now = default_timer() - début
minutes,secondes = divmod (now, 60)
heures,minutes = divmod(minutes,60)
str_time = "%d:%02d:%02d"%(heures,minutes,secondes)
can.itemconfigure(text_clock, text=str_time)
fenêtre.after(1000, chronomètre)
début = default_timer()
text_clock = can.create_text(65,25)
chronomètre()
fenêtre.mainloop()
|
pauld01/amazing-maze
|
src/chronometre-master/chrono fonctionnel avec simple fenêtre graphique.py
|
chrono fonctionnel avec simple fenêtre graphique.py
|
py
| 512 |
python
|
fr
|
code
| 0 |
github-code
|
50
|
15327825624
|
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 11 15:27:44 2019
@author: KAMPFF-LAB-ANALYSIS3
"""
import numpy as np
import matplotlib.pyplot as plt
import random
import seaborn as sns
#from filters import *
import os
#os.sys.path.append('/home/kampff/Repos/Pac-Rat/libraries')
os.sys.path.append('D:/Repos/Pac-Rat/libraries')
import parser_library as prs
import behaviour_library as behaviour
### Load pre-processed data
rat_summary_table_path = 'F:/Videogame_Assay/AK_40.2_Pt.csv'
hardrive_path = r'F:/'
Level_2_post = prs.Level_2_post_paths(rat_summary_table_path)
sessions_subset = Level_2_post
# Specify paths
session = sessions_subset[2]
session_path = os.path.join(hardrive_path,session)
mua_path = os.path.join(session_path +'/MUA_250_to_2000.bin')
save_path = os.path.join(hardrive_path, session_path +'/mua')
# Probe from superficial to deep electrode, left side is shank 11 (far back)
probe_map=np.array([[103,78,81,118,94,74,62,24,49,46,7],
[121,80,79,102,64,52,32,8,47,48,25],
[123,83,71,104,66,84,38,6,26,59,23],
[105,69,100,120,88,42,60,22,57,45,5],
[101,76,89,127,92,67,56,29,4,37,9],
[119,91,122,99,70,61,34,1,39,50,27],
[112,82,73,97,68,93,40,3,28,51,21],
[107,77,98,125,86,35,58,31,55,44,14],
[110,113,87,126,90,65,54,20,2,43,11],
[117,85,124,106,72,63,36,0,41,15,16],
[114,111,75,96,116,95,33,10,30,53,17]])
flatten_probe = probe_map.flatten()
# Load MUA (binned to frames)
mua_flat_f32 = np.fromfile(mua_path, dtype=np.float32)
mua_channels = np.reshape(mua_flat_f32, (121,-1))
mua = np.reshape(mua_channels, (11,11,-1))
# Compute full movie median (as baseline)
mua_median = np.median(mua, 2)
# Compute full movie stdev (to z-score)
mua_std = np.std(mua, 2)
# Subtract median (zero baseline) and divide by std (z-score)
mua_zeroed = np.zeros(np.shape(mua))
mua_z_score = np.zeros(np.shape(mua))
for r in range(11):
for c in range(11):
mua_zeroed[r,c,:] = (mua[r,c,:] - mua_median[r,c])
mua_z_score[r,c,:] = (mua[r,c,:] - mua_median[r,c]) / mua_std[r,c]
# Plot z-score avg
mua_avg = np.mean(mua_z_score, 2)
# Get events of intererest indices
touch_path = os.path.join(session_path + '/events/RatTouchBall.csv')
reward_path = os.path.join(session_path + '/events/TrialEnd.csv')
ball_on_path = os.path.join(session_path + '/events/BallOn.csv')
video_csv = os.path.join(session_path + '/Video.csv')
trial_start_path = os.path.join(session_path + '/events/TrialStart.csv')
video_time = behaviour.timestamp_CSV_to_pandas(video_csv)
touch_time = behaviour.timestamp_CSV_to_pandas(touch_path)
reward_time = behaviour.timestamp_CSV_to_pandas(reward_path)
ball_time = behaviour.timestamp_CSV_to_pandas(ball_on_path)
trial_time = behaviour.timestamp_CSV_to_pandas(trial_start_path)
touching_light = behaviour.closest_timestamps_to_events(video_time, touch_time)
reward = behaviour.closest_timestamps_to_events(video_time, reward_time)
ball_on = behaviour.closest_timestamps_to_events(video_time, ball_time)
start = behaviour.closest_timestamps_to_events(video_time, trial_time)
events_list = [touching_light, reward]  # ball_noticed removed from this list
# Average around event
events = events_list[0]
mua_event0_avg = np.mean(mua_zeroed[:, :, events], 2)
events = events_list[1]
mua_event1_avg = np.mean(mua_zeroed[:, :, events], 2)
#events = events_list[2]
#mua_event2_avg = np.mean(mua_zeroed[:, :, events], 2)
# Display
plt.figure(figsize=(13,4))
plt.subplot(1,2,1)
plt.imshow(mua_event0_avg, vmin=-2.0, vmax=7)
plt.title('touching_light')
plt.colorbar(fraction=0.046, pad=0.04)
#plt.colorbar()
plt.subplot(1,2,2)
plt.imshow(mua_event1_avg, vmin=-2.0, vmax=7)
plt.colorbar(fraction=0.046, pad=0.04)
plt.title('reward')
plt.show()
plt.subplot(1,3,3)
plt.imshow(mua_event1_avg, vmin=-1.0, vmax=10.0)
plt.title('ball_on')
plt.subplot(1,3,3)
# Save "movie around event"
for i in range(-240, 240):
# Average around event (shifted by i)
events = np.array(events_list[0]) + i
mua_event0_avg = np.mean(mua_zeroed[:, :, events], 2)
events = np.array(events_list[1]) + i
mua_event1_avg = np.mean(mua_zeroed[:, :, events], 2)
events = np.array(events_list[2]) + i
mua_event2_avg = np.mean(mua_zeroed[:, :, events], 2)
events = np.array(events_list[2]) - 20000 + i
mua_event3_avg = np.mean(mua_zeroed[:, :, events], 2)
# Create figure
plt.figure()
plt.subplot(2,2,1)
plt.imshow(mua_event0_avg, vmin=-1.0, vmax=12.0)
plt.subplot(2,2,2)
plt.imshow(mua_event1_avg, vmin=-1.0, vmax=12.0)
plt.subplot(2,2,3)
plt.imshow(mua_event2_avg, vmin=-1.0, vmax=12.0)
plt.subplot(2,2,4)
plt.imshow(mua_event3_avg, vmin=-1.0, vmax=12.0)
# Save figure
figure_path = save_path + '/event_avg_' + str(i+1000) + '.png'
plt.savefig(figure_path)
plt.close('all')
events_list = [touching_light,reward,ball_on,start]
# Average around event
events = events_list[0]
mua_event0_avg = np.mean(mua_zeroed[:, :, events], 2)
events = events_list[1]
mua_event1_avg = np.mean(mua_zeroed[:, :, events], 2)
events = events_list[2]
mua_event2_avg = np.mean(mua_zeroed[:, :, events], 2)
event_1 = np.array(events_list[0])
event_2 = np.array(events_list[1])
#
##if the lenght of the trial start is longer remove the last value
#if len(event_1)>len(event_2):
# event_1 = event_1[:-1]
event_diff = abs(event_2 - event_1)
final_array= np.zeros((11,11,len(event_1)))
#i=15 has a problem it gives empty array.
for i, idx in enumerate(event_1):
test_avg = mua_zeroed[:, :, idx:idx+ event_diff[i]]
avg = np.nanmean(test_avg,2)
final_array[:,:,i]=avg
final_avg = np.nanmean(final_array,2)
label_ball_noticed = 'D:/ShaderNavigator/annotations/AK_33.2/2018_04_29-15_43/Video.csv'
label = np.genfromtxt(label_ball_noticed,dtype=str,delimiter = ',', usecols = 0)
ball_noticed = np.genfromtxt(label_ball_noticed,dtype=int,delimiter = ',', usecols = 1)
yes_trials = []
for l,lab in enumerate(label):
if lab == 'yes':
yes_trials.append(l)
test_frames = frame[yes_trials]
event_1_test= event_1[yes_trials]
event_2_test= event_2[yes_trials]
#ball on to noticed
event_1 = np.array(events_list[4])
event_2 = np.array(events_list[0])
event_1 = event_1[yes_trials]
event_2 = event_2[yes_trials]
#
##if the lenght of the trial start is longer remove the last value
#if len(event_1)>len(event_2):
# event_1 = event_1[:-1]
event_diff = abs(event_2 - event_1)
final_array= np.zeros((11,11,len(event_1)))
#i=15 has a problem it gives empty array.
for i, idx in enumerate(event_1):
test_avg = mua_zeroed[:, :, idx:idx+ event_diff[i]]
avg = np.nanmean(test_avg,2)
final_array[:,:,i]=avg
final_avg = np.nanmean(final_array,2)
####################################################
events_list = [touching_light, reward, ball_on, start, ball_noticed]
event_1 = np.array(events_list[0])
offset = 240
new_mua = np.zeros((121,offset*2))
mua_zeroed_reshaped = np.reshape(mua_zeroed, (121,-1))
for ch, channel in enumerate(flatten_probe):
intermediate_ch_mua = np.zeros((len(event_1),offset*2))
for i, idx in enumerate(event_1):
test = mua_zeroed_reshaped[ch, idx-offset:idx+offset]
intermediate_ch_mua[i,:]= test
avg_mua = np.nanmean(intermediate_ch_mua,0)
new_mua[ch,:]=avg_mua
for i, ch in enumerate(new_mua):
plt.plot( ch+ i*2)
plt.figure()
avg_new_mua=np.mean(new_mua,0)
#noticed to touch
|
kampff-lab/Pac-Rat
|
scripts/ephys/MUA_heatmap.py
|
MUA_heatmap.py
|
py
| 7,674 |
python
|
en
|
code
| 0 |
github-code
|
50
|
1156440255
|
from molsysmt._private.exceptions import *
from molsysmt.api_forms.common_gets import *
import numpy as np
from molsysmt import puw
from molsysmt.native.molecular_system import molecular_system_components
form_name='molsysmt.TrajectoryDict'
from_type='class'
is_form={
}
info=["",""]
has = molecular_system_components.copy()
for ii in ['coordinates','box']:
has[ii]=True
def this_dict_is_TrajectoryDict(item):
from molsysmt.native.trajectory_dict import is_trajectory_dict
return is_trajectory_dict(item)
def to_molsysmt_Structures(item, molecular_system=None, atom_indices='all', structure_indices='all'):
from molsysmt.native.io.trajectory import from_TrajectoryDict as TrajectoryDict_to_molsysmt_Structures
tmp_item, tmp_molecular_system = TrajectoryDict_to_molsysmt_Structures(item, None, atom_indices=atom_indices, structure_indices=structure_indices)
return tmp_item, tmp_molecular_system
def to_file_trjpk(item, molecular_system=None, atom_indices='all', structure_indices='all', output_filename=None):
import pickle as pickle
# lengths with nm values and times in ps
if atom_indices is 'all':
if item['coordinates'] is not None:
n_atoms = item['coordinates'].shape[1]
else:
n_atoms = 0
else:
n_atoms = atom_indices.shape[0]
if structure_indices is 'all':
if item['coordinates'] is not None:
n_structures = item['coordinates'].shape[0]
        elif item['box'] is not None:
n_structures = item['box'].shape[0]
        elif item['time'] is not None:
n_structures = item['time'].shape[0]
else:
n_structures = 0
else:
n_structures = structure_indices.shape[0]
fff = open(output_filename, 'wb')
pickle.dump(n_atoms, fff)
pickle.dump(n_structures, fff)
if 'coordinates' in item:
if item['coordinates'] is not None:
coordinates = item['coordinates']
if structure_indices is not 'all':
coordinates = coordinates[structure_indices,:,:]
elif atom_indices is not 'all':
coordinates = coordinates[:,atom_indices,:]
coordinates = puw.get_value(coordinates, to_unit='nm')
else:
coordinates = None
else:
coordinates = None
pickle.dump(coordinates, fff)
del(coordinates)
if 'box' in item:
if item['box'] is not None:
box = item['box']
if structure_indices is not 'all':
box = box[structure_indices,:,:]
box = puw.get_value(box, to_unit='nm')
else:
box = None
else:
box = None
pickle.dump(box, fff)
del(box)
if 'time' in item:
if item['time'] is not None:
time = item['time']
if structure_indices is not 'all':
time = time[structure_indices]
time = puw.get_value(time, to_unit='ps')
else:
time = None
else:
time = None
pickle.dump(time, fff)
del(time)
if 'step' in item:
if item['step'] is not None:
step = item['step']
if structure_indices is not 'all':
step = step[structure_indices]
else:
step = None
else:
step = None
pickle.dump(step, fff)
del(step)
fff.close()
tmp_item = output_filename
if molecular_system is not None:
tmp_molecular_system = molecular_system.combine_with_items(tmp_item)
else:
tmp_molecular_system = None
return tmp_item, tmp_molecular_system
def to_molsysmt_TrajectoryDict(item, molecular_system=None, atom_indices='all', structure_indices='all', copy_if_all=True):
tmp_molecular_system = None
if (atom_indices is 'all') and (structure_indices is 'all'):
if copy_if_all:
tmp_item = extract(item)
if molecular_system is not None:
tmp_molecular_system = molecular_system.combine_with_items(tmp_item)
else:
tmp_item = item
if molecular_system is not None:
tmp_molecular_system = molecular_system
else:
tmp_item = extract(item, atom_indices=atom_indices, structure_indices=structure_indices)
if molecular_system is not None:
tmp_molecular_system = molecular_system.combine_with_items(tmp_item, atom_indices=atom_indices, structure_indices=structure_indices)
return tmp_item, tmp_molecular_system
def extract(item, atom_indices='all', structure_indices='all'):
if (atom_indices is 'all') and (structure_indices is 'all'):
from copy import deepcopy
tmp_item = deepcopy(item)
else:
raise NotImplementedError()
return tmp_item
def merge(item_1, item_2):
raise NotImplementedError
def add(to_item, item):
raise NotImplementedError
def append_structures(item, step=None, time=None, coordinates=None, box=None):
raise NotImplementedError()
def concatenate_structures(item, step=None, time=None, coordinates=None, box=None):
raise NotImplementedError
###### Get
## atom
def get_atom_id_from_atom(item, indices='all', structure_indices='all'):
raise NotWithThisFormError()
def get_atom_name_from_atom(item, indices='all', structure_indices='all'):
raise NotWithThisFormError()
def get_atom_type_from_atom(item, indices='all', structure_indices='all'):
raise NotWithThisFormError()
def get_group_index_from_atom (item, indices='all', structure_indices='all'):
raise NotWithThisFormError()
def get_component_index_from_atom (item, indices='all', structure_indices='all'):
raise NotWithThisFormError()
def get_chain_index_from_atom (item, indices='all', structure_indices='all'):
raise NotWithThisFormError()
def get_molecule_index_from_atom (item, indices='all', structure_indices='all'):
raise NotWithThisFormError()
def get_entity_index_from_atom (item, indices='all', structure_indices='all'):
raise NotWithThisFormError()
def get_inner_bonded_atoms_from_atom (item, indices='all', structure_indices='all'):
raise NotWithThisFormError()
def get_n_inner_bonds_from_atom (item, indices='all', structure_indices='all'):
raise NotWithThisFormError()
def get_coordinates_from_atom(item, indices='all', structure_indices='all'):
tmp_coordinates = item['coordinates']
if structure_indices is not 'all':
tmp_coordinates = tmp_coordinates[structure_indices,:,:]
if indices is not 'all':
tmp_coordinates = tmp_coordinates[:,indices,:]
return tmp_coordinates
## group
def get_group_id_from_group(item, indices='all', structure_indices='all'):
raise NotWithThisFormError()
def get_group_name_from_group(item, indices='all', structure_indices='all'):
raise NotWithThisFormError()
def get_group_type_from_group(item, indices='all', structure_indices='all'):
raise NotWithThisFormError()
## component
def get_component_id_from_component (item, indices='all', structure_indices='all'):
raise NotWithThisFormError()
def get_component_name_from_component (item, indices='all', structure_indices='all'):
raise NotWithThisFormError()
def get_component_type_from_component (item, indices='all', structure_indices='all'):
raise NotWithThisFormError()
## molecule
def get_molecule_id_from_molecule (item, indices='all', structure_indices='all'):
raise NotWithThisFormError()
def get_molecule_name_from_molecule (item, indices='all', structure_indices='all'):
raise NotWithThisFormError()
def get_molecule_type_from_molecule (item, indices='all', structure_indices='all'):
raise NotWithThisFormError()
## chain
def get_chain_id_from_chain (item, indices='all', structure_indices='all'):
raise NotWithThisFormError()
def get_chain_name_from_chain (item, indices='all', structure_indices='all'):
raise NotWithThisFormError()
def get_chain_type_from_chain (item, indices='all', structure_indices='all'):
raise NotWithThisFormError()
## entity
def get_entity_id_from_entity (item, indices='all', structure_indices='all'):
raise NotWithThisFormError()
def get_entity_name_from_entity (item, indices='all', structure_indices='all'):
raise NotWithThisFormError()
def get_entity_type_from_entity (item, indices='all', structure_indices='all'):
raise NotWithThisFormError()
## system
def get_n_atoms_from_system(item, indices='all', structure_indices='all'):
output = None
if 'coordinates' in item:
output = item['coordinates'].shape[1]
return output
def get_n_groups_from_system(item, indices='all', structure_indices='all'):
raise NotWithThisFormError()
def get_n_components_from_system(item, indices='all', structure_indices='all'):
raise NotWithThisFormError()
def get_n_chains_from_system(item, indices='all', structure_indices='all'):
raise NotWithThisFormError()
def get_n_molecules_from_system(item, indices='all', structure_indices='all'):
raise NotWithThisFormError()
def get_n_entities_from_system(item, indices='all', structure_indices='all'):
raise NotWithThisFormError()
def get_n_bonds_from_system(item, indices='all', structure_indices='all'):
raise NotWithThisFormError()
def get_box_from_system(item, indices='all', structure_indices='all'):
output=None
if 'box' in item:
len_shape = len(item['box'].shape)
if len_shape==3:
if structure_indices is 'all':
output=item['box']
else:
output=item['box'][structure_indices,:,:]
elif len_shape==2:
if structure_indices is 'all':
n_structures=get_n_structures_from_system(item)
else:
n_structures=len(structure_indices)
output=np.tile(item['box'], (n_structures, 1, 1))
return output
def get_box_shape_from_system(item, indices='all', structure_indices='all'):
from molsysmt.pbc import box_shape_from_box_vectors
output = None
box = get_box_from_system(item, indices=indices, structure_indices=structure_indices)
if box is not None:
output = box_shape_from_box_vectors(box)
return output
def get_box_lengths_from_system(item, indices='all', structure_indices='all'):
from molsysmt.pbc import box_lengths_from_box_vectors
output = None
box = get_box_from_system(item, indices=indices, structure_indices=structure_indices)
if box is not None:
output = box_lengths_from_box_vectors(box)
return output
def get_box_angles_from_system(item, indices='all', structure_indices='all'):
from molsysmt.pbc import box_angles_from_box_vectors
output = None
box = get_box_from_system(item, indices=indices, structure_indices=structure_indices)
if box is not None:
output = box_angles_from_box_vectors(box)
return output
def get_box_volume_from_system(item, indices='all', structure_indices='all'):
from molsysmt.pbc import box_volume_from_box_vectors
output = None
box = get_box_from_system(item, indices=indices, structure_indices=structure_indices)
if box is not None:
output = box_volume_from_box_vectors(box)
return output
def get_time_from_system(item, indices='all', structure_indices='all'):
output = None
    if structure_indices == 'all':
output = item['time']
else:
output = item['time'][structure_indices]
return output
def get_step_from_system(item, indices='all', structure_indices='all'):
output = None
    if structure_indices == 'all':
output = item['step']
else:
output = item['step'][structure_indices]
return output
def get_n_structures_from_system(item, indices='all', structure_indices='all'):
output = None
    if structure_indices == 'all':
if 'coordinates' in item:
output=item['coordinates'].shape[0]
elif 'box' in item:
len_shape = len(item['box'].shape)
if len_shape==3:
output=item['box'].shape[0]
elif len_shape==2:
output=1
else:
output=structure_indices.shape[0]
return output
def get_bonded_atoms_from_system(item, indices='all', structure_indices='all'):
raise NotWithThisFormError()
## bond
def get_bond_order_from_bond(item, indices='all', structure_indices='all'):
raise NotWithThisFormError()
def get_bond_type_from_bond(item, indices='all', structure_indices='all'):
raise NotWithThisFormError()
def get_atom_index_from_bond(item, indices='all', structure_indices='all'):
raise NotWithThisFormError()
###### Set
## atom
def set_coordinates_to_atom(item, indices='all', structure_indices='all', value=None):
length_unit = puw.get_unit(item['coordinates'])
value = puw.convert(value, to_unit=length_unit)
    if indices == 'all':
        if structure_indices == 'all':
item['coordinates']=value
else:
item['coordinates'][structure_indices,:,:]=value
else:
        if structure_indices == 'all':
item['coordinates'][:,indices,:]=value
else:
item['coordinates'][np.ix_(indices,structure_indices)]=value
pass
## system
def set_box_to_system(item, indices='all', structure_indices='all', value=None):
raise NotImplementedError
def set_coordinates_to_system(item, indices='all', structure_indices='all', value=None):
raise NotImplementedError
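# Example usage (illustrative sketch): the accessors above act on a dict-like trajectory item
# whose 'coordinates' entry is a (n_structures, n_atoms, 3) quantity. puw (pyunitwizard) and np
# are assumed to be imported at the top of this module, as their use above implies.
if __name__ == '__main__':
    item = {'coordinates': puw.quantity(np.zeros((10, 100, 3)), 'nm')}
    print(get_n_atoms_from_system(item))        # -> 100
    print(get_n_structures_from_system(item))   # -> 10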
|
uibcdf/MolSysMT
|
attic/api_forms/api_molsysmt_TrajectoryDict.py
|
api_molsysmt_TrajectoryDict.py
|
py
| 13,530 |
python
|
en
|
code
| 11 |
github-code
|
50
|
19317832681
|
from sklearn.metrics import auc, roc_curve
import matplotlib.pyplot as plt
def plot_roc_auc(y_true, y_pred, savepath=None):
if type(y_true) in (list, tuple) and type(y_pred) in (list, tuple):
assert len(y_true) == len(y_pred)
if len(y_true) > 5:
raise ValueError('Up to 5 lines supported.')
colors = ('b', 'g', 'c', 'm', 'y')
for i, (split_y_true, split_y_pred) in enumerate(zip(y_true, y_pred)):
fpr, tpr, threshold = roc_curve(split_y_true, split_y_pred)
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, label=f'Split {i}, AUC = %0.4f' % roc_auc, color=colors[i])
else:
fpr, tpr, threshold = roc_curve(y_true, y_pred)
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, label='AUC = %0.4f' % roc_auc)
plt.title('Receiver Operating Characteristic')
plt.legend(loc='lower right')
plt.plot([0, 1], [0, 1], 'r--')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
if savepath is not None:
plt.savefig(savepath)
else:
plt.show()
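# Example usage (illustrative): a single ROC curve from random scores on random binary labels.
if __name__ == '__main__':
    import numpy as np
    rng = np.random.default_rng(0)
    y_true = rng.integers(0, 2, size=200)
    y_score = rng.random(200)
    plot_roc_auc(y_true, y_score)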
|
mmikolajczak/recommendation_system_hetrec2011_movielens
|
recommendations_system/experiments_scripts/plotting.py
|
plotting.py
|
py
| 1,130 |
python
|
en
|
code
| 5 |
github-code
|
50
|
73752449436
|
from django.urls import path, include
from .views import CategoryList, CategoryDetail, EventList, EventDetail, SeatList, SeatDetail, SeatCategoryList, SeatCategoryDetail, EventImageList, EventImageDetail, EventListForCategory
from apps.accounts.signals import create_category_api, create_event_api
from . import views
accounts_urlpatterns = [
path('api/v1/', include('djoser.urls')),
path('api/v1/', include('djoser.urls.authtoken')),
path('categories/', CategoryList.as_view(), name='category-list'),
path('categories/<int:pk>/', CategoryDetail.as_view(), name='category-detail'),
path('events/', EventList.as_view(), name='event-list'),
path('events/<int:pk>/', EventDetail.as_view(), name='event-detail'),
path('seats/', SeatList.as_view(), name='seats-list'),
path('seats/<int:pk>/', SeatDetail.as_view(), name='seats-detail'),
path('seatcategory/', SeatCategoryList.as_view(), name='seatcategory-list'),
path('seatcategory/<int:pk>/', SeatCategoryDetail.as_view(), name='seatcategory-detail'),
path('eventimage/', EventImageList.as_view(), name='EventImage-list'),
path('EventImage/<int:pk>/', EventImageDetail.as_view(), name='EventImage-detail'),
path('create_category_api/', create_category_api, name='create-category-api'),
path('create_event_api/', create_event_api, name='create_event_api'),
path('categories/<int:category_id>/events/', EventListForCategory.as_view(), name='category-events-list'),
path('search-events/', views.search_events, name='search_events'),
]
|
FakirHerif/react-django
|
backend/server/apps/accounts/urls.py
|
urls.py
|
py
| 1,557 |
python
|
en
|
code
| 2 |
github-code
|
50
|
38239478292
|
## 1. The Range ##
import pandas as pd
houses = pd.read_table('AmesHousing_1.txt')
def value_range(a):
    # renamed from range() to avoid shadowing the built-in, which sections 8 and 9 rely on
    return max(a) - min(a)
k = houses['Yr Sold'].value_counts().reset_index()
range_by_year = {}
for i in k['index']:
    hou = houses[houses['Yr Sold'] == i]
    range_by_year[i] = value_range(hou['SalePrice'])
one = False
two = True
## 2. The Average Distance ##
C = [1,1,1,1,1,1,1,1,1,21]
sum1 =0
for i in C :
sum1 = sum1 + i
mean = sum1/len(C)
dist =0
for i in C :
dist = dist + (i-mean)
avg_distance = dist/len(C)
print(avg_distance)
## 3. Mean Absolute Deviation ##
C = [1,1,1,1,1,1,1,1,1,21]
sum1 =0
for i in C :
sum1 = sum1 + i
mean = sum1/len(C)
dist =0
for i in C :
dist = dist + abs(i-mean)
mad = dist/len(C)
print(mad)
## 4. Variance ##
C = [1,1,1,1,1,1,1,1,1,21]
def variancecal(a):
sum1 =0
for i in a :
sum1 = sum1 + i
mean = sum1/len(a)
squared_distance =[]
dist =0
for i in a :
squared_distance.append(abs(i-mean)**2)
return squared_distance
K= variancecal(C)
variance_C = sum(K)/len(K)
## 5. Standard Deviation ##
from math import sqrt
C = [1,1,1,1,1,1,1,1,1,21]
def StdDeviationcal(a):
sum1 =0
for i in a :
sum1 = sum1 + i
mean = sum1/len(a)
squared_distance =[]
dist =0
for i in a :
squared_distance.append((i-mean)**2)
vari = sum(squared_distance)/len(squared_distance)
return sqrt(vari)
standard_deviation_C = StdDeviationcal(C)
## 6. Average Variability Around the Mean ##
def standard_deviation(array):
reference_point = sum(array) / len(array)
distances = []
for value in array:
squared_distance = (value - reference_point)**2
distances.append(squared_distance)
variance = sum(distances) / len(distances)
return sqrt(variance)
k = houses['Yr Sold'].value_counts().reset_index()
range_by_year ={}
for i in k['index']:
hou = houses[houses['Yr Sold'] ==i]
range_by_year[i] = standard_deviation(hou['SalePrice'])
print(range_by_year)
greatest_variability = max(range_by_year, key = range_by_year.get)
lowest_variability = min(range_by_year , key =range_by_year.get)
## 7. A Measure of Spread ##
sample1 = houses['Year Built'].sample(50, random_state = 1)
sample2 = houses['Year Built'].sample(50, random_state = 2)
def standard_deviation(array):
reference_point = sum(array) / len(array)
distances = []
for value in array:
squared_distance = (value - reference_point)**2
distances.append(squared_distance)
variance = sum(distances) / len(distances)
return sqrt(variance)
import matplotlib.pyplot as plt
sample1.plot.hist()
plt.show()
sample2.plot.hist()
plt.show()
bigger_spread = 'sample 2'
st_dev1 =standard_deviation(sample1)
st_dev2 = standard_deviation(sample2)
## 8. The Sample Standard Deviation ##
def standard_deviation(array):
reference_point = sum(array) / len(array)
distances = []
for value in array:
squared_distance = (value - reference_point)**2
distances.append(squared_distance)
variance = sum(distances) / len(distances)
return sqrt(variance)
import matplotlib.pyplot as plt
list_std=[]
for i in range(5000):
sam = houses['SalePrice'].sample(10, random_state =i)
list_std.append(standard_deviation(sam))
plt.hist(list_std)
plt.axvline(standard_deviation(houses['SalePrice']))
plt.show()
## 9. Bessel's Correction ##
def standard_deviation(array):
reference_point = sum(array) / len(array)
distances = []
for value in array:
squared_distance = (value - reference_point)**2
distances.append(squared_distance)
variance = sum(distances) / (len(distances)-1)
return sqrt(variance)
import matplotlib.pyplot as plt
st_devs = []
for i in range(5000):
sample = houses['SalePrice'].sample(10, random_state = i)
st_dev = standard_deviation(sample)
st_devs.append(st_dev)
plt.hist(st_devs)
plt.axvline(standard_deviation(houses['SalePrice']))
## 10. Standard Notation ##
sample = houses.sample(100, random_state = 1)
from numpy import std, var
pandas_stdev = sample['SalePrice'].std(ddof = 1) # default ddof = 1
numpy_stdev = std(sample['SalePrice'], ddof = 1) # default ddof = 0
equal_stdevs = pandas_stdev == numpy_stdev
pandas_var = sample['SalePrice'].var(ddof = 1) # default ddof = 1
numpy_var = var(sample['SalePrice'], ddof = 1) #default ddof = 0
equal_vars = pandas_var == numpy_var
## 11. Sample Variance — Unbiased Estimator ##
population = [0, 3, 6]
samples = [[0,3], [0,6],
[3,0], [3,6],
[6,0], [6,3]
]
from numpy import var, std
pop_var = var(population, ddof = 0)
pop_std = std(population, ddof = 0)
st_devs = []
variances = []
for sample in samples:
st_devs.append(std(sample, ddof = 1))
variances.append(var(sample, ddof = 1))
mean_std = sum(st_devs) / len(st_devs)
mean_var = sum(variances) / len(variances)
equal_stdev = pop_std == mean_std
equal_var = pop_var == mean_var
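# Cross-check (illustrative): the final standard_deviation() defined above divides by n - 1
# (Bessel's correction), so on the same data it should match numpy's std with ddof=1.
check_data = [1, 1, 1, 1, 1, 1, 1, 1, 1, 21]
assert abs(standard_deviation(check_data) - std(check_data, ddof=1)) < 1e-9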
|
nemkothari/Statistics-Intermediate
|
Measures of Variability-308.py
|
Measures of Variability-308.py
|
py
| 5,124 |
python
|
en
|
code
| 0 |
github-code
|
50
|
38585374633
|
import os
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
import datetime
from common.buffer import PrioritizedBuffer
import torch.nn as nn
import torch.autograd as autograd
import torch.nn.functional as F
import random
import gym
import numpy as np
from tqdm import tqdm
import torch
import matplotlib.pyplot as plt
import rl_utils
class ConvDQN(nn.Module):
    def __init__(self, input_dim, output_dim):
        super(ConvDQN, self).__init__()
        self.input_dim = input_dim  # expected to be the observation shape, e.g. (channels, H, W)
        self.output_dim = output_dim
        self.conv = nn.Sequential(
            nn.Conv2d(self.input_dim[0], 32, kernel_size=8, stride=4),
            nn.ReLU(),
            nn.Conv2d(32, 64, kernel_size=4, stride=2),
            nn.ReLU(),
            nn.Conv2d(64, 64, kernel_size=3, stride=1),
            nn.ReLU()
        )
        # the conv stack must exist before its flattened output size can be measured
        self.fc_input_dim = self.feature_size()
        self.fc = nn.Sequential(
            nn.Linear(self.fc_input_dim, 128),
            nn.ReLU(),
            nn.Linear(128, 256),
            nn.ReLU(),
            nn.Linear(256, self.output_dim)
        )
    def forward(self, state):
        features = self.conv(state)
        features = features.view(features.size(0), -1)
        qvals = self.fc(features)
        return qvals
    def feature_size(self):
        # run a dummy forward pass through the conv layers to get the flattened feature size
        return self.conv(autograd.Variable(torch.zeros(1, *self.input_dim))).view(1, -1).size(1)
class Qnet(torch.nn.Module):
    ''' Q-network with a single hidden layer '''
def __init__(self, state_dim, action_dim):
super(Qnet, self).__init__()
self.fc1 = torch.nn.Linear(state_dim, 64)
self.fc2 = torch.nn.Linear(64, action_dim)
def forward(self, x):
x = F.relu(self.fc1(x)) # 隐藏层使用ReLU激活函数
return self.fc2(x)
class PERAgent:
def __init__(self, env, use_conv=False, learning_rate=3e-4, gamma=0.99, buffer_size=10000):
self.env = env
self.gamma = gamma
self.replay_buffer = PrioritizedBuffer(buffer_size)
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
if use_conv:
            self.model = ConvDQN(self.env.observation_space.shape, env.action_space.n).to(self.device)
else:
self.model = Qnet(self.env.observation_space.shape[0], env.action_space.n).to(self.device)
self.optimizer = torch.optim.Adam(self.model.parameters())
self.MSE_loss = nn.MSELoss()
    def get_action(self, state, eps=0.20):
        # epsilon-greedy: explore with probability eps, otherwise act greedily.
        # (eps defaults to 0.20 here so the training loop below, which never passes eps, still explores.)
        if np.random.rand() < eps:
            return self.env.action_space.sample()
        state = torch.FloatTensor(state).float().unsqueeze(0).to(self.device)
        qvals = self.model.forward(state)
        action = np.argmax(qvals.cpu().detach().numpy())
        return action
def _sample(self, batch_size):
return self.replay_buffer.sample(batch_size)
def _compute_TDerror(self, batch_size):
transitions, idxs, IS_weights = self._sample(batch_size)
states, actions, rewards, next_states, dones = transitions
states = torch.FloatTensor(states).to(self.device)
actions = torch.LongTensor(actions).to(self.device)
rewards = torch.FloatTensor(rewards).to(self.device)
next_states = torch.FloatTensor(next_states).to(self.device)
dones = torch.FloatTensor(dones).to(self.device)
IS_weights = torch.FloatTensor(IS_weights).to(self.device)
curr_Q = self.model.forward(states).gather(1, actions.unsqueeze(1))
curr_Q = curr_Q.squeeze(1)
next_Q = self.model.forward(next_states)
max_next_Q = torch.max(next_Q, 1)[0]
        # terminal states carry no bootstrapped value; detach stops gradients flowing through the target
        expected_Q = (rewards.squeeze(1) + self.gamma * max_next_Q * (1 - dones.view(-1))).detach()
td_errors = torch.pow(curr_Q - expected_Q, 2) * IS_weights
return td_errors, idxs
def update(self, batch_size):
td_errors, idxs = self._compute_TDerror(batch_size)
# update model
td_errors_mean = td_errors.mean()
self.optimizer.zero_grad()
td_errors_mean.backward()
self.optimizer.step()
# update priorities
for idx, td_error in zip(idxs, td_errors.cpu().detach().numpy()):
self.replay_buffer.update_priority(idx, td_error)
env_id = "CartPole-v1"
env_name = "CartPole-v1"
num_episodes = 1000
batch_size = 64
env = gym.make(env_id)
agent = PERAgent(env, use_conv=False)
return_list = []
for i in range(10):
with tqdm(total=int(num_episodes / 10), desc='Iteration %d' % i) as pbar:
for i_episode in range(int(num_episodes / 10)):
episode_return = 0
state = env.reset()
done = False
while not done:
action = agent.get_action(state)
next_state, reward, done, _ = env.step(action)
agent.replay_buffer.push(state, action, reward, next_state, done)
state = next_state
episode_return += reward
                # only start training the Q-network once the buffer holds more than batch_size transitions
if len(agent.replay_buffer) > batch_size:
agent.update(batch_size)
return_list.append(episode_return)
if (i_episode + 1) % 10 == 0:
pbar.set_postfix({
'episode':
'%d' % (num_episodes / 10 * i + i_episode + 1),
'return':
'%.3f' % np.mean(return_list[-10:])
})
pbar.update(1)
algorithm = "PER_DQN"
fileName = "../result/{}_{}_{}.npy".format(algorithm,env_name,datetime.datetime.now().strftime("%Y-%m-%d-%H-%M"))
np.save(fileName,return_list)
episodes_list = list(range(len(return_list)))
plt.plot(episodes_list, return_list)
plt.xlabel('Episodes')
plt.ylabel('Returns')
plt.title('PerDQN on {}'.format(env_id))
plt.show()
mv_return = rl_utils.moving_average(return_list, 9)
plt.plot(episodes_list, mv_return)
plt.xlabel('Episodes')
plt.ylabel('Returns')
plt.title('PerDQN moving_average on {}'.format(env_id))
plt.show()
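# The agent above only relies on PrioritizedBuffer exposing push / sample / update_priority / __len__,
# with sample(batch_size) returning ((states, actions, rewards, next_states, dones), idxs, IS_weights).
# Below is a minimal proportional-prioritization stand-in with that interface; it is an illustrative
# sketch only, not the original common.buffer implementation.
class SimplePrioritizedBuffer:
    def __init__(self, capacity, alpha=0.6, beta=0.4):
        self.capacity, self.alpha, self.beta = capacity, alpha, beta
        self.storage, self.priorities, self.pos = [], [], 0
    def push(self, state, action, reward, next_state, done):
        # new transitions get the current maximum priority so they are sampled at least once
        max_p = max(self.priorities, default=1.0)
        if len(self.storage) < self.capacity:
            self.storage.append((state, action, reward, next_state, done))
            self.priorities.append(max_p)
        else:
            self.storage[self.pos] = (state, action, reward, next_state, done)
            self.priorities[self.pos] = max_p
        self.pos = (self.pos + 1) % self.capacity
    def sample(self, batch_size):
        probs = np.array(self.priorities) ** self.alpha
        probs /= probs.sum()
        idxs = np.random.choice(len(self.storage), batch_size, p=probs)
        # importance-sampling weights, normalised by their maximum
        weights = (len(self.storage) * probs[idxs]) ** (-self.beta)
        weights /= weights.max()
        states, actions, rewards, next_states, dones = zip(*(self.storage[i] for i in idxs))
        transitions = (np.array(states), actions, np.array(rewards)[:, None], np.array(next_states), dones)
        return transitions, idxs, weights
    def update_priority(self, idx, td_error):
        self.priorities[idx] = float(abs(td_error)) + 1e-5
    def __len__(self):
        return len(self.storage)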
|
cgl-dong/my_rl
|
hand/PER_DQN2.py
|
PER_DQN2.py
|
py
| 5,999 |
python
|
en
|
code
| 0 |
github-code
|
50
|
28371073407
|
"""Processing environment to store, retrieve, and add aliases."""
from __future__ import annotations
import os.path
Aliases: dict[str, list[str]] = {}
alias_relative_file: str = "../../config/alias.txt"
def alias_exists_for(name: str):
return name in Aliases.keys()
def command_for_alias(name: str):
return Aliases[name].copy()
def create_alias(name: str, command: list[str]):
"""Create a new alias. \n
Parameters:
`name`: the name of the alias
`command`: the command that the alias runs"""
if len(name) == 0: raise ValueError("Malformed or missing name: %r" % name)
if len(command) == 0: raise ValueError("Malformed or missing command: %r" % command)
Aliases[name] = command
def display_all() -> str:
"""Display all known aliases."""
data = ""
    longest_alias = max((len(a) for a in Aliases), default=0) + 2  # default guards against an empty alias table
for alias, command in Aliases.items():
data += (alias + ":").ljust(longest_alias) + " ".join(command) + "\n"
return data
def _get_alias_file() -> str:
"""Determine where the aliases are being stored."""
full_file_path = os.path.dirname(__file__)
full_file_path = os.path.abspath(
os.path.join(full_file_path, alias_relative_file)
)
return full_file_path
def load_aliases():
"""Load the current alias data from its file."""
full_file_path = _get_alias_file()
with open(full_file_path, "r") as f:
for line in f.readlines():
if len(line) == 0: continue
alias_name, command = line.strip(" \n").split(" ", 1)
create_alias(alias_name, command.split())
def save_aliases():
"""Save the current alias data to the file."""
alias_file_data = ""
for key, value in Aliases.items():
alias_file_data += "%s %s\n" % (key, " ".join(value))
full_file_path = _get_alias_file()
with open(full_file_path, "w") as f:
f.write(alias_file_data)
load_aliases()
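# Example usage (illustrative; create_alias and display_all only touch the in-memory table,
# while save_aliases() would write it back to config/alias.txt):
if __name__ == "__main__":
    create_alias("gs", ["git", "status"])
    print(display_all())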
|
AD417/LEDControl
|
internals/command/Alias.py
|
Alias.py
|
py
| 1,940 |
python
|
en
|
code
| 0 |
github-code
|
50
|
40509752290
|
class Solution:
def maxProfit(self, prices: list[int]) -> int:
left = 0
right = 1
difference = 0
while right < len(prices):
currentProfit = prices[right] - prices[left]
if prices[left] < prices[right]:
difference = max(currentProfit, difference)
else:
left = right
right += 1
return difference
if __name__ == "__main__":
sol = Solution()
print(sol.maxProfit([7, 9, 5, 6, 3, 2]))
|
Yahoo002/pythonDSA
|
bestTimeToBuyAndSellStock.py
|
bestTimeToBuyAndSellStock.py
|
py
| 513 |
python
|
en
|
code
| 0 |
github-code
|
50
|
42243745118
|
import sys
from itertools import count
from collections import defaultdict
lines = sys.stdin.readlines()
target = lines.pop(-1).strip()
lines.pop(-1)
rep = defaultdict(list)
rev = defaultdict(list)
for line in lines:
f, t = line.strip().split(' => ')
rep[f].append(t)
rev[t].append(f)
dis = set()
def apply(tr, new, old, rep):
for f, ts in rep.items():
l = len(f)
i = 0
while True:
p = tr.find(f, i)
if p < 0:
break
for t in ts:
s = tr[:p] + t + tr[p+l:]
if not (s in old):
new.add(s)
i = p+1
return new
print(len(apply(target, set(), set(), rep)))
# Exploding
"""
prev = set([target])
old = set().union(prev)
for n in count(1):
new = set()
for t in prev:
apply(t, new, old, rev)
if 'e' in new:
print(n)
break
prev = new
old = old.union(prev)
"""
# Cheat code
mls = sum(map(lambda s: s.isupper(), target))
print(mls - (target.count("Rn") + target.count("Ar") + 2*target.count("Y") + 1))
|
ShuP1/AoC
|
src/2015/19.py
|
19.py
|
py
| 1,103 |
python
|
en
|
code
| 0 |
github-code
|
50
|
27991925985
|
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow.compat.v2.experimental import dtensor
from tf_keras import backend
from tf_keras.dtensor import integration_test_utils
from tf_keras.dtensor import layout_map as layout_map_lib
from tf_keras.dtensor import test_util
from tf_keras.optimizers import adam
from tf_keras.utils import tf_utils
class MnistTest(test_util.DTensorBaseTest):
def setUp(self):
super().setUp()
backend.enable_tf_random_generator()
tf_utils.set_random_seed(1337)
global_ids = test_util.create_device_ids_array((2,))
local_device_ids = np.ravel(global_ids).tolist()
mesh_dict = {
device: tf.experimental.dtensor.Mesh(
["batch"],
global_ids,
local_device_ids,
test_util.create_device_list((2,), device),
)
for device in ("CPU", "GPU", "TPU")
}
self.mesh = self.configTestMesh(mesh_dict)
def test_mnist_training(self):
layout_map = layout_map_lib.LayoutMap(self.mesh)
with layout_map.scope():
model = integration_test_utils.get_model()
optimizer = adam.Adam(learning_rate=0.001, mesh=self.mesh)
optimizer.build(model.trainable_variables)
train_losses = integration_test_utils.train_mnist_model_batch_sharded(
model,
optimizer,
self.mesh,
num_epochs=3,
steps_per_epoch=20,
global_batch_size=64,
)
# Make sure the losses are decreasing
self.assertEqual(train_losses, sorted(train_losses, reverse=True))
def test_model_fit(self):
layout_map = layout_map_lib.LayoutMap(self.mesh)
with layout_map.scope():
model = integration_test_utils.get_model()
optimizer = adam.Adam(learning_rate=0.001, mesh=self.mesh)
global_batch_size = 64
model.compile(
loss="CategoricalCrossentropy", optimizer=optimizer, metrics="acc"
)
train_ds, eval_ds = integration_test_utils.get_mnist_datasets(
integration_test_utils.NUM_CLASS, global_batch_size
)
def distribute_ds(dataset):
dataset = dataset.unbatch()
def _create_batch_layout(tensor_spec):
rank = len(tensor_spec.shape) + 1
return dtensor.Layout.batch_sharded(
self.mesh, batch_dim="batch", rank=rank
)
layouts = tf.nest.map_structure(
_create_batch_layout, dataset.element_spec
)
return dtensor.DTensorDataset(
dataset=dataset,
mesh=self.mesh,
layouts=layouts,
global_batch_size=global_batch_size,
dataset_already_batched=False,
batch_dim="batch",
prefetch=None,
tf_data_service_config=None,
)
train_ds = distribute_ds(train_ds)
eval_ds = distribute_ds(eval_ds)
model.fit(train_ds, steps_per_epoch=10)
model.evaluate(eval_ds, steps=10)
if __name__ == "__main__":
tf.test.main()
|
keras-team/tf-keras
|
tf_keras/dtensor/mnist_model_test.py
|
mnist_model_test.py
|
py
| 3,216 |
python
|
en
|
code
| 28 |
github-code
|
50
|
38383059729
|
import tensorflow as tf
import tensorflow_hub as hub
model_url = "https://tfhub.dev/tensorflow/efficientnet/lite0/feature-vector/2"
IMAGE_SHAPE = (224, 224)
layer = hub.KerasLayer(model_url, input_shape=IMAGE_SHAPE+(3,))
model = tf.keras.Sequential([layer])
import numpy as np
from tensorflow.keras.preprocessing import image
from scipy.spatial import distance
class Flt(float):
def __str__(self):
return '{:.2}'.format(self)
def vectorize(filename):
img1 = image.load_img(filename)
file = img1.convert('L').resize(IMAGE_SHAPE)
file = np.stack((file,)*3, axis=-1)
file = np.array(file)/255.0
    embedding = model.predict(file[np.newaxis, ...])  # 'model' is the hub-based Sequential defined above
embedding_np = np.array(embedding)
change1 = embedding_np.flatten()
return change1
def change(oldv,newv,threshold):
metric = 'cosine'
cosineDistance = distance.cdist([oldv], [newv], metric)[0]
cosd = str(Flt(cosineDistance))
outputstring = "From the Change Module\n"
if (cosineDistance>threshold):
outputstring += "Change Module detect big change in the variance of " + cosd+".\n"
else:
outputstring += "Change Module detect small change in the variance of " + cosd+".\n"
return outputstring
oldv = vectorize("input/0.jpg")
newv = vectorize("input/2.jpg")
str0 = change(oldv,newv,0.1)
print(str0)
|
RunhaiLin/jik
|
change.py
|
change.py
|
py
| 1,394 |
python
|
en
|
code
| 0 |
github-code
|
50
|
36114219663
|
import mindspore.dataset.vision.c_transforms as CV
import mindspore.dataset.transforms.c_transforms as C
import mindspore.common.dtype as mstype
import mindspore.dataset as ds
def create_dataset(data_path, batch_size=32):
"""
数据处理
Args:
dataset_path (str): 数据路径
batch_size (int): 批量大小
repeat_num (int): 数据重复次数
Returns:
Dataset对象
"""
# 载入数据集
data = ds.ImageFolderDataset(data_path)
# 打乱数据集
data = data.shuffle(buffer_size=1000)
# 定义算子
trans = [
CV.Decode(),
CV.Resize(256),
CV.CenterCrop(224),
# 使用训练backbone网络时用的mean和std
CV.Normalize(mean=(100.03388269705046, 94.57511259248079, 72.14921665851293),
std=(23.35913427414271, 20.336537235643164, 21.376613547858327)),
CV.HWC2CHW()
]
type_cast_op = C.TypeCast(mstype.int32)
# 算子运算
data = data.map(operations=trans, input_columns="image")
data = data.map(operations=type_cast_op, input_columns="label")
# 批处理
data = data.batch(batch_size, drop_remainder=True)
# 重复
data = data.repeat(1)
return data
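# Example usage (illustrative; the dataset path below is a placeholder, not from the original project):
if __name__ == "__main__":
    dataset = create_dataset("./data/train", batch_size=32)
    print("number of batches:", dataset.get_dataset_size())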
|
littlemou/MindSpore_graduate_pratice
|
Test5/preprocess.py
|
preprocess.py
|
py
| 1,248 |
python
|
en
|
code
| 1 |
github-code
|
50
|
20538425560
|
import logging
import _lcms2
import numpy as np
from typing import Union, Any, Tuple
from . import util_lcms
# ------------------------
# setup logger
# ------------------------
log = logging.getLogger(__name__)
def useDebugMode():
"""
sets logging level and creates a stream handler to show full debugging information
"""
log.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(name)s - %(levelname)s - %(message)s")
ch.setFormatter(formatter)
log.addHandler(ch)
def get_color_space_number(fmt: Union[str, int]) -> int:
"""
Get color space number by name.
If the number is already given, simply pass it on
:param fmt: name of the colorspace in lcms2 specific format as string, or the corresponding number
:return: the number for that color space
"""
if isinstance(fmt, str):
try:
colorspace_number = getattr(_lcms2, fmt)
except AttributeError:
raise ValueError(
"color space format '{}' not found!\n".format(fmt)
+ "If it is defined in lcms2.h it should be added to 'PyInit__lcms2' in '_lcms2.c"
)
else:
colorspace_number = fmt
return colorspace_number
def get_color_space_specs(cs_number: int) -> Tuple[int, int, int]:
"""
Infer binary layout of the colorspace number 'cs_number' (see lcms2.h).
Use 'get_color_space_number' to get the number from the string name of the color space.
The numbers are also defined as members starting with 'TYPE_'.
That includes
* the total number of channels (channels + extra channels)
* bytes per channel (where 0 means 8 as used for doubles)
* floating point or integer
:param cs_number: the color space number cs_number
:return: tuple (total number of channels, bytes per channel, is floating point)
"""
B = util_lcms.T_BYTES(cs_number)
C = util_lcms.T_CHANNELS(cs_number)
E = util_lcms.T_EXTRA(cs_number)
A = util_lcms.T_FLOAT(cs_number)
if B == 0:
B = 8
log.debug(
"colorspace fmt '{}', channels={}, bytes={}, isFloat={}".format(
cs_number, C + E, B, bool(A)
)
)
return C + E, B, bool(A)
def convert(
in_prof: str,
in_Py_buffer: Any, # anything that supports the buffer interface
in_fmt: Union[str, int],
out_prof: str,
out_fmt: Union[str, int],
intent: int,
dw_flags: int,
):
"""
convert between color spaces
This variant is closest to the underlying 'cmsDoTransform' function in the sense that
    the input data 'in_Py_buffer' is provided by a generic python buffer (raw binary data)
while its format is given separately by 'in_fmt'. The result is returned as python buffer
as well with a binary format according to 'out_fmt'.
The possible formats corresponds to the predefined values from lcms2.h. Their numeric value
encodes the properties how to read the binary data. As example
- TYPE_RGB_8: 3 channels R,G,B with 8 bit (one byte) per channel
- TYPE_CMYK_DBL: 4 channels C,M,Y,K of doubles (8 byte) per channel
Together with the input profile 'in_prof', output profile 'out_prof', the rendering intent 'intent'
and further flags 'dw_flags' the color transformation is fully specified.
The profiles need to be given as string, either meaning a filename or referring to the predefined
profiles:
- PROFILE_SRGB = '*sRGB'
- PROFILE_Lab_D50 = '*Lab_D50'
    The possible intent values are encoded in
- INTENT_PERCEPTUAL = _lcms2.INTENT_PERCEPTUAL
- INTENT_RELATIVE_COLORIMETRIC = _lcms2.INTENT_RELATIVE_COLORIMETRIC
- INTENT_SATURATION = _lcms2.INTENT_SATURATION
- INTENT_ABSOLUTE_COLORIMETRIC = _lcms2.INTENT_ABSOLUTE_COLORIMETRIC
and match the values of the lcms2 implementation.
The same holds for the dw_flags (see little cms docs). As an example we name
- cmsFLAGS_BLACKPOINTCOMPENSATION
which enables blackpoint compensation.
:param in_prof : the input profile as string (filename of predefined values starting with *)
:param in_Py_buffer: the input data as raw binary buffer
:param in_fmt : the format of the input data (see pyColConv members starting with TYPE_)
:param out_prof : the output profile as string (filename of predefined values starting with *)
:param out_fmt : the format of the output data (see pyColConv members starting with TYPE_)
:param intent : the rendering intent (INTENT_PERCEPTUAL, INTENT_RELATIVE_COLORIMETRIC,
INTENT_SATURATION, INTENT_ABSOLUTE_COLORIMETRIC)
:param dw_flags : additional flags for the conversion, like cmsFLAGS_BLACKPOINTCOMPENSATION
:return : bytearray (Py_buffer) containing raw binary data of the output colors
"""
in_C, in_B, in_F = get_color_space_specs(in_fmt)
out_C, out_B, out_F = get_color_space_specs(out_fmt)
num_pixels = len(in_Py_buffer) // in_C
log.debug(
"{} pixels determined from input buffer on length {}".format(
num_pixels, len(in_Py_buffer)
)
)
out_Py_buffer = bytearray(num_pixels * out_C * out_B)
log.debug("create output buffer with {} bytes".format(len(out_Py_buffer)))
ret_code, error_msg = _lcms2.do_transform(
in_prof,
out_prof,
in_Py_buffer,
in_fmt,
out_Py_buffer,
out_fmt,
intent,
dw_flags,
num_pixels,
)
log.debug("calling 'do_transform' from lcms2 returned '{}'".format(ret_code))
if ret_code != 0:
raise RuntimeError(
"calling 'do_transform' from lcms2 failed with error '{}'".format(error_msg)
)
return out_Py_buffer
def convert_nparray(
in_prof: str,
in_array: np.ndarray,
in_pixel_type: int,
out_prof: str,
out_fmt: Union[str, int],
intent: int,
dw_flags: int,
):
"""
convert between color spaces
This variant does the same job as 'convert' with the difference that it expects
the input data and returns the output data as numpy array.
For the input data the pixel type need to be specified (see lcms2.h). They as provided
as module members starting with PT_. The number representation is deduced from the
type of the input array.
The format of the output is set by 'out_fmt'.
The possible formats corresponds to the predefined values from lcms2.h. Their numeric value
encodes the properties how to read the binary data. As example
- TYPE_RGB_8: 3 channels R,G,B with 8 bit (one byte) per channel
- TYPE_CMYK_DBL: 4 channels C,M,Y,K of doubles (8 byte) per channel
Together with the input profile 'in_prof', output profile 'out_prof', the rendering intent 'intent'
and further flags 'dw_flags' the color transformation is fully specified.
The profiles need to be given as string, either meaning a filename or referring to the predefined
profiles:
- PROFILE_SRGB = '*sRGB'
- PROFILE_Lab_D50 = '*Lab_D50'
    The possible intent values are encoded in
- INTENT_PERCEPTUAL = _lcms2.INTENT_PERCEPTUAL
- INTENT_RELATIVE_COLORIMETRIC = _lcms2.INTENT_RELATIVE_COLORIMETRIC
- INTENT_SATURATION = _lcms2.INTENT_SATURATION
- INTENT_ABSOLUTE_COLORIMETRIC = _lcms2.INTENT_ABSOLUTE_COLORIMETRIC
and match the values of the lcms2 implementation.
The same holds for the dw_flags (see little cms docs). As an example we name
- cmsFLAGS_BLACKPOINTCOMPENSATION
which enables blackpoint compensation.
:param in_prof : the input profile as string (filename of predefined values starting with *)
:param in_array : the input data as numpy array.
:param in_pixel_type : the pixel type (see pyColConv members starting with PT_)
:param out_prof : the output profile as string (filename of predefined values starting with *)
:param out_fmt : the format of the output data (see pyColConv members starting with TYPE_)
:param intent : the rendering intent (INTENT_PERCEPTUAL, INTENT_RELATIVE_COLORIMETRIC,
INTENT_SATURATION, INTENT_ABSOLUTE_COLORIMETRIC)
:param dw_flags : additional flags for the conversion, like cmsFLAGS_BLACKPOINTCOMPENSATION
:return : numpy array with output colors
"""
in_C = util_lcms.number_of_channels(in_pixel_type)
log.debug("'in_pixel_type' has {} channels".format(in_C))
dt = in_array.dtype
#if (dt.byteorder != '='):
# raise ValueError("Numpy array has has invalid type! "+
# "Byteorder is '{}'. Byteorder must be native.".format(dt.byteorder))
if dt.kind == 'u': # unsigned int
in_F = 0
in_B = dt.alignment
if (in_B != 1) and (in_B != 2):
raise ValueError("Numpy array has has invalid type! "+
"dtype.alignment yields {} byte(s). Only uint8 and uint16 are supported.".format(in_B))
elif dt.kind == 'f': # floating point
in_F = 1
in_B = dt.alignment
if (in_B != 4) and (in_B != 8):
raise ValueError("Numpy array has has invalid type! "+
"Only float (32 bit) and double (64 bit) are supported.")
if in_B == 8:
in_B = 0 # due to overflow in 3-bit representation use 0
else:
raise ValueError("numpy data type '{}' is not supported".format(dt))
log.debug("'in_array' has dtype {} which maps to num_bytes:{} and is_float:{}".format(dt, in_B, in_F))
in_fmt = ( util_lcms.FLOAT_SH(in_F) |
util_lcms.COLORSPACE_SH(in_pixel_type) |
util_lcms.CHANNELS_SH(in_C) |
util_lcms.BYTES_SH(in_B) )
log.debug("the corresponding 'in_fmt' is {}".format(in_fmt))
out_data = convert(
in_prof = in_prof,
in_Py_buffer=in_array.data,
in_fmt=in_fmt,
out_prof=out_prof,
out_fmt=out_fmt,
intent=intent,
dw_flags=dw_flags
)
log.debug("convert successful!")
out_C, out_B, out_F = get_color_space_specs(out_fmt)
if out_F:
out_dt = 'f' # float
else:
out_dt = 'u' # uint
out_dt += str(out_B)
log.debug("numpy dtype for output from 'out_fmt' is {}".format(out_dt))
out_array = np.frombuffer(out_data, dtype=out_dt)
return out_array
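# Example usage (illustrative sketch, not part of the original module): convert three sRGB uint8
# pixels to Lab doubles. It assumes that PT_RGB, TYPE_Lab_DBL and INTENT_PERCEPTUAL are exposed by
# the _lcms2 extension as described in the docstrings above; substitute the constants available in
# your build if they differ.
if __name__ == "__main__":
    # three pixels, flattened to a 1-D buffer (R,G,B, R,G,B, ...)
    rgb = np.array([255, 0, 0, 0, 255, 0, 0, 0, 255], dtype=np.uint8)
    lab = convert_nparray(
        in_prof='*sRGB',
        in_array=rgb,
        in_pixel_type=_lcms2.PT_RGB,                      # assumed member of the extension
        out_prof='*Lab_D50',
        out_fmt=get_color_space_number('TYPE_Lab_DBL'),   # resolved by name via this module
        intent=_lcms2.INTENT_PERCEPTUAL,
        dw_flags=0,
    )
    print(lab.reshape(-1, 3))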
|
cimatosa/pyColConv
|
pyColConv/pyColConv.py
|
pyColConv.py
|
py
| 10,541 |
python
|
en
|
code
| 0 |
github-code
|
50
|
875036738
|
import collections
class ReorganizeString:
"""
Given a string S, check if the letters can be rearranged
so that two characters that are adjacent to each other are not the same.
If possible, output any possible result. If not possible, return the empty string.
Example 1:
Input: S = "aab"
Output: "aba"
Example 2:
Input: S = "aaab"
Output: ""
Note:
S will consist of lowercase letters and have length in range [1, 500].
"""
def reorganizeString(self, S: str) -> str:
sortedcommons = []
lens = len(S)
for c, cnt in collections.Counter(S).most_common()[::-1]:
if cnt > (lens + 1) // 2:
return ""
sortedcommons.extend([c] * cnt)
out = [None] * lens
out[::2] = sortedcommons[lens // 2:]
out[1::2] = sortedcommons[:lens // 2]
return "".join(out)
|
DmitryPukhov/pyquiz
|
pyquiz/leetcode/ReorganizeString.py
|
ReorganizeString.py
|
py
| 899 |
python
|
en
|
code
| 0 |
github-code
|
50
|
9633133356
|
# -*- coding: utf-8 -*-
"""
Created on Thu May 24 10:28:51 2018
@author: Administrator
Add a 'communication interrupted' state to the SVG wiring diagram
"""
import xml.dom.minidom
doc = xml.dom.minidom.parse('D:/35kV主接线图.svg')
def setNewNode(newNode):
for nc in newNode.childNodes:
if nc.nodeType == nc.ELEMENT_NODE:
if nc.nodeName == 'g':
setNewNode(nc)
else:
if nc.hasAttribute('fill') and nc.getAttribute('fill') != 'none':
nc.setAttribute('fill', 'grey')
if nc.hasAttribute('stroke'):
nc.setAttribute('stroke', 'grey')
def removeAttributeFromG(node):
for nc in node.childNodes:
if nc.nodeType == nc.ELEMENT_NODE:
if nc.nodeName == 'g':
if nc.hasAttribute('irealstate'):
nc.removeAttribute('irealstate')
if nc.hasAttribute('style'):
nc.removeAttribute('style')
removeAttributeFromG(nc)
def parser(node):
node.setAttribute('irealstate', '-1')
newNode = None
for apc in node.childNodes:
if apc.nodeType == apc.ELEMENT_NODE and apc.getAttribute('irealstate') == '0':
newNode = apc.cloneNode(True)
apc.setAttribute('style', 'display:none;')
node.appendChild(newNode)
newNode.setAttribute('irealstate', '-1')
newNode.setAttribute('style', 'display:block;')
setNewNode(newNode)
for nc in node.childNodes:
removeAttributeFromG(nc);
for g in doc.childNodes:
for ap in g.childNodes:
for ap in ap.childNodes:
if ap.nodeType == ap.ELEMENT_NODE and ap.getAttribute('itemplatetype') == 'status':
if ap.getAttribute('igraphtype') in ('circuitbreaker', 'handcart', 'groundedswitch'):
parser(ap)
else:
print(ap.getAttribute('id'))
doc.writexml(open('D:/manage/config/target/graphics/test.svg', 'w', encoding='utf-8'))
|
w8s8y8/pytools
|
status.py
|
status.py
|
py
| 2,079 |
python
|
en
|
code
| 0 |
github-code
|
50
|
22502408130
|
import geopandas as gpd
import matplotlib.pyplot as plt
from shapely.geometry import MultiPolygon, Point, Polygon
import datetime
import pandas as pd
import random
NON_WILDFIRE_POINTS_TO_ADD = 3000
def clean_data(gdf: gpd.GeoDataFrame) -> gpd.GeoDataFrame:
# Remove any incomplete rows
df = gdf.dropna()
# Keep only relevant columns
df = df[['AreaHa', 'StartDate', 'EndDate', 'Label', 'FireName', 'geometry']]
# Consider only wildfires
df = df[df['Label'].str.contains('Wildfire')]
filter_list = df['StartDate'].apply(lambda x: datetime.datetime.strptime(x, "%Y-%m-%d").year > 2009 if x is not None else False)
# Only use data after 2010
df = df[filter_list]
    # Convert relevant columns into integers
df['AreaHa'] = df['AreaHa'].apply(lambda x: int(x) * 1e4) # Area in square metres
df.reset_index(drop=True, inplace=True)
return df
def get_nsw_outline_gdf(filename='DataCleaning/StateBoundary/SED_2022_AUST_GDA94.shp'):
state_boundaries = gpd.read_file(filename)
nsw = state_boundaries[state_boundaries['STE_NAME21'] =='New South Wales']
nsw_outline = nsw.unary_union
geometry = gpd.GeoSeries([nsw_outline])
nsw_gdf = gpd.GeoDataFrame(geometry, columns=['geometry'])
# Set CRS
if nsw_gdf.crs is None:
nsw_gdf = nsw_gdf.set_crs('EPSG:4283')
# Reduce the island off the coast of the mainland
multi_polygon = nsw_gdf['geometry'][0]
# Initialize variables to keep track of the largest area and geometry
largest_area = 0
largest_body = None
# Iterate through the individual geometries in the MultiPolygon
for body in multi_polygon.geoms:
if body.area > largest_area:
largest_area = body.area
largest_body = body
# Create a new MultiPolygon containing only the largest body
new_multi_polygon = MultiPolygon([largest_body])
nsw_gdf['geometry'] = [new_multi_polygon]
return nsw_gdf
def get_and_clean_gdf(filename='BushfireDataCleaned/BushfireDataCleaned.shp') -> gpd.GeoDataFrame:
gdf = gpd.read_file(filename)
# Convert Date into Datetime
gdf['StartDate'] = gdf['StartDate'].apply(lambda x: datetime.datetime.strptime(x[:10], "%Y-%m-%d").date())
gdf['EndDate'] = gdf['EndDate'].apply(lambda x: datetime.datetime.strptime(x[:10], "%Y-%m-%d").date())
gdf['Centroid'] = gpd.GeoDataFrame(geometry=gdf.centroid)
return gdf
def add_in_random_points(filename='BushfireDataCleaned\BushfireDataCleaned.shp'):
'''Function adds in points to a dataframe where there hasn't been a fire'''
nsw_outline = get_nsw_outline_gdf()
minlat = -37 * 2
maxlat = -28 * 2
minlon = 141 * 2
maxlon = 153 * 2
min_time = datetime.date(2010, 1, 1)
max_time = datetime.date(2023, 10, 24)
time_difference = (max_time - min_time).days
random_gdf_dict = []
geometry_list = []
for i in range(NON_WILDFIRE_POINTS_TO_ADD):
startdate = min_time + datetime.timedelta(random.randint(0, time_difference))
# Ensure that the point is located within the NSW Boundary
while True:
point = Point(random.randint(minlon, maxlon)/2, random.randint(minlat, maxlat)/2)
if point.within(nsw_outline).bool():
break
else:
print(i, point)
geometry_list.append(point)
random_gdf_dict.append(
{
'AreaHa': 'None',
'StartDate': datetime.datetime.strftime(startdate, "%Y-%m-%d"),
'EndDate': 'None',
'Label': 'No Event',
'FireName': 'None',
'geometry': Polygon([(point.x, point.y), (point.x, point.y), (point.x, point.y), (point.x, point.y)])
}
)
bushfires_gdf_uncleaned = gpd.read_file(filename)
random_gdf = gpd.GeoDataFrame(random_gdf_dict, crs='EPSG:4283', geometry='geometry')
lst = [bushfires_gdf_uncleaned, random_gdf]
combined = pd.concat(lst)
# schema = {
# 'geometry': combined.geom_type.map(lambda geom_type: geom_type.capitalize()).tolist(),
# 'properties': {'Name': 'str'},
# }
bushfire_data_cleaned = combined.to_file('BushfireDataWithRandomPoints.shp', driver="ESRI Shapefile")
return bushfire_data_cleaned
def main():
# bushfires_gdf_uncleaned = gpd.read_file('DataCleaning/NPWSFireHistory.shp')
# bushfires_gdf = clean_data(bushfires_gdf_uncleaned)
# bushfire_data_cleaned = bushfires_gdf.to_file('BushfireDataCleaned.shp', driver="ESRI Shapefile")
# bushfire_data_cleaned
with_random_points = add_in_random_points()
if __name__ == '__main__':
main()
|
danielmtro/ThesisBackup
|
DataCleaning/datacleaning.py
|
datacleaning.py
|
py
| 4,826 |
python
|
en
|
code
| 0 |
github-code
|
50
|
41296300253
|
import sys
from os.path import abspath, dirname
import numpy as np
import open3d as o3d
parent_dir = dirname(dirname(dirname(abspath(__file__)))) # type: ignore
if parent_dir not in sys.path: # type: ignore
sys.path.append(parent_dir)
print(parent_dir)
from pcd_algorithm.utils.merge import merge
def visualize_cluster(cluster_list: list[list[float]], num_of_cluster: int):
"""
クラスタリング結果を表示する.
Args:
cluster_list (list[list[float]]): 入力点群
num_of_cluster (int): クラスタ数
"""
cluster_pcd = o3d.geometry.PointCloud()
for i in range(num_of_cluster):
cluster = o3d.geometry.PointCloud()
cluster.points = o3d.utility.Vector3dVector(cluster_list[i])
cluster.paint_uniform_color(np.random.rand(3))
cluster_pcd = merge(cluster_pcd, cluster)
o3d.visualization.draw_geometries([cluster_pcd])
def k_means(points: np.ndarray, num_of_cluster: int):
"""
受け取った点の集合をk-means法でクラスタリングする.
Args:
points (np.ndarray): 入力点群
num_of_cluster (int): クラスタ数
"""
assert len(points) >= num_of_cluster, "points is less than number of clusters."
    seed_points = np.random.rand(num_of_cluster, 3)  # to be changed: pick the seed points at random from the input points
seed_dist = np.array([10e7, 10e7, 10e7])
while np.max(seed_dist) > 0.005:
cluster_list: list[list[float]] = [[]] * num_of_cluster
        # assign each point to its nearest cluster
for point in points:
distance = []
for seed_point in seed_points:
distance = np.append(distance, np.linalg.norm(point - seed_point))
nearest_index = np.argmin(distance)
if len(cluster_list[nearest_index]) > 0:
cluster_list[nearest_index] = np.vstack(
[cluster_list[nearest_index], point]
)
else:
cluster_list[nearest_index] = point
        # update the seed points
cluster_heart_point = [[0, 0, 0]] * num_of_cluster
for i in range(num_of_cluster):
sum_points = np.sum(cluster_list[i], axis=0)
cluster_heart_point[i] = [
point / len(cluster_list[i]) for point in sum_points
]
seed_dist = np.linalg.norm(cluster_heart_point[i] - seed_points[i])
seed_points = np.array(cluster_heart_point)
visualize_cluster(cluster_list, num_of_cluster)
if __name__ == "__main__":
points = np.random.rand(100, 3)
k_means(points, 4)
|
sakamo1112/pcd_algorithm
|
pcd_algorithm/clustering/k_means.py
|
k_means.py
|
py
| 2,600 |
python
|
en
|
code
| 0 |
github-code
|
50
|
37770993352
|
import requests
import json
apiKey = '37134fea34fc11b5bdc5e82ad894109b3d141674'
url = 'https://api.github.com/repos/datarepresentationstudent/aPrivateOne'
filename = 'repo_out.json'
file = open("pmg_test.txt", 'r')
#json.dump(repoJSON, file, indent=4)
response = requests.put(url, file, auth=('token',apiKey))
#response = requests.put(url, auth=('token',apiKey))
#repoJSON = response.json()
#print(response.json())
#print(response.status_code)
|
Pmcg1/dataRepresentation
|
week06/lab06.02_challenge02.py
|
lab06.02_challenge02.py
|
py
| 450 |
python
|
en
|
code
| 0 |
github-code
|
50
|
70732891675
|
import os
from os import makedirs
from os.path import isdir, join, exists
import fcntl
from time import sleep, strftime
import sys
import datetime
VERBOSE = False
DEBUG_MODE = False
ROOT_DIR = '/home/gb/logger/bdata'
if DEBUG_MODE:
ROOT_DIR = '.'
class SoundLogger:
def __init__(self, dev_card = 1, sampling_rate = 44100, duration = 60, verbose=VERBOSE):
# setting
self.dev_card = dev_card
self.sampling_rate = sampling_rate
self.duration = duration
self.__VERBOSE = verbose
## file name
#self.__OUTPUT_PATH = ROOT_DIR + '/sound/%Y/%m/%d'
#self.__FNAME_FORMAT = '_%Y-%m%d-%H%M%S+0000.wav'
#self.__FNAME_FORMAT = '_%Y-%m%d-%H%M%S%z.wav'
utcnow = datetime.datetime.now(tz=datetime.timezone.utc)
#p = f'{utcnow.year:04}-{utcnow.month:02}{utcnow.day:02}-{utcnow.hour:02}{utcnow.minute:02}{utcnow.second:02}.wav'
self.__OUTPUT_PATH = ROOT_DIR + '/sound/{:04d}/{:02d}/{:02d}'.format(utcnow.year, utcnow.month, utcnow.day)
p = '{:04d}-{:02d}{:02d}-{:02d}{:02d}{:02d}.wav'.format(utcnow.year, utcnow.month, utcnow.day, utcnow.hour,utcnow.minute, utcnow.second)
self.lockpath = '/home/gb/.gb_lock/'+'sound.lock'
if self.__VERBOSE:
print('lock file:' + self.lockpath)
#self.__FILE_PATH = self.__OUTPUT_PATH + '/' + self.__FNAME_FORMAT
self.__FILE_PATH = self.__OUTPUT_PATH + '/' + p
if self.__VERBOSE:
print('sounf file:' + self.__FILE_PATH)
def write_to_wav(self):
## file making
dname = strftime(self.__OUTPUT_PATH)
if not isdir(dname): makedirs(dname)
self.fname = strftime(self.__FILE_PATH)
if self.__VERBOSE: print(self.fname)
cmd = 'arecord --device plughw:' + str(self.dev_card) + ' -r ' + str(self.sampling_rate) + ' -d ' + str(self.duration) + ' -f S16_LE ' + self.fname
print(cmd)
os.system(cmd)
pass
def log_main(self):
# check lock file
try:
print(self.lockpath)
lockf = open(self.lockpath, 'w')
print(lockf)
fcntl.lockf(lockf.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
except:
print('Locked.')
exit(1)
# save start
self.write_to_wav()
# wait for save
sleep(self.duration + int(10))
sys.exit("save duration is finished")
def main():
from argparse import ArgumentParser
dev_card = 1
sampling_rate = 44100
#duration = 60
duration = 15
parser = ArgumentParser()
parser.add_argument('-dcard', '--dev_card', type=int, help='device card', default=dev_card)
parser.add_argument('-sr', '--sampling_rate', type=int, help='sampling rate [Hz]', default=sampling_rate)
parser.add_argument('-dr', '--duration', type=int, help='log duration [seconds]', default=duration)
parser.add_argument('-v', '--verbose', action='store_true', help='verbose option')
args = parser.parse_args()
logger = SoundLogger(dev_card = args.dev_card, sampling_rate = args.sampling_rate, duration = args.duration, verbose=args.verbose)
logger.log_main()
if __name__ == '__main__':
main()
|
groundbird/sound_logger
|
sound_logger.py
|
sound_logger.py
|
py
| 3,242 |
python
|
en
|
code
| 0 |
github-code
|
50
|
6528910941
|
from userpreferences.models import UserPreference
from .models import Income, IncomeStream
import datetime
def get_user_currency_symbol(request_user):
# get user currency
try:
user_preferences_object = UserPreference.objects.get(user=request_user)
except UserPreference.DoesNotExist:
user_preferences_object = UserPreference.objects.create(user=request_user)
finally:
user_currency_symbol = user_preferences_object.get_user_preference()
return user_currency_symbol
# returns dictionary with valid fields and error messages
def server_validation(amount=None, date=None, income_stream=None, description=None):
error_messages = []
valid_fields = {}
# no error checking for description
valid_fields['description'] = description
try:
# round to 2dp
amount = float(amount)
amount = round(amount, 2)
if amount < 0:
error_messages.append(f"please enter a positive amount.")
valid_fields['amount'] = None
else:
valid_fields['amount'] = amount
except ValueError:
error_messages.append(f"please enter a valid amount.")
valid_fields['amount'] = None
try:
date_string = str(date)
date_format = '%Y-%m-%d'
date_object = datetime.datetime.strptime(date_string, date_format)
valid_fields['date'] = date_string
except ValueError:
error_messages.append('please enter a valid date.')
valid_fields['date'] = None
try:
IncomeStream.objects.get(income_stream=income_stream)
valid_fields['income_stream'] = income_stream
except:
error_messages.append('please select an income stream.')
valid_fields['income_stream'] = None
valid_fields['error_messages'] = error_messages
return valid_fields
|
melvinloh/expenses_project
|
expenses_project/income/utils.py
|
utils.py
|
py
| 1,849 |
python
|
en
|
code
| 0 |
github-code
|
50
|
37737354939
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: Olivier Noguès
import logging
from ares.Lib.connectors.files import AresFile
class FilePdf(AresFile.AresFile):
"""
:category: Ares File
:rubric: PY
:type: class
:label: Connector to read a bespoke PDF file.
:dsc:
Connector to read a bespoke PDF file.
    This connector also provides features to ask the framework to "digest" the files in order to enrich the search engine with metadata.
    At this stage we are mainly working on the collection of metadata, but the search engine will then leverage all this information.
    For this module to work, the Python package PyMuPDF is required. To install it you can run the command pip install PyMuPDF.
    Once this package is available in your Python environment, this connector will work fine.
If you want to check the connector please click [here](/api?module=connector&alias=PDF)
:link PyMuPDF Documentation: https://pymupdf.readthedocs.io/en/latest/tutorial/
"""
__fileExt = ['.pdf']
label = "Interface to deal with PDF files"
_extPackages = [("fitz", 'PyMuPDF')]
def _read(self, toPandas=False, **kwargs):
doc = self.pkgs["fitz"].open(self.filePath)
if kwargs['pageNumber'] >= doc.pageCount:
logging.debug("Page number %s does not exist, max value %s in file %s" % (kwargs['pageNumber'], doc.pageCount - 1, self.filePath))
return ''
return doc[kwargs['pageNumber']].getText()
if __name__ == '__main__':
pdfObj = FilePdf(filePath=r'C:\Users\HOME\Downloads\easyBus769586.pdf')
print( pdfObj.read(pageNumber=0) )
|
jeamick/ares-visual
|
Lib/connectors/files/AresFilePdf.py
|
AresFilePdf.py
|
py
| 1,645 |
python
|
en
|
code
| 0 |
github-code
|
50
|
39921209805
|
#!C:\Python32\python.exe
__author__ = 'jonathan'
import cgi
from dbsql import *
import json
print("Content-Type: application/json\n")
form = cgi.FieldStorage()
if "id" in form and len(form["id"].value) == 32 and not form["id"].value.count(' '):
q = "SELECT json FROM stored_matches_json WHERE hashed=%(hash)s"
vals = {'hash':form["id"].value}
if "user" in form and len(form["user"].value) < 20 and not form["user"].value.count(' '):
q += " AND user=%(user)s"
vals["user"] = form["user"].value
results = SQLQuery().q(q, vals)
if results and len(results):
jresults = json.loads(results[0][0])
jresults["permalink_id"] = form["id"].value
print(json.dumps(jresults))
else:
print(json.dumps({'status':'error'}))
else:
print(json.dumps({'status':'error', 'error':'invalid_permalink'}))
|
sbilstein/twitterjelly
|
cgi-bin/GetStoredResult.py
|
GetStoredResult.py
|
py
| 862 |
python
|
en
|
code
| 2 |
github-code
|
50
|
21644429821
|
# %%-- To do:
"""
The sets I have done:
set 11 both n and p.
set 10 both n and p.
set 01 both n and p.
set 00 both n (with k) and p.
"""
# %%-
# %%-- Imports
import pandas as pd
import numpy as np
import seaborn as sn
from sklearn.model_selection import train_test_split, GridSearchCV
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.neighbors import KNeighborsRegressor, KNeighborsClassifier
from sklearn.metrics import r2_score, mean_absolute_error, confusion_matrix, f1_score, accuracy_score
from sklearn.linear_model import LinearRegression, Ridge
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, AdaBoostRegressor, RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier
from sklearn.neural_network import MLPRegressor, MLPClassifier
from sklearn.svm import SVR, SVC
import sys
from sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
# uncomment the below line for dell laptop only
from playsound import playsound
from sklearn.model_selection import cross_val_score, RepeatedKFold
from sklearn.multioutput import RegressorChain
from semiconductor.recombination import SRH
import scipy.constants as sc
from datetime import datetime
import smtplib
from email.message import EmailMessage
import os
import sys
# import the function file from another folder:
# use this line if on hp laptop:
# sys.path.append(r'C:\Users\budac\Documents\GitHub\SRH_sklearn_playwithdata\2_levels_problem\mode2')
# use this line if on dell laptop
sys.path.append(r'C:\Users\sijin wang\Documents\GitHub\SRH_sklearn_playwithdata\2_levels_problem\mode2')
# use this line if on workstation
sys.path.append(r'C:\Users\z5183876\OneDrive - UNSW\Documents\GitHub\SRH_sklearn_playwithdata\2_levels_problem\mode2')
sys.path.append(r'C:\Users\z5183876\OneDrive - UNSW\Documents\GitHub\SRH_sklearn_playwithdata\2_levels_problem\mode2\DPML')
sys.path.append(r'C:\Users\z5183876\OneDrive - UNSW\Documents\GitHub\SRH_sklearn_playwithdata\2_levels_problem\mode2\Savedir_example')
from MLobject_tlevel import *
# from dynamic_generation_regression import *
df1 = MyMLdata_2level(r"C:\Users\sijin wang\Desktop\research\thesiswork\ML_results\simulation_data\Etnonordered\p\set11\set11_8k.csv", 'bandgap1',1)
df1.data.head()
# %%-
# %%-- different data engineering before training ML model.
# multiplying lifetime by (dn+p0+n0)
df1.pre_processor_dividX()
# %%-
# %%-- Single tasks.
# ['Et_eV_2', 'logSn_2', 'logSp_2', 'Et_eV_1', 'logSn_1', 'logSp_1']
for task in ['logk_1', 'logk_2']:
print(task)
# refresh the dataset
# df1 = MyMLdata_2level(r"G:\study\thesis_data_storage\unordered\set11\p\set11_p_800k.csv", 'bandgap1',5)
df1.singletask = task
r2_frame, y_prediction_frame, y_test_frame, best_model, scaler_return = df1.regression_repeat(output_y_pred=True)
# reshape the test and prediction frame back to 2D:
y_test_frame = pd.DataFrame(y_test_frame)
y_prediction_frame = pd.DataFrame(y_prediction_frame)
exportdata = pd.concat([y_test_frame, y_prediction_frame], axis=1)
# export the validation data: name composed of: the singletask + the filename of the dataset.
# df1.path
filename = str(df1.singletask) + str(df1.path).split('\\')[-1]
exportdata.to_csv(str(filename))
# %%-
# %%-- Compare with Yan method using object.
# load the BO example.
BO_data = pd.read_csv(r'G:\study\thesis_data_storage\unordered\BO_validation.csv')
BO_lifetime = BO_data.iloc[:,17:-2]
# take the log 10 of the lifetime.
BO_lifetime_log = np.log10(np.array(BO_lifetime))
sys.stdout = open(r"Bo_validation.txt", "w")
for task in ['Et_eV_1', 'Et_eV_2']:
# define the ML task.
df1.singletask = task
    # train the model.
r2_frame, y_prediction_frame, y_test_frame, best_model, scaler_return = df1.regression_repeat(output_y_pred=True)
# goes through the scaler.
X = scaler_return.transform(BO_lifetime_log)
# ask model to predict.
y = best_model.predict(X)
print('Predicted ' + task + ' is: ')
print(y)
sys.stdout.close()
# %%-
# %%-- colour coding:
for column in ['logSn_2', 'logSp_2', 'Et_eV_2', 'logSn_1', 'logSp_1']:
df1.colour_column = column
df1.singletask = 'Et_eV_1'
df1.colour_code_training()
# %%-
# %%-- Data leakage.
df1.singletask = 'Et_eV_2_known_Et_eV_2_plus'
r2scores = df1.regression_repeat()
# this makes the results better but has data leakage, R2 got about 0.999.
df1.singletask = 'Et_eV_2_known_param1'
r2scores = df1.regression_repeat()
df1.email_reminder()
# %%-
# %%-- Perform chain regression for energy levels.
# %%-- Just the chain.
df1.regression_matrix = 'Mean Absolute Error'
df1.regression_matrix = 'R2'
chain_scores = df1.repeat_chain_regressor(repeat_num=2, regression_order=None, chain_name = 'Et1->Et2')
chain_scores = df1.repeat_chain_regressor(repeat_num=2, regression_order=None, chain_name = 'Et1->Et1+Et2->Et2')
chain_scores = df1.repeat_chain_regressor(repeat_num=1, regression_order=None, plotall=True, chain_name = 'Et1->Sp1->Sn1->Sp2->Sn2->Et2')
# pd.DataFrame(np.array(chain_scores).reshape(35, 2)).to_csv(path_or_buf = r'C:\Users\sijin wang\Documents\GitHub\SRH_sklearn_playwithdata\2_levels_problem\mode2\Et_regression\set11\chainscore_two_steps.csv')
# %%-
# %%-- Chain and subtraction.
# the plan is to first predict Et1, then predict Et1+Et2, then predict Et2 by subtracting the prediction of sum by Et1 prediction.
# r2 = df1.sum_minus_Et1_chain(regression_order=None, plotall=True)
model_names, y_pred_matrix, y_test, r2list = df1.repeat_subtraction_method(repeat_num=1, regression_order=None, plotall=False, return_pred=True)
# %%-
# %%-
# %%-- Perform chain regression for k
chain_scores = df1.repeat_chain_regressor(repeat_num=5, regression_order=None, chain_name = 'logk1+logk2->logk1->logk2')
# %%-
# %%-- insert all known information as columns (failed)
df1.pre_processor_insert_all_known()
# %%-
# %%-- calculate C1 C2 C3 C4 as known for each defect.
# This equation only calculates C while ignoring the excess carrier concentration, and it only works for one doping and one temperature.
# df1.C1_C2_C3_C4_calculator()
# this equation works for lifetime data that vary in both T and doping.
C2n_list, C2d_list, C1n_list, C1d_list = df1.C1n_C2n_C1d_C2d_calculator(return_C=True, export=False, sanity_check=False, playmusic=False)
# %%-- this section will plot the histogram of Cn.
C1n_array = np.array(C1n_list)[4:, :]
C2n_array = np.array(C2n_list)[4:, :]
# flatten the array.
C1n_array = np.reshape(C1n_array, (-1, ))
print(np.mean(C1n_array))
print(np.max(C1n_array))
C2n_array = np.reshape(C2n_array, (-1, ))
print(np.mean(C2n_array))
print(np.max(C2n_array))
# remove the outliers
# C1n_array = C1n_array[abs(C1n_array - np.mean(C1n_array)) < 1 * np.std(C1n_array)]
# C2n_array = C2n_array[abs(C2n_array - np.mean(C2n_array)) < 1 * np.std(C2n_array)]
# take the log.
C1n_array.astype(float)
# plot the histogram
bins = 100
plt.figure(facecolor='white')
# plot the log.
plt.hist(np.log10(C1n_array.astype('float')) , bins=bins, label=r'$C_{\rm 1n}$', alpha=0.75, density=True)
plt.hist(np.log10(C2n_array.astype('float')) , bins=bins, label=r'$C_{\rm 2n}$', alpha=0.75, density=True)
# plot the original (non-log) values.
# plt.hist(C1n_array.astype('float'), bins=bins, label='$C_{1n}$')
# plt.hist(C2n_array.astype('float'), bins=bins, label='$C_{2n}$')
plt.legend(fontsize=15)
# plt.title('Histogram of $C_d$')
plt.xlabel(r'log$_{\rm 10}$ of $C_{\rm n}$', fontsize=22)
plt.ylabel('Probability density', fontsize=22)
plt.xticks(fontsize=22)
plt.yticks([0, 0.05, 0.1, 0.15, 0.2], fontsize=22)
plt.savefig('Cn_compare.png', bbox_inches='tight')
plt.show()
# plot the boxplot.
# plt.figure()
# # plot hte log.
# plt.boxplot([np.log10(C1n_array.astype('float')),np.log10(C2n_array.astype('float'))])
# # plot hte origin.
# # plt.hist(C1n_array.astype('float'), bins=bins, label='$C_{1n}$')
# # plt.hist(C2n_array.astype('float'), bins=bins, label='$C_{2n}$')
# # plt.legend(['$C_{1n}$', '$C_{2n}$'])
# plt.title('Distribution of Cn')
# plt.ylabel('log10 of Cn')
# # plt.ylim([-1, 1])
# plt.show()
# plt.figure()
# plt.bar(['$C_{1n}$', '$C_{2n}$'],height=[np.log10(np.mean(C1n_array)), np.log10(np.mean(C2n_array))])
# plt.show()
# %%-
# %%-- this section will plot the histogram of Cd.
C1d_array = np.array(C1n_list)[4:, :]
C2d_array = np.array(C2n_list)[4:, :]
# C1d_array = np.array(C1d_list)[4:, :]
# C2d_array = np.array(C2d_list)[4:, :]
# flatten the array.
C1d_array = np.reshape(C1d_array, (-1, ))
print(np.mean(C1d_array))
print(np.max(C1d_array))
C2d_array = np.reshape(C2d_array, (-1, ))
print(np.mean(C2d_array))
print(np.max(C2d_array))
# remove the outliers
# C1d_array = C1d_array[abs(C1d_array - np.mean(C1d_array)) < 1 * np.std(C1d_array)]
# C2d_array = C2d_array[abs(C2d_array - np.mean(C2d_array)) < 1 * np.std(C2d_array)]
# take the log.
C1d_array.astype(float)
# plot the histogram
bins = 100
plt.figure(facecolor='white')
# plot the log.
plt.hist(np.log10(C1d_array.astype('float')) , bins=bins, label=r'$C_{\rm 1d}$', alpha=0.75, density=True)
plt.hist(np.log10(C2d_array.astype('float')) , bins=bins, label=r'$C_{\rm 2d}$', alpha=0.75, density=True)
# plot the original (unlogged) values.
# plt.hist(C1d_array.astype('float'), bins=bins, label='$C_{1d}$')
# plt.hist(C2d_array.astype('float'), bins=bins, label='$C_{2d}$')
plt.legend(fontsize=15)
# plt.title('Histogram of $C_d$')
plt.xlabel(r'log$_{\rm 10}$ of $C_{\rm d}$', fontsize=22)
plt.ylabel('Probability density', fontsize=22)
plt.xticks(fontsize=22)
plt.yticks([0, 0.05, 0.1, 0.15, 0.2], fontsize=22)
plt.savefig('Cd_compare.png', bbox_inches='tight')
plt.show()
# plot the boxplot.
# plt.figure()
# # plot hte log.
# plt.boxplot([np.log10(C1n_array.astype('float')),np.log10(C2n_array.astype('float'))])
# # plot hte origin.
# # plt.hist(C1n_array.astype('float'), bins=bins, label='$C_{1n}$')
# # plt.hist(C2n_array.astype('float'), bins=bins, label='$C_{2n}$')
# # plt.legend(['$C_{1n}$', '$C_{2n}$'])
# plt.title('Distribution of Cn')
# plt.ylabel('log10 of Cn')
# # plt.ylim([-1, 1])
# plt.show()
# plt.figure()
# plt.bar(['$C_{1n}$', '$C_{2n}$'],height=[np.log10(np.mean(C1n_array)), np.log10(np.mean(C2n_array))])
# plt.show()
# %%-
# %%-
# %%-- Data visualization
# %%-- General histogram:
# histogram for C:
df1.C_visiaulization(variable='C1n/C2n')
df1.C_visiaulization()
# %%-
# %%-- Histogram for different T.
# plot for one temperature:
# df1.C_visiaulization(task_name='histogram at T', T=150)
# plot for the denominator term.
for T in range(150, 401, 50):
df1.C_visiaulization(task_name='histogram at T', T=T)
# plot for the numerator term:
for T in range(150, 401, 50):
df1.C_visiaulization(task_name='histogram at T', variable='C1n/C2n', T=T)
# why does it seem like T does not change anything?
# %%-
# %%-- Histogram for different doping.
df1.C_visiaulization(task_name='histogram at doping', doping=1e14)
# # plot for demoninator term.
# for doping in [3e14, 7e14, 1e15, 3e15, 7e15, 1e16]:
# df1.C_visiaulization(task_name='histogram at doping', doping=doping)
# # plot for numeator term:
# for doping in [3e14, 7e14, 1e15, 3e15, 7e15, 1e16]:
# df1.C_visiaulization(task_name='histogram at doping', doping=doping, variable='C1n/C2n')
# # why it seems like doping does not change anything either.
# %%-
# %%-- Visialize individual parameters.
df1.C_visiaulization(variable='C1n')
df1.C_visiaulization(variable='C2n')
df1.C_visiaulization(variable='C1d')
df1.C_visiaulization(variable='C2d')
df1.C_visiaulization(task_name='C histogram compare')
# %%-
# %%-- T vs C:
df1.C_visiaulization(variable='C1n/C2n', task_name='plot with T')
df1.C_visiaulization(variable='C1d/C2d', task_name='plot with T')
# %%-
# %%-- Doping vs C:
df1.C_visiaulization(variable='C1d/C2d', task_name='plot with doping')
df1.C_visiaulization(variable='C1n/C2n', task_name='plot with doping')
# %%-
# %%-- dn vs C:
df1.C_visiaulization(variable='C1d/C2d', task_name='plot with dn')
df1.C_visiaulization(variable='C1n/C2n', task_name='plot with dn')
# %%-
# %%-- E_diff vs C:
df1.C_visiaulization(variable='C1n/C2n', task_name='plot with Et1-Et2')
df1.C_visiaulization(variable='C1d/C2d', task_name='plot with Et1-Et2')
# %%-
# %%-- data importance visualization
df1.feature_importance_visualisation('Et_eV_2')
# %%-
# %%-- histogram for defect charge state population:
C2n_frame, C2d_frame, C1n_frame, C1d_frame = df1.C1n_C2n_C1d_C2d_calculator(return_C=True, export=False, sanity_check=False, playmusic=False)
# now we want to compare the results for C1n and C2n:
C1n_framedata = C1n_frame.iloc[3:, :]
C1n_av = np.mean(np.array(C1n_framedata), axis=1).reshape(-1, 1).astype(float)  # reshape(-1, 1): C1n_av cannot be referenced before it is defined
C1n_avlog = np.log10(np.array(C1n_av))
# extract the C2n as well.
C2n_framedata = C2n_frame.iloc[3:, :]
# C2n_framedata
C2n_av = np.mean(np.array(C2n_framedata), axis=1).reshape(np.shape(C1n_av)[0], 1).astype(float)
C2n_avlog = np.log10(np.array(C2n_av))
C1n_av2 = np.mean(C1n_av)
C2n_av2 = np.mean(C2n_av)
print(C1n_av2/C2n_av2)
# %%--
# plot the histogram comparison:
labels=['most positively charged / middle charge', 'most negatively charged / middle charge']
plt.figure()
plt.boxplot(np.concatenate([C1n_avlog, C2n_avlog], axis=1), vert=False, labels=labels, showfliers=True)
# plt.title('Mean absolute error scores for ' + str(self.singletask))
plt.xlabel('log10 of the value')
plt.show()
# %%-
# %%-
# %%-
# %%-- test the first step of the dynamic generation method: use the ML object.
df1 = MyMLdata_2level(r"G:\study\thesis_data_storage\set11\set11_80000.csv", 'bandgap1',1)
# predict Et1:
df1.singletask='logSn_1'
r2_frame, y_prediction_frame, y_test_frame, best_model, scaler_return = df1.regression_repeat(output_y_pred=True)
# now we have new lifetime data from another file: load the lifetime data:
validationdata = pd.read_csv(r"C:\Users\z5183876\OneDrive - UNSW\Documents\GitHub\SRH_sklearn_playwithdata\lifetimedata\point3\set11_50_1.csv")
# validationdata = pd.read_csv(r"C:\Users\sijin wang\Desktop\Thesis\thesiswork\simulation_data\set11\set11_1.csv")
# extract the lifetime data:
select_X_list = []
validationsetX = validationdata
for string in validationdata.columns.tolist():
if string[0].isdigit():
        # collect the lifetime column names (they start with a digit).
select_X_list.append(string)
# extract the lifetime data.
validationsetX = validationdata[select_X_list]
# print(validationsetX)
# take the log:
validationsetX = np.log10(validationsetX)
# print(validationsetX)
# go through the scaler.
validationsetX = scaler_return.transform(validationsetX)
# print(validationsetX)
# Model to predict:
y_pred = best_model.predict(validationsetX)
print(y_pred)
df1.email_reminder()
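# Hedged helper sketch: the validation cells in this file repeat the same preprocessing
# (keep lifetime columns whose names start with a digit, take log10, apply the scaler fitted on
# the training set). This wrapper is an illustration only, not part of MyMLdata_2level.
def preprocess_lifetime_sketch(frame, fitted_scaler=None):
    """Select lifetime columns, take log10, and optionally apply an already-fitted scaler."""
    lifetime_cols = [c for c in frame.columns if c[0].isdigit()]
    X = np.log10(frame[lifetime_cols])
    return fitted_scaler.transform(X) if fitted_scaler is not None else X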
# %%-
# %%-- test the second step of the dynamic generation method: use the ML object.
# assume at this step the data generation for second step is done:
df1 = MyMLdata_2level(r"G:\study\thesis_data_storage\set11\ddgm\point1\dataset2_withAuger.csv", 'bandgap1',1)
df1.singletask = 'logSp_2'
# try to do it without the pre-processor, i.e. manually.
r2_frame, y_prediction_frame, y_test_frame, best_model, scaler_return = df1.regression_repeat(output_y_pred=True)
# now we have new lifetime data from another file: load the lifetime data:
validationdata = pd.read_csv(r"G:\study\thesis_data_storage\set11\ddgm\point1\set11_50_1.csv")
# validationdata = pd.read_csv(r"C:\Users\sijin wang\Desktop\Thesis\thesiswork\simulation_data\set11\set11_1.csv")
# extract the lifetime data:
select_X_list = []
validationsetX = validationdata
for string in validationdata.columns.tolist():
if string[0].isdigit():
        # collect the lifetime column names (they start with a digit).
select_X_list.append(string)
# extract the lifetime data.
validationsetX = validationdata[select_X_list]
print(validationsetX)
# print(validationsetX)
# take the log:
validationsetX = np.log10(validationsetX)
print(validationsetX)
# print(validationsetX)
# go through the scaler.
validationsetX = scaler_return.transform(validationsetX)
print(validationsetX)
# print(validationsetX)
# Model to predict:
y_pred = best_model.predict(validationsetX)
print(y_pred)
# %%-
# %%-- test the idea of the dynamic generation method: from scratch, no scaler or log10.
# assume at this step the data generation for second step is done: load the data:
trainingset = pd.read_csv(r"G:\study\thesis_data_storage\set11\ddgm\point1\dataset2_withAuger.csv")
# extract the lifetime training data.
select_X_list = []
for string in trainingset.columns.tolist():
if string[0].isdigit():
        # collect the lifetime column names (they start with a digit).
select_X_list.append(string)
trainingX = trainingset[select_X_list]
# define the ML model.
model = RandomForestRegressor(n_estimators=150)
# extract the target value:
y = trainingset['logSp_2']
# train the model.
model.fit(trainingX, y)
# now we have new lifetime data from another file: load the lifetime data:
validationdata = pd.read_csv(r"G:\study\thesis_data_storage\set11\ddgm\point1\set11_50_1.csv")
# extract the lifetime data.
validationsetX = validationdata[select_X_list]
# Model to predict:
y_pred = model.predict(validationsetX)
print(y_pred)
# %%-
# %%-- test the idea of the dynamic generation method: from scratch, but with log10 and scalers (using predicted Et1, Sn1, Sp1).
# assume at this step the data generation for second step is done: load the data:
trainingset = pd.read_csv(r"G:\study\thesis_data_storage\set11\ddgm\point1\dataset2_withAuger.csv")
# extract the lifetime training data.
select_X_list = []
for string in trainingset.columns.tolist():
if string[0].isdigit():
        # collect the lifetime column names (they start with a digit).
select_X_list.append(string)
trainingX = trainingset[select_X_list]
# take log10 of the data.
trainingX = np.log10(trainingX)
# apply a scaler on the data.
scaler = MinMaxScaler()
trainingX = scaler.fit_transform(trainingX)  # keep the scaled array so the model is trained on the same scaling as the validation data
# define the ML model.
model = RandomForestRegressor(n_estimators=150)
# extract the target value:
y = trainingset['logSp_2']
# train the model.
model.fit(trainingX, y)
# now we have new lifetime data from another file: load the lifetime data:
validationdata = pd.read_csv(r"G:\study\thesis_data_storage\set11\ddgm\point1\set11_50_1.csv")
# extract the lifetime data.
validationsetX = validationdata[select_X_list]
# take the log for validation data.
validationsetX = np.log10(validationsetX)
# go through the scaler.
validationsetX = scaler.transform(validationsetX)  # fix the variable name so the scaled data is actually used for prediction
# Model to predict:
y_pred = model.predict(validationsetX)
print(y_pred)
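# Design note (hedged sketch): wrapping the scaler and the regressor in an sklearn Pipeline avoids
# the "scaled array not kept / wrong variable transformed" mistakes fixed above; trainingX_log and
# validationX_log below are assumed names for the unscaled log10 lifetime data.
# from sklearn.pipeline import make_pipeline
# pipe = make_pipeline(MinMaxScaler(), RandomForestRegressor(n_estimators=150))
# pipe.fit(trainingX_log, y)
# y_pred = pipe.predict(validationX_log)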
# %%-
# %%-- test the dynamic regression object.
# training_path = r"G:\study\thesis_data_storage\set11\set11_80000.csv"
# validation_path = r"G:\study\thesis_data_storage\set11\set11_50.csv"
validation_path = r"C:\Users\sijin wang\Desktop\Thesis\thesiswork\simulation_data\set11\set11_1.csv"
training_path = r"C:\Users\sijin wang\Desktop\Thesis\thesiswork\simulation_data\set11\set11_80000.csv"
dy = Dynamic_regression(training_path=training_path, validation_path = validation_path, noise_factor=0, simulate_size=8000, n_repeat=1)
dy.t_step_train_predict()
# export the data
# pd.DataFrame(dy.y_predictions_1).to_csv(r'C:\Users\sijin wang\Desktop\Thesis\thesiswork\code_running_results\dynamic_generation]\x.csv')
# pd.DataFrame(dy.y_predictions_2).to_csv(r'C:\Users\sijin wang\Desktop\Thesis\thesiswork\code_running_results\dynamic_generation]\y.csv')
# dy.email_reminder()
# we have a problem here: why is the real value about 0.33 while the first prediction is about 0.30 both times?
# figure out why the first prediction is always 0.3 instead of 0.33 eV
# figure out why the prediction for Et1 is always smaller than the real value; check the scaler.
# the scaler seems to be fine; maybe this particular validation set just tends to be predicted smaller.
# let's check it with a larger validation set (100); if we still have the Et1 prediction less than Et2, then we must have a systematic error.
# it seems we do have a systematic error:
# 1. just focus on the first step.
# 2. check the scaler.
# the first step works!
# let's try to do validation.
# check whether there is anything wrong with the second step; maybe it is because there are no column names for the second-step training?
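# Hedged sketch for the systematic-error check described above: on a larger validation set, a mean
# signed residual well below zero would indicate systematic under-prediction of Et1. The argument
# names are assumptions for illustration.
def mean_residual_sketch(y_true_Et1, y_pred_Et1):
    """Return the mean signed residual (prediction minus truth)."""
    return float(np.mean(np.array(y_pred_Et1) - np.array(y_true_Et1)))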
# %%-
df1.email_reminder()
repo_name: sijinwnag/SRH_sklearn_playwithdata | sub_path: 2_levels_problem/mode2/Et_regression/set11/set11.py | file_name: set11.py | file_ext: py | file_size_in_byte: 20159 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 50

seq_id: 5880174517
def chars_to_bools(chars):
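    """Convert a string of '0'/'1' characters into a list of bools, padded with False to length 6."""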
# assert all([c == "0" or c == "1" for c in chars])
return [c == "1" for c in chars] + [False] * (6 - len(chars))
def bools_to_chars(bools):
return ["1" if b else "0" for b in bools]
def draw_outline(term, start_coords, end_coords, title=None, color=None):
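    """Draw a rectangular outline from start_coords to end_coords using box-drawing characters,
    with an optional title on the top edge; the colour defaults to green on black."""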
    if color is None:
color = term.green_on_black
print(
term.move_xy(start_coords[0], start_coords[1])
+ color("┌" + "─" * (end_coords[0] - start_coords[0] - 1) + "┐")
)
for y in range(start_coords[1] + 1, end_coords[1]):
print(term.move_xy(start_coords[0], y) + color("│"))
print(term.move_xy(end_coords[0], y) + color("│"))
print(
term.move_xy(start_coords[0], end_coords[1])
+ color("└" + "─" * (end_coords[0] - start_coords[0] - 1) + "┘")
)
if title is not None:
assert len(title) <= end_coords[0] - start_coords[0] - 1
print(term.move_xy(start_coords[0] + 1, start_coords[1]) + color(title))
def outline_editor(term, editor, title, color=None):
draw_outline(
term,
(
editor.origin[0] - 1,
editor.origin[1] - 1,
),
(
editor.origin[0] + editor.size[0],
editor.origin[1] + editor.size[1],
),
title=title,
color=color,
)
class NanoEditor:
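    """A minimal fixed-size text editor widget rendered with terminal escape sequences.

    The `term` argument is assumed to be a blessed-style Terminal object (move_xy, KEY_* codes,
    colour helpers); `origin` and `size` are (column, row) tuples in screen coordinates.
    """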
def __init__(self, term, origin, size, contents=None):
self.term = term
self.origin = origin
self.size = size
self.cursor = [0, 0]
self.is_focused = True
self.highlighted_lines = []
if contents is None:
self.contents = [[] for _ in range(self.size[1])]
else:
self.contents = contents
self.legal_chars = (
[chr(d) for d in range(ord("0"), ord("9") + 1)]
+ [chr(c) for c in range(ord("A"), ord("Z") + 1)]
+ list(" |=!@#$%^&*().")
)
def edit_callback(self):
"""Meant to be overwritten"""
pass
def draw(self):
if len(self.contents) < self.size[1]:
contents = self.contents + [[]] * (self.size[1] - len(self.contents))
elif len(self.contents) > self.size[1]:
contents = self.contents[: self.size[1]]
else:
contents = self.contents
for y, line in enumerate(contents):
line = "".join(line)
line_color = (
self.term.white_on_black
if (y not in self.highlighted_lines)
else self.term.black_on_red
)
cursor_color = (
# self.term.black_on_green if (y not in self.highlighted_lines) else self.term.black_on_white
self.term.black_on_green
)
if self.cursor[1] == y and self.is_focused:
assert self.cursor[0] <= len(line)
assert self.cursor[0] <= self.size[0] - 1
colored_line = line_color(line[: self.cursor[0]])
if len(line) == self.cursor[0]: # cursor is hanging out after the line
colored_line += cursor_color(" ")
# One extra char for cursor
colored_line += line_color(" " * (self.size[0] - len(line) - 1))
elif len(line) == self.cursor[0] + 1: # cursor is on last char of line
colored_line += cursor_color(line[self.cursor[0]])
colored_line += line_color(" " * (self.size[0] - len(line)))
else:
colored_line += cursor_color(line[self.cursor[0]])
colored_line += line_color(line[self.cursor[0] + 1 :])
colored_line += line_color(" " * (self.size[0] - len(line)))
else:
colored_line = line_color(line)
colored_line += line_color(" " * (self.size[0] - len(line)))
print(self.term.move_xy(self.origin[0], self.origin[1] + y) + colored_line)
def keypress(self, inp):
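        """Handle one keystroke: arrow/Home/End navigation, Backspace/Delete/Enter editing, and
        insertion of legal characters, then redraw the editor."""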
if inp.code == self.term.KEY_LEFT:
if 0 < self.cursor[0]:
self.cursor[0] -= 1
elif inp.code == self.term.KEY_RIGHT:
if self.cursor[0] < self.size[0] - 1 and self.cursor[0] < len(
self.contents[self.cursor[1]]
):
self.cursor[0] += 1
elif inp.code == self.term.KEY_HOME:
self.cursor[0] = 0
elif inp.code == self.term.KEY_END:
self.cursor[0] = len(self.contents[self.cursor[1]])
elif inp.code == self.term.KEY_UP:
if 0 < self.cursor[1]:
self.cursor[1] -= 1
self.cursor[0] = min(self.cursor[0], len(self.contents[self.cursor[1]]))
elif inp.code == self.term.KEY_DOWN:
if self.cursor[1] < self.size[1] - 1:
self.cursor[1] += 1
self.cursor[0] = min(self.cursor[0], len(self.contents[self.cursor[1]]))
elif inp.code == self.term.KEY_BACKSPACE:
if 0 == self.cursor[0]:
if 0 < self.cursor[1] and self.contents[self.cursor[1] - 1] == []:
del self.contents[self.cursor[1] - 1]
self.contents.append([])
self.cursor[1] -= 1
else:
del self.contents[self.cursor[1]][self.cursor[0] - 1]
self.cursor[0] -= 1
elif inp.code == self.term.KEY_DELETE:
if self.cursor[0] < len(self.contents[self.cursor[1]]):
del self.contents[self.cursor[1]][self.cursor[0]]
elif inp.code == self.term.KEY_ENTER:
if self.contents[-1] == [] and self.cursor[1] < self.size[1] - 1:
self.contents.pop()
self.contents.insert(self.cursor[1] + 1, [])
self.cursor[0] = 0
self.cursor[1] += 1
elif inp.upper() in self.legal_chars:
if len(self.contents[self.cursor[1]]) < self.size[0]:
self.contents[self.cursor[1]].insert(self.cursor[0], inp.upper())
else:
self.contents[self.cursor[1]][self.cursor[0]] = inp.upper()
if self.cursor[0] < self.size[0] - 1:
self.cursor[0] += 1
self.edit_callback()
self.draw()
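
# Hedged usage sketch (not part of the original module): NanoEditor expects a terminal object with
# blessed-style attributes (move_xy, KEY_* codes, colour helpers), so the driver below assumes the
# `blessed` package and is an illustration only.
# import blessed
# term = blessed.Terminal()
# editor = NanoEditor(term, origin=(2, 2), size=(40, 10))
# with term.fullscreen(), term.cbreak(), term.hidden_cursor():
#     editor.draw()
#     while True:
#         key = term.inkey()
#         if key.code == term.KEY_ESCAPE:
#             break
#         editor.keypress(key)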
repo_name: aselker/ld48_computer_1 | sub_path: nano_editor.py | file_name: nano_editor.py | file_ext: py | file_size_in_byte: 6311 | program_lang: python | lang: en | doc_type: code | stars: 5 | dataset: github-code | pt: 50

seq_id: 34567644548
# coding: utf-8
__author__ = 'Administrator'
import adbtools
import os
import datetime
import time
def command():
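    """Read the adb monkey command from E:/1/monkey.txt and launch it via os.popen (does not wait
    for the command to finish)."""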
with open('E:/1/monkey.txt') as file:
data = file.readlines()
str = ''.join(data)
file.close()
print(str)
os.popen(str)
def write_result():
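    """Rename e:/1/monkey_run.txt with a timestamp prefix so each monkey run keeps its own log."""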
format_time = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
path = 'e:/1/'
filename = 'monkey_run.txt'
end_name = format_time + filename
os.rename(path + filename, path + end_name)
print (end_name)
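
# Hedged alternative sketch (assumption, not the original flow): subprocess.run waits for the
# monkey command to finish before the log is renamed, which os.popen does not guarantee.
def command_blocking(cmd_file='E:/1/monkey.txt'):
    """Read the monkey command from cmd_file and run it, blocking until it completes."""
    import subprocess
    with open(cmd_file) as f:
        cmd = f.read().strip()
    subprocess.run(cmd, shell=True, check=False)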
if __name__ == '__main__':
device = adbtools.AdbTools('c248f4b7')
command()
write_result()
repo_name: luoxin0420/study | sub_path: monkey.py | file_name: monkey.py | file_ext: py | file_size_in_byte: 624 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 50