seq_id (string, length 4-11) | text (string, length 113-2.92M) | repo_name (string, length 4-125, nullable) | sub_path (string, length 3-214) | file_name (string, length 3-160) | file_ext (string, 18 classes) | file_size_in_byte (int64, 113-2.92M) | program_lang (string, 1 class) | lang (string, 93 classes) | doc_type (string, 1 class) | stars (int64, 0-179k, nullable) | dataset (string, 3 classes) | pt (string, 78 classes)
---|---|---|---|---|---|---|---|---|---|---|---|---|
40779260735
|
import magic
import collections
from pprint import pprint
class cbmagic(magic.Magic):
def __init__(self):
magic.Magic.__init__(self)
self.file_types=collections.OrderedDict()
self.file_types['JPEG'] = 'jpg'
self.file_types['PNG'] = 'png'
self.file_types['PDF'] = 'pdf'
def from_file(self, file):
result=magic.Magic.from_file(self,file)
#Return the file type: jpg, png or pdf
return self.file_types[result.split(' ',1)[0]]
if __name__ == "__main__":
from cb_idcheck import cbmagic
myMagic = cbmagic.cbmagic()
print("Deduces the file type from the file header and returns one of the following strings: 'jpg', 'png' or 'pdf'.")
filename = input("File name: ")
pprint(myMagic.from_file(filename))
|
commerceblock/cb_idcheck
|
cb_idcheck/cbmagic.py
|
cbmagic.py
|
py
| 902 |
python
|
en
|
code
| 1 |
github-code
|
50
|
13156721245
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" This script cleans up the
workspace after a
full synthese (vasy,
boom, boog, loon) run.
"""
__author__ = "Siegfried Kienzle"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Siegfried Kienzle"
__email__ = "[email protected]"
import os
import sys
import getopt
# Extension of VHDL Behavioral Subset
VBE = ".vbe"
# Extension of VHDL Structural Subset
VST = ".vst"
# Extension for graphical schematic viewer
XSC = ".xsc"
def usage():
print("usage: cleanup.py [-h] [-f]")
print("")
print("optional arguments:")
print("-h, --help show this help message and exit")
print(
"-f, --force removes without asking all files with extensions " +
VBE +
", " +
VST +
", " +
XSC +
".")
def remove(filename):
os.remove(filename)
def delete_question(filename):
delete = input("Should the file " + filename + " removed? (y/n)")
return delete
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "hf", ["help", "force"])
except getopt.GetoptError as err:
print(err)
usage()
sys.exit(2)
force = False
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("-f", "--force"):
force = True
path = os.path.abspath(os.getcwd())
files = []
for filename in os.listdir(path):
if filename.endswith(VBE) or filename.endswith(
VST) or filename.endswith(XSC):
files.append(filename)
for candidate in files:
if force:
print(candidate + " will be deleted")
remove(candidate)
else:
if delete_question(candidate) == ("y"):
print(candidate + " will be deleted")
remove(candidate)
if __name__ == "__main__":
main()
|
sikienzl/FPGA_Alliance_Scripts
|
wrapper_synthese/cleanup.py
|
cleanup.py
|
py
| 1,927 |
python
|
en
|
code
| 0 |
github-code
|
50
|
18781204661
|
if '__file__' in globals():
import os, sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import unittest
import numpy as np
from dezero import Variable
def f(x):
y = x ** 4 - 2 * x ** 2
return y
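# Added note (not in the original test): for f(x) = x**4 - 2*x**2 the analytic derivatives
# are f'(x) = 4*x**3 - 4*x and f''(x) = 12*x**2 - 4, so at x = 2.0 the first and second
# gradients are 24.0 and 44.0, the values asserted below.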
class HighgradTest(unittest.TestCase):
def test_backward(self):
x = Variable(np.array(2.0))
y = f(x)
y.backward(create_graph=True)
# print(x.grad)
self.assertEqual(x.grad.data, 24.0)
gx = x.grad
x.cleargrad()
gx.backward()
# print(x.grad)
self.assertEqual(x.grad.data, 44.0)
unittest.main()
|
kanan4gh/my-dezero
|
tests/testStep33.py
|
testStep33.py
|
py
| 607 |
python
|
en
|
code
| 0 |
github-code
|
50
|
11932588404
|
import zipfile
import tensorflow as tf
from tensorflow.keras.layers import Input, Dense, Dropout, Conv2D, MaxPooling2D, Flatten
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.preprocessing import image_dataset_from_directory
from tensorflow.keras.applications import EfficientNetB0, resnet50
from tensorflow.keras.models import Sequential
import numpy as np
import pandas as pd
# !wget https://storage.googleapis.com/ztm_tf_course/food_vision/pizza_steak.zip
zip_ref = zipfile.ZipFile("pizza_steak.zip", "r")
zip_ref.extractall()
zip_ref.close()
train_directory = './pizza_steak/train/'
test_directory = './pizza_steak/test/'
IMAGE_SIZE = (224, 224)
image_data_generator = ImageDataGenerator(rescale=1. / 255,
zoom_range=0.2,
shear_range=0.2,
rotation_range=0.2)
# in case we use class_mode='binary' we must have 1 node in the last layer
# in case we use class_mode='categorical' we must have 2 nodes in the last layer
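# Added illustration (not part of the original script): the binary alternative described
# above pairs class_mode='binary' (i.e. flow_from_directory(..., class_mode='binary'))
# with a single sigmoid output node and loss='binary_crossentropy'. The layer below is a
# standalone example only and is not used by the model defined further down.
binary_head_example = Dense(1, activation='sigmoid')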
train_dt = image_data_generator.flow_from_directory(directory=train_directory,
class_mode='categorical',
batch_size=32,
target_size=IMAGE_SIZE)
test_dt = image_data_generator.flow_from_directory(directory=test_directory,
class_mode='categorical',
batch_size=32,
target_size=IMAGE_SIZE)
# Get the class names
# test_dt.class_indices
model = Sequential()
model.add(Conv2D(filters=16, kernel_size=3, activation='relu'))
model.add(Conv2D(filters=16, kernel_size=3, activation='relu'))
model.add(MaxPooling2D())
model.add(Conv2D(filters=16, kernel_size=3, activation='relu'))
model.add(Conv2D(filters=16, kernel_size=3, activation='relu'))
model.add(MaxPooling2D())
model.add(Flatten())
model.add(Dense(2, activation='softmax'))  # 2 output nodes to match class_mode='categorical'
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(train_dt,
epochs=5,
validation_data=test_dt,
validation_steps=len(test_dt))
# --------------------- PREDICTION ---------------------------
def load_image_for_prediction(img_path):
img = tf.io.read_file(img_path)
# Decode the read file into a tensor & ensure 3 colour channels
# (our model is trained on images with 3 colour channels and sometimes images have 4 colour channels)
img = tf.image.decode_image(img, channels=3)
# Resize the image (to the same size our model was trained on)
img = tf.image.resize(img, size=IMAGE_SIZE)
# Rescale the image (get all values between 0 and 1)
img = img / 255.
return tf.expand_dims(img, axis=0)
# ----------------- Get categories name
class_names = [x for x in test_dt.class_indices.keys()]
class_names
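# Added helper sketch (not in the original script): map a prediction straight to a class
# name instead of hard-coding the probability list as done further below. The name
# predict_class_name is hypothetical; it only reuses model, class_names and
# load_image_for_prediction defined above.
def predict_class_name(img_path):
    # model.predict returns an array of shape (1, 2); argmax picks the most probable class
    probs = model.predict(load_image_for_prediction(img_path))
    return class_names[int(np.argmax(probs[0]))]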
# ------------------------- Get prediction probability
img_path = './pizza_steak/test/pizza/1001116.jpg' # it is a pizza image
model.predict(load_image_for_prediction(img_path))
img_path = './pizza_steak/test/steak/1064847.jpg' # it is a steak image
model.predict(load_image_for_prediction(img_path))
#------------ Get most probable class
prediction_probabilities = [0.08698346, 0.9142322 ]
max_values = np.max(prediction_probabilities)
class_names[prediction_probabilities.index(max_values)]
|
salman-/small-codes-for-tensorflow-certificate-exam
|
classifications/image-classification/binary-image-classification-prediction.py
|
binary-image-classification-prediction.py
|
py
| 3,534 |
python
|
en
|
code
| 0 |
github-code
|
50
|
26636857394
|
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
class MyWindow(QWidget):
def __init__(self):
super(MyWindow, self).__init__()
self.resize(300,300)
self.setWindowTitle('test geometry')
self.setMinimumSize(300,300)
self.setMaximumSize(600,600)
# self.move(0,0)
# self.setGeometry(0,0,300,300)
self.total_widgets = 9
self.col = 3
self.init_gui()
def init_gui(self):
my_button_width = self.width() // self.col
total_rows = (self.total_widgets - 1) // self.col + 1
my_button_height = self.height() // total_rows
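        # Added worked example (not in the original): with 9 widgets, 3 columns and a
        # 300x300 window each button is 100x100, and button i is placed at
        # (i % 3 * 100, i // 3 * 100), e.g. btn4 ends up at (100, 100).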
print(self.frameGeometry())
# print(self.width())
# print(self.height())
# print(self.geometry().width())
for i in range(self.total_widgets):
my_btn = QPushButton(self)
my_btn.setText('btn%d'%i)
my_btn.setStyleSheet('background-color:grey;border:1px solid yellow;')
my_btn_x = i % self.col * my_button_width
my_btn_y = i // self.col * my_button_height
my_btn.resize(my_button_width,my_button_height)
my_btn.move(my_btn_x,my_btn_y)
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
mywindow = MyWindow()
mywindow.show()
mywindow.setGeometry(-10,-200,300,300)
print(mywindow.frameSize())
print(mywindow.frameGeometry())
sys.exit(app.exec_())
|
PeterZhangxing/codewars
|
gui_test/test_pyqt/test_geometry.py
|
test_geometry.py
|
py
| 1,478 |
python
|
en
|
code
| 0 |
github-code
|
50
|
33682023920
|
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
import numpy as np
fig = plt.figure()
ax = plt.axes()
ax.set(xlabel='x', ylabel='f(x) = x^3 + x^2 - 10')
x = np.linspace(-20, 20, 1000)
plt.axis([-20, 20, -20, 20])
plt.plot(x, ((x * x * x) + (4 * x * x) - 10), color = '0.75')
plt.plot(x, x-x)
plt.plot(x - x, x)  # draw the vertical axis; the original line referenced an undefined name y
plt.title("Analyse numerique : TP1")
plt.show()
|
TheoDaix/TP_anum
|
Entrainements/matplotlib_test.py
|
matplotlib_test.py
|
py
| 392 |
python
|
en
|
code
| 0 |
github-code
|
50
|
34556117864
|
from bs4 import BeautifulSoup
import requests
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def get_burberry_df():
urls = [
"https://us.burberry.com/womens-new-arrivals-new-in/",
"https://us.burberry.com/womens-new-arrivals-new-in/?start=2&pageSize=120&productsOffset=&cellsOffset=8&cellsLimit=&__lang=en"
]
# SCRAPING & CREATING A LIST OF LINKS
doc = []
for url in urls:
r = requests.get(url)
html_doc = r.text
soup = BeautifulSoup(html_doc)
for link in soup.find_all("a"):
l = link.get("href")
if "-p80" in l: # <-- THIS WILL NEED TO CHANGE
doc.append(l)
# DEDUPLICATING THE LIST OF LINKS
doc_uniq = set(doc)
print("Number of unique items:"+str(len(doc_uniq)))
# CREATING A DICTIONARY WITH WORDS : COUNTS AND KEY : VALUE PAIRS
result = {}
for link in doc_uniq:
words = link.replace("/", "").split("-")
for word in words:
if word in result:
result[word] += 1
else:
result[word] = 1
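    # Added sketch (not in the original script): the same word -> count mapping could be
    # built in one step with collections.Counter, e.g.
    #   from collections import Counter
    #   result = dict(Counter(w for link in doc_uniq for w in link.replace("/", "").split("-")))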
words = list(result.keys())
counts = list(result.values())
# TURNING THE DICTIONARY INTO A DATAFRAME, SORTING & SELECTING FOR RELEVANCE
df = pd.DataFrame.from_dict({
"words": words,
"counts": counts,
})
df_sorted = df.sort_values("counts", ascending = True)
df_rel = df_sorted[df_sorted['counts']>3]
print(df_rel.head())
print(df_rel.shape)
# PLOTTING
plt.barh(df_rel['words'], df_rel['counts'], color = "#C19A6B")
plt.title("Most used words in Burberry 'New in' SS2020 Women collection")
plt.xticks(np.arange(0, 18, step=2))
plt.savefig("SS2020_Burberry_word_frequency.png")
df_rel['brand']='burberry'
return df_rel
def get_versace_df():
# VERSACE
# CREATING LIST OF RELEVANT URLS
url = "https://www.versace.com/us/en-us/women/new-arrivals/new-in/"
# SCRAPING & CREATING A LIST OF LINKS
doc = []
#for url in urls:
r = requests.get(url)
html_doc = r.text
soup = BeautifulSoup(html_doc)
soup_f = soup.find_all("a")
for t in soup_f:
a = t.get("href")
if a.startswith("/us/en-us/women/new-arrivals/new-in/"):
doc.append(a)
# DEDUPLICATING THE LIST OF LINKS
doc_uniq = set(doc)
print("Number of unique items:"+str(len(doc_uniq)))
#print(doc_uniq)
result = {}
garbage = []
for link in doc_uniq:
if link.startswith("/us/en-us/women/new-arrivals/new-in/?"):
continue
words = link.replace("/us/en-us/women/new-arrivals/new-in/", "") .split("/")
words = words[0].split("-")
for word in words:
if word in result:
result[word] += 1
else:
result[word] = 1
words = list(result.keys())
counts = list(result.values())
#print(result)
# TURNING THE DICTIONARY INTO A DATAFRAME, SORTING & SELECTING FOR RELEVANCE
df = pd.DataFrame.from_dict({
"words": words,
"counts": counts,
})
df2 = df.set_index("words")
#df2 = df.drop(["a1008"],axis=0)
df_sorted = df2.sort_values("counts", ascending = True)
df_rel = df_sorted[df_sorted['counts']>2]
#print(df_rel.head())
#print(df_rel.shape)
#PLOTTING
plt.barh(df_rel.index, df_rel['counts'], color = "#FFD700")
plt.title("Most used words in Versace 'New in' SS2020 Women collection")
plt.savefig("SS2020_Versace_word_frequency.png")
df_rel['brand']='versace'
return df_rel
def get_dg_df():
# CREATING LIST OF RELEVANT URLS
urls = []
#urls = list(urls)
for i in [1,2,3,4]:
u = str("https://us.dolcegabbana.com/en/women/highlights/new-in/?page=") + str(i)
urls.append(u)
#print(urls)
# SCRAPING & CREATING A LIST OF LINKS
doc = []
for url in urls:
r = requests.get(url)
html_doc = r.text
soup = BeautifulSoup(html_doc)
soup_f = soup.find_all("a")
for t in soup_f:
a = t.get("aria-label")
if a != None and a.startswith("Visit"):
doc.append(a)
#print(doc)
# DEDUPLICATING THE LIST OF LINKS
doc_uniq = set(doc)
print("Number of unique items:"+str(len(doc_uniq)))
result = {}
for link in doc_uniq:
words = link.replace("Visit", "").replace(" product page","").split(" ")
for word in words:
if word in result:
result[word] += 1
else:
result[word] = 1
del(result[""])
words = list(result.keys())
counts = list(result.values())
# TURNING THE DICTIONARY INTO A DATAFRAME, SORTING & SELECTING FOR RELEVANCE
df = pd.DataFrame.from_dict({
"words": words,
"counts": counts,
})
df2 = df.set_index("words")
#df2.drop(["", "WITH"])
df_sorted = df2.sort_values("counts", ascending = True)
df_rel = df_sorted[df_sorted['counts']>4]
#print(df_rel.head())
#print(df_rel.shape)
# PLOTTING
plt.barh(df_rel.index, df_rel['counts'], color = "#E0115F")
plt.title("Most used words in D&G 'New in' SS2020 Women collection")
plt.savefig("SS2020_D&G_word_frequency.png", pad_inches=0.1)
df_rel['brand']='d&g'
return df_rel
|
adasegroup/FDS2020_seminars
|
Week 2/Day 2/Submissions/Sergei_Gostilovich/get_data_fun.py
|
get_data_fun.py
|
py
| 5,341 |
python
|
en
|
code
| 3 |
github-code
|
50
|
11342959263
|
import pickle
import requests
import string
def get_keyword_xml(letter):
r = requests.get(f'https://vocab.lternet.edu/vocab/vocab/services.php?task=letter&arg={letter}')
return r.text
def parse_keywords(txt):
i = 0
keywords = []
while i >= 0:
i = txt.find('<string><![CDATA[', i)
if i >= 0:
i = i + len('<string><![CDATA[')
j = txt.find(']', i)
keywords.append(txt[i:j])
return keywords
def get_all_keywords():
keywords = []
for letter in list(string.ascii_lowercase):
keywords.extend(parse_keywords(get_keyword_xml(letter)))
return keywords
keywords = get_all_keywords()
with open('webapp/static/lter_keywords.pkl', 'wb') as keyfile:
pickle.dump(keywords, keyfile)
|
PASTAplus/ezEML
|
get_lter_keywords.py
|
get_lter_keywords.py
|
py
| 775 |
python
|
en
|
code
| 6 |
github-code
|
50
|
71244192794
|
def read():
numbers = []
with open("./files/numbers.txt", "r", encoding="utf-8") as data:
for line in data:
numbers.append(int(line))
print(numbers)
def write():
names = ["Facundo", "Miguel", "Pepe", "Christian", "Fernández"]
with open("./files/numbers.txt", "a") as data:
for name in names:
data.write(name)
data.write("\n")
# with open("/directory", "mode", encoding="utf-8") as f:
# Modes
# a = Append
# r = Read
# w = Write
# Filename = f (Usually this way)
def run():
#read()
write()
if __name__ == "__main__":
run()
|
defdzg/Platzi-Python-intermedio
|
archivos.py
|
archivos.py
|
py
| 675 |
python
|
en
|
code
| 0 |
github-code
|
50
|
15726376022
|
#!/usr/bin/env python
# coding: utf-8
# In[39]:
import pandas as pd
from pandas_datareader import data as pdr
import yfinance as yf
import numpy as np
import datetime as dt
import matplotlib.pyplot as plt
# In[117]:
tickers = ['HD','DIS','WMT','VZ']
# In[118]:
weights = np.array([.25, .3, .15, .3])
# In[119]:
initial_investment = 1000
# In[120]:
start = dt.datetime(2020,1,1)
end = dt.datetime(2020,12,31)
# In[121]:
data = pdr.get_data_yahoo(tickers, start, end=dt.date.today())['Close']
# In[122]:
returns = data.pct_change()
# In[123]:
returns.tail()
# In[124]:
cov_matrix = returns.cov()
cov_matrix
# ### Calculating Means
# In[125]:
avg_rets = returns.mean()
# In[126]:
port_mean = avg_rets.dot(weights)
# Calculate portfolio standard deviation
port_stdev = np.sqrt(weights.T.dot(cov_matrix).dot(weights))
# Calculate mean of investment
mean_investment = (1+port_mean) * initial_investment
# Calculate standard deviation of investment
stdev_investment = initial_investment * port_stdev
# ### Confidence Level
# In[142]:
# Select our confidence interval (I'll choose 95% here)
conf_level1 = 0.05
from scipy.stats import norm
cutoff1 = norm.ppf(conf_level1, mean_investment, stdev_investment)
# ### 95% Confidence
# In[143]:
#Finally, we can calculate the VaR at our confidence interval
var_1d1 = initial_investment - cutoff1
var_1d1
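# Added helper sketch (not in the original notebook): the loop below scales the 1-day VaR
# with the square-root-of-time rule, VaR_n = VaR_1 * sqrt(n); the name n_day_var is
# hypothetical and simply wraps that formula.
def n_day_var(one_day_var, n_days):
    # scale a 1-day VaR figure to an n-day horizon and round to cents
    return np.round(one_day_var * np.sqrt(n_days), 2)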
# In[140]:
# Calculate n Day VaR
var_array = []
num_days = int(15)
for x in range(1, num_days+1):
var_array.append(np.round(var_1d1 * np.sqrt(x),2))
print(str(x) + " day VaR @ 95% confidence: " + str(np.round(var_1d1 * np.sqrt(x),2)))
# In[139]:
plt.xlabel("Day #")
plt.ylabel("Max portfolio loss (USD)")
plt.title("Max portfolio loss (VaR) over 15-day period")
plt.plot(var_array, "r")
# In[130]:
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from scipy.stats import norm
from scipy import stats
import scipy as sp
# In[138]:
returns['HD'].hist(bins=40, histtype="stepfilled",alpha=0.5)
x = np.linspace(port_mean - 3*port_stdev, port_mean+3*port_stdev,100)
plt.plot(x, sp.stats.norm.pdf(x, port_mean, port_stdev), "r")
plt.title("HD returns (binned) vs. normal distribution")
plt.show()
# In[137]:
returns['DIS'].hist(bins=40, histtype="stepfilled",alpha=0.5)
x = np.linspace(port_mean - 3*port_stdev, port_mean+3*port_stdev,100)
plt.plot(x, sp.stats.norm.pdf(x, port_mean, port_stdev), "r")
plt.title("DIS returns (binned) vs. normal distribution")
plt.show()
# In[136]:
returns['WMT'].hist(bins=40, histtype="stepfilled",alpha=0.5)
x = np.linspace(port_mean - 3*port_stdev, port_mean+3*port_stdev,100)
plt.plot(x, sp.stats.norm.pdf(x, port_mean, port_stdev), "r")
plt.title("WMT returns (binned) vs. normal distribution")
plt.show()
# In[135]:
returns['VZ'].hist(bins=40, histtype="stepfilled",alpha=0.5)
x = np.linspace(port_mean - 3*port_stdev, port_mean+3*port_stdev,100)
plt.plot(x, sp.stats.norm.pdf(x, port_mean, port_stdev), "r")
plt.title("VZ returns (binned) vs. normal distribution")
plt.show()
# ### Source
# #### https://www.interviewqs.com/blog/value_at_risk
|
btobin0/Python-Hedging-Trading
|
Learning ValueAtRisk(VAR).py
|
Learning ValueAtRisk(VAR).py
|
py
| 3,246 |
python
|
en
|
code
| 2 |
github-code
|
50
|
27193442601
|
import numpy as np
from tqdm import tqdm
from tabulate import tabulate
from torch.utils.data import DataLoader
from scipy.special import softmax
from sklearn import preprocessing
from data.lmdb_dataset import LMDBDataset
from models.models_dict import DATASET_MODELS_DICT
from config import args
from ds3_utils import ds3
import scipy.io as scio
def compute_prototype(support_feas,support_labels):
unique_labels = np.unique(support_labels)
n_category = unique_labels.shape[0]
prots = np.zeros((n_category,support_feas.shape[1]))
for i in range(n_category):
idx = np.where(support_labels == i)[0]
prots[i,:] = support_feas[idx, :].mean(0)
return prots
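# Added usage note (not in the original file): compute_prototype averages the support
# features per class; for features of shape (n_support, d) and integer labels 0..K-1 it
# returns a (K, d) array with one prototype row per class.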
all_support_dataset = ['ilsvrc_2012','cu_birds','dtd','quickdraw','fungi','vgg_flower','omniglot','aircraft']
def main():
LIMITER = 600
# Setting up datasets
dataspec_root_dir = args['data.dataspec_root_dir']
all_test_datasets = args['data.trgset']
extractor_domains = args['data.train']
dump_name = args['dump.name'] if args['dump.name'] else 'test_dump'
testset = LMDBDataset(args,extractor_domains, all_test_datasets,
args['model.backbone'], 'test', dump_name, LIMITER)
# define the embedding method
dataset_models = DATASET_MODELS_DICT[args['model.backbone']]
    accs_names = ['AS3']
all_accs = dict()
# Go over all test datasets
for test_dataset in all_test_datasets:
# print(test_dataset)
testset.set_sampling_dataset(test_dataset)
test_loader = DataLoader(testset, batch_size=None, batch_sampler=None, num_workers=16)
all_accs[test_dataset] = {name: [] for name in accs_names}
i = 0
all_selected_weights = []
for sample in tqdm(test_loader):
context_labels = sample['context_labels'].numpy()
target_labels = sample['target_labels'].numpy()
context_features_dict = {k: v.numpy() for k, v in sample['context_feature_dict'].items()}
target_features_dict = {k: v.numpy() for k, v in sample['target_feature_dict'].items()}
learner_weight,all_prots = ds3(context_features_dict,context_labels)
target_features_dict_keys = list(target_features_dict.keys())
all_selected_trg_feas = []
all_selected_prots = []
weights = np.zeros(8)
for i in range(len(target_features_dict_keys)):
# print(learner_weight.shape)
selected_query_feas = target_features_dict[target_features_dict_keys[i]]
selected_prototypes = all_prots[i]
selected_query_feas = preprocessing.normalize(selected_query_feas,norm='l2')
selected_prototypes = preprocessing.normalize(selected_prototypes,norm='l2')
all_selected_trg_feas.append(learner_weight[i] * selected_query_feas)
all_selected_prots.append(learner_weight[i] * selected_prototypes)
idx = all_support_dataset.index(target_features_dict_keys[i])
weights[idx] = learner_weight[i]
selected_query_feas = np.hstack(all_selected_trg_feas)
selected_support_prots = np.hstack(all_selected_prots)
# print(selected_idxs)
selected_support_prots = selected_support_prots.transpose([1,0])
logits = np.dot(selected_query_feas, selected_support_prots)
# logits = np.reshape(logits,[-1,logits.shape[-1]])
probs = softmax(logits, axis=1)
preds = probs.argmax(1)
final_acc = np.mean(np.equal(preds, target_labels))
all_accs[test_dataset]['AS3'].append(final_acc)
all_selected_weights.append(weights)
# Make a nice accuracy table
all_selected_weights = np.vstack(all_selected_weights)
results_save_dir = f'{args["model.save_dir"]}/'
rows = []
for dataset_name in all_test_datasets:
row = [dataset_name]
for model_name in accs_names:
acc = np.array(all_accs[dataset_name][model_name]) * 100
mean_acc = acc.mean()
conf = (1.96 * acc.std()) / np.sqrt(len(acc))
mean_acc_round = round(mean_acc, 2)
conf_round = round(conf, 2)
save_pth = f'{results_save_dir}/{dataset_name}/{dataset_name}_ds3_results.txt'
weight_save_pth = f'{results_save_dir}/{dataset_name}/{dataset_name}_weights.mat'
scio.savemat(weight_save_pth,{'support_dset':all_support_dataset,'weight':all_selected_weights})
with open(save_pth, 'w') as f:
f.write(dataset_name)
f.write('\n')
f.write(str(mean_acc_round))
f.write('\n')
f.write(str(conf_round))
f.write('\n')
row.append(f"{mean_acc:0.2f} +- {conf:0.2f}")
rows.append(row)
table = tabulate(rows, headers=['model \\ data'] + accs_names, floatfmt=".2f")
print(table)
print("\n")
if __name__ == '__main__':
main()
|
indussky8/AS3
|
AS3_MM/test_ds3.py
|
test_ds3.py
|
py
| 5,247 |
python
|
en
|
code
| 0 |
github-code
|
50
|
23792607445
|
from copy import deepcopy
n, m = map(int, input().split())
grid = [list(map(int, input().split())) for _ in range(n)]
check = [[False for _ in range(m)] for _ in range(n)]
answer = -99999999
def check_is_visited(check, r1, c1, r2, c2):
return any(
[any([check[i][j] for j in range(c1, c2 + 1)]) for i in range(r1, r2 + 1)]
)
def get_visited_grid(check, r1, c1, r2, c2):
checked = deepcopy(check)
for i in range(r1, r2 + 1):
for j in range(c1, c2 + 1):
checked[i][j] = True
return checked
def get_total(r1, c1, r2, c2):
ret = 0
for i in range(r1, r2 + 1):
for j in range(c1, c2 + 1):
ret += grid[i][j]
return ret
def get_maximum_second_rectangle(checked):
maximum = -9999999
for r1 in range(n):
for c1 in range(m):
for r2 in range(r1, n):
for c2 in range(c1, m):
if check_is_visited(checked, r1, c1, r2, c2):
break
maximum = max(
maximum,
get_total(r1, c1, r2, c2)
)
return maximum
for r1 in range(n):
for c1 in range(m):
for r2 in range(r1, n):
for c2 in range(c1, m):
rectangle = get_total(r1, c1, r2, c2)
                checked = get_visited_grid(check, r1, c1, r2, c2)
second_rectangle = get_maximum_second_rectangle(checked)
answer = max(answer, rectangle + second_rectangle)
print(answer)
|
innjuun/Algorithm
|
LeeBros/2주차/겹쳐지지 않는 두 직사각형.py
|
겹쳐지지 않는 두 직사각형.py
|
py
| 1,515 |
python
|
en
|
code
| 2 |
github-code
|
50
|
16380363520
|
# Preprocessor - Alexander Liao
# This will take dict input (JSON format) and assign each note a UUID
# See /data-formats.md
# `some input` -> `python3 chordgenerator.py`
import json
from sys import stdin, stdout
from chordoffsets import C, D, E, F, G, A, B
def snap(notes):
sixteenthnote = notes["tempo"] / 4
for note in notes["notes"]:
note["startTime"] = round(note["startTime"] / sixteenthnote) * sixteenthnote
return notes
def process(notes):
uuid = 0
for note in notes["notes"]:
note["pitch"] -= notes["key"]
note["key"] = notes["key"]
note["tempo"] = notes["tempo"]
note.update(uuid = uuid, octaves = note["pitch"] // 12)
note["pitch"] %= 12
uuid += 1
notes["notes"].sort(key = lambda note: note["startTime"])
return notes
def merge(notes):
combos = {}
tempo = notes["tempo"]
index = 0
notelist = notes["notes"]
for note in notelist:
beat = int(note["startTime"] / tempo)
if beat not in combos: combos[beat] = []
combos[beat].append(note)
return list(combos.values())
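# Added usage sketch (not in the original script), inferred from the fields accessed above
# ("tempo", "key" and "notes" entries carrying "startTime" and "pitch"); the values are
# illustrative only:
#   echo '{"tempo": 0.5, "key": 0, "notes": [{"startTime": 0.0, "pitch": 64}]}' | python3 preprocessor.py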
print(json.dumps(merge(process(snap(json.loads(stdin.read())))), indent = 4))
|
hyper-neutrino/hack-the-north-2017
|
acc-gen/preprocessor.py
|
preprocessor.py
|
py
| 1,192 |
python
|
en
|
code
| 0 |
github-code
|
50
|
9513492956
|
#!/usr/bin/env /usr/bin/python3
import numpy as np
import os
import subprocess as sp
import multiprocessing as mp
from pathlib import Path
from timer import timer
from make_initial import make_initial
################################################################################
#===============================================================================
# run_eigen.py
#===============================================================================
################################################################################
base_dir = Path(__file__).resolve().parent
out_dir = base_dir.parent/'hyperbolic_eigenvalues'
out_dir.mkdir(exist_ok=True)
cores_to_use = mp.cpu_count() - 4 # = 12
number_sims = cores_to_use * 16
r_param = 0.5
number_cells = 128
polygon_number = 12
area_values = np.array([2.0,1.0,0.5,0.25,0.125])*np.pi
#run_type = 'coarse'
run_type = 'medium'
#run_type = 'fine'
# Coarse
if run_type == 'coarse':
p_values = np.arange(3.800, 4.201, 0.020)
# Fine
elif run_type == 'fine':
p_values = np.arange(3.800, 4.201, 0.002)
else:
p_values = np.arange(3.800, 4.201, 0.010)
def run_eigen (run_number):
sim_dir = out_dir/'run_{0:02d}'.format(run_number)
Path.mkdir(sim_dir, exist_ok = True)
os.chdir(str(sim_dir))
# Run through the parameter values.
for area_param in area_values:
# Generate an initial state.
if (sim_dir/('initial_state_{0:1.4f}.fe'.format(area_param))).exists():
pass
else:
make_initial(N = number_cells,
polygon_sides = polygon_number,
outfile = sim_dir / ('initial_state_' + \
'{0:1.4f}.fe'.format(area_param)),
polygon_area = area_param,
shape_index = 3.6,
perimeter_modulus = 0.5)
for p_param in p_values:
if (sim_dir/('eigenvalues_area_{0:1.4f}'.format(area_param) + \
'_p0_{0:1.3f}.csv'.format(p_param))).exists():
continue
else:
with open(sim_dir/'eigen.fe','w') as eigen_script:
eigen_script.write('p0_shape_index := {0:1.3f};\n'.format(
p_param))
eigen_script.write('relax_system(1000);\n')
eigen_script.write('J;\n0.01\nrelax_system(100);\nJ;\n')
eigen_script.write('relax_system(10000);\n')
eigen_script.write('ritz(-1000,2*vertex_count)')
eigen_script.write('>>"temp.txt"\n')
eigen_script.write('quit 1\n')
# Relax system and output eigenvalues.
eigen = sp.Popen(['evolver', '-feigen.fe', '-x',
'initial_state_{0:1.4f}.fe'.format(area_param)])
eigen.wait()
(sim_dir / 'eigen.fe').unlink()
print(run_number)
eigenvalues = np.genfromtxt('./temp.txt', usecols = (1),
skip_header = 2, skip_footer = 1)
np.savetxt(sim_dir / \
('eigenvalues_area_{0:1.4f}'.format(area_param) + \
'_p0_{0:1.3f}.csv'.format(p_param)),
eigenvalues,
delimiter = ',')
(sim_dir / 'temp.txt').unlink()
if __name__ == '__main__':
os.chdir(str(out_dir))
np.savetxt('./area_values.csv', area_values, delimiter=',', fmt='%1.4f')
np.savetxt('./p_values.csv', p_values, delimiter=',', fmt='%1.3f')
np.savetxt('./run_values.csv', np.arange(1,number_sims+1),
delimiter=',', fmt='%d')
code_timer = timer()
code_timer.start()
Path.mkdir(out_dir, exist_ok = True)
with mp.Pool(processes = cores_to_use) as pool:
pool.map(run_eigen, range(1,number_sims+1))
code_timer.stop()
################################################################################
# EOF
|
HopyanLab/ConPT2D
|
hyperbolic_source/run_eigen.py
|
run_eigen.py
|
py
| 3,416 |
python
|
en
|
code
| 0 |
github-code
|
50
|
15341415584
|
import requests
import datetime
import pandas as pd
import csv
request_headers = {
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:75.0) Gecko/20100101 Firefox/75.0',
'Accept': '*/*',
'Accept-Language': 'en-US,en;q=0.5',
'Origin': 'https://grafcan1.maps.arcgis.com',
'Connection': 'keep-alive',
'Referer': 'https://grafcan1.maps.arcgis.com/apps/opsdashboard/index.html',
'Cache-Control': 'max-age=0',
'TE': 'Trailers',
}
deaths_params = (
('f', 'json'),
('where', '(ESTADO=\'Fallecido\') AND (TIPO_MUN=\'Residencia\')'),
('returnGeometry', 'false'),
('spatialRel', 'esriSpatialRelIntersects'),
('outFields', '*'),
('outStatistics', '[{"statisticType":"count","onStatisticField":"OID","outStatisticFieldName":"value"}]'),
('resultType', 'standard'),
('cacheHint', 'true'),
)
recovered_params = (
('f', 'json'),
('where', '(ESTADO=\'Cerrado por alta m\xE9dica\') AND (TIPO_MUN=\'Residencia\')'),
('returnGeometry', 'false'),
('spatialRel', 'esriSpatialRelIntersects'),
('outFields', '*'),
('outStatistics', '[{"statisticType":"count","onStatisticField":"OID","outStatisticFieldName":"value"}]'),
('resultType', 'standard'),
('cacheHint', 'true'),
)
total_cases_params = (
('f', 'json'),
('where', 'TIPO_MUN=\'Residencia\''),
('returnGeometry', 'false'),
('spatialRel', 'esriSpatialRelIntersects'),
('outFields', '*'),
('outStatistics', '[{"statisticType":"count","onStatisticField":"OID","outStatisticFieldName":"value"}]'),
('resultType', 'standard'),
('cacheHint', 'true'),
)
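# Added sketch (not part of the original script): the three parameter tuples above differ
# only in their 'where' clause, so a small helper could build them; the name build_params
# is hypothetical and the function is not used below.
def build_params(where_clause):
    # shared ArcGIS query parameters with a caller-supplied filter
    return (
        ('f', 'json'),
        ('where', where_clause),
        ('returnGeometry', 'false'),
        ('spatialRel', 'esriSpatialRelIntersects'),
        ('outFields', '*'),
        ('outStatistics', '[{"statisticType":"count","onStatisticField":"OID","outStatisticFieldName":"value"}]'),
        ('resultType', 'standard'),
        ('cacheHint', 'true'),
    )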
endpoint = 'https://services9.arcgis.com/CgZpnNiCwFObjaOT/arcgis/rest/services/CV19tipo/FeatureServer/4/query'
deaths_response = requests.get(endpoint, headers=request_headers, params=deaths_params)
recoveries_response = requests.get(endpoint, headers=request_headers, params=recovered_params)
total_cases_response = requests.get(endpoint, headers=request_headers, params=total_cases_params)
today = datetime.date.today().strftime('%Y/%-m/%d')
deaths = deaths_response.json()['features'][0]['attributes']['value']
recoveries = recoveries_response.json()['features'][0]['attributes']['value']
total_cases = total_cases_response.json()['features'][0]['attributes']['value']
today_row = [today, 'Canaries', total_cases, '', deaths, recoveries]
# check if we have been run already today, and if not, then add the new row
df = pd.read_csv("../data/canarias_arcgis.csv")
if df.values[-1].tolist()[0] == today_row[0]:
print("update_canarias_cases.py: Already run today.")
else:
print("update_canarias_cases.py: First run today.")
with open('../data/canarias_arcgis.csv', 'a') as datafile:
writer = csv.writer(datafile, lineterminator='\n')
print("update_canarias_cases.py: writing row:", today_row)
writer.writerow(today_row)
|
nathanschepers/covid-canaries
|
scripts/update_canarias_cases.py
|
update_canarias_cases.py
|
py
| 2,880 |
python
|
en
|
code
| 2 |
github-code
|
50
|
6661385448
|
# ---
#
# Needs .csv tables to plot quantities
#
# ---
from __future__ import division
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
import numpy as np
import pandas as pd
import sys
import os
import copy
import h5py
import csv
from scipy import interpolate
from scipy import stats
#
sys.path.append('/data01/numrel/vsevolod.nedora/bns_ppr_tools/')
from preanalysis import LOAD_INIT_DATA
from outflowed import EJECTA_PARS
from preanalysis import LOAD_ITTIME
from plotting_methods import PLOT_MANY_TASKS
from profile import LOAD_PROFILE_XYXZ, LOAD_RES_CORR, LOAD_DENSITY_MODES
from utils import Paths, Lists, Labels, Constants, Printcolor, UTILS, Files, PHYSICS
from data import *
from tables import *
from settings import simulations, old_simulations, resolutions
# v_n + _modmtot : v_n / (M1 + M2)
# v_n + _modmchirp : v_n / [(M1 * M2) / (M1 + M2) ** (1. / 5.)] which is Mchirp
# v_n + _modq : v_n / [(M1 * M2) / (M1 + M2) ** 2] which is the symmetric mass ratio eta
# v_n + _modq2 : v_n / [ [(M1 * M2) / (M1 + M2) ** 2] ** 2 ] which is eta squared
# v_n + _modqmtot2 : v_n / [ [(M1 * M2) / (M1 + M2) ** 2] * [M1 + M2] ** 2 ]
#
plot_fit1 = True
plot_fit2 = True
plot_fit_total = True
plot_old_table = True
v_n_x = "Lambda"#"Mej_tot-geo_entropy_above_10_dev_mtot"#"Mej_tot-geo_entropy_above_10"#"Lambda"
v_n_y = "Mej_tot-geo_dev_mtotsymqmchirp"#"Mej_tot-geo_entropy_below_10_dev_mtot"#"Mej_tot-geo_entropy_below_10"#"Mej_tot-geo_6"#"Mej_tot-geo_Mchirp"
v_n_col = "q"
simlist = simulations
simlist2 = old_simulations
simtable = Paths.output + "models3.csv"#"models2.csv"
simtable2 = Paths.output + "radice2018_summary2.csv"#"radice2018_summary.csv"
deferr = 0.2
__outplotdir__ = "/data01/numrel/vsevolod.nedora/figs/all3/"
xyscales = None#"log"
prompt_bhtime = 1.5
marker_pc = 's'
marker_bh = 'o'
marker_long = 'd'
plot_legend = True
rs, rhos = [], []
def get_table_label(v_n):
if v_n == "q":
return r"$M_a/M_b$"
if v_n == "mtot":
return r"$M_b + M_a$"
if v_n == "mtot2":
return r"$(M_b + M_a)^2$"
if v_n == "Mej_tot-geo" or v_n == "Mej":
return r"$M_{\rm{ej}}$ $[10^{-2}M_{\odot}]$"
if v_n == "Lambda":
return r"$\tilde{\Lambda}$"
if v_n == "mchirp":
return r"$\mathcal{M}$"
if v_n == "mchirp2":
return r"$\mathcal{M} ^2$"
if v_n == "Mej":
return r"M_{\rm{ej}}"
if v_n == "symq":
return r"$\eta$"
if v_n == "symq2":
return r"$\eta^2$"
if v_n == "symqmchirp":
return r"$\eta\mathcal{M}$"
if v_n == "mtotsymqmchirp":
return r"$\eta M_{\rm{tot}}\mathcal{M}$"
if v_n == "Mej_tot-geo_entropy_below_10"or v_n == "Mej_tidal":
return r"$M_{\rm{ej;s<10}}$" # $[10^{-2}M_{\odot}]$
if v_n == "Mej_tot-geo_entropy_above_10" or v_n == "Mej_shocked":
return r"$M_{\rm{ej;s>10}}$" # $[10^{-2}M_{\odot}]$
#
elif str(v_n).__contains__("_mult_"):
v_n1 = v_n.split("_mult_")[0]
v_n2 = v_n.split("_mult_")[-1]
lbl1 = get_table_label(v_n1)
lbl2 = get_table_label(v_n2)
return lbl1 + r"$\times$" + lbl2
elif str(v_n).__contains__("_dev_"):
v_n1 = v_n.split("_dev_")[0]
v_n2 = v_n.split("_dev_")[-1]
lbl1 = get_table_label(v_n1)
lbl2 = get_table_label(v_n2)
return lbl1 + r"$/$" + lbl2
raise NameError("Np label for v_n: {}".format(v_n))
# if v_n == "Lambda":
# return r"$\tilde{\Lambda}$"
# if v_n == "Mej_tot-geo":
# return r"$M_{\rm{ej}}$ $[10^{-2}M_{\odot}]$"
# if v_n == "Mej_tot-geo_entropy_above_10" or v_n_y == "Mej_shocked":
# return r"$M_{\rm{ej;s>10}}$ $[10^{-2}M_{\odot}]$"
# if v_n == "Mej_tot-geo_entropy_below_10"or v_n_y == "Mej_tidal":
# return r"$M_{\rm{ej;s<10}}$ $[10^{-2}M_{\odot}]$"
# if v_n == "Mej_tot-geo" or v_n == "Mej":
# return r"$M_{\rm{ej}}$ $[10^{-2}M_{\odot}]$"
# if v_n == "Mej_tot-geo_5" or v_n == "Mej5":
# return r"$M_{\rm{ej}} / (\eta^2)$" # $\eta=(M_1 \times M_2) / (M_1 + M_2)^2$
# if v_n == "Mej_tot-geo_1" or v_n == "Mej1":
# return r"$M_{\rm{ej}} / M_{\rm{chirp}}$" # $\eta=(M_1 \times M_2) / (M_1 + M_2)^2$
# if v_n == "Mej_tot-geo_6" or v_n == "Mej6":
# return r"$M_{\rm{ej}} \times \eta^2$" # $\eta=(M_1 \times M_2) / (M_1 + M_2)^2$
# if v_n == "q":
# return r"$M_a/M_b$"
# return str(v_n).replace('_','\_')
def set_dic_xminxmax(v_n, dic, xarr):
#
if v_n == "Mej_tot-geo" or v_n == "Mej":
dic['xmin'], dic['xmax'] = 0, 1.5
elif v_n == "Lambda":
dic['xmin'], dic['xmax'] = 5, 1500
elif v_n == "Mej_tot-geo_entropy_above_10" or v_n == "Mej_shocked":
dic['xmin'], dic['xmax'] = 0, 0.7
elif v_n == "Mej_tot-geo_entropy_below_10" or v_n == "Mej_tidal":
dic['xmin'], dic['xmax'] = 0, 0.5
else:
dic['xmin'], dic['xmax'] = np.array(xarr).min(), np.array(xarr).max()
Printcolor.yellow("xlimits are not set for v_n_x:{}".format(v_n))
return dic
def set_dic_yminymax(v_n, dic, yarr):
#
if v_n == "Mej_tot-geo" or v_n == "Mej":
dic['ymin'], dic['ymax'] = 0, 1.5
elif v_n == "Mej_tot-geo_2" or v_n == "Mej2":
dic['ymin'], dic['ymax'] = 0, 2
elif v_n == "Mej_tot-geo_1" or v_n == "Mej1":
dic['ymin'], dic['ymax'] = 0, 0.7
elif v_n == "Mej_tot-geo_3" or v_n == "Mej3":
dic['ymin'], dic['ymax'] = 0, 2
elif v_n == "Mej_tot-geo_4" or v_n == "Mej4":
dic['ymin'], dic['ymax'] = 0, .75
elif v_n == "Mej_tot-geo_5" or v_n == "Mej5":
dic['ymin'], dic['ymax'] = 0, 10.
elif v_n == "Mej_tot-geo_6" or v_n == "Mej6":
dic['ymin'], dic['ymax'] = 0, 0.06
elif v_n == "Mej_tot-geo_entropy_above_10" or v_n == "Mej_shocked":
dic['ymin'], dic['ymax'] = 0, 0.7
elif v_n == "Mej_tot-geo_entropy_below_10" or v_n == "Mej_tidal":
dic['ymin'], dic['ymax'] = 0, 0.5
else:
dic['ymin'], dic['ymax'] = np.array(yarr).min(), np.array(yarr).max()
Printcolor.yellow("xlimits are not set for v_n_x:{}".format(v_n))
return dic
''' --------------------------------------------------------------- '''
total_x = [] # for fits
total_y = [] # for fits
Printcolor.blue("Collecting Data")
o_tbl = GET_PAR_FROM_TABLE()
o_tbl.set_intable = simtable
o_tbl.load_table()
data = {}
all_x = []
all_y = []
all_col = []
all_marker = []
for eos in simlist.keys():
data[eos] = {}
for usim in simlist[eos].keys():
data[eos][usim] = {}
sims = simlist[eos][usim]
print("\t{} [{}]".format(usim, len(sims)))
x, x1, x2 = o_tbl.get_par_with_error(sims, v_n_x, deferr=deferr)
y, y1, y2 = o_tbl.get_par_with_error(sims, v_n_y, deferr=deferr)
col = o_tbl.get_par(sims[0], v_n_col)
data[eos][usim]["x"] = x
data[eos][usim]['x1'] = x1
data[eos][usim]['x2'] = x2
data[eos][usim]['y'] = y
data[eos][usim]['y1'] = y1
data[eos][usim]['y2'] = y2
data[eos][usim]['col'] = col
all_x.append(x)
all_y.append(y)
all_col.append(col)
isbh, ispromtcoll = o_tbl.get_is_prompt_coll(sims, delta_t=prompt_bhtime, v_n_tmerg="tmerg_r")
data[eos][usim]["isprompt"] = ispromtcoll
data[eos][usim]["isbh"] = isbh
if isbh and not ispromtcoll:
marker = marker_bh
elif isbh and ispromtcoll:
marker = marker_pc
else:
marker = marker_long
all_marker.append(marker)
data[eos][usim]["marker"] = marker
data["allx"] = np.array(all_x)
data["ally"] = np.array(all_y)
data["allcol"] = np.array(all_col)
data["allmarker"] = all_marker
# for fits
for eos in simlist.keys():
for usim in simlist[eos].keys():
if not data[eos][usim]["isprompt"]:
total_x.append(data[eos][usim]["x"])
total_y.append(data[eos][usim]["y"])
#
Printcolor.green("Data is collected")
Printcolor.blue("Plotting Data")
#
def make_plot_name(v_n_x, v_n_y, v_n_col, do_plot_old_table):
figname = ''
figname = figname + v_n_x + '_'
figname = figname + v_n_y + '_'
figname = figname + v_n_col + '_'
if do_plot_old_table:
figname = figname + '_InclOldTbl'
figname = figname + '.png'
return figname
figname = make_plot_name(v_n_x, v_n_y, v_n_col, False)
#
def get_custom_colormap(cmap_name = 'newCmap', n_bin=8):
from matplotlib.colors import LinearSegmentedColormap
# colors = [(1, 0, 0), (0, 1, 0), (0, 0, 1)]
colors=([(1, 0, 0), (0, 0, 1)],[1., 1.8])
cm = LinearSegmentedColormap.from_list(cmap_name, colors, N=n_bin)
return cm
#
o_plot = PLOT_MANY_TASKS()
o_plot.gen_set["figdir"] = __outplotdir__
o_plot.gen_set["type"] = "cartesian"
o_plot.gen_set["figsize"] = (4.2, 3.6) # <->, |]
o_plot.gen_set["figname"] = figname
o_plot.gen_set["sharex"] = True
o_plot.gen_set["sharey"] = False
o_plot.gen_set["subplots_adjust_h"] = 0.0
o_plot.gen_set["subplots_adjust_w"] = 0.0
o_plot.set_plot_dics = []
#
assert len(data["allx"]) == len(data["ally"])
assert len(data["ally"]) == len(data["allcol"])
assert len(data["allcol"]) > 0
if v_n_y.__contains__("Mej"):
data["ally"] = data["ally"] * 1e2
if v_n_x.__contains__("Mej"):
data["allx"] = data["allx"] * 1e2
#
# if eos == "BLh" and u_sim == simulations2[eos][q].keys()[-1]:
# print('-0--------------------')
if plot_fit1:
total_x1, total_y1 = UTILS.x_y_z_sort(total_x, total_y)
# fit_polynomial(x, y, order, depth, new_x=np.empty(0, ), print_formula=True):
Printcolor.blue("New data fit")
if xyscales == "log":
fit_x1, fit_y1 = UTILS.fit_polynomial(total_x, total_y, order=1, depth=100)
else:
fit_x1, fit_y1 = UTILS.fit_polynomial(total_x, total_y, order=1, depth=100)
#
if v_n_y.__contains__("Mej"):
fit_y1 = fit_y1 * 1e2
if v_n_x.__contains__("Mej"):
fit_x1 = fit_x1 * 1e2
# print(fit_x, fit_y)
linear_fit = {
'task': 'line', 'ptype': 'cartesian',
'position': (1, 1),
'xarr': fit_x1, "yarr": fit_y1,
'xlabel': None, "ylabel": None,
'label': "New Data",
'ls': '-', 'color': 'red', 'lw': 1., 'alpha': 0.8, 'ds': 'default',
'sharey': False,
        'sharex': False,  # removes angular ticks
'fontsize': 14,
'labelsize': 14,
# 'text':{'x':1., 'y':1., 'text':'my_text', 'fs':14, 'color':'black','horal':True}
}
o_plot.set_plot_dics.append(linear_fit)
r, rho = stats.spearmanr(total_x1, total_y1)
print("r: {} rho: {}".format(r, rho))
rs.append(r)
rhos.append(rho)
text_dic = {
'task': 'text', 'ptype': 'cartesian',
'position': (1, 1),
'x': 0.45, 'y': 0.9,
'text': r'New: $r:{:.2f}$ $\rho:{:.2e}$'.format(r, rho),
'fs': 10, 'color': 'black', 'horizontalalignment': "left",
'transform': True
}
o_plot.set_plot_dics.append(text_dic)
dic = {
'task': 'scatter', 'ptype': 'cartesian', # 'aspect': 1.,
'xarr': data["allx"], "yarr": data["ally"], "zarr": data["allcol"],
'position': (1, 1), # 'title': '[{:.1f} ms]'.format(time_),
'cbar': {},
'v_n_x': v_n_x, 'v_n_y': v_n_y, 'v_n': v_n_col,
'xlabel': get_table_label(v_n_x), "ylabel": get_table_label(v_n_y),
'xmin': 300, 'xmax': 900, 'ymin': None, 'ymax': None, 'vmin': 1.0, 'vmax': 1.9,
'fill_vmin': False, # fills the x < vmin with vmin
'xscale': None, 'yscale': None,
'cmap': 'tab10', 'norm': None, 'ms': 60, 'markers': data["allmarker"], 'alpha': 0.7, "edgecolors": None,
'tick_params': {"axis": 'both', "which": 'both', "labelleft": True,
"labelright": False, # "tick1On":True, "tick2On":True,
"labelsize": 12,
"direction": 'in',
"bottom": True, "top": True, "left": True, "right": True},
'yaxiscolor': {'bottom': 'black', 'top': 'black', 'right': 'black', 'left': 'black'},
'minorticks': True,
'title': {}, # {"text": eos, "fontsize": 12},
'label': None,
'legend': {},
'sharey': False,
    'sharex': False,  # removes angular ticks
'fontsize': 14,
'labelsize': 14
}
dic = set_dic_xminxmax(v_n_x, dic, data["allx"])
dic = set_dic_yminymax(v_n_y, dic, data["ally"])
dic['cbar'] = {'location': 'right .03 .0', 'label': Labels.labels(v_n_col), # 'fmt': '%.1f',
'labelsize': 14, 'fontsize': 14}
o_plot.set_plot_dics.append(dic)
''' ------------------------------------------------------------------------------------------ '''
if plot_old_table:
translation = {"Mej_tot-geo":"Mej",
"Lambda":"Lambda",
"Mej_tot-geo_Mchirp":"Mej_Mchirp",
"Mej_tot-geo_1":"Mej1",
"Mej_tot-geo_2":"Mej2",
"Mej_tot-geo_3": "Mej3",
"Mej_tot-geo_4": "Mej4",
"Mej_tot-geo_5": "Mej5",
"Mej_tot-geo_6": "Mej6",
"tcoll_gw":"tcoll",
"Mej_tot-geo_entropy_above_10":"Mej_shocked",
"Mej_tot-geo_entropy_below_10":"Mej_tidal",
"Mej_tot-geo_entropy_above_10_dev_mtot":"Mej_shocked_dev_mtot",
"Mej_tot-geo_entropy_below_10_dev_mtot":"Mej_tidal_dev_mtot",
"Mej_tot-geo_dev_mtot":"Mej_dev_mtot",
"Mej_tot-geo_dev_mtot2":"Mej_dev_mtot2",
"Mej_tot-geo_mult_mtot":"Mej_mult_mtot",
"Mej_tot-geo_mult_mtot2":"Mej_mult_mtot2",
"Mej_tot-geo_dev_symq":"Mej_dev_symq",
"Mej_tot-geo_dev_symq2":"Mej_dev_symq2",
"Mej_tot-geo_mult_symq":"Mej_mult_symq",
"Mej_tot-geo_mult_symq2":"Mej_mult_symq2",
"Mej_tot-geo_dev_mchirp":"Mej_dev_mchirp",
"Mej_tot-geo_dev_mchirp2":"Mej_dev_mchirp2",
"Mej_tot-geo_mult_mchirp":"Mej_mult_mchirp",
"Mej_tot-geo_dev_symqmchirp":"Mej_dev_symqmchirp",
"Mej_tot-geo_dev_mtotsymqmchirp":"Mej_dev_mtotsymqmchirp"}
v_n_x = translation[v_n_x]
v_n_y = translation[v_n_y]
total_x2 = [] # for fits
total_y2 = [] # for fits
Printcolor.blue("Collecting Data")
o_tbl = GET_PAR_FROM_TABLE()
o_tbl.set_intable = simtable2
o_tbl.load_table()
data2 = {}
all_x = []
all_y = []
all_col = []
all_marker = []
for eos in simlist2.keys():
data2[eos] = {}
for usim in simlist2[eos].keys():
data2[eos][usim] = {}
sims = simlist2[eos][usim]
print("\t{} [{}]".format(usim, len(sims)))
x, x1, x2 = o_tbl.get_par_with_error(sims, v_n_x, deferr=deferr)
y, y1, y2 = o_tbl.get_par_with_error(sims, v_n_y, deferr=deferr)
# col = o_tbl.get_par(sims[0], v_n_col)
col = o_tbl.get_par(sims[0], v_n_col)
# print(col); exit(1)
data2[eos][usim]["x"] = x
data2[eos][usim]['x1'] = x1
data2[eos][usim]['x2'] = x2
data2[eos][usim]['y'] = y
data2[eos][usim]['y1'] = y1
data2[eos][usim]['y2'] = y2
data2[eos][usim]['col'] = col
all_x.append(x)
all_y.append(y)
all_col.append(col)
isbh, ispromtcoll = o_tbl.get_is_prompt_coll(sims, delta_t=3., v_n_tcoll="tcoll", v_n_tmerg="tmerg_r")
data2[eos][usim]["isprompt"] = ispromtcoll
data2[eos][usim]["isbh"] = isbh
if isbh and not ispromtcoll:
marker = marker_bh
elif isbh and ispromtcoll:
marker = marker_pc
else:
marker = marker_long
all_marker.append(marker)
data2[eos][usim]["marker"] = marker
data2["allx"] = np.array(all_x)
data2["ally"] = np.array(all_y)
data2["allcol"] = np.array(all_col)
data2["allmarker"] = all_marker
#
#
Printcolor.green("Data is collected")
Printcolor.blue("Plotting Data")
#
def make_plot_name(v_n_x, v_n_y, v_n_col, do_plot_old_table):
figname = ''
figname = figname + v_n_x + '_'
figname = figname + v_n_y + '_'
figname = figname + v_n_col + '_'
if do_plot_old_table:
figname = figname + '_InclOldTbl'
figname = figname + '.png'
return figname
figname = make_plot_name(v_n_x, v_n_y, v_n_col, False)
#
def get_custom_colormap(cmap_name = 'newCmap', n_bin=8):
from matplotlib.colors import LinearSegmentedColormap
# colors = [(1, 0, 0), (0, 1, 0), (0, 0, 1)]
colors=([(1, 0, 0), (0, 0, 1)],[1., 1.8])
cm = LinearSegmentedColormap.from_list(cmap_name, colors, N=n_bin)
return cm
#
#
assert len(data2["allx"]) == len(data2["ally"])
assert len(data2["ally"]) == len(data2["allcol"])
assert len(data2["allcol"]) > 0
if v_n_y.__contains__("Mej"):
data2["ally"] = data2["ally"] * 1e2
if v_n_x.__contains__("Mej"):
data2["allx"] = data2["allx"] * 1e2
#
# if plot_legend:
# x = -1.
# y = -1.
# marker_dic_lr = {
# 'task': 'line', 'ptype': 'cartesian',
# 'position': (1, 1),
# 'xarr': [x], "yarr": [y],
# 'xlabel': None, "ylabel": None,
# 'label': "BH formation",
# 'marker': marker_bh, 'color': 'gray', 'ms': 10., 'alpha': 0.4,
# 'sharey': False,
# 'sharex': False, # removes angular citkscitks
# 'fontsize': 14,
# 'labelsize': 14
# }
#
# o_plot.set_plot_dics.append(marker_dic_lr)
# marker_dic_lr = {
# 'task': 'line', 'ptype': 'cartesian',
# 'position': (1, 1),
# 'xarr': [x], "yarr": [y],
# 'xlabel': None, "ylabel": None,
# 'label': "Prompt collapse",
# 'marker': marker_pc, 'color': 'gray', 'ms': 10., 'alpha': 0.4,
# 'sharey': False,
# 'sharex': False, # removes angular citkscitks
# 'fontsize': 14,
# 'labelsize': 14
# }
#
# o_plot.set_plot_dics.append(marker_dic_lr)
# marker_dic_lr = {
# 'task': 'line', 'ptype': 'cartesian',
# 'position': (1, 1),
# 'xarr': [x], "yarr": [y],
# 'xlabel': None, "ylabel": None,
# 'label': "Long lived",
# 'marker': marker_long, 'color': 'gray', 'ms': 10., 'alpha': 0.4,
# 'sharey': False,
# 'sharex': False, # removes angular citkscitks
# 'fontsize': 14,
# 'labelsize': 14
# }
# marker_dic_lr['legend'] = {'loc': 'upper left', 'ncol': 1, 'shadow': False, 'framealpha': 0.,
# 'borderaxespad': 0., 'fontsize': 11}
# o_plot.set_plot_dics.append(marker_dic_lr)
# # if eos == "BLh" and u_sim == simulations2[eos][q].keys()[-1]:
# # print('-0--------------------')
# print(data2["ally"]); exit(1)
dic2 = {
'task': 'scatter', 'ptype': 'cartesian', # 'aspect': 1.,
'xarr': data2["allx"], "yarr": data2["ally"], "zarr": data2["allcol"],
'position': (1, 1), # 'title': '[{:.1f} ms]'.format(time_),
'cbar': {},
'v_n_x': v_n_x, 'v_n_y': v_n_y, 'v_n': v_n_col,
'xlabel': get_table_label(v_n_x), "ylabel": get_table_label(v_n_y),
'xmin': 300, 'xmax': 900, 'ymin': 0.03, 'ymax': 0.5, 'vmin': 1.0, 'vmax': 1.9,
'fill_vmin': False, # fills the x < vmin with vmin
'xscale': None, 'yscale': None,
'cmap': 'tab10', 'norm': None, 'ms': 40, 'marker': '*', 'alpha': 0.4, "edgecolors": None, #data2["allmarker"]
'tick_params': {"axis": 'both', "which": 'both', "labelleft": True,
"labelright": False, # "tick1On":True, "tick2On":True,
"labelsize": 12,
"direction": 'in',
"bottom": True, "top": True, "left": True, "right": True},
'yaxiscolor': {'bottom': 'black', 'top': 'black', 'right': 'black', 'left': 'black'},
'minorticks': True,
'title': {}, # {"text": eos, "fontsize": 12},
'label': None,
'legend': {},
'sharey': False,
        'sharex': False,  # removes angular ticks
'fontsize': 14,
'labelsize': 14
}
dic2 = set_dic_xminxmax(v_n_x, dic2, data2["allx"])
dic2 = set_dic_yminymax(v_n_y, dic2, data2["ally"])
dic2['cbar'] = {'location': 'right .03 .0', 'label': Labels.labels(v_n_col), # 'fmt': '%.1f',
'labelsize': 14, 'fontsize': 14}
if xyscales == "log":
dic2["xscale"] = "log"
dic2["yscale"] = "log"
dic2["xmin"], dic2["xmax"] = 5e-3, 1e0
dic2["ymin"], dic2["ymax"] = 5e-3, 1e0
o_plot.set_plot_dics.append(dic2)
# for fits
for eos in simlist2.keys():
for usim in simlist2[eos].keys():
if not data2[eos][usim]["isprompt"]:
total_x2.append(data2[eos][usim]["x"])
total_y2.append(data2[eos][usim]["y"])
if plot_fit2:
total_x2, total_y2 = UTILS.x_y_z_sort(total_x2, total_y2)
# fit_polynomial(x, y, order, depth, new_x=np.empty(0, ), print_formula=True):
Printcolor.blue("Old data fit")
if xyscales == "log":
fit_x2, fit_y2 = UTILS.fit_polynomial(total_x2, total_y2, order=1, depth=100)
else:
fit_x2, fit_y2 = UTILS.fit_polynomial(total_x2, total_y2, order=1, depth=100)
#
if v_n_y.__contains__("Mej"):
fit_y2 = fit_y2 * 1e2
if v_n_x.__contains__("Mej"):
fit_x2 = fit_x2 * 1e2
# print(fit_x, fit_y)
linear_fit = {
'task': 'line', 'ptype': 'cartesian',
'position': (1, 1),
'xarr': fit_x2, "yarr": fit_y2,
'xlabel': None, "ylabel": None,
'label': "Old Data",
'ls': '--', 'color': 'blue', 'lw': 1., 'alpha': 0.8, 'ds': 'default',
'sharey': False,
        'sharex': False,  # removes angular ticks
'fontsize': 14,
'labelsize': 14,
# 'text':{'x':1., 'y':1., 'text':'my_text', 'fs':14, 'color':'black','horal':True}
}
o_plot.set_plot_dics.append(linear_fit)
r, rho = stats.spearmanr(total_x2, total_y2)
rs.append(r)
rhos.append(rho)
print("r: {} rho: {}".format(r, rho))
text_dic = {
'task': 'text', 'ptype': 'cartesian',
'position': (1, 1),
'x': 0.45, 'y': 0.8,
'text': r'Old: $r:{:.2f}$ $\rho:{:.2e}$'.format(r, rho),
'fs': 10, 'color': 'black', 'horizontalalignment': "left",
'transform': True
}
o_plot.set_plot_dics.append(text_dic)
if plot_fit2 and plot_old_table:
total_x3, total_y3 = np.append(total_x, total_x2), np.append(total_y, total_y2)
# print(len(total_x3)); exit(1)
total_x3, total_y3 = UTILS.x_y_z_sort(total_x3, total_y3)
# fit_polynomial(x, y, order, depth, new_x=np.empty(0, ), print_formula=True):
Printcolor.blue("All data fit")
if xyscales == "log":
fit_x3, fit_y3 = UTILS.fit_polynomial(total_x3, total_y3, order=1, depth=100)
else:
fit_x3, fit_y3 = UTILS.fit_polynomial(total_x3, total_y3, order=1, depth=100)
#
if v_n_y.__contains__("Mej"):
fit_y3 = fit_y3 * 1e2
if v_n_x.__contains__("Mej"):
fit_x3 = fit_x3 * 1e2
# print(fit_x, fit_y)
linear_fit = {
'task': 'line', 'ptype': 'cartesian',
'position': (1, 1),
'xarr': fit_x3, "yarr": fit_y3,
'xlabel': None, "ylabel": None,
'label': "All Data",
'ls': ':', 'color': 'black', 'lw': 1., 'alpha': 1., 'ds': 'default',
'sharey': False,
        'sharex': False,  # removes angular ticks
'fontsize': 14,
'labelsize': 14,
# 'text':{'x':1., 'y':1., 'text':'my_text', 'fs':14, 'color':'black','horal':True}
}
o_plot.set_plot_dics.append(linear_fit)
r, rho = stats.spearmanr(total_x3, total_y3)
print("r: {} rho: {}".format(r, rho))
text_dic = {
'task': 'text', 'ptype': 'cartesian',
'position': (1, 1),
'x': 0.45, 'y': 0.7,
'text': r'All: $r:{:.2f}$ $\rho:{:.2e}$'.format(r, rho),
'fs': 10, 'color': 'black', 'horizontalalignment': "left",
'transform': True
}
o_plot.set_plot_dics.append(text_dic)
rs.append(r)
rhos.append(rho)
if plot_legend:
x = -1.
y = -1.
marker_dic_lr = {
'task': 'line', 'ptype': 'cartesian',
'position': (1, 1),
'xarr': [x], "yarr": [y],
'xlabel': None, "ylabel": None,
'label': "BH formation",
'marker': marker_bh, 'color': 'gray', 'ms': 8., 'alpha': 0.4,
'sharey': False,
        'sharex': False,  # removes angular ticks
'fontsize': 14,
'labelsize': 14
}
o_plot.set_plot_dics.append(marker_dic_lr)
marker_dic_lr = {
'task': 'line', 'ptype': 'cartesian',
'position': (1, 1),
'xarr': [x], "yarr": [y],
'xlabel': None, "ylabel": None,
'label': "Prompt collapse",
'marker': marker_pc, 'color': 'gray', 'ms': 8., 'alpha': 0.4,
'sharey': False,
        'sharex': False,  # removes angular ticks
'fontsize': 14,
'labelsize': 14
}
o_plot.set_plot_dics.append(marker_dic_lr)
marker_dic_lr = {
'task': 'line', 'ptype': 'cartesian',
'position': (1, 1),
'xarr': [x], "yarr": [y],
'xlabel': None, "ylabel": None,
'label': "Long lived",
'marker': marker_long, 'color': 'gray', 'ms': 8., 'alpha': 0.4,
'sharey': False,
        'sharex': False,  # removes angular ticks
'fontsize': 14,
'labelsize': 14
}
marker_dic_lr['legend'] = {'loc': 'upper left', 'ncol': 1, 'shadow': False, 'framealpha': 0.,
'borderaxespad': 0., 'fontsize': 11}
o_plot.set_plot_dics.append(marker_dic_lr)
print("\n")
Printcolor.blue("Spearman's Rank Coefficients for: ")
Printcolor.green("v_n_x: {}".format(v_n_x))
Printcolor.green("v_n_y: {}".format(v_n_y))
Printcolor.blue("New data: ", comma=True)
Printcolor.green("{:.2f}".format(rs[0]))
Printcolor.blue("Old data: ", comma=True)
Printcolor.green("{:.2f}".format(rs[1]))
Printcolor.blue("All data: ", comma=True)
Printcolor.green("{:.2f}".format(rs[2]))
o_plot.main()
exit(0)
__outplotdir__ = "/data01/numrel/vsevolod.nedora/figs/all3/"
#/data01/numrel/vsevolod.nedora/bns_ppr_tools
# import imp
# LOAD_INIT_DATA = imp.load_source("LOAD_INIT_DATA", "/data01/numrel/vsevolod.nedora/bns_ppr_tools/preanalysis.py")
# LOAD_INIT_DATA.
def __get_value(o_init, o_par, det=None, mask=None, v_n=None):
if v_n in o_init.list_v_ns and mask == None:
value = o_init.get_par(v_n)
elif not v_n in o_init.list_v_ns and mask == None:
value = o_par.get_par(v_n)
elif v_n == "Mej_tot_scaled":
ma = __get_value(o_init, o_par, None, None, "Mb1")
mb = __get_value(o_init, o_par, None, None, "Mb2")
mej = __get_value(o_init, o_par, det, mask, "Mej_tot")
return mej / (ma + mb)
elif v_n == "Mej_tot_scaled2":
# M1 * M2 / (M1 + M2) ^ 2
ma = __get_value(o_init, o_par, None, None, "Mb1")
mb = __get_value(o_init, o_par, None, None, "Mb2")
eta = ma * mb / (ma + mb) ** 2
mej = __get_value(o_init, o_par, det, mask, "Mej_tot")
return mej / (eta * (ma + mb))
elif not v_n in o_init.list_v_ns and mask != None:
value = o_par.get_outflow_par(det, mask, v_n)
else:
raise NameError("unrecognized: v_n_x:{} mask_x:{} det:{} combination"
.format(v_n, mask, det))
if value == None or np.isinf(value) or np.isnan(value):
raise ValueError("sim: {} det:{} mask:{} v_n:{} --> value:{} wrong!"
.format(o_par.sim,det,mask,v_n, value))
return value
def __get_val_err(sims, o_inits, o_pars, v_n, det=0, mask="geo", error=0.2):
if v_n == "nsims":
return len(sims), len(sims), len(sims)
elif v_n == "pizzaeos":
pizza_eos = ''
for sim, o_init, o_par in zip(sims, o_inits, o_pars):
_pizza_eos = o_init.get_par("pizza_eos")
if pizza_eos != '' and pizza_eos != _pizza_eos:
raise NameError("sim:{} pizza_eos:{} \n sim:{} pizza_eos: {} \n MISMATCH"
.format(sim, pizza_eos, sims[0], _pizza_eos))
pizza_eos = _pizza_eos
return pizza_eos, pizza_eos, pizza_eos
if len(sims) == 0:
raise ValueError("no simualtions passed")
_resols, _values = [], []
assert len(sims) == len(o_inits)
assert len(sims) == len(o_pars)
for sim, o_init, o_par in zip(sims, o_inits, o_pars):
_val = __get_value(o_init, o_par, det, mask, v_n)
# print(sim, _val)
_res = "fuck"
for res in resolutions.keys():
if sim.__contains__(res):
_res = res
break
if _res == "fuck":
raise NameError("fuck")
_resols.append(resolutions[_res])
_values.append(_val)
if len(sims) == 1:
return _values[0], _values[0] - error * _values[0], _values[0] + error * _values[0]
elif len(sims) == 2:
delta = np.abs(_values[0] - _values[1])
if _resols[0] < _resols[1]:
return _values[0], _values[0] - delta, _values[0] + delta
else:
return _values[1], _values[1] - delta, _values[1] + delta
elif len(sims) == 3:
_resols_, _values_ = UTILS.x_y_z_sort(_resols, _values) # 123, 185, 236
delta1 = np.abs(_values_[0] - _values_[1])
delta2 = np.abs(_values_[1] - _values_[2])
# print(_values, _values_); exit(0)
return _values_[1], _values_[1] - delta1, _values_[1] + delta2
else:
raise ValueError("Too many simulations")
def __get_is_prompt_coll(sims, o_inits, o_pars, delta_t = 3.):
isprompt = False
isbh = False
for sim, o_init, o_par in zip(sims, o_inits, o_pars):
tcoll = o_par.get_par("tcoll_gw")
if np.isinf(tcoll):
pass
else:
isbh = True
tmerg = o_par.get_par("tmerg")
assert tcoll > tmerg
if float(tcoll - tmerg) < delta_t * 1e-3:
isprompt = True
return isbh, isprompt
def __get_custom_descrete_colormap(n):
# n = 5
import matplotlib.colors as col
from_list = col.LinearSegmentedColormap.from_list
cm = from_list(None, plt.cm.Set1(range(0, n)), n)
x = np.arange(99)
y = x % 11
z = x % n
return cm
v_n_x = "Lambda"
v_n_y = "Ye_ave"
v_n_col = "q"
det = 0
do_plot_linear_fit = True
do_plot_promptcoll = True
do_plot_bh = True
do_plot_error_bar_y = True
do_plot_error_bar_x = False
do_plot_old_table = True
do_plot_annotations = False
mask_x, mask_y, mask_col = None, "geo", None # geo_entropy_above_10
data2 = {}
error = 0.2 # in * 100 percent
delta_t_prompt = 2. # ms
''' --- collect data for table 1 --- '''
old_data = {}
if do_plot_old_table:
#
    if mask_x != None and mask_x != "geo":
        raise NameError("old table does not contain data for mask_x: {}".format(mask_x))
    if mask_y != None and mask_y != "geo":
        raise NameError("old table does not contain data for mask_y: {}".format(mask_y))
    if mask_col != None and mask_col != "geo":
        raise NameError("old table does not contain data for mask_col: {}".format(mask_col))
#
new_old_dic = {'Mej_tot': "Mej",
"Lambda": "Lambda",
"vel_inf_ave": "vej",
"Ye_ave": "Yeej"}
old_tbl = ALL_SIMULATIONS_TABLE()
old_tbl.set_list_neut = ["LK", "M0"]
old_tbl.set_list_vis = ["L5", "L25", "L50"]
old_tbl.set_list_eos.append("BHBlp")
old_tbl.set_intable = Paths.output + "radice2018_summary.csv"
old_tbl.load_input_data()
old_all_x = []
old_all_y = []
old_all_col = []
for run in old_tbl.table:
sim = run['name']
old_data[sim] = {}
if not sim.__contains__("HR") \
and not sim.__contains__("OldM0") \
and not sim.__contains__("LR") \
and not sim.__contains__("L5") \
and not sim.__contains__("L25") \
and not sim.__contains__("L50"):
x = float(run[new_old_dic[v_n_x]])
y = float(run[new_old_dic[v_n_y]])
col = "gray"
old_all_col.append(col)
old_all_x.append(x)
old_all_y.append(y)
old_data[sim][v_n_x] = x
old_data[sim][v_n_y] = y
Printcolor.green("old data is collected")
old_all_x = np.array(old_all_x)
old_all_y = np.array(old_all_y)
''' --- --- --- '''
new_data = {}
# collect old data
old_data = {}
if do_plot_old_table:
#
    if mask_x != None and mask_x != "geo":
        raise NameError("old table does not contain data for mask_x: {}".format(mask_x))
    if mask_y != None and mask_y != "geo":
        raise NameError("old table does not contain data for mask_y: {}".format(mask_y))
    if mask_col != None and mask_col != "geo":
        raise NameError("old table does not contain data for mask_col: {}".format(mask_col))
#
new_old_dic = {'Mej_tot': "Mej",
"Lambda": "Lambda",
"vel_inf_ave": "vej",
"Ye_ave": "Yeej"}
old_tbl = ALL_SIMULATIONS_TABLE()
old_tbl.set_list_neut = ["LK", "M0"]
old_tbl.set_list_vis = ["L5", "L25", "L50"]
old_tbl.set_list_eos.append("BHBlp")
old_tbl.set_intable = Paths.output + "radice2018_summary.csv"
old_tbl.load_input_data()
old_all_x = []
old_all_y = []
old_all_col = []
for run in old_tbl.table:
sim = run['name']
old_data[sim] = {}
if not sim.__contains__("HR") \
and not sim.__contains__("OldM0") \
and not sim.__contains__("LR") \
and not sim.__contains__("L5") \
and not sim.__contains__("L25") \
and not sim.__contains__("L50"):
x = float(run[new_old_dic[v_n_x]])
y = float(run[new_old_dic[v_n_y]])
col = "gray"
old_all_col.append(col)
old_all_x.append(x)
old_all_y.append(y)
old_data[sim][v_n_x] = x
old_data[sim][v_n_y] = y
Printcolor.green("old data is collected")
old_all_x = np.array(old_all_x)
old_all_y = np.array(old_all_y)
# exit(1)
# collect data
for eos in simulations.keys():
data2[eos] = {}
for q in simulations[eos]:
data2[eos][q] = {}
for u_sim in simulations[eos][q]:
data2[eos][q][u_sim] = {}
sims = simulations[eos][q][u_sim]
o_inits = [LOAD_INIT_DATA(sim) for sim in sims]
o_pars = [ADD_METHODS_ALL_PAR(sim) for sim in sims]
x_coord, x_err1, x_err2 = __get_val_err(sims, o_inits, o_pars, v_n_x, det, mask_x, error)
y_coord, y_err1, y_err2 = __get_val_err(sims, o_inits, o_pars, v_n_y, det, mask_y, error)
col_coord, col_err1, col_err2 = __get_val_err(sims, o_inits, o_pars, v_n_col, det, mask_col, error)
data2[eos][q][u_sim]["lserr"] = len(sims)
data2[eos][q][u_sim]["x"] = x_coord
data2[eos][q][u_sim]["xe1"] = x_err1
data2[eos][q][u_sim]["xe2"] = x_err2
data2[eos][q][u_sim]["y"] = y_coord
data2[eos][q][u_sim]["ye1"] = y_err1
data2[eos][q][u_sim]["ye2"] = y_err2
data2[eos][q][u_sim]["c"] = col_coord
data2[eos][q][u_sim]["ce1"] = col_err1
data2[eos][q][u_sim]["ce2"] = col_err2
#
isbh, ispromtcoll = __get_is_prompt_coll(sims, o_inits, o_pars, delta_t=delta_t_prompt)
data2[eos][q][u_sim]["isprompt"] = ispromtcoll
data2[eos][q][u_sim]["isbh"] = isbh
if isbh and not ispromtcoll:
marker = 'o'
elif isbh and ispromtcoll:
marker = 's'
else:
marker = 'd'
data2[eos][q][u_sim]["marker"] = marker
#
pizzaeos = False
if eos == "SFHo":
pizzaeos, _, _ = __get_val_err(sims, o_inits, o_pars, "pizzaeos")
if pizzaeos.__contains__("2019"):
_pizzaeos = True
data2[eos][q][u_sim]['pizza2019'] = True
else:
_pizzaeos = False
data2[eos][q][u_sim]['pizza2019'] = False
#
Printcolor.print_colored_string([u_sim, "({})".format(len(sims)),
"x:[", "{:.1f}".format(x_coord),
"v:", "{:.1f}".format(x_err1),
"^:", "{:.1f}".format(x_err2),
"|",
"y:", "{:.5f}".format(y_coord),
"v:", "{:.5f}".format(y_err1),
"^:",
"{:.5f}".format(y_err2),
"] col: {} BH:".format(col_coord),
"{}".format(ispromtcoll),
"pizza2019:",
"{}".format(pizzaeos)],
["blue", "green", "blue", "green", "blue", "green",
"blue", "green", "yellow", "blue", "green", "blue",
"green", "blue", "green", "blue", "green", "blue", "green"])
# Printcolor.blue("Processing {} ({} sims) x:[{:.1f}, v:{:.1f} ^{:.1f}] y:[{:.5f}, v{:.5f} ^{:.5f}] col:{:.1f}"
# .format(u_sim, len(sims), x_coord, x_err1, x_err2, y_coord, y_err1, y_err2, col_coord))
Printcolor.green("Data is collaected")
# FIT
print(" =============================== ")
all_x = []
all_y = []
for eos in data2.keys():
for q in data2[eos].keys():
for u_sim in data2[eos][q].keys():
ispc = data2[eos][q][u_sim]["isprompt"]
if not ispc:
all_x.append(data2[eos][q][u_sim]["x"])
all_y.append(data2[eos][q][u_sim]['y'])
all_x = np.array(all_x)
all_y = np.array(all_y)
# print(all_x)
all_x, all_y = UTILS.x_y_z_sort(all_x, all_y)
# print(all_x);
print("_log(lambda) as x")
UTILS.fit_polynomial(np.log10(all_x), all_y, 1, 100)
print("lamda as x")
fit_x, fit_y = UTILS.fit_polynomial(all_x, all_y, 1, 100)
# print(fit_x); exit(1)
print("ave: {}".format(np.sum(all_y) / len(all_y)))
print(" =============================== ")
# stuck data for scatter plot
for eos in simulations.keys():
for v_n in ["x", "y", "c", "marker"]:
arr = []
for q in simulations[eos].keys():
for u_sim in simulations[eos][q]:
arr.append(data2[eos][q][u_sim][v_n])
data2[eos][v_n + "s"] = arr
Printcolor.green("Data is stacked")
# plot the scatter points
figname = ''
if mask_x == None:
figname = figname + v_n_x + '_'
else:
figname = figname + v_n_x + '_' + mask_x + '_'
if mask_y == None:
figname = figname + v_n_y + '_'
else:
figname = figname + v_n_y + '_' + mask_y + '_'
if mask_col == None:
figname = figname + v_n_col + '_'
else:
figname = figname + v_n_col + '_' + mask_col + '_'
if det == None:
figname = figname + ''
else:
figname = figname + str(det)
if do_plot_old_table:
figname = figname + '_InclOldTbl'
figname = figname + '.png'
#
o_plot = PLOT_MANY_TASKS()
o_plot.gen_set["figdir"] = __outplotdir__
o_plot.gen_set["type"] = "cartesian"
o_plot.gen_set["figsize"] = (4.2, 3.6) # <->, |]
o_plot.gen_set["figname"] = figname
o_plot.gen_set["sharex"] = True
o_plot.gen_set["sharey"] = False
o_plot.gen_set["subplots_adjust_h"] = 0.0
o_plot.gen_set["subplots_adjust_w"] = 0.0
o_plot.set_plot_dics = []
# FOR LEGENDS
if do_plot_promptcoll:
x = -1.
y = -1.
marker_dic_lr = {
'task': 'line', 'ptype': 'cartesian',
'position': (1, 1),
'xarr': [x], "yarr": [y],
'xlabel': None, "ylabel": None,
'label': "Prompt collapse",
'marker': 's', 'color': 'gray', 'ms': 10., 'alpha': 0.4,
'sharey': False,
        'sharex': False,  # removes angular ticks
'fontsize': 14,
'labelsize': 14
}
# if eos == "BLh" and u_sim == simulations2[eos][q].keys()[-1]:
# print('-0--------------------')
marker_dic_lr['legend'] = {'loc': 'upper left', 'ncol': 1, 'shadow': False, 'framealpha': 0.,
'borderaxespad': 0., 'fontsize': 11}
o_plot.set_plot_dics.append(marker_dic_lr)
if do_plot_bh:
x = -1.
y = -1.
marker_dic_lr = {
'task': 'line', 'ptype': 'cartesian',
'position': (1, 1),
'xarr': [x], "yarr": [y],
'xlabel': None, "ylabel": None,
'label': "BH formation",
'marker': 'o', 'color': 'gray', 'ms': 10., 'alpha': 0.4,
'sharey': False,
        'sharex': False,  # removes angular ticks
'fontsize': 14,
'labelsize': 14
}
# if eos == "BLh" and u_sim == simulations2[eos][q].keys()[-1]:
# print('-0--------------------')
marker_dic_lr['legend'] = {'loc': 'upper left', 'ncol': 1, 'shadow': False, 'framealpha': 0.,
'borderaxespad': 0., 'fontsize': 11}
o_plot.set_plot_dics.append(marker_dic_lr)
if do_plot_bh:
x = -1.
y = -1.
marker_dic_lr = {
'task': 'line', 'ptype': 'cartesian',
'position': (1, 1),
'xarr': [x], "yarr": [y],
'xlabel': None, "ylabel": None,
'label': "Long Lived",
'marker': 'd', 'color': 'gray', 'ms': 10., 'alpha': 0.4,
'sharey': False,
        'sharex': False,  # removes angular ticks
'fontsize': 14,
'labelsize': 14
}
# if eos == "BLh" and u_sim == simulations2[eos][q].keys()[-1]:
# print('-0--------------------')
marker_dic_lr['legend'] = {'loc': 'upper right', 'ncol': 1, 'shadow': False, 'framealpha': 0.,
'borderaxespad': 0., 'fontsize': 11}
o_plot.set_plot_dics.append(marker_dic_lr)
if do_plot_old_table:
x = -1.
y = -1.
marker_dic_lr = {
'task': 'line', 'ptype': 'cartesian',
'position': (1, 1),
'xarr': [x], "yarr": [y],
'xlabel': None, "ylabel": None,
'label': "Radice+2018",
'marker': '*', 'color': 'gray', 'ms': 10., 'alpha': 0.4,
'sharey': False,
        'sharex': False,  # removes angular ticks
'fontsize': 14,
'labelsize': 14
}
# if eos == "BLh" and u_sim == simulations2[eos][q].keys()[-1]:
# print('-0--------------------')
marker_dic_lr['legend'] = {'loc': 'upper right', 'ncol': 1, 'shadow': False, 'framealpha': 0.,
'borderaxespad': 0., 'fontsize': 11}
o_plot.set_plot_dics.append(marker_dic_lr)
# FOR FITS
if do_plot_linear_fit:
if v_n_y == "Mej_tot" or v_n_y == "Mej_tot_scaled":
fit_y = fit_y * 1e2
if v_n_x == "Mej_tot" or v_n_x == "Mej_tot_scaled":
fit_x = fit_x * 1e2
# print(fit_x, fit_y)
linear_fit = {
'task': 'line', 'ptype': 'cartesian',
'position': (1, 1),
'xarr': fit_x, "yarr": fit_y,
'xlabel': None, "ylabel": None,
'label': "Linear fit",
'ls': '-', 'color': 'black', 'lw': 1., 'alpha': 1., 'ds': 'default',
'sharey': False,
        'sharex': False,  # removes angular ticks
'fontsize': 14,
'labelsize': 14
}
o_plot.set_plot_dics.append(linear_fit)
#
if do_plot_old_table:
if v_n_y == "Mej_tot" or v_n_y == "Mej_tot_scaled":
old_all_y = old_all_y * 1e2
if v_n_x == "Mej_tot" or v_n_x == "Mej_tot_scaled":
old_all_x = old_all_x * 1e2
dic = {
'task': 'scatter', 'ptype': 'cartesian', # 'aspect': 1.,
'xarr': old_all_x, "yarr": old_all_y, "zarr": old_all_col,
'position': (1, 1), # 'title': '[{:.1f} ms]'.format(time_),
'cbar': {},
'v_n_x': v_n_x, 'v_n_y': v_n_y, 'v_n': v_n_col,
'xlabel': None, "ylabel": Labels.labels(v_n_y, mask_y),
'xmin': 300, 'xmax': 900, 'ymin': 0.03, 'ymax': 0.3, 'vmin': 1.0, 'vmax': 1.9,
'fill_vmin': False, # fills the x < vmin with vmin
'xscale': None, 'yscale': None,
'cmap': 'tab10', 'norm': None, 'ms': 60, 'marker': '*', 'alpha': 0.7, "edgecolors": None,
'tick_params': {"axis": 'both', "which": 'both', "labelleft": True,
"labelright": False, # "tick1On":True, "tick2On":True,
"labelsize": 12,
"direction": 'in',
"bottom": True, "top": True, "left": True, "right": True},
'yaxiscolor': {'bottom': 'black', 'top': 'black', 'right': 'black', 'left': 'black'},
'minorticks': True,
'title': {}, # {"text": eos, "fontsize": 12},
'label': None,
'legend': {},
'sharey': False,
        'sharex': False,  # removes angular ticks
'fontsize': 14,
'labelsize': 14
}
o_plot.set_plot_dics.append(dic)
if do_plot_annotations:
for eos in ["SFHo"]:
print(eos)
for q in simulations[eos].keys():
for u_sim in simulations[eos][q].keys():
x = data2[eos][q][u_sim]["x"]
y = data2[eos][q][u_sim]["y"]
y1 = data2[eos][q][u_sim]["ye1"]
y2 = data2[eos][q][u_sim]["ye2"]
if data2[eos][q][u_sim]["pizza2019"]:
if v_n_x == "Mej_tot" or v_n_x == "Mej_tot_scaled":
x = x * 1e2
if v_n_y == "Mej_tot" or v_n_y == "Mej_tot_scaled":
y1 = y1 * 1e2
y2 = y2 * 1e2
y = y * 1e2
marker_dic_lr = {
'task': 'line', 'ptype': 'cartesian',
'position': (1, 1),
'xarr': [x], "yarr": [y],
'xlabel': None, "ylabel": None,
'label': None,
'marker': '2', 'color': 'blue', 'ms': 15, 'alpha': 1.,
# 'ls': ls, 'color': 'gray', 'lw': 1.5, 'alpha': 1., 'ds': 'default',
'sharey': False,
                        'sharex': False,  # removes angular ticks
'fontsize': 14,
'labelsize': 14
}
o_plot.set_plot_dics.append(marker_dic_lr)
# PLOTS
i_col = 1
for eos in ["SLy4", "SFHo", "BLh", "LS220", "DD2"]:
print(eos)
# Error Bar
if do_plot_error_bar_y:
for q in simulations[eos].keys():
for u_sim in simulations[eos][q].keys():
x = data2[eos][q][u_sim]["x"]
y = data2[eos][q][u_sim]["y"]
y1 = data2[eos][q][u_sim]["ye1"]
y2 = data2[eos][q][u_sim]["ye2"]
nsims = data2[eos][q][u_sim]["lserr"]
if v_n_x == "Mej_tot" or v_n_x == "Mej_tot_scaled":
x = x * 1e2
if v_n_y == "Mej_tot" or v_n_y == "Mej_tot_scaled":
y1 = y1 * 1e2
y2 = y2 * 1e2
y = y * 1e2
if nsims == 1:
ls = ':'
elif nsims == 2:
ls = '--'
elif nsims == 3:
ls = '-'
else:
raise ValueError("too many sims >3")
marker_dic_lr = {
'task': 'line', 'ptype': 'cartesian',
'position': (1, i_col),
'xarr': [x, x], "yarr": [y1, y2],
'xlabel': None, "ylabel": None,
'label': None,
'ls': ls, 'color': 'gray', 'lw': 1.5, 'alpha': 0.6, 'ds': 'default',
'sharey': False,
                    'sharex': False,  # removes angular ticks
'fontsize': 14,
'labelsize': 14
}
o_plot.set_plot_dics.append(marker_dic_lr)
if do_plot_error_bar_x:
for q in simulations[eos].keys():
for u_sim in simulations[eos][q].keys():
x = data2[eos][q][u_sim]["x"]
x1 = data2[eos][q][u_sim]["xe1"]
x2 = data2[eos][q][u_sim]["xe2"]
y = data2[eos][q][u_sim]["y"]
nsims = data2[eos][q][u_sim]["lserr"]
if v_n_y == "Mej_tot" or v_n_y == "Mej_tot_scaled":
y = y * 1e2
if v_n_x == "Mej_tot" or v_n_x == "Mej_tot_scaled":
x1 = x1 * 1e2
x2 = x2 * 1e2
x = x * 1e2
if nsims == 1:
ls = ':'
elif nsims == 2:
ls = '--'
elif nsims == 3:
ls = '-'
else:
raise ValueError("too many sims >3")
marker_dic_lr = {
'task': 'line', 'ptype': 'cartesian',
'position': (1, i_col),
'xarr': [x1, x2], "yarr": [y, y],
'xlabel': None, "ylabel": None,
'label': None,
'ls': ls, 'color': 'gray', 'lw': 1.5, 'alpha': 1., 'ds': 'default',
'sharey': False,
                    'sharex': False,  # removes angular ticks
'fontsize': 14,
'labelsize': 14
}
o_plot.set_plot_dics.append(marker_dic_lr)
# if do_plot_promptcoll:
# for q in simulations2[eos].keys():
# for u_sim in simulations2[eos][q].keys():
# x = data[eos][q][u_sim]["x"]
# y = data[eos][q][u_sim]["y"]
# isprompt = data[eos][q][u_sim]["isprompt"]
# if v_n_y == "Mej_tot" or v_n_y == "Mej_tot_scaled":
# y = y * 1e2
# if v_n_x == "Mej_tot" or v_n_x == "Mej_tot_scaled":
# x = x * 1e2
# if isprompt:
# marker_dic_lr = {
# 'task': 'line', 'ptype': 'cartesian',
# 'position': (1, i_col),
# 'xarr': [x], "yarr": [y],
# 'xlabel': None, "ylabel": None,
# 'label': None,
# 'marker': 's', 'color': 'gray', 'ms': 10., 'alpha': 0.4,
# 'sharey': False,
# 'sharex': False, # removes angular citkscitks
# 'fontsize': 14,
# 'labelsize': 14
# }
# # if eos == "BLh" and u_sim == simulations2[eos][q].keys()[-1]:
# # print('-0--------------------')
# marker_dic_lr['legend'] = {'loc':'upper left', 'ncol':1, 'shadow': False, 'framealpha':0., 'borderaxespad':0., 'fontsize':11}
# o_plot.set_plot_dics.append(marker_dic_lr)
# if do_plot_bh:
# for q in simulations2[eos].keys():
# for u_sim in simulations2[eos][q].keys():
# x = data[eos][q][u_sim]["x"]
# y = data[eos][q][u_sim]["y"]
# isbh = data[eos][q][u_sim]["isbh"]
# if v_n_y == "Mej_tot" or v_n_y == "Mej_tot_scaled":
# y = y * 1e2
# if v_n_x == "Mej_tot" or v_n_x == "Mej_tot_scaled":
# x = x * 1e2
# if isbh:
# marker_dic_lr = {
# 'task': 'line', 'ptype': 'cartesian',
# 'position': (1, i_col),
# 'xarr': [x], "yarr": [y],
# 'xlabel': None, "ylabel": None,
# 'label': None,
# 'marker': 'o', 'color': 'gray', 'ms': 10., 'alpha': 0.4,
# 'sharey': False,
# 'sharex': False, # removes angular citkscitks
# 'fontsize': 14,
# 'labelsize': 14
# }
# # if eos == "BLh" and u_sim == simulations2[eos][q].keys()[-1]:
# # print('-0--------------------')
# marker_dic_lr['legend'] = {'loc':'upper left', 'ncol':1, 'shadow': False, 'framealpha':0., 'borderaxespad':0., 'fontsize':11}
# o_plot.set_plot_dics.append(marker_dic_lr)
# LEGEND
# if eos == "DD2" and plot_legend:
# for res in ["HR", "LR", "SR"]:
# marker_dic_lr = {
# 'task': 'line', 'ptype': 'cartesian',
# 'position': (1, i_col),
# 'xarr': [-1], "yarr": [-1],
# 'xlabel': None, "ylabel": None,
# 'label': res,
# 'marker': 'd', 'color': 'gray', 'ms': 8, 'alpha': 1.,
# 'sharey': False,
# 'sharex': False, # removes angular citkscitks
# 'fontsize': 14,
# 'labelsize': 14
# }
# if res == "HR": marker_dic_lr['marker'] = "v"
# if res == "SR": marker_dic_lr['marker'] = "d"
# if res == "LR": marker_dic_lr['marker'] = "^"
# # if res == "BH": marker_dic_lr['marker'] = "x"
# if res == "SR":
# if v_n_y == "Ye_ave":
# loc = 'lower right'
# else:
# loc = 'upper right'
# marker_dic_lr['legend'] = {'loc': loc, 'ncol': 1, 'fontsize': 12, 'shadow': False,
# 'framealpha': 0.5, 'borderaxespad': 0.0}
# o_plot.set_plot_dics.append(marker_dic_lr)
#
xarr = np.array(data2[eos]["xs"])
yarr = np.array(data2[eos]["ys"])
colarr = data2[eos]["cs"]
markers = data2[eos]['markers']
# marker = data[eos]["res" + 's']
# edgecolor = data[eos]["vis" + 's']
# bh_marker = data[eos]["tcoll" + 's']
#
# UTILS.fit_polynomial(xarr, yarr, 1, 100)
#
# print(xarr, yarr); exit(1)
if v_n_y == "Mej_tot" or v_n_y == "Mej_tot_scaled":
yarr = yarr * 1e2
if v_n_x == "Mej_tot" or v_n_x == "Mej_tot_scaled":
xarr = xarr * 1e2
#
#
#
# dic_bh = {
# 'task': 'scatter', 'ptype': 'cartesian', # 'aspect': 1.,
# 'xarr': xarr, "yarr": yarr, "zarr": colarr,
# 'position': (1, i_col), # 'title': '[{:.1f} ms]'.format(time_),
# 'cbar': {},
# 'v_n_x': v_n_x, 'v_n_y': v_n_y, 'v_n': v_n_col,
# 'xlabel': None, "ylabel": None, 'label': eos,
# 'xmin': 300, 'xmax': 900, 'ymin': 0.03, 'ymax': 0.3, 'vmin': 1.0, 'vmax': 1.5,
# 'fill_vmin': False, # fills the x < vmin with vmin
# 'xscale': None, 'yscale': None,
# 'cmap': 'viridis', 'norm': None, 'ms': 80, 'marker': bh_marker, 'alpha': 1.0, "edgecolors": edgecolor,
# 'fancyticks': True,
# 'minorticks': True,
# 'title': {},
# 'legend': {},
# 'sharey': False,
# 'sharex': False, # removes angular citkscitks
# 'fontsize': 14,
# 'labelsize': 14
# }
#
# if mask_y != None and mask_y.__contains__("bern"):
# o_plot.set_plot_dics.append(dic_bh)
#
#
#
# print("marker: {}".format(marker))
dic = {
'task': 'scatter', 'ptype': 'cartesian', # 'aspect': 1.,
'xarr': xarr, "yarr": yarr, "zarr": colarr,
'position': (1, i_col), # 'title': '[{:.1f} ms]'.format(time_),
'cbar': {},
'v_n_x': v_n_x, 'v_n_y': v_n_y, 'v_n': v_n_col,
'xlabel': None, "ylabel": Labels.labels(v_n_y, mask_y),
'xmin': 300, 'xmax': 900, 'ymin': 0.03, 'ymax': 0.3, 'vmin': 1.0, 'vmax': 1.9,
'fill_vmin': False, # fills the x < vmin with vmin
'xscale': None, 'yscale': None,
'cmap': 'tab10', 'norm': None, 'ms': 60, 'markers': markers, 'alpha': 0.6, "edgecolors": None,
'tick_params': {"axis": 'both', "which": 'both', "labelleft": True,
"labelright": False, # "tick1On":True, "tick2On":True,
"labelsize": 12,
"direction": 'in',
"bottom": True, "top": True, "left": True, "right": True},
'yaxiscolor': {'bottom': 'black', 'top': 'black', 'right': 'black', 'left': 'black'},
'minorticks': True,
'title': {}, # {"text": eos, "fontsize": 12},
'label': None,
'legend': {},
'sharey': False,
        'sharex': False,  # removes angular ticks
'fontsize': 14,
'labelsize': 14
}
#
if v_n_y == "q":
dic['ymin'], dic['ymax'] = 0.9, 2.0
if v_n_col == "nsims":
dic['vmin'], dic['vmax'] = 1, 3.9
dic['cmap'] = __get_custom_descrete_colormap(3)
# dic['cmap'] = 'RdYlBu'
if v_n_y == "Mdisk3Dmax":
dic['ymin'], dic['ymax'] = 0.03, 0.30
if v_n_y == "Mb":
dic['ymin'], dic['ymax'] = 2.8, 3.4
if v_n_y == "Mej_tot" and mask_y == "geo":
dic['ymin'], dic['ymax'] = 0, 1.2
if v_n_y == "Mej_tot_scaled" and mask_y == "geo":
dic['ymin'], dic['ymax'] = 0, 0.5
if v_n_y == "Mej_tot_scaled2" and mask_y == "geo":
dic['ymin'], dic['ymax'] = 0, 1.
if v_n_y == "Mej_tot_scaled2" and mask_y == "geo_entropy_above_10":
dic['ymin'], dic['ymax'] = 0, 0.01
if v_n_y == "Mej_tot_scaled2" and mask_y == "geo_entropy_below_10":
dic['ymin'], dic['ymax'] = 0, 0.02
if v_n_y == "Mej_tot" and mask_y == "bern_geoend":
if dic['yscale'] == "log":
dic['ymin'], dic['ymax'] = 1e-3, 2e0
else:
dic['ymin'], dic['ymax'] = 0, 3.2
if v_n_y == "Mej_tot" and mask_y == "geo_entropy_above_10":
if dic['yscale'] == "log":
dic['ymin'], dic['ymax'] = 1e-3, 2e0
else:
dic['ymin'], dic['ymax'] = 0, .6
if v_n_y == "Mej_tot" and mask_y == "geo_entropy_below_10":
if dic['yscale'] == "log":
dic['ymin'], dic['ymax'] = 1e-2, 2e0
else:
dic['ymin'], dic['ymax'] = 0, 1.2
if v_n_y == "Mej_tot_scaled" and mask_y == "bern_geoend":
dic['ymin'], dic['ymax'] = 0, 3.
if v_n_y == "Ye_ave" and mask_y == "geo":
dic['ymin'], dic['ymax'] = 0.01, 0.35
if v_n_y == "Ye_ave" and mask_y == "bern_geoend":
dic['ymin'], dic['ymax'] = 0.1, 0.4
if v_n_y == "vel_inf_ave" and mask_y == "geo":
dic['ymin'], dic['ymax'] = 0.1, 0.3
if v_n_y == "vel_inf_ave" and mask_y == "bern_geoend":
dic['ymin'], dic['ymax'] = 0.05, 0.25
#
#
if v_n_x == "Mdisk3Dmax":
dic['xmin'], dic['xmax'] = 0.03, 0.30
if v_n_x == "Mb":
dic['xmin'], dic['xmax'] = 2.8, 3.4
if v_n_x == "Mej_tot" and mask_x == "geo":
dic['xmin'], dic['xmax'] = 0, 1.5
if v_n_x == "Mej_tot_scaled" and mask_x == "geo":
dic['xmin'], dic['xmax'] = 0, 0.5
if v_n_x == "Mej_tot" and mask_x == "bern_geoend":
dic['xmin'], dic['xmax'] = 0, 3.2
if v_n_x == "Mej_tot" and mask_x == "geo_entropy_above_10":
if dic['xscale'] == "log":
dic['xmin'], dic['xmax'] = 1e-3, 2e0
else:
dic['xmin'], dic['xmax'] = 0, .6
if v_n_x == "Mej_tot" and mask_x == "geo_entropy_below_10":
if dic['xscale'] == "log":
dic['xmin'], dic['xmax'] = 1e-2, 2e0
else:
dic['xmin'], dic['xmax'] = 0, 1.2
if v_n_x == "Mej_tot_scaled" and mask_x == "bern_geoend":
dic['xmin'], dic['xmax'] = 0, 3.
if v_n_x == "Ye_ave" and mask_x == "geo":
dic['xmin'], dic['xmax'] = 0.01, 0.30
if v_n_x == "Ye_ave" and mask_x == "bern_geoend":
dic['xmin'], dic['xmax'] = 0.1, 0.4
if v_n_x == "vel_inf_ave" and mask_x == "geo":
dic['xmin'], dic['xmax'] = 0.1, 0.3
if v_n_x == "vel_inf_ave" and mask_x == "bern_geoend":
dic['xmin'], dic['xmax'] = 0.05, 0.25
#
# if eos == "SLy4":
# dic['xmin'], dic['xmax'] = 380, 420
# dic['xticks'] = [390, 410]
# if eos == "SFHo":
# dic['xmin'], dic['xmax'] = 390, 430
# dic['xticks'] = [400, 420]
# if eos == "BLh":
# dic['xmin'], dic['xmax'] = 510, 550
# dic['xticks'] = [520, 540]
# if eos == "LS220":
# dic['xmin'], dic['xmax'] = 690, 730
# dic['xticks'] = [700, 720]
# if eos == "DD2":
# dic['xmin'], dic['xmax'] = 820, 860
# dic['xticks'] = [830, 850]
# if eos == "SLy4":
# dic['tick_params']['right'] = False
# dic['yaxiscolor']["right"] = "lightgray"
# elif eos == "DD2":
# dic['tick_params']['left'] = False
# dic['yaxiscolor']["left"] = "lightgray"
# else:
# dic['tick_params']['left'] = False
# dic['tick_params']['right'] = False
# dic['yaxiscolor']["left"] = "lightgray"
# dic['yaxiscolor']["right"] = "lightgray"
#
# if eos != "SLy4" and eos != "DD2":
# dic['yaxiscolor'] = {'left':'lightgray','right':'lightgray', 'label': 'black'}
# dic['ytickcolor'] = {'left':'lightgray','right':'lightgray'}
# dic['yminortickcolor'] = {'left': 'lightgray', 'right': 'lightgray'}
# elif eos == "DD2":
# dic['yaxiscolor'] = {'left': 'lightgray', 'right': 'black', 'label': 'black'}
# # dic['ytickcolor'] = {'left': 'lightgray'}
# # dic['yminortickcolor'] = {'left': 'lightgray'}
# elif eos == "SLy4":
# dic['yaxiscolor'] = {'left': 'black', 'right': 'lightgray', 'label': 'black'}
# # dic['ytickcolor'] = {'right': 'lightgray'}
# # dic['yminortickcolor'] = {'right': 'lightgray'}
#
# if eos != "SLy4":
# dic['sharey'] = True
if eos == "BLh":
dic['xlabel'] = Labels.labels(v_n_x, mask_x)
if eos == 'DD2':
dic['cbar'] = {'location': 'right .03 .0', 'label': Labels.labels(v_n_col), # 'fmt': '%.1f',
'labelsize': 14, 'fontsize': 14}
if v_n_col == "nsims":
dic['cbar']['fmt'] = '%d'
#
o_plot.set_plot_dics.append(dic)
#
# i_col = i_col + 1
if do_plot_old_table:
if v_n_x == 'Lambda':
dic['xmin'], dic['xmax'] = 5, 1500
# LEGEND
#
o_plot.main()
exit(0)
|
vsevolodnedora/prj_gw170817
|
scripts/legacy/plot_summary.py
|
plot_summary.py
|
py
| 63,511 |
python
|
en
|
code
| 0 |
github-code
|
50
|
18376053040
|
import numpy as np
import matplotlib
matplotlib.use("agg")
from minivggnet import MiniVGGNet
from sklearn.preprocessing import LabelBinarizer
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from keras.optimizers import SGD
from keras.datasets import cifar10
import matplotlib.pyplot as plt
import argparse
ap = argparse.ArgumentParser()
ap.add_argument("-o", "--output", help="plots output loc")
args = vars(ap.parse_args())
print("[INFO] Loading the CIFAR10 dataset...")
((trainX, trainY), (testX, testY)) = cifar10.load_data()
trainX = trainX.astype("float")/255.0
testX = testX.astype("float")/255.0
lb = LabelBinarizer()
trainY = lb.fit_transform(trainY)
testY = lb.fit_transform(testY)
labelNames = ["airplane", "automobile", "bird", "cat", "deer",
"dog", "frog", "horse", "ship", "truck"]
print("[INFO] Compling the model...")
print(np.shape(trainY))
input_h = trainX.shape[1]
input_W = trainX.shape[2]
input_d = trainX.shape[3]
input_classes = trainY.shape[1]
model = MiniVGGNet.build(input_h, input_W, input_d, input_classes)
opt = SGD(0.05)
model.compile(optimizer=opt, loss="categorical_crossentropy", metrics=["accuracy"])
print("[INFO] Training the network...")
epochs = 40
H = model.fit(trainX, trainY, batch_size=64, epochs=epochs, validation_data=(testX, testY), verbose=1, shuffle=True)
print("[INFO] Evaluating Network...")
predictions = model.predict(testX, batch_size=64)
print(classification_report(testY.argmax(axis=1), predictions.argmax(axis=1), target_names=labelNames))
model.save("model.hdf5")
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, epochs), H.history["loss"], label="Loss")
plt.plot(np.arange(0, epochs), H.history["val_loss"], label="Val_loss")
plt.plot(np.arange(0, epochs), H.history["accuracy"], label="Accuracy")
plt.plot(np.arange(0, epochs), H.history["val_accuracy"], labels="Val_accuracy")
plt.title("Training Loss and Accuracy on CIFAR-10")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend()
plt.savefig(args["output"])
|
SalahSoliman/VGGNet
|
trainvgg.py
|
trainvgg.py
|
py
| 2,028 |
python
|
en
|
code
| 0 |
github-code
|
50
|
38302844005
|
from flask import Blueprint, render_template, request, flash, redirect, url_for
from flask_login import login_required, current_user
from db_manager import db_manager
from prep_stocks import put_into_db
stock = Blueprint('stock', __name__)
def render_stocks():
cur = db_manager.get_cursor()
cur.execute("""SELECT id,
symbol, name, price, open_price, high_price, low_price, total
FROM stocks1
ORDER BY name ASC;""")
stocks = cur.fetchall()
return render_template("stock2.html", stocks=stocks, user=current_user)
@stock.route('/update-db')
def update_db():
result = put_into_db()
return result
@stock.route('/stocks', methods=['GET', 'POST'])
@login_required
def render_stocks_from_db():
cur = db_manager.get_cursor()
in_list = cur.execute("""select favorites.stock_id from favorites join stocks1 on stocks1.id=favorites.stock_id where favorites.user_id = %s""", (current_user.id,))
in_list = cur.fetchall()
if request.method == 'POST':
stock_id = request.form.get("add")
view_id = request.form.get("symbol")
info_id = request.form.get("name")
search_id = request.form.get("search")
        search_id = search_id.lower() if search_id else None
if stock_id:
db_manager.add_favorite(current_user.id, stock_id)
if in_list:
for i in in_list:
if i[0] == int(stock_id):
flash("This stock is already in your favorites", category = 'error' )
return render_stocks()
flash("This stock has been added to your favorites", category = 'success')
return render_stocks()
elif view_id:
cur.execute("SELECT * FROM stocks1 WHERE symbol = %s", (view_id,))
stock_name = cur.fetchall()
if stock_name:
print("check")
return redirect(url_for('hist.render_stock_history', symbol=view_id))
elif info_id:
cur.execute("SELECT name FROM stocks1 WHERE name = %s", (info_id,))
stock_name = cur.fetchall()
if stock_name:
return redirect(url_for('info.render_info_from_db', name = info_id))
elif search_id:
search_id = search_id.lower()
cur.execute("SELECT * FROM stocks1 WHERE LOWER(symbol) LIKE %s", ('%' + search_id + '%',))
matching_stocks = cur.fetchall()
if matching_stocks:
return redirect(url_for('hist.render_stock_history', symbol = search_id))
else:
flash('Stock not found.')
return render_stocks()
|
jkw944/DIS_Project
|
MyWebApp/stocks.py
|
stocks.py
|
py
| 2,741 |
python
|
en
|
code
| 0 |
github-code
|
50
|
21833302791
|
import sys
from sqlalchemy import create_engine
import pandas as pd
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.multioutput import MultiOutputClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.base import BaseEstimator, TransformerMixin
import pickle
import os
import numpy as np
import pandas as pd
import nltk
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('averaged_perceptron_tagger')
class MessageLengthTransformer(BaseEstimator, TransformerMixin):
"""
In this class we create a transformer that calculates the Message Length
for each message
"""
def fit(self, X, y=None):
return self
def transform(self, X):
return np.array([len(x) for x in X]).reshape(-1,1)
class StartingVerbExtractor(BaseEstimator, TransformerMixin):
"""
In this class we create a starting verb extractor
"""
def starting_verb(self, text):
sentence_list = nltk.sent_tokenize(text)
for sentence in sentence_list:
pos_tags = nltk.pos_tag(tokenize(sentence))
first_word, first_tag = pos_tags[0]
if first_tag in ['VB', 'VBP'] or first_word == 'RT':
return True
return False
def fit(self, X, y=None):
return self
def transform(self, X):
X_tagged = pd.Series(X).apply(self.starting_verb)
return pd.DataFrame(X_tagged)
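# Toy illustration of the two custom transformers above (hypothetical inputs,
# not project data). MessageLengthTransformer maps each message to its
# character count, StartingVerbExtractor flags messages whose first token is
# POS-tagged as a verb (the tag can vary slightly between nltk versions):
#   MessageLengthTransformer().transform(["need water", "trapped under rubble"])
#       -> array([[10], [20]])
#   StartingVerbExtractor().transform(["send help now"])
#       -> DataFrame with a single True entry (since "send" is typically tagged VB)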
def load_data(database_filepath):
"""
This function is used to load data
"""
path = 'sqlite:///' + database_filepath
engine = create_engine(path)
df = pd.read_sql_table(table_name='df', con=engine)
X = df["message"]
y = df.loc[:, "related":"direct_report"]
return X, y, y.columns
def tokenize(text):
"""
Tokenization function
"""
tokens = word_tokenize(text)
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for tok in tokens:
clean_tok = lemmatizer.lemmatize(tok).lower().strip()
clean_tokens.append(clean_tok)
return clean_tokens
def build_model():
"""
Function used to define model parameters, define pipeline and setup grid search
"""
parameters = {
'clf__estimator': [
#AdaBoostClassifier(n_estimators=50, learning_rate=0.4),
#AdaBoostClassifier(n_estimators=100, learning_rate=0.4),
#AdaBoostClassifier(n_estimators=50, learning_rate=0.8),
#AdaBoostClassifier(n_estimators=100, learning_rate=0.8),
#AdaBoostClassifier(n_estimators=50, learning_rate=1),
#AdaBoostClassifier(n_estimators=100, learning_rate=1),
#RandomForestClassifier(n_estimators=50, criterion='entropy'),
#RandomForestClassifier(n_estimators=100, criterion='entropy'),
#RandomForestClassifier(n_estimators=50, criterion='gini'),
#RandomForestClassifier(n_estimators=100, criterion='gini')
RandomForestClassifier(n_estimators=10, criterion='gini'),
RandomForestClassifier(n_estimators=10, criterion='entropy'),
AdaBoostClassifier(n_estimators=10, learning_rate=1),
AdaBoostClassifier(n_estimators=10, learning_rate=0.5)
]
}
pipeline = Pipeline([
('features', FeatureUnion([
('text_pipeline', Pipeline([
('vect', CountVectorizer(tokenizer=tokenize)),
('tfidf', TfidfTransformer())
])),
('msg_length', MessageLengthTransformer()),
('starting_verb', StartingVerbExtractor())
])),
('clf', MultiOutputClassifier(estimator=None))
])
cv = GridSearchCV(pipeline, param_grid = parameters)
return cv
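# The grid above only swaps the final estimator of the pipeline: with four
# candidate classifiers and scikit-learn's default k-fold cross-validation
# (k=5 in recent releases) GridSearchCV fits 4 * k models, each on top of the
# full feature union (bag-of-words + TF-IDF, message length, starting verb).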
def evaluate_model(model, X_test, y_test, category_names):
"""
Function used to evaluate (print metrics) of the results obtained by the created model
"""
y_pred = model.predict(X_test)
for i, category in enumerate(category_names):
        metrics = classification_report(y_test.iloc[:, i], y_pred[:, i])
print("""category: {}
{} """.format(category, metrics))
def save_model(model, model_filepath):
"""
Function used to save the created model
"""
with open(model_filepath, 'wb') as file:
pickle.dump(model, file)
def main():
if len(sys.argv) == 3:
database_filepath, model_filepath = sys.argv[1:]
print('Loading data...\n DATABASE: {}'.format(database_filepath))
X, y, category_names = load_data(database_filepath)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
print('Building model...')
model = build_model()
print('Training model...')
model.fit(X_train, y_train)
gs_model = model.best_estimator_
print('Evaluating model...')
evaluate_model(gs_model, X_test, y_test, category_names)
print('Saving model...\n MODEL: {}'.format(model_filepath))
save_model(model, model_filepath)
print('Trained model saved!')
else:
print('Please provide the filepath of the disaster messages database '\
'as the first argument and the filepath of the pickle file to '\
'save the model to as the second argument. \n\nExample: python '\
'train_classifier.py ../data/DisasterResponse.db classifier.pkl')
if __name__ == '__main__':
main()
|
gustavex/Udacity_Data_Scientist
|
04_disaster_response_pipeline/models/train_classifier.py
|
train_classifier.py
|
py
| 5,967 |
python
|
en
|
code
| 2 |
github-code
|
50
|
70085291677
|
import threading
import ursina
from calc import *
from gui import Simulation
from planet import Planet, Sky
class Main:
def __init__(self, app, planet_list=[]):
# SET BASIC VARIABLES FOR ursina -------------------------------------------------------
self.app = app
ursina.window.title = 'planet simulation' # set meta data for app
ursina.window.borderless = True
ursina.window.fullscreen = True
ursina.window.exit_button.visible = False
ursina.window.fps_counter.enabled = True
self.planet_list = planet_list # list of all planets in the simulation
# CREATION OF SUN ---------------------------------------------------------------------
Planet(file_name='/textures/sun', planet_name="sun", planet_diameter=2.5, plannr=0)
# CREATION OF SKY ----------------------------------------------------------------------
Sky()
# CREATION OF SIMULATION ---------------------------------------------------------------
simulation = Simulation(self.planet_list)
# CREATION OF THREADS ------------------------------------------------------------------
for planet in self.planet_list:
# For every planet, there is a thread, which calculates the current Position of its planet
calc = Calc(planet)
temp = threading.Thread(target=calc.get_coords, args=(planet,))
temp.start()
# STARTS THE SIMULATION ---------------------------------------------------------------
# runs simulation.update() constantly
self.app.run()
|
DerBerlinr/Planet-Simulation
|
main.py
|
main.py
|
py
| 1,620 |
python
|
en
|
code
| 2 |
github-code
|
50
|
30297363580
|
# tree with class
class Node:
def __init__(self, data):
self.data = data
self.right = None
self.left = None
class Tree:
def __init__(self, root):
self.r = root
# inorder traversal
def inorder_wrapper_traversal(self):
self.inorder_Traversal(self.r)
def inorder_Traversal(self, root):
if root:
self.inorder_Traversal(root.left)
print(root.data)
self.inorder_Traversal(root.right)
def preorder_Traversal(self, root):
if root:
print(root.data)
self.preorder_Traversal(root.left)
self.preorder_Traversal(root.right)
def postorder_Traversal(self, root):
if root:
self.postorder_Traversal(root.left)
self.postorder_Traversal(root.right)
print(root.data)
if __name__ == "__main__":
root = Node(1)
root.left = Node(2)
root.right = Node(3)
root.left.left = Node(4)
root.left.right = Node(5)
root.right.left = Node(6)
root.right.right = Node(7)
root.right.right.left = Node(8)
tree1 = Tree(root)
tree1.inorder_wrapper_traversal()
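    # For illustration, the remaining traversals can be called directly with
    # the root node (no wrapper is defined for them above):
    # preorder visits 1 2 4 5 3 6 7 8, postorder visits 4 5 2 6 8 7 3 1
    tree1.preorder_Traversal(root)
    tree1.postorder_Traversal(root)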
|
dynstat/DataStructuresInPython
|
kanchan/Tree/tree_with_class.py
|
tree_with_class.py
|
py
| 1,184 |
python
|
en
|
code
| 0 |
github-code
|
50
|
45239525528
|
#!/usr/bin/env python3
from flask import Flask, render_template, request, flash, redirect, url_for
from services import*
from services.service import create_people, get_peoples, get_people, delete_people, edit_people, get_courses, people_exist
app = Flask(__name__)
app.secret_key = "mysecretkey"
@app.route('/')
def index():
peoples = get_peoples()
courses = get_courses()
return render_template('index.html', peoples=peoples, courses=courses, len_peoples=len(get_peoples()))
@app.route('/', methods=['POST'])
def add_people():
try:
if request.method == 'POST':
first_name = request.form['first_name']
last_name = request.form['last_name']
email = request.form['email']
course = request.form['course']
data = {"first_name": first_name,"last_name": last_name,"email": email,"courses": [{"id": course}]}
if not people_exist(email):
if first_name == "" or last_name == "" or email == "":
flash('Complete todos los datos')
return redirect(url_for('index'))
else:
create_people(data)
peoples = get_peoples()
courses = get_courses()
flash('Alumno inscripto')
return render_template('index.html', peoples=peoples, courses=courses, len_peoples=len(get_peoples()))
else:
if first_name == "" or last_name == "" or email == "":
flash('Complete todos los datos')
return redirect(url_for('index'))
else:
flash('El alumno ya existe')
return redirect(url_for('index'))
except (KeyError):
flash('Complete todos datos')
return redirect(url_for('index'))
@app.route('/delete/<id>')
def delete(id):
delete_people(id)
flash('Alumno eliminado')
peoples = get_peoples()
courses = get_courses()
return render_template('index.html', peoples=peoples, courses=courses, len_peoples=len(get_peoples()))
@app.route('/edit/<id>')
def get_student(id):
people = get_people(id)
courses = get_courses()
course = course_people(id)
return render_template('edit.html', people=people, courses=courses, len_peoples=len(get_peoples()), course=course)
@app.route('/update/<id>', methods= ['POST'])
def update_people(id):
if request.method == 'POST':
first_name = request.form['first_name']
last_name = request.form['last_name']
email = request.form['email']
course = request.form['course']
data = {"first_name": first_name, "last_name": last_name, "email": email, "courses": [{"id": course}]}
edit_people(id, data)
flash('Alumno actualizado')
peoples = get_peoples()
courses = get_courses()
return render_template('index.html', peoples=peoples, courses=courses, len_peoples=len(get_peoples()))
def course_people(id):
people = get_people(id)
for cour in people['courses']:
course = cour
return course
@app.route('/data/<id>')
def data_people(id):
people = get_people(id)
peoples = get_peoples()
courses = get_courses()
course = course_people(id)
return render_template('data.html', people=people, peoples=peoples, courses=courses, len_peoples=len(get_peoples()), course=course)
if __name__ == '__main__':
app.run(debug=True)
|
lordmaster11/Challege-Peoples
|
app.py
|
app.py
|
py
| 3,564 |
python
|
en
|
code
| 0 |
github-code
|
50
|
32786245734
|
# -*- coding: utf-8 -*-
# @Author : wangtingyun
# @Time : 2020/03/28
import sys
from PyQt5.QtCore import QPropertyAnimation, Qt, QPoint, QEasingCurve, QTimer
from PyQt5.QtWidgets import QWidget, QLabel, QApplication
class MarqueeWidget(QWidget):
"""跑马灯控件"""
def __init__(self, parent):
super(MarqueeWidget, self).__init__(parent)
self.setWindowFlags(self.windowFlags() | Qt.FramelessWindowHint)
self.setAttribute(Qt.WA_TranslucentBackground)
self.resize(200, 30)
self.label_1 = QLabel(self)
self.label_1.setGeometry(self.geometry())
self.label_2 = QLabel(self)
self.label_2.setGeometry(self.geometry())
self.duration = 4000
self.spacing = 40
self.anim_1 = QPropertyAnimation(self.label_1, b'pos')
self.anim_1.setEasingCurve(QEasingCurve.Linear)
self.anim_1.setDuration(self.duration)
self.anim_1.setLoopCount(-1)
self.anim_2 = QPropertyAnimation(self.label_2, b'pos')
self.anim_2.setEasingCurve(QEasingCurve.Linear)
self.anim_2.setDuration(self.duration)
self.anim_2.setLoopCount(-1)
self.init_ui()
self.start_move()
def init_ui(self):
self.label_1.setStyleSheet("QLabel{font-family: 'Microsoft YaHei'; font-size: 14px; color: #000000;}")
self.label_1.setText('欢迎来到房间这里是房间名字测试的房间')
self.label_1.adjustSize()
self.label_2.setStyleSheet(self.label_1.styleSheet())
self.label_2.setText(self.label_1.text())
self.label_2.adjustSize()
def start_move(self):
self.anim_1.setStartValue(QPoint(0, self.label_1.y()))
self.anim_1.setEndValue(QPoint(-(self.label_1.width() + self.spacing), self.label_1.y()))
self.anim_2.setStartValue(QPoint(self.label_1.width() + self.spacing, self.label_2.y()))
self.anim_2.setEndValue(QPoint(0, self.label_2.y()))
self.anim_1.start()
self.anim_2.start()
if __name__ == '__main__':
app = QApplication(sys.argv)
window = QWidget()
window.setWindowTitle('Demo')
window.resize(300, 100)
window.label = MarqueeWidget(window)
window.label.move((window.width()-window.label.width())//2, (window.height()-window.label.height())//2)
window.show()
sys.exit(app.exec_())
|
aiwangtingyun/PythonDemo
|
component/marquee_widget.py
|
marquee_widget.py
|
py
| 2,358 |
python
|
en
|
code
| 0 |
github-code
|
50
|
32218884493
|
import pygame
class Guy(pygame.sprite.Sprite):
def __init__(self, *groups):
super().__init__(*groups)
self.image = pygame.image.load("data/enzo.png") # 16x16s
self.image = pygame.transform.scale(self.image, [100, 100])
self.rect = pygame.Rect(50, 50, 100, 100)
self.speed = 0
self.acceleration = 0.1
def update(self, *args):
        # Logic: accelerate with W/S, apply friction, clamp to the screen
keys = pygame.key.get_pressed()
if keys[pygame.K_w]:
self.speed -= self.acceleration
elif keys[pygame.K_s]:
self.speed += self.acceleration
else:
self.speed *= 0.95
self.rect.y += self.speed
if self.rect.top < 0:
self.rect.top = 0
self.speed = 0
elif self.rect.bottom > 480:
self.rect.bottom = 480
self.speed = 0
|
Ewertonalex/Jogo-Pygame-Enzo-vs-Zumbi
|
guy.py
|
guy.py
|
py
| 892 |
python
|
en
|
code
| 5 |
github-code
|
50
|
71116402715
|
#!/usr/bin/python
import simplejson
import urllib
import urllib2
import sys
apikey = ""
url = "https://www.virustotal.com/vtapi/v2/file/report"
parameters = {"resource": sys.argv[1], "apikey": apikey}
data = urllib.urlencode(parameters)
req = urllib2.Request(url, data)
response = urllib2.urlopen(req)
json = response.read()
response_dict = simplejson.loads(json)
if response_dict.get("response_code") == 1:
sys.exit(response_dict.get("positives"))
sys.exit(0)
|
FKilic/x-tier
|
X-TIER/scripts/virustotal/virustotal.py
|
virustotal.py
|
py
| 467 |
python
|
en
|
code
| 4 |
github-code
|
50
|
14591693671
|
def sum(a, b, c ):
return a + b + c
def printBoard(xState, oState):
zero = 'X' if xState[0] else ('O' if oState[0] else 0)
one = 'X' if xState[1] else ('O' if oState[1] else 1)
two = 'X' if xState[2] else ('O' if oState[2] else 2)
three = 'X' if xState[3] else ('O' if oState[3] else 3)
four = 'X' if xState[4] else ('O' if oState[4] else 4)
five = 'X' if xState[5] else ('O' if oState[5] else 5)
six = 'X' if xState[6] else ('O' if oState[6] else 6)
seven = 'X' if xState[7] else ('O' if oState[7] else 7)
eight = 'X' if xState[8] else ('O' if oState[8] else 8)
print(f"{zero} | {one} | {two} ")
print(f"--|---|---")
print(f"{three} | {four} | {five} ")
print(f"--|---|---")
print(f"{six} | {seven} | {eight} ")
def checkWin(xState, oState):
wins = [[0, 1, 2], [3, 4, 5], [6, 7, 8], [0, 3, 6], [1, 4, 7], [2, 5, 8], [0, 4, 8], [2, 4, 6]]
for win in wins:
if(sum(xState[win[0]], xState[win[1]], xState[win[2]]) == 3):
print("X Won the match")
return 1
if(sum(oState[win[0]], oState[win[1]], oState[win[2]]) == 3):
print("O Won the match")
return 0
return -1
xState = [0, 0, 0, 0, 0, 0, 0, 0, 0]
oState = [0, 0, 0, 0, 0, 0, 0, 0, 0]
turn = 1 # 1 for X and 0 for O
print("Welcome to Tic Tac Toe")
while(True):
    printBoard(xState, oState)
    if(turn == 1):
        print("X's Chance")
        value = int(input("Please enter a value: "))
        if value < 0 or value > 8 or xState[value] or oState[value]:
            print("Invalid move, try again")
            continue
        xState[value] = 1
    else:
        print("O's Chance")
        value = int(input("Please enter a value: "))
        if value < 0 or value > 8 or xState[value] or oState[value]:
            print("Invalid move, try again")
            continue
        oState[value] = 1
    cwin = checkWin(xState, oState)
    if(cwin != -1):
        print("Match over")
        break
    if all(x or o for x, o in zip(xState, oState)):
        print("Match drawn")
        break
    turn = 1 - turn
|
saadhussain01306/Tic_tak_toe
|
project[1].py
|
project[1].py
|
py
| 1,793 |
python
|
en
|
code
| 1 |
github-code
|
50
|
14762107866
|
from django.db.models.signals import post_save, pre_save
from django.dispatch import receiver
from .models import Profile, MyUser
import os
"""
@receiver(post_save, sender=MyUser)
def create_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance.username)
else:
#print('--->not found', created)
#@shopowner_required
@receiver(post_save, sender=MyUser)
def save_profile(sender, instance, **kwargs):
instance.profile.save()
"""
@receiver(pre_save, sender=Profile)
def auto_delete_image_on_update(sender, instance, **kwargs):
if not instance.pk:
return False
try:
old_file = sender.objects.get(pk=instance.pk).image
except sender.DoesNotExist:
return False
new_file = instance.image
if not old_file == new_file:
        if os.path.isfile(old_file.path):
os.remove(old_file.path)
r = []
k = []
r.append(new_file.path)
k.append(old_file.path)
if k != r:
try:
for i in k:
if i not in r:
os.remove(i)
except:
pass
|
armani24/gglocal
|
guido/users/signals.py
|
signals.py
|
py
| 1,186 |
python
|
en
|
code
| 0 |
github-code
|
50
|
31940036407
|
#!/usr/bin/env python
#_*_ codig: utf8 _*_
import os, time, sqlite3
from watchdog.observers.polling import PollingObserver
from watchdog.events import FileSystemEventHandler
from Modules.constants import *
def on_created(event):
con=sqlite3.connect('data.db')
cur=con.cursor()
file_name=os.path.basename(event.src_path)
r=cur.execute(f"select bytes from data where name like '{file_name}'").fetchall()
if r==[]:
file_size=os.path.getsize(f"{src_path}{file_name}")
cur.execute(f"insert into data values('{file_name}', {file_size})")
con.commit()
print('Create', os.path.basename(event.src_path))
else:
pass
con.close()
if __name__ == "__main__":
event_handler = FileSystemEventHandler()
event_handler.on_created = on_created
observer = PollingObserver()
observer.schedule(event_handler, src_path, recursive=False)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
|
mgarciasantamaria/uparoundv2
|
watchFolder.py
|
watchFolder.py
|
py
| 1,049 |
python
|
en
|
code
| 0 |
github-code
|
50
|
7252211669
|
#%%
import os
import pandas as pd
from ecg_arrythmia_analysis.code.dataloader import *
from ecg_arrythmia_analysis.code.architectures import *
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
from sklearn.metrics import f1_score, accuracy_score
#%%
MODEL_PATH = 'models/'
DATA_PATH = 'data/'
CNN_SPECS = (4, [5, 3, 3, 3], [16, 32, 32, 256], 1)
RCNN_SPECS = (4, 3, [3, 12, 48, 192], 2, 1)
RNN_SPECS = (2, False, 3, 16, 256, 'LSTM', 2, 1)
ENSEMBLE_SPECS = (2, 1024, 1)
SPEC_LIST = {'cnn': CNN_SPECS,
'rcnn': RCNN_SPECS,
'rnn': RNN_SPECS,
'ensemble': ENSEMBLE_SPECS}
#%%
def architect(mode, data, type, run_id, type_ids=None):
if isinstance(data, str):
data = [data]
if isinstance(type, str):
type = [type]
id = run_id
    # Training
    if mode == 'training':
optimizers = ['Adam']
# dropouts = [0.1, 0.5]
# n_layers = [1, 2, 3]
lr_list = [0.01, 0.001]
for d in data:
for t in type:
for o in optimizers:
for lr in lr_list:
                        if o == 'Adam':
                            opt = tf.keras.optimizers.Adam(lr)
                        specs = SPEC_LIST[t]
                        if d == 'mitbih':
specs = list(specs)
specs[-1] = 5
specs = tuple(specs)
m = get_architecture(t, specs)
training(m, opt, d, t, id)
# Testing
    if mode == 'testing':
        for d in data:
            for t in type:
                specs = SPEC_LIST[t]
                if d == 'mitbih':
                    specs = list(specs)
                    specs[-1] = 5
                    specs = tuple(specs)
                m = get_architecture(t, specs)
                testing(m, d, t, id)
    if mode == 'ensemble':
        run_ensemble(data=data, type_ids=type_ids, id=run_id)
    if mode == 'visualization':
pass
#%%
def training(model, opt, data, type, id):
file_path = MODEL_PATH + type + '_' + data + '_' + str(id) + '.h5'
    if type == 'tfl':
save = False
print("Not saving best models... not implemented for submodules!")
else:
save = True
checkpoint = ModelCheckpoint(file_path, monitor='val_acc', verbose=1, save_best_only=save, mode='max')
early = EarlyStopping(monitor="val_acc", mode="max", patience=5, verbose=1)
redonplat = ReduceLROnPlateau(monitor="val_acc", mode="max", patience=3, verbose=2)
callbacks_list = [checkpoint, early, redonplat]
    if data == 'mitbih':
Y, X, _, _ = get_mitbih()
model.compile(optimizer=opt, loss=tf.keras.losses.sparse_categorical_crossentropy, metrics=['acc'])
else:
Y, X, _, _ = get_ptbdb()
model.compile(optimizer=opt, loss=tf.keras.losses.binary_crossentropy, metrics=['acc'])
if save:
model.fit(X, Y, epochs=1, callbacks=callbacks_list, validation_split=0.1)
else:
# NO CHECKPOINTS FOR TFL -> due to using submodules the save-implementation broke
model.fit(X, Y, callbacks=[early, redonplat], validation_split=0.1)
model.save_weights(filepath=file_path)
return model
def testing(model, data, type, id):
file_path = MODEL_PATH + type + '_' + data + '_' + str(id) + '.h5'
print(file_path)
    if data == 'mitbih':
_, _, Y_test, X_test = get_mitbih()
else:
_, _, Y_test, X_test = get_ptbdb()
model.build(input_shape=(None, X_test.shape[1], X_test.shape[2]))
model.load_weights(file_path)
pred_test = model.predict(X_test)
pred_test = np.argmax(pred_test, axis=-1)
f1 = f1_score(Y_test, pred_test, average="macro")
print("Test f1 score : %s " % f1)
acc = accuracy_score(Y_test, pred_test)
print("Test accuracy score : %s " % acc)
return {'target': Y_test, 'prediction': pred_test}
def get_architecture(type, specs):
    if type == 'cnn':
        return CNNmodel(specs)
    elif type == 'rcnn':
        return RCNNmodel(specs)
    elif type == 'rnn':
        return RNNmodel(specs)
    elif type == 'ensemble':
return Ensemble_FFL_block(specs)
#%%
def load_models(data, type_ids):
print(os.getcwd())
if isinstance(data, list):
data = data[0]
if isinstance(type_ids, tuple):
type_ids = [type_ids]
model_list = []
for ti in type_ids:
t = ti[0]
id = ti[1]
file_path = MODEL_PATH + t + '_' + data + '_' + str(id) + '.h5'
print(file_path)
specs = SPEC_LIST[t]
empty = get_architecture(t, specs)
empty.build(input_shape=(None, 187, 1))
empty.load_weights(file_path)
model_list.append(empty)
return model_list
# create stacked model input dataset as outputs from the ensemble
def stacked_dataset(models, data):
    if data == 'mitbih':
Y, X, Y_test, X_test = get_mitbih()
else:
Y, X, Y_test, X_test = get_ptbdb()
stacked_X = None
stacked_X_test = None
for model in models:
y = model.predict(X, verbose=0)
y_test = model.predict(X_test, verbose=0)
if stacked_X is None:
stacked_X = y
stacked_X_test = y_test
else:
stacked_X = np.dstack((stacked_X, y))
stacked_X_test = np.dstack((stacked_X_test, y_test))
stacked_X = stacked_X.reshape((stacked_X.shape[0], stacked_X.shape[1] * stacked_X.shape[2]))
stacked_X_test = stacked_X_test.reshape((stacked_X_test.shape[0], stacked_X_test.shape[1] * stacked_X_test.shape[2]))
return stacked_X, Y, stacked_X_test, Y_test
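# For example, with three base models and the 5-class mitbih targets each model
# contributes an (n_samples, 5) prediction block; np.dstack gives
# (n_samples, 5, 3) and the reshape flattens it to (n_samples, 15), which is
# what the ensemble feed-forward block is trained on.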
def load_ensemble_nn(data):
specs = SPEC_LIST['ensemble']
    if data == 'mitbih':
specs = list(specs)
specs[-1] = 5
specs = tuple(specs)
return Ensemble_FFL_block(specs)
# specify settings
def run_ensemble(data, type_ids, id=500, mode='nn'):
# mode can be mean, logistic or nn
# load all corresponding models into model-list
models = load_models(data, type_ids)
# predict datasets with models to generate new ensemble dataset
X, Y, X_test, Y_test = stacked_dataset(models, data)
    if mode == 'nn':
file_path = MODEL_PATH + 'ensemble_' + data[0] + '_' + str(id) + '.h5'
model = load_ensemble_nn(data)
opt = tf.keras.optimizers.Adam(0.001)
checkpoint = ModelCheckpoint(file_path, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
early = EarlyStopping(monitor="val_acc", mode="max", patience=5, verbose=1)
redonplat = ReduceLROnPlateau(monitor="val_acc", mode="max", patience=3, verbose=2)
callbacks_list = [checkpoint, early, redonplat]
        if data == 'mitbih':
            model.compile(optimizer=opt, loss=tf.keras.losses.sparse_categorical_crossentropy, metrics=['acc'])
        else:
            model.compile(optimizer=opt, loss=tf.keras.losses.binary_crossentropy, metrics=['acc'])
        model.fit(X, Y, epochs=100, callbacks=callbacks_list, validation_split=0.1)
        model.evaluate(X_test, Y_test)
# Todo: Use a simple mean of the predictions and a logistic regression for comparsion
#%%
def transfer_learning(data_tfl, data, type_id, id=700, freeze=True):
tfl_model = load_models(data_tfl, type_id)[0]
comb_model = tf.keras.Sequential()
for j, layer in enumerate(tfl_model.layers):
        if layer.name == 'ffl_block':
del_id = j
for layer in tfl_model.layers[:-del_id]: # just exclude last layer from copying
comb_model.add(layer)
if freeze:
for layer in comb_model.layers:
layer.trainable = False
ffl_block = load_ensemble_nn(data)
comb_model.add(ffl_block)
opt = tf.keras.optimizers.Adam(0.001)
input_shape = (None, 187, 1)
comb_model.build(input_shape)
trained_model = training(comb_model, opt, data, 'tfl', id)
output = testing(trained_model, data, 'tfl', id)
df = pd.DataFrame.from_dict(output, orient="index")
df.to_csv("results_tfl.csv")
|
adrianomartinelli/machine-learning-for-health-care
|
ecg_arrythmia_analysis/code/functions.py
|
functions.py
|
py
| 8,081 |
python
|
en
|
code
| 0 |
github-code
|
50
|
29774578297
|
import torch
import numpy as np
# Blender is right hand system
def dataset_loader():
data = np.load('../ganyu_150.npz')
# data = np.load('../tiny_nerf_data.npz')
images = data['images']
poses = data['poses'] # camera to world
focal = data['focal']
return images, poses, focal
|
Pokerlishao/MyNeRF
|
datasets/make_dataset.py
|
make_dataset.py
|
py
| 300 |
python
|
en
|
code
| 0 |
github-code
|
50
|
4595634845
|
from django.http import HttpResponseServerError
from rest_framework.viewsets import ViewSet
from rest_framework.response import Response
from rest_framework import serializers, status
from iimproveapi.models import Tag, User
class TagsView(ViewSet):
"""iimproveapi tags view"""
def retrieve(self, request, pk):
"""Handle GET requests for single tag type
Returns:
Response -- JSON serialized tag type
"""
try:
tag = Tag.objects.get(pk=pk)
serializer = TagSerializer(tag)
serial_tag = serializer.data
serial_tag['userId'] = serial_tag.pop('user_id')
return Response(serial_tag)
except Tag.DoesNotExist as ex:
return Response({'message': 'Unable to fetch tag data. '
+ ex.args[0]}, status=status.HTTP_404_NOT_FOUND)
def list(self, request):
"""get my tags"""
try:
user_id = request.GET.get("userId")
tags = Tag.objects.filter(user_id=user_id).values()
serializer = TagSerializer(tags, many=True)
serial_tag = serializer.data
for tag in serial_tag:
tag['userId'] = tag.pop('user_id')
return Response(serial_tag)
except Tag.DoesNotExist as ex:
return Response({'message': 'Unable to get my tag data. '
+ ex.args[0]}, status=status.HTTP_404_NOT_FOUND)
def create(self, request):
'''handels creation of my tags'''
user_id = request.data['userId']
try:
User.objects.get(id = user_id)
tag = Tag.objects.create(
title = request.data['title'],
user_id = user_id
)
serializer = TagSerializer(tag)
return Response(serializer.data)
except User.DoesNotExist as ex:
return Response({'message': 'Unable to create tag. '
+ ex.args[0]}, status=status.HTTP_401_UNAUTHORIZED)
def destroy(self, request, pk):
"""Handle Delete
"""
tag = Tag.objects.get(pk=pk)
tag.delete()
return Response(None, status=status.HTTP_204_NO_CONTENT)
class TagSerializer(serializers.ModelSerializer):
"""JSON serializer for tags
"""
class Meta:
model = Tag
fields = ('id', 'title', 'user_id')
|
nishayaraj/I-Improve-Server
|
iimproveapi/views/tags.py
|
tags.py
|
py
| 2,411 |
python
|
en
|
code
| 0 |
github-code
|
50
|
20698870729
|
from pyo import *
CHORD = {
'maj7': [-12, -8, -5, -1],
'm7': [-12, -9, -5, -2],
'x7': [-12, -8, -5, -2],
'half_dim': [-12, -9, -6, -2]
}
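# The offsets are semitone shifts applied to the incoming (sung) note, so each
# chord is voiced an octave below it with the input note as the root:
# e.g. 'maj7' = [-12, -8, -5, -1] is root-12, plus a major third (+4), a fifth
# (+7) and a major seventh (+11) above that lower root; 'm7' and 'x7' swap in
# the minor third / flat seventh, and 'half_dim' also flattens the fifth (-6).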
s = Server()
s.setInputDevice(3) # Steinberg in
s.setOutputDevice(3) # Steinberg out
s.setMidiInputDevice(99)
s.boot()
mic = Input().play().out()
notes = Notein(poly=10, scale=0, first=0, last=127, channel=0, mul=1)
harm_1, harm_3, harm_5, harm_7 = None, None, None, None
def chord(chordType):
global mic, CHORD, harm_1, harm_3, harm_5, harm_7
tones = CHORD[chordType]
harm_1 = Harmonizer(mic, transpo=tones[0]).out()
harm_3 = Harmonizer(mic, transpo=tones[1]).out()
harm_5 = Harmonizer(mic, transpo=tones[2]).out()
harm_7 = Harmonizer(mic, transpo=tones[3]).out()
def handle_note_on(voice):
pit = int(notes["pitch"].get(all=True)[voice])
if pit == 48:
chord('maj7')
print('Chord: maj7')
elif pit == 49:
chord('m7')
print('Chord: m7')
elif pit == 50:
chord('x7')
print('Chord: x7')
elif pit == 51:
chord('half_dim')
print('Chord: half_dim')
def handle_note_off(voice):
    global harm_1, harm_3, harm_5, harm_7
    if harm_1 is None:
        # No chord has been triggered yet, so there is nothing to stop
        return
    harm_1.stop()
    harm_3.stop()
    harm_5.stop()
    harm_7.stop()
    print('No chords.')
tfon = TrigFunc(notes["trigon"], handle_note_on, arg=list(range(10)))
tfoff = TrigFunc(notes["trigoff"], handle_note_off, arg=list(range(10)))
s.start()
s.gui(locals())
|
ancoopa/chords-machine
|
chord_machine.py
|
chord_machine.py
|
py
| 1,373 |
python
|
en
|
code
| 2 |
github-code
|
50
|
11069612230
|
import os
import tempfile
import unittest
from unittest.mock import patch
from click.testing import CliRunner
from gramps.cli.clidbman import CLIDbManager
from gramps.gen.dbstate import DbState
from sqlalchemy.exc import IntegrityError
from gramps_webapi.__main__ import cli
from gramps_webapi.app import create_app
from gramps_webapi.const import ENV_CONFIG_FILE, TEST_AUTH_CONFIG
class TestPerson(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.name = "Test Web API"
cls.dbman = CLIDbManager(DbState())
_, _name = cls.dbman.create_new_db_cli(cls.name, dbid="sqlite")
cls.config_file = tempfile.NamedTemporaryFile(delete=False)
cls.user_db = tempfile.NamedTemporaryFile(delete=False)
config = """TREE="Test Web API"
SECRET_KEY="C2eAhXGrXVe-iljXTjnp4paeRT-m68pq"
USER_DB_URI="sqlite:///{}"
""".format(
cls.user_db.name
)
with open(cls.config_file.name, "w") as f:
f.write(config)
with patch.dict("os.environ", {ENV_CONFIG_FILE: cls.config_file.name}):
cls.app = create_app()
cls.app.config["TESTING"] = True
cls.client = cls.app.test_client()
cls.runner = CliRunner()
@classmethod
def tearDownClass(cls):
cls.dbman.remove_database(cls.name)
os.remove(cls.config_file.name)
os.remove(cls.user_db.name)
def test_add_delete_user(self):
result = self.runner.invoke(
cli, ["--config", self.config_file.name, "user", "add", "user", "123"]
)
assert result.exit_code == 0
# try adding again
result = self.runner.invoke(
cli, ["--config", self.config_file.name, "user", "add", "user", "123"]
)
assert result.exception
result = self.runner.invoke(
cli, ["--config", self.config_file.name, "user", "delete", "user"]
)
assert result.exit_code == 0
# try deleting again
result = self.runner.invoke(
cli, ["--config", self.config_file.name, "user", "delete", "user"]
)
assert result.exception
|
windmark/gramps-webapi
|
tests/test_cli.py
|
test_cli.py
|
py
| 2,131 |
python
|
en
|
code
| null |
github-code
|
50
|
26382057896
|
import os
import setuptools
from tools import get_requirements, get_readme, get_version
def main():
path = os.path.dirname(os.path.abspath(__file__))
version = get_version()
open( os.path.join(path, "kara_storage", "version.py"), "w" ).write('version = "%s"' % version)
setuptools.setup(
name="kara_storage",
version=version,
author="a710128",
author_email="[email protected]",
description="Kara Storage SDK",
long_description=get_readme(),
long_description_content_type="text/markdown",
url="https://git.thunlp.vip/kara/kara-row-storage",
packages=setuptools.find_packages(exclude=("tools",)),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Programming Language :: C++"
],
python_requires=">=3.6",
setup_requires=["wheel"],
scripts=["scripts/kara_storage"],
install_requires=get_requirements()
)
if __name__ == "__main__":
main()
|
a710128/kara-storage
|
setup.py
|
setup.py
|
py
| 1,061 |
python
|
en
|
code
| 7 |
github-code
|
50
|
8454219258
|
from django.urls import path
from .views import RegisterView, RetrieveUserView, LogoutView
from . import views
urlpatterns = [
path('register', RegisterView.as_view()),
path('me', RetrieveUserView.as_view()),
path('login', views.LoginView,name="login"),
path('logout', LogoutView.as_view()),
path('verify_token',views.verify_token,name='verify_token'),
path('profile_view/<int:id>',views.profile_view,name='profile_view'),
path('addImage/<int:id>',views.addImage,name='addImage'),
#adminside
path('admin_login',views.admin_login,name='admin_login'),
path('user_list',views.user_list,name='user_list'),
path('edit_user/<int:id>',views.edit_user,name='edit_user'),
path('update_user/<int:id>',views.update_user,name='update_user'),
# path('edit_user/<int:id>',views.edit_user,name='edit_user'),
path('delete_user/<int:id>',views.delete_user,name='delete_user'),
]
|
NithinKrishna10/Django-Rest-Framework-JWT-authentication
|
accounts/urls.py
|
urls.py
|
py
| 935 |
python
|
en
|
code
| 0 |
github-code
|
50
|
45009242698
|
# Author: Sheikh Rabiul Islam
# Date: 07/10/2019; updated: 07/15/2019
# Purpose: preprocess data using all features; resample minority class;
# save the fully processed data as numpy array (binary: data/____.npy)
#import modules
import pandas as pd
import numpy as np
import time
from sklearn.utils import shuffle
start = time.time()
# import data
dataset = pd.read_csv('data/combined_sampled.csv', sep=',', dtype='unicode')
dataset = shuffle(dataset)
dataset = dataset.iloc[:, 1:] # drop the first Unnamed 0 column
#maximum finite value in any cell of the dataset. Infinity value in any cell is replaced with with this value.
max_value = 655453030.0
# separate the dependent (target) variable
X = dataset.iloc[:,0:-1].values
X_columns = dataset.iloc[:,0:-1].columns.values
y = dataset.iloc[:,-1].values
#del(dataset)
#X_bk = pd.DataFrame(data=X, columns =X_columns )
from sklearn.preprocessing import LabelEncoder
df_dump_part1 = pd.DataFrame(X, columns=X_columns)
df_dump_part2 = pd.DataFrame(y, columns=['Class'])
df_dump = pd.concat([df_dump_part1,df_dump_part2], axis = 1)
df_dump.to_csv("data/data_preprocessed_numerical.csv",encoding='utf-8', index = False) # keeping a backup of preprocessed numerical data.
end = time.time()
print("checkpoint 1:", end-start)
start = time.time()
# Encoding the Dependent Variable
labelencoder_y = LabelEncoder()
y = labelencoder_y.fit_transform(y)
X = np.array(X, dtype=float) # this is required for checking infinite and null below
#X_bk = pd.DataFrame(data=X, columns =X_columns )
# replace infinite with max_value, null with 0.0
for i in range(X.shape[0]):
for j in range(X.shape[1]):
k = X[i,j]
if not np.isfinite(k):
X[i,j] = max_value
if np.isnan(k):
X[i,j] = 0.0
# Feature Scaling (scaling all attributes/featues in the same scale)
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_ = sc.fit_transform(X[:, 1:-1]) # except first and last column as first is index and last is class all
X = np.hstack((X[:,[0,-1]],X_)) #append old index and class all in the beginning
del X_
#add index to X to identify the rows after split.
index = np.arange(len(X)).reshape(len(X),1)
X = np.hstack((index,X))
#########separating training and test set ##################
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.3, random_state=42,stratify=y)
col_l = ['index','index_old', 'Class_all']
for i in range(1,len(X_columns)-1): #excluding index_old from X_columns as it is already included
col_l.append(X_columns[i])
#dump preprocessed trainset which includes (id, old index, class all, and class)
df_dump_part1 = pd.DataFrame(X_train, columns=col_l)
df_dump_part2 = pd.DataFrame(y_train, columns=['Class'])
df_dump = pd.concat([df_dump_part1,df_dump_part2], axis = 1)
df_dump.to_csv("data/data_preprocessed_numerical_train_all_features.csv",encoding='utf-8')
#dump preprocessed testset which includes (id, old index, class all, and class)
df_dump_part1 = pd.DataFrame(X_test, columns=col_l)
df_dump_part2 = pd.DataFrame(y_test, columns=['Class'])
df_dump = pd.concat([df_dump_part1,df_dump_part2], axis = 1)
df_dump.to_csv("data/data_preprocessed_numerical_test_all_features.csv",encoding='utf-8')
del df_dump_part1
del df_dump_part2
del df_dump
end = time.time()
print("checkpoint 2:", end-start)
# index, old index, class all in X is no more needed; drop it
start = time.time()
X_train = np.delete(X_train,0,1) #drop index
X_test = np.delete(X_test,0,1)
X_train = np.delete(X_train,0,1) #drop old index
X_test = np.delete(X_test,0,1)
X_train = np.delete(X_train,0,1) #drop class all
X_test = np.delete(X_test,0,1)
del(X) # free some memory; encoded (onehot) data takes lot of memory
del(y) # free some memory; encoded (onehot) data takes lot of memory
#dump onehot encoded training data
# save the fully processed data as binary for future use in any ML algorithm without any more preprocessing.
np.save('data/data_fully_processed_X_train_all_features.npy',X_train)
np.save('data/data_fully_processed_y_train_all_features.npy',y_train)
print("Before OverSampling, counts of label '1': {}".format(sum(y_train==1)))
print("Before OverSampling, counts of label '0': {} \n".format(sum(y_train==0)))
# save the fully processed data as binary for future use in any ML algorithm without any more preprocessing.
np.save('data/data_fully_processed_X_test_all_features.npy',X_test)
np.save('data/data_fully_processed_y_test_all_features.npy',y_test)
end = time.time()
print("checkpoint 3:", end-start)
################oversampling the minority class of training set #########
from imblearn.over_sampling import SMOTE
# help available here: #https://imbalanced-learn.readthedocs.io/en/stable/generated/imblearn.over_sampling.SMOTE.html
sm = SMOTE(random_state=42)
X_train_res, y_train_res = sm.fit_sample(X_train, y_train)
# save the fully processed data as binary for future use in any ML algorithm without any more preprocessing.
np.save('data/data_fully_processed_X_train_resampled_all_features.npy',X_train_res)
np.save('data/data_fully_processed_y_train_resampled_all_features.npy',y_train_res)
print('After OverSampling, the shape of train_X: {}'.format(X_train_res.shape))
print('After OverSampling, the shape of train_y: {} \n'.format(y_train_res.shape))
print("After OverSampling, counts of label '1': {}".format(sum(y_train_res==1)))
print("After OverSampling, counts of label '0': {}".format(sum(y_train_res==0)))
|
SheikhRabiul/domain-knowledge-aided-explainable-ai-for-intrusion-detection-and-response
|
data_preprocess_all_features.py
|
data_preprocess_all_features.py
|
py
| 5,596 |
python
|
en
|
code
| 1 |
github-code
|
50
|
874748908
|
class RobotInAGrid:
"""
8.2
Robot in a Grid: Imagine a robot sitting on the upper left corner of grid with r rows and c columns.
The robot can only move in two directions, right and down, but certain cells are "off limits" such that
the robot cannot step on them. Design an algorithm to find a path for the robot from the top left to
the bottom right.
"""
def __init__(self, stop_cells, r, c):
"""
:param r: rows in a grid
:param c: cells in a grid
"""
self.grid_r = r - 1
self.grid_c = c - 1
self.stop_cells = set(stop_cells)
def find_path(self):
"""
Algorithm to find a path for the robot from the top left to
the bottom right.
Algo: step right, if not possible, step left, if not possible go back
"""
# Path is a list of cells
path = [(0, 0)]
# Visited cells, from which we went right or left
went_right: set = set()
went_down: set = set()
r = 0
c = 0
while (r, c) != (self.grid_r, self.grid_c):
# Step right if possible and we have not already been there
if c < self.grid_c \
and (r, c) not in went_right \
and (r, c + 1) not in self.stop_cells:
went_right.add((r, c))
c += 1
path.append((r, c))
# Step down if possible and we have not already been there
elif r < self.grid_r \
and (r, c) not in went_down \
and (r + 1, c) not in self.stop_cells:
went_down.add((r, c))
r += 1
path.append((r, c))
            # No way to go right or down: drop the dead-end cell and step back
            else:
                path.pop()
                if not path:
                    # Every route from the start is blocked; no path exists
                    return []
                (r, c) = path[-1]
return path
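# Usage sketch (illustrative grid): a 3x3 grid with one blocked cell; the
# expected output is a list of (row, col) cells from (0, 0) to (2, 2).
if __name__ == "__main__":
    robot = RobotInAGrid(stop_cells=[(1, 1)], r=3, c=3)
    print(robot.find_path())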
|
DmitryPukhov/pyquiz
|
pyquiz/ctci/dynamic/RobotInAGrid.py
|
RobotInAGrid.py
|
py
| 1,849 |
python
|
en
|
code
| 0 |
github-code
|
50
|
25822728887
|
imdb_file = input("Enter the name of the IMDB file ==> ").strip()
print(imdb_file)
counts = dict()
for line in open(imdb_file, encoding = "ISO-8859-1"):
words = line.strip().split('|')
movie = words[1].strip()
if movie in counts:
if words[0] in counts[movie]:
continue
counts[movie].append(words[0])
continue
counts[movie]=[words[0]]
movies=sorted(counts)
vals = sorted(counts.values())
# find the longest actor list, i.e. the movie with the most distinct actors
max_val=max(vals,key=len)
max_movie=[]
ones_count=0
for index in range(len(movies)):
movie = movies[index]
if len(counts[movie])==1:
ones_count+=1
for key,value in counts.items():
if value==max_val:
max_movie.append(key)
print(len(max_val))
print(max_movie[0])
print(ones_count)
|
emilyvroth/cs1
|
lecture/lecture17/part2.py
|
part2.py
|
py
| 758 |
python
|
en
|
code
| 0 |
github-code
|
50
|
70644985755
|
from flask_app import app
from flask import render_template, request, redirect
from flask_app.models.user import User
@app.route("/")
def index():
return render_template("index.html")
@app.route("/read_all")
def read_all():
return render_template("read(all).html", all_users = User.retrieve_all())
@app.route('/create_user', methods=['POST'])
def add_user_to_db():
data = {
"fn": request.form["fname"],
"ln": request.form["lname"],
"email": request.form["email"]
}
User.save(data)
return redirect("/")
@app.route('/create_new')
def create_new_user():
return render_template("create.html")
@app.route('/read_one/<int:id>')
def read_one_page(id):
data = {
'id': id
}
return render_template("read(one).html", one_user = User.retrieve_one(data))
@app.route('/edit_form/<int:id>')
def editing_form(id):
data = {
'id':id
}
return render_template("/user_edit.html", that_one_id = User.retrieve_one(data))
@app.route('/users_edit/<int:id>',methods = ['POST'])
def user_edit(id):
data = {
"id": id,
"fn": request.form["fname"],
"ln": request.form["lname"],
"email": request.form["email"]
}
User.update(data)
return redirect(f'/read_one/{id}')
@app.route('/delete_user/<int:id>')
def delete_user(id):
data = {
'id': id
}
User.destroy(data)
return redirect('/')
|
Diaz1620/user_crud_mod
|
flask_app/controllers/users.py
|
users.py
|
py
| 1,422 |
python
|
en
|
code
| 0 |
github-code
|
50
|
72087571675
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bbs', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='parent_comment',
field=models.ForeignKey(related_name='p_comment', blank=True, to='bbs.Comment', null=True),
),
]
|
triaquae/py_training
|
OldboyBBS2/bbs/migrations/0002_auto_20150909_0238.py
|
0002_auto_20150909_0238.py
|
py
| 449 |
python
|
en
|
code
| 85 |
github-code
|
50
|
6790715267
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 10 22:30:13 2018
@author: Zhang Xiang
"""
import numpy as np
def loadData(filename):
    """Load a tab-separated data file into a list of float rows."""
    dataMat = []
    fr = open(filename)
    for line in fr.readlines():
        curline = line.split('\t')
        curline = list(map(float, curline))
        dataMat.append(curline)
    return dataMat
def binsplitDataSet(dataMat, feature, value):
    """Split the data into two subsets on the given feature and value."""
    mat0 = dataMat[np.nonzero(dataMat[:, feature]>value)[0], :]
    mat1 = dataMat[np.nonzero(dataMat[:, feature]<=value)[0], :]
    return mat0, mat1
def solverLinear(dataSet):
    # Set up the normal equations for an ordinary least-squares fit
    m, n = np.shape(dataSet)
    X = np.matrix(np.ones((m, n)))
    y = np.matrix(np.ones((m, 1)))
    X[:, 1:] = dataSet[:, :n-1]
    y = dataSet[:, -1]
    xTx = X.T*X
    if np.linalg.det(xTx) == 0:
        # The matrix cannot be inverted; stop instead of crashing on xTx.I
        raise ValueError('This matrix is singular, try increasing the value of ops[1]')
    ws = xTx.I*(X.T*y)
    return ws, X, y
def modelleaf(dataSet):
    # Model-tree leaf: the leaf stores the linear-regression coefficients
    ws, X, y = solverLinear(dataSet)
    return ws
def modelErr(dataSet):
    # Model-tree error: sum of squared errors of the linear fit on this subset
    ws, X, y = solverLinear(dataSet)
    yHat = X*ws
    return sum(np.power(yHat - y, 2))
def ChooseBestSplit(dataSet, leafType = modelleaf, errType = modelErr, ops = (1, 4)):
tolS = ops[0]; tolN = ops[1]
if len(set(dataSet[:, -1].T.tolist()[0])) == 1:
return None, leafType(dataSet)
m,n = np.shape(dataSet)
S = errType(dataSet)
bestS = np.inf;bestIndex = 0;bestValue = 0
for featureIndex in range(n-1):
for splitValue in set(dataSet[:,featureIndex].T.tolist()[0]):
mat0, mat1 = binsplitDataSet(dataSet, featureIndex, splitValue)
if (len(mat0)<tolN or len(mat1)<tolN):
continue
errS = errType(mat0) + errType(mat1)
if errS < bestS:
bestIndex = featureIndex
bestS = errS
bestValue = splitValue
if (S - bestS) < tolS:
return None, leafType(dataSet)
mat0, mat1 = binsplitDataSet(dataSet, bestIndex, bestValue)
if (len(mat0) < tolN or len(mat1) < tolN):
return None, leafType(dataSet)
return bestIndex, bestValue
def CreatTree(dataSet, leafType = modelleaf, errType = modelErr, ops = (10, 100)):
"""构造树"""
feat, val =ChooseBestSplit(dataSet, leafType, errType, ops)
if feat == None:
return val
retTree = {}
retTree['spInd'] = feat
retTree['spVal'] = val
lmat, rmat = binsplitDataSet(dataSet, feat, val)
retTree['left'] = CreatTree(lmat, leafType, errType, ops)
retTree['right'] = CreatTree(rmat, leafType, errType, ops)
return retTree
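# Illustrative prediction helpers (a sketch, not part of the original file):
# walk the tree and, at each leaf, evaluate the stored linear model on a
# 1xN matrix of feature values (prepending the constant 1 term to match
# solverLinear's design matrix).
def isTree(obj):
    return isinstance(obj, dict)
def modelTreeEval(ws, inDat):
    n = np.shape(inDat)[1]
    X = np.matrix(np.ones((1, n + 1)))
    X[:, 1:n + 1] = inDat
    return float(X * ws)
def treeForecast(tree, inDat):
    if not isTree(tree):
        return modelTreeEval(tree, inDat)
    # binsplitDataSet puts rows with feature > value into the left branch
    if inDat[0, tree['spInd']] > tree['spVal']:
        return treeForecast(tree['left'], inDat)
    else:
        return treeForecast(tree['right'], inDat)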
if __name__ == "__main__":
    # Build the tree; the ops tuple (tolS, tolN) controls its size
filename = r'E:\machinelearninginaction\Ch09\ex2.txt'
dataMat = loadData(filename)
dataMat = np.mat(dataMat)
myTree = CreatTree(dataMat)
print(myTree)
|
zhangxiangchn/Demo
|
Model Tree.py
|
Model Tree.py
|
py
| 3,132 |
python
|
en
|
code
| 3 |
github-code
|
50
|
10977899326
|
import logging
import numpy as np
from ibmfl.model.model_update import ModelUpdate
from ibmfl.aggregator.fusion.iter_avg_fusion_handler import IterAvgFusionHandler
logger = logging.getLogger(__name__)
class PrejudiceRemoverFusionHandler(IterAvgFusionHandler):
def fusion_collected_responses(self, lst_model_updates, key='weights'):
"""
Receives a list of model updates, where a model update is of the type
`ModelUpdate`, using the values (indicating by the key)
included in each model_update, it finds the mean.
:param lst_model_updates: List of model updates of type `ModelUpdate` \
to be averaged.
:type lst_model_updates: `list`
:param key: A key indicating what values the method will aggregate over.
:type key: `str`
:return: results after aggregation
:rtype: `list`
"""
v = []
for update in lst_model_updates:
a = update.get(key)
            # Checks if LRwPRType4() appends 'None' to updates
            if a[-1] is None:
v.append(np.array(a[:-1]))
else:
v.append(np.array(a))
results = np.mean(np.array(v), axis=0)
return results.tolist()
|
SEED-VT/FedDebug
|
debugging-constructs/ibmfl/aggregator/fusion/prej_remover_fusion_handler.py
|
prej_remover_fusion_handler.py
|
py
| 1,248 |
python
|
en
|
code
| 7 |
github-code
|
50
|
4815676842
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pickle
# Create/open the "acces" file where the access data (everything except the
# password) is stored; read it and close the file.
def lee(nombre):
    try:
        # Read the data from the acces.txt file
        fin = open(nombre, "rb")
        datos = pickle.load(fin)
        fin.close()
        # Pending further investigation
        valores = [i.rstrip() for i in datos]
        return valores
    except Exception:
        # If the file does not exist yet, the except branch creates it
        # together with an empty list and returns an empty result.
        lst = ['']
        fin = open(nombre, "wb")
        pickle.dump(lst, fin)
        fin.close()
        return []
def escribe(nombre,valores):
fin=open(nombre, "wb")
pickle.dump(valores,fin)
fin.close()
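# Usage sketch (the file name and entry below are illustrative): read the
# stored access list, append an entry and write the list back.
if __name__ == "__main__":
    datos = lee("acces.txt")
    datos.append("nuevo_usuario")
    escribe("acces.txt", datos)
    print(lee("acces.txt"))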
|
Carlostlr/Gestor-empresa
|
general/archivos.py
|
archivos.py
|
py
| 809 |
python
|
es
|
code
| 0 |
github-code
|
50
|
72148543514
|
import streamlit as st
import io
import pdfplumber
import openai
from keys import OPEN_API_KEY
# Set your API key
openai.api_key = OPEN_API_KEY
# Define the model you want to use
MODEL_NAME = "text-davinci-003"
MAX_TOKENS = 100
# Page Configuration
st.set_page_config(page_title="PDF Summarizer", page_icon=":arrow_up:", layout="wide")
# Header
st.title("PDF Summarizer")
def summarize_pdf_text(pdf_text: str) -> str:
try:
# Tokenize and split the text into manageable chunks if needed
# For simplicity, this example does not include chunking logic
# You might need to add logic to handle long texts
# Call the OpenAI API to summarize
response = openai.Completion.create(
model=MODEL_NAME,
prompt=f"Summarize this document with {MAX_TOKENS} max tokens: {pdf_text}",
max_tokens=MAX_TOKENS, # Adjust based on your needs
)
return response.choices[0].text.strip()
except Exception as e:
st.error(f"An error occurred: {e}")
return ""
def read_pdf(file):
try:
with pdfplumber.open(file) as pdf:
pages = [page.extract_text() for page in pdf.pages]
return "\n".join(pages)
except Exception as e:
st.error(f"Error reading PDF: {e}")
return ""
uploaded_file = st.file_uploader("Upload a PDF file", type="pdf")
if uploaded_file is not None:
with st.spinner("Reading PDF..."):
with io.BytesIO(uploaded_file.getbuffer()) as file_stream:
pdf_text = read_pdf(file_stream)
if pdf_text:
with st.spinner("Summarizing..."):
summary = summarize_pdf_text(pdf_text)
st.markdown(summary)
|
mazalkov/baag.ai
|
src/baag/app.py
|
app.py
|
py
| 1,707 |
python
|
en
|
code
| 0 |
github-code
|
50
|
28103079549
|
# Python calculator
calc = True
while calc:
entrada = input('Pressione "Enter" para continuar ou "sair" para encerrar o programa: ').lower()
if entrada != 'sair':
num1 = input('Digite um número: ')
int_num1 = int(num1)
oper = input('Digite a operação (+, -, /, *) >> ')
num2 = input('Digite outro número: ')
int_num2 = int(num2)
if oper == '+':
print(f'A soma de {num1} + {num2} é igual a: {int_num1+int_num2}')
elif oper == '-':
print(f'A subtração de {num1} - {num2} é igual a: {int_num1-int_num2}')
elif oper == '/':
print(f'A divisão entre {num1} / {num2} é igual a: {int_num1/int_num2}')
elif oper == '*':
print(f'A multiplicação de {num1} * {num2} é igual a: {int_num1*int_num2}')
else:
print('Ops, não entendi a operação.')
else:
calc = False
print('Fim do programa.')
|
marcelogabrielcn/udemy_python2023
|
aula27.py
|
aula27.py
|
py
| 983 |
python
|
pt
|
code
| 0 |
github-code
|
50
|
19873780444
|
#!/usr/bin/python
from string import Template
import stat
import SCons
def md5sum(filename):
    import hashlib
    # open() instead of the Python-2-only file() builtin
    f = open(filename, 'rb')
    return hashlib.md5(f.read()).hexdigest()
def md5sum_action(target, source, env):
    for i in range(len(source)):
        digest = md5sum(source[i].abspath)
        content = digest + ' ' + source[i].name + '\n'
        open(target[i].abspath, 'w').write(content)
    return 0
def md5sum_emitter(target, source, env):
if len(target) < len(source):
diff = len(source) - len(target)
offset = len(target)
for i in range(diff):
s = source[offset + i]
target.append(env.File(s.abspath + '.md5sum'))
return (target, source)
def generate(env, **kw):
try:
env['BUILDERS']['MD5SUMSTR']
env['BUILDERS']['MD5SUM']
except KeyError:
md5str = "Caculating md5sum: $TARGETS"
action = SCons.Action.Action(md5sum_action, '$MD5SUMSTR')
env['MD5SUMSTR'] = md5str
env['BUILDERS']['MD5SUM'] = env.Builder(action = action,
emitter = md5sum_emitter,
suffix = '.md5sum')
def exists(env):
try:
import hashlib
return True
except:
return False
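# Illustrative SConstruct usage (a sketch; the tool name resolution assumes
# this file lives under site_scons/site_tools as the path above suggests):
#
#   env = Environment(tools=['default', 'md5sum'])
#   env.MD5SUM('release/app.bin')   # writes release/app.bin.md5sum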
|
LolHacksRule/popcap
|
osframework/source/site_scons/site_tools/md5sum.py
|
md5sum.py
|
py
| 1,306 |
python
|
en
|
code
| 5 |
github-code
|
50
|
9148368381
|
# -*- coding: utf-8 -*-
import os
import yaml
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.exc import IntegrityError, SQLAlchemyError
import pysite.models
from pysite.authmgr.models import Principal, Role
def check_site(sites_dir, sitename):
"""
Checks integrity of a site.
A site is integer if:
- It has a matching subdirectory in SITES_DIR
- It has a matching YAML file in SITES_DIR
- Its master rc at least has settings for
- ``max_size``
- Its master ACL has at least one entry for a file manager:
- Permission "allow"
- List of principals contains at least one role (i.e. the manager role)
- Permission name is 'manage_files'
- The manager role exists in the database
- The manager role has at least one member
:param site_dir: The SITE_DIR, e.g. as configured in the app's rc file
:param sitename: Name of site to check
:returns: Returns a dict with the collected information. Key ``rc`` has
the loaded configuration (or None) and ``manager`` has details
about the manager: It is a dict with keys ``rolename`` and
``principals``. And if errors or warnings occured, respective
keys are set (if both of them are absent, everything went well).
"""
errors = []
warnings = []
dir_ = os.path.join(sites_dir, sitename)
info = dict(rc=None, manager=dict(rolename=None, principals=None),
errors=errors, warnings=warnings)
sess = pysite.models.DbSession()
def _check_dir():
if not os.path.exists(dir_):
errors.append("Site directory does not exist: '{0}'".format(
dir_))
return False
if not os.path.isdir(dir_):
errors.append("Site is not a directory: '{0}'".format(
dir_))
return False
return True
def _load_rc(fn):
with open(fn, 'r', encoding='utf-8') as fh:
return yaml.load(fh)
def _check_rc(rc):
# Process all warnings without stopping
if rc:
if not 'max_size' in rc:
warnings.append("Rc has 'max_size' not set. Default applies.")
if not 'acl' in rc:
warnings.append("Rc has no ACL.")
else:
warnings.append("Master rc has no settings")
def _check_acl(acl):
for ace in acl:
if 'allow'.startswith(ace[0].lower()) \
and ace[2] == 'manage_files':
rolename = ace[1][2:] if ace[1].startswith('r:') else ace[1]
try:
role = sess.query(Role).filter(Role.name == rolename).one()
except NoResultFound:
errors.append("Role '{0}' does not exist".format(rolename))
return False
info['manager']['rolename'] = "{0} ({1})".format(role.name,
role.id)
info['_role'] = role
return True
# No role was set or allowed
warnings.append("""ACL contains no role that is permitted
'manage_files'""")
return True # still return True, this is a warning, no error
def _check_rolemember(role):
qry = sess.query(Principal).filter(Principal.roles.any(
name=role.name))
principals = ["{0} ({1})".format(p.principal, p.id)
for p in qry.all()]
if not principals:
errors.append("Role '{0}' has no members".format(role.name))
return False
info['manager']['principals'] = principals
return True
if not _check_dir():
return info
rc = _load_rc(dir_ + '.yaml')
info['rc'] = rc
_check_rc(rc) # this produces only warnings
if rc and 'acl' in rc:
if not _check_acl(rc['acl']):
return info
if '_role' in info:
if not _check_rolemember(info['_role']):
return info
del info['_role']
return info
def create_site(owner, sites_dir, data):
"""
Creates a site.
The data must be a dict with this keys:
- ``sitename``: Name of the site. This will be the name of the site's
directory and its manager role.
- ``title``: Optional. Title of the site. Will be written in the site's
user rc.
- ``master_rc``: Optional. Dict with settings for the master rc file.
- ``role``: Optional. Name of the manager role. If omitted, the site's name
is used.
- ``principal``: Either a principal (string) of an existing principal, or a
dict with data for a new principal.
- ``site_template``: Optional. Specifies a site template that is copied to
the new directory. If it starts with a path separator, e.g. `/', it is
treated as an absolute path, else it is treated as the name of a template
within ``var/site-templates``. If omitted, the template "default" is
used.
The data for a new principal must be a dict with these keys:
``principal``, ``email``, ``pwd``. Other keys may optionally be given, like
``first_name``, ``last_name``, ``display_name``, ``notes``.
:param sites_dir: Directory where the site will be stored
:param data: Data structure that describes the new site, see above.
:returns: Dict with keys ``errors`` and ``warnings``.
"""
errors = []
warnings = []
msgs = []
info = dict(errors=errors, warnings=warnings, msgs=msgs)
if 'sitename' not in data:
errors.append("Key 'sitename' is missing from data")
return info
if 'principal' not in data:
errors.append("Key 'principal' is missing from data")
return info
dir_ = os.path.join(sites_dir, data['sitename'])
rolename = data['role'] if 'role' in data else data['sitename']
site_template = data.get('site_template', 'default')
if not site_template.startswith(os.path.sep):
root_dir = os.path.join(os.path.dirname(__file__), '..', '..')
site_template = os.path.join(root_dir, 'var', 'site-templates',
site_template)
with open(site_template + '.yaml', 'r', encoding='utf-8') as fh:
master_rc = yaml.load(fh)
master_rc['acl'][0][1] = 'r:' + rolename
if 'master_rc' in data:
master_rc.update(data['master_rc'])
fn = os.path.join(site_template, 'rc.yaml')
with open(fn, 'r', encoding='utf-8') as fh:
user_rc = yaml.load(fh)
if 'title' in data:
user_rc.update(dict(title=data['title']))
def _create_site_files():
import shutil
try:
fn = dir_ + '.yaml'
# Ensure the site does not exist yet
if os.path.exists(dir_):
raise IOError("Site directory already exists: '{0}'"
.format(dir_))
if os.path.exists(fn):
raise IOError("Master rc file already exists: '{0}'"
.format(fn))
# Copy template
if site_template:
shutil.copytree(site_template, dir_)
msgs.append("Copied template " + site_template)
else:
# Site dir
os.mkdir(dir_)
# Top level dirs
dirs = ['assets', 'cache', 'plugins', 'content']
for d in dirs:
os.mkdir(os.path.join(dir_, d))
# Master rc file
with open(fn, 'w', encoding='utf-8') as fh:
yaml.dump(master_rc, fh, allow_unicode=True,
default_flow_style=False)
# User rc file
if user_rc:
fn = os.path.join(dir_, 'rc.yaml')
with open(fn, 'w', encoding='utf-8') as fh:
yaml.dump(user_rc, fh, allow_unicode=True,
default_flow_style=False)
return True
except IOError as e:
errors.append(e)
return False
def _create_role_and_principal():
import pysite.authmgr.manager as usrmanager
sess = pysite.models.DbSession()
try:
role = sess.query(Role).filter(Role.name == rolename).one()
msgs.append("Use existing role '{0}' ({1})".format(
role.name, role.id))
except NoResultFound:
role_data = dict(
name=rolename,
owner=owner,
notes="Manager role for site '{0}'".format(data['sitename'])
)
role = usrmanager.create_role(role_data)
msgs.append("Created role '{0}' ({1})".format(role.name, role.id))
if isinstance(data['principal'], dict):
data['principal']['owner'] = owner
principal = usrmanager.create_principal(data['principal'])
msgs.append("Created principal '{0}' ({1})".format(
principal.principal, principal.id))
else:
try:
principal = sess.query(Principal).filter(
Principal.principal == data['principal']).one()
msgs.append("Use existing principal '{0}' ({1})".format(
principal.principal, principal.id))
except NoResultFound:
errors.append("Principal '{0}' not found".format(
data['principal']))
return info
try:
# Save these here. If create_rolemember fails, the session is
# aborted and we cannot access the attributes of the entities
# in the except handler.
princ = principal.principal
rol = role.name
usrmanager.create_rolemember(dict(principal_id=principal.id,
role_id=role.id, owner=owner))
msgs.append("Set principal '{0}' as member of role '{1}'".format(
principal.principal, role.name))
except IntegrityError:
msgs.append("Principal '{0}' is already member of role '{1}'".format(
princ, rol))
if not _create_site_files():
return info
try:
_create_role_and_principal()
except SQLAlchemyError as e:
errors.append(e)
return info
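# Example (a sketch, not executed here): the ``data`` structure expected by
# ``create_site`` as described in its docstring. All values below are
# illustrative placeholders; ``principal`` may instead be the login name of
# an existing principal.
EXAMPLE_SITE_DATA = {
    'sitename': 'demo_site',
    'title': 'Demo Site',
    'role': 'demo_site_managers',          # optional; defaults to sitename
    'principal': {
        'principal': 'demo_admin',
        'email': '[email protected]',
        'pwd': 'change_me',
    },
    # 'site_template': 'default',          # optional
}
# info = create_site(owner='root', sites_dir='/path/to/sites', data=EXAMPLE_SITE_DATA)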
|
dmdm/PySite
|
pysite/sitemgr/manager.py
|
manager.py
|
py
| 10,164 |
python
|
en
|
code
| 5 |
github-code
|
50
|
20334472449
|
import numpy as np
def preprocess(dataset):
data = np.array(dataset['data'])
data = np.unique(data, axis=0)
X = data[:, :-1]
y = data[:, -1]
X = X.astype(np.float64)
y = y.astype(np.uint32)
return X, y
|
MarioDudjak/OversamplingWorkflow
|
program/DatasetManagement/Preprocessing.py
|
Preprocessing.py
|
py
| 234 |
python
|
en
|
code
| 0 |
github-code
|
50
|
23534776330
|
import streamlit as st
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import hiplot as hip
import plotly.express as px
#import altair as alt
#sklearn
from sklearn.preprocessing import MinMaxScaler
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.impute import KNNImputer
def predict_linear_regression(df):
#Fill up data with KNN
my_imputer = KNNImputer(n_neighbors=5, weights='distance', metric='nan_euclidean')
df_repaired = pd.DataFrame(my_imputer.fit_transform(df), columns=df.columns)
# Data Preparation
X = df_repaired.drop(["Potability"], axis=1)
y = df_repaired["Potability"]
# Split the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
# Train the Linear Regression model
model = LinearRegression()
model.fit(X_train, y_train)
# Streamlit App
st.title("Water Portability Prediction with Linear Regression")
# Sidebar for user input
c1 = st.columns(3)
c2 = st.columns(3)
c3 = st.columns(3)
ph = c1[0].slider("pH Value", 0.0, 14.0, 7.0)
hardness = c1[1].slider("Hardness", 0, 500, 250)
solids = c1[2].slider("Solids", 0, 50000, 25000)
chloramines = c2[0].slider("Chloramines", 0.0, 15.0, 7.5)
sulfate = c2[1].slider("Sulfate", 0, 500, 250)
conductivity = c2[2].slider("Conductivity", 100, 1000, 550)
organic_carbon = c3[0].slider("Organic Carbon", 0, 50, 25)
trihalomethanes = c3[1].slider("Trihalomethanes", 0.0, 150.0, 75.0)
turbidity = c3[2].slider("Turbidity", 0.0, 10.0, 5.0)
# Create a DataFrame for prediction
input_data = {
"ph": ph,
"Hardness": hardness,
"Solids": solids,
"Chloramines": chloramines,
"Sulfate": sulfate,
"Conductivity": conductivity,
"Organic_carbon": organic_carbon,
"Trihalomethanes": trihalomethanes,
"Turbidity": turbidity
}
input_df = pd.DataFrame([input_data])
# Predict using the model
prediction = model.predict(input_df)
# Display the prediction result
st.header("Prediction Result")
st.write(f"The predicted Potability is: {prediction[0]*100:.2f} %")
# Optional: Show the model's metrics
y_pred = model.predict(X_test)
mae = mean_absolute_error(y_test, y_pred)
mse = mean_squared_error(y_test, y_pred)
st.header("Model Evaluation")
st.write(f"Mean Absolute Error (MAE): {mae:.4f}")
st.write(f"Mean Squared Error (MSE): {mse:.4f}")
def predict_KNN(df):
#Fill up data with KNN
my_imputer = KNNImputer(n_neighbors=9, weights='distance', metric='nan_euclidean')
df_repaired = pd.DataFrame(my_imputer.fit_transform(df), columns=df.columns)
# Data Preparation
X = df_repaired.drop(["Potability"], axis=1)
y = df_repaired["Potability"]
# Split the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
# Train the KNN model
k = 9 # Choose the number of neighbors (you can change this)
model = KNeighborsClassifier(n_neighbors=k)
model.fit(X_train, y_train)
# Streamlit App
st.title("Water Portability Prediction with KNN")
# Sidebar for user input
c4 = st.columns(3)
c5 = st.columns(3)
c6 = st.columns(3)
ph1 = c4[0].slider("pH value", 0.0, 14.0, 7.0)
hardness1 = c4[1].slider("hardness", 0, 500, 250)
solids1 = c4[2].slider("solids", 0, 50000, 25000)
chloramines1 = c5[0].slider("chloramines", 0.0, 15.0, 7.5)
sulfate1 = c5[1].slider("sulfate", 0, 500, 250)
conductivity1 = c5[2].slider("conductivity", 100, 1000, 550)
organic_carbon1 = c6[0].slider("organic carbon", 0, 50, 25)
trihalomethanes1 = c6[1].slider("trihalomethanes", 0.0, 150.0, 75.0)
turbidity1 = c6[2].slider("turbidity", 0.0, 10.0, 5.0)
# Create a DataFrame for prediction
input_data1 = {
"ph": ph1,
"Hardness": hardness1,
"Solids": solids1,
"Chloramines": chloramines1,
"Sulfate": sulfate1,
"Conductivity": conductivity1,
"Organic_carbon": organic_carbon1,
"Trihalomethanes": trihalomethanes1,
"Turbidity": turbidity1
}
input_df1 = pd.DataFrame([input_data1])
# Predict using the model
prediction1 = model.predict_proba(input_df1)
# Display the prediction result
st.header("Prediction Result")
st.write(f"The predicted Potability is: {prediction1[0][1]*100:.2f} %")
# Optional: Show the model's metrics
y_pred = model.predict(X_test)
mae = mean_absolute_error(y_test, y_pred)
mse = mean_squared_error(y_test, y_pred)
st.header("Model Evaluation")
st.write(f"Mean Absolute Error (MAE): {mae:.4f}")
st.write(f"Mean Squared Error (MSE): {mse:.4f}")
def predict_ml(df):
x = df.drop(['Potability'], axis='columns')
y = df.Potability
features_scaler = MinMaxScaler()
features = features_scaler.fit_transform(x)
#features
model_params = {
'linear_regression': {
'model': LinearRegression(),
'params': {}
},
'logistic_regression' : {
'model': LogisticRegression(solver='liblinear',multi_class='auto'),
'params': {
'C': [1,5,10]
}
},
'svm': {
'model': SVC(gamma='auto'),
'params' : {
'C': [1,10,20,30,50],
'kernel': ['rbf','linear','poly']
}
},
'KNN' : {
'model': KNeighborsClassifier(),
'params': {
'n_neighbors': [3,7,11,13]
}
},
'random_forest': {
'model': RandomForestClassifier(),
'params' : {
'n_estimators': [10,50,100]
}
}
}
scores = []
for model_name, mp in model_params.items():
clf = GridSearchCV(mp['model'], mp['params'], cv=5, return_train_score=False)
clf.fit(features, y)
scores.append({
'model': model_name,
'best_score': abs(clf.best_score_), #abs should not be here, just for removing error, this is not correct,
'best_params': clf.best_params_
})
df_score = pd.DataFrame(scores,columns=['model','best_score','best_params'])
# Create a bar plot
fig, ax = plt.subplots()
sns.barplot(x="model", y="best_score", data=df_score, ax=ax)
plt.ylim(0, 1)
plt.title("Model Scores")
plt.xlabel("Model")
# Rotate x-axis labels
plt.xticks(rotation=90)
plt.ylabel("Best Score")
# Display the plot in Streamlit
st.pyplot(fig)
#write best scores
st.write(df_score)
def summary(df):
# Columns Summary
st.subheader('| SUMMARY')
col1, col2 = st.columns([2, 1])
# column 1 - Describe
with col1:
st.write(df.describe())
# column 2 - Potability Pie
with col2:
col = len(df.columns)-1
st.write('PARAMETERS : ',col)
row = len(df)
st.write('TOTAL DATA : ', row)
st.write("Potability Distribution (Pie Chart)")
potability_counts = df['Potability'].value_counts()
fig1, ax1 = plt.subplots()
ax1.pie(potability_counts, labels=potability_counts.index, autopct='%1.1f%%', startangle=90)
ax1.axis('equal')
st.pyplot(fig1)
def missingdata(df):
# Columns Summary
st.subheader('| SUMMARY')
col1, col2 = st.columns([1, 2])
# column 1 - Describe missing data
with col1:
st.write(df.isnull().sum())
# column 2 - Potability Pie
with col2:
st.write('Heatmap of Missing Values: ')
sns.heatmap(df.isna(), cmap="flare")
#sns.heatmap(df.corr(), annot=True, cmap='coolwarm')
heatmap_fig = plt.gcf() # Get the current figure
st.pyplot(heatmap_fig)
def fill_data_median(df):
df_old = df
col1, col2 = st.columns([1, 1])
# column 1 - Describe missing data
with col1:
st.write("Before Fillup (df.isna())")
fig1, ax = plt.subplots()
sns.heatmap(df_old.isna(), cmap="plasma")
st.write(fig1)
#Fill up data with median
df['ph'].fillna(value=df['ph'].median(),inplace=True)
df['Sulfate'].fillna(value=df['Sulfate'].median(),inplace=True)
df['Trihalomethanes'].fillna(value=df['Trihalomethanes'].median(),inplace=True)
# column 2 - Potability Pie
with col2:
st.write("After Fillup with Median (df.isna())")
fig2, ax = plt.subplots()
sns.heatmap(df.isna(), cmap="plasma")
st.write(fig2)
return df
def fill_data_KNN(df):
df_old = df
col1, col2 = st.columns([1, 1])
# column 1 - Describe missing data
with col1:
st.write("Before Fillup (df.isna())")
fig1, ax = plt.subplots()
sns.heatmap(df_old.isna(), cmap="plasma")
st.write(fig1)
#Fill up data with KNN
my_imputer = KNNImputer(n_neighbors=5, weights='distance', metric='nan_euclidean')
df_repaired = pd.DataFrame(my_imputer.fit_transform(df_old), columns=df_old.columns)
# column 2 - Potability Pie
with col2:
st.write("After Fillup with KNNImputer (df.isna())")
fig2, ax = plt.subplots()
sns.heatmap(df_repaired.isna(), cmap="plasma")
st.write(fig2)
return df_repaired
def main():
#intro flag
intro = 1;
st.sidebar.title('CMSE 830 : Midterm Project')
st.sidebar.write('Developed by Md Arifuzzaman Faisal')
# st.header("Upload your CSV data file")
# data_file = st.file_uploader("Upload CSV", type=["csv"])
# if data_file is not None:
df = pd.read_csv("water_potability.csv")
st.sidebar.header("Visualizations")
#show info of the dataset
visual1 = st.sidebar.checkbox('Exploratory Data Analysis (EDA)')
if visual1:
plot_options = ["Correlation Heat Map", "Joint Plot of Columns","Histogram of Column", "Pair Plot", "PairGrid Plot", "Box Plot of Column", "3D Scatter Plot"]
selected_plot = st.sidebar.selectbox("Choose a plot type", plot_options)
if selected_plot == "Correlation Heat Map":
st.write("Correlation Heatmap:")
#plt.figure(figsize=(10, 10))
sns.heatmap(df.corr(), annot=True, cmap='coolwarm')
heatmap_fig = plt.gcf() # Get the current figure
st.pyplot(heatmap_fig)
elif selected_plot == "Joint Plot of Columns":
x_axis = st.sidebar.selectbox("Select x-axis", df.columns, index=0)
y_axis = st.sidebar.selectbox("Select y-axis", df.columns, index=1)
st.write("Joint Plot:")
jointplot = sns.jointplot(data = df, x=df[x_axis], y=df[y_axis], hue="Potability")
#sns.scatterplot(data = df, x=df[x_axis], y=df[y_axis], hue="Potability", ax=ax)
st.pyplot(jointplot)
elif selected_plot == "Histogram of Column":
column = st.sidebar.selectbox("Select a column", df.columns)
bins = st.sidebar.slider("Number of bins", 5, 100, 20)
st.write("Histogram:")
fig, ax = plt.subplots()
sns.histplot(data=df, x=column, hue="Potability",bins=bins, kde=True)
st.pyplot(fig)
elif selected_plot == "Pair Plot":
st.subheader("Pair Plot")
selected_box = st.multiselect('Select variables:', [col for col in df.columns if col != 'Potability'])
selected_data = df[selected_box + ['Potability']] # Add 'Potability' column
all_columns = selected_data.columns
exclude_column = 'Potability'
dims = [col for col in all_columns if col != exclude_column]
fig = px.scatter_matrix(selected_data, dimensions=dims, title="Pair Plot", color='Potability')
fig.update_layout(plot_bgcolor="white")
st.plotly_chart(fig)
elif selected_plot == "PairGrid Plot":
st.subheader("Pair Plot")
selected_box = st.multiselect('Select variables:', [col for col in df.columns if col != 'Potability'],default=['ph'])
selected_data = df[selected_box + ['Potability']] # Add 'Potability' column
# Create a PairGrid
g = sns.PairGrid(selected_data, hue='Potability')
g.map_upper(plt.scatter)
g.map_diag(plt.hist, histtype="step", linewidth=2, bins=30)
g.map_lower(plt.scatter)
g.add_legend()
# Display the PairGrid plot
st.pyplot(plt.gcf())
elif selected_plot == "Box Plot of Column":
column = st.sidebar.selectbox("Select a column", df.columns)
st.write("Box Plot:")
fig, ax = plt.subplots()
sns.boxplot(df[column], ax=ax)
st.pyplot(fig)
elif selected_plot == "3D Scatter Plot":
x_axis = st.sidebar.selectbox("Select x-axis", df.columns, index=0)
y_axis = st.sidebar.selectbox("Select y-axis", df.columns, index=1)
z_axis = st.sidebar.selectbox("Select z-axis", df.columns, index=2)
st.subheader("3D Scatter Plot")
fig = px.scatter_3d(df, x=x_axis, y=y_axis, z=z_axis, color='Potability')
st.plotly_chart(fig)
intro = 0
st.sidebar.header("Missing Data Analysis")
#show info of the dataset
misdata = st.sidebar.checkbox('Summary of Missing Data')
if misdata:
missingdata(df)
intro = 0
st.sidebar.header("Treatment of Missing Data")
#show info of the dataset
#fill_data = st.sidebar.checkbox('Fill Data')
#if fill_data:
#fill_median = st.sidebar.checkbox('Fill Data Using Median')
#if fill_median:
#df1 = fill_data_median(df)
#predict_ml(df1)
fill_KNN = st.sidebar.checkbox('Fill Data Using KNN Imputer')
if fill_KNN:
df2 = fill_data_KNN(df)
#predict_ml(df2)
intro = 0
st.sidebar.header("Prediction")
#Predict with Linear Regression
predict = st.sidebar.checkbox('Predict Potability Using Linear Regression')
if predict:
predict_linear_regression(df)
intro = 0
#Predict with KNN
predict = st.sidebar.checkbox('Predict Potability Using KNN')
if predict:
predict_KNN(df)
intro = 0
#show about the dataset
#show = st.sidebar.checkbox('Show Introduction')
#if show:
#intro=1;
if intro:
st.subheader("Water Potability! Is the water safe for drink?")
#tabs
intro_tab, goal_tab, describe_tab, hiplot_tab, significance_tab, con_tab = st.tabs(["Introduction", "Project Goal", "Describe the Dataset", "HiPlot", "Project Significance","Conclusion"])
with intro_tab:
col1, col2 = st.columns([1, 1])
with col1:
st.image("dw.jpg", caption="Is the water safe for drink?", use_column_width=True)
with col2:
st.write("Access to safe drinking-water is essential to health, a basic human right and a component of effective policy for health protection. This is important as a health and development issue at a national, regional and local level. In some regions, it has been shown that investments in water supply and sanitation can yield a net economic benefit, since the reductions in adverse health effects and health care costs outweigh the costs of undertaking the interventions.")
st.markdown('[Source : Kaggle Dataset](https://www.kaggle.com/datasets/adityakadiwal/water-potability)')
# Add a slider for selecting the number of rows to display
num_rows = st.slider("Number of Rows", 1, 3276, 100)
# Display the selected number of rows
st.write(f"Displaying top {num_rows} rows:")
st.write(df.head(num_rows))
with goal_tab:
st.write("The main objective of this mid-term project is to conduct a thorough analysis of the Water Quality dataset in order to assess the safety of water sources for consumption. Specifically, our aim is to develop a predictive model that can accurately determine the drinkability of water based on various comprehensive water quality parameters.")
col1, col2 = st.columns([1, 1])
with col1:
st.subheader('| SUMMARY')
col = len(df.columns)-1
st.write('PARAMETERS : ',col)
row = len(df)
st.write('TOTAL DATA : ', row)
st.write("Potability Distribution (Pie Chart)")
potability_counts = df['Potability'].value_counts()
fig1, ax1 = plt.subplots()
ax1.pie(potability_counts, labels=potability_counts.index, autopct='%1.1f%%', startangle=90)
ax1.axis('equal')
st.pyplot(fig1)
with col2:
st.write("This research aims to determine if a comprehensive analysis of water quality parameters can accurately predict the drinkability of water sources. Additionally, we seek to understand how the findings from this analysis can contribute to addressing the critical concern of ensuring safe drinking water for everyone. The significance of this project lies in its potential to have a direct impact on public health and well-being. Access to clean and safe drinking water is a basic human right, and by conducting this analysis, we hope to provide valuable insights that can inform water management decisions and help ensure the provision of safe drinking water to communities in need.")
with describe_tab:
st.write(df.describe())
with hiplot_tab:
# Convert the DataFrame to a HiPlot Experiment
exp = hip.Experiment.from_dataframe(df)
# Render the HiPlot experiment in Streamlit
st.components.v1.html(exp.to_html(), width=900, height=600, scrolling=True)
with significance_tab:
col1, col2 = st.columns([1, 1])
with col1:
st.write("This project holds great significance and is worthy of completion for multiple reasons.")
st.write("Firstly, it addresses a social concern, which is safe drinking water. Access to safe water is essential for human health and well-being.")
with col2:
st.image("wc.jpg", use_column_width=True)
st.write("Secondly, the analysis of the Water Quality dataset has the potential to save lives by identifying unsafe water sources. By using data analysis techniques, the project can detect patterns and indicators of water contamination, allowing for early intervention and prevention measures to be implemented.")
st.write("Thirdly, the project offers valuable insights for water management and public health protection. By analyzing the dataset, it can provide information on the factors that contribute to water quality issues, enabling authorities and organizations to make informed decisions regarding water treatment, distribution, and policy-making.")
st.write("Lastly, the development of a user-friendly web app provides a simple and accessible interface for accessing water drinkability predictions to a wide range of people.")
with con_tab:
st.write("In this project, we conducted a thorough Exploratory Data Analysis (EDA) on the water portability dataset. Through visualizations and statistical summaries, we gained valuable insights into the chemical attributes influencing water quality. Key factors such as pH levels, Chloramines, and Solids content were analyzed in depth. The correlation heatmap provided a clear understanding of feature relationships. This EDA serves as a solid foundation for further analysis and potential model development.")
st.write("The dataset used in this project contains information on nine chemical attributes: pH, Hardness, Solids, Chloramines, Sulfate, Conductivity, Organic Carbon, Trihalomethanes, and Turbidity. These attributes were crucial in training our models to predict the water potability accurately.This concise conclusion highlights the main achievements of your EDA project, emphasizing the importance of the insights gained for future analyses or model development.")
if __name__ == "__main__":
main()
|
faisalece/CMSE830_Midterm_Project
|
app.py
|
app.py
|
py
| 21,083 |
python
|
en
|
code
| 0 |
github-code
|
50
|
16139769924
|
# 2022.01.21
import numpy as np
import faiss
class ProductQuantizer():
def __init__(self, n_codes, code_size=1):
self.log_n_codes = (int)(np.log2(n_codes-1))+1
self.n_codes = pow(2, self.log_n_codes)
self.code_size = code_size
self.dim = -1
self.codebook = None
def fit(self, X):
X = X.reshape(-1, X.shape[-1])
self.dim = X.shape[-1]
pq = faiss.ProductQuantizer(self.dim, self.code_size, self.log_n_codes)
pq.train(X)
self.codebook = faiss.vector_to_array(pq.centroids).reshape(pq.M, pq.ksub, pq.dsub)
return self
def predict(self, X):
S = (list)(X.shape)
S[-1] = -1
X = X.reshape(-1, X.shape[-1])
pq = faiss.ProductQuantizer(self.dim, self.code_size, self.log_n_codes)
faiss.copy_array_to_vector(self.codebook.ravel(), pq.centroids)
codes = pq.compute_codes(X)
return codes.reshape(S)
def inverse_predict(self, codes):
S = (list)(codes.shape)
S[-1] = -1
codes = codes.reshape(-1, codes.shape[-1])
pq = faiss.ProductQuantizer(self.dim, self.code_size, self.log_n_codes)
faiss.copy_array_to_vector(self.codebook.ravel(), pq.centroids)
X = pq.decode(codes)
return X.reshape(S)
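# Usage sketch (illustrative, not part of the original file): quantize random
# 64-dimensional vectors with 8 sub-quantizers of 256 codes each and report
# the mean squared reconstruction error.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X = rng.randn(2000, 64).astype('float32')
    pq = ProductQuantizer(n_codes=256, code_size=8)
    codes = pq.fit(X).predict(X)
    X_rec = pq.inverse_predict(codes)
    print('codes shape:', codes.shape)
    print('reconstruction MSE:', float(np.mean((X - X_rec) ** 2)))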
|
yifan-fanyi/Func-Pool
|
ProductQuantizer.py
|
ProductQuantizer.py
|
py
| 1,291 |
python
|
en
|
code
| 2 |
github-code
|
50
|
32053161555
|
# pip3.10 install openpyxl
import openpyxl
import io
# Path to the downloaded byte file
archivo_byte = 'data2.net_7e80c8ad-b3b2-4fd9-90e6-35791b123e5e'
# Open the byte file
with open(archivo_byte, 'rb') as f:
    contenido_byte = io.BytesIO(f.read())
# Load the byte content into openpyxl
libro_excel = openpyxl.load_workbook(filename=contenido_byte)
# Do something with the Excel workbook
hoja = libro_excel.active
print(hoja['A1'].value)
# Save the Excel workbook
libro_excel.save('newexcel2.xlsx')
|
GerardoRosas-27/examplesPy
|
converteByteToExcel.py
|
converteByteToExcel.py
|
py
| 504 |
python
|
es
|
code
| 0 |
github-code
|
50
|
27351665750
|
import socket
import psutil
dsk = psutil.disk_usage('/')
F = dsk.free  # store the free disk space (in bytes) in F
FM = F/1000000  # divide by 1,000,000 to convert the value to MB
with socket.socket(socket.AF_INET,socket.SOCK_STREAM) as s:
    s.connect(('192.168.0.57', 50007))
    # message
    s.sendall(b'Sensor Connected')
    data = s.recv(1024)
    print(repr(data))
    print("Free space (MB)", FM)
|
KanekoTW/Python
|
socket/clienthdd.py
|
clienthdd.py
|
py
| 397 |
python
|
ja
|
code
| 0 |
github-code
|
50
|
7422846475
|
import random
# prompt the user to enter the maximum number that can be guessed
max_num = int(input("Masukkan angka terbesar yang diinginkan: "))
# randomly choose a number to be guessed
number = random.randint(1, max_num)
# set the initial number of guesses to zero
num_guesses = 0
# set the initial range of possible numbers to be all numbers between 1 and max_num
low = 1
high = max_num
# prompt the user to start guessing
print("Sedang mengacak sebuah angka antara 1 dan", max_num)
# keep looping until the computer guesses the correct number
while True:
# have the computer make a guess
guess = (low + high) // 2
# increment the number of guesses
num_guesses += 1
# check if the guess is correct
if guess == number:
print("Komputer berhasil menebaknya! Angka acak yang kamu dapatkan adalah", number)
print("Komputer membutuhkan", num_guesses, "tebakan untuk menebak angka yang benar.")
break
    # give the computer a hint if its guess was too low or too high;
    # exclude the guessed value itself, otherwise the search can loop forever
    elif guess < number:
        print("Tebakan komputer terlalu rendah. Mengubah range angka yang memungkinkan.")
        low = guess + 1
    elif guess > number:
        print("Tebakan komputer terlalu tinggi. Mengubah range angka yang memungkinkan.")
        high = guess - 1
|
lunaticbugbear/guess-the-number
|
guess_computer.py
|
guess_computer.py
|
py
| 1,247 |
python
|
en
|
code
| 0 |
github-code
|
50
|
43161433239
|
from django.conf.urls import url
from scouts.sub_tasks.api import views
urlpatterns = (
# MoveOut Sub Tasks
url(r'^move_out/remarks/$', views.MoveOutRemarkUpdateView.as_view()),
url(r'^move_out/amenity_check/$', views.MoveOutAmenitiesCheckupRetrieveUpdateView.as_view()),
# PropertyOnBoarding Sub Tasks
url(r'^property_onboard/house_address/$', views.PropertyOnBoardHouseAddressCreateView.as_view()),
url(r'^property_onboard/house_photos/$', views.PropertyOnBoardHousePhotosUploadView.as_view()),
url(r'^property_onboard/house_amenities/$', views.PropertyOnBoardHouseAmenitiesUpdateView.as_view()),
url(r'^property_onboard/house_basic_details/$', views.PropertyOnBoardHouseBasicDetailsCreateView.as_view()),
url(r'^property_onboard/self_task/$', views.create_property_on_boarding_scout_task_by_scout_himself),
)
|
HalanxDev/Halanx-Scout-Backend
|
scouts/sub_tasks/urls.py
|
urls.py
|
py
| 852 |
python
|
en
|
code
| 0 |
github-code
|
50
|
11607558430
|
# '''
# Homework 1 _ Setup, Variables, Data Types
# Recommended exercises - difficulty level: Easy.
# 1. Re-watch meeting 1 and take notes in case you missed anything.
# 2. Watch from the video 'First Steps in Programming':
# - Variables and Types;
# - Operators and Flow Control.
# This way, by the LIVE meeting it will already be the 2nd time you hear the
# concepts and they will certainly stick better in your mind.
# Link: https://www.itfactory.ro/8174437-intro-in-programare/
# '''
# '''
# HOMEWORK 1 Mandatory exercises - difficulty level: Easy to Medium:
# '''
# 1. In a comment, explain in your own words what a variable is.
# A variable is a piece of data stored in a computer's memory
# '''
# 2. Declare and initialize one variable of each of the following types:
# - string
# - int
# - float
# - bool
# Note: the values are up to you.
# '''
nume = 'Sergiu'
prenume = 'Gavrila-Ursa'
varsta = 39
inaltime = 1.79
saten = True
# print('Ma numesc ' + nume + ' am varsta de ' + str(varsta) + ' ani ' + ' si inaltimea de ' + str(inaltime) + ' si sunt ' + str('saten'))
# print(f'{nume} \n{varsta} \n{inaltime} \n{saten}')
# 3. Use the type function to check that they have the expected data type.
# nume = str('Sergiu')
# print(type(nume))
# varsta = int('39')
# print(type(varsta))
# inaltime = float(1.79)
# print(type(inaltime))
# saten = bool(True)
# print(type(saten))
'''
4. Round the float using the round() function and save the change back into
the same variable (overwrite):
- Check its type.
'''
# print(round(inaltime, 1))
# inaltime = float(1.7)
# print(type(inaltime))
'''
5. Use print() and print 4 sentences to the console using the 4 variables.
Resolve the type mismatches any way you like.
'''
# print('Numele meu este ' + nume)
# print('Am varsta de ' + str(varsta) + ' ani.')
# print('Inaltimea mea este de ' + str(inaltime))
# print('Culoarea parului meu este satena ' + str(saten))
# '''
# 6. Read from the keyboard:
# - the last name;
# - the first name.
# Display: 'The full name has x characters'.
# '''
# nume = input('Introdu numele\n')
# prenume = input('Introdu prenumele\n')
# lung_nume = len(nume) + len(prenume)
# print(f'Numele complet este {len(nume + prenume)}')
'''
7. Read from the keyboard:
- the length;
- the width.
Display: 'The area of the rectangle is x'.
'''
# lungimea = int(input('Introdu lungimea\n'))
# latimea = int(input('Introdu latimea\n'))
# aria = lungimea * latimea
# print('Aria dreptunghiului este', aria)
'''
8. Given the string: 'Coral is either the stupidest animal or the smartest rock':
- display how many times the word 'the' appears;
9. Same string.
● Display how many times the word 'the' appears;
● Print the result.
'''
# narativ = 'Coral is either the stupidest animal or the smartest rock'
# print(narativ.count(' the'))
# print(narativ.replace('the', 'THE', 3))
'''
# 10. Same string.
# ● Use an assert to check whether this string contains only numbers.
# '''
# narativ = 'Coral is either the stupidest animal or the smartest rock'
# print(type(narativ))
# assert narativ == str('Coral is either the stupidest animal or the smartest rock')
# print('narativul este un string')
# assert narativ == int('Coral is either the stupidest animal or the smartest rock')
# print('narativul contine doar numere')
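# Hedged sketch of one way to do exercise 10 (added for illustration, not taken
# from the original file): str.isdigit() is True only if every character is a digit,
# so the assert would fail for this sentence and is left commented out here.
narativ_check = 'Coral is either the stupidest animal or the smartest rock'
print(narativ_check.isdigit())  # False
# assert narativ_check.isdigit(), 'the string does not contain only numbers'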
'''
Optional Exercises - difficulty level: Medium to Hard
(you might need Google).
'''
'''
1. Exercise:
- read an odd-length string from the keyboard;
- display the middle character.
'''
# cuvant = input('Introdu cuvantul\n')
# lungime_cuvant = len(cuvant)
# print(lungime_cuvant)
# print(cuvant[2])
# print(f'Caracterul din mijloc este ')  # - see the solved homework (and the sketch below)
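# Small illustrative sketch (uses a hardcoded example instead of input() so it
# runs without interaction): the middle index of an odd-length string is len // 2.
cuvant_demo = 'abcde'
print(f'The middle character is {cuvant_demo[len(cuvant_demo) // 2]}')  # c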
# # 2. Using assert, check whether a string is a palindrome.
# x = input('palindrom\n')
# assert x == x[::-1]
# print('este un palindrom')
'''
3. Using a single line of code:
- read a string from the keyboard (e.g.: 'alabala portocala');
- save each word into a variable;
- print both variables to check.
'''
# glasul_copilariei = input('Introdu\n')
# print(glasul_copilariei)
# glasul = input('Alabala\n')
# copilariei = input('Portocala\n')
# print(glasul)
# print(copilariei)
# print(glasul + ' ' + copilariei)
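# One-line sketch for exercise 3 (illustrative; the hardcoded string stands in
# for input() and the variable names word1/word2 are just examples):
word1, word2 = 'alabala portocala'.split()
print(word1, word2)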
'''
4. Exercise:
- read a string from the keyboard (e.g. alabala portocala);
- save the first character into a variable - whatever it is, try
with 2 different strings;
- capitalize this character everywhere, except for the first and the last
character => alAbAlA portocAla.
'''
# myStr = input('alabala portocala\n')
# s = myStr[1:16].replace('a', 'A')
# print(f'{myStr[0]}{s}{myStr[16]}')
# # with Alina's help
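# A more general sketch for exercise 4 (illustrative, not from the original file):
# it works for any input string, not only for the 17-character example above.
text_demo = 'alabala portocala'
first_char = text_demo[0]
print(text_demo[0] + text_demo[1:-1].replace(first_char, first_char.upper()) + text_demo[-1])  # alAbAlA portocAla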
'''
5. Exercise:
- read a username from the keyboard;
- read a password;
- display: 'The password for user x is ***** and has x characters';
- ***** must be computed dynamically: whatever the password length, it has to
display correctly.
e.g.: password abc => ***
password abcd => ****
'''
# User= input("User:")
# Parola = input("Parola:")
# Lungime_parola=len(Parola)
# print(f'Parola pentru Userul {User} este {Lungime_parola * "*"} si are {len(Parola)} caractere')
# # with Cosmin's help
|
GavrilaSergiuGVS/TESTGH
|
tema1.py
|
tema1.py
|
py
| 5,419 |
python
|
ro
|
code
| 0 |
github-code
|
50
|
23589033627
|
import telebot
from django.shortcuts import render, redirect
from django.http import HttpResponse
from . import models
# Create your views here.
bot = telebot.TeleBot('5459935331:AAGVWpnqIK_bYMatPGDtqTWS8iPiWZgTJBc')
def home_page(request):
all_category = models.Category.objects.all()
return render(request, 'index.html',
{'all_categories': all_category})
def get_all_products(request):
all_products = models.Product.objects.all()
return render(request, 'product_index.html', {'all_products': all_products})
def get_exact_product(request, pk):
current_product = models.Product.objects.get(product_name=pk)
return render(request, 'get_exact_product_index.html', {'current_product': current_product})
def get_exact_category(request, pk):
current_category = models.Category.objects.get(id=pk)
category_products = models.Product.objects.filter(product_category = current_category)
return render(request, 'get_exact_category_index.html', {'category_products': category_products})
def get_search_product(request, pk):
current_product = models.Product.objects.get(product_name=pk)
return render(request, 'search.html', {'current_product': current_product})
def search_exact_product(request):
if request.method == 'POST':
get_product = request.POST.get('search_product')
try:
models.Product.objects.get(product_name=get_product)
return redirect(f'/search/{get_product}')
except:
return redirect('/')
def add_product_to_user(request, pk):
if request.method == 'POST':
checker = models.Product.objects.get(id=pk)
if checker.product_count >= int(request.POST.get('pr_count')):
models.UserCart.objects.create(user_id=request.user.id,
user_product = checker,
user_product_quantity = request.POST.get('pr_count')).save()
return redirect(f'/products/')
else:
return redirect(f'/product/{checker.product_name}')
def get_exact_card(request):
id = request.user.id
all_card = models.UserCart.objects.filter(user_id = id)
return render(request, 'user_card.html', {'all_card': all_card})
def delete_exact_user_cart(request, pk):
product_to_delete = models.Product.objects.get(id = pk)
models.UserCart.objects.filter(user_id=request.user.id,
user_product = product_to_delete).delete()
return redirect('/card')
def shopping_cart(request):
return render(request, 'registratsiya.html')
def same_cart(request):
if request.method == 'POST':
user_id = 1006779184
total = 0
user = models.UserCart.objects.filter(user_id = request.user.id)
text = ' --------- xaridor ----'
text += f'firstname :{request.POST.get("firstname")} \nlastname:{request.POST.get("lastname")}\n' \
f'Email :{request.POST.get("email")}\nManzil :{request.POST.get("address")}\n' \
f'Tolov turi :{request.POST.get("address_oplata")}\n'
text += '----- products-----'
for users in user:
text += f'Tovar :{users.user_product.product_name} \n' \
f'Narxi:{users.user_product.product_price}\n' \
f'Soni :{users.user_product_quantity}\nZakaz qilingan sanasi :{users.cart_date}\n' \
f'Xaridor :{users.user_id}\n'
total = int(users.user_product_quantity) * float(users.user_product.product_price) + total
text += f'summa = {total}\n'
bot.send_message(user_id, text)
models.UserCart.objects.filter(user_id=request.user.id).delete()
return redirect('/card')
|
khurshid02/internet_magazin_django
|
main_page/views.py
|
views.py
|
py
| 3,801 |
python
|
en
|
code
| 0 |
github-code
|
50
|
8876495576
|
import click
from flask import Flask
from flask.cli import AppGroup
# from .models.common import db
# from flask_sqlalchemy import SQLAlchemy
from app.models import (
db, Stock
)
from app.logic.stock import stock_init_db
stock_cli = AppGroup('stock')
@stock_cli.command('init-db')
def cmd_stock_init_db():
"""
$ flask stock init-db --- populates stocks table
"""
stock_init_db()
# DB Create based on Models
mydb_cli = AppGroup('mydb')
@mydb_cli.command('create_all')
def db_create():
print(db)
db.create_all()
@mydb_cli.command('drop_all')
def db_drop():
print(db)
db.drop_all()
def init_cli(application: Flask):
application.cli.add_command(stock_cli)
application.cli.add_command(mydb_cli)
|
jackalissimo/pipkoff
|
app/cli.py
|
cli.py
|
py
| 742 |
python
|
en
|
code
| 0 |
github-code
|
50
|
25588677373
|
# encoding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
import codecs
import datetime
import hashlib
import logging
import os
import shutil
import stat
import sys
import tempfile
import unicodedata
import unittest
from os.path import join as j
import mock
from io import StringIO
import bagit
logging.basicConfig(filename="test.log", level=logging.DEBUG)
stderr = logging.StreamHandler()
stderr.setLevel(logging.WARNING)
logging.getLogger().addHandler(stderr)
# But we do want any exceptions raised in the logging path to be raised:
logging.raiseExceptions = True
def slurp_text_file(filename):
with bagit.open_text_file(filename) as f:
return f.read()
class SelfCleaningTestCase(unittest.TestCase):
"""TestCase subclass which cleans up self.tmpdir after each test"""
def setUp(self):
super(SelfCleaningTestCase, self).setUp()
self.starting_directory = (
os.getcwd()
) # FIXME: remove this after we stop changing directories in bagit.py
self.tmpdir = tempfile.mkdtemp()
if os.path.isdir(self.tmpdir):
shutil.rmtree(self.tmpdir)
shutil.copytree("test-data", self.tmpdir)
def tearDown(self):
# FIXME: remove this after we stop changing directories in bagit.py
os.chdir(self.starting_directory)
if os.path.isdir(self.tmpdir):
# Clean up after tests which leave inaccessible files behind:
os.chmod(self.tmpdir, 0o700)
for dirpath, subdirs, filenames in os.walk(self.tmpdir, topdown=True):
for i in subdirs:
os.chmod(os.path.join(dirpath, i), 0o700)
shutil.rmtree(self.tmpdir)
super(SelfCleaningTestCase, self).tearDown()
@mock.patch(
"bagit.VERSION", new="1.5.4"
) # This avoids needing to change expected hashes on each release
class TestSingleProcessValidation(SelfCleaningTestCase):
def validate(self, bag, *args, **kwargs):
return bag.validate(*args, **kwargs)
def test_make_bag_sha1_sha256_manifest(self):
bag = bagit.make_bag(self.tmpdir, checksum=["sha1", "sha256"])
# check that relevant manifests are created
self.assertTrue(os.path.isfile(j(self.tmpdir, "manifest-sha1.txt")))
self.assertTrue(os.path.isfile(j(self.tmpdir, "manifest-sha256.txt")))
# check valid with two manifests
self.assertTrue(self.validate(bag, fast=True))
def test_make_bag_md5_sha256_manifest(self):
bag = bagit.make_bag(self.tmpdir, checksum=["md5", "sha256"])
# check that relevant manifests are created
self.assertTrue(os.path.isfile(j(self.tmpdir, "manifest-md5.txt")))
self.assertTrue(os.path.isfile(j(self.tmpdir, "manifest-sha256.txt")))
# check valid with two manifests
self.assertTrue(self.validate(bag, fast=True))
def test_make_bag_md5_sha1_sha256_manifest(self):
bag = bagit.make_bag(self.tmpdir, checksum=["md5", "sha1", "sha256"])
# check that relevant manifests are created
self.assertTrue(os.path.isfile(j(self.tmpdir, "manifest-md5.txt")))
self.assertTrue(os.path.isfile(j(self.tmpdir, "manifest-sha1.txt")))
self.assertTrue(os.path.isfile(j(self.tmpdir, "manifest-sha256.txt")))
# check valid with three manifests
self.assertTrue(self.validate(bag, fast=True))
def test_validate_flipped_bit(self):
bag = bagit.make_bag(self.tmpdir)
readme = j(self.tmpdir, "data", "README")
txt = slurp_text_file(readme)
txt = "A" + txt[1:]
with open(readme, "w") as r:
r.write(txt)
bag = bagit.Bag(self.tmpdir)
self.assertRaises(bagit.BagValidationError, self.validate, bag)
        # fast doesn't catch the flipped bit, since the oxum is the same
self.assertTrue(self.validate(bag, fast=True))
self.assertTrue(self.validate(bag, completeness_only=True))
def test_validate_fast(self):
bag = bagit.make_bag(self.tmpdir)
self.assertEqual(self.validate(bag, fast=True), True)
os.remove(j(self.tmpdir, "data", "loc", "2478433644_2839c5e8b8_o_d.jpg"))
self.assertRaises(bagit.BagValidationError, self.validate, bag, fast=True)
def test_validate_completeness(self):
bag = bagit.make_bag(self.tmpdir)
old_path = j(self.tmpdir, "data", "README")
new_path = j(self.tmpdir, "data", "extra_file")
os.rename(old_path, new_path)
bag = bagit.Bag(self.tmpdir)
self.assertTrue(self.validate(bag, fast=True))
with mock.patch.object(bag, "_validate_entries") as m:
self.assertRaises(
bagit.BagValidationError, self.validate, bag, completeness_only=True
)
self.assertEqual(m.call_count, 0)
def test_validate_fast_without_oxum(self):
bag = bagit.make_bag(self.tmpdir)
os.remove(j(self.tmpdir, "bag-info.txt"))
bag = bagit.Bag(self.tmpdir)
self.assertRaises(bagit.BagValidationError, self.validate, bag, fast=True)
def test_validate_slow_without_oxum_extra_file(self):
bag = bagit.make_bag(self.tmpdir)
os.remove(j(self.tmpdir, "bag-info.txt"))
with open(j(self.tmpdir, "data", "extra_file"), "w") as ef:
ef.write("foo")
bag = bagit.Bag(self.tmpdir)
self.assertRaises(bagit.BagValidationError, self.validate, bag, fast=False)
def test_validate_missing_directory(self):
bagit.make_bag(self.tmpdir)
tmp_data_dir = os.path.join(self.tmpdir, "data")
shutil.rmtree(tmp_data_dir)
bag = bagit.Bag(self.tmpdir)
with self.assertRaises(bagit.BagValidationError) as error_catcher:
bag.validate()
self.assertEqual(
"Expected data directory %s does not exist" % tmp_data_dir,
str(error_catcher.exception),
)
def test_validation_error_details(self):
bag = bagit.make_bag(
self.tmpdir, checksums=["md5"], bag_info={"Bagging-Date": "1970-01-01"}
)
readme = j(self.tmpdir, "data", "README")
txt = slurp_text_file(readme)
txt = "A" + txt[1:]
with open(readme, "w") as r:
r.write(txt)
bag = bagit.Bag(self.tmpdir)
got_exception = False
try:
self.validate(bag)
except bagit.BagValidationError as e:
got_exception = True
exc_str = str(e)
self.assertIn(
'data/README md5 validation failed: expected="8e2af7a0143c7b8f4de0b3fc90f27354" found="fd41543285d17e7c29cd953f5cf5b955"',
exc_str,
)
self.assertEqual(len(e.details), 1)
readme_error = e.details[0]
self.assertEqual(
'data/README md5 validation failed: expected="8e2af7a0143c7b8f4de0b3fc90f27354" found="fd41543285d17e7c29cd953f5cf5b955"',
str(readme_error),
)
self.assertIsInstance(readme_error, bagit.ChecksumMismatch)
self.assertEqual(readme_error.algorithm, "md5")
self.assertEqual(readme_error.path, "data/README")
self.assertEqual(readme_error.expected, "8e2af7a0143c7b8f4de0b3fc90f27354")
self.assertEqual(readme_error.found, "fd41543285d17e7c29cd953f5cf5b955")
if not got_exception:
self.fail("didn't get BagValidationError")
def test_validation_completeness_error_details(self):
bag = bagit.make_bag(
self.tmpdir, checksums=["md5"], bag_info={"Bagging-Date": "1970-01-01"}
)
old_path = j(self.tmpdir, "data", "README")
new_path = j(self.tmpdir, "data", "extra")
os.rename(old_path, new_path)
# remove the bag-info.txt which contains the oxum to force a full
# check of the manifest
os.remove(j(self.tmpdir, "bag-info.txt"))
bag = bagit.Bag(self.tmpdir)
got_exception = False
try:
self.validate(bag)
except bagit.BagValidationError as e:
got_exception = True
exc_str = str(e)
self.assertIn("Bag is incomplete: ", exc_str)
self.assertIn(
"bag-info.txt exists in manifest but was not found on filesystem",
exc_str,
)
self.assertIn(
"data/README exists in manifest but was not found on filesystem",
exc_str,
)
self.assertIn(
"data/extra exists on filesystem but is not in the manifest", exc_str
)
self.assertEqual(len(e.details), 3)
if e.details[0].path == "bag-info.txt":
baginfo_error = e.details[0]
readme_error = e.details[1]
else:
baginfo_error = e.details[1]
readme_error = e.details[0]
self.assertEqual(
str(baginfo_error),
"bag-info.txt exists in manifest but was not found on filesystem",
)
self.assertIsInstance(baginfo_error, bagit.FileMissing)
self.assertEqual(baginfo_error.path, "bag-info.txt")
self.assertEqual(
str(readme_error),
"data/README exists in manifest but was not found on filesystem",
)
self.assertIsInstance(readme_error, bagit.FileMissing)
self.assertEqual(readme_error.path, "data/README")
error = e.details[2]
self.assertEqual(
str(error), "data/extra exists on filesystem but is not in the manifest"
)
self.assertTrue(error, bagit.UnexpectedFile)
self.assertEqual(error.path, "data/extra")
if not got_exception:
self.fail("didn't get BagValidationError")
def test_bom_in_bagit_txt(self):
bag = bagit.make_bag(self.tmpdir)
BOM = codecs.BOM_UTF8
if sys.version_info[0] >= 3:
BOM = BOM.decode("utf-8")
with open(j(self.tmpdir, "bagit.txt"), "r") as bf:
bagfile = BOM + bf.read()
with open(j(self.tmpdir, "bagit.txt"), "w") as bf:
bf.write(bagfile)
bag = bagit.Bag(self.tmpdir)
self.assertRaises(bagit.BagValidationError, self.validate, bag)
def test_missing_file(self):
bag = bagit.make_bag(self.tmpdir)
os.remove(j(self.tmpdir, "data", "loc", "3314493806_6f1db86d66_o_d.jpg"))
self.assertRaises(bagit.BagValidationError, self.validate, bag)
def test_handle_directory_end_slash_gracefully(self):
bag = bagit.make_bag(self.tmpdir + "/")
self.assertTrue(self.validate(bag))
bag2 = bagit.Bag(self.tmpdir + "/")
self.assertTrue(self.validate(bag2))
def test_allow_extraneous_files_in_base(self):
bag = bagit.make_bag(self.tmpdir)
self.assertTrue(self.validate(bag))
f = j(self.tmpdir, "IGNOREFILE")
with open(f, "w"):
self.assertTrue(self.validate(bag))
def test_allow_extraneous_dirs_in_base(self):
bag = bagit.make_bag(self.tmpdir)
self.assertTrue(self.validate(bag))
d = j(self.tmpdir, "IGNOREDIR")
os.mkdir(d)
self.assertTrue(self.validate(bag))
def test_missing_tagfile_raises_error(self):
bag = bagit.make_bag(self.tmpdir)
self.assertTrue(self.validate(bag))
os.remove(j(self.tmpdir, "bagit.txt"))
self.assertRaises(bagit.BagValidationError, self.validate, bag)
def test_missing_manifest_raises_error(self):
bag = bagit.make_bag(self.tmpdir, checksums=["sha512"])
self.assertTrue(self.validate(bag))
os.remove(j(self.tmpdir, "manifest-sha512.txt"))
self.assertRaises(bagit.BagValidationError, self.validate, bag)
def test_mixed_case_checksums(self):
bag = bagit.make_bag(self.tmpdir, checksums=["md5"])
hashstr = {}
# Extract entries only for the payload and ignore
# entries from the tagmanifest file
for key in bag.entries.keys():
if key.startswith("data" + os.sep):
hashstr = bag.entries[key]
hashstr = next(iter(hashstr.values()))
manifest = slurp_text_file(j(self.tmpdir, "manifest-md5.txt"))
manifest = manifest.replace(hashstr, hashstr.upper())
with open(j(self.tmpdir, "manifest-md5.txt"), "wb") as m:
m.write(manifest.encode("utf-8"))
# Since manifest-md5.txt file is updated, re-calculate its
# md5 checksum and update it in the tagmanifest-md5.txt file
hasher = hashlib.new("md5")
contents = slurp_text_file(j(self.tmpdir, "manifest-md5.txt")).encode("utf-8")
hasher.update(contents)
with open(j(self.tmpdir, "tagmanifest-md5.txt"), "r") as tagmanifest:
tagman_contents = tagmanifest.read()
tagman_contents = tagman_contents.replace(
bag.entries["manifest-md5.txt"]["md5"], hasher.hexdigest()
)
with open(j(self.tmpdir, "tagmanifest-md5.txt"), "w") as tagmanifest:
tagmanifest.write(tagman_contents)
bag = bagit.Bag(self.tmpdir)
self.assertTrue(self.validate(bag))
def test_unsafe_directory_entries_raise_error(self):
bad_paths = None
# This could be more granular, but ought to be
# adequate.
if os.name == "nt":
bad_paths = (
r"C:\win32\cmd.exe",
"\\\\?\\C:\\",
"COM1:",
"\\\\.\\COM56",
"..\\..\\..\\win32\\cmd.exe",
"data\\..\\..\\..\\win32\\cmd.exe",
)
else:
bad_paths = (
"../../../secrets.json",
"~/.pgp/id_rsa",
"/dev/null",
"data/../../../secrets.json",
)
hasher = hashlib.new("md5")
corpus = "this is not a real checksum"
hasher.update(corpus.encode("utf-8"))
for bad_path in bad_paths:
bagit.make_bag(self.tmpdir, checksums=["md5"])
with open(j(self.tmpdir, "manifest-md5.txt"), "wb+") as manifest_out:
line = "%s %s\n" % (hasher.hexdigest(), bad_path)
manifest_out.write(line.encode("utf-8"))
self.assertRaises(bagit.BagError, bagit.Bag, self.tmpdir)
def test_multiple_oxum_values(self):
bag = bagit.make_bag(self.tmpdir)
with open(j(self.tmpdir, "bag-info.txt"), "a") as baginfo:
baginfo.write("Payload-Oxum: 7.7\n")
bag = bagit.Bag(self.tmpdir)
self.assertTrue(self.validate(bag, fast=True))
def test_validate_optional_tagfile(self):
bag = bagit.make_bag(self.tmpdir, checksums=["md5"])
tagdir = tempfile.mkdtemp(dir=self.tmpdir)
with open(j(tagdir, "tagfile"), "w") as tagfile:
tagfile.write("test")
relpath = j(tagdir, "tagfile").replace(self.tmpdir + os.sep, "")
relpath.replace("\\", "/")
with open(j(self.tmpdir, "tagmanifest-md5.txt"), "w") as tagman:
# Incorrect checksum.
tagman.write("8e2af7a0143c7b8f4de0b3fc90f27354 " + relpath + "\n")
bag = bagit.Bag(self.tmpdir)
self.assertRaises(bagit.BagValidationError, self.validate, bag)
hasher = hashlib.new("md5")
contents = slurp_text_file(j(tagdir, "tagfile")).encode("utf-8")
hasher.update(contents)
with open(j(self.tmpdir, "tagmanifest-md5.txt"), "w") as tagman:
tagman.write(hasher.hexdigest() + " " + relpath + "\n")
bag = bagit.Bag(self.tmpdir)
self.assertTrue(self.validate(bag))
# Missing tagfile.
os.remove(j(tagdir, "tagfile"))
bag = bagit.Bag(self.tmpdir)
self.assertRaises(bagit.BagValidationError, self.validate, bag)
def test_validate_optional_tagfile_in_directory(self):
bag = bagit.make_bag(self.tmpdir, checksums=["md5"])
tagdir = tempfile.mkdtemp(dir=self.tmpdir)
if not os.path.exists(j(tagdir, "tagfolder")):
os.makedirs(j(tagdir, "tagfolder"))
with open(j(tagdir, "tagfolder", "tagfile"), "w") as tagfile:
tagfile.write("test")
relpath = j(tagdir, "tagfolder", "tagfile").replace(self.tmpdir + os.sep, "")
relpath.replace("\\", "/")
with open(j(self.tmpdir, "tagmanifest-md5.txt"), "w") as tagman:
# Incorrect checksum.
tagman.write("8e2af7a0143c7b8f4de0b3fc90f27354 " + relpath + "\n")
bag = bagit.Bag(self.tmpdir)
self.assertRaises(bagit.BagValidationError, self.validate, bag)
hasher = hashlib.new("md5")
with open(j(tagdir, "tagfolder", "tagfile"), "r") as tf:
contents = tf.read().encode("utf-8")
hasher.update(contents)
with open(j(self.tmpdir, "tagmanifest-md5.txt"), "w") as tagman:
tagman.write(hasher.hexdigest() + " " + relpath + "\n")
bag = bagit.Bag(self.tmpdir)
self.assertTrue(self.validate(bag))
# Missing tagfile.
os.remove(j(tagdir, "tagfolder", "tagfile"))
bag = bagit.Bag(self.tmpdir)
self.assertRaises(bagit.BagValidationError, self.validate, bag)
def test_sha1_tagfile(self):
info = {"Bagging-Date": "1970-01-01", "Contact-Email": "[email protected]"}
bag = bagit.make_bag(self.tmpdir, checksum=["sha1"], bag_info=info)
self.assertTrue(os.path.isfile(j(self.tmpdir, "tagmanifest-sha1.txt")))
self.assertEqual(
"f69110479d0d395f7c321b3860c2bc0c96ae9fe8",
bag.entries["bag-info.txt"]["sha1"],
)
def test_validate_unreadable_file(self):
bag = bagit.make_bag(self.tmpdir, checksum=["md5"])
os.chmod(j(self.tmpdir, "data/loc/2478433644_2839c5e8b8_o_d.jpg"), 0)
self.assertRaises(bagit.BagValidationError, self.validate, bag, fast=False)
class TestMultiprocessValidation(TestSingleProcessValidation):
def validate(self, bag, *args, **kwargs):
return super(TestMultiprocessValidation, self).validate(
bag, *args, processes=2, **kwargs
)
@mock.patch(
"bagit.VERSION", new="1.5.4"
) # This avoids needing to change expected hashes on each release
class TestBag(SelfCleaningTestCase):
def test_make_bag(self):
info = {"Bagging-Date": "1970-01-01", "Contact-Email": "[email protected]"}
bagit.make_bag(self.tmpdir, bag_info=info, checksums=["md5"])
# data dir should've been created
self.assertTrue(os.path.isdir(j(self.tmpdir, "data")))
# check bagit.txt
self.assertTrue(os.path.isfile(j(self.tmpdir, "bagit.txt")))
bagit_txt = slurp_text_file(j(self.tmpdir, "bagit.txt"))
self.assertTrue("BagIt-Version: 0.97", bagit_txt)
self.assertTrue("Tag-File-Character-Encoding: UTF-8", bagit_txt)
# check manifest
self.assertTrue(os.path.isfile(j(self.tmpdir, "manifest-md5.txt")))
manifest_txt = slurp_text_file(j(self.tmpdir, "manifest-md5.txt")).splitlines()
self.assertIn("8e2af7a0143c7b8f4de0b3fc90f27354 data/README", manifest_txt)
self.assertIn(
"9a2b89e9940fea6ac3a0cc71b0a933a0 data/loc/2478433644_2839c5e8b8_o_d.jpg",
manifest_txt,
)
self.assertIn(
"6172e980c2767c12135e3b9d246af5a3 data/loc/3314493806_6f1db86d66_o_d.jpg",
manifest_txt,
)
self.assertIn(
"38a84cd1c41de793a0bccff6f3ec8ad0 data/si/2584174182_ffd5c24905_b_d.jpg",
manifest_txt,
)
self.assertIn(
"5580eaa31ad1549739de12df819e9af8 data/si/4011399822_65987a4806_b_d.jpg",
manifest_txt,
)
# check bag-info.txt
self.assertTrue(os.path.isfile(j(self.tmpdir, "bag-info.txt")))
bag_info_txt = slurp_text_file(j(self.tmpdir, "bag-info.txt"))
bag_info_txt = bag_info_txt.splitlines()
self.assertIn("Contact-Email: [email protected]", bag_info_txt)
self.assertIn("Bagging-Date: 1970-01-01", bag_info_txt)
self.assertIn("Payload-Oxum: 991765.5", bag_info_txt)
self.assertIn(
"Bag-Software-Agent: bagit.py v1.5.4 <https://github.com/LibraryOfCongress/bagit-python>",
bag_info_txt,
)
# check tagmanifest-md5.txt
self.assertTrue(os.path.isfile(j(self.tmpdir, "tagmanifest-md5.txt")))
tagmanifest_txt = slurp_text_file(
j(self.tmpdir, "tagmanifest-md5.txt")
).splitlines()
self.assertIn("9e5ad981e0d29adc278f6a294b8c2aca bagit.txt", tagmanifest_txt)
self.assertIn(
"a0ce6631a2a6d1a88e6d38453ccc72a5 manifest-md5.txt", tagmanifest_txt
)
self.assertIn("0a6ffcffe67e9a34e44220f7ebcb4baa bag-info.txt", tagmanifest_txt)
def test_make_bag_sha1_manifest(self):
bagit.make_bag(self.tmpdir, checksum=["sha1"])
# check manifest
self.assertTrue(os.path.isfile(j(self.tmpdir, "manifest-sha1.txt")))
manifest_txt = slurp_text_file(j(self.tmpdir, "manifest-sha1.txt")).splitlines()
self.assertIn(
"ace19416e605cfb12ab11df4898ca7fd9979ee43 data/README", manifest_txt
)
self.assertIn(
"4c0a3da57374e8db379145f18601b159f3cad44b data/loc/2478433644_2839c5e8b8_o_d.jpg",
manifest_txt,
)
self.assertIn(
"62095aeddae2f3207cb77c85937e13c51641ef71 data/loc/3314493806_6f1db86d66_o_d.jpg",
manifest_txt,
)
self.assertIn(
"e592194b3733e25166a631e1ec55bac08066cbc1 data/si/2584174182_ffd5c24905_b_d.jpg",
manifest_txt,
)
self.assertIn(
"db49ef009f85a5d0701829f38d29f8cf9c5df2ea data/si/4011399822_65987a4806_b_d.jpg",
manifest_txt,
)
def test_make_bag_sha256_manifest(self):
bagit.make_bag(self.tmpdir, checksum=["sha256"])
# check manifest
self.assertTrue(os.path.isfile(j(self.tmpdir, "manifest-sha256.txt")))
manifest_txt = slurp_text_file(
j(self.tmpdir, "manifest-sha256.txt")
).splitlines()
self.assertIn(
"b6df8058fa818acfd91759edffa27e473f2308d5a6fca1e07a79189b95879953 data/loc/2478433644_2839c5e8b8_o_d.jpg",
manifest_txt,
)
self.assertIn(
"1af90c21e72bb0575ae63877b3c69cfb88284f6e8c7820f2c48dc40a08569da5 data/loc/3314493806_6f1db86d66_o_d.jpg",
manifest_txt,
)
self.assertIn(
"f065a4ae2bc5d47c6d046c3cba5c8cdfd66b07c96ff3604164e2c31328e41c1a data/si/2584174182_ffd5c24905_b_d.jpg",
manifest_txt,
)
self.assertIn(
"45d257c93e59ec35187c6a34c8e62e72c3e9cfbb548984d6f6e8deb84bac41f4 data/si/4011399822_65987a4806_b_d.jpg",
manifest_txt,
)
def test_make_bag_sha512_manifest(self):
bagit.make_bag(self.tmpdir, checksum=["sha512"])
# check manifest
self.assertTrue(os.path.isfile(j(self.tmpdir, "manifest-sha512.txt")))
manifest_txt = slurp_text_file(
j(self.tmpdir, "manifest-sha512.txt")
).splitlines()
self.assertIn(
"51fb9236a23795886cf42d539d580739245dc08f72c3748b60ed8803c9cb0e2accdb91b75dbe7d94a0a461827929d720ef45fe80b825941862fcde4c546a376d data/loc/2478433644_2839c5e8b8_o_d.jpg",
manifest_txt,
)
self.assertIn(
"627c15be7f9aabc395c8b2e4c3ff0b50fd84b3c217ca38044cde50fd4749621e43e63828201fa66a97975e316033e4748fb7a4a500183b571ecf17715ec3aea3 data/loc/3314493806_6f1db86d66_o_d.jpg",
manifest_txt,
)
self.assertIn(
"4cb4dafe39b2539536a9cb31d5addf335734cb91e2d2786d212a9b574e094d7619a84ad53f82bd9421478a7994cf9d3f44fea271d542af09d26ce764edbada46 data/si/2584174182_ffd5c24905_b_d.jpg",
manifest_txt,
)
self.assertIn(
"af1c03483cd1999098cce5f9e7689eea1f81899587508f59ba3c582d376f8bad34e75fed55fd1b1c26bd0c7a06671b85e90af99abac8753ad3d76d8d6bb31ebd data/si/4011399822_65987a4806_b_d.jpg",
manifest_txt,
)
def test_make_bag_unknown_algorithm(self):
self.assertRaises(
ValueError, bagit.make_bag, self.tmpdir, checksum=["not-really-a-name"]
)
def test_make_bag_with_empty_directory(self):
tmpdir = tempfile.mkdtemp()
try:
bagit.make_bag(tmpdir)
finally:
shutil.rmtree(tmpdir)
def test_make_bag_with_empty_directory_tree(self):
tmpdir = tempfile.mkdtemp()
path = j(tmpdir, "test1", "test2")
try:
os.makedirs(path)
bagit.make_bag(tmpdir)
finally:
shutil.rmtree(tmpdir)
def test_make_bag_with_bogus_directory(self):
bogus_directory = os.path.realpath("this-directory-does-not-exist")
with self.assertRaises(RuntimeError) as error_catcher:
bagit.make_bag(bogus_directory)
self.assertEqual(
"Bag directory %s does not exist" % bogus_directory,
str(error_catcher.exception),
)
def test_make_bag_with_unreadable_source(self):
os.chmod(self.tmpdir, 0)
with self.assertRaises(bagit.BagError) as error_catcher:
bagit.make_bag(self.tmpdir, checksum=["sha256"])
self.assertEqual(
"Missing permissions to move all files and directories",
str(error_catcher.exception),
)
def test_make_bag_with_unreadable_subdirectory(self):
# We'll set this write-only to exercise the second permission check in make_bag:
os.chmod(j(self.tmpdir, "loc"), 0o200)
with self.assertRaises(bagit.BagError) as error_catcher:
bagit.make_bag(self.tmpdir, checksum=["sha256"])
self.assertEqual(
"Read permissions are required to calculate file fixities",
str(error_catcher.exception),
)
def test_make_bag_with_unwritable_source(self):
path_suffixes = ("", "loc")
for path_suffix in reversed(path_suffixes):
os.chmod(j(self.tmpdir, path_suffix), 0o500)
with self.assertRaises(bagit.BagError) as error_catcher:
bagit.make_bag(self.tmpdir, checksum=["sha256"])
self.assertEqual(
"Missing permissions to move all files and directories",
str(error_catcher.exception),
)
def test_make_bag_with_unreadable_file(self):
os.chmod(j(self.tmpdir, "loc", "2478433644_2839c5e8b8_o_d.jpg"), 0)
with self.assertRaises(bagit.BagError) as error_catcher:
bagit.make_bag(self.tmpdir, checksum=["sha256"])
self.assertEqual(
"Read permissions are required to calculate file fixities",
str(error_catcher.exception),
)
def test_make_bag_with_data_dir_present(self):
os.mkdir(j(self.tmpdir, "data"))
bagit.make_bag(self.tmpdir)
# data dir should now contain another data dir
self.assertTrue(os.path.isdir(j(self.tmpdir, "data", "data")))
def test_bag_class(self):
info = {"Contact-Email": "[email protected]"}
bag = bagit.make_bag(self.tmpdir, bag_info=info, checksums=["sha384"])
self.assertIsInstance(bag, bagit.Bag)
self.assertEqual(
set(bag.payload_files()),
set(
[
"data/README",
"data/si/2584174182_ffd5c24905_b_d.jpg",
"data/si/4011399822_65987a4806_b_d.jpg",
"data/loc/2478433644_2839c5e8b8_o_d.jpg",
"data/loc/3314493806_6f1db86d66_o_d.jpg",
]
),
)
self.assertEqual(
list(bag.manifest_files()), ["%s/manifest-sha384.txt" % self.tmpdir]
)
def test_bag_string_representation(self):
bag = bagit.make_bag(self.tmpdir)
self.assertEqual(self.tmpdir, str(bag))
def test_has_oxum(self):
bag = bagit.make_bag(self.tmpdir)
self.assertTrue(bag.has_oxum())
def test_bag_constructor(self):
bag = bagit.make_bag(self.tmpdir)
bag = bagit.Bag(self.tmpdir)
self.assertEqual(type(bag), bagit.Bag)
self.assertEqual(len(list(bag.payload_files())), 5)
def test_is_valid(self):
bag = bagit.make_bag(self.tmpdir)
bag = bagit.Bag(self.tmpdir)
self.assertTrue(bag.is_valid())
with open(j(self.tmpdir, "data", "extra_file"), "w") as ef:
ef.write("bar")
self.assertFalse(bag.is_valid())
def test_garbage_in_bagit_txt(self):
bagit.make_bag(self.tmpdir)
bagfile = """BagIt-Version: 0.97
Tag-File-Character-Encoding: UTF-8
==================================
"""
with open(j(self.tmpdir, "bagit.txt"), "w") as bf:
bf.write(bagfile)
self.assertRaises(bagit.BagValidationError, bagit.Bag, self.tmpdir)
def test_make_bag_multiprocessing(self):
bagit.make_bag(self.tmpdir, processes=2)
self.assertTrue(os.path.isdir(j(self.tmpdir, "data")))
def test_multiple_meta_values(self):
baginfo = {"Multival-Meta": [7, 4, 8, 6, 8]}
bag = bagit.make_bag(self.tmpdir, baginfo)
meta = bag.info.get("Multival-Meta")
self.assertEqual(type(meta), list)
self.assertEqual(len(meta), len(baginfo["Multival-Meta"]))
def test_unicode_bag_info(self):
info = {
"Test-BMP": "This element contains a \N{LATIN SMALL LETTER U WITH DIAERESIS}",
"Test-SMP": "This element contains a \N{LINEAR B SYMBOL B049}",
}
bagit.make_bag(self.tmpdir, bag_info=info, checksums=["md5"])
bag_info_txt = slurp_text_file(j(self.tmpdir, "bag-info.txt"))
for v in info.values():
self.assertIn(v, bag_info_txt)
def test_unusual_bag_info_separators(self):
bag = bagit.make_bag(self.tmpdir)
with open(j(self.tmpdir, "bag-info.txt"), "a") as f:
print("Test-Tag: 1", file=f)
print("Test-Tag:\t2", file=f)
print("Test-Tag\t: 3", file=f)
print("Test-Tag\t:\t4", file=f)
print("Test-Tag\t \t: 5", file=f)
print("Test-Tag:\t \t 6", file=f)
bag = bagit.Bag(self.tmpdir)
bag.save(manifests=True)
self.assertTrue(bag.is_valid())
self.assertEqual(bag.info["Test-Tag"], list(map(str, range(1, 7))))
def test_default_bagging_date(self):
info = {"Contact-Email": "[email protected]"}
bagit.make_bag(self.tmpdir, bag_info=info)
bag_info_txt = slurp_text_file(j(self.tmpdir, "bag-info.txt"))
self.assertTrue("Contact-Email: [email protected]" in bag_info_txt)
today = datetime.date.strftime(datetime.date.today(), "%Y-%m-%d")
self.assertTrue("Bagging-Date: %s" % today in bag_info_txt)
def test_missing_tagmanifest_valid(self):
info = {"Contact-Email": "[email protected]"}
bag = bagit.make_bag(self.tmpdir, bag_info=info, checksums=["md5"])
self.assertTrue(bag.is_valid())
os.remove(j(self.tmpdir, "tagmanifest-md5.txt"))
self.assertTrue(bag.is_valid())
def test_carriage_return_manifest(self):
with open(j(self.tmpdir, "newline\r"), "w") as whatever:
whatever.write("ugh")
bag = bagit.make_bag(self.tmpdir)
self.assertTrue(bag.is_valid())
def test_payload_permissions(self):
perms = os.stat(self.tmpdir).st_mode
        # our tmpdir should not be writeable by others
        self.assertEqual(perms & stat.S_IWOTH, 0)
        # but if we make it writeable by others then the resulting
        # payload directory should have the same permissions
new_perms = perms | stat.S_IWOTH
self.assertTrue(perms != new_perms)
os.chmod(self.tmpdir, new_perms)
bagit.make_bag(self.tmpdir)
payload_dir = j(self.tmpdir, "data")
self.assertEqual(os.stat(payload_dir).st_mode, new_perms)
def test_save_bag_to_unwritable_directory(self):
bag = bagit.make_bag(self.tmpdir, checksum=["sha256"])
os.chmod(self.tmpdir, 0)
with self.assertRaises(bagit.BagError) as error_catcher:
bag.save()
self.assertEqual(
"Cannot save bag to non-existent or inaccessible directory %s"
% self.tmpdir,
str(error_catcher.exception),
)
def test_save_bag_with_unwritable_file(self):
bag = bagit.make_bag(self.tmpdir, checksum=["sha256"])
os.chmod(os.path.join(self.tmpdir, "bag-info.txt"), 0)
with self.assertRaises(bagit.BagError) as error_catcher:
bag.save()
self.assertEqual(
"Read permissions are required to calculate file fixities",
str(error_catcher.exception),
)
def test_save_manifests(self):
bag = bagit.make_bag(self.tmpdir)
self.assertTrue(bag.is_valid())
bag.save(manifests=True)
self.assertTrue(bag.is_valid())
with open(j(self.tmpdir, "data", "newfile"), "w") as nf:
nf.write("newfile")
self.assertRaises(bagit.BagValidationError, bag.validate, bag, fast=False)
bag.save(manifests=True)
self.assertTrue(bag.is_valid())
def test_save_manifests_deleted_files(self):
bag = bagit.make_bag(self.tmpdir)
self.assertTrue(bag.is_valid())
bag.save(manifests=True)
self.assertTrue(bag.is_valid())
os.remove(j(self.tmpdir, "data", "loc", "2478433644_2839c5e8b8_o_d.jpg"))
self.assertRaises(bagit.BagValidationError, bag.validate, bag, fast=False)
bag.save(manifests=True)
self.assertTrue(bag.is_valid())
def test_save_baginfo(self):
bag = bagit.make_bag(self.tmpdir)
bag.info["foo"] = "bar"
bag.save()
bag = bagit.Bag(self.tmpdir)
self.assertEqual(bag.info["foo"], "bar")
self.assertTrue(bag.is_valid())
bag.info["x"] = ["a", "b", "c"]
bag.save()
b = bagit.Bag(self.tmpdir)
self.assertEqual(b.info["x"], ["a", "b", "c"])
self.assertTrue(bag.is_valid())
def test_save_baginfo_with_sha1(self):
bag = bagit.make_bag(self.tmpdir, checksum=["sha1", "md5"])
self.assertTrue(bag.is_valid())
bag.save()
bag.info["foo"] = "bar"
bag.save()
bag = bagit.Bag(self.tmpdir)
self.assertTrue(bag.is_valid())
def test_save_only_baginfo(self):
bag = bagit.make_bag(self.tmpdir)
with open(j(self.tmpdir, "data", "newfile"), "w") as nf:
nf.write("newfile")
bag.info["foo"] = "bar"
bag.save()
bag = bagit.Bag(self.tmpdir)
self.assertEqual(bag.info["foo"], "bar")
self.assertFalse(bag.is_valid())
def test_make_bag_with_newline(self):
bag = bagit.make_bag(self.tmpdir, {"test": "foo\nbar"})
self.assertEqual(bag.info["test"], "foobar")
def test_unicode_in_tags(self):
bag = bagit.make_bag(self.tmpdir, {"test": "♡"})
bag = bagit.Bag(self.tmpdir)
self.assertEqual(bag.info["test"], "♡")
def test_filename_unicode_normalization(self):
# We need to handle cases where the Unicode normalization form of a
# filename has changed in-transit. This is hard to do portably in both
# directions because OS X normalizes *all* filenames to an NFD variant
# so we'll start with a basic test which writes the manifest using the
# NFC form and confirm that this does not cause the bag to fail when it
# is written to the filesystem using the NFD form, which will not be
# altered when saved to an HFS+ filesystem:
test_filename = "Núñez Papers.txt"
test_filename_nfd = unicodedata.normalize("NFD", test_filename)
os.makedirs(j(self.tmpdir, "unicode-normalization"))
with open(j(self.tmpdir, "unicode-normalization", test_filename_nfd), "w") as f:
f.write("This is a test filename written using NFD normalization\n")
bag = bagit.make_bag(self.tmpdir)
bag.save()
self.assertTrue(bag.is_valid())
        # Now we'll cause the entire manifest file to be normalized to NFC:
for m_f in bag.manifest_files():
contents = slurp_text_file(m_f)
normalized_bytes = unicodedata.normalize("NFC", contents).encode("utf-8")
with open(m_f, "wb") as f:
f.write(normalized_bytes)
for alg in bag.algorithms:
bagit._make_tagmanifest_file(alg, bag.path, encoding=bag.encoding)
# Now we'll reload the whole thing:
bag = bagit.Bag(self.tmpdir)
self.assertTrue(bag.is_valid())
def test_open_bag_with_missing_bagit_txt(self):
bagit.make_bag(self.tmpdir)
os.unlink(j(self.tmpdir, "bagit.txt"))
with self.assertRaises(bagit.BagError) as error_catcher:
bagit.Bag(self.tmpdir)
self.assertEqual(
"Expected bagit.txt does not exist: %s/bagit.txt" % self.tmpdir,
str(error_catcher.exception),
)
def test_open_bag_with_malformed_bagit_txt(self):
bagit.make_bag(self.tmpdir)
with open(j(self.tmpdir, "bagit.txt"), "w") as f:
os.ftruncate(f.fileno(), 0)
with self.assertRaises(bagit.BagError) as error_catcher:
bagit.Bag(self.tmpdir)
self.assertEqual(
"Missing required tag in bagit.txt: BagIt-Version, Tag-File-Character-Encoding",
str(error_catcher.exception),
)
def test_open_bag_with_invalid_versions(self):
bagit.make_bag(self.tmpdir)
for v in ("a.b", "2.", "0.1.2", "1.2.3"):
with open(j(self.tmpdir, "bagit.txt"), "w") as f:
f.write("BagIt-Version: %s\nTag-File-Character-Encoding: UTF-8\n" % v)
with self.assertRaises(bagit.BagError) as error_catcher:
bagit.Bag(self.tmpdir)
self.assertEqual(
"Bag version numbers must be MAJOR.MINOR numbers, not %s" % v,
str(error_catcher.exception),
)
def test_open_bag_with_unsupported_version(self):
bagit.make_bag(self.tmpdir)
with open(j(self.tmpdir, "bagit.txt"), "w") as f:
f.write("BagIt-Version: 2.0\nTag-File-Character-Encoding: UTF-8\n")
with self.assertRaises(bagit.BagError) as error_catcher:
bagit.Bag(self.tmpdir)
self.assertEqual("Unsupported bag version: 2.0", str(error_catcher.exception))
def test_open_bag_with_unknown_encoding(self):
bagit.make_bag(self.tmpdir)
with open(j(self.tmpdir, "bagit.txt"), "w") as f:
f.write("BagIt-Version: 0.97\nTag-File-Character-Encoding: WTF-8\n")
with self.assertRaises(bagit.BagError) as error_catcher:
bagit.Bag(self.tmpdir)
self.assertEqual("Unsupported encoding: WTF-8", str(error_catcher.exception))
class TestFetch(SelfCleaningTestCase):
def setUp(self):
super(TestFetch, self).setUp()
# All of these tests will involve fetch.txt usage with an existing bag
# so we'll simply create one:
self.bag = bagit.make_bag(self.tmpdir)
def test_fetch_loader(self):
with open(j(self.tmpdir, "fetch.txt"), "w") as fetch_txt:
print(
"https://photojournal.jpl.nasa.gov/jpeg/PIA21390.jpg - data/nasa/PIA21390.jpg",
file=fetch_txt,
)
self.bag.save(manifests=True)
self.bag.validate()
self.assertListEqual(
[
(
"https://photojournal.jpl.nasa.gov/jpeg/PIA21390.jpg",
"-",
"data/nasa/PIA21390.jpg",
)
],
list(self.bag.fetch_entries()),
)
self.assertListEqual(
["data/nasa/PIA21390.jpg"], list(self.bag.files_to_be_fetched())
)
self.assertListEqual(
["data/nasa/PIA21390.jpg"], list(self.bag.compare_fetch_with_fs())
)
def test_fetch_validation(self):
with open(j(self.tmpdir, "fetch.txt"), "w") as fetch_txt:
print(
"https://photojournal.jpl.nasa.gov/jpeg/PIA21390.jpg - data/nasa/PIA21390.jpg",
file=fetch_txt,
)
self.bag.save(manifests=True)
with mock.patch.object(bagit.Bag, "validate_fetch") as mock_vf:
self.bag.validate()
self.assertTrue(
mock_vf.called, msg="Bag.validate() should call Bag.validate_fetch()"
)
def test_fetch_unsafe_payloads(self):
with open(j(self.tmpdir, "fetch.txt"), "w") as fetch_txt:
print(
"https://photojournal.jpl.nasa.gov/jpeg/PIA21390.jpg - /etc/passwd",
file=fetch_txt,
)
self.bag.save(manifests=True)
expected_msg = 'Path "/etc/passwd" in "%s/fetch.txt" is unsafe' % self.tmpdir
# We expect both validate() and fetch entry iteration to raise errors on security hazards
# so we'll test both:
with self.assertRaises(bagit.BagError) as cm:
self.bag.validate()
self.assertEqual(expected_msg, str(cm.exception))
# Note the use of list() to exhaust the fetch_entries generator:
with self.assertRaises(bagit.BagError) as cm:
list(self.bag.fetch_entries())
self.assertEqual(expected_msg, str(cm.exception))
def test_fetch_malformed_url(self):
with open(j(self.tmpdir, "fetch.txt"), "w") as fetch_txt:
print(
"//photojournal.jpl.nasa.gov/jpeg/PIA21390.jpg - data/nasa/PIA21390.jpg",
file=fetch_txt,
)
self.bag.save(manifests=True)
expected_msg = (
"Malformed URL in fetch.txt: //photojournal.jpl.nasa.gov/jpeg/PIA21390.jpg"
)
with self.assertRaises(bagit.BagError) as cm:
self.bag.validate_fetch()
self.assertEqual(expected_msg, str(cm.exception))
class TestCLI(SelfCleaningTestCase):
@mock.patch('sys.stderr', new_callable=StringIO)
def test_directory_required(self, mock_stderr):
testargs = ["bagit.py"]
with self.assertRaises(SystemExit) as cm:
with mock.patch.object(sys, 'argv', testargs):
bagit.main()
self.assertEqual(cm.exception.code, 2)
self.assertIn(
"error: the following arguments are required: directory",
mock_stderr.getvalue()
)
@mock.patch('sys.stderr', new_callable=StringIO)
def test_not_enough_processes(self, mock_stderr):
testargs = ["bagit.py", "--processes", "0", self.tmpdir]
with self.assertRaises(SystemExit) as cm:
with mock.patch.object(sys, 'argv', testargs):
bagit.main()
self.assertEqual(cm.exception.code, 2)
self.assertIn(
"error: The number of processes must be greater than 0",
mock_stderr.getvalue()
)
@mock.patch('sys.stderr', new_callable=StringIO)
def test_fast_flag_without_validate(self, mock_stderr):
bag = bagit.make_bag(self.tmpdir)
testargs = ["bagit.py", "--fast", self.tmpdir]
with self.assertRaises(SystemExit) as cm:
with mock.patch.object(sys, 'argv', testargs):
bagit.main()
self.assertEqual(cm.exception.code, 2)
self.assertIn(
"error: --fast is only allowed as an option for --validate!",
mock_stderr.getvalue()
)
def test_invalid_fast_validate(self):
bag = bagit.make_bag(self.tmpdir)
os.remove(j(self.tmpdir, "data", "loc", "2478433644_2839c5e8b8_o_d.jpg"))
testargs = ["bagit.py", "--validate", "--completeness-only", self.tmpdir]
with self.assertLogs() as captured:
with self.assertRaises(SystemExit) as cm:
with mock.patch.object(sys, 'argv', testargs):
bagit.main()
self.assertEqual(cm.exception.code, 1)
self.assertIn(
"%s is invalid: Payload-Oxum validation failed." % self.tmpdir,
captured.records[0].getMessage()
)
def test_valid_fast_validate(self):
bag = bagit.make_bag(self.tmpdir)
testargs = ["bagit.py", "--validate", "--fast", self.tmpdir]
with self.assertLogs() as captured:
with self.assertRaises(SystemExit) as cm:
with mock.patch.object(sys, 'argv', testargs):
bagit.main()
self.assertEqual(cm.exception.code, 0)
self.assertEqual(
"%s valid according to Payload-Oxum" % self.tmpdir,
captured.records[0].getMessage()
)
@mock.patch('sys.stderr', new_callable=StringIO)
def test_completeness_flag_without_validate(self, mock_stderr):
bag = bagit.make_bag(self.tmpdir)
testargs = ["bagit.py", "--completeness-only", self.tmpdir]
with self.assertRaises(SystemExit) as cm:
with mock.patch.object(sys, 'argv', testargs):
bagit.main()
self.assertEqual(cm.exception.code, 2)
self.assertIn(
"error: --completeness-only is only allowed as an option for --validate!",
mock_stderr.getvalue()
)
def test_invalid_completeness_validate(self):
bag = bagit.make_bag(self.tmpdir)
old_path = j(self.tmpdir, "data", "README")
new_path = j(self.tmpdir, "data", "extra_file")
os.rename(old_path, new_path)
testargs = ["bagit.py", "--validate", "--completeness-only", self.tmpdir]
with self.assertLogs() as captured:
with self.assertRaises(SystemExit) as cm:
with mock.patch.object(sys, 'argv', testargs):
bagit.main()
self.assertEqual(cm.exception.code, 1)
self.assertIn(
"%s is invalid: Bag is incomplete" % self.tmpdir,
captured.records[-1].getMessage()
)
def test_valid_completeness_validate(self):
bag = bagit.make_bag(self.tmpdir)
testargs = ["bagit.py", "--validate", "--completeness-only", self.tmpdir]
with self.assertLogs() as captured:
with self.assertRaises(SystemExit) as cm:
with mock.patch.object(sys, 'argv', testargs):
bagit.main()
self.assertEqual(cm.exception.code, 0)
self.assertEqual(
"%s is complete and valid according to Payload-Oxum" % self.tmpdir,
captured.records[0].getMessage()
)
def test_invalid_full_validate(self):
bag = bagit.make_bag(self.tmpdir)
readme = j(self.tmpdir, "data", "README")
txt = slurp_text_file(readme)
txt = "A" + txt[1:]
with open(readme, "w") as r:
r.write(txt)
testargs = ["bagit.py", "--validate", self.tmpdir]
with self.assertLogs() as captured:
with self.assertRaises(SystemExit) as cm:
with mock.patch.object(sys, 'argv', testargs):
bagit.main()
self.assertEqual(cm.exception.code, 1)
self.assertIn("Bag validation failed", captured.records[-1].getMessage())
def test_valid_full_validate(self):
bag = bagit.make_bag(self.tmpdir)
testargs = ["bagit.py", "--validate", self.tmpdir]
with self.assertLogs() as captured:
with self.assertRaises(SystemExit) as cm:
with mock.patch.object(sys, 'argv', testargs):
bagit.main()
self.assertEqual(cm.exception.code, 0)
self.assertEqual(
"%s is valid" % self.tmpdir,
captured.records[-1].getMessage()
)
def test_failed_create_bag(self):
os.chmod(self.tmpdir, 0)
testargs = ["bagit.py", self.tmpdir]
with self.assertLogs() as captured:
with self.assertRaises(SystemExit) as cm:
with mock.patch.object(sys, 'argv', testargs):
bagit.main()
self.assertEqual(cm.exception.code, 1)
self.assertIn(
"Failed to create bag in %s" % self.tmpdir,
captured.records[-1].getMessage()
)
def test_create_bag(self):
testargs = ["bagit.py", self.tmpdir]
with self.assertLogs() as captured:
with self.assertRaises(SystemExit) as cm:
with mock.patch.object(sys, 'argv', testargs):
bagit.main()
for rec in captured.records:
print(rec.getMessage())
self.assertEqual(cm.exception.code, 0)
class TestUtils(unittest.TestCase):
def setUp(self):
super(TestUtils, self).setUp()
if sys.version_info >= (3,):
self.unicode_class = str
else:
self.unicode_class = unicode
def test_force_unicode_str_to_unicode(self):
self.assertIsInstance(bagit.force_unicode("foobar"), self.unicode_class)
def test_force_unicode_pass_through(self):
self.assertIsInstance(bagit.force_unicode("foobar"), self.unicode_class)
def test_force_unicode_int(self):
self.assertIsInstance(bagit.force_unicode(1234), self.unicode_class)
if __name__ == "__main__":
unittest.main()
|
LibraryOfCongress/bagit-python
|
test.py
|
test.py
|
py
| 49,635 |
python
|
en
|
code
| 198 |
github-code
|
50
|
4682107697
|
from dal_select2.views import Select2QuerySetView
from django.http import JsonResponse
from django.utils import timezone
from rest_framework.fields import IntegerField, DateField
from rest_framework.serializers import Serializer
from rest_framework.views import APIView
from the_redhuman_is.models import Worker
from the_redhuman_is.services.delivery import retrieve
from the_redhuman_is.services.delivery.utils import ObjectNotFoundError
from the_redhuman_is.views import delivery
from the_redhuman_is.views.backoffice_app.auth import bo_api
from utils import date_time
class MapRequestAutocomplete(Select2QuerySetView, APIView):
def get_queryset(self):
zone_id = self.forwarded.get('zone')
delivery_requests, _, _ = retrieve.get_requests_on_map_querysets(
timezone.localdate(),
self.request.user,
zone_id,
)
if self.q:
delivery_requests = delivery_requests.filter_by_text(self.q)
return delivery_requests.order_by(
'pk'
).values(
'pk',
'driver_name',
)
def get_result_value(self, result):
return str(result['pk'])
def get_result_label(self, result):
return f"{result['pk']} {result['driver_name']}"
class MapWorkerAutocomplete(Select2QuerySetView, APIView):
def get_queryset(self):
zone_id = self.forwarded.get('zone')
_, workers, _ = retrieve.get_requests_on_map_querysets(
timezone.localdate(),
self.request.user,
zone_id,
)
if self.q:
workers = workers.filter_by_text(self.q)
return workers.order_by(
'full_name'
).values(
'pk',
'full_name',
)
def get_result_value(self, result):
return str(result['pk'])
def get_result_label(self, result):
return result['full_name']
class ZoneSerializer(Serializer):
zone = IntegerField(min_value=1, source='zone_id', allow_null=True, default=None)
@bo_api(['GET'])
def requests_on_map(request):
serializer = ZoneSerializer(data=request.GET)
serializer.is_valid(raise_exception=True)
return JsonResponse(
retrieve.get_requests_on_map(
user=request.user,
date=timezone.localdate(),
**serializer.validated_data
)
)
class WorkerMapDataSerializer(Serializer):
worker = IntegerField(min_value=1, source='worker_id')
date = DateField(input_formats=[date_time.DATE_FORMAT])
@bo_api(['GET'])
def worker_map_data(request):
serializer = WorkerMapDataSerializer(data=request.GET)
serializer.is_valid(raise_exception=True)
worker_id = serializer.validated_data['worker_id']
try:
worker = Worker.objects.get(pk=worker_id)
except Worker.DoesNotExist:
raise ObjectNotFoundError(f'Работник {worker_id} не найден.')
return JsonResponse(
delivery.worker_on_map_link(worker, serializer.validated_data['date'])
)
|
yaykarov/Gettask
|
the_redhuman_is/views/backoffice_app/delivery/requests_on_map.py
|
requests_on_map.py
|
py
| 3,040 |
python
|
en
|
code
| 0 |
github-code
|
50
|
34655456874
|
_author_ = 'jake'
_project_ = 'leetcode'
# https://leetcode.com/problems/largest-rectangle-in-histogram/
# Given n non-negative integers representing the histogram's bar height where the width of each bar is 1,
# find the area of largest rectangle in the histogram.
# For each bar, find the largest rectangle including that bar as the lowest bar.
# An index is popped from the stack when a lower bar to the right is found.
# We calculate the largest area with the bar at the popped index as the height (lowest bar in rectangle).
# Width is determined by the closest lower bar to the left and right.
# Time - O(n)
# Space - O(n)
class Solution(object):
def largestRectangleArea(self, heights):
"""
:type heights: List[int]
:rtype: int
"""
max_area = 0
heights = [0] + heights + [0] # stack will not be empty and last genuine bar will be popped
stack = [0] # indices of bars in non-decreasing height order
for i, bar in enumerate(heights[1:], 1):
while heights[stack[-1]] > bar: # pop taller off stack
height = heights[stack.pop()] # form rectangle with popped bar determining height
                width = i - stack[-1] - 1   # stack[-1] and i are the first lower bars on the left and right
max_area = max(max_area, height * width)
stack.append(i)
return max_area
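
# A small usage sketch added for illustration (not part of the original file):
# for the classic example [2, 1, 5, 6, 2, 3] the largest rectangle has area 10
# (the bars of height 5 and 6 spanning a width of 2).
if __name__ == '__main__':
    assert Solution().largestRectangleArea([2, 1, 5, 6, 2, 3]) == 10
    assert Solution().largestRectangleArea([]) == 0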
|
jakehoare/leetcode
|
python_1_to_1000/084_Largest_Rectangle_in_Histogram.py
|
084_Largest_Rectangle_in_Histogram.py
|
py
| 1,437 |
python
|
en
|
code
| 49 |
github-code
|
50
|
34884768649
|
"""
Given the preorder and inorder traversal results of a binary tree, rebuild that binary tree.
Assume that neither the preorder nor the inorder traversal contains duplicate values.
"""
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def buildTree(self, preorder, inorder) -> TreeNode:
        # 1. The first element of the preorder traversal gives the root of the tree.
        # 2. The root's position in the inorder traversal splits it into the two subtrees.
        if not preorder or not inorder:
            return None
        root_val = preorder[0]  # value of the root node
        root = TreeNode(root_val)  # create the node for the root
        index_root = inorder.index(root_val)
        inorder_left_subtree = inorder[:index_root]
        inorder_right_subtree = inorder[index_root+1:]
        preorder_left_subtree = preorder[1:1+len(inorder_left_subtree)]
        preorder_right_subtree = preorder[-len(inorder_right_subtree):]
        root.left = self.buildTree(preorder_left_subtree, inorder_left_subtree)
        root.right = self.buildTree(preorder_right_subtree, inorder_right_subtree)
        return root
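
# Minimal usage sketch added for illustration (the traversal lists are example
# values, not data from this repository): rebuild LeetCode's sample tree and
# check a few of the reconstructed nodes.
if __name__ == '__main__':
    tree = Solution().buildTree([3, 9, 20, 15, 7], [9, 3, 15, 20, 7])
    assert tree.val == 3
    assert tree.left.val == 9
    assert tree.right.val == 20
    assert tree.right.left.val == 15 and tree.right.right.val == 7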
|
Dong98-code/leetcode
|
codes/got-Offer/07.buildTree.py
|
07.buildTree.py
|
py
| 1,119 |
python
|
en
|
code
| 0 |
github-code
|
50
|
32570914038
|
import pyark.cva_client as cva_client
from protocols.protocol_7_3.cva import ReportEventType, Transaction
import logging
import pandas as pd
REPORT_EVENT_TYPES = [ReportEventType.genomics_england_tiering, ReportEventType.candidate, ReportEventType.reported,
ReportEventType.questionnaire]
class CasesClient(cva_client.CvaClient):
_BASE_ENDPOINT = "cases"
def __init__(self, **params):
cva_client.CvaClient.__init__(self, **params)
def count(self, **params):
"""
:type params: dict
:rtype: int
"""
params['count'] = True
return self.get_cases(**params)
def get_cases_ids(self, as_data_frame=False, max_results=None, **params):
"""
:type as_data_frame: bool
:type max_results: int
:type params: dict
:rtype: generator
"""
params['include'] = ["identifier", "version"]
return self._paginate(
endpoint=self._BASE_ENDPOINT, as_data_frame=as_data_frame, max_results=max_results,
transformer=lambda x: "{}-{}".format(x["identifier"], x["version"]), **params)
def get_cases(self, as_data_frame=False, max_results=None, include_all=True, **params):
"""
:type as_data_frame: bool
:type max_results: int
:param include_all: use False for the default minimal representation of case, it will be faster
:type include_all: bool
:type params: dict
:rtype: generator
"""
if params.get('count', False):
results, next_page_params = self._get(self._BASE_ENDPOINT, **params)
return results[0]
else:
if include_all:
params['include'] = [self._INCLUDE_ALL]
return self._paginate(
endpoint=self._BASE_ENDPOINT, as_data_frame=as_data_frame, max_results=max_results, **params)
def get_summary(self, as_data_frame=False, params_list=[], **params):
"""
:type as_data_frame: bool
:type params_list: list
:rtype: dict | pd.DataFrame
"""
if params_list:
self._params_sanity_checks(params_list)
for p in params_list:
p.update(params)
results_list = [self.get_summary(as_data_frame=as_data_frame, **p) for p in params_list]
return self._render_multiple_results(results_list, as_data_frame=as_data_frame)
else:
results, _ = self._get("{endpoint}/summary".format(endpoint=self._BASE_ENDPOINT), **params)
if not results:
logging.warning("No summary found")
return None
assert len(results) == 1, "Unexpected number of summaries"
return self._render_single_result(results, as_data_frame=as_data_frame, indexes=params)
def delete(self, case_id, case_version):
path = "{endpoint}/{case_id}/{case_version}".format(
endpoint=self._BASE_ENDPOINT, case_id=case_id, case_version=case_version
)
results, _ = self._delete(path)
result = self._render_single_result(results)
return Transaction.fromJsonDict(result) if result else None
@staticmethod
def _params_sanity_checks(params_list):
if not all(isinstance(p, dict) for p in params_list):
raise ValueError("Cannot accept a list of 'params' combined with other parameters. " +
"Include all parameters in the list")
keys = None
for p in params_list:
if keys is None:
keys = set(p.keys())
else:
if len(set(p.keys()).difference(keys)) > 0:
raise ValueError("Cannot accept a list of 'params' with different lists of filters")
def get_case(self, identifier, version, as_data_frame=False, include_all=True, **params):
"""
:type as_data_frame: bool
:type identifier: str
:type version: str
:type include_all: bool
:rtype: dict | pd.DataFrame
"""
if include_all:
params['include'] = [self._INCLUDE_ALL]
results, _ = self._get("{endpoint}/{identifier}/{version}".format(
endpoint=self._BASE_ENDPOINT, identifier=identifier, version=version), **params)
if not results:
logging.warning("No case found with id-version {}-{}".format(identifier, version))
return None
assert len(results) == 1, "Unexpected number of cases returned when searching by identifier"
return self._render_single_result(results, as_data_frame=as_data_frame)
def get_case_by_identifiers(self, identifiers, as_data_frame=False, include_all=True, **params):
"""
:type as_data_frame: bool
:type identifiers: list
:type include_all: bool
:rtype: list | pd.DataFrame
"""
if include_all:
params['include'] = [self._INCLUDE_ALL]
results, _ = self._get("{endpoint}/{identifiers}".format(
endpoint=self._BASE_ENDPOINT, identifiers=",".join(identifiers)), **params)
return self._render(results, as_data_frame=as_data_frame)
def search(self, query):
results, _ = self._get("{endpoint}/search/{query}".format(endpoint=self._BASE_ENDPOINT, query=query))
return self._render(results, as_data_frame=False)
def get_similar_cases_by_case(self, case_id, case_version, as_data_frame=False, **params):
"""
:type as_data_frame: bool
:type case_id: str
:type case_version: int
:type params: dict
:rtype: list | pd.DataFrame
"""
results, _ = self._get([self._BASE_ENDPOINT, case_id, case_version, "similar-cases"], **params)
if not results:
logging.warning("No similar cases found")
return None
return self._render(results, as_data_frame=as_data_frame)
def get_similar_cases_by_phenotypes(self, phenotypes, as_data_frame=False, **params):
"""
:type as_data_frame: bool
:type phenotypes: list
:type params: dict
:rtype: list | pd.DataFrame
"""
params['hpoIds'] = phenotypes
results, _ = self._get([self._BASE_ENDPOINT, "phenotypes", "similar-cases"], **params)
if not results:
logging.warning("No similar cases found")
return None
return self._render(results, as_data_frame=as_data_frame)
def get_shared_variants_cases_by_case(self, case_id, case_version, report_event_type, **params):
"""
:type case_id: str
:type case_version: int
:type report_event_type: ReportEventType
:type limit: int
:type params: dict
:rtype: list
"""
assert report_event_type in REPORT_EVENT_TYPES, \
"Invalid report event type provided '{}'. Valid values: {}".format(report_event_type, REPORT_EVENT_TYPES)
params['type'] = report_event_type
results, _ = self._get([self._BASE_ENDPOINT, case_id, case_version, "shared-variants"], **params)
if not results:
logging.warning("No cases sharing {} variants found".format(report_event_type))
return None
return results
def get_shared_genes_cases_by_case(self, case_id, case_version, report_event_type, **params):
"""
:type case_id: str
:type case_version: int
:type report_event_type: ReportEventType
:type params: dict
:rtype: list
"""
assert report_event_type in REPORT_EVENT_TYPES, \
"Invalid report event type provided '{}'. Valid values: {}".format(report_event_type, REPORT_EVENT_TYPES)
params['type'] = report_event_type
results, _ = self._get([self._BASE_ENDPOINT, case_id, case_version, "shared-genes"], **params)
if not results:
logging.warning("No cases sharing {} genes found".format(report_event_type))
return None
return results
def get_shared_variants_counts(self, variant_ids, **params):
"""
:type variant_ids: list
:type params: dict
:rtype: list
"""
variant_coordinates = [v.toJsonDict() for v in self.variants().variant_ids_to_coordinates(variant_ids)]
results, _ = self._post([self._BASE_ENDPOINT, "shared-variants-counts"], variant_coordinates, **params)
return results
def get_phenosim_matrix(self, as_data_frame=False, **params):
"""
:type as_data_frame: bool
:rtype: list | pd.DataFrame
"""
results, _ = self._get("{endpoint}/similarity-matrix".format(endpoint=self._BASE_ENDPOINT), **params)
if not results:
logging.warning("No similarity matrix found")
return None
return self._render(results, as_data_frame=as_data_frame)
def get_pedigree(self, identifier, version, as_data_frame=False):
results, _ = self._get("{endpoint}/{identifier}/{version}".format(
endpoint="pedigrees", identifier=identifier, version=version))
if not results:
logging.warning("No pedigree found with id-version {}-{}".format(identifier, version))
return None
assert len(results) == 1, "Unexpected number of pedigrees returned when searching by identifier"
return self._render_single_result(results, as_data_frame=as_data_frame)
def get_clinical_report(self, identifier, version, as_data_frame=False):
results, _ = self._get("{endpoint}/{identifier}/{version}".format(
endpoint="clinical-reports", identifier=identifier, version=version))
if not results:
logging.warning("No clinical report found with id-version {}-{}".format(identifier, version))
return None
assert len(results) == 1, "Unexpected number of clinical reports returned when searching by identifier"
return self._render_single_result(results, as_data_frame=as_data_frame)
def get_rd_exit_questionnaire(self, identifier, version, as_data_frame=False):
results, _ = self._get("{endpoint}/{identifier}/{version}".format(
endpoint="rare-disease-exit-questionnaires", identifier=identifier, version=version))
if not results:
logging.warning("No questionnaire found with id-version {}-{}".format(identifier, version))
return None
assert len(results) == 1, "Unexpected number of questionnaires returned when searching by identifier"
return self._render_single_result(results, as_data_frame=as_data_frame)
def get_cancer_participant(self, identifier, version, as_data_frame=False):
results, _ = self._get("{endpoint}/{identifier}/{version}".format(
endpoint="participants", identifier=identifier, version=version))
if not results:
logging.warning("No cancer participant found with id-version {}-{}".format(identifier, version))
return None
assert len(results) == 1, "Unexpected number of cancer participants returned when searching by identifier"
return self._render_single_result(results, as_data_frame=as_data_frame)
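# --- Illustrative check (appended by the editor; not part of the original module) ---
# A minimal, offline sketch of the _params_sanity_checks behaviour defined above.
# The class name CasesClient is an assumption based on this file's name; adjust it if
# the class is exported under a different name.
if __name__ == "__main__":
    CasesClient._params_sanity_checks([{"program": "rare_disease"}, {"program": "cancer"}])
    try:
        CasesClient._params_sanity_checks([{"program": "rare_disease"}, {"panel": "cardiac"}])
    except ValueError as err:
        print("rejected as expected:", err)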
|
genomicsengland/pyark
|
pyark/subclients/cases_client.py
|
cases_client.py
|
py
| 11,220 |
python
|
en
|
code
| 1 |
github-code
|
50
|
3059198420
|
import numpy as np
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Conv2D, Dropout, BatchNormalization, MaxPooling2D, Flatten
x_train = np.load('../data/image/brain/npy/keras66_train_x.npy')
x_val = np.load('../data/image/brain/npy/keras66_val_x.npy')
x_test = np.load('../data/image/brain/npy/keras66_test_x.npy')
y_train = np.load('../data/image/brain/npy/keras66_train_y.npy')
y_val = np.load('../data/image/brain/npy/keras66_val_y.npy')
y_test = np.load('../data/image/brain/npy/keras66_test_y.npy')
print(x_train.shape, y_train.shape)
print(x_val.shape, y_val.shape)
print(x_test.shape, y_test.shape)
model = Sequential()
model.add(Conv2D(64, 3, padding='same', activation='relu', input_shape=(150,150,3)))
model.add(BatchNormalization())
model.add(Conv2D(128,3, activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(64,3, activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(3))
model.add(Conv2D(32,3, activation='relu'))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dense(64, activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dense(16, activation='relu'))
model.add(Dense(1,activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['acc'])
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
es = EarlyStopping(monitor = 'val_loss', patience = 50)
lr = ReduceLROnPlateau(monitor = 'val_loss', patience = 5, factor = 0.5, verbose = 1)
filepath = 'c:/data/modelcheckpoint/keras62_1_checkpoint_{val_loss:.4f}-{epoch:02d}.hdf5'
cp = ModelCheckpoint(filepath, save_best_only=True, monitor = 'val_loss')
history = model.fit(x_train, y_train, epochs=500, validation_data=(x_val, y_val),
                    callbacks=[es, lr, cp])  # also use the LR scheduler and checkpoint defined above
result = model.evaluate(x_test, y_test)
print(result)
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
import matplotlib.pyplot as plt
epochs = len(acc)
x_axis = range(0,epochs)
fig, ax = plt.subplots()
ax.plot(x_axis, acc, label='train')
ax.plot(x_axis, val_acc, label='val')
ax.legend()
plt.ylabel('acc')
plt.title('acc')
# plt.show()
fig, ax = plt.subplots()
ax.plot(x_axis, loss, label='train')
ax.plot(x_axis, val_loss, label='val')
ax.legend()
plt.ylabel('loss')
plt.title('loss')
plt.show()
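# --- Illustrative sketch (appended by the editor; not part of the original script) ---
# One plausible way the cached .npy arrays loaded above could have been produced with
# the ImageDataGenerator imported at the top; the directory layout under
# '../data/image/brain/' and the batch size are assumptions.
def build_npy_cache():
    datagen = ImageDataGenerator(rescale=1. / 255)
    train_flow = datagen.flow_from_directory(
        '../data/image/brain/train', target_size=(150, 150),
        class_mode='binary', batch_size=160, shuffle=True)
    x, y = next(train_flow)  # one batch; iterate further for the full data set
    np.save('../data/image/brain/npy/keras66_train_x.npy', x)
    np.save('../data/image/brain/npy/keras66_train_y.npy', y)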
|
SunghoonSeok/Study
|
keras2/keras66_4_load_npy_fit.py
|
keras66_4_load_npy_fit.py
|
py
| 2,463 |
python
|
en
|
code
| 2 |
github-code
|
50
|
32266419487
|
import requests
import time
from data import TOKEN
API_URL: str = 'https://api.telegram.org/bot'
BOT_TOKEN: str = TOKEN
TEXT: str = 'Мы законектились!'
MAX_COUNTER: int = 100
offset: int = -2
counter: int = 0
chat_id: int
while counter < MAX_COUNTER:
    print('attempt =', counter)  # so we can see in the console that the code is still running
updates = requests.get(f'{API_URL}{BOT_TOKEN}/getUpdates?offset={offset + 1}').json()
if updates['result']:
print(updates['result'])
for result in updates['result']:
offset = result['update_id']
chat_id = result['message']['from']['id']
requests.get(f'{API_URL}{BOT_TOKEN}/sendMessage?chat_id={chat_id}&text={TEXT}')
time.sleep(1)
counter += 1
##if updates['result'] is truthy, then updates['result'] will be JSON shaped like the example below
# [
# {
# 'update_id': 792864391,
# 'message': {
# 'message_id': 44,
# 'from': {
# 'id': 413281115,
# 'is_bot': False,
# 'first_name': '...',
# 'username': 'username22549',
# 'language_code': 'ru'
# },
# 'chat': {
# 'id': 413281115,
# 'first_name': '...',
# 'username': 'username22549',
# 'type': 'private'
# },
# 'date': 1675094450,
# 'text': 'пр'
# }
# }
# ]
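# --- Illustrative alternative (appended by the editor; not part of the original) ---
# getUpdates supports long polling through its `timeout` parameter, which avoids the
# fixed one-second sleep in the loop above; TOKEN handling stays the same.
def poll_once(current_offset: int) -> int:
    resp = requests.get(
        f'{API_URL}{BOT_TOKEN}/getUpdates',
        params={'offset': current_offset + 1, 'timeout': 30},
    ).json()
    for result in resp.get('result', []):
        current_offset = result['update_id']
        chat_id = result['message']['from']['id']
        requests.get(f'{API_URL}{BOT_TOKEN}/sendMessage',
                     params={'chat_id': chat_id, 'text': TEXT})
    return current_offset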
|
geronda94/aiogram_learning
|
experiments_with_token/simle_requests.py
|
simle_requests.py
|
py
| 1,394 |
python
|
ru
|
code
| 0 |
github-code
|
50
|
37935322797
|
import scrapy
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
class StackOverflowSpider(CrawlSpider):
    name = 'stackoverflow'
    start_urls = ['http://www.mirrorkart.com/Buy-Designers-Mirrors-online']
    # Rule-based link following only works on a CrawlSpider, whose built-in parse()
    # must not be overridden, so the listing callback gets its own name.
    rules = (
        Rule(LinkExtractor(allow=(), restrict_xpaths=('//ul[@class="pagination"]/li',)),
             callback="parse_listing", follow=True),
    )
    def parse_start_url(self, response):
        # the first listing page is not routed through the rules, so scrape it here too
        return self.parse_listing(response)
    def parse_listing(self, response):
        for href in response.css('.product-thumb .image a::attr(href)'):
            full_url = response.urljoin(href.extract())
            yield scrapy.Request(full_url, callback=self.parse_product)
def parse_product(self, response):
yield {
'title': response.css('h1::text').extract_first(),
'image': response.css('.thumbnails a::attr(href)').extract_first(),
'desc' : response.css('div[id="tab-description"] p').extract(),
'link': response.url,
}
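# --- Illustrative runner (appended by the editor; not part of the original spider) ---
# A minimal sketch for running the spider in-process and exporting items to JSON;
# the output filename is an assumption.
if __name__ == "__main__":
    from scrapy.crawler import CrawlerProcess
    process = CrawlerProcess(settings={"FEEDS": {"products.json": {"format": "json"}}})
    process.crawl(StackOverflowSpider)
    process.start()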
|
amititash/hc_scrapy
|
tmp_spider.py
|
tmp_spider.py
|
py
| 966 |
python
|
en
|
code
| 0 |
github-code
|
50
|
44323507785
|
import pickle
from typing import Any
import numpy as np
def load_pickle(filepath: str) -> Any:
"""Load a pickle file
Args:
filepath (str): path to pickle file
"""
with open(filepath, "rb") as pickle_file:
data = pickle.load(pickle_file)
return data
def min_max_normalize(a: np.ndarray, dim=None) -> np.ndarray:
"""Normalize an array to [0, 1]
If dim is not None, normalize along that dimension.
Else scalar normalize everything
Args:
a (np.ndarray): array to normalize
dim (int, optional): dimension to normalize. Defaults to None.
Returns:
np.ndarray: normalized array
"""
    if dim is not None:
        # NumPy reductions take axis=, not dim= (dim is the PyTorch spelling)
        return (a - a.min(axis=dim, keepdims=True)) / (
            a.max(axis=dim, keepdims=True) - a.min(axis=dim, keepdims=True)
        )
else:
return (a - a.min()) / (a.max() - a.min())
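# --- Illustrative usage (appended by the editor; not part of the original module) ---
# Quick self-check of min_max_normalize on a small array; dim=1 is forwarded to
# NumPy's axis argument above, giving row-wise scaling.
if __name__ == "__main__":
    demo = np.array([[0.0, 5.0, 10.0], [2.0, 4.0, 6.0]])
    print(min_max_normalize(demo))         # global scaling to [0, 1]
    print(min_max_normalize(demo, dim=1))  # per-row scaling to [0, 1]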
|
AntoineRichard/LunarDiffusion
|
dem_zoomer/utils/data_utils.py
|
data_utils.py
|
py
| 900 |
python
|
en
|
code
| 0 |
github-code
|
50
|
25249220807
|
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 6 10:29:13 2021
@author: Oscar
"""
"""
# Calulate the sum of squares of however many natural numbers
def squaresum(n):
#Initiate a variable for holding the sums
sm = 0
# Iterate the addition of each individual squares from 1 to n+1 number
for i in range(1, n+1):
sm = sm + (i*i) # Adding each iteration of , for example 2x2 to 1x1 etc
return sm #returns the value of the summation
#Drivers for the program
n = 100
print(squaresum(n))
"""
# Calculate the sum of the squares of the odd natural numbers up to a user-chosen limit
# Natural number: a number used for counting (1, 2, 3, 4, 5, ...)
# User input command
n = int(input("Print the sum of squares of the odd numbers up to the following number: ")) # Input must be an integer, hence "int"
def squaresum(n):
#Initiate a variable for holding the sums
sm = 0
# Iterate the addition of each individual squares from 1 to n+1 number
for i in range(1, n+1):
# determine in i in range(1, n+1) is odd
if (i % 2 != 0): # Modulo Operator "%" finds the remainder of the specified values
sm = sm + (i*i) # Adding each iteration of , for example 2x2 to 1x1 etc
return sm #returns the value of the summation
#Drivers for the program
print("Sum of Squares of odd numbers from 1 to", n, "is :",squaresum(n))
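# --- Illustrative check (appended by the editor; not part of the original script) ---
# The loop above can be cross-checked against the closed form
# 1^2 + 3^2 + ... + (2m-1)^2 = m(2m-1)(2m+1)/3, where m is how many odd numbers lie in 1..n.
def squaresum_closed_form(n):
    m = (n + 1) // 2  # count of odd numbers not exceeding n
    return m * (2 * m - 1) * (2 * m + 1) // 3
print("Closed-form check for n =", n, ":", squaresum_closed_form(n) == squaresum(n))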
|
oliver779/Computational_Methods_Course
|
Sum of Squares.py
|
Sum of Squares.py
|
py
| 1,392 |
python
|
en
|
code
| 0 |
github-code
|
50
|
29362569826
|
# *************************************************************************************************
# quant_momentum_strategy
#
# The goal of this script is to delop a investing strategy that recommends an equal weight
# portfolio of the 50 stocks with the highest price momentum.
#
# Following @nickmccullum Algorithmic Trading in Python course. Available at:
# https://github.com/nickmccullum/algorithmic-trading-python
#
# API documentation: https://iexcloud.io/docs/api/
import numpy
import pandas
import requests
import math
from scipy import stats
import xlsxwriter
stocks = pandas.read_csv('sp_500_stocks.csv')
from secrets import IEX_CLOUD_API_TOKEN # variable in secret file
symbol = 'AAPL'
api_url = f'https://sandbox.iexapis.com/stable/stock/{symbol}/stats/?token={IEX_CLOUD_API_TOKEN}'
data = requests.get(api_url).json()
# GET stock data from IEX API in batches ######################################
def chunks(lst, n):
'''Yield succesive n-sized chinks from lst.'''
for i in range(0, len(lst), n):
yield lst[i:i+n]
stock_chunks = list(chunks(stocks['Ticker'], 100)) #Chunkify stocks for batch api calls
stock_strings = []
for i in range(0, len(stock_chunks)):
stock_strings.append(','.join(stock_chunks[i]))
# Create DataFrame
dataframe_columns = ['Ticker', 'Stock Price', 'One-Year Price Return', 'Shares to Buy']
dataframe = pandas.DataFrame(columns = dataframe_columns)
for stock_string in stock_strings: # GET all stock stats
batch_api_call_url = f'https://sandbox.iexapis.com/stable/stock/market/batch/?symbols={stock_string}&types=price,stats&token={IEX_CLOUD_API_TOKEN}'
data = requests.get(batch_api_call_url).json()
for stock in stock_string.split(','): #Fill each stock row in the dataframe
dataframe = dataframe.append(
pandas.Series(
[
stock, #Ticker
data[stock]['price'], #Stock price
data[stock]['stats']['year1ChangePercent'], #One-Year Price Return
'N/A' #Shares to Buy
],
index = dataframe_columns
),
ignore_index = True
)
# Removing low momentum stocks ################################################
dataframe.sort_values('One-Year Price Return', ascending = False, inplace = True)
dataframe = dataframe[:50]
dataframe.reset_index(inplace = True)
# Calculate the number of shares to buy #######################################
def get_portfolio_size():
portfolio_incorrect = True
while portfolio_incorrect:
portfolio_size = input('Enter value of portfolio: ') #Calculate number of shares to buy
try:
portfolio_size = float(portfolio_size)
portfolio_incorrect = False
except ValueError:
print('Error: Enter a number! \n')
return portfolio_size
position_size = get_portfolio_size() / len(dataframe.index)
for i in range(0, len(dataframe)):
dataframe.loc[i, 'Shares to Buy'] = math.floor(position_size / dataframe.loc[i, 'Stock Price'])
print(dataframe)
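# --- Illustrative note (appended by the editor; not part of the original script) ---
# DataFrame.append is deprecated (and removed in pandas 2.x); the same rows can be
# built by collecting plain dicts and constructing the frame once.
def build_dataframe(rows):
    """rows: list of dicts keyed by dataframe_columns, e.g. produced in the batch loop."""
    return pandas.DataFrame(rows, columns=dataframe_columns)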
|
Dialvive/Python-Algorithmic-Trading
|
quantitative-momentum-strategy/quant_momentum_strategy.py
|
quant_momentum_strategy.py
|
py
| 3,190 |
python
|
en
|
code
| 0 |
github-code
|
50
|
16751400524
|
n1=float(input("Ingresa el primer numero: "))
n2=float(input("Ingresa el segundo numero: "))
print("Son iguales?",n1==n2)
print("Son diferentes?",n1!=n2)
##
cadena=input("Escribe una cadena")
lon=len(cadena)
print("es mayor que 3 y menor que 10?",3<lon<10)
##
NumeroMagico=12345679
NumeroUsuario=int(input("ingresa un numero entero entre 1 y 9: "))
NumeroUsuario*=9
NumeroMagico*=NumeroUsuario
print("Numero Magico: ",NumeroMagico)
##
comando="Salir"
if comando=="Entrar":
print("Entrar")
elif comando=="saluda":
print("hola")
elif comando=="Salir":
print("Salir")
else:
print("comando no reconocido")
pass
|
jesusRL96/python_curso
|
2.py
|
2.py
|
py
| 607 |
python
|
es
|
code
| 0 |
github-code
|
50
|
26241642628
|
# -*- coding: utf-8 -*-
import logging
import openai
from modelcache.adapter.adapter_query import adapt_query
from modelcache.adapter.adapter_insert import adapt_insert
from modelcache.adapter.adapter_remove import adapt_remove
class ChatCompletion(openai.ChatCompletion):
"""Openai ChatCompletion Wrapper"""
@classmethod
def create_query(cls, *args, **kwargs):
def cache_data_convert(cache_data, cache_query):
return construct_resp_from_cache(cache_data, cache_query)
try:
return adapt_query(
cache_data_convert,
*args,
**kwargs
)
except Exception as e:
return str(e)
@classmethod
def create_insert(cls, *args, **kwargs):
try:
return adapt_insert(
*args,
**kwargs
)
except Exception as e:
return str(e)
@classmethod
def create_remove(cls, *args, **kwargs):
try:
return adapt_remove(
*args,
**kwargs
)
except Exception as e:
logging.info('adapt_remove_e: {}'.format(e))
return str(e)
def construct_resp_from_cache(return_message, return_query):
return {
"modelcache": True,
"hitQuery": return_query,
"data": return_message,
"errorCode": 0
}
|
kpister/prompt-linter
|
data/scraping/repos/codefuse-ai~CodeFuse-ModelCache/modelcache~adapter~adapter.py
|
modelcache~adapter~adapter.py
|
py
| 1,417 |
python
|
en
|
code
| 0 |
github-code
|
50
|
73638395356
|
__author__ = 'Canon'
from PIL import Image
from StringIO import StringIO
def crop_save_img(filename, data, x1, y1, x2, y2):
imgIO = StringIO(data)
img = Image.open(imgIO)
croped_img = img.crop((x1, y1, x2, y2))
dot_pos = filename.rfind('.')
absfilename = filename[:dot_pos]
croped_img.save(absfilename+'.jpg', 'JPEG')
|
silentcanon/Anya
|
service/photo.py
|
photo.py
|
py
| 344 |
python
|
en
|
code
| 0 |
github-code
|
50
|
25507326314
|
# Python program to identify the identifier
# import re module
# re module provides support
# for regular expressions
import re
# Make a regular expression
# for identifying a valid identifier
# (anchored with $ so strings with trailing invalid characters are rejected)
regex = "^[A-Za-z_][A-Za-z0-9_]*$"
# Define a function for
# identifying valid identifier
def check(word):
keywords = [
"int",
"double",
"auto",
"break",
"case",
"char",
"const",
"continue",
"default",
"do",
"else",
"enum",
"extern",
"float",
"for",
"goto",
"if",
"long",
"register",
"return",
"short",
"signed",
"sizeof",
"static",
"struct",
"switch",
"typedef",
"union",
"unsigned",
"void",
"volatile",
"while",
]
# pass the regular expression
# and the string in search() method
if re.search(regex, word):
if word in keywords:
print("It is a c keyword")
else:
print("Valid Identifier")
else:
print("Invalid Identifier")
# Driver Code
if __name__ == "__main__":
character = input("Enter a string: ")
check(character)
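    # --- Illustrative checks (appended by the editor; not part of the original program) ---
    # Non-interactive spot checks of the same regex; they mirror what check() prints.
    for sample in ("while", "_count1", "9lives", "a b"):
        print(sample, "->", "matches" if re.search(regex, sample) else "does not match")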
|
roshanxshrestha/college-codes
|
4-TOC/5cidentifiers.py
|
5cidentifiers.py
|
py
| 1,249 |
python
|
en
|
code
| 0 |
github-code
|
50
|
7440046301
|
import cv2
import matplotlib.pyplot as plt
def plt_imshow(title="image", img=None, figsize=(8, 5)):
plt.figure(figsize=figsize)
if type(img) == list:
if type(title) == list:
titles = title
else:
titles = []
for i in range(len(img)):
titles.append(title)
for i in range(len(img)):
if len(img[i].shape) <= 2:
rgbImg = cv2.cvtColor(img[i], cv2.COLOR_GRAY2RGB)
else:
rgbImg = cv2.cvtColor(img[i], cv2.COLOR_BGR2RGB)
plt.subplot(1, len(img), i + 1), plt.imshow(rgbImg)
plt.title(titles[i])
plt.xticks([]), plt.yticks([])
plt.show()
else:
if len(img.shape) < 3:
rgbImg = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
else:
rgbImg = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
plt.imshow(rgbImg)
plt.title(title)
plt.xticks([]), plt.yticks([])
plt.show()
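# --- Illustrative usage (appended by the editor; not part of the original helper) ---
# Reading an image with OpenCV and showing it next to its grayscale version;
# 'sample.jpg' is a placeholder path.
if __name__ == "__main__":
    bgr = cv2.imread("sample.jpg")
    gray = cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY)
    plt_imshow(["original", "grayscale"], [bgr, gray])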
|
lee-lou2/ocr
|
utils/image_show.py
|
image_show.py
|
py
| 1,001 |
python
|
en
|
code
| 2 |
github-code
|
50
|
12042695517
|
from sklearn.datasets import fetch_20newsgroups
from collections import Counter
import re
import spacy
from tqdm import tqdm
import string
import numpy as np
def count_words(data):
nlp = spacy.load('en_core_web_sm')
counter = Counter()
for sentence in tqdm(data.data):
sentence = sentence.lower().translate(str.maketrans('', '', string.punctuation))
        sentence = re.sub(r"\W+", " ", sentence)  # raw string avoids the invalid escape warning
words = [token.lemma_ for token in nlp(sentence)
if not token.is_stop and not token.is_punct and len(token.text) > 3]
counter += Counter(words)
return counter
def count_nouns(data):
nlp = spacy.load('en_core_web_sm')
counter = Counter()
for sentence in tqdm(data.data):
sentence = sentence.lower().translate(str.maketrans('', '', string.punctuation))
        sentence = re.sub(r"\W+", " ", sentence)  # raw string avoids the invalid escape warning
nouns = [token.lemma_ for token in nlp(sentence)
if token.pos_ == 'NOUN' and len(token.text) > 3]
counter += Counter(nouns)
return counter
def keep_most_frequent(sci_count, politics_count, top_k):
words = set()
for word in dict(sci_count.most_common(top_k)).keys():
words.add(word)
for word in dict(politics_count.most_common(top_k)).keys():
words.add(word)
return list(words)
if __name__ == '__main__':
sci_data = fetch_20newsgroups(subset='train',
categories=['sci.crypt', 'sci.electronics','sci.space'],
remove=('headers', 'footers', 'quotes'),
)
politics_data = fetch_20newsgroups(subset='train',
categories=['talk.politics.guns', 'talk.politics.mideast', 'talk.politics.misc'],
remove=('headers', 'footers', 'quotes'),
)
# sci_count = count_words(sci_data)
# politics_count = count_words(politics_data)
sci_count = count_nouns(sci_data)
politics_count = count_nouns(politics_data)
words = keep_most_frequent(sci_count, politics_count, 100)
prob_xy = np.zeros((len(words), 2), dtype=np.float32)
prob_xy[:, 0] = np.array([sci_count[word] for word in words])
prob_xy[:, 1] = np.array([politics_count[word] for word in words])
prob_xy /= prob_xy.sum()
np.save("./prob_nouns_docs_full", prob_xy)
np.save("./nouns_full", np.array(words))
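# --- Illustrative follow-up (appended by the editor; not part of the original script) ---
# One plausible use of the saved joint table: the mutual information between the word
# identity and the {science, politics} label, estimated from prob_xy.
def mutual_information(pxy: np.ndarray) -> float:
    px = pxy.sum(axis=1, keepdims=True)  # P(word), shape (n_words, 1)
    py = pxy.sum(axis=0, keepdims=True)  # P(class), shape (1, 2)
    nz = pxy > 0
    return float((pxy[nz] * np.log(pxy[nz] / (px @ py)[nz])).sum())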
|
nviolante25/mva
|
neuroscience/project/src/preprocess.py
|
preprocess.py
|
py
| 2,460 |
python
|
en
|
code
| 0 |
github-code
|
50
|
3319870177
|
class LLQueue:
class Node:
def __init__(self, data=None, next=None, prev=None) -> None:
self.data = data if data != None else None
self.next = next if next != None else None
self.prev = prev if prev != None else None
def __init__(self, items=None) -> None:
# dummy nodes
self.head = self.Node()
self.tail = self.Node(prev=self.head)
self.head.next = self.tail
if items != None:
for e in items:
self.enQueue(e)
def enQueue(self, data):
        # append at the rear (tail) of the queue
rear = self.tail.prev
rear.next = self.Node(data, self.tail, rear)
self.tail.prev = rear.next
def deQueue(self):
if self.isEmpty():
return print("Queue is Empty")
else:
dq = self.head.next
self.head.next = dq.next
return dq.data
def isEmpty(self):
return self.head.next == self.tail
def peek(self):
return self.head.next.data
    def __len__(self):
        # walk from the first real node up to the tail sentinel, counting nodes;
        # the original loop never advanced t and therefore never terminated
        size = 0
        t = self.head.next
        while t != self.tail:
            size += 1
            t = t.next
        return size
def __str__(self) -> str:
if self.isEmpty():
return ''
else:
t = self.head.next
s = []
while t != self.tail:
s.append(str(t.data))
t = t.next
return ' '.join(s)
data = [10, 10,10,10,10,10,10,10,10,10,10,10,10,10,10]
q = LLQueue(data)
qq = LLQueue()
print(f"q = {q}")
print(f"qq = {qq}")
while not q.isEmpty():
qq.enQueue(q.deQueue())
print(f"q = {q}")
print(f"qq = {qq}")
while not qq.isEmpty():
q.enQueue(qq.deQueue())
print(f"qq = {qq}")
print(f"q = {q}")
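# --- Illustrative check (appended by the editor; not part of the original exercise) ---
# With the corrected __len__ above, the round-tripped queue still holds all 15 items.
print(f"len(q) = {len(q)}, front of q = {q.peek()}")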
|
erumtw/oods-in-practice
|
5_LinkedList/Untitled-1.py
|
Untitled-1.py
|
py
| 1,876 |
python
|
en
|
code
| 0 |
github-code
|
50
|
37454116649
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import re
import time
from pprint import pprint
from warnings import warn
from datetime import datetime
import itertools
import inspect
from bidi import algorithm as bidi
import matplotlib.dates as mdates
import matplotlib.ticker as ticker
from urllib.request import urlopen
from bs4 import BeautifulSoup
import general_utils.utils as utils
import Scrapper.ScrapperTools as st
############## MAIN FUNCTIONS ##############
def data_description(df):
sources = np.unique(df['source'])
n = len(sources)
f, axs = plt.subplots(2, n)
# counters per source
bar_per_source(axs[0,0], df, ylab='Articles\n(black = partially blocked contents)',
fun=lambda d: d.shape[0], title='\nArticles per Source')
bar_per_source(axs[0,1], df,
ylab='Words [x1000]\n(black = partially blocked contents)',
fun=lambda d: sum(len(l.split()) for t in d['text'].values
for l in t.split('\n')) / 1e3,
title='BASIC DATA DESCRIPTION\nWords per Source')
# remove blocked haaretz texts before next analysis
df = df[np.logical_not(df['blocked'])]
# sections per source
articles_per_section =\
[df[np.logical_and(df.source==src,df.section==sec)].shape[0]
for src in sources
for sec in np.unique(df[df.source==src].section)]
axs[0,2].pie([df[df.source==src].shape[0] for src in sources],
labels=sources, colors=utils.DEF_COLORS[:3], startangle=90,
frame=True, counterclock=False)
patches,_ = axs[0,2].pie(articles_per_section,
radius=0.75, startangle=90, counterclock=False)
centre_circle =\
plt.Circle((0, 0), 0.5, color='black', fc='white', linewidth=0)
axs[0,2].add_artist(centre_circle)
axs[0,2].set_title('\nSources and Sections', fontsize=14)
axs[0,2].legend(
patches, [bidi.get_display(sec) for src in sources
for sec in np.unique(df[df.source==src].section)],
ncol=5, loc='upper right', bbox_to_anchor=(1, 0.11), fontsize=8 )
# dates & authors
date_hist(axs[1,0], df)
author_concentration(axs[1,1], df)
top_authors(axs[1,2], df)
# draw
utils.draw()
def validity_tests(df):
sources = np.unique(df['source'])
blocked_contents = (1-check_haaretz_blocked_text(df[df['source'] == 'haaretz'])\
/ np.sum(df['source']=='haaretz')) * 100
df = df[np.logical_not(df['blocked'])]
n = {src: np.sum(df['source'] == src) for src in sources}
# get anomalies
bad_types = {src: verify_valid(df[df['source']==src],
{'date':datetime,'blocked':np.bool_})
for src in sources}
bad_lengths = {src: check_lengths(df[df['source']==src]) for src in sources}
bad_tokens = {src: verify_hebrew_words(df[df['source']==src]) for src in sources}
# plot anomalies
f, axs = plt.subplots(3, len(sources))
for i, src in enumerate(sources):
tit = ('DATA SANITY TESTS\n' if i==int(len(sources)/2) else '\n') +\
f'[{src:s}] Invalid field types' +\
(f'\n(out of {blocked_contents:.0f}% unblocked articles)'
if src=='haaretz' else '\n')
utils.barplot(axs[0, i], bad_types[src].keys(),
100 * np.array(tuple(bad_types[src].values())) / n[src],
vertical_xlabs=True, title=tit,
ylab='Having invalid type [%]', ylim=(0, 100))
sp = inspect.getfullargspec(check_lengths)
limits = list(itertools.chain.from_iterable(sp[3][0].values()))
for i, src in enumerate(sources):
utils.barplot(axs[1, i],
[a+f'\n({b:.0f} chars)' for a,b in
zip(bad_lengths[src].keys(),limits)],
100 * np.array(tuple(bad_lengths[src].values())) / n[src],
vertical_xlabs=True,
title=f'[{src:s}] Suspicious string-field lengths',
ylab='Having invalid length [%]', ylim=(0, 100))
utils.barplot(axs[2,0], sources, [100*(1-bad_tokens[src][0]) for src in sources],
xlab='Source', ylab='Words without numbers\nor Hebrew letters [%]')
utils.barplot(axs[2,1], sources, [100*(1-bad_tokens[src][1]) for src in sources],
xlab='Source', ylab='Words of length <=1 [%]')
for i in range(2,len(sources)):
utils.clean_figure(axs[2,i])
# draw
utils.draw()
def lengths_analysis(df, by=None):
f, axs = plt.subplots(3, 3)
# remove blocked haaretz texts before analysis
df = df[np.logical_not(df['blocked'])]
# count units
df['words_per_text'] = count_words(df.text)
df['words_per_title'] = count_words(df.title)
df['words_per_subtitle'] = count_words(df.subtitle)
df['characters_per_text'] = [len(s) for s in df.text]
df['sentences_per_text'] = count_sentences(df.text)
df['paragraphs_per_text'] = count_paragraphs(df.text)
df['characters_per_title'] = [len(s) for s in df.title]
df['unique_words_per_100_words'] =\
[100*len(np.unique(list(filter(None,re.split(' |\t|\n\r|\n',s))))) /
len(list(filter(None,re.split(' |\t|\n\r|\n',s))))
for s in df.text]
df['characters_per_word'] =\
[len(s)/len(list(filter(None,re.split(' |\t|\n\r|\n',s))))
for s in df.text]
# plot
columns = ('words_per_text', 'words_per_subtitle', 'words_per_title',
'characters_per_text', 'sentences_per_text', 'paragraphs_per_text',
'characters_per_title', 'unique_words_per_100_words',
'characters_per_word')
for i,col in enumerate(columns):
ax = axs[int(i/3),i%3]
bp = df.boxplot(ax=ax, column=col, by=['source']+([by] if by else []),
return_type='both', patch_artist=True)
colors = np.repeat(('blue','red','green'), int(len(bp[0][1]['boxes'])/3))
for box, color in zip(bp[0][1]['boxes'], colors):
box.set_facecolor(color)
ax.set_xlabel('')#'Source', fontsize=12)
ax.set_ylabel(col.replace('_',' ').capitalize(), fontsize=12)
if by:
ax.set_xticklabels(
[bidi.get_display(
t._text.replace('(', '').replace(')', '').replace(', ', '\n') )
for t in ax.get_xticklabels()],
rotation=90)
if i==0:
ax.set_title('TOKENS COUNT', fontsize=14)
else:
ax.set_title('')
# draw
utils.draw()
############## LOAD DATA ##############
def load_data(path,
sheets=('ynet', 'mako', 'haaretz'),
filter_str=('source','title','text'),
force_string=('title','subtitle','text','url','link_title',
'author','section','source'),
verbose=1):
df = st.load_data_frame(path, sheets=sheets, verbose=verbose)
for h in filter_str:
df = df[[(isinstance(t, str) and len(t)>0) for t in df[h].values]]
pd.options.mode.chained_assignment = None
for col in force_string:
df.loc[[not isinstance(s,str) for s in df[col]], col] = ''
df['blocked'] = [src=='haaretz' and txt.endswith('...')
for src,txt in zip(df['source'], df['text'])]
return df
############## DEDICATED FUNCTIONS ##############
def date_hist(ax, df, old_thresh=np.datetime64(datetime(2019,3,1))):
dts = [str(dt) if str(dt)=='NaT'
else str(dt)[:4] if dt<old_thresh else str(dt)[:10]
for dt in df.date]
dts_vals = sorted(list(set(dts)))
sources = np.unique(df.source)
date_count = {src: [np.sum(sc==src and dt==dt_val for sc,dt in zip(df.source,dts))
for dt_val in dts_vals]
for src in sources}
bottom = np.array([0 for _ in dts_vals])
for i,src in enumerate(sources):
utils.barplot(ax, dts_vals, date_count[src], bottom=bottom, title='Dates',
ylab='Articles', vertical_xlabs=True, label=src,
colors=('b','r','g')[i], plot_bottom=False)
bottom += date_count[src]
ax.legend(loc='upper left')
def author_concentration(ax, df):
n = 0
for k,src in enumerate(np.unique(df.source)):
# calculate
d = df[df.source==src]
authors = np.array(sorted(list(set([str(a) for a in d.author[d.author!='']]))))
arts_per_aut = np.array([np.sum(d.author==a) for a in authors])
ids = sorted(range(len(arts_per_aut)),
key=lambda i: arts_per_aut[i], reverse=True)
authors = authors[ids]
arts_per_aut = arts_per_aut[ids]
arts_per_aut = np.cumsum(arts_per_aut)
n = max(n,len(authors))
# plot
ax.plot(list(range(len(arts_per_aut))), 100*arts_per_aut/d.shape[0],
('b-','r-','g-')[k], label=src)
ax.set_title('Authors', fontsize=14)
ax.set_xlabel('K', fontsize=12)
ax.set_ylabel(
'Number of articles by most active K authors [%]\n'+
'(not reaching 100% due to unknown authors)', fontsize=12)
ax.set_xlim((0,n))
ax.set_ylim((0,100))
ax.legend()
def top_authors(ax, df, n=5):
sources = np.unique(df.source)
top_authors = {}
top_authors_arts = {}
for k,src in enumerate(sources):
# calculate
d = df[df.source==src]
authors = np.array(sorted(list(set([str(a) for a in d.author[d.author!='']]))))
arts_per_aut = np.array([np.sum(d.author==a) for a in authors])
ids = sorted(range(len(arts_per_aut)),
key=lambda i: arts_per_aut[i], reverse=True)
top_authors[src] = authors[ids[:n]]
top_authors_arts[src] = arts_per_aut[ids[:n]]
# plot
width = 1/(n+1)
for i in range(n):
rects = ax.bar(
np.arange(len(sources))+i*width,
[top_authors_arts[src][i] for src in sources],
width
)
for rect,src in zip(rects,sources):
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2., height+0.5,
f'{bidi.get_display(top_authors[src][i]):s}',
ha='center', va='bottom', rotation=90)
ax.set_ylabel('Articles', fontsize=12)
ax.set_xlabel('Top Authors', fontsize=12)
ax.set_xticks(np.arange(len(sources)) + n*width/2)
ax.set_xticklabels(sources)
def verify_valid(df, types=()):
'''
Count invalid entries - either empty (default) or invalid type.
:param df: data frame
:param types: dictionary of columns and their desired types
:return: count of invalid entries per column (as dictionary)
'''
bad = {}
for col in df.columns:
if col in types:
bad[col] = np.sum([not isinstance(x, types[col])
for x in df[col]])
else:
bad[col] = np.sum([not x for x in df[col]])
return bad
def check_lengths(df, lengths={'section': (2, 20), 'title': (10, 6 * 30),
'subtitle': (10, 6 * 70), 'date': (6, 12),
'author': (2, 30), 'text': (6 * 60, np.inf)}):
exceptional_length = {}
for l in lengths:
exceptional_length['short_'+l] =\
np.sum([isinstance(s,str) and len(s)<lengths[l][0] for s in df[l]])
exceptional_length['long_'+l] =\
np.sum([isinstance(s,str) and len(s)>lengths[l][1] for s in df[l]])
return exceptional_length
def verify_hebrew_words(df):
heb = np.mean(
[any('א'<=c<='ת' or '1'<=c<='9' for c in w)
for w in list(
filter(None, re.split(' | - |\t|\n\r|\n', ' '.join(df.text) )))
])
word = np.mean(
[len(w)>=2
for w in list(
filter(None, re.split(' | - |\t|\n\r|\n', ' '.join(df.text) )))
])
return (word, heb)
def check_haaretz_blocked_text(df):
assert (all(src == 'haaretz' for src in df['source']))
return np.sum([s.endswith('...') for s in df['text']])
############## GENERAL TOOLS ##############
def bar_per_source(ax, df, fun, ylab, title,
colors='black', bcolors=utils.DEF_COLORS):
sources = np.unique(df.source)
utils.barplot(
ax, sources,
[fun(df[np.logical_and(df.source==src,df.blocked)]) for src in sources],
bottom=
[fun(df[np.logical_and(df.source==src,np.logical_not(df.blocked))])
for src in sources],
ylab=ylab, title=title, colors=colors, bcolors=bcolors
)
def count_words(txt, sep=' | - |\t|\n\r|\n'):
return utils.count(txt,sep)
def count_sentences(txt, sep='\. |\.\n|\.\r'):
return utils.count(txt,sep)
def count_paragraphs(txt, sep='\n|\n\r'):
return utils.count(txt,sep)
############## MAIN ##############
if __name__ == "__main__":
df = load_data(r'..\Data\articles')
utils.info(df)
data_description(df.copy())
validity_tests(df.copy())
lengths_analysis(
df[df.section.isin(('חדשות','כלכלה','כסף','ספורט','אוכל'))].copy(),
by='section')
plt.show()
|
ido90/News
|
Analyzer/BasicAnalyzer.py
|
BasicAnalyzer.py
|
py
| 13,277 |
python
|
en
|
code
| 1 |
github-code
|
50
|
12272900226
|
import argparse
def load(filepath):
"""Loads data from file to database"""
try:
with open(filepath) as file_:
for line in file_:
print(line)
except FileNotFoundError as e:
print(f"File not found {e}")
def main():
parser = argparse.ArgumentParser(
description="Dunder Mifflin Rewards CLI",
epilog="Enjoy and use with caution!",
)
parser.add_argument(
"subcommand",
type=str,
help="the subcommand to run",
choices=("load", "show", "send"),
default="help"
)
parser.add_argument(
"filepath",
type=str,
help="file path to load from",
default=None
)
args = parser.parse_args()
    try:
        globals()[args.subcommand](args.filepath)
    except KeyError:
        parser.error(f"subcommand {args.subcommand!r} is not implemented yet")
if __name__ == "__main__":
main()
|
brunoades/dundie-rewards
|
dundie/__main__.py
|
__main__.py
|
py
| 836 |
python
|
en
|
code
| 0 |
github-code
|
50
|
28609719527
|
import os
from PyQt4 import QtGui, uic
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.QtWebKit import QWebView
from qgis.gui import *
import plotly
from plotly.graph_objs import Scatter, Box, Layout
FORM_CLASS, _ = uic.loadUiType(os.path.join(
os.path.dirname(__file__), 'ui/data_plot_dialog_base.ui'))
class DataPlotDialog(QtGui.QDialog, FORM_CLASS):
def __init__(self, parent=None):
"""Constructor."""
super(DataPlotDialog, self).__init__(parent)
# Set up the user interface from Designer.
# After setupUI you can access any designer object by doing
# self.<objectname>, and you can use autoconnect slots - see
# http://qt-project.org/doc/qt-4.8/designer-using-a-ui-file.html
# #widgets-and-dialogs-with-auto-connect
self.setupUi(self)
self.scatterButton.clicked.connect(self.ScatterPlot)
self.boxplotButton.clicked.connect(self.BoxPlot)
self.barplotButton.clicked.connect(self.BarPlot)
self.histogramplotButton.clicked.connect(self.HistogramPlot)
self.pieplotButton.clicked.connect(self.PiePlot)
self.scatter3DButton.clicked.connect(self.Scatter3DPlot)
self.distplotButton.clicked.connect(self.DistPlot)
self.polarButton.clicked.connect(self.PolarPlot)
# Each function is linked to the button and it imports and allows to run the code of that plot type
# Open scatter plot dialog
def ScatterPlot(self):
import plots.scatter_dialog as Scatter
dlg = Scatter.ScatterPlotDialog()
# show the dialog
dlg.show()
# Run the dialog event loop
dlg.exec_()
# Open boxplot dialog
def BoxPlot(self):
import plots.box_dialog as Box
dlg = Box.BoxPlotDialog()
# show the dialog
dlg.show()
# Run the dialog event loop
dlg.exec_()
# Open barplot dialog
def BarPlot(self):
import plots.bar_dialog as Bar
dlg = Bar.BarPlotDialog()
# show the dialog
dlg.show()
# Run the dialog event loop
dlg.exec_()
# Open histogram dialog
def HistogramPlot(self):
import plots.histogram_dialog as Histogram
dlg = Histogram.HistogramPlotDialog()
# show the dialog
dlg.show()
# Run the dialog event loop
dlg.exec_()
# Open pie plot dialog
def PiePlot(self):
import plots.pie_dialog as Pie
dlg = Pie.PiePlotDialog()
# show the dialog
dlg.show()
# Run the dialog event loop
dlg.exec_()
# Open scatter 3D plot dialog
def Scatter3DPlot(self):
import plots.scatter3D_dialog as Scatter3D
dlg = Scatter3D.Scatter3DPlotDialog()
# show the dialog
dlg.show()
# Run the dialog event loop
dlg.exec_()
# Open distplot dialog
def DistPlot(self):
import plots.distplot_dialog as Dist
dlg = Dist.DistPlotDialog()
# show the dialog
dlg.show()
# Run the dialog event loop
dlg.exec_()
# Open polar plot dialog
def PolarPlot(self):
import plots.polar_plot_dialog as Polar
dlg = Polar.PolarPlotDialog()
# show the dialog
dlg.show()
# Run the dialog event loop
dlg.exec_()
|
mdouchin/DataPlot
|
data_plot_dialog.py
|
data_plot_dialog.py
|
py
| 3,341 |
python
|
en
|
code
| 0 |
github-code
|
50
|
20296661811
|
import logging
from google.protobuf.json_format import MessageToDict, ParseDict
from serving.core.error_code import ExistBackendError, RunTimeException, CreateAndLoadModelError, ListOneBackendError, \
ReloadModelOnBackendError, TerminateBackendError
from serving.core import error_reply
from ..core import backend
from ..interface import backend_pb2 as be_pb2
from ..interface import backend_pb2_grpc as be_pb2_grpc
from ..interface import common_pb2 as c_pb2
class Backend(be_pb2_grpc.BackendServicer):
def ListSupportedType(self, request, context):
return be_pb2.SupportedReply(support="not implemented")
def ListRunningBackends(self, request, context):
try:
ret = backend.listAllBackends()
return ParseDict(ret, be_pb2.BackendList())
except Exception as e:
logging.exception(e)
return be_pb2.BackendList()
def InitializeBackend(self, request, context):
try:
ret = backend.initializeBackend(MessageToDict(request), passby_model=None)
return ParseDict(ret, c_pb2.ResultReply())
except CreateAndLoadModelError as e:
return error_reply.error_msg(c_pb2, CreateAndLoadModelError, exception=e)
except Exception as e:
logging.exception(e)
return error_reply.error_msg(c_pb2, RunTimeException,
msg="failed to initialize backend: {}".format(repr(e)))
def ListBackend(self, request, context):
try:
ret = backend.listOneBackend(MessageToDict(request))
return ParseDict(ret, be_pb2.BackendStatus())
except ListOneBackendError as e:
return error_reply.error_msg(c_pb2, ListOneBackendError, exception=e)
except Exception as e:
logging.exception(e)
return be_pb2.BackendStatus()
def ReloadModelOnBackend(self, request, context):
try:
ret = backend.reloadModelOnBackend(MessageToDict(request))
return ParseDict(ret, c_pb2.ResultReply())
except ReloadModelOnBackendError as e:
return error_reply.error_msg(c_pb2, ReloadModelOnBackendError, exception=e)
except Exception as e:
logging.exception(e)
return error_reply.error_msg(c_pb2, RunTimeException,
msg="failed to (re)load model on backend: {}".format(repr(e)))
def TerminateBackend(self, request, context):
try:
backend.terminateBackend(MessageToDict(request))
return c_pb2.ResultReply(code=0, msg="")
except TerminateBackendError as e:
return error_reply.error_msg(c_pb2, TerminateBackendError, exception=e)
except Exception as e:
logging.exception(e)
return error_reply.error_msg(c_pb2, RunTimeException,
msg="failed to terminate backend: {}".format(repr(e)))
def CreateAndLoadModel(self, request, context):
try:
ret = backend.createAndLoadModel(MessageToDict(request))
return ParseDict(ret, c_pb2.ResultReply())
except ExistBackendError as e:
return error_reply.error_msg(c_pb2, ExistBackendError, exception=e)
except Exception as e:
logging.exception(e)
return error_reply.error_msg(c_pb2, RunTimeException, msg=repr(e))
def CreateAndLoadModelV2(self, request, context):
try:
ret = backend.createAndLoadModelV2(MessageToDict(request))
return ParseDict(ret, c_pb2.ResultReply())
except ExistBackendError as e:
return error_reply.error_msg(c_pb2, ExistBackendError, exception=e)
except Exception as e:
logging.exception(e)
return error_reply.error_msg(c_pb2, RunTimeException, msg=repr(e))
|
JK-97/ai-serving
|
src/serving/handler/backend.py
|
backend.py
|
py
| 3,878 |
python
|
en
|
code
| 2 |
github-code
|
50
|
42015918298
|
import json
import urllib
from settings import SERVER_BASE_URL
HEADERS = {'Content-Type': 'application/json'}
def get_nodes(http_client, noun):
noun = urllib.quote_plus(noun)
result = http_client.get('''{0}/nodes?where={{"noun": "{1}"}}'''.format(SERVER_BASE_URL, noun), headers=HEADERS)
items = result.json()['_items']
return items[0]['_id'] if items else []
def post_nodes(http_client, noun, show):
payload = dict(
noun=noun,
show=show,
noun_usages=[]
)
result = http_client.post('{0}/nodes'.format(SERVER_BASE_URL), data=json.dumps(payload), headers=HEADERS)
return result
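# --- Illustrative usage (appended by the editor; not part of the original module) ---
# The helpers above only assume an object with requests-style get/post methods, so the
# requests module itself can serve as the http_client; the noun value is a placeholder.
if __name__ == "__main__":
    import requests
    node_id = get_nodes(requests, "bicycle")
    if not node_id:
        print(post_nodes(requests, "bicycle", show=True).status_code)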
|
mpmenne/global-hack-II
|
gh2insertworker/nodes.py
|
nodes.py
|
py
| 635 |
python
|
en
|
code
| 0 |
github-code
|
50
|
42596275000
|
import numpy as np
import dolfin
from dolfin import *
from mpi4py import MPI as pyMPI
comm = pyMPI.COMM_WORLD
mpi_comm = MPI.comm_world
#load mesh,boundaries and coefficients from file
mark = {"Internal":0, "wall": 1,"inlet": 2,"outlet": 3 }
#read mesh and boundaries from file
mesh = Mesh()
hdf = HDF5File(mesh.mpi_comm(), "mesh_boundaries.h5", "r")
hdf.read(mesh, "/mesh", False)
boundaries = MeshFunction('size_t', mesh, mesh.topology().dim() - 1)
hdf.read(boundaries, "/boundaries")
hdf.close()
#read viscosity coefficient from file
mu_ele = FunctionSpace(mesh, "DG", 0)
mu = Function(mu_ele)
hdf = HDF5File(mesh.mpi_comm(), "mesh_coeffs.h5", "r")
hdf.read(mu, "/mu")
hdf.close()
#output viscosity to paraview
XDMFFile(mpi_comm, "coeff_preview.xdmf").write_checkpoint(mu, "coeffs", 0)
#Define Taylor-Hood element and function space
P2 = VectorElement("Lagrange", mesh.ufl_cell(), 2)
P1 = FiniteElement("Lagrange", mesh.ufl_cell(), 1)
TH = P2 * P1
W = FunctionSpace(mesh, TH)
# Define variational problem
(u, p) = TrialFunctions(W)
(v, q) = TestFunctions(W)
ds = dolfin.Measure('ds',domain=mesh,subdomain_data=boundaries)
n = dolfin.FacetNormal(mesh)
#Define boundary condition
p_in = dolfin.Constant(1.0) # pressure inlet
p_out = dolfin.Constant(0.0) # pressure outlet
noslip = dolfin.Constant([0.0]*mesh.geometry().dim()) # no-slip wall
#Boundary conditions
# No-slip Dirichlet boundary condition for velocity
bc0 = DirichletBC(W.sub(0), noslip, boundaries, mark["wall"])
bcs = [bc0]
#Neumann BC
gNeumann = - p_in * inner(n, v) * ds(mark["inlet"]) + \
- p_out * inner(n, v) * ds(mark["outlet"])
#Body force
f = Constant([0.0]*mesh.geometry().dim())
a = mu*inner(grad(u), grad(v))*dx + div(v)*p*dx + q*div(u)*dx # The sign of the pressure has been flipped for symmetric system
L= inner(f, v)*dx + gNeumann
U = Function(W)
solve(a == L, U, bcs)
uh, ph = U.split()
#Output solution p,u to paraview
dolfin.XDMFFile("pressure.xdmf").write_checkpoint(ph, "p")
dolfin.XDMFFile("velocity.xdmf").write_checkpoint(uh, "u")
flux = [dolfin.assemble(dolfin.dot(uh, n)*ds(i)) for i in range(len(mark))]
if comm.Get_rank() == 0:
for key, value in mark.items():
print("Flux_%s= %.15lf"%(key,flux[value]))
|
BinWang0213/TemporaryProject
|
hdg_test/2d/cg_test.py
|
cg_test.py
|
py
| 2,253 |
python
|
en
|
code
| 1 |
github-code
|
50
|
24020092678
|
import os
import mne
import yaml
import json
import pickle
import numpy as np
import scipy as sp
import pandas as pd
import nibabel as nb
import matplotlib.pyplot as plt
from inspect import getsourcefile
from .acquisition import Acquisition2kHz, Acquisition10kHz
# quick function for coreg checking
def plot_overlay(image, compare, title, thresh=None):
"""Define a helper function for comparing plots."""
image = nb.orientations.apply_orientation(
np.asarray(image.dataobj), nb.orientations.axcodes2ornt(
nb.orientations.aff2axcodes(image.affine))).astype(np.float32)
compare = nb.orientations.apply_orientation(
np.asarray(compare.dataobj), nb.orientations.axcodes2ornt(
nb.orientations.aff2axcodes(compare.affine))).astype(np.float32)
if thresh is not None:
compare[compare < np.quantile(compare, thresh)] = np.nan
fig, axes = plt.subplots(1, 3, figsize=(12, 4))
fig.suptitle(title)
for i, ax in enumerate(axes):
ax.imshow(np.take(image, [image.shape[i] // 2], axis=i).squeeze().T,
cmap='gray')
ax.imshow(np.take(compare, [compare.shape[i] // 2],
axis=i).squeeze().T, cmap='gist_heat', alpha=0.5)
ax.invert_yaxis()
ax.axis('off')
fig.tight_layout()
class Patient:
"""Patient is a single patient, with electrodes in fixed positions,
containing multiple runs of sEEG data as well as pre-op T1w and post-op CT anatomical images
"""
# instance attributes
def __init__(self, subject, raw_dir, derivatives_dir):
"""[summary]
Args:
subject ([type]): [description]
raw_dir ([type]): [description]
derivatives_dir ([type]): [description]
"""
self.subject = subject
self.raw_dir = raw_dir
self.derivatives_dir = derivatives_dir
self.raw_func_dir = os.path.join(raw_dir, self.subject, 'func')
self.raw_anat_dir = os.path.join(raw_dir, self.subject, 'anat')
self.preprocessing_dir = os.path.join(
derivatives_dir, 'prep', self.subject, 'func')
self.localization_dir = os.path.join(
derivatives_dir, 'prep', self.subject, 'loc')
self.tfr_dir = os.path.join(
derivatives_dir, 'tfr', self.subject, 'func')
self.prf_dir = os.path.join(
derivatives_dir, 'pRF', self.subject, 'func')
self.subjects_dir = os.path.join(derivatives_dir, 'freesurfer')
for d in (self.preprocessing_dir, self.localization_dir, self.tfr_dir, self.prf_dir):
os.makedirs(d, exist_ok=True)
self.filepath = os.path.abspath(getsourcefile(lambda: 0))
with open(os.path.join(os.path.split(os.path.split(self.filepath)[0])[0], 'analysis', 'config.yml'), 'r') as yf:
self.analysis_settings = yaml.safe_load(yf)
def __repr__(self):
return f'Patient "{self.subject}" at "{self.raw_dir}", derivatives at {self.derivatives_dir}'
# instance method
def gather_acquisitions(self):
self.acquisitions = []
for run, acq in zip(range(1, self.analysis_settings['nr_runs']+1), self.analysis_settings['acquisition_types']):
if acq == '2kHz':
this_run = Acquisition2kHz(raw_dir=self.raw_func_dir,
run_nr=run,
patient=self,
task=self.analysis_settings['task'])
elif acq == '10kHz':
this_run = Acquisition10kHz(raw_dir=self.raw_func_dir,
run_nr=run,
patient=self,
task=self.analysis_settings['task'])
self.acquisitions.append(this_run)
def preprocess(self):
# 1. resample
# 2. notch filter
# 3. t0 at 't' press
# 4. tfr from t0 to end of last bar pass
for acq in self.acquisitions:
preprocessed_fn = acq.raw_filename.replace(
'bids', 'derivatives/prep').replace('.edf', '_ieeg.fif.gz')
acq.notch_resample_cut(
resample_frequency=self.analysis_settings['preprocessing']['downsample_frequency'],
notch_filter_frequencies=self.analysis_settings['preprocessing']['notch_frequencies'],
raw_file_name=None,
output_file_name=preprocessed_fn)
acq.preprocessed_fn = preprocessed_fn
tfr_fn = preprocessed_fn.replace(
'prep', 'tfr').replace('.fif.gz', '.h5')
acq.tfr(raw_file_name=acq.preprocessed_fn,
tfr_logspace_low=self.analysis_settings['preprocessing']['tfr_logspace_low'],
tfr_logspace_high=self.analysis_settings['preprocessing']['tfr_logspace_high'],
tfr_logspace_nr=self.analysis_settings['preprocessing']['tfr_logspace_nr'],
tfr_subsampling_factor=self.analysis_settings[
'preprocessing']['tfr_subsampling_factor'],
output_filename=tfr_fn)
acq.tfr_fn = tfr_fn
def find_electrode_positions(self, which_run=0, method='flirt'):
# 1. check if freesurfer has run
# 2. run MNE coregistration
# 3. save electrode positions in stereotypical format
# follow: https://mne.tools/stable/auto_tutorials/clinical/10_ieeg_localize.html#sphx-glr-auto-tutorials-clinical-10-ieeg-localize-py
# and: https://mne.tools/stable/auto_tutorials/clinical/20_seeg.html
"""
# on the server:
mkdir /tank/shared/tmp/prf-seeg &&
singularity run --cleanenv -B /tank -B /scratch /tank/shared/software/bids_apps/fmriprep-20.2.6.simg --skip_bids_validation --participant-label 002 --anat-only --nthreads 30 --omp-nthreads 30 --fs-license-file /tank/shared/software/freesurfer_dev/license.txt --notrack -w /tank/shared/tmp/prf-seeg /scratch/2021/prf-seeg/data/bids/ /scratch/2021/prf-seeg/data/derivatives participant
# and for manual/flirt reg:
mri_convert derivatives/freesurfer/sub-001/mri/T1.mgz derivatives/freesurfer/sub-001/mri/T1.nii.gz
flirt -interp sinc -searchcost mutualinfo -in bids/sub-001/anat/sub-001_CT.nii.gz -ref derivatives/freesurfer/sub-001/mri/T1.nii.gz -omat derivatives/prep/sub-001/loc/CT2T1w_flirt.mat -out derivatives/prep/sub-001/loc/CT2T1w_flirt.nii.gz
"""
CT_orig = nb.load(os.path.join(
self.raw_anat_dir, f'{self.subject}_CT.nii.gz'))
T1w_orig = nb.load(os.path.join(
self.raw_anat_dir, f'{self.subject}_T1w.nii.gz'))
T1w_FS = nb.load(os.path.join(self.subjects_dir,
self.subject, 'mri', 'T1.mgz'))
if method == 'mne':
self.reg_affine, _ = mne.transforms.compute_volume_registration(CT_orig, T1w_FS, pipeline='rigids')
self.CT_aligned = mne.transforms.apply_volume_registration(
CT_orig, T1w_FS, self.reg_affine)
elif method == 'flirt':
self.reg_affine = np.loadtxt(os.path.join(self.derivatives_dir, 'prep', 'loc', f'CT2T1w_flirt.mat'), delimiter='\t')
self.CT_aligned = nb.load(os.path.join(self.derivatives_dir, 'prep', 'loc', f'CT2T1w_flirt.nii.gz'))
else:
raise NotImplementedError(f'method {method} not implemented')
self.gather_acquisitions()
self.acquisitions[which_run]._read_raw()
self.subj_trans = mne.coreg.estimate_head_mri_t(subject=self.subject,
subjects_dir=self.subjects_dir)
self.subj_mni_fiducials = mne.coreg.get_mni_fiducials(subject=self.subject,
subjects_dir=self.subjects_dir)
self.subj_trans.save(os.path.join(self.localization_dir, 'subj-trans.fif'))
np.savetxt(os.path.join(self.localization_dir, f'reg_affine_CT2T1w_{method}.tsv'), self.reg_affine, delimiter='\t')
# self.CT_aligned.to_filename(os.path.join(self.localization_dir, 'CT2T1w.nii.gz'))
# with open(os.path.join(self.localization_dir, 'subj_mni_fiducials.pkl'), 'w') as f:
# pickle.dump(self.subj_mni_fiducials, f)
gui = mne.gui.locate_ieeg(self.acquisitions[which_run].raw.info,
self.subj_trans,
self.CT_aligned,
subject=self.subject,
subjects_dir=self.subjects_dir)
|
spinoza-centre/prf-seeg
|
prfseeg/patient.py
|
patient.py
|
py
| 8,674 |
python
|
en
|
code
| 2 |
github-code
|
50
|
32115803578
|
import csv
def setAmount():
data_string = []
with open('500_constituents_financial.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
check = False
for row in csv_reader:
if check:
data_string.append(row)
check = True
return data_string
def output_file(data_string):
csvfile = None
with open("demofile2.csv", "w") as csvfile:
A = [ "Symbol", "Name", "Sector", "Price", "Price/Earnings", "Dividend Yield", "Earnings/Share", "52 Week Low", "52 Week High", "Market Cap", "dag", "Price/Sales", "Price/Book", "SEC Filings"]
writer = csv.writer(csvfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
writer.writerow(A)
print(data_string)
writer.writerows(data_string)
# for i in data_string:
# temp = []
# for j in i:
# t = ""
# if j != ',':
# t += j
# else:
# temp.append(t)
# t = ""
# #print(temp)
# writer.writerows(temp)
print(123)
data = setAmount()
print(123)
output_file(data)
print(123)
|
MinTimmy/Data_Structure
|
First_semester/Demo1/all/test10.py
|
test10.py
|
py
| 1,176 |
python
|
en
|
code
| 0 |
github-code
|
50
|
38409639622
|
import boto3
import email
import json
import urllib.parse
from datetime import datetime
from sms_spam_classifier_utilities import one_hot_encode
from sms_spam_classifier_utilities import vectorize_sequences
region = 'us-east-1'
s3_client = boto3.client('s3')
sagemaker_client = boto3.client('runtime.sagemaker')
ses_client = boto3.client('ses', region_name=region)
def lambda_handler(event, context):
# Get the object from the event
bucket = event['Records'][0]['s3']['bucket']['name']
print(event['Records'][0]['s3']['bucket']['name'])
key = urllib.parse.unquote_plus(event['Records'][0]['s3']['object']['key'], encoding='utf-8')
try:
response = s3_client.get_object(Bucket=bucket, Key=key)
body = response['Body'].read().decode('utf-8')
# Extract contents from email
email_contents = email.message_from_string(body)
email_datetime = email_contents.get('Date')
email_datetime = email_datetime[:email_datetime.find('-')-1]
dt = datetime.strptime(email_datetime, '%a, %d %b %Y %H:%M:%S')
email_date = str(dt.date())
email_time = str(dt.time())
email_recipient = email_contents.get('To')
email_sender = email_contents.get('From')
email_sender = email_sender[email_sender.find('<')+1:-1]
email_subject = email_contents.get('Subject')
email_body = ''
if email_contents.is_multipart():
for payload in email_contents.get_payload():
if payload.get_content_type() == 'text/plain':
email_body = payload.get_payload()
else:
email_body = email_contents.get_payload()
email_body = email_body.replace("\r", " ").replace("\n", " ")
# Prepare input for sagemaker endpoint
endpoint_name = 'sms-spam-classifier-mxnet-2022-11-17-23-51-03-470'
detector_input = [email_body]
vocabulary_length = 9013
one_hot_detector_input = one_hot_encode(detector_input, vocabulary_length)
encoded_detector_input = vectorize_sequences(one_hot_detector_input, vocabulary_length)
detector_input = json.dumps(encoded_detector_input.tolist())
# Get a response from the sagemaker endpoint and decode it
response = sagemaker_client.invoke_endpoint(EndpointName=endpoint_name, ContentType='application/json', Body=detector_input)
results = response['Body'].read().decode('utf-8')
results_json = json.loads("" + results + "")
# Get the class and confidence percentage
if(results_json['predicted_label'][0][0]==1.0):
spam_class = 'SPAM'
else:
spam_class = 'HAM'
confidence_score = str(results_json['predicted_probability'][0][0]*100)
confidence_score = confidence_score.split('.')[0]
# Send the email through SES
SES_email_body = email_body
if len(SES_email_body) > 240:
SES_email_body = SES_email_body[:240]
SES_email_line1 = 'We received your email sent on ' + email_date + ' at ' + email_time + ' with the subject ' + email_subject + '.\n\n'
SES_email_line2 = 'Here is a 240 character sample of the email body:\n'
SES_email_line3 = SES_email_body + '\n\n'
SES_email_line4 = 'The email was categorized as ' + spam_class + ' with a ' + confidence_score + '% confidence.'
SES_email = SES_email_line1 + SES_email_line2 + SES_email_line3 + SES_email_line4
charset = "UTF-8"
response = ses_client.send_email(
Destination={
"ToAddresses": [
email_sender,
],
},
Message={
"Body": {
"Text": {
"Charset": charset,
"Data": SES_email,
}
},
"Subject": {
"Charset": charset,
"Data": "Spam Detector Results",
},
},
Source="[email protected]",
)
return "success"
except Exception as e:
print(e)
raise e
|
reganbragg/cloud-hw3-ML-spam-detector
|
Lambda/lambda_function.py
|
lambda_function.py
|
py
| 4,305 |
python
|
en
|
code
| 1 |
github-code
|
50
|
11017700567
|
"""Our main visual theme"""
import os
import serge.blocks.themes
W, H = 800, 600
theme = serge.blocks.themes.Manager()
theme.load({
'main': ('', {
# Main properties
'screen-height': H,
'screen-width': W,
'screen-title': 'bomberman',
'screen-icon-filename': 'icon.png',
'screenshot-size': (0, 0, W, H),
'start-level': 4,
# Ending screen
'end-colour': (255, 255, 0),
'end-size': 20,
'end-font': 'main',
'end-position': (W / 2, H / 2),
'end-icon-position': (W / 2, H / 2 - 50),
'pre-stop-pause': 1.5,
'tween-world-time': 0.3,
# Mute button
'mute-button-alpha': 0.4,
'mute-button-position': (W - 30, H - 30),
# FPS display
'fps-x': 50,
'fps-y': H-30,
'fps-colour': (255, 255, 0),
'fps-size': 12,
# Screenshot interval (s)
'auto-screenshots': False,
'screenshot-path': os.path.join('..', '..', '..', 'sandbox', 'screenshots'),
'screenshot-interval': 5,
# Simulation properties
'simulation-on': False,
'simulation-rtf': 10,
'simulation-fps': 1,
'simulation-auto-restart': False,
'store-action-replay': True,
# Board properties
'board-size': (19, 19),
'board-cell-size': (20, 20),
'board-blanks': ['tiles-4', ],
'board-destructible': ['tiles-2'],
'board-position': (W / 2, H / 2),
'board-replay-rectangle': (W / 2 - 200, H / 2 - 200, 400, 400),
'board-replay-max-frames': 500,
'footstep-h-sprite': 'tiles-15',
'footstep-v-sprite': 'tiles-10',
# Sprite names
'bomb-sprite': 'tiles-11',
'explosion-sprite': 'tiles-14',
'gore-sprite': 'tiles-9',
'number-gore': 4,
# Bomb properties
'bomb-fuse-time': 2,
'explosion-time': 1,
'explosion-propagation-time': 0.1,
'explosion-propagation-distance': 3,
'after-explosion-sprite': 'tiles-4',
'explosion-sprites': ['tiles-%d' % i for i in range(21, 26)],
'explosion-velocity': (10, 20),
'explosion-angular': 0,
'explosion-number': 6,
'explosion-range': 20,
'bomb-blast-sprite': 'tiles-14',
'number-bomb-blasts': 4,
# Block properties
'block-sprites': ['tiles-%d' % i for i in range(26, 31)],
'block-number': 6,
'block-velocity': (180, 190),
'block-angular-velocity': (-500, 500),
'block-range': 400,
'block-gravity': (0, +1000),
# Result properties
'result-colour': (255, 255, 0),
'result-font-size': 42,
'result-position': (W / 2, H / 2 - 40),
'result-font': 'main',
# Result properties
'result-reason-colour': (255, 255, 0),
'result-reason-font-size': 36,
'result-reason-position': (W / 2, H / 2),
'result-reason-font': 'main',
# Next properties
'next-colour': (255, 255, 0),
'next-font-size': 25,
'next-position': (W / 2, H / 2 + 80),
'next-font': 'main',
# Flag display properties
'flag-status-position': (3 * W / 4 + 80, 55),
'flag-time-limit': 20,
'flag-sprite-name': 'tiles-34',
'flag-zoom': 2.0,
'flag-position-width': 75,
'flag-position-offset-x': 10,
'flag-position-offset-y': -4,
# Player properties
'player-colour': (255, 255, 0),
'player-highlight-colour': (255, 0, 0),
'player-highlight-time': 1100,
'player-font-size': 25,
'fixed-font-width': 20,
'player-position': (W / 6 - 32, 16),
'player-heart-position': (W / 6 + 46, 16),
'player-font': 'main',
'player-move-interval': 0.15,
'ai-position': (W / 6 - 32, 70),
'ai-heart-position': (W / 6 + 46, 70),
'score-panel-position': (W / 6 - 22, 55),
# AI properties
'ai-move-interval': 0.25,
'ai-wait-cycles': 0,
'ai-bomb-probability': 0.5,
'ai-squares-view': 2,
'ai-look-ahead': 2,
'all-ai': False,
'ai-show-destinations': True,
'ai-show-unsafe': True,
'ai-unsafe-colour': (0, 0, 0, 100),
'ai-strategy-flip-probability': 0.05,
# Properties of the debug ui
'ai-1-colour': (255, 0, 0),
'ai-1-destination-colour': (255, 0, 0, 100),
'ai-1-font-size': 12,
'ai-1-position': (50, 200),
'ai-1-font': 'DEFAULT',
'ai-2-colour': (0, 255, 0),
'ai-2-destination-colour': (0, 255, 0, 100),
'ai-2-font-size': 12,
'ai-2-position': (50, 250),
'ai-2-font': 'DEFAULT',
# Smack talk properties
'smack-icon-position': (120, 520),
'smack-bubble-position': (W / 2, 520),
'smack-text-colour': (0, 0, 0),
'smack-text-font-size': 15,
'smack-text-position': (W / 2, 530),
'smack-text-font': 'main',
'smack-hide-interval': 5,
'smack-line-length': 30,
'smack-delay': 3,
'smack-offset': 5,
# Death animations
'result-start-y': -100,
'result-end-y': H / 2 - 40,
'result-duration': 300.0,
'result-delay': 100.0,
'result-reason-start-y': 650,
'result-reason-end-y': H / 2,
'result-reason-duration': 300.0,
'result-reason-delay': 100.0,
'next-start-x': -1000,
'next-end-x': W / 2,
'next-duration': 350.0,
'next-delay': 500.0,
'chunk-number': 20,
'chunk-velocity': (400, 600),
'chunk-angular-velocity': (-500, 500),
'chunk-gravity': (0, +1000),
'chunk-sprites': ['tiles-%d' % i for i in range(16, 21)],
# Random items creation
# 'random-item-low-time': 10,
# 'random-item-high-time': 15,
'random-item-names': ['Bomb', 'Heart', 'Bomb', 'RedHeart', 'MultiBomb', 'Flag'],
'random-item-low-time': 2,
'random-item-high-time': 4,
# 'random-item-names': ['MultiBomb'],
'random-item-tween-time': 500.0,
# Gift box
'gift-box-position': (W / 2, 55),
'gift-box-sprite-position': (-20, -20),
'gift-box-sprite-zoom': 3.0,
'gift-box-cycle-time': 0.5,
'gift-box-cycles': (5, 10),
'initial-number-hearts': 3,
# Movement
'default-movement-weight': 10.0,
'heart-movement-weight': 0.1,
'flag-movement-weight': 0.01,
'heart-grab-distance': 10,
'flag-grab-distance': 20,
}),
'start-screen': ('sub-screen', {
# Version text
'version-position': (W/2, H-10),
'version-colour': (50, 50, 50),
'version-font-size': 12,
# Start button
'start-position': (W/2, H-120),
'start-colour': (255, 255, 0, 255),
'start-font-size': 48,
# Help button
'help-position': (W-150, H-40),
'help-colour': (0, 255, 0, 255),
'help-font-size': 24,
# Credits button
'credits-position': (150, H-40),
'credits-colour': (0, 255, 0, 255),
'credits-font-size': 24,
# Achievements button
'achievements-position': (W/2, H-40),
'achievements-colour': (0, 255, 0, 255),
'achievements-font-size': 24,
# Volume
'volume': 0.1,
# Face
'face-position': (W / 2 - 150, H / 2 - 28),
'face-probability': 0.4,
# Smack talk properties
'smack-icon-position': (-120, -520), # Hide off screen
'smack-bubble-position': (W / 2 + 150, H / 2 - 28),
'smack-text-position': (W / 2 + 150, H / 2 - 28),
'smack-delay': 1,
'smack-offset': 3,
# Appearing item sprite
'item-start-position': (-150, H / 2 - 50),
'item-end-position': (170, H / 2 - 50),
'item-zoom': 5,
'item-animation-time': 750,
}),
'sub-screen': ('main', {
# Logo and title
'logo-position': (W/2, 60),
'title': 'A Bomberman Clone',
'title-position': (W/2, 120),
'title-colour': (213, 47, 41),
'title-font-size': 25,
'title-font': 'main',
# Back button
'back-colour': (255, 255, 0, 255),
'back-font-size': 24,
}),
'help-screen': ('sub-screen', {
# Help text
'text-position': (W/2, H/2),
'back-position': (W-100, H-40),
# Key text
'keys-title-position': (W/2, 180),
'keys-title-colour': (212, 196, 148),
'keys-title-font-size': 25,
'keys-title-font': 'main',
# Music text
'music-title-position': (W/2, 450),
'music-title-colour': (212, 196, 148),
'music-title-font-size': 25,
'music-title-font': 'main',
# Volume
'vol-down-position': (W / 2 - 80, 500),
'vol-up-position': (W / 2 + 80, 500),
'vol-position': (W / 2, 500),
'volume-colour': (212, 196, 148),
'volume-font': 'main',
'volume-size': 40,
'vol-change-amount': 10,
}),
'level-screen': ('start-screen', {
# Help text
'text-position': (W/2, H/2),
# Grid properties
'grid-size': (5, 1),
'grid-width': 650,
'grid-height': 200,
'grid-position': (W / 2, H / 2 - 70),
# Title properties
'title-colour': (255, 255, 0, 255),
'title-font-size': 15,
'title-font': 'main',
'title-offset-y': 70,
# Random level button
'random-level-position': (W / 2, H / 2 + 100),
# Back button
'back-position': (W/2, H-40),
# Resume button
'resume-position': (W/2, H-90),
}),
'random-level-screen': ('sub-screen', {
# Back button
'back-position': (W/2, H-40),
# Resume button
'resume-position': (W/2, H-90),
# Generate button
'generate-position': (680, H/2 - 20),
# Select button
'select-position': (680, H/2 + 40),
# Size menu
'size-width': 160,
'size-height': 140,
'size-item-width': 140,
'size-item-height': 40,
'size-position': (100, H/2 - 40),
'size-font-size': 18,
# Space menu
'space-width': 160,
'space-height': 100,
'space-position': (100, H/2 + 130),
'space-item-width': 150,
'space-item-height': 40,
'space-font-size': 18,
# Menu properties
'menu-on-colour': (89, 81, 77),
'menu-off-colour': (89, 81, 77, 10),
'menu-font-colour': (255, 255, 0),
'menu-mouse-over-colour': (162, 146, 114),
'menu-font-size': 18,
'menu-font': 'main',
# Level preview
'level-preview-width': 300,
'level-preview-height': 300,
'level-preview-position': (W/2, H/2 + 35),
# Size options
'size-options': {
'Small': (11, 11),
'Medium': (15, 15),
'Large': (19, 19),
},
# Space options
'space-options': {
'Open': (1, 1),
'Blocked': (10, 4),
},
}),
'action-replay-screen': ('start-screen', {
# Transport bar
'bar-width': 660,
'bar-height': 80,
'bar-background-colour': (0, 0, 0, 150),
'bar-position': (W / 2, H - 80),
# Replay display
'replay-position': (W / 2, H / 2),
'replay-slow-fps': 15,
'replay-normal-fps': 50,
'replay-fast-fps': 150,
'replay-width': 400,
'replay-height': 400,
# Current frame display
'current-colour': (255, 255, 0, 50),
'current-font-size': 12,
'current-position': (W / 2, 50),
'current-font': 'main',
# Slider properties
'slider-back-position': (W / 2, 50)
}),
'credits-screen': ('sub-screen', {
# Author
'author-title-colour': (148, 8, 42),
'author-title-font-size': 24,
'author-title-position': (W/2, 170),
'author-colour': (212, 196, 148),
'author-font-size': 32,
'author-position': (W/2, 210),
'url-colour': (156, 140, 116),
'url-font-size': 14,
'url-position': (W/2, 230),
# Music
'music-title1-colour': (148, 8, 42),
'music-title1-font-size': 20,
'music-title1-position': (W/2, 260),
'music-title2-colour': (148, 8, 42),
'music-title2-font-size': 18,
'music-title2-position': (W/2, 280),
'music-colour': (212, 196, 148),
'music-font-size': 16,
'music-position': (W/2, 300),
# Sound
'sound-title1-colour': (148, 8, 42),
'sound-title1-font-size': 20,
'sound-title1-position': (W*3/4, 420),
'sound-title2-colour': (212, 196, 148),
'sound-title2-font-size': 18,
'sound-title2-position': (W*3/4, 440),
# Built using
'built-title-colour': (148, 8, 42),
'built-title-font-size': 20,
'built-title-position': (W/4, 420),
'built-colour': (212, 196, 148),
'built-font-size': 16,
'built-position': (W/4, 440),
# Engine
'engine-title-colour': (148, 8, 42),
'engine-title-font-size': 20,
'engine-title-position': (W/4, 480),
'engine-colour': (212, 196, 148),
'engine-font-size': 16,
'engine-position': (W/4, 500),
# Engine version
'engine-version-colour': (156, 140, 116),
'engine-version-font-size': 10,
'engine-version-position': (W/4, 520),
# Fonts
'font-title1-colour': (148, 8, 42),
'font-title1-font-size': 20,
'font-title1-position': (W*3/4, 480),
'font-title2-colour': (148, 8, 42),
'font-title2-font-size': 18,
'font-title2-position': (W*3/4, 500),
'font-colour': (212, 196, 148),
'font-font-size': 16,
'font-position': (W*3/4, 520),
#
'back-position': (100, H-40),
}),
'achievements': ('main', {
# Properties of the achievements system
'banner-duration': 5,
'banner-position': (175, 525),
'banner-size': (300, 50),
'banner-backcolour': (0, 0, 0, 50),
'banner-font-colour': (255, 255, 0, 255),
'banner-name-size': 14,
'banner-description-size': 8,
'banner-name-position': (-100, -18),
'banner-description-position': (-100, 0),
'banner-font-name': 'DEFAULT',
'banner-graphic-position': (-125, 0),
'time-colour': (255, 255, 255, 100),
'time-size': 10,
'time-position': (-100, 24),
'logo-position': (400, 50),
'screen-background-sprite': None,
'screen-background-position': (400, 300),
'grid-size': (2, 5),
'grid-width': 800,
'grid-height': 400,
'grid-position': (400, 320),
'back-colour': (255, 255, 255, 255),
'back-font-size': 20,
'back-font-name': 'DEFAULT',
'back-position': (400, 560),
'back-sound': 'click',
}),
'__default__': 'main',
})
G = theme.getProperty
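# Illustrative sketch only: the G alias above is presumably used elsewhere in the
# game to look up values by name, e.g. as below. The assumption that getProperty
# takes just the property name (with '__default__' routing lookups to the 'main'
# theme) comes from how this file is written, not from the serge documentation.
def _example_theme_lookup():
    return G('screen-width'), G('screen-height'), G('board-cell-size')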
|
IndexErrorCoders/PygamesCompilation
|
IE_games_2/bombr-0.3/game/theme.py
|
theme.py
|
py
| 14,528 |
python
|
en
|
code
| 2 |
github-code
|
50
|
19524836247
|
from urllib import request
import http.cookiejar
import re
def getXsrf(data):
cer = re.compile('name=\"_xsrf\" value=\"(.*)\"', flags=0)
strlist = cer.findall(data)
return strlist[0]
def makeMyOpener(head={
'Connection': 'Keep-Alive',
'Accept': 'text/html, application/xhtml+xml, */*',
'Accept-Language': 'en-US,en;q=0.8,zh-Hans-CN;q=0.5,zh-Hans;q=0.3',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko'
}):
cookie_jar = http.cookiejar.CookieJar()
opener = request.build_opener(request.HTTPCookieProcessor(cookie_jar))
header = []
for key, value in head.items():
elem = (key, value)
header.append(elem)
opener.addheaders = header
return opener
opener = makeMyOpener()
uop = opener.open('http://www.zhihu.com', timeout=2)
data = uop.read().decode()
xsrf = getXsrf(data)
print("xsrf:" + xsrf)
|
minghzhang007/python-learn
|
pythondemo1/crawler/demo4.py
|
demo4.py
|
py
| 900 |
python
|
en
|
code
| 0 |
github-code
|
50
|
19942848799
|
from socket import MSG_CONFIRM
from nonebot.adapters.onebot.v11 import Bot, MessageEvent, MessageSegment
from nonebot import on_command
from jmcomic import *
from jmcomic.jm_option import *
jm = on_command("jm", aliases={"JM"}, priority=2, block=True)
search = on_command("search", priority=2, block=True)
jm_option = create_option('/root/nonebot/kou/config/option.yml')
@jm.handle()
async def jm_sender(bot: Bot, event: MessageEvent):
plain_msg = str(event.get_message())
uid = plain_msg.split(' ')[1]
try:
await jm.send(message='bot加载中')
msg = search_jm_album(uid)
print(msg)
# await pixiv.send(message=msg)
gid = event.get_session_id()
gid = int(gid[6:15])
print(gid)
# for i in range(len(msg)):
await bot.send_group_forward_msg(group_id=gid, messages=msg)
except Exception as e:
print(e)
await jm.send(message=str(e))
await jm.send(message='发送失败')
@search.handle()
async def jm_search(bot: Bot, event: MessageEvent):
plain_msg = str(event.get_message())
keyword = plain_msg.split(' ')[1]
try:
await jm.send(message='bot搜索中')
msg = search_jm_album_by_keyword(keyword)
print(msg)
gid = event.get_session_id()
gid = int(gid[6:15])
print(gid)
await bot.send_group_forward_msg(group_id=gid, messages=msg)
except Exception as e:
print(e)
await jm.send(message=str(e))
await jm.send(message='发送失败')
def search_jm_album_by_keyword(keyword):
uin = 425831926
name = '雪豹'
client = jm_option.build_jm_client()
search_page: JmSearchPage = client.search_album(
search_query=keyword, page=1)
forward_msg = []
for album_id, title in search_page:
msg = {
"type": "node",
"data": {
"name": name,
"uin": uin,
"content": f'[{album_id}]: {title}'
}
}
forward_msg.append(msg)
return forward_msg
def search_jm_album(id):
uin = 425831926
name = '雪豹'
client = jm_option.build_jm_client()
search_page = client.search_album(search_query=id)
album: JmAlbumDetail = search_page.single_album
title = album.title
author = album.author
tags = album.keywords
page_count = album.page_count
msg = f'title: {title}\nauthor: {author}, page:{page_count}\ntags: {tags}\n'
print(msg)
forward_msg = [
{
"type": "node",
"data": {
"name": name,
"uin": uin,
"content": msg
}
}
]
def download(p):
p: JmPhotoDetail = client.get_photo_detail(p.photo_id, False)
client.ensure_photo_can_use(p)
decode_image = jm_option.download_image_decode
img_save_path = "/root/jm/download/tmp.jpg"
client.download_by_image_detail(p[0], img_save_path, decode_image)
msg = {
"type": "node",
"data": {
"name": name,
"uin": uin,
"content": MessageSegment.image("file:///root/jm/download/tmp.jpg")
}
}
forward_msg.append(msg)
multi_thread_launcher(
iter_objs=album,
apply_each_obj_func=download
)
return forward_msg
|
kyoshiki214/nonebot_kou
|
src/plugins/jm/__init__.py
|
__init__.py
|
py
| 3,410 |
python
|
en
|
code
| 0 |
github-code
|
50
|
27386022394
|
# sort() method = used with lists
# sorted() function = used with iterables
student = ["Squidward", "Sandy", "Patrick", "Spongebob", "Mr. Krabs"]
#Only works with lists not tuples
student.sort() #alphabetical order. student.sort(reverse=True) will do reverse alphabetical order
for i in student:
print (i)
print()
#sorted() returns a new sorted list and works on any iterable
students1 = ["Squidward", "Sandy", "Patrick", "Spongebob", "Mr. Krabs"]
sorted_students1 = sorted(students1) #sorted function. Putting sorted(students1, reverse=True) will give reverse order
for i in sorted_students1:
print(i)
print()
#list of tuples
students2 = [("Squidward", "F", 60),
("Sandy", "A", 33),
("Patrick","D", 36),
("Spongebob","B", 20),
("Mr.Krabs","C", 78)]
#Alphabetical order by first object in tuple
students2.sort()
for i in students2:
print(i)
print()
#Sort by grade (second element of each tuple)
grade = lambda grades:grades[1]
students2.sort(key=grade) # sorts current list. Can use students2.sort(key=grade, reverse=True) to reverse alphabetical order
for i in students2:
print(i)
print()
#Sort by age (third element of each tuple)
age = lambda ages:ages[2]
students2.sort(key=age) # sorts current list. Can use students2.sort(key=age, reverse=True) to reverse alphabetical order
for i in students2:
print(i)
print()
#for tuples
people = (("Bob","Driver",22),
("Fred","Unemployed",18),
("Ted",'Student',15),
("Ben","Clerk",25))
employment_status = lambda job:job[1]
sorted_Employment_status = sorted(people, key=employment_status)
for i in sorted_Employment_status:
print (i)
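# Extra illustration (not part of the original lesson): operator.itemgetter is a
# common alternative to the key lambdas used above.
from operator import itemgetter

students3 = [("Squidward", "F", 60), ("Sandy", "A", 33), ("Patrick", "D", 36)]
for i in sorted(students3, key=itemgetter(2)):  # sort by the numeric third field
    print(i)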
|
18gwoo/Python-Practice
|
BroCode52_Python_Sort.py
|
BroCode52_Python_Sort.py
|
py
| 1,597 |
python
|
en
|
code
| 0 |
github-code
|
50
|
17403548098
|
#!/home/mark/phd/venv/bin/python
# coding: utf-8
"""Function to persist experiment results."""
from typing import Dict
from typing import Any
from typing import Optional
import torch as th
from hashlib import sha256
from base64 import b64encode
from os import makedirs
from os.path import isdir
from os.path import basename
from os.path import getctime
from json import dumps
from json import dump
from glob import glob
Params = Dict[str, Any]
HASH_LEN: int = 10
def save_tensor(t: th.Tensor, path: str, params: Optional[Params]=None, overwrite:bool=False) -> None:
"""Save a tensor.
Parameters:
t: Tensor to save
path: Place to save tensor to (includes filename)
overwrite: Overwrite the last save.
"""
path = get_path_with_hash(path, params)
file_name: int = 0
if isdir(path):
last_file = get_last_file(path)
file_name = int(basename(last_file)) + int(not overwrite)
else:
makedirs(path)
with open(f'{path}/params.json', 'w') as f:
dump(params, f)
th.save(t, f'{path}/{file_name}')
def load_tensor(path: str, params: Optional[Params]) -> th.Tensor:
"""Load all the tensors into a stack."""
path = get_path_with_hash(path, params)
files = glob(f'{path}/*')
return th.stack([th.tensor(th.load(f)) for f in files])
def load_last_tensor(path: str, params: Optional[Params]) -> Optional[th.Tensor]:
"""Load only the last saved tensor."""
path = get_path_with_hash(path, params)
f = get_last_file(path)
return th.load(f) if f else None
def get_last_file(path: str) -> Optional[str]:
try:
path = max(glob(f'{path}/*'), key=getctime)
except ValueError:
path = None
return path
def get_path_with_hash(path: str, params: Optional[Params]) -> str:
if params is not None:
h = hash_p(params)
return f'{path}_{h}' if params else path
def hash_p(params: Params) -> str:
s: str = dumps(params)
h: str = sha256(s.encode()).hexdigest()
b64: bytes = b64encode(h.encode())
return b64.decode('utf-8')[:HASH_LEN]
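# Minimal usage sketch (illustrative only): save a checkpoint of a tensor under a
# parameter hash, then reload the most recent one. The './results/run' prefix and
# the example parameters are made-up values, not paths used by the project.
if __name__ == '__main__':
    example_params = {'lr': 0.01, 'seed': 0}
    save_tensor(th.randn(3, 3), './results/run', example_params)  # written as file '0'
    latest = load_last_tensor('./results/run', example_params)
    print(latest.shape)  # torch.Size([3, 3])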
|
MarkTuddenham/pytorch_research
|
pytorch_research/persist.py
|
persist.py
|
py
| 2,139 |
python
|
en
|
code
| 0 |
github-code
|
50
|
42577337398
|
def flatten(nested):
    return aflatten(nested, [])
def aflatten(nested, a):
    # Recursively walk the list: sub-lists of any length are flattened,
    # everything else is appended as a leaf value.
    for i in nested:
        if isinstance(i, list):
            a = aflatten(i, a)
        else:
            a.append(i)
    return a
print(flatten([[1,1],2,[1,1]]))
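# Alternative sketch (illustration only): a recursive generator avoids threading the
# accumulator list through the calls by hand.
def iter_flatten(items):
    for item in items:
        if isinstance(item, list):
            yield from iter_flatten(item)
        else:
            yield item

print(list(iter_flatten([[1, 1], 2, [1, 1]])))  # [1, 1, 2, 1, 1]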
|
RamonRomeroQro/ProgrammingPractice
|
code/FlattenNestedList.py
|
FlattenNestedList.py
|
py
| 265 |
python
|
en
|
code
| 1 |
github-code
|
50
|
40209166347
|
#! /usr/bin/env python
import argparse
import os
import sys
import json
import math
import pickle
import torch
import numpy as np
from scipy.stats import entropy
from datasets import load_dataset
from transformers import AutoTokenizer, AutoModelForQuestionAnswering
from models import ElectraQA
parser = argparse.ArgumentParser(description='Get all command line arguments.')
parser.add_argument('--batch_size', type=int, default=32, help='Specify the training batch size')
parser.add_argument('--predictions_save_path', type=str, help="Where to save predicted values")
def sig(x):
return 1/(1+np.exp(-x))
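# Small self-contained illustration of the normalised entropy computed inside main()
# below: it is 1.0 for a flat distribution over the passage tokens and close to 0 for
# a sharply peaked one. Kept as a helper that is never called automatically; the
# numbers are toy values, not model output.
def entropy_demo():
    flat = np.ones(8) / 8
    peaked = np.array([0.93] + [0.01] * 7)
    print(entropy(flat, base=2) / math.log(8, 2))    # 1.0
    print(entropy(peaked, base=2) / math.log(8, 2))  # ~0.19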
def main(args):
if not os.path.isdir('CMDs'):
os.mkdir('CMDs')
with open('CMDs/train.cmd', 'a') as f:
f.write(' '.join(sys.argv) + '\n')
f.write('--------------------------------\n')
dev_data = load_dataset('squad_v2', split='validation')
print(len(dev_data))
electrasquad2 = "ahotrod/electra_large_discriminator_squad2_512"
electrasquad1 = "mrm8488/electra-large-finetuned-squadv1"
huggingface_model = electrasquad1
tokenizer = AutoTokenizer.from_pretrained(huggingface_model)
model = AutoModelForQuestionAnswering.from_pretrained(huggingface_model)
count = 0
# span_predictions = {}
entropy_on = []
entropy_off = []
pred_start_probs = []
pred_end_probs = []
for ex in dev_data:
count+=1
print(count)
# if count<3105:
# continue
# if count==3200:
# break
question, passage, qid = ex["question"], ex["context"], ex["id"]
inputs = tokenizer.encode_plus(question, passage, add_special_tokens=True, return_tensors="pt")
inp_ids = inputs["input_ids"]
if inp_ids.shape[1] > 512:
# print("in here")
inputs["input_ids"] = inputs["input_ids"][:,:512]
inp_ids = inp_ids[:,:512]
# start_logits, end_logits = model(**inputs)
start_logits, end_logits = model(input_ids=inp_ids)
# answer_start = torch.argmax(start_logits)
# answer_end = torch.argmax(end_logits)
inp_ids = inputs["input_ids"].tolist()[0]
# answer = tokenizer.convert_tokens_to_string(tokenizer.convert_ids_to_tokens(inp_ids[answer_start:answer_end+1]))
# if answer == "[CLS]":
# answer = ""
# span_predictions[qid] = answer
start_logits = sig(torch.squeeze(start_logits).detach().cpu().numpy())
end_logits = sig(torch.squeeze(end_logits).detach().cpu().numpy())
start_probs = start_logits / np.sum(start_logits)
end_probs = end_logits / np.sum(end_logits)
pred_start_probs.append(start_probs)
pred_end_probs.append(end_probs)
sep = tokenizer.convert_ids_to_tokens(inp_ids).index("[SEP]")
resp_start = start_probs[sep+1:-1] / np.sum(start_probs[sep+1:-1])
resp_end = end_probs[sep+1:-1] / np.sum(end_probs[sep+1:-1])
entrop = ((entropy(resp_start, base=2) + entropy(resp_end, base=2)) / 2) / math.log(len(resp_start), 2)
# print(entrop)
# with open(args.predictions_save_path + "predictions.json", 'w') as fp:
# json.dump(span_predictions, fp)
if len(ex["answers"]["text"])==0:
entropy_off.append(entrop)
# print(question)
# print(passage)
# print(ex["answers"]["text"])
# print(answer)
else:
entropy_on.append(entrop)
print(np.mean(entropy_on))
print(np.std(entropy_on))
print(np.mean(entropy_off))
print(np.std(entropy_off))
with open(args.predictions_save_path +"start_probs.txt", "wb") as fp:
pickle.dump(pred_start_probs, fp)
with open(args.predictions_save_path +"end_probs.txt", "wb") as fp:
pickle.dump(pred_end_probs, fp)
# pred_start_logits = []
# pred_end_logits = []
# count = 0
# for item in dl:
# print(count)
# count+=1
# inp_id = item[0].to(device)
# with torch.no_grad():
# start_logits, end_logits = model(inp_id)
# b_start_logits = start_logits.detach().cpu().numpy().tolist()
# # pred_start_logits += b_start_logits
# b_end_logits = end_logits.detach().cpu().numpy().tolist()
# if len(pred_start_logits)==0:
# pred_start_logits += b_start_logits
# pred_end_logits += b_end_logits
# else:
# pred_start_logits.extend(b_start_logits)
# pred_end_logits.extend(b_end_logits)
# # pred_end_logits += b_end_logits
# pred_start_logits, pred_end_logits = np.asarray(pred_start_logits), np.asarray(pred_end_logits)
# Save all necessary file (in order to be able to ensemble)
# np.savetxt(args.predictions_save_path + "pred_start_logits_all.txt", pred_start_logits)
# np.savetxt(args.predictions_save_path + "pred_end_logits_all.txt", pred_end_logits)
if __name__ == '__main__':
args = parser.parse_args()
main(args)
|
VatsalRaina/question_answering_squad2
|
combo_electra/entropy_large.py
|
entropy_large.py
|
py
| 5,053 |
python
|
en
|
code
| 0 |
github-code
|
50
|
13194295159
|
"""
You're given the root node of a Binary Tree. Write a function that returns true if this Binary Tree is height balanced
and false if it isn't.
A Binary Tree is height balanced if for each node in the tree, the difference between the height of its
left subtree and the height of its right subtree is at most 1.
Each Binary Tree node has an integer value, a left child node and a right child node. Child nodes can either be
Binary Tree nodes themselves or NULL/None.
"""
# Recursive approach
# Time: O(n) | Space: O(h)
###########################
# This approach recursively traverses every node and checks the balance status and height between its left and right
# subtrees.
# For each node, it returns the balance status in the form of array. 0th index represents whether the
# left and right subtree is balanced or not. 1st index represents the height of the current node from leaf.
#
# If the tree is null, it is balanced by default. So we return True.
# Call get_height_balance function with root node --> get_height_balance(node).
# This returns an array as [X, Y]. X -> Balance status (True/False), Y -> Height
# return the 0th index value
#
# Declare a function --> get_height_balance(node)
# If the node is null
# return [True, 0] (null nodes are always balanced)
# recursively call get_height_balance on left subtree -> left_subtree_height_balance
# recursively call get_height_balance on right subtree -> right_subtree_height_balance
# Initialize is_balanced = False
# If balance status for both left and right subtree are True and absolute difference between their heights are <= 1
# Set is_balanced = True
# Set height as maximum of left and right subtree heights + 1
# return [is_balanced, height]
###########################
class BinaryTree:
def __init__(self, value, left=None, right=None):
self.value = value
self.left = left
self.right = right
def heightBalancedBinaryTree(tree):
if not tree:
return True
is_tree_balanced = get_height_balance(tree)
return is_tree_balanced[0]
def get_height_balance(node):
if not node:
return [True, 0]
left_subtree_height_balance = get_height_balance(node.left)
right_subtree_height_balance = get_height_balance(node.right)
is_balanced = False
if (left_subtree_height_balance[0] and right_subtree_height_balance[0]) and (abs(left_subtree_height_balance[1] - right_subtree_height_balance[1]) <= 1):
is_balanced = True
height = max(left_subtree_height_balance[1], right_subtree_height_balance[1]) + 1
return [is_balanced, height]
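# Quick usage sketch (not part of the original solution): build the tree
#       1
#      / \
#     2   3
#    /
#   4
# and check it. Subtree heights never differ by more than 1, so this prints True.
if __name__ == "__main__":
    root = BinaryTree(1, left=BinaryTree(2, left=BinaryTree(4)), right=BinaryTree(3))
    print(heightBalancedBinaryTree(root))  # True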
|
rageshn/AlgoExpert
|
BinaryTrees/height-balanced-binary-tree.py
|
height-balanced-binary-tree.py
|
py
| 2,620 |
python
|
en
|
code
| 0 |
github-code
|
50
|
12415513344
|
import numpy as np
import os
import turtle
import time
import random
import pyaudio
import sys
import struct
from datetime import datetime
# Config
INITIAL_TAP_THRESHOLD = 0.010
FORMAT = pyaudio.paInt16
SHORT_NORMALIZE = (1.0/32768.0)
CHANNELS = 2
RATE = 44100
INPUT_BLOCK_TIME = 0.05
INPUT_FRAMES_PER_BLOCK = int(RATE*INPUT_BLOCK_TIME)
OVERSENSITIVE = 15.0/INPUT_BLOCK_TIME
UNDERSENSITIVE = 120.0/INPUT_BLOCK_TIME
MAX_TAP_BLOCKS = 0.15/INPUT_BLOCK_TIME
pa = pyaudio.PyAudio()
# Background
screen = turtle.Screen()
screen.title("CPEN 441 Experiment")
screen.setup(width = 1.0, height = 1.0, startx=None, starty=None)
screen.bgcolor("black")
screen.tracer(0)
screen.bgpic('bgd.png')
f = open("distraction_data.txt", "a")
f.write(f'\nExperiment at {datetime.now()}\n')
def makeTurtle(shape, color, shapesizeX, shapesizeY, posX, posY):
t = turtle.Turtle()
t.speed(0)
t.penup()
t.shape(shape)
t.pencolor("black")
t.color(color)
t.shapesize(shapesizeX, shapesizeY)
t.setpos(posX, posY)
return t
# Status
status = turtle.Turtle()
status.speed(0)
status.penup()
status.hideturtle()
status.color("white")
status.goto(0, 330)
status.write("Press Enter to start", align="center", font=("Arial", 24, "normal"))
# grass = makeTurtle('square', 'green', 31, 50, -200, 50)
# track = makeTurtle('square', 'grey', 10, 50, -200, 50)
# start = makeTurtle('square', 'black', 10, 1, -640, 50)
# end = makeTurtle('square', 'black', 10, 1, 230, 50)
# Player Sprites
snail_red = 'snail_red.gif'
snail_blue = 'snail_blue.gif'
screen.addshape(snail_red)
screen.addshape(snail_blue)
p1 = makeTurtle(snail_red, 'red', 1, 1, -570, -20)
p2 = makeTurtle(snail_blue, 'blue', 1, 1, -530, 50)
# Audio bar player 1
bar1 = makeTurtle('square', 'white', 20, 1, 450, 50)
level1 = makeTurtle('square', 'red', 0.5, 1, 450, -145)
# Audio bar player 2
bar2 = makeTurtle('square', 'white', 20, 1, 550, 50)
level2 = makeTurtle('square', 'blue', 0.5, 1, 550, 0)
# RNG distractor
coor = [(-744, -354), (-744, 373), (729, -354), (729, 373)]
beep = makeTurtle('square', 'white', 1, 1, coor[0][0], coor[0][1])
beep.hideturtle()
screen.update()
def get_mouse_click_coor(x, y):
print(x, y)
turtle.onscreenclick(get_mouse_click_coor)
p = pyaudio.PyAudio()
def get_rms( block ):
count = len(block)/2
format = "%dh"%(count)
shorts = struct.unpack( format, block )
np_arr = np.array([shorts])
np_arr = np_arr * SHORT_NORMALIZE
np_arr = np.square(np_arr)
return 100 * np.sqrt(np.sum(np_arr) / count) / 0.4
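# Self-contained check of get_rms() (illustration only, never called during the
# experiment): pack one block of a synthetic 440 Hz tone at 30% full scale and
# measure its level. Call it manually from a REPL if needed.
def _rms_demo():
    t = np.arange(INPUT_FRAMES_PER_BLOCK * CHANNELS)
    tone = (0.3 * 32767 * np.sin(2 * np.pi * 440 * t / RATE)).astype(np.int16)
    return get_rms(tone.tobytes())  # bytes laid out like one stream.read() block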
def find_input_device():
device_index = 3 # Hardcoded
# for i in range( pa.get_device_count() ):
# devinfo = pa.get_device_info_by_index(i)
# print( "Device %d: %s"%(i,devinfo["name"]) )
# for keyword in ["mic","input"]:
# if keyword in devinfo["name"].lower():
# print( "Found an input: device %d - %s"% (i,devinfo["name"]) )
# device_index = i
# return device_index
# if device_index == None:
# print( "No preferred input found; using default input device." )
return device_index
def open_mic_stream():
device_index = find_input_device()
stream = pa.open(format = FORMAT,
channels = CHANNELS,
rate = RATE,
input = True,
input_device_index = device_index,
frames_per_buffer = INPUT_FRAMES_PER_BLOCK)
return stream
def updateBeep(coor, t, old):
idx = random.randint(0, 3)
while(old == idx):
idx = random.randint(0, 3)
x, y = coor[idx]
t.setpos(x, y)
return idx
user_timer_start = -1
def store_user_timer():
global user_timer_start
if(user_timer_start != -1):
f.write(f'{(datetime.now() - user_timer_start).total_seconds()}\n')
started = False
def start_game():
global started
status.clear()
started = True
screen.listen()
screen.onkey(store_user_timer, "space")
screen.onkey(start_game, "Return")
stream = open_mic_stream()
amplitude = 0
x1, y1 = p1.pos()
x2, y2 = p2.pos()
xl1, yl1 = level1.pos()
xl2, yl2 = level2.pos()
old = datetime.now()
new = datetime.now()
idx = 0
iteration = 0
while(x1 < 90 and x2 < 130):
if(started):
new = datetime.now()
diff = (new - old).total_seconds()
if(amplitude < 10):
delX1 = 5
elif(amplitude < 20):
delX1 = 7
elif(amplitude < 50):
delX1 = 20
elif(amplitude < 100):
delX1 = 30
level1.setpos(xl1, yl1 + amplitude * 400 / 100)
level2.setpos(xl2, yl2 + amplitude * 20 / 100)
p1.setpos(x1 + delX1, y1)
p2.setpos(x2 + 10, y2)
if(diff//3 == 1):
old = new
idx = updateBeep(coor, beep, idx)
user_timer_start = datetime.now()
beep.showturtle()
iteration = 0
if(iteration == 3):
beep.hideturtle()
iteration += 1
time.sleep(0.2)
block = stream.read(INPUT_FRAMES_PER_BLOCK)
amplitude = get_rms(block)
x1, y1 = p1.pos()
x2, y2 = p2.pos()
screen.update()
else:
screen.update()
p1.setpos(x1, y1)
p2.setpos(x2, y2)
if(x1 >= 90 and x2 >= 130):
score_string = "It's a tie!"
elif(x1 >= 90):
score_string = "Team 1 won!"
else:
score_string = "Team 2 won!"
status.clear()
status.write(score_string, align="center", font=("Arial", 24, "normal"))
# screen.mainloop()
time.sleep(3)
f.write(f'Status: {score_string}\n')
f.close()
|
T4w51f/StadiumExperiment
|
module_4_experiment.py
|
module_4_experiment.py
|
py
| 5,645 |
python
|
en
|
code
| 0 |
github-code
|
50
|
72926904475
|
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.label import Label
from kivy.uix.button import Button
from kivy.uix.scrollview import ScrollView
from kivy.core.audio import SoundLoader
import csv
import math
import requests  # used by get_user_location() below
class RadioApp(App):
def build(self):
self.title = 'Offline Radio App'
layout = BoxLayout(orientation='vertical')
# Load radio station data from CSV
self.radio_stations = []
with open('radio_stations.csv', 'r') as file:
reader = csv.reader(file)
next(reader) # Skip header
for row in reader:
self.radio_stations.append(row)
# Display radio stations in a ScrollView
scroll_view = ScrollView()
station_list = BoxLayout(orientation='vertical', size_hint_y=None)
for station in self.radio_stations:
station_button = Button(text=station[0], size_hint_y=None, height=44)
station_button.bind(on_press=self.play_station)
station_list.add_widget(station_button)
scroll_view.add_widget(station_list)
layout.add_widget(Label(text="Available FM Stations"))
layout.add_widget(scroll_view)
self.audio_player = None
return layout
def play_station(self, instance):
station_name = instance.text
for station in self.radio_stations:
if station[0] == station_name:
if self.audio_player:
self.audio_player.stop()
self.audio_player = SoundLoader.load('audio/' + station[0] + '.mp3')
if self.audio_player:
self.audio_player.play()
def get_user_location(self):
# Implement logic to use Android's Location API or Geolocation API here
# This will involve requesting user's permission for location access
# and then retrieving the user's latitude and longitude
# For example, using Google's Geolocation API (requires API key):
api_key = 'YOUR_GOOGLE_API_KEY'
url = f'https://www.googleapis.com/geolocation/v1/geolocate?key={api_key}'
response = requests.post(url)
location_data = response.json()
user_latitude = location_data['location']['lat']
user_longitude = location_data['location']['lng']
return user_latitude, user_longitude
def calculate_distance(self, lat1, lon1, lat2, lon2):
# Calculate distance between two coordinates using Haversine formula
radius = 6371 # Earth's radius in kilometers
dlat = math.radians(lat2 - lat1)
dlon = math.radians(lon2 - lon1)
a = math.sin(dlat / 2) ** 2 + math.cos(math.radians(lat1)) * math.cos(math.radians(lat2)) * math.sin(dlon / 2) ** 2
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
distance = radius * c
return distance
def search_radio_stations(self):
user_latitude, user_longitude = self.get_user_location()
max_distance_km = 10 # Maximum distance to consider stations available
available_stations = []
for station in self.radio_stations:
station_name, company, frequency, station_latitude, station_longitude = station
station_latitude = float(station_latitude)
station_longitude = float(station_longitude)
distance = self.calculate_distance(user_latitude, user_longitude, station_latitude, station_longitude)
if distance <= max_distance_km:
available_stations.append(station_name)
print("Available FM Stations in Vicinity:")
print(available_stations)
if __name__ == '__main__':
RadioApp().run()
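# Standalone check of the Haversine formula used in calculate_distance() above,
# written as a free function so it does not need a running Kivy app. The sample
# coordinates are approximate values for Kampala and Entebbe (~35 km apart).
def haversine_km(lat1, lon1, lat2, lon2):
    radius = 6371  # Earth's radius in kilometers
    dlat = math.radians(lat2 - lat1)
    dlon = math.radians(lon2 - lon1)
    a = math.sin(dlat / 2) ** 2 + math.cos(math.radians(lat1)) * math.cos(math.radians(lat2)) * math.sin(dlon / 2) ** 2
    return radius * 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))

# haversine_km(0.3476, 32.5825, 0.0512, 32.4637) -> roughly 35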
|
Turyamureeba/radio
|
radio.py
|
radio.py
|
py
| 3,721 |
python
|
en
|
code
| 0 |
github-code
|
50
|
72087492955
|
def nb_voyelles(chaine):
    "Return the number of vowels present in a given string"
    liste_voyelles = ["a", "A", "e", "E", "i","I", "o","O", "u","U", "y","Y"] # Vowels (upper and lower case) each character of the string is compared against
    n_voyelles = 0 # Number of vowels in the string
    for caractere in chaine: # For each character of the string
        for voyelle in liste_voyelles:
            if caractere == voyelle:
                n_voyelles += 1
    return n_voyelles
chaine = input("Type a word or any characters: ")
compter_voyelles = nb_voyelles(chaine) # one call counts every vowel in the string
print(compter_voyelles, "vowel(s) found")
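# A more compact alternative (illustration only): membership in a vowel string plus a
# generator expression gives the same count, still treating y/Y as a vowel.
def nb_voyelles_v2(chaine):
    "Return the number of vowels in a given string"
    return sum(1 for c in chaine if c in "aeiouyAEIOUY")

print(nb_voyelles_v2("Python"))  # 2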
|
terenceithaque/stage-python-2022-2023
|
nb_voyelles.py
|
nb_voyelles.py
|
py
| 763 |
python
|
fr
|
code
| 0 |
github-code
|
50
|
21277287943
|
'''
Given a sorted array and a target value, return the index if the target is found. If not, return the index where it would be if it were inserted in order.
You may assume NO duplicates in the array.
Example
[1,3,5,6], 5 → 2
[1,3,5,6], 2 → 1
[1,3,5,6], 7 → 4
[1,3,5,6], 0 → 0
Challenge
O(log(n)) time
'''
class Solution:
def searchInsert(self, A, target):
if not A or A[0] >= target:
return 0
if A[-1] < target:
return len(A)
start, end = 0, len(A) - 1
while start + 1 < end:
mid = start + (end - start) // 2
if A[mid] >= target:
end = mid
else:
start = mid
if A[start] >= target:
return start
return end
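# Quick check against the examples listed in the docstring above.
if __name__ == "__main__":
    s = Solution()
    print(s.searchInsert([1, 3, 5, 6], 5))  # 2
    print(s.searchInsert([1, 3, 5, 6], 2))  # 1
    print(s.searchInsert([1, 3, 5, 6], 7))  # 4
    print(s.searchInsert([1, 3, 5, 6], 0))  # 0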
|
dragonforce2010/interview-algothims
|
lecture_basic/Lecture2.Binary_Search/60. Search Insert Position.py
|
60. Search Insert Position.py
|
py
| 782 |
python
|
en
|
code
| 19 |
github-code
|
50
|
15981818502
|
import pickle
import pprint
import time
from selenium import webdriver
def save_cookies(driver, location):
pickle.dump(driver.get_cookies(), open(location, "wb"))
def load_cookies(driver, location, url=None):
cookies = pickle.load(open(location, "rb"))
driver.delete_all_cookies()
# have to be on a page before you can add any cookies, any page - does not matter which
driver.get("https://google.com" if url is None else url)
for cookie in cookies:
if isinstance(cookie.get('expiry'), float):#Checks if the instance expiry a float
cookie['expiry'] = int(cookie['expiry'])# it converts expiry cookie to a int
driver.add_cookie(cookie)
def delete_cookies(driver, domains=None):
if domains is not None:
cookies = driver.get_cookies()
original_len = len(cookies)
for cookie in cookies:
if str(cookie["domain"]) in domains:
cookies.remove(cookie)
if len(cookies) < original_len: # if cookies changed, we will update them
# deleting everything and adding the modified cookie object
driver.delete_all_cookies()
for cookie in cookies:
driver.add_cookie(cookie)
else:
driver.delete_all_cookies()
# Path where you want to save/load cookies to/from aka C:\my\fav\directory\cookies.txt
cookies_location = r"E:\Development\Webdriver-Tutorials\cookies.txt"  # raw string so the backslashes are not treated as escape sequences
# Initial load of the domain that we want to save cookies for
chrome = webdriver.Chrome()
chrome.get("https://www.hackerrank.com/login")
chrome.find_element_by_xpath("//input[@id='login']").send_keys("infunig1986")
chrome.find_element_by_xpath("(//input[@id='password'])[2]").send_keys("TestUserAccount")
chrome.find_element_by_xpath("(//button[@name='commit'])[2]").click()
save_cookies(chrome, cookies_location)
chrome.quit()
# Load of the page you can't access without cookies, this one will fail
chrome = webdriver.Chrome()
chrome.get("https://www.hackerrank.com/settings/profile")
# Load of the page you can't access without cookies, this one will go through
chrome = webdriver.Chrome()
load_cookies(chrome, cookies_location)
chrome.get("https://www.hackerrank.com/settings/profile")
# chrome = webdriver.Chrome()
# chrome.get("https://google.com")
# time.sleep(2)
# pprint.pprint(chrome.get_cookies())
# print "=========================\n"
#
# delete_cookies(chrome, domains=["www.google.com"])
# pprint.pprint(chrome.get_cookies())
# print "=========================\n"
#
# delete_cookies(chrome)
# pprint.pprint(chrome.get_cookies())
|
ArturSpirin/YouTube-WebDriver-Tutorials
|
Cookies.py
|
Cookies.py
|
py
| 2,582 |
python
|
en
|
code
| 44 |
github-code
|
50
|
74816851036
|
import argparse
import txaio
txaio.use_twisted()
from autobahn.twisted.util import sleep
from autobahn.wamp.types import PublishOptions
from autobahn.twisted.wamp import ApplicationSession, ApplicationRunner
from autobahn.wamp.serializer import JsonSerializer, CBORSerializer, MsgPackSerializer
class ClientSession(ApplicationSession):
async def onJoin(self, details):
print('Client session joined: {}'.format(details))
topic = 'com.example.topic1'
def on_event(i):
print('Event received: {}'.format(i))
await self.subscribe(on_event, topic)
for i in range(5):
self.publish(topic, i, options=PublishOptions(acknowledge=True, exclude_me=False))
await sleep(1)
self.leave()
def onLeave(self, details):
print('Client session left: {}'.format(details))
self.config.runner.stop()
self.disconnect()
def onDisconnect(self):
print('Client session disconnected.')
from twisted.internet import reactor
reactor.stop()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-d',
'--debug',
action='store_true',
help='Enable debug output.')
parser.add_argument('--url',
dest='url',
type=str,
default="ws://localhost:8080/ws",
help='The router URL, eg "ws://localhost:8080/ws" or "rs://localhost:8081" (default: "ws://localhost:8080/ws").')
parser.add_argument('--realm',
dest='realm',
type=str,
default="realm1",
help='The realm to join (default: "realm1").')
parser.add_argument('--serializer',
dest='serializer',
type=str,
default="json",
help='Serializer to use, one of "json", "cbor", "msgpack", "all" or "unspecified" (default: "unspecified")')
args = parser.parse_args()
# start logging
if args.debug:
txaio.start_logging(level='debug')
else:
txaio.start_logging(level='info')
# explicitly select serializer
if args.serializer == 'unspecified':
serializers = None
else:
serializers = []
if args.serializer in ['cbor', 'all']:
serializers.append(CBORSerializer())
if args.serializer in ['msgpack', 'all']:
serializers.append(MsgPackSerializer())
if args.serializer in ['json', 'all']:
serializers.append(JsonSerializer())
# any extra info we want to forward to our ClientSession (in self.config.extra)
extra = {}
# now actually run a WAMP client using our session class ClientSession
runner = ApplicationRunner(url=args.url, realm=args.realm, extra=extra, serializers=serializers)
runner.run(ClientSession, auto_reconnect=True)
|
crossbario/crossbar-examples
|
stats/client.py
|
client.py
|
py
| 2,951 |
python
|
en
|
code
| 169 |
github-code
|
50
|
11939063154
|
__all__ = ['unet_v',
'unet_v2',
'hourglass_wres',
'hourglass_wores',
'unet_v_synth',
'unet_v2_synth',
'hourglass_wres_synth',
'hourglass_wores_synth',
'unet_v_tr',
'hourglass_wres_tr',
'unet_v_k5',
'hourglass_wres_k5',
'unet_v_patch',
'hourglass_wres_patch']
|
shiveshc/NIDDL
|
cnn_archs/__init__.py
|
__init__.py
|
py
| 402 |
python
|
en
|
code
| 4 |
github-code
|
50
|
32826423573
|
import logging
import logging.config
from logging.handlers import RotatingFileHandler
def init_service_logger():
logger = logging.getLogger('TRANSFER-LOGGER')
logging.getLogger('TRANSFER-LOGGER').addHandler(logging.StreamHandler())
logger.setLevel(logging.INFO)
fh = logging.FileHandler(f"/home/doc/OraclePostgreTransfer/logs/transfer.log", encoding="UTF-8")
# fh = RotatingFileHandler(cfg.LOG_FILE, encoding="UTF-8", maxBytes=100000000, backupCount=5)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)
logger.info('TRANSFER-LOGGER started')
return logger
log = init_service_logger()
|
Shamil-G/OraclePostgreTransfer
|
util/logger.py
|
logger.py
|
py
| 715 |
python
|
en
|
code
| 0 |
github-code
|
50
|