seq_id (stringlengths 4–11) | text (stringlengths 113–2.92M) | repo_name (stringlengths 4–125, ⌀) | sub_path (stringlengths 3–214) | file_name (stringlengths 3–160) | file_ext (stringclasses, 18 values) | file_size_in_byte (int64 113–2.92M) | program_lang (stringclasses, 1 value) | lang (stringclasses, 93 values) | doc_type (stringclasses, 1 value) | stars (int64 0–179k, ⌀) | dataset (stringclasses, 3 values) | pt (stringclasses, 78 values)
---|---|---|---|---|---|---|---|---|---|---|---|---
22372400313
|
from math import *
def smooth(path, weight_data = 0.1, weight_smooth = 0.1, tolerance = 0.00001):
#
# Enter code here
#
# Make a deep copy of path into newpath
newpath = [[0 for row in range(len(path[0]))] for col in range(len(path))]
change = tolerance
while change >= tolerance:
change = 0.0
for i in range(len(path)):
for j in range(len(path[0])):
aux = newpath[i][j]
newpath[i][j] += weight_data * (path[i][j] - newpath[i][j]) + weight_smooth * (newpath[(i-1)%len(path)][j] + newpath[(i+1)%len(path)][j] - 2.0 * newpath[i][j])
change += abs(aux - newpath[i][j])
return newpath
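# Usage sketch (the square below is an illustrative input, not part of the
# original file). Indices wrap with % len(path), so the path is treated as a
# closed loop and every point, including the endpoints, gets smoothed:
# example_path = [[0, 0], [0, 1], [1, 1], [1, 0]]
# smoothed = smooth(example_path)  # each smoothed[i] stays near path[i], but the corners relax inward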
|
petrooha/ML-notes
|
AI for Robotics/PID Control (Cyclic Smoothing).py
|
PID Control (Cyclic Smoothing).py
|
py
| 718 |
python
|
en
|
code
| 0 |
github-code
|
50
|
70207179676
|
import base64
import functools
import web3
from bicxer import config
def get_klaytn_web3_provider_using_kas(
kas_access_key_id: str, kas_secret_access_key: str
) -> web3.Web3:
"""
Returns a Web3 instance that is connected to the Klaytn network using the KAS API.
Args:
kas_access_key_id (str): The access key ID for the KAS API.
kas_secret_access_key (str): The secret access key for the KAS API.
Returns:
web3.Web3: A Web3 instance connected to the Klaytn network.
"""
KLAYTN_CYPRESS_CHAIN_ID = "8217"
return web3.Web3(
web3.HTTPProvider(
"https://node-api.klaytnapi.com/v1/klaytn",
{
"headers": {
"Authorization": "Basic "
+ base64.b64encode(
f"{kas_access_key_id}:{kas_secret_access_key}".encode()
).decode(),
"x-chain-id": KLAYTN_CYPRESS_CHAIN_ID,
"Content-Type": "application/json",
}
},
)
)
@functools.lru_cache
def get_klaytn_web3_provider():
"""
Returns a Klaytn web3 provider using the Klaytn API Service (KAS).
:return: A Klaytn web3 provider.
"""
settings = config.Settings()
return get_klaytn_web3_provider_using_kas(
settings.kas_access_key_id, settings.kas_secret_key_id
)
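# Usage sketch (illustrative; assumes valid KAS credentials in config.Settings
# and the standard web3.py accessors, whose exact names vary across web3 versions):
# w3 = get_klaytn_web3_provider()
# print(w3.is_connected(), w3.eth.block_number)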
|
ricepotato/block-indexer
|
bicxer/w3.py
|
w3.py
|
py
| 1,400 |
python
|
en
|
code
| 0 |
github-code
|
50
|
19162579450
|
T = int(input())
nums = [int(input()) for i in range(T)]
def isPrime(n):
if n==2 or n==3: return True
if n%2==0 or n<2: return False
for i in range(3, int(n**0.5)+1, 2):
if n%i==0:
return False
return True
for i in nums:
A = B = 0
for Ai in range(2, i):
sumN = i * 2
if isPrime(Ai):
sumN -= Ai
A = Ai
if isPrime(sumN):
B = sumN
break
print(str(A) + " " + str(B))
# Algorithm Explanation
# Since we are solving N = (A+B)/2, where A and B are primes, we implement a prime checker to test whether A and B are prime.
# If N = (A+B)/2 then A+B = 2N. So once we find a prime A, the candidate B is 2N - A; if that difference is also prime, we have our two answers.
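# Worked example (illustrative, not in the original): for N = 8, 2N = 16.
# A = 2 gives B = 14 (not prime), but A = 3 gives B = 13, which is prime,
# so the program prints "3 13".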
|
sushimon/CCC-Solutions
|
ccc 2019/S2.py
|
S2.py
|
py
| 896 |
python
|
en
|
code
| 0 |
github-code
|
50
|
44267456248
|
# BOJ 1966
# Printer Queue (complete)
T = int(input())  # number of test cases
index_list = []
for i in range(T):
    N, M = input().split()
    N_int = int(N)
    M_int = int(M)
    doc_list = input().split()  # priorities are single digits (1-9), so string comparison works below
    index_list = list(range(N_int))  # index_list tracks each document's original position
    most_imp = max(doc_list)  # highest priority currently in doc_list
    count = 0  # counts how many documents have been printed so far
    while True:  # loop until document M is printed
        if doc_list[0] < most_imp:  # front document's priority < current maximum
            doc_list.append(doc_list[0])  # append the front value to the back
            doc_list.remove(doc_list[0])  # remove the front value
            index_list.append(index_list[0])  # append the front index to the back
            index_list.remove(index_list[0])  # remove the front index
        elif doc_list[0] == most_imp:  # front document's priority == current maximum
            if index_list[0] == M_int:  # front document's original position == M
                count += 1  # it gets printed now
                print(count)  # report its print order
                break  # done with this test case
            # highest priority, but not document M: print it and continue
            doc_list.remove(doc_list[0])  # remove the front value
            index_list.remove(index_list[0])  # remove the front index
            count += 1  # one more document printed
            most_imp = max(doc_list)  # recompute the highest priority
|
dupe-in/BaekJoon
|
2022_07/1966.py
|
1966.py
|
py
| 1,864 |
python
|
ko
|
code
| 0 |
github-code
|
50
|
40905887066
|
from ...Qt import QtGui, QtWidgets
from ..Parameter import Parameter
from .basetypes import WidgetParameterItem
class FontParameterItem(WidgetParameterItem):
def makeWidget(self):
w = QtWidgets.QFontComboBox()
w.setMaximumHeight(20)
w.sigChanged = w.currentFontChanged
w.value = w.currentFont
w.setValue = w.setCurrentFont
self.hideWidget = False
return w
def updateDisplayLabel(self, value=None):
if value is None:
value = self.widget.currentText()
super().updateDisplayLabel(value)
class FontParameter(Parameter):
"""
    Creates and controls a QFont value. Be careful when selecting options from
    the font dropdown, since not all fonts are available on all systems
"""
itemClass = FontParameterItem
def _interpretValue(self, v):
if isinstance(v, str):
newVal = QtGui.QFont()
if not newVal.fromString(v):
raise ValueError(f'Error parsing font "{v}"')
v = newVal
return v
def saveState(self, filter=None):
state = super().saveState(filter)
state['value'] = state['value'].toString()
return state
|
pyqtgraph/pyqtgraph
|
pyqtgraph/parametertree/parameterTypes/font.py
|
font.py
|
py
| 1,211 |
python
|
en
|
code
| 3,463 |
github-code
|
50
|
12356599649
|
#!/usr/bin/env python
from kivy.app import App
from kivy.config import Config
from kivy.uix.screenmanager import ScreenManager, FadeTransition
Config.set('input', 'mouse', 'mouse,disable_multitouch')
class Katurigja(App):
view = ScreenManager(transition=FadeTransition())
def build(self):
return self.view
if __name__ == '__main__':
from controllers.client import Local
from views.menu import Menu
from views.game_setup import GameSetup
from views.game import Game
game = Katurigja()
client = Local()
game.view.add_widget(
Menu(
name='main_menu',
)
)
game.view.add_widget(
GameSetup(
last_screen='main_menu',
client=client,
name='game_setup',
)
)
game.view.add_widget(
Game(
client=client,
name='game',
)
)
game.run()
|
matumaros/katurigja
|
main.py
|
main.py
|
py
| 916 |
python
|
en
|
code
| 1 |
github-code
|
50
|
29633842802
|
import bpy
import csv
import mathutils as mt
# globals
frames = []
rotation_frames = {}
hipy = []
hipx = []
class CSVPropertiesPanel(bpy.types.Panel):
bl_label = "CSV Properties"
bl_idname = "OBJECT_PT_csv_properties"
bl_space_type = "PROPERTIES"
bl_region_type = "WINDOW"
bl_context = "object"
def draw(self, context):
layout = self.layout
scene = context.scene
layout.prop(scene, "csv_file_path")
layout.prop(scene, "armature_name")
# print(bpy.data.scenes[0].csv_file_path)
col = layout.column()
col.operator("object.read_csv", text="Read CSV")
row = layout.row()
col = layout.column()
col.label(text = "Modelling Properties")
col.prop(scene, "avg_frames_per_sample")
col.prop(scene, "min_frames_between_sample")
col.prop(scene, "min_confidence")
row = layout.row()
row = layout.row()
row.operator("object.get_armature", text="Get Armature")
row.operator("object.start_modeling", text="Model Armature")
class Rotation:
def __init__(self, euler, bone, axis=None, axis_val=None, visibility = None):
self.euler = euler
self.bone = bone
self.axis = axis
self.axis_val = axis_val
self.visibility = visibility
def __str__(self):
return f'{self.euler}, {self.visibility}, {self.bone}'
def assign_pt(b_name, ld_vec, ref_vec, swizzle):
ld_vec = swizzle(ld_vec)
diff = ref_vec.rotation_difference(ld_vec).to_euler()
bpy.context.object.pose.bones[b_name].rotation_euler = diff
def assign_pt_by_bone(bone, ld_vec, ref_vec, swizzle):
ld_vec = swizzle(ld_vec)
diff = ref_vec.rotation_difference(ld_vec).to_euler()
bone.rotation_euler = diff
def swizzle_l_shoulder(vec):
return -vec.z, vec.x,-vec.y
def swizzle_r_shoulder(vec):
return vec.z,-vec.x,-vec.y
def swizzle_l_thigh(vec):
return -vec.x, vec.y, -vec.z
# return vec
def swizzle_default(vec):
return vec
# clean these up
def insert_frame(bone, time, ld_vec, ref_vec, swizzle, visibility = 1.0, rotations = None):
# assign_pt_by_bone(bone, ld_vec, ref_vec, swizzle)
# @nisan here
ld_vec = swizzle(ld_vec)
diff = ref_vec.rotation_difference(ld_vec).to_euler()
# euler, bone, axis=None, axis_val=None, visibility = None
if rotations != None:
rotations[bone.name].append(Rotation(diff, bone.name, visibility = visibility))
#if visibility >= 0.5:
# bone.keyframe_insert(data_path="rotation_euler",frame=time)
class GetArmatureOperator(bpy.types.Operator):
bl_idname = "object.get_armature"
bl_label = "Get Armature"
def execute(self, context):
CO = bpy.context.object
if CO.type == 'ARMATURE':
bpy.data.scenes[0].armature_name = CO.name_full
self.report({'INFO'}, "Armature Selected in Add-on" )
elif CO.parent.type == "ARMATURE":
self.report({'INFO'}, "Armature Selected from parent of Mesh in Add-on" )
bpy.data.scenes[0].armature_name = CO.parent.name_full
else:
print("Couldn't find linked armature")
self.report({'ERROR'}, f"Couldn't Find linked Armature" )
return {'FINISHED'}
class ReadCSVOperator(bpy.types.Operator):
bl_idname = "object.read_csv"
bl_label = "Read CSV"
#filepath: bpy.props.StringProperty(subtype="FILE_PATH")
def execute(self, context):
global frames
frames = []
scene = context.scene
# csv_path = "D:\Download\input - Sheet1.csv"
csv_path = bpy.data.scenes[0].csv_file_path
try:
with open(csv_path) as csv_file:
csv_reader = csv.reader(csv_file)
frame = []
for row in csv_reader:
if row == ['index','x','y','z','vis']:
pass
elif row[0] == '-1':
frames.append(frame)
frame = []
else:
index,x,y,z,vis = row
scale = 0.3
frame.append([int(index), float(x), float(y), float(z) * scale, float(vis)])
#bpy.ops.mesh.primitive_uv_sphere_add(radius = 1,location=(x, y, 0))
# print(data)
print("CSV Reading: Complete")
self.report({'INFO'}, "CSV Reading: Complete" )
except Exception as e:
print(f"CSV Reading Failed.\nReason: {e}")
self.report({'ERROR'}, f"CSV Reading Failed.\nReason: {e}" )
return {'FINISHED'}
# landmark_vectors = {}
swizzle_functions = {
'l_shoulder' : swizzle_l_shoulder,
'l_forearm' : swizzle_l_shoulder,
'r_shoulder' : swizzle_r_shoulder,
'r_forearm' : swizzle_r_shoulder,
'l_thigh': swizzle_l_thigh, # calculate what this should be, reusing for now
'r_thigh': swizzle_l_thigh,
'l_shin' : swizzle_l_thigh,
'r_shin' : swizzle_l_thigh,
}
reference_vectors = {
'l_shoulder' : mt.Vector([0, 1, 0]),
'l_forearm' : mt.Vector([0, 1, 0]),
'r_shoulder' : mt.Vector([0, 1, 0]),
'r_forearm' : mt.Vector([0, 1, 0]),
#'l_thigh': mt.Vector([-0.12187,0.99255,0])
'l_thigh' : mt.Vector([-0.12187,0.99255,0]),
'r_thigh' : mt.Vector([0.12187,0.99255,0]),
}
"""
ld_vec = landmarks[25] - landmarks[23]
#bpy.context.object.pose.bones['l_thigh'].rotation_euler = Euler([0,0,radians(-7)],"XYZ")
ref_vec =
ld_vec.x,ld_vec.y,ld_vec.z =
diff = ref_vec.rotation_difference(ld_vec).to_euler()
bpy.context.object.pose.bones['l_thigh'].rotation_euler = diff
"""
class StartModelingOperator(bpy.types.Operator):
bl_idname = "object.start_modeling"
bl_label = "Model"
def execute(self, context):
global frames
        global rotation_frames
armature_name = bpy.data.scenes[0].armature_name
armature = bpy.data.objects[armature_name]
AFPS = bpy.data.scenes[0].avg_frames_per_sample
MFBS = bpy.data.scenes[0].min_frames_between_sample
min_conf = bpy.data.scenes[0].min_confidence
# print(AFPS)
# clear all animations
#for bone in bpy.data.armatures[armature_name].bones:
# bone.select = True
# bpy.ops.anim.keyframe_clear_v3d()
# bone.select = False
armature.animation_data_clear()
# set mode to pose mode if its already not set
if bpy.context.mode != 'POSE':
bpy.ops.object.posemode_toggle()
        # turn off inherit rotation for all bones and reset them to default T pose
for i in armature.data.bones:
i.use_inherit_rotation = i.name in [ 'head_end', 'r_hand_end', 'l_hand_end', 'l_hand', 'r_hand', 'r_foot', 'r_foot_end', 'l_foot', 'l_foot_end']
for i in armature.pose.bones:
i.rotation_mode = 'XYZ'
i.rotation_euler = mt.Euler((0,0,0),"XYZ")
i.location = mt.Vector()
frame_counter = 0
visibility = {}
hipy = []
hipx = []
for bone in armature.pose.bones:
rotation_frames[bone.name] = []
for frame_points in frames:
# @nisan here
# rotation_frames = []
visibilities = [pt[4] for pt in frame_points]
landmarks = [mt.Vector(pt[1:4]) for pt in frame_points]
# print(landmarks)
landmark_vectors = {}
# grab all desired landmark vectors
try:
landmark_vectors['l_shoulder'] = (landmarks[13] - landmarks[11]).normalized()
landmark_vectors['l_forearm'] = (landmarks[15] - landmarks[13]).normalized()
landmark_vectors['r_shoulder'] = (landmarks[14] - landmarks[12]).normalized()
landmark_vectors['r_forearm'] = (landmarks[16] - landmarks[14]).normalized()
landmark_vectors['l_thigh'] = (landmarks[25] - landmarks[23]).normalized()
landmark_vectors['r_thigh'] = (landmarks[26] - landmarks[24]).normalized()
landmark_vectors['l_shin'] = (landmarks[27] - landmarks[25]).normalized()
landmark_vectors['r_shin'] = (landmarks[28] - landmarks[26]).normalized()
visibility['l_shoulder'] = min(visibilities[13], visibilities[11])
visibility['l_forearm'] = min(visibilities[15], visibilities[13])
visibility['r_shoulder'] = min(visibilities[14], visibilities[12])
visibility['r_forearm'] = min(visibilities[16], visibilities[14])
visibility['l_thigh'] = min(visibilities[25], visibilities[23])
visibility['r_thigh'] = min(visibilities[26], visibilities[24])
visibility['l_shin'] = min(visibilities[27], visibilities[25])
visibility['r_shin'] = min(visibilities[28], visibilities[26])
visibility['head'] = min( [visibilities[x] for x in range(0,7)] + [visibilities[x] for x in range(9,11)])
visibility['body'] = min( [visibilities[x] for x in [11,12,23,24] ])
visibility['waist'] = min( [visibilities[x] for x in [11,12,23,24] ])
except Exception as e:
self.report({'ERROR'}, f"landmark and visibility assignement error: {e}" )
pass
            hipx.append((landmarks[23].x + landmarks[24].x) / 2)  # hip centre x: midpoint of landmarks 23 and 24
            hipy.append((landmarks[23].y + landmarks[24].y) / 2)  # hip centre y: midpoint of landmarks 23 and 24
for bone in armature.pose.bones:
# insert_frame(armature.pose.bones['l_shoulder'], (0, 20), (landmarks[13] - landmarks[11]).normalized(), mt.Vector([0, 1, 0]), swizzle_l_shoulder)
if bone.name == 'head_end':
continue
# elif bone.name == 'r_ankle':
elif bone.name == 'head':
t= {}
for i in range(3):
t[i+1] = (landmarks[i+1]+landmarks[i+4])/2
t[4] = (landmarks[9]+landmarks[10])/2
sum_eye = (t[1]+t[2]+t[3])/3
sum_mouth_and_nose = (t[4]+sum_eye+landmarks[0])/3
v1 = landmarks[5]
v2 = landmarks[2]
v3 = sum_eye
v4 = sum_mouth_and_nose
ld_vec = v2-v1
ld_vec.x,ld_vec.y,ld_vec.z = ld_vec.x , -ld_vec.y, -ld_vec.z
ref_vec = mt.Vector([1,0,0])
diff = ref_vec.rotation_difference(ld_vec).to_euler()
yrot = diff.y
#bpy.context.object.pose.bones['body'].rotation_euler = diff
m1 = v3
m2 = v4
rm = (m1-m2)
ld_vec = rm
ref_vec = mt.Vector([0,1,0])
ld_vec.x,ld_vec.y,ld_vec.z = ld_vec.x , -ld_vec.y, -ld_vec.z
diff = ref_vec.rotation_difference(ld_vec).to_euler()
##diff.y = yrotto_
# @nisan here
#bone.rotation_euler = diff
#bone.rotation_euler.rotate_axis("Y",yrot)
#bone.keyframe_insert(data_path="rotation_euler",frame=frame_counter)
# euler, bone, axis=None, axis_val=None, visibility = None
rotation_frames[bone.name].append(Rotation(diff, bone.name, "Y", yrot, visibility.get(bone.name, 0)))
# print(len(rotation_frames[bone.name]))
elif bone.name == 'body':
v1 = landmarks[12]
v2 = landmarks[11]
v3 = landmarks[24]
v4 = landmarks[23]
ld_vec = v2-v1
ld_vec.x,ld_vec.y,ld_vec.z = ld_vec.x , -ld_vec.y, -ld_vec.z
ref_vec = mt.Vector([1,0,0])
diff = ref_vec.rotation_difference(ld_vec).to_euler()
yrot = diff.y
#bpy.context.object.pose.bones['body'].rotation_euler = diff
m1 = (v2+v1)/2
m2 = (v4+v3)/2
rm = (m1-m2)/2
ld_vec = rm
ref_vec = mt.Vector([0,1,0])
ld_vec.x,ld_vec.y,ld_vec.z = ld_vec.x , -ld_vec.y, -ld_vec.z
diff = ref_vec.rotation_difference(ld_vec).to_euler()
##diff.y = yrotto_
# @nisan here
#bone.rotation_euler = diff
#bone.rotation_euler.rotate_axis("Y",yrot)
#bone.keyframe_insert(data_path="rotation_euler",frame=frame_counter)
rotation_frames[bone.name].append(Rotation(diff, bone.name, "Y", yrot, visibility.get(bone.name, 0)))
elif bone.name == 'waist':
v1 = landmarks[12]
v2 = landmarks[11]
v3 = landmarks[24]
v4 = landmarks[23]
ld_vec = v4-v3
ld_vec.x,ld_vec.y,ld_vec.z = ld_vec.x , -ld_vec.y, -ld_vec.z
ref_vec = mt.Vector([1,0,0])
diff = ref_vec.rotation_difference(ld_vec).to_euler()
yrot = diff.y
#bpy.context.object.pose.bones['body'].rotation_euler = diff
m1 = (v2+v1)/2
m2 = (v4+v3)/2
rm = (m1-m2)/2
ld_vec = rm
ref_vec = mt.Vector([0,1,0])
ld_vec.x,ld_vec.y,ld_vec.z = ld_vec.x , -ld_vec.y, -ld_vec.z
diff = ref_vec.rotation_difference(ld_vec).to_euler()
##diff.y = yrotto_
# @nisan here
#bone.rotation_euler = diff
#bone.rotation_euler.rotate_axis("Y",yrot)
#bone.keyframe_insert(data_path="rotation_euler",frame=frame_counter)
rotation_frames[bone.name].append(Rotation(diff, bone.name, "Y", yrot, visibility.get(bone.name, 0)))
else:
insert_frame(bone, frame_counter, \
landmark_vectors.get(bone.name, mt.Vector()), \
reference_vectors.get(bone.name, mt.Vector()), \
swizzle_functions.get(bone.name, swizzle_default), visibility.get(bone.name, 0), rotation_frames)
# if not first:
# lis = [landmarks,lm,lm1,lm2]
# for i in range(len(lis)):
# hip_center.append((lis[i][24]/2 + lis[i][23]/2))
# diff = []
# for i in range(len(hip_center)-1):
# diff.append(current_hip - prev_hip)
# for i in range(len(diff)):
# # bone.keyframe_insert(data_path="rotation_euler",frame=time)
# bpy.context.object.pose.bones['waist'].location.x += diff[i].x
# bpy.context.object.pose.bones['waist'].location.z += diff[i].y
# bpy.context.object.pose.bones['waist'].keyframe_insert(data_path="location",index = 0,frame=i)
# bpy.context.object.pose.bones['waist'].keyframe_insert(data_path="location",index = 1,frame=i)
# prev_hip = current_hip
frame_counter += 1
# rotation_frames.append(rotation_frames)
# hip_center = []
# for i in range(len(frames)):
# hip_center.append((mt.Vector(frames[i][24])/2 + mt.Vector(frames[i][23])/2))
# diff = []
# for i in range(1, len(hip_center)):
# diff.append(hip_center[i -1] - hip_center[i])
# for i in range(len(diff)):
# armature.pose.bones['waist'].location.x += diff[i].x
# armature.pose.bones['waist'].location.y += diff[i].y * 2
# armature.pose.bones['waist'].keyframe_insert(data_path="location",frame=i)
# print(rotation_frames)
if AFPS > len(rotation_frames["body"]):
AFPS = len(rotation_frames["body"])
if AFPS<MFBS:
AFPS = MFBS+1
print(AFPS , MFBS , min_conf/100)
for bone in rotation_frames.keys():
l_index =0
for i in range(0,len(rotation_frames[bone]),AFPS):
index = 0
# if i<l_index:
# print("break")
# continue
# var = i - l_index
i = l_index + MFBS
sample_visib = [x.visibility for x in rotation_frames[bone][i:i + AFPS ]]
if len(sample_visib) == 0:
continue
max_visib = max(sample_visib)
# print(min_conf)
if max_visib > min_conf/100.0:
index = sample_visib.index(max_visib) + i
if bone == "body":
print(f"index {index} , i: {i}, l_index: {l_index}")
l_index = index
# print("hehre")
armature.pose.bones[bone].rotation_euler = rotation_frames[bone][index].euler
if rotation_frames[bone][index].axis != None:
armature.pose.bones[bone].rotation_euler.rotate_axis(rotation_frames[bone][index].axis, rotation_frames[bone][index].axis_val)
armature.pose.bones[bone].keyframe_insert(data_path="rotation_euler",frame=index)
# i +=
else:
sample_visib = [x.visibility for x in rotation_frames[bone][i:i + AFPS * 2]]
max_visib = max(sample_visib)
index = sample_visib.index(max_visib) + i
l_index = index
# print("hehre")
armature.pose.bones[bone].rotation_euler = rotation_frames[bone][index].euler
if rotation_frames[bone][index].axis != None:
armature.pose.bones[bone].rotation_euler.rotate_axis(rotation_frames[bone][index].axis, rotation_frames[bone][index].axis_val)
armature.pose.bones[bone].keyframe_insert(data_path="rotation_euler",frame=index)
testy = hipy[0]
armature.pose.bones['waist'].location.y = 0
armature.pose.bones['waist'].keyframe_insert(data_path="location",index=1,frame=0)
delta = 0
for i in range(1,len(hipy)):
delta += (testy - hipy[i])
if abs(delta)>.1:
testy = hipy[i]
armature.pose.bones['waist'].location.y = hipy[i] - hipy[0]
armature.pose.bones['waist'].keyframe_insert(data_path="location",index=1,frame=i)
delta =0
testx = hipx[0]
armature.pose.bones['waist'].location.x = 0
armature.pose.bones['waist'].keyframe_insert(data_path="location",index=0,frame=0)
delta = 0
for i in range(1,len(hipx)):
delta += (testx - hipx[i])
if abs(delta)>.1:
testx = hipx[i]
armature.pose.bones['waist'].location.x = hipx[i] - hipx[0]
armature.pose.bones['waist'].keyframe_insert(data_path="location",index=0,frame=i)
delta =0
# def __init__(self, euler, bone, axis=None, axis_val=None, visibility = None):
# self.euler = euler
# self.bone = bone
# self.axis = axis
# self.axis_val = axis_val
# self.visibility = visibility
# print(rotation_frames)
bpy.data.scenes["Scene"].frame_end = len(frames)
return {'FINISHED'}
def register():
bpy.utils.register_class(CSVPropertiesPanel)
bpy.utils.register_class(ReadCSVOperator)
bpy.utils.register_class(GetArmatureOperator)
bpy.utils.register_class(StartModelingOperator)
bpy.types.Scene.armature_name = bpy.props.StringProperty(name = "Armature Name")
bpy.types.Scene.csv_file_path = bpy.props.StringProperty(subtype="FILE_PATH" , name="CSV File Path")
bpy.types.Scene.avg_frames_per_sample = bpy.props.IntProperty(name = "Avg. Frames per Sample", min =1,max=90)
bpy.types.Scene.min_frames_between_sample = bpy.props.IntProperty(name = "Min. Frames Between Samples", min =1,max=90)
bpy.types.Scene.min_confidence = bpy.props.IntProperty(subtype="PERCENTAGE" , name="Minimum Confidence",min = 0, max = 100)
def unregister():
del bpy.types.Scene.csv_file_path
bpy.utils.unregister_class(CSVPropertiesPanel)
bpy.utils.unregister_class(ReadCSVOperator)
bpy.utils.unregister_class(StartModelingOperator)
bpy.utils.unregister_class(GetArmatureOperator)
if __name__ == "__main__":
register()
|
Pramish-Aryal/Pratibimbha
|
src/minor_blender.py
|
minor_blender.py
|
py
| 21,332 |
python
|
en
|
code
| 1 |
github-code
|
50
|
4395418255
|
#!/usr/bin/python3
#coding:utf-8
import os
import sys
import time
import signal
import pyqrcode
#By:H3C4
def handler(signum, frame):
print("Exit..")
time.sleep(1)
sys.exit(0)
signal.signal(signal.SIGINT, handler)
def error():
if len(sys.argv) != 2:
print("[!] Options\n")
print("\t[1] Create directory")
print("\n\t[2] Create QR code")
print("\n\t[3] Make to host discovery\n")
print("\n[*]Uso: python3 " + sys.argv[0] + " option\n")
sys.exit(1)
def createDirectory():
try :
path=input("Write the name of directory: ")
os.mkdir(path)
time.sleep(1)
print("\n[!] Directory created in the current path whit the name:" + path +"\n")
except OSError:
print("\n[x] Sorry, the directory alredy exist in the current path")
def createQrcode():
    try:
        QRstring = input("Insert the url of the page: ")
        file = input("Insert the name of qr: ")
        img = file + '.png'
        generated = pyqrcode.create(QRstring)
        generated.png(img, scale=8)
        print("\n[!] Creating QR code for: ", QRstring)
        time.sleep(2)
        print("\n\t[*] Success, QR code created")
        time.sleep(1)
        print("\n[!] Saved in ", img)
    except OSError:
        print("[x] Sorry, couldn't create the QR")
def hostDiscovery():
ip = input("Insert the name host or ip : ")
print(f"[!] Sending trace ICMP to {ip}")
time.sleep(0.5)
print (os.system(f"ping {ip}"))
if __name__ == '__main__':
try:
if int(sys.argv[1]) == 1:
createDirectory()
elif int(sys.argv[1]) == 2:
createQrcode()
elif int(sys.argv[1]) == 3:
hostDiscovery()
except:
error()
|
h3c4/basicTool
|
basicTool.py
|
basicTool.py
|
py
| 1,815 |
python
|
en
|
code
| 0 |
github-code
|
50
|
26715520977
|
import os
import re
import numpy as np
from PIL import Image
def parse(filename):
chunks = re.split(r'A|E',filename[0:-4])
A = int(chunks[1])
E = int(chunks[2])
return [A, E]
### Transform spherical coordinates into cartesian coordinates
def sph2cart(theta, phi, r=1): # theta is elevation and phi is azimuth
'''
y
|
|____ x
/
z
'''
z = r * np.cos(theta/180 * np.pi) * np.cos(phi/180 * np.pi)
x = r * np.cos(theta/180 * np.pi) * np.sin(phi/180 * np.pi)
    y = r * np.sin(theta/180 * np.pi)  # theta is in degrees, matching the cosine terms above
return np.array([x,y,z])
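# Quick check (illustrative, not in the original): theta = 0, phi = 0 gives
# (x, y, z) = (0, 0, r), and theta = 90, phi = 0 gives (0, r, 0), matching
# the axis diagram in the docstring above.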
def load_dataset(path):
files = [filename for filename in os.listdir(path) if re.match(r'.*\.pgm', filename) and (not re.match(r'.*_Ambient\.pgm', filename))]
h, w = np.array(Image.open(os.path.join(path, files[0]))).shape
images = np.zeros((h, w, len(files)))
source = np.zeros((len(files), 3))
for i, filename in enumerate(files):
A, E = parse(filename)
source[i] = sph2cart(E,A)
images[:, :, i] = Image.open(os.path.join(path, filename))
return [images, source]
|
CharlesLiangZHY/PhotometricStereo
|
read.py
|
read.py
|
py
| 1,094 |
python
|
en
|
code
| 1 |
github-code
|
50
|
6574272414
|
import pytest
from pylixir.application.council import CouncilType
from pylixir.core.committee import Sage
from pylixir.core.randomness import SeededRandomness
from pylixir.core.state import GameState
from pylixir.data.council_pool import ConcreteCouncilPool
from pylixir.data.pool import get_ingame_council_pool
@pytest.fixture(name="council_pool")
def fixture_council_pool() -> ConcreteCouncilPool:
return get_ingame_council_pool(skip=True)
def test_pool_size_exact(council_pool: ConcreteCouncilPool) -> None:
assert len(council_pool) == 294
@pytest.mark.parametrize(
"council_type, count",
[
(CouncilType.chaos, 0),
(CouncilType.chaosLock, 0),
(CouncilType.lawful, 0),
(CouncilType.lawfulLock, 0),
(CouncilType.common, 0),
(CouncilType.lock, 0),
(CouncilType.exhausted, 0),
],
)
def test_get_council(
council_pool: ConcreteCouncilPool, council_type: CouncilType, count: int
) -> None:
councils = council_pool.get_available_councils(1, council_type)
assert len(councils) > count
def test_sample_council(
council_pool: ConcreteCouncilPool, abundant_state: GameState
) -> None:
for seed in range(50):
randomness = SeededRandomness(seed)
council_pool.sample_council(
abundant_state, Sage(power=2, is_removed=False, slot=1), randomness, []
)
|
oleneyl/pylixir
|
tests/data/test_pool.py
|
test_pool.py
|
py
| 1,381 |
python
|
en
|
code
| 0 |
github-code
|
50
|
11464794711
|
# BOJ 17779 Gerrymandering 2
# Use x, y, d1, d2 to trace the boundary and locate district 5,
# mark district 5 on the grid,
# then sum each district's population, skipping the marked cells.
def min_dif(x,y,d1,d2):
zone = [[0] * (N+1) for _ in range(N+1)]
population = [0] * 6
for i in range(d1+1):
zone[x+i][y-i] = 5
zone[x+d2+i][y+d2-i] = 5
for i in range(d2+1):
zone[x+i][y+i] = 5
zone[x+d1+i][y-d1+i] = 5
    # boundary setup is done up to here
    for i in range(x+1, x+d1+d2):  # fill the space between the boundaries (row by row)
state = False
for j in range(1,N+1):
if zone[i][j] == 5:
state = not state
if state:
zone[i][j] = 5
    for r in range(1, N+1):
        for c in range(1, N+1):
            # electoral district 1
            if r < x+d1 and c <= y and zone[r][c] == 0:
                #print(P[r][c])
                population[1] += P[r][c]
            # electoral district 2
            elif r <= x+d2 and y < c and zone[r][c] == 0:
                population[2] += P[r][c]
            # electoral district 3
            elif x+d1 <= r and c < y-d1+d2 and zone[r][c] == 0:
                population[3] += P[r][c]
            # electoral district 4
            elif x+d2 < r and y-d1+d2 <= c and zone[r][c] == 0:
                population[4] += P[r][c]
            # electoral district 5
            elif zone[r][c] == 5:
                population[5] += P[r][c]
# for i in range(N+1):
# print(zone[i])
#
# print()
return max(population[1:]) - min(population[1:])
N = int(input())
P = [[0]*(N+1)] + [([0] + list(map(int, input().split()))) for _ in range(N)]
#print(P)
result = 20*20*100
# try every x, y, d1, d2
# print(min_dif(2,3,1,2))
for x in range(1,N+1):
for y in range(1,N+1):
for d1 in range(1,N+1):
for d2 in range(1,N+1):
if 1 <= x < x+d1+d2 <= N and 1 <= y-d1 < y < y+d2 <= N:
# print(x, y, d1, d2, min_dif(x,y,d1,d2))
result = min(result, min_dif(x,y,d1,d2))
print(result)
|
5angjae/Algorithm
|
BAEKJOON/Python/BJ17779.py
|
BJ17779.py
|
py
| 2,086 |
python
|
ko
|
code
| 0 |
github-code
|
50
|
71265917596
|
from webob.exc import HTTPForbidden, HTTPNotFound, HTTPUnauthorized
from swift.common.utils import get_logger, split_path, get_remote_client
from swift.common.middleware.acl import clean_acl, parse_acl, referrer_allowed
class SwiftAuth(object):
"""
Keystone to Swift authorization system.
Add to your pipeline in proxy-server.conf, such as::
[pipeline:main]
pipeline = catch_errors cache tokenauth swiftauth proxy-server
Set account auto creation to true::
[app:proxy-server]
account_autocreate = true
And add a swift authorization filter section, such as::
[filter:swiftauth]
use = egg:keystone#swiftauth
keystone_swift_operator_roles = Admin, SwiftOperator
keystone_tenant_user_admin = true
If Swift memcache is to be used for caching tokens, add the additional
property in the tokenauth filter:
[filter:tokenauth]
paste.filter_factory = keystone.middleware.auth_token:filter_factory
...
cache = swift.cache
    This maps tenants to accounts in Swift.
    The users able to grant ACLs / create containers are the ones whose
    roles appear in the keystone_swift_operator_roles setting, which by
    default includes the Admin and SwiftOperator roles.
    If the option keystone_tenant_user_admin is set to true, a user whose
    name matches the tenant name becomes the owner of that account.
    Example: if we have an account called hellocorp with a user hellocorp,
    that user is admin on that account and can grant ACLs to all other
    users of hellocorp.
:param app: The next WSGI app in the pipeline
:param conf: The dict of configuration values
"""
def __init__(self, app, conf):
self.app = app
self.conf = conf
self.logger = get_logger(conf, log_route='keystone')
self.reseller_prefix = conf.get('reseller_prefix', 'AUTH').strip()
self.keystone_swift_operator_roles = \
conf.get('keystone_swift_operator_roles', 'Admin, SwiftOperator')
self.keystone_tenant_user_admin = \
conf.get('keystone_tenant_user_admin', "false").lower() in \
('true', 't', '1', 'on', 'yes', 'y')
self.allowed_sync_hosts = [h.strip()
for h in conf.get('allowed_sync_hosts', '127.0.0.1').split(',')
if h.strip()]
def __call__(self, environ, start_response):
self.logger.debug('Initialise keystone middleware')
identity = self._keystone_identity(environ)
if not identity:
            # TODO: allow non-authenticated access via referrer
environ['swift.authorize'] = self.denied_response
return self.app(environ, start_response)
self.logger.debug("Using identity: %r" % (identity))
environ['keystone.identity'] = identity
environ['REMOTE_USER'] = identity.get('tenant')
environ['swift.authorize'] = self.authorize
environ['swift.clean_acl'] = clean_acl
return self.app(environ, start_response)
def _keystone_identity(self, environ):
""" Extract the identity from the Keystone auth component """
if (environ.get('HTTP_X_IDENTITY_STATUS') != 'Confirmed'):
return None
roles = []
if ('HTTP_X_ROLES' in environ):
roles = environ.get('HTTP_X_ROLES').split(',')
identity = {'user': environ.get('HTTP_X_USER_NAME'),
'tenant': (environ.get('HTTP_X_TENANT_ID'),
environ.get('HTTP_X_TENANT_NAME')),
'roles': roles}
return identity
def _reseller_check(self, account, tenant_id):
""" Check reseller prefix """
return account == '%s_%s' % (self.reseller_prefix, tenant_id)
def authorize(self, req):
env = req.environ
env_identity = env.get('keystone.identity', {})
tenant = env_identity.get('tenant')
try:
version, account, container, obj = split_path(req.path, 1, 4, True)
except ValueError:
return HTTPNotFound(request=req)
if not self._reseller_check(account, tenant[0]):
self.logger.debug('tenant mismatch')
return self.denied_response(req)
user_groups = env_identity.get('roles', [])
        # If the user is in the swift operator group, make them the owner of the account.
for _group in self.keystone_swift_operator_roles.split(','):
_group = _group.strip()
if _group in user_groups:
                self.logger.debug(
                    "User in group: %s allowed to manage this account" %
                    (_group))
req.environ['swift_owner'] = True
return None
        # If the user has the same name as the tenant, make them the owner.
user = env_identity.get('user', '')
if self.keystone_tenant_user_admin and user == tenant[1]:
self.logger.debug("user: %s == %s tenant and option "\
"keystone_tenant_user_admin is set" % \
(user, tenant))
req.environ['swift_owner'] = True
return None
# Allow container sync
if (req.environ.get('swift_sync_key') and
req.environ['swift_sync_key'] ==
req.headers.get('x-container-sync-key', None) and
'x-timestamp' in req.headers and
(req.remote_addr in self.allowed_sync_hosts or
get_remote_client(req) in self.allowed_sync_hosts)):
self.logger.debug('allowing container-sync')
return None
        # Check if the referrer allows it
referrers, groups = parse_acl(getattr(req, 'acl', None))
if referrer_allowed(req.referer, referrers):
if obj or '.rlistings' in groups:
self.logger.debug('authorizing via ACL')
return None
return self.denied_response(req)
# Allow ACL at individual user level (tenant:user format)
if '%s:%s' % (tenant[0], user) in groups:
self.logger.debug('user explicitly allowed in ACL authorizing')
return None
# Check if we have the group in the usergroups and allow it
for user_group in user_groups:
if user_group in groups:
self.logger.debug('user in group which is allowed in' \
' ACL: %s authorizing' % (user_group))
return None
        # last but not least, return a denial
return self.denied_response(req)
def denied_response(self, req):
"""
Returns a standard WSGI response callable with the status of 403 or 401
depending on whether the REMOTE_USER is set or not.
"""
if req.remote_user:
return HTTPForbidden(request=req)
else:
return HTTPUnauthorized(request=req)
def filter_factory(global_conf, **local_conf):
"""Returns a WSGI filter app for use with paste.deploy."""
conf = global_conf.copy()
conf.update(local_conf)
def auth_filter(app):
return SwiftAuth(app, conf)
return auth_filter
|
HugoKuo/keystone-essex3
|
keystone/middleware/swift_auth.py
|
swift_auth.py
|
py
| 7,195 |
python
|
en
|
code
| 1 |
github-code
|
50
|
26212448058
|
# import sqlite3
# def display_values():
# conn = sqlite3.connect("Result.db")
# cur = conn.cursor()
# cur.execute("SELECT Data, attacktype FROM Datas")
# rows = cur.fetchall()
# print("Values in the table:")
# for row in rows:
# print("Data:", row[0])
# print("Attack Type:", row[1])
# conn.close()
# display_values()
import openai
from colorama import Fore,init
init()
# openai.api_key = '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'
async def chat(prompt,key):
try:
openai.api_key = key
response = openai.Completion.create(
engine='text-davinci-002',
prompt=prompt,
max_tokens=50,
n=1,
stop=None,
temperature=0.7
)
return response.choices[0].text.strip()
    except openai.error.OpenAIError:
        print(Fore.RED + "Error in hackgpt console: Could not connect to OpenAI API")
async def M_chat(key):
    try:
        while True:
            user_input = input("User: ")
            if user_input.lower() == 'exit':
                break
            # chat() is a coroutine and needs the API key, so pass it and await it
            response = await chat(user_input, key)
            print("ChatGPT: " + response)
    except Exception as err:
        print(Fore.BLACK + "Error in hackgpt console:", err)
|
kpister/prompt-linter
|
data/scraping/repos/HeisenbergCipherCracker~SQLJ/lib~scripts~Hackgpt.py
|
lib~scripts~Hackgpt.py
|
py
| 1,277 |
python
|
en
|
code
| 0 |
github-code
|
50
|
37248842828
|
"""
===================================================
UTILS.PY
Helper functions.
===================================================
"""
# Imports
import pennylane as qml
from model import num_qubits
def square_loss(labels, predictions):
loss = 0
for l, p in zip(labels, predictions):
loss += (l - p) ** 2
loss = loss / len(labels)
return loss
def layer(weights):
for wire in range(num_qubits - 1):
qml.RX(weights[wire, 0], wires=[wire])
qml.RY(weights[wire, 1], wires=[wire])
qml.RZ(weights[wire, 2], wires=[wire])
qml.CNOT(wires=[wire, wire + 1])
def full_layer(weights):
for wire in range(num_qubits):
qml.RX(weights[wire, 0], wires=[wire])
qml.RY(weights[wire, 1], wires=[wire])
qml.RZ(weights[wire, 2], wires=[wire])
for wire in range(num_qubits-1):
for wire2 in range(wire + 1, num_qubits):
qml.CNOT(wires=[wire, wire2])
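# Usage sketch (hedged: the device/QNode harness below is an assumption, not
# part of this file). A weights array of shape (num_qubits, 3), one row of
# (RX, RY, RZ) angles per qubit, covers both layer() and full_layer():
# dev = qml.device("default.qubit", wires=num_qubits)
# @qml.qnode(dev)
# def circuit(weights):
#     full_layer(weights)
#     return qml.expval(qml.PauliZ(0))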
|
emilystamm/crypto_time_series_quantum
|
src/utils.py
|
utils.py
|
py
| 947 |
python
|
en
|
code
| 1 |
github-code
|
50
|
557495729
|
import numpy as np
def axis_angle_rot_matrix(k,q):
c_theta = np.cos(q)
s_theta = np.sin(q)
v_theta = 1 - np.cos(q)
kx = k[0]
ky = k[1]
kz = k[2]
# Row 1
r00 = kx * kx * v_theta + c_theta
r01 = kx * ky * v_theta - kz * s_theta
r02 = kx * kz * v_theta + ky * s_theta
# Row 2
r10 = kx * ky * v_theta + kz * s_theta
r11 = ky * ky * v_theta + c_theta
r12 = ky * kz * v_theta - kx * s_theta
# Row 3
r20 = kx * kz * v_theta - ky * s_theta
r21 = ky * kz * v_theta + kx * s_theta
r22 = kz * kz * v_theta + c_theta
# 3x3 rotation matrix
rot_matrix = np.array([[r00, r01, r02],
[r10, r11, r12],
[r20, r21, r22]])
return rot_matrix
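# Sanity check (illustrative, not in the original): for k = [0, 0, 1] and
# q = pi/2 we get c_theta = 0, s_theta = 1, v_theta = 1, so the matrix
# reduces to [[0, -1, 0], [1, 0, 0], [0, 0, 1]], the familiar 90-degree
# rotation about z expected from the axis-angle (Rodrigues) formula above.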
def hr_matrix(k,t,q):
# Calculate the rotation matrix (angle-axis representation)
rot_matrix_A_B = axis_angle_rot_matrix(k,q)
# Store the translation vector t
translation_vec_A_B = t
# Convert to a 2D matrix
t0 = translation_vec_A_B[0]
t1 = translation_vec_A_B[1]
t2 = translation_vec_A_B[2]
translation_vec_A_B = np.array([[t0],
[t1],
[t2]])
# Create the homogeneous transformation matrix
homgen_mat = np.concatenate((rot_matrix_A_B, translation_vec_A_B), axis=1) # side by side
# Row vector for bottom of homogeneous transformation matrix
extra_row_homgen = np.array([[0, 0, 0, 1]])
# Add extra row to homogeneous transformation matrix
homgen_mat = np.concatenate((homgen_mat, extra_row_homgen), axis=0) # one above the other
return homgen_mat
class RoboticArm:
def __init__(self,k_arm,t_arm):
self.k = np.array(k_arm)
self.t = np.array(t_arm)
assert k_arm.shape == t_arm.shape, 'Warning! Improper definition of rotation axes and translations'
self.N_joints = k_arm.shape[0]
def position(self,Q,index=-1,p_i=[0,0,0]):
# The position of this joint described by the index
p_i_x = p_i[0]
p_i_y = p_i[1]
p_i_z = p_i[2]
this_joint_position = np.array([[p_i_x],
[p_i_y],
[p_i_z],
[1]])
# End effector joint
if (index == -1):
index = self.N_joints - 1
# Store the original index of this joint
orig_joint_index = index
# Store the result of matrix multiplication
running_multiplication = None
# Start from the index of this joint and work backwards to index 0
while (index >= 0):
# If we are at the original joint index
if (index == orig_joint_index):
running_multiplication = hr_matrix(self.k[index],self.t[index],Q[index]) @ this_joint_position
# If we are not at the original joint index
else:
running_multiplication = hr_matrix(self.k[index],self.t[index],Q[index]) @ running_multiplication
index = index - 1
# extract the points
px = running_multiplication[0][0]
py = running_multiplication[1][0]
pz = running_multiplication[2][0]
position_global_frame = np.array([px, py, pz])
return position_global_frame
def pseudo_inverse(self,theta_start,p_eff_N,goal_position,max_steps=np.inf):
v_step_size = 0.05
theta_max_step = 0.2
Q_j = theta_start # Array containing the starting joint angles
p_end = np.array([goal_position[0], goal_position[1], goal_position[2]]) # desired x, y, z coordinate of the end effector in the base frame
p_j = self.position(Q_j,p_i=p_eff_N) # x, y, z coordinate of the position of the end effector in the global reference frame
delta_p = p_end - p_j # delta_x, delta_y, delta_z between start position and desired final position of end effector
j = 0 # Initialize the counter variable
# While the magnitude of the delta_p vector is greater than 0.01
# and we are less than the max number of steps
while np.linalg.norm(delta_p) > 0.01 and j<max_steps:
print(f'j{j}: Q[{Q_j}] , P[{p_j}]') # Print the current joint angles and position of the end effector in the global frame
# Reduce the delta_p 3-element delta_p vector by some scaling factor
# delta_p represents the distance between where the end effector is now and our goal position.
v_p = delta_p * v_step_size / np.linalg.norm(delta_p)
# Get the jacobian matrix given the current joint angles
J_j = self.jacobian(Q_j,p_eff_N)
# Calculate the pseudo-inverse of the Jacobian matrix
J_invj = np.linalg.pinv(J_j)
# Multiply the two matrices together
v_Q = np.matmul(J_invj,v_p)
# Move the joints to new angles
Q_j = Q_j + np.clip(v_Q,-1*theta_max_step,theta_max_step)#[:self.N_joints]
# Get the current position of the end-effector in the global frame
p_j = self.position(Q_j,p_i=p_eff_N)
# Increment the time step
j = j + 1
# Determine the difference between the new position and the desired end position
delta_p = p_end - p_j
# Return the final angles for each joint
return Q_j
def jacobian(self,Q,p_eff_N=[0,0,0]):
# Position of the end effector in global frame
p_eff = self.position(Q,-1,p_eff_N)
first_iter = True
jacobian_matrix = None
for i in range(0, self.N_joints):
if (first_iter == True):
# Difference in the position of the end effector in the global frame
# and this joint in the global frame
p_eff_minus_this_p = p_eff - self.position(Q,index=i)
# Axes
kx = self.k[i][0]
ky = self.k[i][1]
kz = self.k[i][2]
k = np.array([kx, ky, kz])
px = p_eff_minus_this_p[0]
py = p_eff_minus_this_p[1]
pz = p_eff_minus_this_p[2]
p_eff_minus_this_p = np.array([px, py, pz])
this_jacobian = np.cross(k, p_eff_minus_this_p)
# Convert to a 2D matrix
j0 = this_jacobian[0]
j1 = this_jacobian[1]
j2 = this_jacobian[2]
this_jacobian = np.array([[j0],
[j1],
[j2]])
jacobian_matrix = this_jacobian
first_iter = False
else:
p_eff_minus_this_p = p_eff - self.position(Q,index=i)
# Axes
kx = self.k[i][0]
ky = self.k[i][1]
kz = self.k[i][2]
k = np.array([kx, ky, kz])
# Difference between this joint's position and end effector's position
px = p_eff_minus_this_p[0]
py = p_eff_minus_this_p[1]
pz = p_eff_minus_this_p[2]
p_eff_minus_this_p = np.array([px, py, pz])
this_jacobian = np.cross(k, p_eff_minus_this_p)
# Convert to a 2D matrix
j0 = this_jacobian[0]
j1 = this_jacobian[1]
j2 = this_jacobian[2]
this_jacobian = np.array([[j0],
[j1],
[j2]])
jacobian_matrix = np.concatenate((jacobian_matrix, this_jacobian), axis=1) # side by side
return jacobian_matrix
def main():
k = np.array([[0,0,1],[0,0,1]])
a1 = 4.7
a2 = 5.9
a3 = 5.4
a4 = 6.0
t = np.array([[0,0,0],[a2,0,a1]])
# Position of end effector in joint 2 (i.e. the last joint) frame
p_eff_2 = [a4,0,a3]
# Create an object of the RoboticArm class
k_c = RoboticArm(k,t)
# Starting joint angles in radians (joint 1, joint 2)
q_0 = np.array([0,0])
# desired end position for the end effector with respect to the base frame of the robotic arm
endeffector_goal_position = np.array([4.0,10.0,a1 + a4])
# Display the starting position of each joint in the global frame
for i in np.arange(0,k_c.N_joints):
print(f'joint {i} position = {k_c.position(q_0,index=i)}')
print(f'end_effector = {k_c.position(q_0,index=-1,p_i=p_eff_2)}')
print(f'goal = {endeffector_goal_position}')
# Return joint angles that result in the end effector reaching endeffector_goal_position
final_q = k_c.pseudo_inverse(q_0, p_eff_N=p_eff_2, goal_position=endeffector_goal_position, max_steps=500)
# Final Joint Angles in degrees
print('\n\nFinal Joint Angles in Degrees')
print(f'Joint 1: {np.degrees(final_q[0])} , Joint 2: {np.degrees(final_q[1])}')
if __name__ == '__main__':
main()
|
Nil69420/Newton-Raphson-Forward-and-Inverse-Kinematics
|
Python/inverse_kinematics.py
|
inverse_kinematics.py
|
py
| 9,497 |
python
|
en
|
code
| 0 |
github-code
|
50
|
75180342555
|
from bs4 import BeautifulSoup
from numpy import NaN
import csv
import requests
import json
import pandas as pd
df1 = pd.read_csv("stage1-part3.csv")
df2 = pd.read_csv("stage1-part1-stage2.csv")
df3 = pd.read_csv("stage3.csv")
df4 = pd.read_csv("daily-matchups.csv")
playerName = df2['Player'].tolist()
playerTeam = df2['Player Team'].tolist()
homeTeam = df4['Home Team'].tolist()
awayTeam = df4['Away Team'].tolist()
dailyTeams = homeTeam + awayTeam
homeTeamPlayers = []
awayTeamPlayers = []
playerInfo = list(zip(playerName, playerTeam))
playerMatchups = dict()
for player in playerInfo:
# print(player)
# print(player[0])
# print(player[1])
if player[1] not in playerMatchups:
playerMatchups[player[1]] = [player[0]]
else:
playerMatchups[player[1]].append(player[0])
dailyMatchup = dict()
for team in homeTeam:
if team in playerMatchups:
homeTeamPlayers.append(playerMatchups[team])
for team in awayTeam:
if team in playerMatchups:
awayTeamPlayers.append(playerMatchups[team])
for roster in homeTeamPlayers:
opposingTeam = awayTeam.pop(0)
dailyMatchup[opposingTeam] = roster
for roster in awayTeamPlayers:
opposingTeam = homeTeam.pop(0)
dailyMatchup[opposingTeam] = roster
print(dailyMatchup)
# ! NOW MATCHUP HOMETEAMS VS AWAYTEAMPLAYERS AND AWAYTEAMS VS HOMETEAMPLAYERS
# for i in homeTeamPlayers:
# print(i)
df_final = df1.join(df2, how='right')
df_final = df_final.join(df3, how='left', lsuffix='_left', rsuffix='_right')
df_final.to_csv('jock-nba-final.csv')
# ~ POSSIBLE SOLUTION 1, STACKS THE COLUMNS
# in_1_name = csv.reader(open("stage1-part3.csv", 'r'))
# in_2_name = csv.reader(open("stage1-part1-stage2.csv", 'r'))
# in_3_name = csv.reader(open("stage3.csv", 'r'))
# out_name = csv.writer(open("jock-nba-final.csv", 'w'))
# for i, row in enumerate(in_1_name):
# out_name.writerow(row)
# for i, row in enumerate(in_2_name):
# out_name.writerow(row)
# for i, row in enumerate(in_3_name):
# out_name.writerow(row)
|
daygodavy/nba-jock
|
merge-stages.py
|
merge-stages.py
|
py
| 2,011 |
python
|
en
|
code
| 0 |
github-code
|
50
|
12864614224
|
import sys
import numpy
import matplotlib.pyplot
def Func(x):
return pow(x,2)
def Trapz():
A=float(input("Please enter the location of the 1D domain left bound A: "))
B=float(input("Please enter the location of the 1D domain right bound B: "))
M=int(input("Please enter your desired number of subintervals M: "))
H=float(abs((B-A)/M)) #computing the spatial increment
SUM=0.0
for K in range(1,M): #loop over subintervals
X=A+float(K)*H
SUM=SUM+Func(X)
SUM=H*(Func(A)+Func(B)+2.0*SUM)/2.0 #computing the trapezoidal sum
print('The approximate value of the integral of your desired function on the interval',A,'==>',B,'using',M,'subintervals is',SUM)
Trapz()
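# Worked check (illustrative, not in the original): Func(x) = x**2 on [0, 2]
# with M = 4 gives H = 0.5 and interior sum Func(0.5)+Func(1.0)+Func(1.5) = 3.5,
# so SUM = 0.5*(Func(0)+Func(2)+2*3.5)/2 = 2.75, close to the exact value
# 8/3 = 2.666..., and the error shrinks as M grows.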
|
manirazi/Python_codes
|
Trapezoidal_rule.py
|
Trapezoidal_rule.py
|
py
| 741 |
python
|
en
|
code
| 0 |
github-code
|
50
|
73065585116
|
import os
from imagekitio import ImageKit
from dotenv import load_dotenv
load_dotenv()
# Imagekit config
imagekit = ImageKit(
public_key=os.getenv('IMAGE_KIT_PUBLIC_KEY'),
private_key=os.getenv('IMAGE_KIT_PRIVATE_KEY'),
url_endpoint=os.getenv('IMAGE_KIT_URL')
)
# Folder with images
# Images should be in a folder with the name of the style
baseFolder = 'styles/'
# Loop through each folder
with os.scandir(baseFolder) as entries:
for entry in entries:
fileFolder = baseFolder + entry.name # generate folder names
# Loop through each file inside the folder
with os.scandir(fileFolder) as files:
for file in files:
if file.is_file():
imgPath = fileFolder + "/" + file.name # generate file names
# generate a tag from the folder name
# converting the name to lowercase and replacing spaces with an underscore
tag = entry.name.lower().replace(" ", "_")
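                # e.g. a folder named "Art Deco" (hypothetical) yields the tag "art_deco"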
# uploading to imagekit
upload = imagekit.upload(
file=open(imgPath, "rb"),
file_name=tag,
options={
"folder": "/elysian/",
'tags': [tag]
},
)
# print status of each upload
print("Upload binary", upload, end="\n\n")
print("done") # end program
|
dayumsam/elyssian-api
|
image_upload.py
|
image_upload.py
|
py
| 1,509 |
python
|
en
|
code
| 0 |
github-code
|
50
|
26201185030
|
# The constructor takes name, salary, and rank values when an object is created.
# Calling the calis (work) method increases the employee's day count by one and reports that they worked.
# Calling the terfi (promotion) method raises the employee's assigned salary (by 200).
# Calling the bilgileriGoster (show info) method prints the name, salary, total days worked, and the employee's rank.
class Staffs:
def __init__(self, name, maas, rutbe, gunsayisi):
self.name = name
self.maas = maas
self.rutbe = rutbe
self.gunsayisi = gunsayisi
def calis(self):
self.gunsayisi += 1
return f"Çalıştığı gün sayısı {self.gunsayisi}"
def terfi(self):
self.maas += 200
return f"Terfi aldınız yeni maaşınız {self.maas}"
def bilgileriGoster (self):
return f"Personel Ad - SOYAD: {self.name}, çalışan aldığı maaş: {self.maas}, çalıştığı gün sayısı: {self.gunsayisi}, personel rütbe: {self.rutbe} "
staf1 = Staffs("Şeref Kekeç", 1500, " Usta ", 200)
staf2 = Staffs("Ömer Faruk",2000, " Usta Başı ", 500)
staf3 = Staffs("Kayahan Bayazit",1000, " Çırak ", 0)
stafs = [staf1, staf2, staf3]
for s in stafs:
print(s.bilgileriGoster())
|
seref7275/Python-Beginners-Project
|
personel-calisma-ucretlenirme.py
|
personel-calisma-ucretlenirme.py
|
py
| 1,388 |
python
|
tr
|
code
| 0 |
github-code
|
50
|
10502105582
|
#!/usr/bin/env python3
"""
Count and change workers salary.
.manage_salary_list - method, that provides to edit list of workers with
and add salary to them.
"""
from .support_modules.standart_functions import (
BasicFunctionsS as BasF_S
)
class WorkersSalary(BasF_S):
"""Manage workers salary."""
__slots__ = [
'salary_list_path',
'salary_list',
'user',
]
def __init__(self, user):
"""Load data."""
self.user = user
self.salary_list_path = (
super().get_root_path() / 'data' / 'salary_list'
)
if self.salary_list_path.exists():
self.salary_list = super().load_data(
data_path=self.salary_list_path,
user=user,
)
else:
self.salary_list = {
'Карьер': {},
'Офис': {},
'КОЦ': {},
}
self._dump_salary_list()
def _dump_salary_list(self):
"""Dump salary list to file."""
super().dump_data(
data_path=self.salary_list_path,
base_to_dump=self.salary_list,
user=self.user
)
def _choose_division(self):
"""Choose divisioon to manage."""
print("[ENTER] - выйти."
"\nВыберете подразделение.")
division = super().choise_from_list(self.salary_list, none_option=True)
return division
def _add_profession(self, division):
"""Add profession to list."""
profession = input("Введите название новой профессии: ")
self.salary_list[division][profession] = 0
self._change_salary(division, profession)
def _change_salary(self, division, profession=None):
"""Change salary for profession."""
if not profession:
print("Выберете профессию из списка:")
profession = super().choise_from_list(self.salary_list[division])
salary = super().float_input(msg="Введите оклад: ")
self.salary_list[division][profession] = salary
def _delete_profession(self, division):
"""Delete profession from list."""
print("Выберете профессию для удаления:")
profession = super().choise_from_list(self.salary_list[division])
self.salary_list[division].pop(profession)
def manage_salary_list(self):
"""Manage salary list."""
division = self._choose_division()
super().clear_screen()
while division:
for profession in sorted(self.salary_list[division]):
print(
"{:<23} - {:<9,}p."
.format(profession, self.salary_list[division][profession])
)
actions_list = {
'Добавить профессию': self._add_profession,
'Изменить оклад': self._change_salary,
'Удалить профессию': self._delete_profession,
}
print("\n[ENTER] - выход"
"\nВыберете действие:")
action = super().choise_from_list(actions_list, none_option=True)
if not action:
break
else:
actions_list[action](division)
self._dump_salary_list()
super().clear_screen()
|
Acetonen/Interkamen_career
|
interkamen_career/modules/workers_salary.py
|
workers_salary.py
|
py
| 3,476 |
python
|
en
|
code
| 0 |
github-code
|
50
|
10011091812
|
#coding:utf-8
'''
example:
print pcolor.pcolorstr("hello",4,4,4)
print pcolor.pcolorstr("world",pcolor.PHIGHLIGHT,pcolor.PRED,pcolor.PWHITE)
'''
PESC=chr(27)
POFF=0
PHIGHLIGHT=1
PUNDERLINE=4
PFLICKER=5
PINVERSE=7
PHIDDEN=8
PBLACK=0
PRED=1
PGREEN=2
PYELLOW=3
PBLUE=4
PMAUVE=5
PCYAN=6
PWHITE=7
def pcolorstr(mystr,attr,fore,back):
fore=fore+30
back=back+40
    temp = PESC + "[" + str(attr) + ";" + str(fore) + ";" + str(back) + "m"
    temp = temp + mystr + PESC + "[0;0;0;m"
    return temp
|
xurenlu/hyer
|
hyer/pcolor.py
|
pcolor.py
|
py
| 477 |
python
|
en
|
code
| 38 |
github-code
|
50
|
18499028963
|
from model.CRNN import CRNN
from keras.optimizers import Adadelta  # assumed source of Adadelta; the original file had no import for it

C = CRNN()
Trainmodels, PredictModel = C.build((None, 32, 1))
adadelta = Adadelta(lr=0.05)
Trainmodels.compile(loss=lambda y_true, y_pred: y_pred , optimizer=adadelta)
Trainmodels.summary()
Trainmodels.fit_generator(
C.generateBacthData(32),
epochs=3,
steps_per_epoch=100,
verbose=1)
labels_pred = PredictModel.predict_on_batch(next(C.generate_test_data(32)))
|
Shaocr/Keras-OCR
|
src/main.py
|
main.py
|
py
| 403 |
python
|
en
|
code
| 0 |
github-code
|
50
|
72423229915
|
import json
import cv2
shot_count = 1
frame_list = []
frame = 1
with open("MiSang_Frame.json", 'r') as outfile:
frame_list = json.load(outfile)
shot_count = frame_list[-1]["shot"] + 1
frame = frame_list[-1]["frame"] + 1
print(f'Current Shot : {shot_count}')
while(True):
    image = cv2.imread(f'C:\\Users\\user\\Desktop\\MiSang_Frame\\{frame}.jpg')  # folder of working images
cv2.imshow('img', image)
key = cv2.waitKey(0)
if key == ord('q'):
break
elif key == ord('s'):
frame_list.append(
{
"shot": shot_count,
"frame": frame
}
)
print(f'Saved Frame : {frame}')
frame = frame + 1
shot_count = shot_count + 1
print(f'Current Shot : {shot_count}')
elif key == ord('a'):
frame = frame - 1
else :
frame = frame + 1
with open("MiSang_Frame.json", 'w') as outfile:
json.dump(frame_list, outfile, indent=4)
cv2.destroyAllWindows()
|
inha-slab/similaritySegmentation
|
shot_json_maker/generator_cv.py
|
generator_cv.py
|
py
| 1,015 |
python
|
en
|
code
| 0 |
github-code
|
50
|
26886777255
|
import psycopg2
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
connection = psycopg2.connect("postgresql://postgres:1234@localhost:5432")
connection.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
cursor = connection.cursor()
table_name = "ex6"
cursor.execute("SELECT 1 FROM pg_database WHERE datname='jokes'")
if cursor.fetchone() is None:
cursor.execute(f"""CREATE DATABASE jokes""")
connection.commit()
connection = psycopg2.connect("postgresql://postgres:1234@localhost:5432/jokes")
cursor = connection.cursor()
query = f'''CREATE TABLE {table_name}(
id SERIAL PRIMARY KEY,
category TEXT,
joke TEXT,
time time without time zone
)'''
cursor.execute(query)
connection.commit()
|
deniss619/parser
|
createDB_for_ex6.py
|
createDB_for_ex6.py
|
py
| 775 |
python
|
en
|
code
| 0 |
github-code
|
50
|
15305509461
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
import json
json_content = u"""
{
"server":"45.76.222.198",
"server_port":8888,
"local_address": "127.0.0.1",
"local_port":1080,
"password":"self boot",
"timeout":300,
"method":"aes-256-cfb",
"comments": ["中文内容", 1, 2]
}
"""
def _decode_list(data):
rv = []
for item in data:
if hasattr(item, 'encode'):
item = item.encode('utf-8')
elif isinstance(item, list):
item = _decode_list(item)
elif isinstance(item, dict):
item = _decode_dict(item)
rv.append(item)
return rv
def _decode_dict(data):
rv = {}
for key, value in data.items():
if hasattr(value, 'encode'):
value = value.encode('utf-8')
elif isinstance(value, list):
value = _decode_list(value)
elif isinstance(value, dict):
value = _decode_dict(value)
rv[key] = value
return rv
if __name__ == "__main__":
# Without object_hook
config = json.loads(json_content)
print(config)
# With object_hook
config_2 = json.loads(json_content, object_hook=_decode_dict)
print(config_2)
# json content is str before load.
config_3 = json.loads(json_content.encode("utf-8"))
print(config_3)
|
selfboot/AnnotatedShadowSocks
|
issue_codes/4_hook.py
|
4_hook.py
|
py
| 1,355 |
python
|
en
|
code
| 3 |
github-code
|
50
|
17855217888
|
import logging
import uuid
from decimal import Decimal
from uuid import UUID
from asyncpg import ForeignKeyViolationError
from sqlalchemy.dialects.postgresql import insert
from app.db import database
from app.db.models import accounts, operation_history
from app.billing.entities import *
logger = logging.getLogger(__name__)
class BillingService:
async def create_account(self, user_id: UUID) -> UUID:
query = insert(accounts).values(
id=str(uuid.uuid4()),
user_id=user_id,
amount=0
).on_conflict_do_nothing(
constraint='accounts_user_id'
).returning(accounts.c.id)
return await database.fetch_val(query)
async def refill_deposit(self, charge: DepositRefill):
async with database.transaction():
amount = await self._update_account(charge.destination, charge.amount)
if amount is not None:
await self._update_history(charge.destination, charge.amount)
return amount
async def _update_account(self, account_id: UUID, amount: Decimal) -> Decimal:
query = accounts.update().values(
amount=accounts.c.amount + amount
).where(
accounts.c.id == account_id
).returning(accounts.c.amount)
return await database.fetch_val(query)
async def _update_history(self, destination, amount, source=None):
query = insert(operation_history).values(
source=source,
destination=destination,
amount=amount
)
await database.execute(query)
async def transfer(self, transfer: MoneyTransfer):
async with database.transaction():
amount = await self._update_account(transfer.source, -transfer.amount)
if amount is None:
logger.error("Account with id=%s doesn't exist", transfer.source)
return
dest_amount = await self._update_account(transfer.destination, transfer.amount)
if dest_amount is None:
logger.error("Account with id=%s doesn't exist", transfer.destination)
raise ForeignKeyViolationError
await self._update_history(transfer.destination, transfer.amount, transfer.source)
return amount
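# Usage sketch (the field names for DepositRefill/MoneyTransfer are assumed from
# how refill_deposit()/transfer() read them above, not confirmed by
# app.billing.entities):
#
#     import asyncio
#     from decimal import Decimal
#
#     async def demo():
#         svc = BillingService()
#         src = await svc.create_account(user_id=uuid.uuid4())
#         dst = await svc.create_account(user_id=uuid.uuid4())
#         await svc.refill_deposit(DepositRefill(destination=src, amount=Decimal("100")))
#         await svc.transfer(MoneyTransfer(source=src, destination=dst, amount=Decimal("10")))
#
#     asyncio.run(demo())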
|
Nerevarsoul/billing_system
|
app/billing/services.py
|
services.py
|
py
| 2,300 |
python
|
en
|
code
| 0 |
github-code
|
50
|
70068221917
|
#!/usr/bin/python3
from flask import Flask, request
import requests
app = Flask(__name__)
key = "[api key telegram]"
users = {}
text = ""
@app.route('/hello')
def hello():
return 'Hello World'
def send_message(chat_id, text):
url = "https://api.telegram.org/bot{}/sendMessage".format(key)
payload = {
"text": text,
"chat_id": chat_id
}
resp = requests.get(url,params=payload)
# sends a message on Telegram to users who have accessed the bot
@app.route("/", methods=["POST","GET"])
def index():
if(request.method == "POST"):
msg_text, sender_name, sender_id, chat_id = "","","",""
response = request.get_json()
if 'message' in response:
msg_text = response["message"]["text"]
sender_name = response["message"]["from"]["first_name"]
sender_id = response["message"]["from"]["id"]
chat_id = response["message"]["chat"]["id"]
print(users)
if sender_id in users:
text = f"{sender_name}, estamos monitorando..."
else:
text = f"Olá {sender_name}!\n A partir de agora está inscrito no sistema de detecção de invasão de pombos.\
\nAguarde para receber o aviso quando o pombo invadir o ambiente..."
send_message(chat_id, text)
users[sender_id] = sender_name
return "Done"
# sends the notification that the pigeon has invaded the environment
@app.route("/send", methods=["POST"])
def send_warnings():
if(request.method == "POST"):
if users:
for i in users.keys():
send_message(i, "O pombo invadiu!!!")
return f"number of users who received the message: {len(users)}."
# Activates the bot API to respond at the address (url), using ngrok.
@app.route("/activate")
def activate():
url = "https://fc49-144-22-140-105.sa.ngrok.io"
response = requests.get("https://api.telegram.org/bot{}/setWebhook?url={}".format(key,url))
return "Telegram Bot activate" if response else "Fail to activate Telegram Bot"
if __name__ == "__main__":
app.run(debug=True)
|
amauri/ey-fast-track-ml
|
main.py
|
main.py
|
py
| 2,101 |
python
|
en
|
code
| 0 |
github-code
|
50
|
34983693359
|
from tqdm import tqdm as tqdm
def check_divisible( num, start, end ):
for i in range( start, end+1 ):
if num % i != 0:
return( False )
return( True )
number = None
for i in tqdm( range( 1, int( 1e10 ) ) ):
if check_divisible( i, 1, 20 ) == True:
number = i
break
print( number )
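# Sketch of a direct solution: the answer is simply lcm(1..20), so the scan
# above can be replaced by a one-liner (known result: 232792560):
#
#     from functools import reduce
#     from math import gcd
#     print(reduce(lambda a, b: a * b // gcd(a, b), range(1, 21)))  # 232792560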
|
drosophominin/project_euler
|
0005-smallest_multiple/problem-0005.py
|
problem-0005.py
|
py
| 295 |
python
|
en
|
code
| 0 |
github-code
|
50
|
22148877569
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
import cv2
import numpy as np
import tensorflow as tf
from help_file import label_map_util
from help_file import visualization_utils as vis_util
import os  # needed by model_path below
import sys
import time
import roslib
import rospy
from sensor_msgs.msg import Image
from geometry_msgs.msg import Pose
#from imagetran.msg import Calibrationimage
from probot_vision.srv import *
from cv_bridge import CvBridge, CvBridgeError
(major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')
model_path = os.path.join(os.getcwd(),'pbF/')
Label_path = model_path + 'my_label_map.pbtxt'
Meta_graph_path = model_path + 'model.ckpt.meta'
class TOD(object):
def __init__(self):
self.PATH_TO_LABELS = Label_path
self.NUM_CLASS = 3
self.category_index = self._load_label_map()
        # try it out
self.sess = tf.InteractiveSession()
self.saver = tf.train.import_meta_graph(Meta_graph_path)
self.graph = tf.get_default_graph()
self.ckpt = tf.train.get_checkpoint_state(model_path)
self.saver.restore(self.sess,self.ckpt.model_checkpoint_path)
def _load_label_map(self):
label_map = label_map_util.load_labelmap(self.PATH_TO_LABELS)
print('label_map',label_map)
categories = label_map_util.convert_label_map_to_categories(label_map,
max_num_classes=self.NUM_CLASS,
use_display_name=True)
print('categories',categories)
category_index = label_map_util.create_category_index(categories)
print('category_index',category_index)
return category_index
def detect2(self,req):
rospy.loginfo("Try to detect objects...")
HM = rospy.get_param("~hmatrix")
XM = rospy.get_param("~xmatrix")
CM = rospy.get_param("cameraMatrix")
EM = rospy.get_param("eyebase")
image_params = rospy.get_param("~image")
H = HM['data']
X = XM['data']
C = CM['data']
E = EM['data']
#C = [592.988765, 0.0, 316.144026, 0.0, 589.679756, 244.158662,0.0, 0.0, 1.0]
#E = [0.9999262927350819, 0.012140295651447686, 0.00014939744324133248,0.0362733301805, 0.012138358976642286, -0.9998822784140743, 0.009385603649183276, 0.289040732468, 0.0002633238591056371,-0.009383098422207622, -0.9999559430922794,0.83550686443 , 0.0, 0.0, 0.0, 1.0]
giraffeObjList = []
duckObjList = []
barrotObjList = []
res = DetectObjectSrvResponse.NOT_DETECTED
imageData = rospy.wait_for_message('/camera/color/image_raw', Image)
try:
cv_image = CvBridge().imgmsg_to_cv2(imageData,"bgr8")
except CvBridgeError as e:
print(e)
image_np_expanded = np.expand_dims(cv_image, axis=0)
image_tensor = self.graph.get_tensor_by_name('image_tensor:0')
boxes = self.graph.get_tensor_by_name('detection_boxes:0')
scores = self.graph.get_tensor_by_name('detection_scores:0')
classes = self.graph.get_tensor_by_name('detection_classes:0')
num_detections = self.graph.get_tensor_by_name('num_detections:0')
(boxes, scores, classes, num_detections) = self.sess.run([boxes, scores, classes, num_detections],feed_dict={image_tensor: image_np_expanded})
imagedeal = cv_image.copy()
imagecopy = cv_image.copy()
imagecopyp = cv_image.copy()
vis_util.visualize_boxes_and_labels_on_image_array(
imagecopy,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
self.category_index,
use_normalized_coordinates=True,
line_thickness=3)
box_to_color_map, str_to_display = vis_util.need_location(
imagecopy,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
self.category_index,
use_normalized_coordinates=True,
line_thickness=3)
cv2.imwrite("jiance.jpg",imagecopy)
gray_image = cv2.cvtColor(imagedeal, cv2.COLOR_BGR2GRAY)
gray_image = cv2.GaussianBlur(gray_image, (5, 5), 0)
ret3, th3 = cv2.threshold(gray_image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
cv2.imwrite("beijing.jpg",th3)
sp = imagecopy.shape
for box, color in box_to_color_map.items():
ymin, xmin, ymax, xmax = box
x = int((xmax+xmin)*sp[1]/2.0)
y = int((ymax+ymin)*sp[0]/2.0)
cv2.circle(imagecopy,(x, y), 2, (0, 255, 0), 3)
label = str_to_display[box]
caijian = th3[int(ymin*sp[0]):int(ymax*sp[0]), int(xmin*sp[1]):int(xmax*sp[1])]
if(label[0][0] == 'p'):
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (26, 26))
kernel2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))
else:
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (6, 6))
kernel2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (20, 20))
caijian = cv2.dilate(caijian, kernel)
caijian = cv2.erode(caijian, kernel2)
contours,hier = cv2.findContours(caijian, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
M = cv2.moments(contours[0])
for cnt in contours:
N = cv2.moments(cnt)
if M['m00'] < N['m00']:
M = cv2.moments(cnt)
#print(M['m00'])
centroid_xD = M['m10']/M['m00'] + xmin*sp[1]
centroid_yD = M['m01']/M['m00'] + ymin*sp[0]
centroid_x = int(M['m10']/M['m00'] + xmin*sp[1])
centroid_y = int(M['m01']/M['m00'] + ymin*sp[0])
xc = (C[4] * centroid_xD - centroid_yD * C[1] - C[2] * C[4] + C[1] * C[5])/(C[4] * C[0])
yc = (centroid_yD - C[5])/C[4]
zc = 1
objPose = Pose()
objPose.position.x = E[0] * xc + E[1] * yc + E[2] * zc + E[3]
objPose.position.y = E[4] * xc + E[5] * yc + E[6] * zc + E[7]
print("objPose.position.x,",objPose.position.x)
print("objPose.position.y",objPose.position.y)
if label[0][0] == 'p':
barrotObjList.append(objPose)
elif label[0][0] == 'd':
duckObjList.append(objPose)
else:
giraffeObjList.append(objPose)
res = DetectObjectSrvResponse.SUCCESS
cv2.circle(imagecopy, (centroid_x, centroid_y), 2, (0, 255, 255), 3)
cv2.imwrite("i.jpg",imagecopy)
return DetectObjectSrvResponse(res, giraffeObjList, duckObjList, barrotObjList)
if __name__ == '__main__':
rospy.init_node('Object_Detect_network')
rospy.loginfo("Server is ready to detect.")
detecte = TOD()
ser = rospy.Service('probot_detect_object_network', DetectObjectSrv, detecte.detect2)
rospy.loginfo("Server is ready to detect.")
rospy.spin()
|
ps-micro/PROBOT_Anno
|
probot_vision_pick/probot_detect/scripts/Detector_pick.py
|
Detector_pick.py
|
py
| 7,340 |
python
|
en
|
code
| 84 |
github-code
|
50
|
42786611333
|
import socket
from utils.initializehttpsocket import InitializeHttpSocket
from threading import Thread
def NUTS():
print("nutsocket")
if __name__ == "__main__":
webpage = InitializeHttpSocket(HOST="localhost", PORT=80)
    # Pass the callables themselves to Thread: calling add_endpoint()/run() inline
    # would execute them in the main thread and hand Thread their return values.
    Thread(target=webpage.add_endpoint, kwargs=dict(endpoint="/uwu",
           html_file="OWO/ilovehate.html", METHOD=["POST", "GET"], function_hook=NUTS)).start()
    Thread(target=webpage.add_endpoint, kwargs=dict(endpoint="/",
           html_file="OWO/test.html", METHOD=["GET"])).start()
    Thread(target=webpage.run).start()
|
LumieOwO/bruh-
|
main.py
|
main.py
|
py
| 584 |
python
|
en
|
code
| 0 |
github-code
|
50
|
13966505388
|
#!/usr/bin/env python
"""
OTU_sampleData: make a 'sample_data' table (phyloseq) from the OTU table
Usage:
OTU_sampleData [options] <OTU_table_file>
OTU_sampleData -h | --help
OTU_sampleData --version
Options:
<OTU_table_file> OTU table file name.
-h --help Show this screen.
--version Show version.
--debug Debug mode
Description:
Create a sample_data table for import into phyloseq.
The sample_data table will consist of BD value stats for each
sample (fraction).
"""
# import
## batteries
from docopt import docopt
import os, sys
## application libraries
import numpy as np
from SIPSim.OTU_Table import OTU_table
def main(args=None):
# loading otu table
otu_tbl = OTU_table.from_csv(args['<OTU_table_file>'], sep='\t')
# otu table in long format
if not otu_tbl.is_long:
otu_tbl.wide2long()
# editing columns
df = otu_tbl.df[['library','fraction']].drop_duplicates()
L = lambda x: x['library'] + '__' + x['fraction']
df['sample'] = df.apply(L, 1)
df[['BD_min','BD_max']] = df['fraction'].str.extract('(.+)-(.+)',
expand=False)
L = lambda x: round((float(x['BD_min']) + float(x['BD_max'])) / 2, 4)
df['BD_mid'] = df.apply(L, 1)
cols = df.columns.tolist()
cols = ['sample'] + [x for x in cols if x != 'sample']
df = df[cols]
df.sort_values(by=['sample'], inplace=True)
# writing otu table
df.to_csv(sys.stdout, sep='\t', index=False)
def opt_parse(args=None):
if args is None:
args = docopt(__doc__, version='0.1')
else:
args = docopt(__doc__, version='0.1', argv=args)
main(args)
|
nick-youngblut/SIPSim
|
SIPSim/Commands/OTU_sampleData.py
|
OTU_sampleData.py
|
py
| 1,742 |
python
|
en
|
code
| 1 |
github-code
|
50
|
36535021736
|
import inspect
import pandas as pd
import numpy as np
from ..plot import candles, show
class placeholder:
def __call__(self, *args, **kwargs):
return None
class Indicator:
function = placeholder()
on_chart = True
def __init__(
self, *args, **kwargs
):
self.axis = None
self.called = False
self.kwargs = kwargs
assert self.function.__class__ is not placeholder,\
'function must be set'
argspect = inspect.getfullargspec(self.function)
all_args = argspect.args
len_kws = len(argspect.kwonlyargs)
self.arg_names = all_args[0:len(all_args)-len_kws]
        self.arg_names = [
            arg for arg in self.arg_names if arg != 'self'
        ]
self.kwarg_names = all_args[
(len(all_args) - len_kws):
]
self.kwarg_defaults = argspect.defaults
if self.kwarg_defaults is not None:
self.defaults = list(
zip(
self.kwarg_names, self.kwarg_defaults
)
)
self.defaults = dict(self.defaults)
        for key in self.kwarg_names:
            # fill in any keyword argument the caller did not supply
            if key not in self.kwargs:
                self.kwargs[key] = self.defaults[key]
def __call__(
self, OHLCV
):
assert OHLCV.__class__ is pd.DataFrame
OHLCV.columns = OHLCV.columns.str.lower()
self.ohlcv = OHLCV
self.called = True
        args = [
            pd.Series(OHLCV[arg]) for arg in self.arg_names if \
            arg != 'self'
        ]
features = self.function(*args, **self.kwargs)
self.features = self.feature_extraction(
features
)
return self.features
def feature_extraction(self, features):
return features
def plot_candles(self, *args, **kwargs):
assert self.called
self.axis = candles(
self.ohlcv, **kwargs
)
return self.axis
    def plot_features(self, *args, **kwargs):
        raise NotImplementedError
def show(self):
show(self.axis)
__all__ = [
'Indicator'
]
|
JizzFactoryEmployee/nibblerppman
|
nibbler/trading/collectors/AlgoTrader/indicators/indicator_base.py
|
indicator_base.py
|
py
| 2,192 |
python
|
en
|
code
| 0 |
github-code
|
50
|
20299841387
|
from pymodbus.client.sync import ModbusTcpClient
def write(c):
c.write_coil(1, False)
c.read_coils(1, 1)
if __name__ == "__main__":
import timeit
client = ModbusTcpClient('127.0.0.1', 5020)
client.connect()
    # time 100 write/read round-trips against the local Modbus server
    print(timeit.timeit(lambda: write(client), number=100))
client.close()
|
mjfarmer/scada_py
|
simpleTest_with_timeit.py
|
simpleTest_with_timeit.py
|
py
| 334 |
python
|
en
|
code
| 0 |
github-code
|
50
|
35646175719
|
import geopandas as gpd
import pandas as pd
import numpy as np
from scipy.stats import rankdata
import copy
def _homogenise_gdf_(
geodataframe,
id_header,
name_header
):
gdf = copy.deepcopy(geodataframe)
# Add a dummy variable to allow cross-joins later
gdf['__dummy__'] = gdf.apply(lambda row: 1, axis=1)
# Find the centroids
gdf['__centroid__'] = gdf.centroid
return gdf[[id_header, name_header, gdf.geometry.name, '__dummy__', '__centroid__']]
class _Index_Id_Name_Transformer_:
# Class to provide conversion methods between geography IDs, names, and indexes.
# Indexes are not equivalent to the GeoDataFrame index column of the input file.
def __init__(
self,
geo_list,
name_list
):
# Generate lookup dicts
g_i = {}
g_n = {}
i_g = {}
i_n = {}
n_g = {}
n_i = {}
for i,g,n in zip(range(len(geo_list)), geo_list, name_list):
g_i[g] = i
g_n[g] = n
i_g[i] = g
i_n[i] = n
n_g[n] = g
n_i[n] = i
self._geo_to_index_ = g_i
self._geo_to_name_ = g_n
self._index_to_geo_ = i_g
self._index_to_name_ = i_n
self._name_to_geo_ = n_g
self._name_to_index_ = n_i
# Define functions to use the dicts to translate
def id_to_ix(self, code):
return self._geo_to_index_[code]
def id_to_name(self, code):
return self._geo_to_name_[code]
def ix_to_id(self, index):
return self._index_to_geo_[index]
def ix_to_name(self, index):
return self._index_to_name_[index]
def name_to_id(self, name):
return self._name_to_geo_[name]
def name_to_ix(self, name):
return self._name_to_index_[name]
def _turn_df_into_matrix_(
row_labels, col_labels, # Must have values of form ID
values,
matrix_size,
transformer, # Must be of class _Index_Id_Name_Transformer_
default_value=0
):
# Takes a set of row-column-value lists and creates a 2D numpy array
dtype = values.dtype
matrix = np.ones((matrix_size, matrix_size), dtype=dtype)*default_value
for row_lab, col_lab, value in zip(row_labels, col_labels, values):
matrix[transformer.id_to_ix(row_lab), transformer.id_to_ix(col_lab)] = value
return matrix
def _make_neighbours_array_(
inf, # Must be class Input_File
use_fractional_borders
):
# Generates a 2D matrix of weights, where the weights are equal to the fraction of column geometry's
# border which is shared with row geometry.
# Add an extra geometry column to keep during crossjoining
g_copy = copy.deepcopy(inf.geodata)
g_copy['geocopy'] = g_copy.geometry
# Find all geometries which touch each other
neighbours = g_copy.sjoin(g_copy, how='inner', predicate='touches')
neighbours = neighbours.loc[neighbours[inf.id_header+'_left']!=neighbours[inf.id_header+'_right']]
if use_fractional_borders:
# Find the length of the border as a fraction of the left geometry's total border
neighbours['intersection'] = neighbours.apply(
lambda row: row['geocopy_left'].intersection(row['geocopy_right']),
axis=1
)
neighbours['weight'] = neighbours.apply(
lambda row: row['intersection'].length/row['geocopy_left'].length,
axis=1
)
else:
neighbours['weight'] = neighbours.apply(lambda row: 1.0, axis=1)
return _turn_df_into_matrix_(
neighbours[inf.id_header+'_left'].values,
neighbours[inf.id_header+'_right'].values,
neighbours['weight'].values,
inf.number_of_geographies,
inf.lookups
)
def _make_coastline_array_(
inf
):
# Generates a 1D vector of weights, where the weights are equal to the geometry's coast as a
# fraction of its perimeter
coastline_vector = 1-np.sum(inf.neighbours_array(use_fractional_borders=True), axis=1)
coastline_vector[coastline_vector<0.01] = 0 # To allow for rounding errors
return coastline_vector
def _make_distance_array_(
inf
):
# Generates a 2D matrix of the distances between each row/column geometry
geom_name = inf.geodata.geometry.name
# Do a cross-join
crossjoin = inf.geodata.merge(
inf.geodata,
how='inner',
on='__dummy__',
suffixes=('_left', '_right')
)
# Find the separation between each pair
crossjoin = crossjoin.loc[crossjoin[inf.id_header+'_left']!=crossjoin[inf.id_header+'_right']]
crossjoin['distance'] = crossjoin.apply(
lambda row: row[geom_name+'_left'].distance(row[geom_name+'_right']),
axis=1
)
# Rank the distances
crossjoin['distance_rank'] = crossjoin.groupby(inf.id_header+'_left')['distance'].rank()
return _turn_df_into_matrix_(
crossjoin[inf.id_header+'_left'].values,
crossjoin[inf.id_header+'_right'].values,
crossjoin['distance_rank'].values,
inf.number_of_geographies,
inf.lookups
)
|
tompeterken-os/AutoCartogram
|
autocartogram_utils.py
|
autocartogram_utils.py
|
py
| 5,221 |
python
|
en
|
code
| 0 |
github-code
|
50
|
37990438174
|
#!/usr/bin/env python
# -*- coding: iso-8859-15 -*-
from pyramid.view import view_config
from pyramid.response import Response
from pyramid.httpexceptions import HTTPFound
from sqlalchemy.exc import DBAPIError
from .. import process_request
from sqlalchemy import or_, between
from .. import models
import logging
import traceback
import sys
from webapp import automapper
from sqlalchemy import func, case
from sqlalchemy import desc
import os
head_path = os.path.dirname(__file__).split("webapp/views")[0]
config_path = os.path.join(head_path, 'development.ini')
am = automapper.Automapper(config_path)
Base_automap = am.generate_base("db2.")
def distance_query(Base, repeat_list, request, distance, db):
case_list = []
mlvaTable = getattr(Base, db) #Base.classes.mlva_normalized
for repeats in repeat_list:
if float(request.matchdict[repeats]) == 0:
continue
else:
repeat_model = getattr(mlvaTable, repeats)
repeat_case = case([
(repeat_model == float(request.matchdict[repeats]), 1)], else_=0
)
case_list.append(repeat_case)
try:
query = (
request.db2_session.query(mlvaTable).filter(sum(case_list) >= distance)
.order_by(desc(sum(case_list))).all()
)
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
inf = "".join("!!" + line for line in lines)
return {"line": inf}
return query
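# Note: sum(case_list) works because Python's sum() folds with "+", which
# SQLAlchemy overloads into a single SQL expression (0 + CASE ... + CASE ...),
# so the filter and order_by operate on a per-row match count.
# Rough two-repeat sketch:
#
#     match_count = case_list[0] + case_list[1]
#     query = session.query(mlvaTable).filter(match_count >= distance)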
@view_config(route_name="mlvaquery", renderer="../templates/mlva_query.jinja2")
def mlva_view(request):
return {}
@view_config(route_name="fp_query_api", renderer="json")
def fpq_view(request):
RP = process_request.RequestProcessor()
repeat_list = [
"ms01",
"ms03",
"ms20",
"ms21",
"ms22",
"ms23",
"ms24",
"ms26",
"ms27",
"ms28",
"ms30",
"ms31",
"ms33",
"ms34",
]
distance = {"0":14, "1":13, "2":12, "3":11, "4":10, "5":9, "xx":1}[request.matchdict["distance"]]
query = distance_query(Base_automap,
repeat_list,
request,
distance,
"mlva_normalized")
return RP._serialize_mlva(query)
@view_config(route_name="tp_query_api", renderer="json")
def tpq_view(request):
RP = process_request.RequestProcessor()
repeat_list = [
"ms23",
"ms24",
"ms27",
"ms28",
"ms33",
"ms34",
]
distance = {"0":6, "1":5, "2":4, "3":3, "4":2, "5":1, "xx":0}[request.matchdict["distance"]]
query = distance_query(Base_automap,
repeat_list,
request,
distance,
"tilburg_profile2022")
return RP._serialize_mlva_tillburg(query)
|
foerstner-lab/CoxBase-Webapp
|
webapp/views/mlva_query.py
|
mlva_query.py
|
py
| 2,995 |
python
|
en
|
code
| 0 |
github-code
|
50
|
14033669313
|
"""
Given the head of a linked list and a value x, partition it such that all nodes less than x come before nodes greater than or equal to x.
You should preserve the original relative order of the nodes in each of the two partitions.
"""
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def partition(self, head: Optional[ListNode], x: int) -> Optional[ListNode]:
if head is None or head.next is None:
return head
pleft, pright = ListNode(-1), ListNode(-1)
tmpleft, tmpright = pleft, pright
while head:
if head.val < x:
tmpleft.next = head
tmpleft = tmpleft.next
else:
tmpright.next = head
tmpright = tmpright.next
head = head.next
tmpleft.next = pright.next
tmpright.next = None
return pleft.next
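# Worked example: head = 1->4->3->2->5->2, x = 3. Nodes < 3 keep their order in
# the left list (1, 2, 2); the rest keep theirs in the right list (4, 3, 5);
# splicing gives 1->2->2->4->3->5. Sketch:
#
#     vals = [1, 4, 3, 2, 5, 2]
#     head = None
#     for v in reversed(vals):
#         head = ListNode(v, head)
#     out = Solution().partition(head, 3)   # 1->2->2->4->3->5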
|
KaneZhao/LeetCode
|
Python/086_Partition_List.py
|
086_Partition_List.py
|
py
| 1,009 |
python
|
en
|
code
| 0 |
github-code
|
50
|
12356680349
|
from kivy.clock import Clock
from kivy.uix.screenmanager import Screen
from kivy.lang import Builder
from views.world import World
class Game(Screen):
Builder.load_file('views/game.kv')
def __init__(self, client, *args, **kwargs):
super().__init__(*args, **kwargs)
self.client = client
self.set_player(client.character)
self.client.bind(
on_tick=lambda wg, band=self.character.band:
self.update_world_center(band.pos)
)
self.client.bind(
on_band_update=lambda ev, band: self.world.update_band(band)
)
self.client.bind(
on_character_knowledge_update=self.update_character_knowledge
)
self.client.bind(
on_character=self.set_player
)
self.world.bind(
on_touch_down=lambda wg, ev: self.on_action(ev)
)
def on_enter(self):
self.client.run()
def set_player(self, player):
self.character = player
def on_action(self, ev):
if ev.is_mouse_scrolling:
return
pos = ev.pos
off_x = (pos[0] - self.world.center[0]) / self.world.zoom
off_y = (pos[1] - self.world.center[1]) / self.world.zoom
cx, cy = self.world.real_center[0], self.world.real_center[1]
path = [(cx + off_x, cy + off_y)]
self.client.set_band_path(path)
def test_action_menu_item(self, *args):
print("Test Item")
args[0].parent.dismiss()
def move_player_to(self, x, y):
self.client.move_player_to(x, y)
def update_world_center(self, pos):
self.world.real_center = pos
def update_character_knowledge(self, ev, knowledge):
self.world.update_tiles(knowledge['tiles'])
|
matumaros/katurigja
|
views/game.py
|
game.py
|
py
| 1,772 |
python
|
en
|
code
| 1 |
github-code
|
50
|
39899165330
|
from gde.models import *
class Cart:
def __init__(self, request):
self.request = request
self.session = request.session
cart = self.session.get("cart")
if not cart:
cart = self.session["cart"] = {}
self.cart = cart
def save(self):
self.session["cart"] = self.cart
self.session.modified = True
def add_videogame(self, videogame):
if videogame.videogame_name not in self.cart.keys():
self.cart[videogame.videogame_name] = {
"name" : videogame.videogame_name,
"quantity" : 1,
"price" : str(videogame.unit_price),
"image": videogame.photo.url,
}
else:
for key, value in self.cart.items():
if key == videogame.videogame_name:
value["quantity"] += 1
break
self.save()
def add_dlc(self, dlc):
if dlc.dlc_name not in self.cart.keys():
self.cart[dlc.dlc_name] = {
"name" : dlc.dlc_name,
"quantity" : 1,
"price" : str(dlc.unit_price),
#"image": dlc.photo.url,
}
else:
for key, value in self.cart.items():
if key == dlc.dlc_name:
value["quantity"] += 1
break
self.save()
def add_package(self, package):
if package.package_name not in self.cart.keys():
self.cart[package.package_name] = {
"name" : package.package_name,
"quantity" : 1,
"price" : str(package.unit_price),
#"image": package.photo.url,
}
else:
for key, value in self.cart.items():
if key == package.package_name:
value["quantity"] += 1
break
self.save()
def remove_videogame(self, videogame):
if videogame.videogame_name in self.cart:
del self.cart[videogame.videogame_name]
self.save()
def remove_dlc(self, dlc):
if dlc.dlc_name in self.cart:
del self.cart[dlc.dlc_name]
self.save()
def remove_package(self, package):
if package.package_name in self.cart:
del self.cart[package.package_name]
self.save()
def decrease_videogame(self, videogame):
for key, value in self.cart.items():
if key == videogame.videogame_name:
value["quantity"] -= 1
if value["quantity"] < 1:
self.remove_videogame(videogame)
break
self.save()
break
def decrease_dlc(self, dlc):
for key, value in self.cart.items():
if key == dlc.dlc_name:
value["quantity"] -= 1
if value["quantity"] < 1:
self.remove_dlc(dlc)
break
self.save()
break
def decrease_package(self, package):
for key, value in self.cart.items():
if key == package.package_name:
value["quantity"] -= 1
if value["quantity"] < 1:
self.remove_package(package)
break
self.save()
break
def clear(self):
self.session["cart"] = {}
self.session.modified = True
|
sadj123/Ecommerce-Videogames
|
cart/cart.py
|
cart.py
|
py
| 3,480 |
python
|
en
|
code
| 0 |
github-code
|
50
|
41515839192
|
#!/usr/bin/python
# take_psf.py
#
# PURPOSE:
# Basic script to take PSF data.
# The script also saves a "psf.fits" file on the local machine for later use (e.g., with QACITS).
#
# INPUTS
# Three arguments giving the (1) the X position of the PSF, (2) the Y position of the PSF, and (3) an argument to take data (if = 1)
#
# MODIFICATION HISTORY:
# 151218 -- DD : first version for December 2015 run
# Import Python libraries
from pyindi import * # import pyINDI
import numpy
import pyfits  # used below to write the FITS file; it may also be re-exported
               # by pyindi's wildcard import, but an explicit import is safer
import matplotlib
matplotlib.use('QT4Agg')
import matplotlib.pyplot as plt
import warnings
import time
import sys
# Start time counter
t0 = time.time()
# Ignore deprecation warnings
def fxn():
warnings.warn("deprecated", DeprecationWarning)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
fxn()
#pi is an instance of PyINDI. Here we connect to the INDI server
pi=PyINDI(verbose=False)
# Retrieve input paramaters
arg = sys.argv
x = int(arg[1])
y = int(arg[2])
z = int(arg[3])
# Declare paramaters
aper = 'L' # Define which aperture is used (L for left, R for right)
dit = 0.290 # Detector integration time in s
n_coadd = 1 # Number of coadds
n_img = 10 # Number of images per nod (0 to use the GUI value)
offx = 2 # Nodding offset in X direction
offy = 0 # Nodding offset in Y direction
test = 0 # No AO interaction
# Declare function
def wait4AORunning():
while True:
pi.setINDI("LBTO.Dictionary.Name=" + aper + "_AOStatus;Value=")
status = pi.getINDI("LBTO.Dictionary.Value")
time.sleep(0.05)
if status == "AORunning":
break
# Get last displayed frame
f = pi.getFITS("LMIRCAM.DisplayImage.File", "LMIRCAM.GetDisplayImage.Now")
img = f[0].data
# Save current display as fits file
hdu = pyfits.PrimaryHDU()
hdu.data = img
hdu.header['DIT'] = dit*n_coadd
hdu.header['X'] = x
hdu.header['Y'] = y
hdu.writeto('/home/observer/scripts_obs/observing/general/psf.fits', clobber=True)
print("PSF file saved under /home/observer/scripts_obs/observing/general/psf.fits")
print(" ")
# RUN PSF sequence if requested
if z == 1:
# Check whether dome is open (double check)
shut = pi.getINDI("LBTO.Status." + aper + "_Shutter")
if shut == 0:
test = 1
# Stop continuous acquisition if running
pi.setINDI("LMIRCAM.Command.text","0 contacq")
# Set integration time
pi.setINDI("LMIRCAM.Command.text", "%f %i %i lbtintpar" % (dit, n_coadd, n_img), timeout=100, wait=True)
# Save
pi.setINDI("LMIRCAM.Command.text","1 savedata")
# Turn on display
# pi.setINDI("LMIRCAM.Command.text", "1 autodispwhat", wait=True)
# Mark data as PSF
pi.setINDI("LMIRCAM.EditFITS.Keyword=FLAG;Value=PSF;Comment=frame type", wait=False)
# Take a frames
pi.setINDI("LMIRCAM.Command.text", "go", timeout=3000, wait=True)
# Use as background
pi.setINDI("LMIRCAM.Command.text","rawbg",timeout=300,wait=True)
# Nod the telescope to take the PSF and wait for AO
if test != 1:
        pi.setINDI("LBTO.OffsetPointing.CoordSys", "DETXY", "LBTO.OffsetPointing.OffsetX", offx, "LBTO.OffsetPointing.OffsetY", offy, "LBTO.OffsetPointing.Side", "left", "LBTO.OffsetPointing.Type", "REL", timeout=150, wait=False)
wait4AORunning()
# Take a frames
pi.setINDI("LMIRCAM.Command.text", "go", timeout=3000, wait=True)
# Nod back the telescope
if test != 1:
        pi.setINDI("LBTO.OffsetPointing.CoordSys", "DETXY", "LBTO.OffsetPointing.OffsetX", -offx, "LBTO.OffsetPointing.OffsetY", -offy, "LBTO.OffsetPointing.Side", "left", "LBTO.OffsetPointing.Type", "REL", timeout=150, wait=False)
# Start time counter
t1 = time.time()-t0
# Print status
print("Total script time: %fs" % (t1))
print(" ")
|
mwanakijiji/NOMIC_scripts
|
observing/general/take_psf.py
|
take_psf.py
|
py
| 3,765 |
python
|
en
|
code
| 0 |
github-code
|
50
|
32676445394
|
import sys
from collections import namedtuple
from enum import Enum
from typing import Callable
from typing import Mapping
from maps_generator.checks import check
from maps_generator.checks.check_addresses import get_addresses_check_set
from maps_generator.checks.check_categories import get_categories_check_set
from maps_generator.checks.check_log_levels import get_log_levels_check_set
from maps_generator.checks.check_mwm_types import get_all_mwm_types_check_set
from maps_generator.checks.check_mwm_types import get_mwm_type_check_set
from maps_generator.checks.check_sections import get_sections_existence_check_set
from maps_generator.checks.check_sections import get_sections_size_check_set
from maps_generator.checks.check_size import get_size_check_set
class CheckType(Enum):
low = 1
medium = 2
hard = 3
strict = 4
Threshold = namedtuple("Threshold", ["abs", "rel"])
_check_type_map = {
CheckType.low: Threshold(abs=20, rel=20),
CheckType.medium: Threshold(abs=15, rel=15),
CheckType.hard: Threshold(abs=10, rel=10),
CheckType.strict: Threshold(abs=0, rel=0),
}
def set_threshold(check_type_map: Mapping[CheckType, Threshold]):
global _check_type_map
_check_type_map = check_type_map
def make_default_filter(check_type_map: Mapping[CheckType, Threshold] = None):
if check_type_map is None:
check_type_map = _check_type_map
def maker(check_type: CheckType):
threshold = check_type_map[check_type]
def default_filter(r: check.ResLine):
return (
check.norm(r.diff) > threshold.abs and check.get_rel(r) > threshold.rel
)
return default_filter
return maker
def get_mwm_check_sets_and_filters(
old_path: str, new_path: str, categories_path: str
) -> Mapping[check.Check, Callable]:
check_type_map_size = {
CheckType.low: Threshold(abs=20, rel=20000),
CheckType.medium: Threshold(abs=15, rel=15000),
CheckType.hard: Threshold(abs=10, rel=1000),
CheckType.strict: Threshold(abs=0, rel=0),
}
return {
get_categories_check_set(
old_path, new_path, categories_path
): make_default_filter(),
get_mwm_type_check_set(
old_path, new_path, "sponsored-booking"
): make_default_filter(),
get_all_mwm_types_check_set(old_path, new_path): make_default_filter(),
get_size_check_set(old_path, new_path): make_default_filter(
check_type_map_size
),
get_sections_size_check_set(old_path, new_path): make_default_filter(
check_type_map_size
),
get_sections_existence_check_set(old_path, new_path): None,
}
def get_logs_check_sets_and_filters(
old_path: str, new_path: str
) -> Mapping[check.Check, Callable]:
return {
get_addresses_check_set(old_path, new_path): make_default_filter(),
get_log_levels_check_set(old_path, new_path): None,
}
def _print_header(file, header, width=100, s="="):
stars = s * ((width - len(header)) // 2)
rstars = stars
if 2 * len(stars) + len(header) < width:
rstars += s
print(stars, header, rstars, file=file)
def run_checks_and_print_results(
checks: Mapping[check.Check, Callable],
check_type: CheckType,
silent_if_no_results: bool = True,
file=sys.stdout,
):
    for chk, make_filt in checks.items():  # `chk` avoids shadowing the imported `check` module
        chk.check()
        _print_header(file, chk.name)
        chk.print(
            silent_if_no_results=silent_if_no_results,
            filt=None if make_filt is None else make_filt(check_type),
            file=file,
        )
|
muralito/omim
|
tools/python/maps_generator/checks/default_check_set.py
|
default_check_set.py
|
py
| 3,658 |
python
|
en
|
code
| null |
github-code
|
50
|
21277621497
|
from tkinter import *
import datetime
import csv
from datetime import timedelta
import shutil
import os
class Invoice():
def __init__(self, master):
self.master = master
self.toolsinfo ={}
path = r".\dump"
files = os.listdir(path)
yesterday = datetime.date.today()-datetime.timedelta(days=1)
yr = yesterday.year
mth = yesterday.month
if "hired_tools,year{},month{}.csv".format(yr, mth) not in files:
shutil.copy("hired_tools.csv", r".\dump")
os.rename(".\dump\hired_tools.csv", ".\dump\hired_tools,year{},month{}.csv".format(yr, mth))
self.frame1 = Frame(self.master, width=850, height = 200)
self.frame1.pack()
self.canvas = Canvas(self.master, width=850, height=300)
self.canvas.pack()
self.innerframe1 = Frame(self.canvas,width=90,height = 200)
self.innerframe2 = Frame(self.canvas,width=90,height = 200)
self.innerframe3 = Frame(self.canvas,width=90,height = 200)
self.innerframe4 = Frame(self.canvas,width=90,height = 200)
self.innerframe5 = Frame(self.canvas,width=90,height = 200)
self.innerframe6 = Frame(self.canvas, width=90, height=200)
self.innerframe7 = Frame(self.canvas, width=90, height=200)
self.innerframe8 = Frame(self.canvas, width=90, height=200)
self.innerframe9 = Frame(self.canvas, width=90, height=200)
self.innerframe1.pack(side=LEFT)
self.innerframe1.pack_propagate(False)
self.innerframe2.pack(side=LEFT)
self.innerframe2.pack_propagate(False)
self.innerframe3.pack(side=LEFT)
self.innerframe3.pack_propagate(False)
self.innerframe4.pack(side=LEFT)
self.innerframe4.pack_propagate(False)
self.innerframe5.pack(side=LEFT)
self.innerframe5.pack_propagate(False)
self.innerframe6.pack(side=LEFT)
self.innerframe6.pack_propagate(False)
self.innerframe7.pack(side=LEFT)
self.innerframe7.pack_propagate(False)
self.innerframe8.pack(side=LEFT)
self.innerframe8.pack_propagate(False)
self.innerframe9.pack(side=LEFT)
self.innerframe9.pack_propagate(False)
Label(self.frame1, text="- INVOICE -", font=("Helvetica", 20)).place(relx=0.37, rely=0.01)
Label(self.frame1, text="Billed To:", fg="gray", font=("Helvetica", 8, "bold")).place(relx=0.03, rely=0.3)
Label(self.frame1, text="First name:",font=("Helvetica", 10)).place(relx=0.05, rely=0.4)
Label(self.frame1, text="Last name:", font=("Helvetica", 10)).place(relx=0.05, rely=0.5)
Label(self.frame1, text="Username:", font=("Helvetica", 10)).place(relx=0.05, rely=0.6)
Label(self.frame1, text="Invoice Total:", fg="gray", font=("Helvetica", 8, "bold")).place(relx=0.8, rely=0.3)
with open("logged_user.txt") as file:
user = file.read()
with open("users.csv", "r") as csv_file:
read = csv.reader(csv_file)
for row in read:
if row:
if row[2] == user:
first = row[0]
last = row[1]
Label(self.frame1, text=first, font=("Helvetica", 10)).place(relx=0.15, rely=0.4)
Label(self.frame1, text=last, font=("Helvetica", 10)).place(relx=0.15, rely=0.5)
Label(self.frame1, text=user, font=("Helvetica", 10)).place(relx=0.15, rely=0.6)
with open(".\dump\hired_tools,year{},month{}.csv".format(yr, mth), "r") as file:
read = csv.reader(file)
for row in read:
if row:
if row[0] == user and len(row) == 8:
used_for = datetime.datetime.strptime(row[7], "%Y-%m-%d %H:%M") - datetime.datetime.strptime(row[4], "%Y-%m-%d")
self.toolsinfo[row[3]] = {"tool":row[2], "owner":row[1], "used for":used_for}
with open("tools.csv", "r") as file:
read = csv.reader(file)
for row in read:
if row:
if row[4] in self.toolsinfo.keys():
print(self.toolsinfo.keys())
for k,v in self.toolsinfo[row[4]].items():
if k == "used for":
used_time = v
if used_time > datetime.timedelta(days=3):
total = int(row[2]) * 3
extra_time = used_time - datetime.timedelta(days=3)
extra_days = extra_time // datetime.timedelta(days=1)
extra_halfday = (extra_time - timedelta(extra_days)) // datetime.timedelta(days=0.5)
fine = (int(row[2]) * 2 * extra_days) + (int(row[3])* 2 * extra_halfday)
self.toolsinfo[row[4]].update({"fullday price": row[2], "halfday price": row[3], "fee":total, "fine":fine})
else:
fullday = used_time // datetime.timedelta(days=1)
halfday = (used_time - datetime.timedelta(fullday)) // datetime.timedelta(days=0.5)
total = (int(row[2]) * fullday) + (int(row[3]) * halfday)
self.toolsinfo[row[4]].update({"fullday price": row[2], "halfday price": row[3], "fee":total})
n=1
totals = []
for v in self.toolsinfo.values():
if "fine" in v.keys():
total = int(v["fee"]) + int(v["fine"]) + 5
else:
total = int(v["fee"]) + 5
label1 = Label(self.innerframe1, text="Tools",bg="aquamarine", width=10)
label1.grid(row=0)
label1.grid_propagate(False)
label2 = Label(self.innerframe2, text="Owners",bg="aquamarine", width=10)
label2.grid(row=0)
label2.grid_propagate(False)
label3 = Label(self.innerframe3, text ="Usage",bg="aquamarine", width=15)
label3.grid(row=0)
label3.grid_propagate(False)
label4 = Label(self.innerframe4, text="Perday/Halfday Rate", bg="aquamarine", width=15)
label4.grid(row=0)
label4.grid_propagate(False)
label5 = Label(self.innerframe5, text="Charge", bg="aquamarine", width=10)
label5.grid(row=0)
label5.grid_propagate(False)
label5 = Label(self.innerframe6, text="Fine", bg="aquamarine", width = 10)
label5.grid(row=0)
label5.grid_propagate(False)
label5 = Label(self.innerframe7, text="Insurance", bg="aquamarine", width = 10)
label5.grid(row=0)
label5.grid_propagate(False)
label5 = Label(self.innerframe8, text="Total", bg="aquamarine", width = 10)
label5.grid(row=0)
label5.grid_propagate(False)
label1 = Label(self.innerframe1, text=v["tool"], width=10)
label1.grid(row=n)
label1.grid_propagate(False)
label2 = Label(self.innerframe2, text=v["owner"], width=10)
label2.grid(row=n)
label2.grid_propagate(False)
label3 = Label(self.innerframe3, text=v["used for"], width=15)
label3.grid(row=n)
label3.grid_propagate(False)
label4 = Label(self.innerframe4, text="£"+v["fullday price"]+"/£"+v["halfday price"], width=10)
label4.grid(row=n)
label4.grid_propagate(False)
label5 = Label(self.innerframe5, text="£"+str(v["fee"]), width=10)
label5.grid(row=n)
label5.grid_propagate(False)
if "fine" in v.keys():
label6 = Label(self.innerframe6, text="£"+str(v["fine"]), width=10)
label6.grid(row=n)
label6.grid_propagate(False)
else:
label6 = Label(self.innerframe6, text="", width=10)
label6.grid(row=n)
label6.grid_propagate(False)
label7 = Label(self.innerframe7, text="£5", width=10)
label7.grid(row=n)
label7.grid_propagate(False)
label8 = Label(self.innerframe8, text="£{}".format(total), width=10)
label8.grid(row=n)
label8.grid_propagate(False)
totals.append(total)
n += 1
invoice_total = 0
for total in totals:
invoice_total += total
Label(self.frame1, text=invoice_total, font=("Helvetica", 24)).place(relx=0.85, rely=0.4)
root = Tk()
root.resizable(False,False)
w = root.winfo_screenwidth()
h = root.winfo_screenheight()
x = w//2 - 400
y = h//2 - 300
root.geometry("680x500+{}+{}".format(x, y))
Invoice(root)
root.mainloop()
|
sammanadh/Shared_power
|
invoice.py
|
invoice.py
|
py
| 8,996 |
python
|
en
|
code
| 0 |
github-code
|
50
|
34655134074
|
_author_ = 'jake'
_project_ = 'leetcode'
# https://leetcode.com/problems/minimum-time-visiting-all-points/
# On a plane there are n points with integer coordinates points[i] = [xi, yi].
# Your task is to find the minimum time in seconds to visit all points.
# You can move according to the next rules:
# In one second always you can either move vertically, horizontally by one unit or diagonally
# (which means to move one unit vertically and one unit horizontally in one second).
# You have to visit the points in the same order as they appear in the array.
# To move between points take the maximum of the move in the x direction and the move in the y direction.
# This is because we can move diagonally to cover the move in the shorter direction,
# while also moving in the longer direction.
# Time - O(n)
# Space - O(1)
class Solution(object):
def minTimeToVisitAllPoints(self, points):
"""
:type points: List[List[int]]
:rtype: int
"""
x1, y1 = points[0] # current point
time = 0
for x2, y2 in points[1:]:
dx, dy = abs(x1 - x2), abs(y1 - y2)
time += max(dx, dy)
x1, y1 = x2, y2 # update current point
return time
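# Worked example: points = [[1, 1], [3, 4], [-1, 0]].
# (1,1)->(3,4) costs max(2, 3) = 3; (3,4)->(-1,0) costs max(4, 4) = 4; total 7.
#
#     print(Solution().minTimeToVisitAllPoints([[1, 1], [3, 4], [-1, 0]]))  # 7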
|
jakehoare/leetcode
|
python_1001_to_2000/1266_Minimum_Time_Visiting_All_Points.py
|
1266_Minimum_Time_Visiting_All_Points.py
|
py
| 1,244 |
python
|
en
|
code
| 49 |
github-code
|
50
|
17151180334
|
def is_finished(N, board):
for line in board:
for i in range(N):
if line[i]:
return False
return True
def is_board(N, row, col):
return (0 <= row < N) and (0 <= col < N)
N = int(input())
board = [list(map(int, input().split())) for _ in range(N)]
ans = 0
while not is_finished(N, board):
temp = [[0]*N for _ in range(N)]
for row in range(N):
for col in range(N):
if board[row][col]:
temp[row][col] = board[row][col]
for dx, dy in [(1, 0), (-1, 0), (0, 1), (0, -1)]:
if is_board(N, row+dx, col+dy) and board[row+dx][col+dy] == 0 and temp[row][col] > 0:
temp[row][col] -= 1
board = temp
ans += 1
print(ans)
|
kaki1013/Algorithm_Monday_Challenge_Goorm
|
4주차/2번_단풍나무.py
|
2번_단풍나무.py
|
py
| 655 |
python
|
en
|
code
| 0 |
github-code
|
50
|
1554038504
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import icbckg.config as config
print ("Loading model:word2vec ......")
model = config.w2v_model
# compute the similarity between two word lists
def get_similarity(seg_list, word_list):
if ("".join(seg_list) == u'是什么') and ("".join(word_list) in [u'业务简述', u'业务简介', u'产品简介', u'卡片简介', u'功能定义', 'description']):
return 0.9
ws1 = []
ws2 = []
point = 0
for word in seg_list:
if word in model:
ws1.append(word)
for word in word_list:
if word in model:
ws2.append(word)
if len(ws1) > 0 and len(ws2) > 0:
point = model.n_similarity(ws1, ws2)
return point
|
sadxiaohu/icbckg
|
graph/serviceWord2vec.py
|
serviceWord2vec.py
|
py
| 726 |
python
|
en
|
code
| 1 |
github-code
|
50
|
28002898574
|
from openerp.osv import osv, fields
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT
import datetime
import time
class mc_mrp_tipo(osv.osv) :
_name = "mc.mrp.tipo"
_desc = "Clase para definir los distintos tipos de Fabricacion"
_columns = {
"name" : fields.char("Nombre"),
"estados" : fields.one2many("mc.mrp.estado", "tipo", "Estados")
}
mc_mrp_tipo()
class mc_mrp_estado(osv.osv) :
_name = "mc.mrp.estado"
_desc = "Clase para determinar los distintos estados por los que pasa un tipo de fabricacion"
def create(self, cr, uid, args, context=None):
name = args["name"]
name = name[:3]
args["code"] = name.upper()
return super(mc_mrp_estado, self).create(cr, uid, args, context=context)
    def write(self, cr, uid, ids, args, context=None):
        # recompute the code only when a new name is actually supplied
        if "name" in args:
            args["code"] = args["name"][:3].upper()
        return super(mc_mrp_estado, self).write(cr, uid, ids, args, context=context)
_columns = {
"code" : fields.char("Codigo", size=3),
"name" : fields.char("Nombre", required=True),
"tipo" : fields.many2one("mc.mrp.tipo", "Tipo")
}
mc_mrp_estado()
class mc_mrp_estado_line(osv.osv) :
_name = "mc.mrp.estado.line"
_desc = "Lineas generadas por cada estado de fabricacion relacionado al tipo de fabricacion seleccionado en la orden"
def calcular_fecha(self, cr, uid, context):
a = datetime.datetime.now()
x = a.strftime('%d/%m/%Y %H:%M:%S')
return x
def action_start_mrp(self, cr, uid, ids, context):
line = self.browse(cr, uid, ids, context=context)[0]
sale_obj = self.pool.get("sale.order")
sale_id = line.order_id.id
sale_row = sale_obj.read(cr, uid, [sale_id], ["mrp_sale_state"])[0]
sale_state = sale_row["mrp_sale_state"]
if sale_state == "Sin Iniciar" or sale_state == "En proceso":
sale_obj.write(cr, uid, [sale_id], {"mrp_sale_state" : line.estatus_id.code})
else:
sale_state = sale_state + "," + line.estatus_id.code
sale_obj.write(cr, uid, [sale_id], {"mrp_sale_state" : sale_state})
date_order = self.calcular_fecha(cr, uid, context)#time.strftime(DEFAULT_SERVER_DATE_FORMAT)
self.write(cr, uid, ids, {"state":"started", "user_id" : uid, "date_start" : date_order}, context)
return True
def action_stop_mrp(self, cr, uid, ids, context):
line = self.browse(cr, uid, ids, context=context)[0]
sale_obj = self.pool.get("sale.order")
sale_id = line.order_id.id
sale_row = sale_obj.read(cr, uid, [sale_id], ["mrp_sale_state"])[0]
sale_state = sale_row["mrp_sale_state"]
if line.estatus_id.code in sale_state:
sale_state = sale_state.replace(line.estatus_id.code, "")
if sale_state == "":
sale_state = "En proceso"
else:
if sale_state[0] == ",":
sale_state = sale_state[1:]
if sale_state[len(sale_state) - 1] == ",":
sale_state = sale_state[:-1]
date_order = self.calcular_fecha(cr, uid, context)#time.strftime(DEFAULT_SERVER_DATE_FORMAT)
self.write(cr, uid, ids, {"state":"done", "date_finish" : date_order}, context)
lineas = self.search(cr, uid, [("order_id", "=", sale_id)], context=context)
bnd = True
for linea in lineas:
line_browse = self.browse(cr, uid, linea, context=context)
if line_browse["state"] != "done":
bnd = False
if bnd:
sale_state = "Finalizado"
sale_obj.write(cr, uid, sale_id, {"mrp_sale_state" : sale_state})
return {'type': 'ir.actions.client', 'tag': 'reload'}
return sale_obj.write(cr, uid, sale_id, {"mrp_sale_state" : sale_state})
_columns = {
"name" : fields.char("Nombre"),
"estatus_id" : fields.many2one("mc.mrp.estado", "Proceso"),
"order_id" : fields.many2one("sale.order", "Venta"),
'user_id': fields.many2one('res.users', 'Usuario', select=True),
'date_start': fields.datetime('Fecha de Inicio', select=True),
'date_finish': fields.datetime('Fecha de Termino', select=True),
'state': fields.selection([
('new', 'Sin Iniciar'),
('started', 'Iniciada'),
('done', 'Finalizada'),
('unused', 'No Realizada'),
], 'Produccion', select=True),
}
mc_mrp_estado_line()
class mc_sale_order(osv.osv):
_inherit = "sale.order"
_columns = {
"estatus_line" : fields.one2many("mc.mrp.estado.line", "order_id", "Proceso"),
'mrp_sale_type': fields.many2one("mc.mrp.tipo", "Tipo de Venta"),
'mrp_sale_state': fields.char("Produccion"),
'mrp_design' : fields.boolean("Diseno"),
'no_pasadas' : fields.char("Numero de pasadas"),
'entrega_state' : fields.selection([("new", "No Entregado"),
("parcial", "Entrega Parcial"),
("done", "Entregado")], "Estado Entrega")
}
_defaults = {
'entrega_state' : "new"
}
def action_entregar_producto(self, cr, uid, ids, context=None):
res = self.write(cr, uid, ids, {"entrega_state" : "done"}, context=context)
return res
def action_ver_mrp(self, cr, uid, ids, context):
this = self.browse(cr, uid, ids, context=context)[0]
return {
'type': 'ir.actions.act_window',
'name': 'Ordenes de Fabricacion',
'view_mode': 'tree,form',
'res_model': 'mrp.production',
'domain': [("origin","=",this.name)]
}
def action_finish_mrp(self, cr, uid, ids, context=None):
state_line_obj = self.pool.get("mc.mrp.estado.line")
lineas = state_line_obj.search(cr, uid, [("order_id", "=", ids[0])], context=context)
for linea in lineas:
line_browse = state_line_obj.browse(cr, uid, linea, context=context)
if line_browse["state"] == "new":
state_line_obj.write(cr, uid, linea, {"state" : "unused"})
self.write(cr, uid, ids, {"mrp_sale_state" : "Finalizado"})
return True
def action_reopen_mrp(self, cr, uid, ids, context=None):
state_line_obj = self.pool.get("mc.mrp.estado.line")
lineas = state_line_obj.search(cr, uid, [("order_id", "=", ids[0])], context=context)
for linea in lineas:
line_browse = state_line_obj.browse(cr, uid, linea, context=context)
if line_browse["state"] == "unused":
state_line_obj.write(cr, uid, linea, {"state" : "new"})
self.write(cr, uid, ids, {"mrp_sale_state" : "En proceso"})
return True
def action_button_confirm(self, cr, uid, ids, context=None):
sale = self.browse(cr, uid, ids, context=context)[0]
if not sale.estatus_line:
estado = ""
state_obj = self.pool.get("mc.mrp.estado")
state_line_obj = self.pool.get("mc.mrp.estado.line")
tipo = sale.mrp_sale_type.id
states_ids = state_obj.search(cr, uid, [("tipo", "=", tipo)], context=context)
for state in states_ids:
state_row = state_obj.read(cr, uid, [state], ["code"], context=None)[0]
if estado == "":
estado = state_row["code"]
else:
estado = estado + "," + state_row["code"]
state_line_obj.create(cr, uid, {"estatus_id":state, "order_id":ids[0], "state":"new"})
self.write(cr, uid, ids, {"mrp_sale_state" : "Sin Iniciar"})
return super(mc_sale_order, self).action_button_confirm(cr, uid, ids, context=context)
def get_default_type(self, cr, uid, ids):
res = self.pool.get("mc.mrp.tipo").search(cr, uid, [])
return res and res[0] or False
mc_sale_order()
|
code2dev/O7-Sales-Manufacture
|
mc_mrp/sale.py
|
sale.py
|
py
| 8,908 |
python
|
en
|
code
| 0 |
github-code
|
50
|
15900967859
|
"""
给一个只包含 '(' 和 ')' 的字符串,找出最长的有效(正确关闭)括号子串的长度。
对于 "(()",最长有效括号子串为 "()" ,它的长度是 2。
另一个例子 ")()())",最长有效括号子串为 "()()",它的长度是 4。
"""
"""
解题思路:
定义个start变量来记录合法括号串的起始位置,
我们遍历字符串,如果遇到左括号,则将当前下标压入栈,
如果遇到右括号,
如果当前栈为空,则将下一个坐标位置记录到start,
如果栈不为空,则将栈顶元素取出,
此时若栈为空,则更新结果和i - start + 1中的较大值,否
则更新结果和i - 栈顶元素中的较大值,
"""
class Solution(object):
def longestValidParentheses(self, s):
"""
:type s: str
:rtype: int
"""
size = len(s)
if size <= 1:
return 0
stack = []
start = 0
res = 0
for i in range(size):
if s[i] == "(":
stack.append(i)
else:
if len(stack) == 0:
start = i + 1
else:
stack.pop()
res = max(res, i - start + 1) if len(stack) == 0 else max(res, i - stack[-1])
return res
a=')()())'
s = Solution()
print(s.longestValidParentheses(a))
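# Trace for s = ")()())":
#   i=0 ')': stack empty -> start = 1
#   i=1 '(': push 1
#   i=2 ')': pop -> stack empty -> res = max(0, 2 - 1 + 1) = 2
#   i=3 '(': push 3
#   i=4 ')': pop -> stack empty -> res = max(2, 4 - 1 + 1) = 4
#   i=5 ')': stack empty -> start = 6
# Final answer: 4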
|
alexkie007/offer
|
LeetCode/动态规划/32. 最长有效括号.py
|
32. 最长有效括号.py
|
py
| 1,399 |
python
|
zh
|
code
| 0 |
github-code
|
50
|
8454872928
|
from django.shortcuts import render
# Create your views here.
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from .serializers import OrderSerializer, OrderDetailSerializer
from rest_framework.exceptions import APIException
from .models import *
class PlaceOrderAPIView(APIView):
def post(self, request):
try:
order_serializer = OrderSerializer(data=request.data)
order_serializer.is_valid(raise_exception=True)
order = order_serializer.save()
return Response(
{
'order': order_serializer.data
},
status=status.HTTP_201_CREATED
)
except APIException as e:
return Response(
{
'order_errors': str(e)
},
status=status.HTTP_400_BAD_REQUEST
)
|
NithinKrishna10/GarbageGo
|
orders/views.py
|
views.py
|
py
| 962 |
python
|
en
|
code
| 0 |
github-code
|
50
|
25774704324
|
import warnings
warnings.filterwarnings("ignore")
import matplotlib.pyplot as plt
import scipy.stats
import pandas as pd
import random
import seaborn as sns
def Snippet_121():
print(format('Spearman\'s correlation'))
# Create empty dataframe
df = pd.DataFrame()
# Add columns
#df['x'] = random.sample(range(1, 100), 75)
df['x'] = [39, 16, 20,
31, 15, 25,
16, 17, 22,
24, 10, 21,
20, 16, 25,
]
df['y'] = [448,155,452,
425, 151, 392,
427, 122, 390,
402, 162, 382,
420, 145, 393,
]
def spearmans_rank_correlation(xs, ys):
# Calculate the rank of x's
xranks = pd.Series(xs).rank()
# Caclulate the ranking of the y's
yranks = pd.Series(ys).rank()
# Calculate Pearson's correlation coefficient on the ranked versions of the data
return scipy.stats.pearsonr(xranks, yranks)
# Show Pearson's Correlation Coefficient
result = spearmans_rank_correlation(df.x, df.y)[0]
print("spearmans_rank_correlation is: ", result)
# Calculate Spearman’s Correlation Using SciPy
print("Scipy spearmans_rank_correlation is: ", scipy.stats.spearmanr(df.x, df.y)[0])
# reg plot
# sns.lmplot('x', 'y', data=df, fit_reg=True)
# plt.show()
Snippet_121()
#
# import matplotlib.pyplot as plt
# x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
# y = [2, 4, 5, 7, 6, 8, 9, 11, 12, 12]
#
# plt.scatter(x, y, label="stars", color="green",
# marker="1", s=30)
#
#
# plt.xlabel('x - axis')
# plt.ylabel('y - axis')
#
# plt.title('Scatter plot')
# plt.legend()
#
# plt.show()
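# Sanity check sketch: any strictly monotonic relationship has a Spearman's rho
# of exactly 1.0, however nonlinear it is:
#
#     print(scipy.stats.spearmanr([1, 2, 3, 4], [1, 8, 27, 64])[0])  # 1.0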
|
LiadzZ/Article-Vs-Tweets-Sentiment-Analysis-Spearman-correlation
|
SpearManCorrelation.py
|
SpearManCorrelation.py
|
py
| 1,717 |
python
|
en
|
code
| 0 |
github-code
|
50
|
9164914571
|
from PIL import Image
from PIL import ImageFilter
import numpy as np
import cv2
import datetime
import os
import time
import math
import matplotlib.pyplot as plt
# helper to write data to a text file
def write2file(hola, nombre):
if type(hola) == list:
f = open("Datos_pruebas/" + nombre +".txt", "w")
for i in hola:
f.write(str(i))
f.write('\n')
# f.writelines(["%s\n" % item for item in hola])
f.close()
i=1
while(i<2):
#os.system('uvccapture -d/dev/video1 -x1280 -y720')
#os.system('uvccapture -m')
imagen = Image.open('snap.jpg')
gray = imagen.convert("L")
width, height = gray.size
b_w = gray.crop((0, 5*height/11, width, (6*height/11)))
# b_w = gray.copy()
b_w.show()
min_value, max_value = b_w.getextrema()
th = 3*(max_value - min_value)/4.
b_w1 = []
width, height = b_w.size
image_pixel = b_w.load()
for h in range(height):
for w in range(width):
if image_pixel[w,h] <= th:
                image_pixel[w,h] = 0 # white
else:
                image_pixel[w,h] = 1 # black
b_w1.append(image_pixel[w,h])
write2file(b_w1, "b_w1")
#os.system('gedit Datos_pruebas/b_w1.txt &')
b_w.show("0y1")
v = []
label = 2
equivalencias = [0]*500
image_pixel = b_w.load()
for h in range(height):
for w in range(width):
if (w == 0 or h == 0 or w == (width-1)):
pass
else:
                # if it is not white
if image_pixel[w,h] != 0:
vecinos = []
if image_pixel[w-1,h] != 0:
vecinos.append(image_pixel[w-1,h])
if image_pixel[w,h-1] != 0:
vecinos.append(image_pixel[w,h-1])
if image_pixel[w-1,h-1] != 0:
vecinos.append(image_pixel[w-1,h-1])
if image_pixel[w+1,h-1] != 0:
vecinos.append(image_pixel[w+1,h-1])
# print vecinos
v.append(vecinos)
if not vecinos:
image_pixel[w,h] = label
label += 1
else:
min_equivalencia = min(vecinos)
max_equivalencia = max(vecinos)
image_pixel[w,h] = min_equivalencia
for i in range(len(vecinos)):
if (equivalencias[vecinos[i]] == 0 or equivalencias[vecinos[i]] > min_equivalencia):
equivalencias[vecinos[i]] = min_equivalencia
write2file(equivalencias,"equivalencias")
for h in range(height):
for w in range(width):
#if equivalencias[image_pixel[w,h]] != 0:
image_pixel[w,h] = equivalencias[image_pixel[w,h]]
b_w.show()
write2file(v,'v')
write2file(equivalencias,"equivalencias")
b_w.save("b_w.jpg")
#os.system('geeqie b_w.jpg &')
i+=1
#raw_input("Press Enter to continue...")
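# Sketch: OpenCV ships an equivalent labeling routine, so the hand-rolled
# two-pass loop above could be cross-checked against it on the thresholded
# image (np and cv2 are already imported at the top of this file):
#
#     mask = (np.array(b_w) > 0).astype(np.uint8)
#     num_labels, labels = cv2.connectedComponents(mask)
#     # num_labels counts components (background included); labels is per-pixel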
|
arcoslab/llars
|
depth_calc/tests/test4.py
|
test4.py
|
py
| 2,551 |
python
|
en
|
code
| 0 |
github-code
|
50
|
18121861446
|
import torch
from torch_geometric.data import Data
from torch_geometric.transforms import RadiusGraph
from open_gns.models import EncodeProcessDecode
class Simulator():
def __init__(self, *, positions, properties, velocities=None, device=None, R=0.08):
# initialize the model
self.R = R
self.device = device or torch.device('cuda' if torch.cuda.is_available() else 'cpu')
checkpoint = torch.load('checkpoint_9_7.330730671527333e-07.pt')
input_size = 25
model = EncodeProcessDecode(input_size).to(device)
model.load_state_dict(checkpoint['model_state_dict'])
model.eval()
self.model = model
self.positions = positions.to(device)
self.properties = properties.to(device)
self.velocities = velocities if velocities is not None else torch.zeros((len(positions), 5*3))
self.velocities = self.velocities.to(device)
self.data = self.make_graph(positions, properties, self.velocities)
def make_graph(self, positions, properties, velocities):
d = torch.stack([
positions[:,1], # bottom
positions[:,0], # left
positions[:,2], # back
1.2 - positions[:,0], # right
0.4 - positions[:,2] # front
], dim=1)
d = torch.clamp(d, min=0, max=self.R)
x = torch.cat([positions, properties, velocities, d], 1)
data = Data(x=x, pos=positions)
find_edges = RadiusGraph(self.R)
data = find_edges(data)
return data
def step(self, pos=None):
# Predict accelerations
data = self.data
if pos is not None:
data.x[:,:3] = pos
data.pos = pos
accelerations_ = self.model(data.x, data.edge_index)
velocities_ = data.x[:,17:20] + accelerations_
positions_ = data.pos + velocities_
print('p_t:', data.x[0], data.pos[0])
print('a_t:', accelerations_[0])
print('v_t:', data.x[0,17:20])
print('v_t+1',velocities_[0])
print('p_t+1', positions_[0])
# Reconstruct data for next frame
self.velocities = torch.cat([self.velocities[:,3:], velocities_], 1)
self.data = self.make_graph(positions_, self.properties, self.velocities)
return positions_, velocities_, accelerations_
|
alantrrs/open_gns
|
open_gns/simulator.py
|
simulator.py
|
py
| 2,360 |
python
|
en
|
code
| 2 |
github-code
|
50
|
73901145114
|
import argparse
import sys
import json
import requests
import base64
import time
from git import Repo
import tempfile
import shutil
import os
import stat
import glob
import re
import xml.etree.ElementTree as ET
SLEEP = .3
def del_rw(action, name, exc):
os.chmod(name, stat.S_IWRITE)
os.remove(name)
npm_cache = {}
def check_npm( pack ):
#print(pack, type(pack))
if not pack in npm_cache:
time.sleep( SLEEP )
npm_resp = requests.get("https://registry.npmjs.org/{}".format(pack))#.json()
if npm_resp.status_code == 200:
npm_cache[pack] = True
elif npm_resp.status_code == 404:
npm_cache[pack] = False
else:
print("ERROR:", npm_resp.status_code, npm_resp.text)
return None
return npm_cache[pack]
def check_npm_deps( pack ):
# https://docs.npmjs.com/cli/v6/configuring-npm/package-json
pack_contexts = ['dependencies', 'devDependencies', 'peerDependencies', 'bundledDependencies', 'optionalDependencies']
    for context in pack_contexts:
        if context in pack:
            deps = pack[context]
            # bundledDependencies holds a list of names; the others map name -> version
            if isinstance(deps, list):
                deps = {name: "" for name in deps}
            for dep in deps:
                in_npm = check_npm( dep )
                print(" -", context, dep, deps[dep], in_npm)
pypi_cache = {}
def check_pypi( dep ):
if not dep in pypi_cache:
time.sleep( SLEEP )
pypi_resp = requests.get("https://pypi.org/simple/{}/".format(dep))#.json()
if pypi_resp.status_code == 200:
pypi_cache[dep] = True
elif pypi_resp.status_code == 404:
pypi_cache[dep] = False
else:
print("ERROR:", pypi_resp.status_code, pypi_resp.text)
return None
return pypi_cache[dep]
def check_pypi_deps( deps ):
for dep in deps:
# if not line.startswith("#"):
        dep_n = dep
        if ";" in dep_n:
            dep_n = dep_n.split(';')[0].strip()
        if "[" in dep_n:
            dep_n = dep_n.split('[')[0].strip()
        # take the dependency name up to the first version operator that appears
        dep_n = re.split(r'==|>=|<=|~=|=|<|>', dep_n, maxsplit=1)[0].strip()
in_pypi = check_pypi( dep_n )
print( " -", dep, in_pypi )
gems_cache = {}
def check_gem( dep ):
if not dep in gems_cache:
time.sleep( SLEEP )
#print( "https://rubygems.org/api/v1/gems/{}.json".format(dep) )
gem_resp = requests.get("https://rubygems.org/api/v1/gems/{}.json".format(dep))
if gem_resp.status_code == 200:
gems_cache[dep] = True
elif gem_resp.status_code == 404:
gems_cache[dep] = False
else:
print("ERROR:", gem_resp.status_code, gem_resp.text)
return None
return gems_cache[dep]
def check_ruby_deps( deps ):
for dep in deps:
dep_n = ""
if "(" in dep:
dep_n = dep.split("(")[1].split()[0]
else:
dep_n = dep.split()[1]
dep_n = dep_n.replace(",","").replace("'","").replace('"','').replace("<","").replace("~","").replace("=","").replace(">","").replace(")","").replace("(","")
in_gems = check_gem( dep_n )
print(" -", dep, in_gems)
nuget_cache = {}
def check_nuget( dep ):
if not dep in nuget_cache:
time.sleep( SLEEP )
nuget_resp = requests.get("https://www.nuget.org/api/v2/package/{}/".format(dep))
if nuget_resp.status_code == 200:
nuget_cache[dep] = True
elif nuget_resp.status_code == 404:
nuget_cache[dep] = False
else:
print("ERROR:", nuget_resp.status_code, nuget_resp.text)
return None
return nuget_cache[dep]
def check_nuget_deps( deps ):
for dep in deps:
in_nuget = check_nuget( dep )
print(" -", dep, in_nuget)
def parse_file(filename, relative_filename, full_filename):
if filename == "package.json":
print( "-", relative_filename )
with open(full_filename,'r') as stream:
content = stream.read()
parsed = json.loads( content )
#print( json.dumps( parsed ) )
check_npm_deps( parsed )
elif filename == "package-lock.json":
print( "-", relative_filename )
elif filename.endswith('.gemspec'):
print("-", relative_filename)
with open(full_filename,'r') as content:
deps = [x.strip() for x in content.readlines()]
deps = [x for x in deps if "_dependency" in x and not x.startswith("#")]
check_ruby_deps( deps )
elif filename == 'Gemfile':
print('-', relative_filename)
with open(full_filename,'r') as content:
deps = [x.strip() for x in content.readlines()]
deps = [x for x in deps if x.startswith("gem ")]
check_ruby_deps( deps )
elif filename == 'Gemfile.lock':
print('-', relative_filename)
elif filename.endswith('requirements.txt'):
print( '-', relative_filename)
with open(full_filename,'r') as content:
deps = [x.strip() for x in content.readlines()]
deps = [x for x in deps if not x.startswith("#") and x != "" and not x.startswith("-")]
check_pypi_deps( deps )
elif filename.lower() == "setup.py":
print( '-', relative_filename)
elif filename == "packages.config":
print( '-', relative_filename)
elif filename == "nuget.config":
print( '-', relative_filename)
elif filename == "NuGet.Config":
print( '-', relative_filename)
elif filename.lower().endswith('.csproj'):
print( '-', relative_filename)
tree = ET.parse( full_filename )
root = tree.getroot()
deps = [x.get('Include') for x in root.findall('ItemGroup/PackageReference')]
check_nuget_deps( deps )
def parse_clone_mode( args ):
for target in args.github:
repo_resp = requests.get("https://api.github.com/users/{}/repos?per_page=1000".format(target)).json()
for repo_json in repo_resp:
git_url = repo_json['html_url']
print( git_url )
project_path = tempfile.mkdtemp()
Repo.clone_from(git_url, project_path, depth=1)
repo = Repo(project_path)
for root, subdirs, files in os.walk(project_path):
for filename in files:
full_filename = os.path.join(root, filename)
relative_filename = full_filename[len(project_path)+1:]
try:
parse_file( filename, relative_filename, full_filename )
                except Exception:
                    print("Error: parsing", relative_filename, "\n", sys.exc_info()[0])
# clean up
shutil.rmtree(project_path, onerror=del_rw)
def run_target( args ):
for target in args.dir:
for root, subdirs, files in os.walk( target ):
for filename in files:
full_filename = os.path.join(root, filename)
relative_filename = full_filename[len( target )+1:]
parse_file( filename, relative_filename, full_filename )
def run_tests():
for root, subdirs, files in os.walk("./data"):
for filename in files:
full_filename = os.path.join(root, filename)
relative_filename = full_filename[len("./data")+1:]
parse_file( filename, relative_filename, full_filename )
def main(argv):
    parser = argparse.ArgumentParser(description='Check project dependencies against public package registries (npm, PyPI, RubyGems, NuGet).')
parser.add_argument('--github', nargs='+', help='github target orgs', required=False)
parser.add_argument('--test', default=False, action="store_true", help="Run tests")
    parser.add_argument('--repo', '-r', required=False, choices=['npm','pypi','gems','nuget'], help='read a list of package names from stdin and check them against the target repo')
parser.add_argument('--dir', '-d', nargs='+', help='target directories')#, required=False)
parser.add_argument('--verbose', '-v', default=False, action="store_true", help="verbose mode")
if len(argv) == 0:
parser.print_help()
sys.exit(0)
try:
global args
args = parser.parse_args()
if args.test:
run_tests()
elif args.github:
parse_clone_mode( args )
elif args.repo:
for line in sys.stdin:
dep = line.strip()
found = None
if args.repo == 'npm':
found = check_npm( dep )
elif args.repo == 'pypi':
found = check_pypi( dep )
elif args.repo == 'gems':
found = check_gem( dep )
elif args.repo == 'nuget':
found = check_nuget( dep )
                print(dep, found)
elif args.dir:
run_target( args )
except IOError as err:
print(str(type(err)) + " : " + str(err))
parser.print_help()
sys.exit(2)
if __name__ == "__main__":
main(sys.argv[1:])
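
# --- Illustrative usage (assumption, not part of the original file) ---
# Check a checked-out project tree:   python dep_finder.py --dir ./my_project
# Check names piped on stdin:         echo requests | python dep_finder.py --repo pypi -v
# Programmatic use of a cache-backed checker (needs network access):
#   check_pypi("requests")  # -> True if the name exists on PyPI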
|
danamodio/scripts
|
dep_finder.py
|
dep_finder.py
|
py
| 9,658 |
python
|
en
|
code
| 0 |
github-code
|
50
|
74658691356
|
# break - example
print("The break instruction:")
for i in range(1, 6):
if i == 3:
break
print("Inside the loop.", i)
print("Outside the loop.")
# continue - example
print("\nThe continue instruction:")
for i in range(1, 6):
if i == 3:
continue
print("Inside the loop.", i)
print("Outside the loop.")
largest_number = -99999999
counter = 0
while True:
number = int(input("Enter a number or type -1 to end program: "))
if number == -1:
break
counter += 1
if number > largest_number:
largest_number = number
if counter != 0:
print("The largest number is", largest_number)
else:
print("You haven't entered any number.")
|
HongThaiPham/bkcad-python-for-iot
|
python-essentials-1/break-and-continue.py
|
break-and-continue.py
|
py
| 697 |
python
|
en
|
code
| 0 |
github-code
|
50
|
73702162074
|
from shutil import copyfile
import xml.etree.ElementTree as ET
from vtv_utils import initialize_timed_rotating_logger, vtv_send_html_mail_2
allowed_paths = ['/home/veveo/sports_spiders']
def get_table_header(title, headers):
table_header = '<br /><br /><b>%s</b><br /><table border="1" \
style="border-collapse:collapse;" cellpadding="3px" cellspacing="3px"><tr>' %title
for header in headers:
table_header += '<th>%s</th>' %header
table_header += '</tr>'
return table_header
def get_table_body(removed_list):
body = ''
for data in removed_list:
body += '<tr>'
for d in data:
body += '<td>%s</td>' %d
body += '</tr>'
body += '</table>'
return body
class CheckSpiders:
def __init__(self):
self.wrong_paths = {}
self.server = '10.4.1.112'
self.receivers = ['[email protected]']
self.sender = "[email protected]"
self.logger = initialize_timed_rotating_logger('cronjob_check.log')
def collect_data(self):
copyfile('/home/veveo/config/vtv.xml', './vtv.xml')
tree = ET.parse('vtv.xml')
for node in tree.iter('cronjob'):
cronjob_name = node.get('value')
command = node.find('command').get('value')
            disable_status = node.find('disable')
            disabled = False
            # an Element with no children is falsy, so compare against None explicitly
            if disable_status is not None:
                disable_status = disable_status.get('status', '')
                if disable_status == "inserted":
                    disabled = True
if disabled:
continue
for path in allowed_paths:
if path in command:
break
else:
self.wrong_paths[cronjob_name] = (cronjob_name, command)
text = ''
if self.wrong_paths:
subject = "SPORTS spiders running from wrong paths !!!"
headers = ('Cronjob Name', 'Run Path')
text += get_table_header('Spiders not running in production path', headers)
text += get_table_body(self.wrong_paths.values())
vtv_send_html_mail_2(self.logger, self.server, self.sender, self.receivers, subject, '', text, '')
if __name__ == "__main__":
obj = CheckSpiders()
obj.collect_data()
|
headrun/SWIFT
|
SPORTS/sports_spiders/monitoring_mails/check_spider_cronjob.py
|
check_spider_cronjob.py
|
py
| 2,316 |
python
|
en
|
code
| 1 |
github-code
|
50
|
27953709230
|
"""
API controllers for node related operations.
"""
import base64
import hug
from bson.objectid import InvalidId
from marshmallow import ValidationError
from shipyard.errors import (AlreadyPresent, MissingDevices, NotFeasible,
NotFound)
from shipyard.node.model import Node
from shipyard.node.service import NodeService
@hug.get('/')
def get_node_list(response, name: str = None):
"""
Retrieve the full list of nodes or a node with a given name.
If the `name` parameter is specified in the request, this function attempts
to return the node with that name. If it is not found, returns a 404
response.
If no name is given, the function returns the full list of nodes present in
the system.
"""
try:
if name is not None:
result = NodeService.get_by_name(name)
return Node.Schema().dump(result)
results = NodeService.get_all()
return Node.Schema(only=['_id', 'name', 'ip', 'cpu', 'cpu_arch', 'tasks._id', 'tasks.name']).dump(results, many=True)
except NotFound as e:
response.status = hug.HTTP_NOT_FOUND
return {'error': str(e)}
except Exception:
response.status = hug.HTTP_INTERNAL_SERVER_ERROR
return {'error': 'Unable to fetch node list.'}
@hug.post('/')
def post_node(request, body, response):
"""
Create a new node resource.
This function attempts to create a new node resource with the data given in
the body of the request, returning its new ID in the response.
If the name for the new node is already in use, returns a 409 response. If
the new node's data isn't correct, returns a 400 response.
"""
try:
auth_header = request.get_header('AUTHORIZATION')
decoded_auth = base64.b64decode(auth_header.split()[1]).decode()
ssh_user, ssh_pass = decoded_auth.split(':')
new_node = Node.Schema().load(body)
new_id = NodeService.create(new_node, ssh_user, ssh_pass)
return {'_id': new_id}
except ValidationError as e:
response.status = hug.HTTP_BAD_REQUEST
return {'error': e.messages}
except AlreadyPresent as e:
response.status = hug.HTTP_CONFLICT
return {'error': str(e)}
except Exception as e:
response.status = hug.HTTP_INTERNAL_SERVER_ERROR
        return {'error': 'Unable to create node. ' + str(e)}
@hug.get('/{node_id}')
def get_node(node_id: str, response):
"""
Retrieve the node with the given ID.
If no node is found, returns a 404 response. If the given ID is invalid,
returns a 400 response.
"""
try:
result = NodeService.get_by_id(node_id)
return Node.Schema().dump(result)
except InvalidId as e:
response.status = hug.HTTP_BAD_REQUEST
return {'error': str(e)}
except NotFound as e:
response.status = hug.HTTP_NOT_FOUND
return {'error': str(e)}
except Exception:
response.status = hug.HTTP_INTERNAL_SERVER_ERROR
return {'error': 'Unable to fetch node.'}
@hug.put('/{node_id}')
def put_node(node_id: str, body, response):
"""
Put the values given in the body in a node resource.
Returns the updated node resource in the response.
If no node is found, returns a 404 response. If the given ID is invalid,
returns a 400 response.
"""
try:
result = NodeService.update(node_id, body)
return Node.Schema().dump(result)
except ValidationError as e:
response.status = hug.HTTP_BAD_REQUEST
return {'error': e.messages}
except InvalidId as e:
response.status = hug.HTTP_BAD_REQUEST
return {'error': str(e)}
except NotFound as e:
response.status = hug.HTTP_NOT_FOUND
return {'error': str(e)}
except Exception:
response.status = hug.HTTP_INTERNAL_SERVER_ERROR
return {'error': 'Unable to update node.'}
@hug.delete('/{node_id}')
def delete_node(node_id: str, response):
"""
Delete the node with the given ID.
Returns the deleted node's data in the response.
If no node is found, returns a 404 response. If the given ID is invalid,
returns a 400 response.
"""
try:
result = NodeService.delete(node_id)
return Node.Schema(exclude=['_id', 'tasks']).dump(result)
except InvalidId as e:
response.status = hug.HTTP_BAD_REQUEST
return {'error': str(e)}
except NotFound as e:
response.status = hug.HTTP_NOT_FOUND
return {'error': str(e)}
except Exception:
response.status = hug.HTTP_INTERNAL_SERVER_ERROR
return {'error': 'Unable to delete node.'}
@hug.post('/{node_id}/tasks')
def post_node_tasks(node_id: str, response, task_id: str = None):
"""
Add a task to a node using their IDs.
Returns the updated node's data in the response.
If no task ID is present in the request or any ID is invalid, returns a 400
response.
If no node or task are found with the given IDs, returns a 404 response.
If the operation can't be finished, returns a 500 response.
"""
if task_id is None:
response.status = hug.HTTP_BAD_REQUEST
return {'error': 'No task ID was specified in the request'}
try:
result = NodeService.add_task(node_id, task_id)
if result is None:
response.status = hug.HTTP_INTERNAL_SERVER_ERROR
return {'error': 'Unable to add task to node.'}
return Node.Schema().dump(result)
except InvalidId as e:
response.status = hug.HTTP_BAD_REQUEST
return {'error': str(e)}
except NotFound as e:
response.status = hug.HTTP_NOT_FOUND
return {'error': str(e)}
except (NotFeasible, MissingDevices) as e:
response.status = hug.HTTP_INTERNAL_SERVER_ERROR
return {'error': str(e)}
except Exception:
response.status = hug.HTTP_INTERNAL_SERVER_ERROR
return {'error': 'Unable to add task to node.'}
@hug.delete('/{node_id}/tasks/{task_id}')
def delete_node_tasks(node_id: str, task_id: str, response):
"""
Remove a task from a node's taskset.
Returns the updated node's data in the response.
If no task ID is present in the request or any ID is invalid, returns a 400
response.
If no node or task are found with the given IDs, returns a 404 response.
If the operation can't be finished, returns a 500 response.
"""
try:
result = NodeService.remove_task(node_id, task_id)
if result is None:
response.status = hug.HTTP_INTERNAL_SERVER_ERROR
            return {'error': 'Unable to remove task from node.'}
return Node.Schema().dump(result)
except InvalidId as e:
response.status = hug.HTTP_BAD_REQUEST
return {'error': str(e)}
except NotFound as e:
response.status = hug.HTTP_NOT_FOUND
return {'error': str(e)}
except Exception:
response.status = hug.HTTP_INTERNAL_SERVER_ERROR
return {'error': 'Unable to remove task from node.'}
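
# --- Illustrative client sketch (assumption, not part of the original module) ---
# Exercises the endpoints above with the `requests` library; base URL,
# credentials, and IDs are placeholders.
def _demo_client(base_url="http://localhost:8000"):
    import base64
    import requests
    auth = base64.b64encode(b"ssh_user:ssh_pass").decode()
    # create a node (POST / expects Basic auth carrying SSH credentials)
    resp = requests.post(base_url + "/",
                         json={"name": "node1", "ip": "10.0.0.5"},
                         headers={"Authorization": "Basic " + auth})
    node_id = resp.json()["_id"]
    # attach a task to the node, then detach it again
    requests.post(f"{base_url}/{node_id}/tasks",
                  params={"task_id": "0123456789abcdef01234567"})
    requests.delete(f"{base_url}/{node_id}/tasks/0123456789abcdef01234567")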
|
varrrro/shipyard-server
|
shipyard/node/controllers.py
|
controllers.py
|
py
| 7,073 |
python
|
en
|
code
| 2 |
github-code
|
50
|
69954555034
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 19 02:51:59 2021
@author: lch
"""
import cv2
from torch.utils import data as data_utils
import numpy as np
import os
class Dataset(data_utils.Dataset):
"""__init__ and __len__ functions are the same as in TorchvisionDataset"""
def __init__(self, data_dir, imgs, bbox, phase, class_labels=None, data_transforms=None):
self.data_dir = data_dir
self.imgs = imgs
self.bbox = bbox
self.phase = phase
self.class_labels = class_labels
self.data_transforms = data_transforms
def __getitem__(self, idx):
# Read an image with OpenCV
img = cv2.imread(os.path.join(self.data_dir, self.imgs[idx]))
bbox = self.bbox[idx]
        # Apply the transform pipeline if one was provided.
if self.data_transforms:
augmented = self.data_transforms[self.phase](image=img, bboxes=bbox, class_labels=self.class_labels)
img = augmented['image']
bbox = augmented['bboxes']
        # Normalize the bbox from [x, y, w, h] to [min_x, min_y, max_x, max_y].
bbox = list(bbox[0])
bbox[2] = bbox[0] + bbox[2]
bbox[3] = bbox[1] + bbox[3]
bbox = np.array(bbox)
        if not isinstance(img, np.ndarray):
            img = img.numpy()
return img, bbox
def __len__(self):
return len(self.imgs)
class TestDataset(data_utils.Dataset):
"""__init__ and __len__ functions are the same as in TorchvisionDataset"""
def __init__(self, data_dir, imgs, phase, data_transforms=None):
self.data_dir = data_dir
self.imgs = imgs
self.phase = phase
self.data_transforms = data_transforms
def __getitem__(self, idx):
filename = self.imgs[idx]
# Read an image with OpenCV
img = cv2.imread(os.path.join(self.data_dir, self.imgs[idx]))
if self.data_transforms:
augmented = self.data_transforms[self.phase](image=img)
img = augmented['image']
return filename, img
def __len__(self):
return len(self.imgs)
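
# --- Illustrative usage sketch (assumption, not part of the original file) ---
# Assumes albumentations-style pipelines (called as f(image=..., bboxes=...,
# class_labels=...)) and one [x, y, w, h] bbox per image; paths are placeholders.
def _demo_dataset(data_dir="data/train"):
    import albumentations as A
    transforms = {"train": A.Compose(
        [A.Resize(256, 256)],
        bbox_params=A.BboxParams(format="coco", label_fields=["class_labels"]))}
    ds = Dataset(data_dir, ["img_0.jpg"], [[(10, 20, 50, 80)]], "train",
                 class_labels=["person"], data_transforms=transforms)
    return ds[0]  # (image array, bbox as [min_x, min_y, max_x, max_y])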
|
Hwan-I/motion_keypoint
|
HRNet_bbox/lib/dataset/dataset.py
|
dataset.py
|
py
| 2,198 |
python
|
en
|
code
| 0 |
github-code
|
50
|
70897713434
|
from fastapi import Request, status
from fastapi.utils import is_body_allowed_for_status_code
from fastapi.responses import JSONResponse, Response
from starlette.exceptions import HTTPException as StarletteHTTPException
from src.response import ErrorResponse
async def request_validation_exception_handler(request: Request, exc: ValueError):
content = ErrorResponse(
error={
"code": "COM422",
"message": str(exc),
}
)
return JSONResponse(status_code=status.HTTP_422_UNPROCESSABLE_ENTITY, content=content.dict())
async def custom_http_exception_handler(request: Request, exc: StarletteHTTPException):
headers = getattr(exc, "headers", None)
if not is_body_allowed_for_status_code(exc.status_code):
return Response(status_code=exc.status_code, headers=headers)
content = ErrorResponse(
error={
"code": exc.status_code,
"message": exc.detail or str(exc),
}
)
return JSONResponse(
status_code=exc.status_code,
headers=headers,
content=content.dict(),
)
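
# --- Illustrative wiring sketch (assumption, not part of the original module) ---
# Registers the handlers above on a FastAPI application.
def create_app():
    from fastapi import FastAPI
    from fastapi.exceptions import RequestValidationError

    app = FastAPI()
    # RequestValidationError subclasses ValueError, matching the handler signature
    app.add_exception_handler(RequestValidationError, request_validation_exception_handler)
    app.add_exception_handler(StarletteHTTPException, custom_http_exception_handler)
    return app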
|
DylanMsK/fastapi_template
|
src/exception_handlers.py
|
exception_handlers.py
|
py
| 1,103 |
python
|
en
|
code
| 0 |
github-code
|
50
|
4921157118
|
import re
from django.core.exceptions import ValidationError
def validate_slug(value):
regex = r'^[-a-zA-Z0-9_]+$'
if not re.match(regex, value):
raise ValidationError(
            'Invalid slug'
)
return value
def validate_color(value):
regex = r'^#[A-Fa-f0-9]{6}$'
if not re.match(regex, value):
raise ValidationError(
            'Invalid color'
)
return value
def validate_border(value):
    num = 2147483647  # max value of a signed 32-bit integer
    if value > num:
        raise ValidationError('Value is too large')
    return value
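
# --- Illustrative usage sketch (assumption, not part of the original module) ---
# The validators above plug into Django model (or form) fields, e.g.:
# from django.db import models
#
# class Tag(models.Model):
#     slug = models.SlugField(max_length=50, validators=[validate_slug])
#     color = models.CharField(max_length=7, validators=[validate_color])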
|
methodologyCode/foodgram-project-react
|
backend/foodgram/recipes/validators.py
|
validators.py
|
py
| 635 |
python
|
ru
|
code
| 0 |
github-code
|
50
|
40130456950
|
# This config can be used for tests of XML files containing mappings.
# Since data in CondDB has same labels ESPrefer is needed.
# For internal and testing purposes only
import FWCore.ParameterSet.Config as cms
from Configuration.Eras.Era_Run3_cff import Run3
process = cms.Process('RECODQM', Run3)
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(10000) )
process.verbosity = cms.untracked.PSet( input = cms.untracked.int32(-1) )
# minimum of logs
process.MessageLogger = cms.Service("MessageLogger",
cerr = cms.untracked.PSet(
threshold = cms.untracked.string('WARNING')
)
)
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
# load DQM framework
process.load("DQM.Integration.config.environment_cfi")
process.dqmEnv.subSystemFolder = "CTPPS"
process.dqmEnv.eventInfoFolder = "EventInfo"
process.dqmSaver.path = ""
process.dqmSaver.tag = "CTPPS"
# data source
process.source = cms.Source("NewEventStreamFileReader",
fileNames = cms.untracked.vstring(
'file:/eos/cms/store/t0streamer/Data/PhysicsZeroBias2/000/369/585/run369585_ls0044_streamPhysicsZeroBias2_StorageManager.dat'
),
)
from Configuration.AlCa.GlobalTag import GlobalTag
from Configuration.AlCa.autoCond import autoCond
process.GlobalTag = GlobalTag(process.GlobalTag, autoCond['run3_data_prompt'], '')
# raw-to-digi conversion
process.load("EventFilter.CTPPSRawToDigi.ctppsRawToDigi_xml_cff")
# prefer mappings from XML files
process.es_prefer_totemTimingMapping = cms.ESPrefer("TotemDAQMappingESSourceXML", "totemDAQMappingESSourceXML_TotemTiming", TotemReadoutRcd=cms.vstring("TotemDAQMapping/TotemTiming"))
process.es_prefer_totemDiamondMapping = cms.ESPrefer("TotemDAQMappingESSourceXML", "totemDAQMappingESSourceXML_TimingDiamond", TotemReadoutRcd=cms.vstring("TotemDAQMapping/TimingDiamond"))
process.es_prefer_totemT2Mapping = cms.ESPrefer("TotemDAQMappingESSourceXML", "totemDAQMappingESSourceXML_TotemT2", TotemReadoutRcd=cms.vstring("TotemDAQMapping/TotemT2"))
process.es_prefer_TrackingStripMapping = cms.ESPrefer("TotemDAQMappingESSourceXML", "totemDAQMappingESSourceXML_TrackingStrip", TotemReadoutRcd=cms.vstring("TotemDAQMapping/TrackingStrip"))
# local RP reconstruction chain with standard settings
process.load("RecoPPS.Configuration.recoCTPPS_cff")
process.load('Geometry.VeryForwardGeometry.geometryRPFromDD_2021_cfi')
# CTPPS DQM modules
process.load("DQM.CTPPS.ctppsDQM_cff")
process.ctppsDiamondDQMSource.excludeMultipleHits = cms.bool(True)
process.ctppsDiamondDQMSource.plotOnline = cms.untracked.bool(True)
process.ctppsDiamondDQMSource.plotOffline = cms.untracked.bool(False)
process.path = cms.Path(
process.ctppsRawToDigi*
process.recoCTPPS*
process.ctppsDQMCalibrationSource*
process.ctppsDQMCalibrationHarvest
)
process.end_path = cms.EndPath(
process.dqmEnv +
process.dqmSaver
)
process.schedule = cms.Schedule(
process.path,
process.end_path
)
|
cms-sw/cmssw
|
DQM/CTPPS/test/strip_dqm_test_xml_cfg.py
|
strip_dqm_test_xml_cfg.py
|
py
| 3,178 |
python
|
en
|
code
| 985 |
github-code
|
50
|
20912198449
|
#! /usr/bin/env python
import openpyxl
import datetime
class WriteXlsxUtil(object):
def __init__(self, file_name='File'):
self.suffix = '.xlsx'
self.workbook = openpyxl.Workbook()
self.sheet = self.workbook.create_sheet(index=0, title="Sheet")
self.file_name = file_name
        self.empty = True  # tracks whether any content has actually been written
self.row = 0
    # Write one row of cell values
    def write(self, content):
        self.empty = False
        self.row += 1
        for index in range(1, len(content) + 1):  # openpyxl columns are 1-based
            v = str(content[index - 1])
            # values are stored as strings, which preserves leading zeros
            self.sheet.cell(self.row, index).value = v
    # Save the workbook under a timestamped file name
    def save(self):
        if not self.empty:
            self.workbook.save("./" + self.file_name + "-" + datetime.datetime.now().strftime('%Y-%m-%d %H-%M-%S') + self.suffix)
|
infuq/infuq-others
|
Python/t_excel/WriteXlsxUtil.py
|
WriteXlsxUtil.py
|
py
| 985 |
python
|
en
|
code
| 1 |
github-code
|
50
|
40141905590
|
#!/usr/bin/env python3
import time
import os
import sys
import zlib
import struct
DECOMPRESS_BUF_SIZE = 4*1024*1024
BLOCK_MAGIC = "\x00\x00\xFF\xFF"
ENDLINE_MAGIC = "\n"
# we treat everything as a deflate stream
# gzip header has no power here
def strip_gzip_header(body):
    assert body[0:2] == b"\x1f\x8b"
method, flags, mtime = struct.unpack("<BBIxx", body[2:10])
FHCRC = 0x02
FEXTRA = 0x04
FNAME = 0x08
FCOMMENT = 0x10
i = 10
if flags & FEXTRA:
size, = struct.unpack("<H", body[i:i+2])
i += size + 2
    def skip_until_zero(ix):
        while body[ix] != 0: ix += 1  # indexing bytes yields ints on Python 3
        return ix + 1
if flags & FNAME: i = skip_until_zero(i)
if flags & FCOMMENT: i = skip_until_zero(i)
if flags & FHCRC: i += 2
body = body[i:]
return body
class Decoder(object):
def __init__(self, fname, last_n_lines):
self.f = open(fname, "rb")
self.last_n_lines = last_n_lines
self.reset()
def reset(self):
self.sync = False
if hasattr(self, 'zstream'):
self.zstream.flush()
self.zstream = zlib.decompressobj(-zlib.MAX_WBITS)
def decode(self, bytes, if_start=False):
if not bytes:
return ""
if if_start:
self.sync = True
#self.zstream = zlib.decompressobj(zlib.MAX_WBITS | 32)
bytes = strip_gzip_header(bytes)
elif not self.sync:
x = bytes.find(BLOCK_MAGIC)
if x != -1:
bytes = bytes[x + len(BLOCK_MAGIC):]
self.sync = True
if not self.sync:
# not in sync, can't decode
return ""
text = self.zstream.decompress(bytes)
#print "decoded:", len(text), len(self.zstream.unused_data)
if len(self.zstream.unused_data) == 8:
# this usually means checksum and len is left
# but we don't care about any of those!
self.zstream.flush()
self.zstream = None
return text
    def output_line(self, line):
        sys.stdout.buffer.write(line)  # line is bytes, so write to the binary buffer
        sys.stdout.flush()
def initial_synchronize(self):
f = self.f
f.seek(0, 2)
end = f.tell()
start = max(0, end - DECOMPRESS_BUF_SIZE)
f.seek(start, 0)
body = f.read(end - start)
text = self.decode(body, start == 0)
self.known_size = end
return text
def initial(self):
text = self.initial_synchronize()
n_lines = self.last_n_lines
lines = text.rsplit(ENDLINE_MAGIC, n_lines + 1)
if len(lines) > n_lines:
lines = lines[1:]
self.output_line(ENDLINE_MAGIC.join(lines))
def follow(self):
if self.known_size is None:
raise Exception("Call initial() first.")
while self.zstream:
size = os.fstat(self.f.fileno()).st_size
if self.known_size > size:
sys.stderr.write("%s: file truncated\n" % sys.argv[0])
sys.stderr.write("%s: waiting for the next write\n" % sys.argv[0])
sys.stderr.flush()
if self.sync:
self.sync = False
self.zstream.flush()
self.zstream = zlib.decompressobj(-zlib.MAX_WBITS)
text = self.initial_synchronize()
continue
elif self.known_size == size:
time.sleep(1)
continue
assert self.f.tell() == self.known_size
body = self.f.read(size - self.known_size)
text = self.decode(body, self.known_size == 0)
self.output_line(text)
self.known_size = size
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='tail, but for gzip files (with Z_FULL_SYNC)')
parser.add_argument('-f', action="store_true", help='watch the file for changes')
parser.add_argument('-n', type=int, help='output the last K lines', metavar='K', default=10)
parser.add_argument('file', help="file name to watch")
args = parser.parse_args()
d = Decoder(args.file, args.n)
d.initial()
if args.f:
d.follow()
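
# --- Illustrative usage (assumption, not part of the original file) ---
# Shell:  ./ztail.py -n 20 -f app.log.gz
# Programmatic use of the class above:
#   d = Decoder("app.log.gz", 10)  # print the last 10 lines...
#   d.initial()
#   d.follow()                     # ...then keep printing as the file grows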
|
cms-sw/cmssw
|
DQMServices/StreamerIO/scripts/ztail.py
|
ztail.py
|
py
| 4,270 |
python
|
en
|
code
| 985 |
github-code
|
50
|
71604176156
|
import os
import torch
import mitsuba
# Set the desired mitsuba variant
mitsuba.set_variant('gpu_autodiff_rgb')
from robust_vision.utils.graphics import GaussianSmoothing
import robust_vision.utils.enoki as uek
import enoki as ek
from mitsuba.core import ScalarTransform4f, Transform4f, Vector3f, UInt32, Float32
from mitsuba.core.xml import load_dict
from mitsuba.python.util import traverse
from mitsuba.python.autodiff import render_torch
from skimage.filters import threshold_otsu
def bbox_mask(alpha):
""" alpha is torch bool with shape (1,w,h), returns bbox coordinates """
alpha = alpha.squeeze(0)
rows = torch.any(alpha, axis=1)
cols = torch.any(alpha, axis=0)
rmin, rmax = torch.where(rows)[0][[0, -1]]
cmin, cmax = torch.where(cols)[0][[0, -1]]
return rmin, rmax, cmin, cmax
class Renderer:
def __init__(self, cam_azimuth=0, cam_elevation=0, cam_distance=3.5,
cam_translation=(0,0,0), object_color=(1.,1.,1.),
lamp_radiance=10, lamp_origin=(2,0,0), lamp_up=(0,1,0),
wall_length=1.6, wall_distance=1.6, wall_color=(1.,1.,1.),
res=224, sample_count=32, file_pth="meshes/grid_nogradients.obj",
to_unitsphere=False, center_bbox=False, face_normals=False,
max_depth=3, silhouette=False,
mooney_thresh_method="mean_mask"):
        file_type = os.path.split(file_pth)[-1].split(".")[-1]
self.requires_grad = False
cam_translation = ScalarTransform4f.translate(v=cam_translation)
cam_rotation = (ScalarTransform4f.rotate(axis=(0,1,0), angle=cam_azimuth) *
ScalarTransform4f.rotate(axis=(0,0,1), angle=cam_elevation))
self.cam_toworld = (cam_translation * cam_rotation *
ScalarTransform4f.look_at(origin=(cam_distance,0,0),
target=(0, 0, 0),
up=(0, 1, 0)))
if silhouette:
lamp = {
"type" : "constant"
}
object_color = (0., 0., 0.)
else:
self.lamp_toworld = (
ScalarTransform4f.look_at(origin=lamp_origin,
target=(0, 0, 0),
up=lamp_up) *
ScalarTransform4f.scale(v=0.5))
lamp = {
"id" : "Lamp",
"type" : "rectangle",
"to_world" : self.lamp_toworld,
"emitter" : {
"type" : "smootharea",
"radiance" : float(lamp_radiance),
},
}
object_color = object_color
# wall_bsdf = {
# "type" : "diffuse",
# "reflectance" : {
# "type" : "rgb",
# "value" : wall_color,
# }
# }
self.scene = load_dict({
"type" : "scene",
"integrator" : {
"type" : "aov",
"aovs" : "d:depth",
"integrator" : {
"type" : "pathreparam",
"max_depth" : max_depth,
},
},
"sensor" : {
"type" : "perspective",
"to_world" : self.cam_toworld,
# emulating pyrender IntrinsicsCamera
"near_clip" : 0.05,
"far_clip" : 100.,
"film" : {
"type" : "hdrfilm",
"width" : res,
"height" : res,
},
"sampler" : {
"type" : "independent",
"sample_count" : sample_count,
}
},
"lamp" : lamp,
"shape" : {
"id" : "Object",
"type" : file_type,
"filename" : file_pth,
"face_normals" : face_normals,
"bsdf" : {
"type" : "twosided",
"bsdf" : {
"type" : "diffuse",
"reflectance" : {
"type" : "rgb",
"value" : object_color,
},
},
},
},
# "shape1" : {
# "id" : "right_wall",
# "type" : "rectangle",
# "to_world": (ScalarTransform4f.look_at(origin=(0,0,-wall_distance),
# target=(0,0,0),
# up=(0,1,0)) *
# ScalarTransform4f.scale(v=wall_length)),
# # "emitter" : {
# # "type" : "smootharea",
# # "radiance" : wall_radiance,
# # },
# "bsdf" : wall_bsdf,
# },
# "shape2" : {
# "id" : "left_wall",
# "type" : "rectangle",
# "to_world": (ScalarTransform4f.look_at(origin=(0,0,wall_distance),
# target=(0,0,0),
# up=(0,1,0)) *
# ScalarTransform4f.scale(v=wall_length)),
# "bsdf" : wall_bsdf,
# },
# "shape3" : {
# "id" : "bottom_wall",
# "type" : "rectangle",
# "to_world": (ScalarTransform4f.look_at(origin=(0,-wall_distance,0),
# target=(0,0,0),
# up=(0,1,0)) *
# ScalarTransform4f.scale(v=wall_length)),
# "bsdf" : wall_bsdf,
# },
# # "shape4" : {
# # "id" : "top_wall",
# # "type" : "rectangle",
# # "to_world": (cam_rotation *
# # ScalarTransform4f.translate(v=[0, wall_distance, 0]) *
# # ScalarTransform4f.rotate(axis=[1,0,0], angle=90) *
# # ScalarTransform4f.scale(v=wall_length)),
# # "bsdf" : wall_bsdf,
# # },
# "shape5" : {
# "id" : "front_wall",
# "type" : "rectangle",
# "to_world": (cam_rotation *
# ScalarTransform4f.translate(v=[cam_distance+0.5, 0, 0]) *
# ScalarTransform4f.rotate(axis=[0,1,0], angle=270) *
# ScalarTransform4f.scale(v=2)),
# "bsdf" : wall_bsdf,
# },
})
self.params = traverse(self.scene)
self.preprocess_mesh(to_unitsphere=to_unitsphere,
center_bbox=center_bbox)
self.faces_size = ek.slices(self.params["Object.faces_buf"])
self.params_optim = None
self.params_optim_torch = None
self.gray = None
self.alpha = None
self.mooney_thresh_method = mooney_thresh_method
self.smoother = GaussianSmoothing(1, 10, 2).cuda()
def rotate_lamp(self, azimuth, elevation):
""" rotates lamp around x-axis (elevation) and y-axis (azimuth)
in lamp coordinates """
xaxis = self.lamp_toworld.transform_vector([1,0,0])
yaxis = self.lamp_toworld.transform_vector([0,1,0])
self.params['Lamp.to_world'] = (
ScalarTransform4f.rotate(axis=yaxis, angle=azimuth) *
ScalarTransform4f.rotate(axis=xaxis, angle=elevation) *
self.lamp_toworld
)
self.params.set_dirty('Lamp.to_world')
self.params.update()
def preprocess_mesh(self,
center_bbox=False,
to_unitsphere=False,
buf=1.03):
""" Center and normalize the mesh """
if to_unitsphere or center_bbox:
verts = uek.ravel(self.params['Object.vertex_positions_buf'])
transl = (uek.to_each_col(verts, ek.hmax) + \
uek.to_each_col(verts, ek.hmin)) / 2.
verts -= transl
if to_unitsphere:
max_distance = ek.hmax(ek.norm(verts)) * buf
verts /= max_distance
if to_unitsphere or center_bbox:
uek.unravel(verts, self.params['Object.vertex_positions_buf'])
self.params.set_dirty('Object.vertex_positions_buf')
self.params.update()
def release_memory(self):
ek.cuda_malloc_trim()
def get_vertex_grad(self):
return ek.gradient(self.params_optim).torch()
def rotate_mesh(self, rot_angles):
""" rotates the mesh in the scene by rot_angles """
rotate = (Transform4f.rotate(axis=[1, 0, 0], angle=rot_angles[0]) *
Transform4f.rotate(axis=[0, 1, 0], angle=rot_angles[1]) *
Transform4f.rotate(axis=[0, 0, 1], angle=rot_angles[2]))
old_buf = uek.ravel(self.params['Object.vertex_positions_buf'])
new_buf = rotate.transform_point(old_buf)
uek.unravel(new_buf, self.params['Object.vertex_positions_buf'])
self.params.set_dirty('Object.vertex_positions_buf')
self.params.update()
def replace_mesh(self, verts, faces):
""" new_verts, new_faces are enoki vectors or pytorch tensors """
# transform torch tensors to enoki
if isinstance(verts, torch.Tensor) and isinstance(faces, torch.Tensor):
verts_ek = Vector3f(verts)
if self.requires_grad:
ek.set_requires_gradient(verts_ek, verts.requires_grad)
self.params_optim = verts_ek
self.params_optim_torch = verts
faces_ek = UInt32(faces.flatten())
elif isinstance(verts, Float32) and isinstance(faces, UInt32):
verts_ek = uek.ravel(verts)
faces_ek = faces
elif isinstance(verts, Vector3f) and isinstance(faces, UInt32):
verts_ek = verts
faces_ek = faces
else:
raise ValueError("Check types of verts and faces.")
# overwrite the vertex buf - we don't care about "inactive" vertices here
uek.unravel(verts_ek, self.params["Object.vertex_positions_buf"])
# 1..K need to be replaced by new faces
self.params["Object.faces_buf"][:ek.slices(faces_ek)] = faces_ek[:]
# K+1..N need to be set to 0
if self.faces_size > ek.slices(faces_ek):
self.params["Object.faces_buf"][ek.slices(faces_ek):] = \
ek.zero(UInt32, self.faces_size - ek.slices(faces_ek))[:]
self.params.set_dirty("Object.vertex_positions_buf")
self.params.set_dirty("Object.faces_buf")
self.params.update()
def requires_grad_(self, requires:bool=True):
self.requires_grad = requires
if requires:
verts_ek = uek.ravel(self.params['Object.vertex_positions_buf'])
ek.set_requires_gradient(verts_ek, True)
def render_scene(self, spp=None):
if self.requires_grad:
params_torch = {'vertices' : self.params_optim_torch}
# call the scene's integrator to render the loaded scene
rendered = render_torch(self.scene,
spp=spp,
**params_torch,
)
else:
rendered = render_torch(self.scene, spp=spp)
rendered = rendered.permute(2, 0, 1)
# rendered has 8 channels: r-g-b-d(AOV)-r-g-b-a
self.gray = rendered[0].unsqueeze(0) # one color channel
self.alpha = rendered[-1].unsqueeze(0) # alpha channel
def read_output(self, img_mode):
if img_mode == 'silhouette':
return self.read_silhouette()
elif img_mode == 'shading':
return self.read_shading()
elif img_mode == 'mooney':
img, blurred = self.make_mooney()
self.blurred = blurred
return img
else:
print('Not supported.')
def read_shading(self):
return self.gray
def read_alpha(self):
return self.alpha
def read_silhouette(self):
return 1-self.gray
def make_mooney(self):
# apply a Gaussian Filter on the whole image
blurred = self.smoother(self.gray.unsqueeze(0), keep_dim=True).squeeze(0)
if self.mooney_thresh_method == 'mean_mask':
# mean of mask pixels
mask = self.alpha.detach().bool()
mooney_thresh = torch.mean(torch.masked_select(blurred.detach(), mask))
elif self.mooney_thresh_method == 'otsu_mask':
# otsu of mask pixels
mask = self.alpha.detach().bool()
mask_pixels = torch.masked_select(blurred.detach(), mask).cpu().numpy()
mooney_thresh = threshold_otsu(mask_pixels)
elif self.mooney_thresh_method == 'otsu_bbox':
# otsu of bbox pixels
rmin, rmax, cmin, cmax = bbox_mask(self.alpha.detach().bool())
img = blurred.detach().cpu().numpy().squeeze()[rmin:rmax+1, cmin:cmax+1]
mooney_thresh = threshold_otsu(img)
        else:
            raise NotImplementedError(f"Unknown mooney_thresh_method: {self.mooney_thresh_method}")
# differentiable threshold operation with binary mask as output
# mooney = torch.relu(torch.tanh(1e3*(blurred-mooney_thresh)))
mooney = torch.sigmoid(1e4*(blurred-mooney_thresh))
# how many roi (mask or bbox) pixels are white?
# white_ratio = torch.mean((mooney[roi_idx] > mooney_thresh).float()).detach().cpu().numpy()
return mooney, blurred
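
# --- Illustrative check (assumption, not part of the original file) ---
# bbox_mask on a toy alpha mask: one True pixel at (row=2, col=3) should give
# rmin == rmax == 2 and cmin == cmax == 3.
# alpha = torch.zeros(1, 5, 6, dtype=torch.bool)
# alpha[0, 2, 3] = True
# print(bbox_mask(alpha))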
|
hakanyi/robust-vision-thesis
|
robust_vision/renderer.py
|
renderer.py
|
py
| 13,830 |
python
|
en
|
code
| 0 |
github-code
|
50
|
34036778313
|
import re
from datetime import datetime,timezone,timedelta
def to_timestamp(dt_str,tz_str):
    # extract the UTC offset (in hours) from the timezone string with a regex
    re_tz = re.match(r'UTC([+-]\d+):\d+',tz_str).group(1)
    print(re_tz)
    # parse the datetime string into a naive datetime object
    dt = datetime.strptime(dt_str,'%Y-%m-%d %H:%M:%S')
    print(dt)
    # build tzinfo for the parsed offset and attach it to make the datetime aware
    now_tzinfo = timezone(timedelta(hours=int(re_tz)))
    utc_dt = dt.replace(tzinfo = now_tzinfo)
    print(utc_dt)
    # convert the aware datetime to a POSIX timestamp
    return utc_dt.timestamp()

# tests
t1 = to_timestamp('2015-6-1 08:10:30', 'UTC+7:00')
assert t1 == 1433121030.0, t1
t2 = to_timestamp('2015-5-31 16:10:30', 'UTC-09:00')
assert t2 == 1433121030.0, t2
print('ok')
|
Brownchen/learn-python
|
do_datetime.py
|
do_datetime.py
|
py
| 899 |
python
|
en
|
code
| 0 |
github-code
|
50
|
13133407123
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 15 23:20:44 2020
@author: max20
"""
def countingSort(arr):
# number constraint: 0-99
arr_counter = [0] * 100
for i in range(len(arr)):
# add 1 to the corresponding position when each arr[i] appears
arr_counter[arr[i]] += 1
arr_sorted = []
for j in range(len(arr_counter)):
if arr_counter[j] != 0:
# add arr_counter[j] number of j to the sorted list
arr_sorted += [j] * arr_counter[j]
return arr_sorted
if __name__ == '__main__':
n = int(input())
arr = list(map(int, input().rstrip().split()))
result = countingSort(arr)
print(' '.join(map(str, result)))
|
max2004boy/Hackerrank-Problem-Solving
|
Counting Sort_part 2 practice_20200617.py
|
Counting Sort_part 2 practice_20200617.py
|
py
| 729 |
python
|
en
|
code
| 1 |
github-code
|
50
|
26054434766
|
#!/shared/software/python/bin/python3.2
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8-80 compliant>
# This script is not essential for the running of the farm
# since it only gives preview images used by the web front end.
#
# Keep this script running in its own shell.
# it loops and sends conversion commands.
import os
import time
DIR = "/render/mango/frames"
CONVERT = "/shared/software/render_farm/slideshow/exr_to_png.sh %s %s %d > /dev/null 2> /dev/null"
PNG_FILE = "/shared/software/render_farm/slideshow/preview"
NUM_IMAGES = 2
SLEEP = 120.0
def remote_command(cmd, ip="192.168.4.71", user="guest"):
cmd_final = "ssh -n -p 22 %s@%s 'umask 000 ; %s'" % (user, ip, cmd)
print(cmd_final)
os.system(cmd_final)
while 1:
print("scanning for exr files")
# find images to display
exr_files = []
for root, dirs, files in os.walk(DIR):
# print(root, files)
# skip old files...
if (os.sep + "old") in root:
continue
for file in files:
# print(os.path.join(root, file))
if file.endswith('.exr'):
name = os.path.join(root, file)
try:
st = os.stat(name)
except OSError:
import traceback
traceback.print_exc()
continue
if st.st_size > 10:
exr_files += [(name, st.st_mtime)]
exr_files.sort(key=lambda pair: pair[1])
exr_files = exr_files[-NUM_IMAGES:]
exr_files.reverse()
# convert images
if exr_files:
        for i in range(0, NUM_IMAGES):
            # use the i-th newest image, wrapping around if there are fewer files
            name, mtime = exr_files[i % len(exr_files)]
            print("converting big", name)
            command = CONVERT % (name, PNG_FILE + "_big" + str(i), 100)
            remote_command(command)
        for i in range(0, NUM_IMAGES):
            name, mtime = exr_files[i % len(exr_files)]
            print("converting small", name)
            command = CONVERT % (name, PNG_FILE + "_small" + str(i), 30)
            remote_command(command)
else:
print("Can't find any images in:", DIR)
# sleep a while until the next up
print("sleeping for", str(SLEEP / 60), "minutes")
time.sleep(SLEEP)
|
wesen/blender
|
bf-extensions/py/scripts/tools/bi_farm/slideshow/preview_images_update.py
|
preview_images_update.py
|
py
| 2,979 |
python
|
en
|
code
| 1 |
github-code
|
50
|
4897619739
|
from os import cpu_count
import argparse
import uuid
from math import ceil
import pathlib
from datetime import datetime
from multiprocessing import Pool
from functools import partial
import pandas as pd
from tqdm import tqdm
def save_chunk(df_chunk, output_dir):
outfile = f"{output_dir}{uuid.uuid4()}.json"
# TODO could make the orientation a param settable
# through arguments
df_chunk.to_json(outfile, orient="records", lines=True)
def check_args(args):
# check input_file exists
path_input_file = pathlib.Path(args.input_file)
if not path_input_file.exists():
raise ValueError(
f"{args.input_file} doesn't exist, recheck considering relative paths and such")
    # ensure output_dir ends with a "/"
if args.output_dir[-1] != "/":
args.output_dir = f"{args.output_dir}/"
# check that the output dir doesn't already exist
# could improve this to check that it's an empty dir
path_output_dir = pathlib.Path(args.output_dir)
if path_output_dir.exists():
raise ValueError(
f"{args.output_dir} already exists - please specify another output directory or delete it")
else:
# create the output folder
path_output_dir.mkdir(parents=True)
# can't chunk more rows than we read in
    if args.nrows:
        if args.nrows <= args.chunksize:
            raise ValueError(
                f"Number of rows to be loaded ({args.nrows}) must be larger than chunksize ({args.chunksize}) - load more rows or reduce chunksize.")
def execute(input_file, output_dir, chunksize, processes, nrows=None):
num_tasks = None
if nrows:
num_tasks = ceil(nrows / chunksize)
else:
        # determining the number of lines in a large file (and hence the number
        # of tasks) takes too long to be worth the trouble
        pass
chunked_df = pd.read_json(input_file, chunksize=chunksize,
lines=True, nrows=nrows)
with Pool(processes) as p:
start = datetime.now()
# used https://clay-atlas.com/us/blog/2021/08/02/python-en-use-multi-processing-pool-progress-bar/
# as reference for displaying progress bar
for _ in tqdm(
p.imap_unordered(
partial(save_chunk, output_dir=output_dir), chunked_df, chunksize=1),
total=num_tasks
):
pass
p.close()
p.join()
print("duration in seconds", (datetime.now()-start).total_seconds())
if __name__ == '__main__':
arg_parser = argparse.ArgumentParser(prog="Large JSON Chunker",
description="Write out a large JSON file into multiple smaller files")
arg_parser.add_argument("-input_file",
help="path to large JSON file")
arg_parser.add_argument("-output_dir",
help="Output directory where we'll write out the data - must NOT already exist e.g. output/")
arg_parser.add_argument("-nrows",
help="Number of lines to read from large JSON file. If not specified all lines are used.",
nargs="?",
default=None,
type=int
)
arg_parser.add_argument("-chunksize",
help="Process the large JSON file as separate chunks of this size",
nargs="?",
default=10_000,
type=int
)
arg_parser.add_argument("-processes",
help="Number of processes to spawn to write out the chunks, defaults to number of CPUs.",
nargs="?",
default=cpu_count(),
type=int
)
args = arg_parser.parse_args()
check_args(args)
execute(args.input_file, args.output_dir, args.chunksize, args.processes, args.nrows)
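
# --- Illustrative usage (assumption, not part of the original file) ---
# python chunk_large_json.py -input_file big.json -output_dir out/ \
#     -chunksize 50000 -processes 4
# Each chunk lands in out/<uuid>.json in JSON-lines ("records") format.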
|
ryancollingwood/chunk_large_json
|
chunk_large_json.py
|
chunk_large_json.py
|
py
| 4,004 |
python
|
en
|
code
| 0 |
github-code
|
50
|
19450987757
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File : test_auth.py
# @Author : mocobk
# @Email : [email protected]
# @Time : 2020/3/11 14:23
import pytest
def test_get_users_info(auth_client, default_super_user):
res = auth_client.get("/v1/users")
assert res.status_code == 200, res.json
assert isinstance(res.json, list) and len(res.json) >= 1
@pytest.mark.parametrize('tmp_user', [{'username': 'no_pemission_user', 'role': 'Member'}], indirect=True)
def test_get_users_with_no_permissions(get_auth_client, tmp_user):
res = get_auth_client(tmp_user).get("/v1/users")
assert res.status_code == 403
assert res.json.get('code') == 'AccessDenied'
def test_get_single_user_info(auth_client, default_super_user):
res = auth_client.get(f"/v1/users/{default_super_user.id}")
assert res.status_code == 200, res.json
assert isinstance(res.json, dict) and res.json.get('id') == default_super_user.id
if __name__ == '__main__':
pytest.main()
|
mocobk/MonlineServer
|
tests/test_users.py
|
test_users.py
|
py
| 983 |
python
|
en
|
code
| 0 |
github-code
|
50
|
419802579
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/5/5 21:39
# @Author : lingxiangxiang
# @File : demon2.py
import pymysql
class TestMysql(object):
def __init__(self):
self.dbConfig = {
"host": "192.168.48.136",
"port": 3306,
"user": "xiang",
"passwd": "xiang",
"db": "test"
}
        conn = pymysql.connect(**self.dbConfig)
        self.conn = conn
def select(self):
print("select")
def update(self):
print("update")
    def end(self):
        self.conn.close()
if __name__ == '__main__':
conn = TestMysql()
|
ajing2/LivePython1
|
第十五课/demon2.py
|
demon2.py
|
py
| 640 |
python
|
en
|
code
| 2 |
github-code
|
50
|
33932476328
|
from tree_node import Tree_Node
class AVL_Tree:
def __init__(self):
self.root = None
def add(self, value):
key = self.hash(value)
if self.root == None:
self.root = Tree_Node((key, value))
else:
current_node = self.root
found = False
while not found:
current_key = current_node.data[0]
if key > current_key:
if current_node.has_right_child():
current_node = current_node.right_child
else:
current_node.set_right_child((key, value))
found = True
else:
if current_node.has_left_child():
current_node = current_node.left_child
else:
current_node.set_left_child((key, value))
found = True
self.update_balance_factors()
self.root = self.rearrange_from(self.root)
def add_values(self, *args):
for value in args:
self.add(value)
def hash(self, value):
hash_code = 0
if type(value) == str:
for character in value:
hash_code += ord(character)
else:
hash_code = value
return hash_code
def search_for(self, value):
current_node = self.root
target_key = self.hash(value)
print(f"Starting from {current_node.data[1]}")
while True:
current_key = current_node.data[0]
current_value = current_node.data[1]
if current_key == target_key and current_value == value:
return current_node.data
elif target_key <= current_key and current_node.has_left_child():
current_node = current_node.left_child
print(f"{target_key} <= {current_key}. Go left")
elif target_key > current_key and current_node.has_right_child():
current_node = current_node.right_child
print(f"{target_key} > {current_key}. Go right")
else:
print(f"No more child nodes found with value = {value}")
return None
def __str__(self) -> str:
self.update_levels()
return str(self.root)
def left_rotate(self, target_node: Tree_Node):
        if not target_node.has_right_child():
            print(f"Cannot perform left rotation on Node {target_node.data}")
            return target_node
new_root = target_node.right_child
target_node.right_child = new_root.left_child
new_root.left_child = target_node
return new_root
def right_rotate(self, target_node: Tree_Node):
if not target_node.has_left_child():
print(f"Cannot perform right rotation on Node {target_node.data}")
return target_node
new_root = target_node.left_child
target_node.left_child = new_root.right_child
new_root.right_child = target_node
return new_root
def left_right_rotate(self, target_node: Tree_Node):
target_node.left_child = self.left_rotate(target_node.left_child)
new_root = self.right_rotate(target_node)
return new_root
def right_left_rotate(self, target_node: Tree_Node):
target_node.right_child = self.right_rotate(target_node.right_child)
new_root = self.left_rotate(target_node)
return new_root
# For String representation
def update_levels(self, iteration_num = 1):
self.root.level = iteration_num - 1
left_child = AVL_Tree()
right_child = AVL_Tree()
if self.root.has_left_child():
left_child.root = self.root.left_child
left_child.update_levels(iteration_num=iteration_num + 1)
if self.root.has_right_child():
right_child.root = self.root.right_child
right_child.update_levels(iteration_num=iteration_num + 1)
def update_balance_factors(self):
left_height = self.get_height(self.root.left_child)
right_height = self.get_height(self.root.right_child)
left_child = AVL_Tree()
right_child = AVL_Tree()
self.root.balance_factor = left_height - right_height
if self.root.has_left_child():
left_child.root = self.root.left_child
left_child.update_balance_factors()
if self.root.has_right_child():
right_child.root = self.root.right_child
right_child.update_balance_factors()
def get_height(self, node: Tree_Node):
if node == None:
return -1
elif not node.has_left_child() and not node.has_right_child():
return 0
else:
left_child_height = self.get_height(node.left_child)
right_child_height = self.get_height(node.right_child)
return max([left_child_height, right_child_height]) + 1
def rearrange_from(self, root: Tree_Node):
new_root = root
if root != None and root.has_children():
root.left_child = self.rearrange_from(root.left_child)
root.right_child = self.rearrange_from(root.right_child)
if root.balance_factor > 1:
if root.left_child.has_left_child():
new_root = self.right_rotate(target_node=root)
else:
new_root = self.left_right_rotate(target_node=root)
elif root.balance_factor < -1:
if root.right_child.has_right_child():
new_root = self.left_rotate(target_node=root)
else:
new_root = self.right_left_rotate(target_node=root)
return new_root
test_tree = AVL_Tree()
test_tree.add_values(1,4,2,3,7,9,5)
print(test_tree)
test_tree.update_balance_factors()
print(f"Root Balance Factor: {test_tree.root.balance_factor}")
|
ehimen-io/python-dsa
|
data-structures/complex-data-structures/trees/avl-tree/avl_tree.py
|
avl_tree.py
|
py
| 6,028 |
python
|
en
|
code
| 0 |
github-code
|
50
|
74890377756
|
import requests
import re
import json
from os import listdir
from random import randrange
#### Created by Matthew Franklin ###
#### Please see www.github.com/mefranklin6 for license, readme, updates ###
# Execute this code on your computer, don't put it on the processor
###############################################################################
# Begin User Variables
###############################################################################
#### Commonly Changed Variables ####
# Names your project descriptor file, processors, and TLP's
SystemName = 'TestRoom'
MainProcessor_IP = '192.168.253.250'
MainProcessor_AVLAN_IP = '192.168.254.250'
First_TLP_IP = '192.168.253.8'
Second_TLP_IP = '' # Leave empty string if none
####################################
#### Setup Variables ####
# Root directory of the project on your computer
# ex: 'C:/Users/<YOURUSER>/Documents/<PROJECTFOLDERNAME>'
ProjectRootDirectory = 'C:/Users/mefranklin/Documents/Github/VSCodeTemplate'
# Directory of your GUI Files.
# Make sure the model number of the TLP is in the file name ex: 'ClientName_525M.gdl'
# Make sure there's only one file per TLP model in the directory
GUI_File_Directory = 'C:/Users/mefranklin/Documents/Github/VSCodeTemplate/layout'
# Default project descriptor JSON file location
# !!!! IMPORTANT Do not have this in the root of your project file !!!!
Default_JSON_File_Location = 'C:/Users/mefranklin/Documents/Github/VSCodeTemplate/rfile/DEFAULT.json'
#### Backend Variables ####
"""
instead of this manual dictionary of {Model : PartNumber},
it could also be possible to script a login to the admin web interface
and grab the part number there (or login ssh/telnet)
I wanted to keep web scraping and passwords to a mimimum,
hence the hardcoded dictionary below """
ProcessorModels = {
'IPCP Pro 355MQ xi' : '60-1919-01',
'IPCP Pro 550' : '60-1418-01',
'IPCP Pro 550 xi' : '60-1913-01A',
'IPCP Pro 555Q xi' : '60-1917-01A',
'IPCP Pro 555' : '60-1434-01',
'IPL Pro S6' : '60-1414-01',
'IPCP Pro 250' : '60-1429-01',
'IPCP Pro 350' : '60-1417-01',
}
TLP_Models = {
'TLP Pro 525M' : '60-1561-02',
'TLP Pro 525T' : '60-1559-02',
'TLP Pro 725M' : '60-1563-02',
'TLP Pro 725T' : '60-1562-02',
'TLP Pro 1025T' : '60-1565-02',
'TLP Pro 1025M' : '60-1566-02',
}
# Models with both LAN and AVLAN
AVLAN_Processors = [
'IPCP Pro 255',
'IPCP Pro 350',
'IPCP Pro 355',
'IPCP Pro 360',
'IPCP Pro 555',
'IPCP Pro 255Q xi',
'IPCP Pro 350 xi',
'IPCP Pro 355MQ xi', #built in 1808
'IPCP Pro 355DRQ xi',
'IPCP Pro 555Q xi'
]
###############################################################################
# End User Variables
###############################################################################
def ScrapeWebInterface(ip):
HTTP = requests.get(f'https://{ip}/www/index.html', verify=False)
return HTTP.text
def ExtractModelName(ip):
HTTP = ScrapeWebInterface(ip)
HTTPSearch = re.search(r'var device_name =(.*?);', HTTP)
DeviceModel = HTTPSearch.group(1).strip().replace('"', '')
return DeviceModel
def GetPartNumber(model_name):
if model_name in ProcessorModels.keys():
model_number = ProcessorModels[model_name]
return model_number
if model_name in TLP_Models.keys():
model_number = TLP_Models[model_name]
return model_number
else:
print(f'Can not find TLP Part Number for {model_name}')
class Processor:
def __init__(self, address, avlan_address):
self.address = address
self.avlan_address = avlan_address
self.model_name = ExtractModelName(self.address)
self.part_number = GetPartNumber(self.model_name)
self.Has_AVLAN = self.DecideProcessorNetworks(self.model_name)
def DecideProcessorNetworks(self, model_name):
for AVLAN_Processor in AVLAN_Processors:
if model_name in AVLAN_Processor:
return True
return False
class TLP:
def __init__(self, address):
self.address = address
self.model_name = ExtractModelName(self.address)
self.part_number = GetPartNumber(self.model_name)
self.layout_file = self.GUI_Selector(self.model_name)
def GUI_Selector(self, tlp_model_name):
TLP_ModelNumberOnly = re.search(r'(\d{3,4})', tlp_model_name)
GUI_Files = listdir(GUI_File_Directory)
for GUI_File in GUI_Files:
if TLP_ModelNumberOnly[1] in GUI_File:
return GUI_File
if len(GUI_Files) == 1:
return GUI_Files[0]
MainProcessor = Processor(MainProcessor_IP, MainProcessor_AVLAN_IP)
First_TLP = TLP(First_TLP_IP)
Second_TLP_Exist = False
if Second_TLP_IP is not None and Second_TLP_IP != '':
Second_TLP_Exist = True
Second_TLP = TLP(Second_TLP_IP)
with open(Default_JSON_File_Location, 'r') as DefaultJSON_File:
JSON_Data = json.load(DefaultJSON_File)
JSON_Data['system']['name'] = SystemName
JSON_Data['system']['system_id'] = str(randrange(1000, 9999))
# Set Main Processor
JSON_Data['devices'][0]['name'] = f'{SystemName} - MainProcessor'
JSON_Data['devices'][0]['part_number'] = MainProcessor.part_number
JSON_Data['devices'][0]['network']['interfaces'][0]['address'] = MainProcessor.address
if MainProcessor.Has_AVLAN == True:
JSON_Data['devices'][0]['network']['interfaces'][1]['address'] = MainProcessor.avlan_address
else:
del(JSON_Data['devices'][0]['network']['interfaces'][1]) # if no AVLAN
# Set TLP('s)
JSON_Data['devices'][1]['name'] = f'{SystemName} - MainTLP'
JSON_Data['devices'][1]['part_number'] = First_TLP.part_number
JSON_Data['devices'][1]['network']['interfaces'][0]['address'] = First_TLP.address
JSON_Data['devices'][1]['ui']['layout_file'] = First_TLP.layout_file
if Second_TLP_Exist:
JSON_Data['devices'][2]['name'] = f'{SystemName} - SecondTLP'
JSON_Data['devices'][2]['part_number'] = Second_TLP.part_number
JSON_Data['devices'][2]['network']['interfaces'][0]['address'] = Second_TLP.address
JSON_Data['devices'][2]['ui']['layout_file'] = Second_TLP.layout_file
else:
del(JSON_Data['devices'][2]) # if no second TLP
with open(f'{ProjectRootDirectory}/{SystemName}.json', 'w') as New_JSON_File:
json.dump(JSON_Data, New_JSON_File)
repo_name: mefranklin6/ControlDeploymentHelper | sub_path: ControlDeploymentHelper.py | file_name: ControlDeploymentHelper.py | file_ext: py | file_size_in_byte: 6,752 | program_lang: python | lang: en | doc_type: code | stars: 1 | dataset: github-code | pt: 50
seq_id: 12822281609
# https://leetcode.com/problems/longest-common-prefix/
# BINARY-SEARCH
class Solution:
    # returns True if every word shares the prefix strs[0][:leng]
def isCP(self, strs, leng)->bool:
sub_stri = strs[0][:leng]
for i in range(1, len(strs)):
if strs[i][0:leng]!=sub_stri:
return False
return True
def longestCommonPrefix(self, strs) -> str:
if len(strs)==0:
return ""
min_len = int(pow(2, 31)-1)
for stri in strs:
min_len = min(min_len, len(stri))
low = 1
high = min_len
while low <= high:
mid = (low+high)//2
if (self.isCP(strs, mid)):
low = mid+1
else:
high = mid-1
return strs[0][:(low+high)//2]
# "maximum recursion depth exceeded"
# It is a guard against a stack overflow
# Python (or rather, the CPython implementation) doesn't optimize tail recursion,
# and unbridled recursion causes stack overflows.
# import sys
# print(sys.getrecursionlimit())
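# A minimal sketch of inspecting and raising the cap (the new value is illustrative):
# import sys
# sys.setrecursionlimit(10_000)  # lift the default (~1000) before very deep recursion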
class Solution:
def prefix_checker(self, strs, low, high):
if (low>=high):
return strs[low]
else:
            mid = (low + high) // 2
left_prefix = self.prefix_checker(strs, low, mid)
right_prefix = self.prefix_checker(strs, mid+1, high)
return self.common_prefix(left_prefix, right_prefix)
def common_prefix(self, left, right):
mini = min(len(left), len(right))
for i in range(mini):
if left[i]!=right[i]:
return left[:i]
return left[:mini]
def longestCommonPrefix(self, strs) -> str:
if len(strs)==0:
return ""
return self.prefix_checker(strs, 0, len(strs)-1)
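# Example usage (illustrative input, not part of the original solution):
if __name__ == "__main__":
    print(Solution().longestCommonPrefix(["flower", "flow", "flight"]))  # -> "fl"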
repo_name: bitan2988/leetcode | sub_path: longest_common_prefix.py | file_name: longest_common_prefix.py | file_ext: py | file_size_in_byte: 1,917 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 50
seq_id: 34400326511
import numpy as np
import pandas as pd
import math
from model import NaiveBayes
def pre_processing(df):
    # split the frame: X = every column except the last, y = the last (label) column
    X = df.drop([df.columns[-1]], axis = 1)
    y = df[df.columns[-1]]
    return X, y
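# Minimal sketch of pre_processing on an in-memory frame; the column names here
# are illustrative, not taken from the real weather.txt:
#   >>> demo = pd.DataFrame({"Outlook": ["Sunny", "Rainy"], "Play": ["No", "Yes"]})
#   >>> X, y = pre_processing(demo)  # X keeps "Outlook", y is the "Play" labels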
if __name__ == "__main__":
df = pd.read_table("weather.txt")
X,Y = pre_processing(df)
model = NaiveBayes()
model.fit(X,Y)
print(model.predict(X))
repo_name: ahmedbasemdev/Machine-Learning-Algorithms-From-Scratch | sub_path: 04- Naive Bayes/main.py | file_name: main.py | file_ext: py | file_size_in_byte: 352 | program_lang: python | lang: en | doc_type: code | stars: 1 | dataset: github-code | pt: 50
seq_id: 26228265221
print("Задача 6")
# Реализуйте программу,
# которая запрашивает два числа у пользователя.
# После этого у каждого числа возьмите две последние цифры.
# Получившиеся два числа сложите и выведите на экран.
# Пример:
# Введите первое число: 456
# Введите второе число: 123
# Сумма: 79
# переменная, которая показывает, что пользователь неправильно ввел данные
n = "=" * 26
print(n)
# вводим значение переменных
number_1 = int(input("Введите первое число: "))
number_2 = int(input("Введите второе число: "))
# вычитаем первые две цифры с числа
number_1 %= 100
number_2 %= 100
result = number_1 + number_2
# ответ
print("Сума:", result)
repo_name: little-beetle/skillbox_python_basic | sub_path: module_3/task_6.py | file_name: task_6.py | file_ext: py | file_size_in_byte: 999 | program_lang: python | lang: ru | doc_type: code | stars: 0 | dataset: github-code | pt: 50
seq_id: 29002666693
import socket
import datetime
# code written by William Ngo for CS3357
UDP_IP = "127.0.0.1"
UDP_PORT = 5005
properQuestion = "What is the current date and time?"
while True:
mySocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
mySocket.bind((UDP_IP, UDP_PORT)) # still need to bind port when handling UDP server
print("UDP server is now running...") # no "handshake" or connection required, and we use datagrams which don't need a connection
while True:
data, clientAddress = mySocket.recvfrom(100) # since no connection to client established, we need to keep track of the client's address
decodeData = data.decode()
print("We received a request: ", decodeData)
if decodeData.lower() == properQuestion.lower():
rawDate = datetime.datetime.now()
formattedDate = rawDate.strftime("%m/%d/%Y %H:%M:%S")
dateMessage = "Current Date and Time: " + formattedDate
mySocket.sendto(dateMessage.encode(), clientAddress) # send encoded message back to the address it came from
break
else:
error = "Invalid question. Please enter your request again."
mySocket.sendto(error.encode(), clientAddress)
    mySocket.close()  # release the port so the re-bind at the top of the outer loop doesn't fail
    print()
    print("Waiting to receive another request...")
repo_name: wngo1337/CS3357 | sub_path: asn2/Server_UDP.py | file_name: Server_UDP.py | file_ext: py | file_size_in_byte: 1,326 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 50
seq_id: 41501331937
# #################################################################
# File name: Týr.py
# Author: Need4Swede
# Company: N/A
# Contact: [email protected]
# Description: Omniscient Database for Inventory Notation
# #################################################################
## LIBRARY IMPORTS ################################################
import darkdetect
import os, sqlite3, sys, platform, string, os.path, webbrowser, shutil, csv, simpleaudio, getpass, qdarktheme
import pandas as pd
from fpdf import FPDF
from PyQt6 import *
from csv import reader
from datetime import date
from PyQt6.QtGui import *
from PyQt6.QtWidgets import *
from PyQt6.QtCore import *
## USER DIRECTORY IMPORTS #########################################
global root_dir
app_modules = True
if app_modules:
    root_dir = os.path.dirname(os.path.abspath(__file__))
    path_parent = os.path.dirname(os.getcwd())
    root_dir = path_parent + "/ODIN"  # re-point root at the sibling ODIN directory
sys.path.append('../ODIN')
import Mimisbrunnr.Mimisbrunnr_1 as Mimisbrunnr_1
import Mimisbrunnr.Mimisbrunnr_2 as Mimisbrunnr_2
from Tyr.clear_term import clear_term
with open(root_dir + "/Tyr/users.csv", 'r') as read_obj:
csv_reader = reader(read_obj)
ad_users = list(csv_reader)
length = len(ad_users)
user_list = ["Assign To...", "To Realm"]
for x in range(length):
user_list = user_list + ad_users[x]
###################################################################
## DIRECTORIES ####################################################
global inventory_db
app_dir = True
if app_dir:
user = getpass.getuser()
documentation_dir = (root_dir + "/Documentation")
mimir_dir = (root_dir + "/Mimir")
mimisbrunnr_dir = (root_dir + "/Mimisbrunnr")
mimisbrunnr_export_dir = (mimisbrunnr_dir + "/exports/")
tyr_dir = (root_dir + "/Tyr")
tyr_log_dir = (tyr_dir + "/logs/")
tyr_log = (tyr_log_dir + "log.txt")
tyr_log_tutorial = (tyr_log_dir + "tutorial.txt")
freya_dir = (root_dir + "/Freya")
sounds_dir = (freya_dir + "/sounds/")
if not os.path.isdir(mimir_dir):
os.makedirs(mimir_dir)
if os.path.isdir(documentation_dir):
if os.path.isdir(mimir_dir + "/Documentation"):
shutil.rmtree(mimir_dir + "/Documentation")
shutil.copytree(documentation_dir, mimir_dir + "/Documentation")
shutil.rmtree(documentation_dir)
if not os.path.isdir(tyr_log_dir):
os.makedirs(tyr_log_dir)
inventory_db = mimir_dir + "/Mimir.db"
date_today = date.today()
today = date_today.strftime("%B %d, %Y")
clean_dir = True
if clean_dir:
if os.path.isdir(root_dir + "/__pycache__"):
shutil.rmtree(root_dir + "/__pycache__")
    if os.path.isdir(freya_dir + "/__pycache__"):
        shutil.rmtree(freya_dir + "/__pycache__")
    if os.path.isdir(mimisbrunnr_dir + "/__pycache__"):
        shutil.rmtree(mimisbrunnr_dir + "/__pycache__")
    if os.path.isdir(tyr_dir + "/__pycache__"):
        shutil.rmtree(tyr_dir + "/__pycache__")
## ICONS/IMAGES ##############
app_icons = True
if app_icons:
png_lab = root_dir + "/Tyr/Icons/lab.png"
png_add = root_dir + "/Tyr/Icons/add.png"
png_delete = root_dir + "/Tyr/Icons/delete.png"
png_search = root_dir + "/Tyr/Icons/search.png"
png_run = root_dir + "/Tyr/Icons/run.png"
png_info = root_dir + "/Tyr/Icons/information.png"
png_view = root_dir + "/Tyr/Icons/view.png"
png_export = root_dir + "/Tyr/Icons/export.png"
png_clear = root_dir + "/Tyr/Icons/clear.png"
png_refresh = root_dir + "/Tyr/Icons/refresh.png"
png_update = root_dir + "/Tyr/Icons/update.png"
png_move = root_dir + "/Tyr/Icons/move.png"
png_logo = root_dir + "/Tyr/Icons/tyr-icon.png"
png_db_primary = root_dir + "/Tyr/Icons/tyr-icon.png"
if darkdetect.isDark():
png_lab = root_dir + "/Tyr/Icons/dark/lab.png"
png_add = root_dir + "/Tyr/Icons/dark/add.png"
png_delete = root_dir + "/Tyr/Icons/dark/delete.png"
png_search = root_dir + "/Tyr/Icons/dark/search.png"
png_run = root_dir + "/Tyr/Icons/dark/run.png"
png_info = root_dir + "/Tyr/Icons/dark/information.png"
png_view = root_dir + "/Tyr/Icons/dark/view.png"
png_export = root_dir + "/Tyr/Icons/dark/export.png"
png_clear = root_dir + "/Tyr/Icons/dark/clear.png"
png_refresh = root_dir + "/Tyr/Icons/dark/refresh.png"
png_update = root_dir + "/Tyr/Icons/dark/update.png"
png_move = root_dir + "/Tyr/Icons/dark/move.png"
png_logo = root_dir + "/Tyr/Icons/dark/tyr-icon.png"
png_db_primary = root_dir + "/Tyr/Icons/dark/tyr-icon.png"
## SOUND FILES ###############
app_sounds = True
if app_sounds:
freya_speak = simpleaudio.WaveObject.from_wave_file
#### SOUNDS ###########
confirm_entry = freya_speak(sounds_dir + 'confirm_entry.wav')
deny_entry = freya_speak(sounds_dir + 'discard_entry.wav')
## TUTORIAL ####
tyr_start = freya_speak(sounds_dir + 'tutorial/Tyr/tyr_start.wav')
tyr_serial = freya_speak(sounds_dir + 'tutorial/Tyr/tyr_serial.wav')
tyr_IP = freya_speak(sounds_dir + 'tutorial/Tyr/tyr_ip_address.wav')
tyr_initialize = freya_speak(sounds_dir + 'tutorial/Tyr/tyr_initialize.wav')
tyr_entry_added = freya_speak(sounds_dir + 'tutorial/Tyr/tyr_entry_added.wav')
#########################
## INPUT LABELS ###################################################
## MAIN LABELS ###############
main_labels = True
if main_labels:
lb_id = "ID #"
lb_site = "Site:"
lb_location = "Location:"
lb_product = "Selection:"
lb_make = "Make:"
lb_asset = "Asset Tag:"
lb_reference = "Reference:"
lb_assigned = "Assigned:"
lb_status = "Status:"
lb_date = "Date:"
lb_info = "Info:"
lb_deployed = "Deployed"
lb_instock = "In Stock"
lb_onorder = "On Order"
lb_oos_repair = "Out of Service - Needs Repair"
lb_oos_obsolete = "Out of Service - Obsolete"
###############################
## DROP LABELS ###############
drop_labels = True
if drop_labels:
lb_default_dropdown = "Choose from List"
lb_ap = "Access Point"
lb_colors = "Black", "Blue", "Brown", "Green", "Grey", "Yellow", "White"
lb_desktop = "Desktop - Windows"
lb_dvr = "DVR"
lb_chromebooks = "Laptop - Chromebook"
lb_winlaptops = "Laptop - Windows"
lb_locprinters = "Printer - Local"
lb_netprinters = "Printer - Network"
lb_server = "Server"
lb_switch = "Switch"
lb_toner = "Printer - Toner"
lb_aesir = "Aesir"
lb_vanir = "Vanir"
lb_midgard = "Midgard"
lb_locations_vanir = ["Choose a Realm"]
lb_locations_aesir = ["Choose a Realm"]
alpha = string.ascii_uppercase
def list_vanir_locations():
for x in range(1,25):
lb_locations_vanir.append("Realm " + str(x))
def list_aesir_locations():
lb_locations_aesir.append("Asgard")
lb_locations_aesir.append("-----------")
for x in range(0, 26):
lb_locations_aesir.append("Realm " + alpha[x])
list_aesir_locations()
list_vanir_locations()
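    # Resulting dropdown contents (illustrative):
    #   lb_locations_vanir -> ["Choose a Realm", "Realm 1", ..., "Realm 24"]
    #   lb_locations_aesir -> ["Choose a Realm", "Asgard", "-----------", "Realm A", ..., "Realm Z"]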
###############################
## DROP-SUB LABELS ###########
drop_sub_labels = True
if drop_sub_labels:
lb_brands_dvr = "LTS Security", "Generic"
lb_brands_desktops = "Dell", "Custom", "HP", "Lenovo"
lb_brands_chromebook = "Dell", "HP", "Lenovo"
lb_brands_laptop = "Dell", "HP", "Lenovo", "Surface"
lb_brands_printer = "Brother", "Canon", "HP"
lb_tbd = "TBD"
###############################
## INFORMATION ####################################################
app_info = True
if app_info:
app_title = "Týr"
app_version = "(Build: v2.8)"
info_title = "About"
app_description = "ODIN's Adaptive Asset Management System"
app_framework = "Python 3.9 / PyQt6 / SQLite3"
app_contact = "Contact: Need4Swede | [email protected]"
## MIMISBRUNNR LIST ###############################################
app_Mimisbrunnr = True
if app_Mimisbrunnr:
db_primary = "Mimisbrunnr 1"
db_secondary = "Mimisbrunnr 2"
db_tertiary = "Mimisbrunnr 3"
###################################################################
## CLEAR TERMINAL
clear_term()
## TYR MAIN #######################################################
class MainWindow(QMainWindow):
def __init__(self, *args, **kwargs):
super(MainWindow, self).__init__(*args, **kwargs)
self.setWindowIcon(QIcon(png_lab))
self.setWindowTitle(app_title)
self.showMaximized()
# self.showFullScreen()
if platform.system() == "Windows":
self.setMinimumSize(1200, 800)
global tutorial
if not os.path.isfile(tyr_log) or not os.path.isfile(tyr_log_tutorial):
with open(tyr_log, 'w') as f:
f.write('First time starting Tyr!\n')
with open(tyr_log_tutorial, 'w') as f:
f.write('Disabled')
tutorial = QMessageBox.question(self, 'Tutorial', 'Welcome to Týr!\n\nWould you like to use the Tutorial?',
QMessageBox.StandardButton.Yes | QMessageBox.StandardButton.No, QMessageBox.StandardButton.Yes)
if tutorial == QMessageBox.StandardButton.Yes:
tyr_start.play()
else:
tutorial = QMessageBox.StandardButton.No
AppLog('main', 'start', '', '', '')
# -------------------------------- #
# Menubar and Toolbar #
# -------------------------------- #
file_menu = self.menuBar().addMenu("&File")
help_menu = self.menuBar().addMenu("&About")
toolbar = QToolBar()
toolbar.setMovable(False)
self.addToolBar(toolbar)
toolbar.hide()
statusbar = QStatusBar()
self.setStatusBar(statusbar)
# ========== Menubar ========== #
add_item_action = QAction(QIcon(png_add), "Add New", self)
add_item_action.triggered.connect(self.insert)
file_menu.addAction(add_item_action)
search_item_action = QAction(
QIcon(png_search), "Search", self)
search_item_action.triggered.connect(self.search)
file_menu.addAction(search_item_action)
del_item_action = QAction(QIcon(png_delete), "Delete", self)
del_item_action.triggered.connect(self.delete)
file_menu.addAction(del_item_action)
export_item_action = QAction(QIcon(png_export), "Export", self)
export_item_action.triggered.connect(self.export)
file_menu.addAction(export_item_action)
file_menu.addSeparator()
quit_action = QAction("Exit", self)
quit_action.triggered.connect(self.quit)
file_menu.addAction(quit_action)
about_action = QAction(QIcon(png_info), "Info",
self)
about_action.triggered.connect(self.about)
help_menu.addAction(about_action)
# ========== Toolbar ========== #
# Set toolbar spacing
toolbar.setStyleSheet("QToolBar{spacing:10px;}")
btn_add_item = QAction(QIcon(png_add), "Add New",
self)
btn_add_item.triggered.connect(self.insert)
btn_add_item.setStatusTip("Add new item")
toolbar.addAction(btn_add_item)
btn_view_all = QAction(QIcon(png_view), "View All",
self)
btn_view_all.triggered.connect(self.load_data)
btn_view_all.setStatusTip("View all")
toolbar.addAction(btn_view_all)
btn_search_item = QAction(QIcon(png_search), "Search",
self)
btn_search_item.triggered.connect(self.search_item)
btn_search_item.setStatusTip("Search")
toolbar.addAction(btn_search_item)
btn_delete_item = QAction(
QIcon(png_delete), "Delete", self)
btn_delete_item.triggered.connect(self.delete)
btn_delete_item.setStatusTip("Delete")
toolbar.addAction(btn_delete_item)
btn_export = QAction(QIcon(png_export), "Export to CSV", self)
btn_export.triggered.connect(self.export)
btn_export.setStatusTip("Export to CSV")
toolbar.addAction(btn_export)
# ========== Button Widgets ========== #
global btn_add
btn_add = QPushButton("Add", self)
btn_add.clicked.connect(self.insert)
btn_add.setIcon(QIcon(png_add))
btn_add.hide()
btn_add.setFixedWidth(100)
btn_add.setFixedHeight(35)
global btn_move
btn_move = QPushButton("Move", self)
btn_move.clicked.connect(self.move)
btn_move.setIcon(QIcon(png_move))
btn_move.hide()
btn_move.setFixedWidth(100)
btn_move.setFixedHeight(35)
btn_refresh = QPushButton("Refresh", self)
btn_refresh.clicked.connect(self.refresh)
btn_refresh.setIcon(QIcon(png_refresh))
btn_refresh.setFixedWidth(100)
btn_refresh.setFixedHeight(35)
global btn_clear
btn_clear = QPushButton("Clear", self)
btn_clear.clicked.connect(self.clear)
btn_clear.setIcon(QIcon(png_clear))
btn_clear.hide()
btn_clear.setFixedWidth(100)
btn_clear.setFixedHeight(35)
global search_bar
self.search_box = QLineEdit()
search_bar = self.search_box
self.search_box.setPlaceholderText("ID #...")
search_bar.hide()
search_bar.setFixedWidth(100)
search_bar.setFixedHeight(20)
global btn_search
btn_search = QPushButton("Search ID", self)
btn_search.clicked.connect(self.search_item)
self.search_box.returnPressed.connect(btn_search.click)
btn_search.setIcon(QIcon(png_search))
btn_search.hide()
btn_search.setFixedWidth(100)
btn_search.setFixedHeight(35)
global search_bar_asset_tag
self.search_box_asset_tag = QLineEdit()
search_bar_asset_tag = self.search_box_asset_tag
self.search_box_asset_tag.setPlaceholderText("Asset Tag...")
search_bar_asset_tag.hide()
search_bar_asset_tag.setFixedWidth(100)
search_bar_asset_tag.setFixedHeight(20)
global btn_search_asset_tag
btn_search_asset_tag = QPushButton("Search Tag", self)
btn_search_asset_tag.clicked.connect(self.search_asset_tag)
self.search_box_asset_tag.returnPressed.connect(btn_search_asset_tag.click)
btn_search_asset_tag.setIcon(QIcon(png_search))
btn_search_asset_tag.hide()
btn_search_asset_tag.setFixedWidth(100)
btn_search_asset_tag.setFixedHeight(35)
global search_bar_general
self.search_box_general = QLineEdit()
search_bar_general = self.search_box_general
self.search_box_general.setPlaceholderText("Console...")
search_bar_general.hide()
search_bar_general.setFixedWidth(100)
search_bar_general.setFixedHeight(20)
global btn_search_general
btn_search_general = QPushButton("Run", self)
btn_search_general.clicked.connect(self.search_general)
self.search_box_general.returnPressed.connect(btn_search_general.click)
btn_search_general.setIcon(QIcon(png_run))
btn_search_general.hide()
btn_search_general.setFixedWidth(100)
btn_search_general.setFixedHeight(35)
global btn_delete
btn_delete = QPushButton("Delete", self)
btn_delete.clicked.connect(self.delete)
btn_delete.setIcon(QIcon(png_delete))
btn_delete.hide()
btn_delete.setFixedWidth(100)
btn_delete.setFixedHeight(35)
global btn_update
btn_update = QPushButton("Update", self)
btn_update.clicked.connect(self.update)
btn_update.setIcon(QIcon(png_update))
btn_update.hide()
btn_update.setFixedWidth(100)
btn_update.setFixedHeight(35)
# ------------------------------- #
# Main Window Layout #
# ------------------------------- #
layout = QGridLayout()
layout_buttons = QVBoxLayout()
self.main_window_widget = QWidget()
self.main_window_widget.setLayout(layout)
self.item_info_window = EntryWindow()
        self.item_info_window.pageCombo.activated.connect(
            self.select_table)
        self.key = db_primary  # default table until the combo box changes it
self.table_title = QLabel("Collection List")
self.table_title.setFont(QFont("Arial", 14))
self.tableWidget = QTableWidget()
self.tableWidget.setAlternatingRowColors(True)
self.tableWidget.setColumnCount(11)
self.tableWidget.horizontalHeader().setCascadingSectionResizes(False)
self.tableWidget.horizontalHeader().setSortIndicatorShown(False)
self.tableWidget.horizontalHeader().setStretchLastSection(True)
self.tableWidget.verticalHeader().setVisible(False)
self.tableWidget.verticalHeader().setCascadingSectionResizes(False)
self.tableWidget.verticalHeader().setStretchLastSection(False)
self.tableWidget.setHorizontalHeaderLabels(
(lb_id, lb_site, lb_location, lb_product, lb_make,
lb_asset, lb_reference, lb_assigned, lb_status, lb_date, lb_info))
self.tableWidget.setSortingEnabled(True)
empty_widget = QLabel()
empty_widget.setFixedSize(100, 55)
# layout_buttons.addWidget(btn_move)
layout_sub_buttons = QVBoxLayout()
layout_sub_buttons.addWidget(self.search_box)
layout_sub_buttons.addWidget(btn_search)
layout_sub_buttons.addWidget(self.search_box_asset_tag)
layout_sub_buttons.addWidget(btn_search_asset_tag)
layout_sub_buttons.addWidget(self.search_box_general)
layout_sub_buttons.addWidget(btn_search_general)
layout_sub_buttons.addWidget(empty_widget)
layout_sub_buttons.addWidget(btn_add)
layout_sub_buttons.addWidget(btn_refresh)
layout_sub_buttons.addWidget(btn_clear)
layout_sub_buttons.addWidget(btn_delete)
layout_sub_buttons.addWidget(btn_update)
layout_buttons.addLayout(layout_sub_buttons)
layout.addWidget(self.item_info_window, 0, 0, 1, 3)
layout.addLayout(layout_buttons, 0, 3)
layout.addWidget(self.table_title, 1, 0)
layout.addWidget(self.tableWidget, 2, 0, 1, 4)
self.setCentralWidget(self.main_window_widget)
# ------------------------------- #
# Keyboard Shortcuts #
# ------------------------------- #
self.shortcut_asset_tag = QShortcut(QKeySequence('Ctrl+Return'), self)
self.shortcut_asset_tag.activated.connect(btn_add.click)
self.shortcut_asset_tag = QShortcut(QKeySequence('Ctrl+e'), self)
self.shortcut_asset_tag.activated.connect(self.export)
self.shortcut_asset_tag = QShortcut(QKeySequence('Ctrl+t'), self)
self.shortcut_asset_tag.activated.connect(self.search_box_asset_tag.setFocus)
self.shortcut_delete = QShortcut(QKeySequence('Ctrl+d'), self)
self.shortcut_delete.activated.connect(btn_delete.click)
self.shortcut_search_id = QShortcut(QKeySequence('Ctrl+i'), self)
self.shortcut_search_id.activated.connect(self.search_box.setFocus)
self.shortcut_run = QShortcut(QKeySequence('Ctrl+r'), self)
self.shortcut_run.activated.connect(self.search_box_general.setFocus)
self.shortcut_refresh = QShortcut(QKeySequence('Alt+r'), self)
self.shortcut_refresh.activated.connect(btn_refresh.click)
self.shortcut_clear = QShortcut(QKeySequence('Alt+c'), self)
self.shortcut_clear.activated.connect(btn_clear.click)
self.shortcut_wipe_mimir = QShortcut(QKeySequence('Alt+Backspace'), self)
self.shortcut_wipe_mimir.activated.connect(self.wipe_mimir)
self.shortcut_csv2pdf = QShortcut(QKeySequence('Alt+p'), self)
self.shortcut_csv2pdf.activated.connect(self.update_pdf)
## SEE 1293 FOR SEARCH SHORTCUT
# ------------------------------- #
# Variables & Functions #
# ------------------------------- #
self.conn = sqlite3.connect(inventory_db)
self.result = []
def load_data(self):
if self.key == db_primary:
self.result = Mimisbrunnr_1.show_table()
elif self.key == db_secondary:
#self.tableWidget.setColumnCount(9)
self.result = Mimisbrunnr_2.show_table()
self.display()
def load_data_2(self):
if self.key == db_primary:
self.result = Mimisbrunnr_2.show_table()
elif self.key == db_secondary:
#self.tableWidget.setColumnCount(9)
self.result = Mimisbrunnr_1.show_table()
self.display()
def display(self):
self.tableWidget.setRowCount(0)
for row_number, row_data in enumerate(self.result):
self.tableWidget.insertRow(row_number)
for column_number, data in enumerate(row_data):
                # format the cell information: strip embedded newlines
                data = str(data).replace("\n", "")
self.tableWidget.setItem(row_number, column_number,
QTableWidgetItem(str(data)))
self.tableWidget.resizeColumnToContents(0)
self.tableWidget.resizeColumnToContents(2)
self.tableWidget.resizeColumnsToContents()
def select_table(self):
self.key = self.item_info_window.pageCombo.currentText()
if self.key == db_primary:
self.tableWidget.setColumnCount(11)
self.tableWidget.setHorizontalHeaderLabels(
(lb_id, lb_site, lb_location, lb_product, lb_make,
lb_asset, lb_reference, lb_assigned, lb_status, lb_date, lb_info))
elif self.key == db_secondary:
self.tableWidget.setColumnCount(11)
self.tableWidget.setHorizontalHeaderLabels(
(lb_id, lb_site, lb_location, lb_product, lb_make,
lb_asset, lb_reference, lb_assigned, lb_status, lb_date, lb_info))
self.load_data()
return self.key
def about(self):
dlg = AboutDialog()
dlg.exec()
def insert(self):
if self.key == db_primary:
description = self.item_info_window.assettag_db1.text().upper()
location = self.item_info_window.location_db1.currentText()
product = self.item_info_window.product_db1.itemText(
self.item_info_window.product_db1.currentIndex())
package = self.item_info_window.package_db1.text().upper() # PACKAGE = SERIAL NUMBER
try:
insert_Serial = str(package).split(": ")
serial_no_length = len(insert_Serial[1])
except Exception:
pass
assigned = self.item_info_window.assigned_db1.currentText()
manufacturer = self.item_info_window.manufacturer_db1.itemText(
self.item_info_window.manufacturer_db1.currentIndex())
try:
if serial_no_length == 7:
manufacturer = 'Dell'
elif serial_no_length == 8:
manufacturer = 'Lenovo'
except Exception:
pass
status = self.item_info_window.status_db1.itemText(
self.item_info_window.status_db1.currentIndex())
dates = self.item_info_window.dates_db1.text()
notes = self.item_info_window.notes_db1.text()
user = self.item_info_window.site_db1.currentText()
for row_count in range(1, 9999):
try:
if "None" in Mimisbrunnr_1.search_row(row_count):
break
if description in Mimisbrunnr_1.search_row(row_count):
if description == "":
pass
else:
identical_AT = QMessageBox.question(self, 'Warning', 'An item with that Asset Tag already exists in your database\n\nWould you like add it anyway?',
QMessageBox.StandardButton.Yes | QMessageBox.StandardButton.No, QMessageBox.StandardButton.Yes)
if identical_AT == QMessageBox.StandardButton.No:
return
if package in Mimisbrunnr_1.search_row(row_count):
QMessageBox.information(
QMessageBox(), "Warning", "This item's serial number matches that of an existing item in your database\n\nMatched Item ID Number: {}".format(row_count))
self.search_box.setText(str(row_count))
return
except Exception:
pass
Mimisbrunnr_1.add_row(user, location, product, manufacturer, description, package,
assigned, status, dates, notes)
elif self.key == db_secondary:
description = self.item_info_window.description_db2.text()
location = self.item_info_window.location_db2.text()
product = self.item_info_window.product_db2.itemText(
self.item_info_window.product_db2.currentIndex())
package = self.item_info_window.package_db2.text()
assigned = self.item_info_window.assigned_db2.text()
manufacturer = self.item_info_window.manufacturer_db2.itemText(
self.item_info_window.manufacturer_db2.currentIndex())
status = self.item_info_window.status_db2.text()
dates = self.item_info_window.dates_db2.text()
notes = self.item_info_window.notes_db2.text()
Mimisbrunnr_2.add_row(location, description, package, product,
manufacturer, assigned, status, dates, notes)
self.load_data()
tool_Scan_Mode.show()
global_Asset_Tag.clear()
global_Serial_Number.clear()
if "SN:" in package:
global_Serial_Number.setText('SN: ')
if r"//" in package:
global_Serial_Number.setText('//192.168.')
global_Serial_Number.setFocus()
if tutorial == QMessageBox.StandardButton.Yes:
simpleaudio.stop_all()
tyr_entry_added.play()
## APPLICATION LOG ##############################################
try:
AppLog('main', product, manufacturer, description, package)
except Exception:
pass
def refresh(self):
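        # Relaunch in place: os.execl replaces the current process image with a
        # fresh interpreter running the same argv, so all state is rebuilt.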
python = sys.executable
os.execl(python, python, * sys.argv)
def clear(self):
if self.key == db_primary:
self.search_box.clear()
self.search_box_asset_tag.clear()
self.search_box_general.clear()
self.item_info_window.item_db1_id_label.setText(lb_id)
self.item_info_window.site_db1.clear()
self.item_info_window.location_db1.clear()
self.item_info_window.assettag_db1.clear()
self.item_info_window.product_db1.clear()
self.item_info_window.package_db1.clear()
self.item_info_window.manufacturer_db1.clear()
self.item_info_window.assigned_db1.clear()
self.item_info_window.status_db1.clear()
self.item_info_window.notes_db1.clear()
elif self.key == db_secondary:
self.item_info_window.item_db2_id_label.setText(lb_id)
self.item_info_window.description_db2.clear()
self.item_info_window.location_db2.clear()
self.item_info_window.package_db2.clear()
self.item_info_window.assigned_db2.clear()
self.item_info_window.status_db2.clear()
self.item_info_window.dates_db2.clear()
self.item_info_window.notes_db2.clear()
def search(self):
if self.key == db_primary:
description = self.item_info_window.assettag_db1.text()
location = self.item_info_window.location_db1.currentText()
product = self.item_info_window.product_db1.itemText(
self.item_info_window.product_db1.currentIndex())
package = self.item_info_window.package_db1.text()
            assigned = self.item_info_window.assigned_db1.currentText()
manufacturer = self.item_info_window.manufacturer_db1.itemText(
self.item_info_window.manufacturer_db1.currentIndex())
            status = self.item_info_window.status_db1.itemText(
                self.item_info_window.status_db1.currentIndex())
dates = self.item_info_window.dates_db1.text()
notes = self.item_info_window.notes_db1.text()
self.result = Mimisbrunnr_1.search_rows(
description, location, product, package, assigned, manufacturer, status, dates, notes)
elif self.key == db_secondary:
description = self.item_info_window.description_db2.text()
location = self.item_info_window.location_db2.text()
product = self.item_info_window.product_db2.itemText(
self.item_info_window.product_db2.currentIndex())
status = self.item_info_window.status_db2.text()
dates = self.item_info_window.dates_db2.text()
notes = self.item_info_window.notes_db2.text()
self.result = Mimisbrunnr_2.search_rows(
description, location, product, status, dates, notes)
self.display()
def search_item(self, id):
id = self.search_box.text()
## SEARCH BY ID
try:
if self.key == db_primary:
first_matched_item = Mimisbrunnr_1.search_row(id)
self.item_info_window.item_db1_id_label.setText(
"ID #:{:>35}".format(id))
self.item_info_window.site_db1.clear()
self.item_info_window.site_db1.addItem(
str(first_matched_item[1]))
self.item_info_window.location_db1.clear()
self.item_info_window.location_db1.addItem(
str(first_matched_item[2]))
if lb_aesir in str(first_matched_item[1]):
self.item_info_window.location_db1.addItems(lb_locations_aesir)
elif lb_vanir in str(first_matched_item[1]):
self.item_info_window.location_db1.addItems(lb_locations_vanir)
self.item_info_window.product_db1.clear()
self.item_info_window.product_db1.addItem(
first_matched_item[3])
self.item_info_window.manufacturer_db1.clear()
self.item_info_window.manufacturer_db1.addItem(
first_matched_item[4])
if lb_chromebooks in str(first_matched_item[3]):
self.item_info_window.manufacturer_db1.addItems(lb_brands_chromebook)
elif lb_dvr in str(first_matched_item[3]):
self.item_info_window.manufacturer_db1.addItems(lb_brands_dvr)
elif lb_netprinters in str(first_matched_item[3]) or lb_locprinters in str(first_matched_item[3]) or lb_toner in str(first_matched_item[3]):
self.item_info_window.manufacturer_db1.addItems(lb_brands_printer)
elif lb_winlaptops in str(first_matched_item[3]):
self.item_info_window.manufacturer_db1.addItems(lb_brands_laptop)
self.item_info_window.assettag_db1.setText(
str(first_matched_item[5]))
self.item_info_window.package_db1.setText(
str(first_matched_item[6]))
self.item_info_window.assigned_db1.clear()
self.item_info_window.assigned_db1.addItem(
str(first_matched_item[7]))
self.item_info_window.assigned_db1.addItems(user_list)
self.item_info_window.status_db1.clear()
self.item_info_window.status_db1.addItem(lb_deployed)
self.item_info_window.status_db1.addItem(lb_instock)
self.item_info_window.status_db1.addItem(lb_onorder)
self.item_info_window.status_db1.addItem(lb_oos_repair)
self.item_info_window.status_db1.addItem(lb_oos_obsolete)
for x in range(0, 200):
self.item_info_window.status_db1.addItem("Quantity: " + str(x))
self.item_info_window.dates_db1.setText(
today)
self.item_info_window.notes_db1.setText(
str(first_matched_item[10]))
elif self.key == db_secondary:
first_matched_item = Mimisbrunnr_2.search_row(id)
self.item_info_window.item_db2_id_label.setText(
"ID #:{:>35}".format(id))
self.item_info_window.location_db2.setText(
str(first_matched_item[1]))
self.item_info_window.description_db2.setText(
str(first_matched_item[2]))
self.item_info_window.product_db2.setCurrentText(
first_matched_item[4])
self.item_info_window.package_db2.setText(
str(first_matched_item[3]))
self.item_info_window.assigned_db2.setText(
str(first_matched_item[6]))
self.item_info_window.manufacturer_db2.setCurrentText(
first_matched_item[5])
self.item_info_window.status_db2.setText(
str(first_matched_item[7]))
self.item_info_window.dates_db2.setText(
str(first_matched_item[8]))
self.item_info_window.notes_db2.setText(
str(first_matched_item[9]))
if isinstance(id, int):
print("int")
if isinstance(id, str):
print("string")
print(str(first_matched_item[5]))
except Exception:
if self.key == db_primary:
self.item_info_window.item_db1_id_label.setText("ID:")
elif self.key == db_secondary:
self.item_info_window.item_db2_id_label.setText("ID:")
QMessageBox.information(
QMessageBox(), "Search", "Can not find the item")
def search_asset_tag(self, asset_tag_no):
global row_count
## Make asset_tag_no = whatever value you enter in the search bar in uppercase
asset_tag_no = self.search_box_asset_tag.text()
asset_tag_no = asset_tag_no.upper()
run_search = True
if asset_tag_no == "0":
run_search = False
QMessageBox.information(
QMessageBox(), "Search Result", "Invalid Asset Tag: 0")
return
if asset_tag_no == "":
run_search = False
QMessageBox.information(
QMessageBox(), "Search Result", "Please enter an Asset Tag")
return
## SEARCH BY ASSET TAG
try:
try:
if self.key == db_primary:
for row_count in range(1,9999):
# clear_term()
## list_row lists all the values in the given row using .search_row
list_row = Mimisbrunnr_1.search_row(row_count)
## item_asset_tag equals the fifth element in the row, which is the asset tag
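                        ## (Row shape inferred from the indices used below, shown
                        ##  for illustration: (id, site, location, product, make,
                        ##  asset_tag, serial, assigned, status, date, info))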
try:
item_asset_tag = list_row[5]
except Exception:
                            pass
## If the tag that you searched for shows up in the above query
## Populate the forms
try:
if asset_tag_no in item_asset_tag:
while run_search:
first_matched_item = Mimisbrunnr_1.search_row(row_count)
self.item_info_window.item_db1_id_label.setText(
"ID #:{:>35}".format(row_count))
self.item_info_window.site_db1.clear()
self.item_info_window.site_db1.addItem(
str(first_matched_item[1]))
self.item_info_window.location_db1.clear()
self.item_info_window.location_db1.addItem(
str(first_matched_item[2]))
if lb_aesir in str(first_matched_item[1]):
self.item_info_window.location_db1.addItems(lb_locations_aesir)
elif lb_vanir in str(first_matched_item[1]):
self.item_info_window.location_db1.addItems(lb_locations_vanir)
self.item_info_window.product_db1.clear()
self.item_info_window.product_db1.addItem(
first_matched_item[3])
self.item_info_window.manufacturer_db1.clear()
self.item_info_window.manufacturer_db1.addItem(
first_matched_item[4])
if lb_chromebooks in str(first_matched_item[3]):
self.item_info_window.manufacturer_db1.addItems(lb_brands_chromebook)
elif lb_dvr in str(first_matched_item[3]):
self.item_info_window.manufacturer_db1.addItems(lb_brands_dvr)
elif lb_netprinters in str(first_matched_item[3]) or lb_locprinters in str(first_matched_item[3]) or lb_toner in str(first_matched_item[3]):
self.item_info_window.manufacturer_db1.addItems(lb_brands_printer)
elif lb_winlaptops in str(first_matched_item[3]):
self.item_info_window.manufacturer_db1.addItems(lb_brands_laptop)
self.item_info_window.assettag_db1.setText(
str(first_matched_item[5]))
self.item_info_window.package_db1.setText(
str(first_matched_item[6]))
self.item_info_window.assigned_db1.clear()
self.item_info_window.assigned_db1.addItem(
str(first_matched_item[7]))
self.item_info_window.assigned_db1.addItems(user_list)
self.item_info_window.status_db1.clear()
self.item_info_window.status_db1.addItem(lb_deployed)
self.item_info_window.status_db1.addItem(lb_instock)
self.item_info_window.status_db1.addItem(lb_onorder)
self.item_info_window.status_db1.addItem(lb_oos_repair)
self.item_info_window.status_db1.addItem(lb_oos_obsolete)
for x in range(0, 200):
self.item_info_window.status_db1.addItem("Quantity: " + str(x))
self.item_info_window.dates_db1.setText(
today)
self.item_info_window.notes_db1.setText(
str(first_matched_item[10]))
self.search_box.setText(str(row_count))
self.search_box_general.clear()
self.search_box_asset_tag.clear()
break
break
else:
pass
except Exception:
pass
# QMessageBox.information(
# QMessageBox(), "Search Result", "Hmm, I can't find that asset tag :(\nMake sure you entered the information correctly.")
except Exception:
QMessageBox.information(
QMessageBox(), "Search Result", "Hmm, I can't find that asset tag :(\nMake sure you entered the information correctly.")
pass
except Exception:
QMessageBox.information(
QMessageBox(), "Search", "Can not find the item")
def search_general(self, search_input):
global general_input
general_input = self.search_box_general.text()
general_input = general_input.upper()
## ARGUMENTS
help = "HELP"
arg_is_help = "HELP:"
arg_is_asset_tag = "AT:"
arg_is_serial_no = "SN:"
arg_is_location = "LOC:"
arg_is_make = "MAKE:"
arg_is_ip = r"//"
arg_is_toner = "TONER:"
arg_is_user = "USER:"
is_building = "BLD"
## SEARCH BY ARGUMENT
try:
clear_term()
if general_input == help: ## HELP TEXT
print("Help Requested!")
self.search_box_general.clear()
try:
QMessageBox.information(
QMessageBox(), "Help", "Add arguments to your help query to find answers."
"\n\n'help:readme' - Opens program documentation"
"\n\n'help:tags' - List search query tags"
"\n\n'help:shortcuts' - List available keyboard shortcuts")
except Exception:
print("Didn't work")
pass
elif general_input.startswith(arg_is_help): ## HELP : TAGS
print("Help Requested!")
help_requested = general_input.split(":")
help_requested[1] = help_requested[1].upper()
self.search_box_general.clear()
if help_requested[1] == "TAGS":
QMessageBox.information(
QMessageBox(), "Help: Tags", "Search Tags\n\n\nAT: Asset Tag\n\nSN: Serial Number\n\nLOC: Location\n\n"
"MAKE: Manufacturer\n\nTONER: Print Toner\n\n'//' for IP Address\n\n*:list to list tag options")
elif help_requested[1] == "SHORTCUTS":
QMessageBox.information(
QMessageBox(), "Help: Shortcuts", "Keyboard Shortcuts\n\n\nCTRL+S: Run Search\n\nCTRL+I: ID Search\n\nCTRL+T: Asset Tag Search\n\n"
"CTRL+R: Run Console\n\nCTRL+Return: Add Entry\n\nCTRL+D: Delete Entry\n\nCTRL+E: Export to CSV\n\nALT+R: Refresh")
elif help_requested[1] == "README":
online_readme = True
if(online_readme):
readme = ("https://need4swede.github.io/ODIN/Mimir/Documentation/readme.html")
else:
readme = (root_dir + "/Mimir/Documentation/readme.html")
if platform.system() == "Darwin":
print("isMac")
readme = ("file:///" + readme)
webbrowser.open(readme)
elif help_requested[1] == "TUTORIAL":
os.remove(tyr_log_tutorial)
python = sys.executable
os.execl(python, python, * sys.argv)
else:
QMessageBox.information(
QMessageBox(), "Help", "Add arguments to your help query to find answers."
"\n\n'help:howto' - Opens program documentation"
"\n\n'help:tags' - List search query tags"
"\n\n'help:shortcuts' - List available keyboard shortcuts")
elif general_input.startswith(arg_is_asset_tag):
print("Searching by: Asset Tag")
arg_tag = general_input.split(":")
print("Asset Tag:", arg_tag[1])
self.search_box_asset_tag.setText(arg_tag[1])
btn_search_asset_tag.click()
self.search_box_asset_tag.clear()
self.search_box.setText(str(row_count))
elif general_input.startswith(arg_is_serial_no):
run_search = True
print("Searching by: Serial Number")
arg_serial_no = general_input.split(":")
serial_no = arg_serial_no[1]
if serial_no[0] == " ":
serial_no = serial_no.strip()
serial_no_length = len(serial_no)
search_bar_general.clear()
if serial_no_length < 1:
                    run_search = False
elif serial_no_length == 7:
print("Serial Number Match: Dell")
elif serial_no_length == 8:
print("Serial Number Match: Lenovo")
print("Serial No:", serial_no)
try:
try:
if self.key == db_primary:
for row_count in range(1,9999):
# clear_term()
## list_row lists all the values in the given row using .search_row
list_row = Mimisbrunnr_1.search_row(row_count)
## item_asset_tag equals the fifth element in the row, which is the asset tag
                                try:
                                    item_serial_num = list_row[6]
                                except Exception:
                                    pass
                                ## If the serial number you searched for shows up in this row,
                                ## populate the forms
try:
if serial_no in item_serial_num:
while run_search:
first_matched_item = Mimisbrunnr_1.search_row(row_count)
self.item_info_window.item_db1_id_label.setText(
"ID #:{:>35}".format(row_count))
self.item_info_window.site_db1.clear()
self.item_info_window.site_db1.addItem(
str(first_matched_item[1]))
self.item_info_window.location_db1.clear()
self.item_info_window.location_db1.addItem(
str(first_matched_item[2]))
if lb_aesir in str(first_matched_item[1]):
self.item_info_window.location_db1.addItems(lb_locations_aesir)
elif lb_vanir in str(first_matched_item[1]):
self.item_info_window.location_db1.addItems(lb_locations_vanir)
self.item_info_window.product_db1.clear()
self.item_info_window.product_db1.addItem(
first_matched_item[3])
self.item_info_window.manufacturer_db1.clear()
self.item_info_window.manufacturer_db1.addItem(
first_matched_item[4])
if lb_chromebooks in str(first_matched_item[3]):
self.item_info_window.manufacturer_db1.addItems(lb_brands_chromebook)
elif lb_dvr in str(first_matched_item[3]):
self.item_info_window.manufacturer_db1.addItems(lb_brands_dvr)
elif lb_netprinters in str(first_matched_item[3]) or lb_locprinters in str(first_matched_item[3]) or lb_toner in str(first_matched_item[3]):
self.item_info_window.manufacturer_db1.addItems(lb_brands_printer)
elif lb_winlaptops in str(first_matched_item[3]):
self.item_info_window.manufacturer_db1.addItems(lb_brands_laptop)
self.item_info_window.assettag_db1.setText(
str(first_matched_item[5]))
self.item_info_window.package_db1.setText(
str(first_matched_item[6]))
self.item_info_window.assigned_db1.clear()
self.item_info_window.assigned_db1.addItem(
str(first_matched_item[7]))
self.item_info_window.assigned_db1.addItems(user_list)
self.item_info_window.status_db1.clear()
self.item_info_window.status_db1.addItem(lb_deployed)
self.item_info_window.status_db1.addItem(lb_instock)
self.item_info_window.status_db1.addItem(lb_onorder)
self.item_info_window.status_db1.addItem(lb_oos_repair)
self.item_info_window.status_db1.addItem(lb_oos_obsolete)
for x in range(0, 200):
self.item_info_window.status_db1.addItem("Quantity: " + str(x))
self.item_info_window.dates_db1.setText(
today)
self.item_info_window.notes_db1.setText(
str(first_matched_item[10]))
self.search_box.setText(str(row_count))
self.search_box_general.clear()
self.search_box_asset_tag.clear()
break
break
else:
pass
except Exception:
pass
except Exception:
pass
except Exception:
pass
elif general_input.startswith(arg_is_location):
print("Searching by: Location")
arg_location = general_input.split(":")
if is_building in arg_location[1]:
print("Location Type: Building")
building = arg_location[1]
building = building.replace("BLD", "BLD. ")
print("Building Location:", building)
else:
print("Location:", arg_location[1])
elif general_input.startswith(arg_is_make):
print("Searching by: Make")
make_list = ["CANON", "DELL", "HP", "LENOVO"]
arg_make = general_input.split(":")
make = arg_make[1]
if any(x in make for x in make_list):
if make == "HP":
print("Make:", make)
else:
make = make.capitalize()
print("Make:", make)
else:
make_list = [x.capitalize() for x in make_list]
make = make.capitalize()
if make == "List":
print("List of Manufacturers:", make_list)
elif make == "":
print("No make listed!")
print("List of Manufacturers:", make_list)
else:
print("Unknown Make!")
elif general_input.startswith(arg_is_ip):
print("Searching by: IP Address")
arg_ip = general_input.split(r"//")
if arg_ip[1].startswith("192"):
pass
else:
arg_ip[1] = "192.168." + arg_ip[1]
arg_ip[1] = r"//" + arg_ip[1]
print("IP Address:", arg_ip[1])
elif general_input.startswith(arg_is_toner):
print("Searching by: Toner")
toner_list_canon = ["GPR-37", "GPR-38"]
arg_toner = general_input.split(":")
toner_type = arg_toner[1]
GPR_37 = ["GPR-37", "GPR37", "37"]
GPR_38 = ["GPR-38", "GPR38", "38"]
if any(x in toner_type for x in GPR_37):
print("Toner Make: Canon")
print("Toner Type:", GPR_37[0])
elif any(x in toner_type for x in GPR_38):
print("Toner Make: Canon")
print("Toner Type:", GPR_38[0])
if toner_type == "GPR":
print("Toner Make: Canon")
print("Known Types:", toner_list_canon)
elif general_input.startswith(arg_is_user):
print("Searching by: User")
arg_user = general_input.split(":")
user = arg_user[1]
user = user.lower()
f_name = user[0:1]
f_name = f_name + "."
l_name = user[1:]
user_email = user + "@domain.org"
print("Username:", user)
try:
                    if any(l_name.capitalize() in x for x in user_list):
indices = [i for i, s in enumerate(user_list) if l_name.capitalize() in s]
full_name = user_list[indices[0]]
print("Full Name:", full_name)
print("Email:", user_email)
else:
QMessageBox.information(
QMessageBox(), "User Search", "Enter the person's name in 'users.csv'\n\nSearch users by entering the first letter of their first initial, and their full lastname\n\nExample: llarsson")
except Exception:
QMessageBox.information(
QMessageBox(), "User Search", "User not found!\n\nSearch users by entering the first letter of their first initial, and their full lastname")
else:
if ":" in general_input:
QMessageBox.information(
QMessageBox(), "Search", "Invalid Run Argument\n\nRun 'help:tags' to view available run arguments")
else:
print("No search argument passed!")
print("Insert:", general_input)
print("\nType Assumption: Serial Number")
print("Argument Probability: Low")
print("\nNo Argument passed to Search Query")
QMessageBox.information(
QMessageBox(), "Search", "No results found\n\nTry using a search argument\n\nRun 'help' for more options")
pass
except Exception:
QMessageBox.information(
QMessageBox(), "Search", "Unable to process your query!\n\nRun 'help' for more options")
def update(self):
clear_term()
id = self.search_box.text()
asset_tag_no = self.search_box_asset_tag.text()
if str(id) == "":
print("The ID searchbox is empty")
sys.exit(app.exec())
if self.key == db_primary:
asset_tag = self.item_info_window.assettag_db1.text()
location = self.item_info_window.location_db1.currentText()
product = self.item_info_window.product_db1.itemText(
self.item_info_window.product_db1.currentIndex())
package = self.item_info_window.package_db1.text()
assigned = self.item_info_window.assigned_db1.currentText()
manufacturer = self.item_info_window.manufacturer_db1.itemText(
self.item_info_window.manufacturer_db1.currentIndex())
# self.item_info_window.status_db1.clear()
# status = str(self.item_info_window.status_db1.currentText())
status = self.item_info_window.status_db1.itemText(
self.item_info_window.status_db1.currentIndex())
dates = self.item_info_window.dates_db1.text()
notes = self.item_info_window.notes_db1.text()
site = self.item_info_window.site_db1.currentText()
print("\nCurrent Text:", str(self.item_info_window.status_db1.itemText(
self.item_info_window.status_db1.currentIndex())))
Mimisbrunnr_1.update_row(id, site, location, product, manufacturer, asset_tag,
package, assigned, status, dates, notes)
# Mimisbrunnr_1.update_row(id, location, product, asset_tag, manufacturer, package,
# assigned, status, dates, notes)
elif self.key == db_secondary:
description = self.item_info_window.description_db2.text()
location = self.item_info_window.location_db2.text()
product = self.item_info_window.product_db2.itemText(
self.item_info_window.product_db2.currentIndex())
package = self.item_info_window.package_db2.text()
assigned = self.item_info_window.assigned_db2.text()
manufacturer = self.item_info_window.manufacturer_db2.itemText(
self.item_info_window.manufacturer_db2.currentIndex())
status = self.item_info_window.status_db2.text()
dates = self.item_info_window.dates_db2.text()
notes = self.item_info_window.notes_db2.text()
Mimisbrunnr_2.update_row(id, description, location, product,
package, assigned, manufacturer, status, dates, notes)
QMessageBox.information(
QMessageBox(), "Update", "Item has been updated.")
self.load_data()
def clear_contents(self):
self.tableWidget.clearContents()
def delete(self):
id = self.search_box.text()
self.msgSearch = QMessageBox()
try:
if self.key == db_primary:
row = Mimisbrunnr_1.search_row(id)
search_result = lb_id+" "+str(row[0])+"\n"+lb_location+" "+str(row[1])+"\n"+lb_product+" "+str(row[2])+"\n" \
+ lb_make+" "+str(row[3])+"\n"+lb_asset+" "+str(row[4])+"\n"+lb_reference+" "+str(row[5])+"\n"+lb_assigned+" " + str(row[6])+"\n" \
+ lb_status+" " + \
str(row[7])+"\n"+lb_date+" " + \
str(row[8])+"\n"+lb_info+" "+str(row[9])
elif self.key == db_secondary:
row = Mimisbrunnr_2.search_row(id)
search_result = lb_id+" "+str(row[0])+"\n"+lb_location+" "+str(row[1])+"\n"+lb_product+" "+str(row[2])+"\n" \
+ lb_make+" "+str(row[3])+"\n"+lb_asset+" "+str(row[4])+"\n"+lb_reference+" "+str(row[5])+"\n"+lb_assigned+" " + str(row[6])+"\n" \
+ lb_status+" " + \
str(row[7])+"\n"+lb_date+" " + \
str(row[8])+"\n"+lb_info+" "+str(row[9])
self.msgSearch.setText(search_result)
self.msgSearch.setInformativeText(
"Do you want to remove this item?")
self.msgSearch.setStandardButtons(QMessageBox.StandardButton.Yes | QMessageBox.StandardButton.No)
self.msgSearch.setDefaultButton(QMessageBox.StandardButton.Yes)
self.msgSearch.setWindowTitle("Remove item?")
ret = self.msgSearch.exec()
if ret == QMessageBox.StandardButton.Yes:
if self.key == db_primary:
Mimisbrunnr_1.delete_row(id)
self.item_info_window.item_db1_id_label.setText(lb_id)
self.item_info_window.site_db1.clear()
self.item_info_window.product_db1.clear()
self.item_info_window.manufacturer_db1.clear()
self.item_info_window.assettag_db1.clear()
self.item_info_window.location_db1.clear()
self.item_info_window.package_db1.clear()
self.item_info_window.assigned_db1.clear()
self.item_info_window.status_db1.clear()
self.item_info_window.dates_db1.clear()
self.item_info_window.notes_db1.clear()
elif self.key == db_secondary:
Mimisbrunnr_2.delete_row(id)
self.item_info_window.item_db2_id_label.setText(lb_id)
self.item_info_window.description_db2.clear()
self.item_info_window.location_db2.clear()
self.item_info_window.package_db2.clear()
self.item_info_window.assigned_db2.clear()
#self.item_info_window.status_db2.clear()
self.item_info_window.dates_db2.clear()
self.item_info_window.notes_db2.clear()
elif ret == QMessageBox.StandardButton.No:
pass
except Exception:
# QMessageBox.warning(QMessageBox(), "Error",
# "Could not remove the item")
pass
finally:
self.load_data()
def delete_move(self):
id = self.search_box.text()
self.msgSearch = QMessageBox()
try:
if self.key == db_primary:
row = Mimisbrunnr_1.search_row(id)
search_result = lb_id+" "+str(row[0])+"\n"+lb_location+" "+str(row[1])+"\n"+lb_product+" "+str(row[2])+"\n" \
+ lb_make+" "+str(row[3])+"\n"+lb_asset+" "+str(row[4])+"\n"+lb_reference+" "+str(row[5])+"\n"+lb_assigned+" " + str(row[6])+"\n" \
+ lb_status+" " + \
str(row[7])+"\n"+lb_date+" " + \
str(row[8])+"\n"+lb_info+" "+str(row[9])
elif self.key == db_secondary:
row = Mimisbrunnr_2.search_row(id)
search_result = lb_id+" "+str(row[0])+"\n"+lb_location+" "+str(row[1])+"\n"+lb_product+" "+str(row[2])+"\n" \
+ lb_make+" "+str(row[3])+"\n"+lb_asset+" "+str(row[4])+"\n"+lb_reference+" "+str(row[5])+"\n"+lb_assigned+" " + str(row[6])+"\n" \
+ lb_status+" " + \
str(row[7])+"\n"+lb_date+" " + \
str(row[8])+"\n"+lb_info+" "+str(row[9])
self.msgSearch.setText(search_result)
self.msgSearch.setInformativeText(
"Do you want to move this item?")
self.msgSearch.setStandardButtons(QMessageBox.StandardButton.Yes | QMessageBox.StandardButton.No)
self.msgSearch.setWindowTitle("Move item?")
ret = self.msgSearch.exec()
if ret == QMessageBox.StandardButton.Yes:
if self.key == db_primary:
Mimisbrunnr_1.delete_row(id)
self.item_info_window.item_db1_id_label.setText(lb_id)
self.item_info_window.assettag_db1.clear()
self.item_info_window.location_db1.clear()
self.item_info_window.package_db1.clear()
self.item_info_window.assigned_db1.clear()
self.item_info_window.status_db1.clear()
self.item_info_window.dates_db1.clear()
self.item_info_window.notes_db1.clear()
elif self.key == db_secondary:
Mimisbrunnr_2.delete_row(id)
self.item_info_window.item_db2_id_label.setText(lb_id)
self.item_info_window.description_db2.clear()
self.item_info_window.location_db2.clear()
self.item_info_window.package_db2.clear()
self.item_info_window.assigned_db2.clear()
self.item_info_window.status_db2.clear()
self.item_info_window.dates_db2.clear()
self.item_info_window.notes_db2.clear()
elif ret == QMessageBox.StandardButton.No:
pass
except Exception:
QMessageBox.warning(QMessageBox(), "Error",
"Could not move the item")
finally:
self.load_data()
def move(self):
if self.key == db_primary:
description = self.item_info_window.assettag_db1.text()
location = self.item_info_window.location_db1.currentText()
product = self.item_info_window.product_db1.itemText(
self.item_info_window.product_db1.currentIndex())
package = self.item_info_window.package_db1.text()
            assigned = self.item_info_window.assigned_db1.currentText()
            manufacturer = self.item_info_window.manufacturer_db1.itemText(
                self.item_info_window.manufacturer_db1.currentIndex())
            status = self.item_info_window.status_db1.currentText()
dates = self.item_info_window.dates_db1.text()
notes = self.item_info_window.notes_db1.text()
Mimisbrunnr_2.add_row(location, description, package, product,
manufacturer, assigned, status, dates, notes)
elif self.key == db_secondary:
description = self.item_info_window.description_db2.text()
location = self.item_info_window.location_db2.text()
product = self.item_info_window.product_db2.itemText(
self.item_info_window.product_db2.currentIndex())
package = self.item_info_window.package_db2.text()
assigned = self.item_info_window.assigned_db2.text()
manufacturer = self.item_info_window.manufacturer_db2.itemText(
self.item_info_window.manufacturer_db2.currentIndex())
status = self.item_info_window.status_db2.text()
dates = self.item_info_window.dates_db2.text()
notes = self.item_info_window.notes_db2.text()
Mimisbrunnr_1.add_row(location, description, package, product,
manufacturer, assigned, status, dates, notes)
self.delete_move()
self.load_data()
def export(self):
export_dir = True
if export_dir:
mimisbrunnr_export_csv_1 = (mimisbrunnr_export_dir + "CSV/" + "Mimisbrunnr_1.csv")
mimisbrunnr_export_html_1 = (mimisbrunnr_export_dir + "HTML/" + "Mimisbrunnr_1.html")
mimisbrunnr_export_pdf_1 = (mimisbrunnr_export_dir + "PDF/" + "Mimisbrunnr_1.pdf")
mimisbrunnr_export_csv_2 = (mimisbrunnr_export_dir + "CSV/Mimisbrunnr_2.csv")
try:
if self.key == db_primary:
Mimisbrunnr_1.to_csv()
loaded_export = pd.read_csv(mimisbrunnr_export_csv_1)
loaded_export.to_html(mimisbrunnr_export_html_1)
with open(mimisbrunnr_export_csv_1, newline='') as f:
reader = csv.reader(f)
pdf = FPDF()
pdf.add_page(orientation = 'L')
page_width = pdf.w - 8 * pdf.l_margin
pdf.set_font('Times','B',14.0)
pdf.cell(page_width, 0.0, 'Mimisbrunnr Export')
pdf.ln(6)
pdf.set_font('Times','',10.0)
pdf.cell(page_width, 0.0, f'Date: {date_today}')
pdf.ln(10)
pdf.set_font('Courier', '', 6.5)
col_width = page_width/6.4
pdf.ln(1)
th = pdf.font_size * 2
bold = True
for row in reader:
if bold:
pdf.set_font('Helvetica', 'B', 9.5)
bold = not bold
else:
pdf.set_font('Times', '', 8.5)
pdf.cell(col_width, th, str(row[0]), border=1, align='C')
pdf.cell(col_width, th, row[1], border=1, align='C')
pdf.cell(col_width, th, row[2], border=1, align='C')
pdf.cell(col_width, th, row[3], border=1, align='C')
pdf.cell(col_width, th, row[4], border=1, align='C')
pdf.cell(col_width, th, row[5], border=1, align='C')
pdf.cell(col_width, th, row[6], border=1, align='C')
pdf.cell(col_width, th, row[9], border=1, align='C')
pdf.ln(th)
pdf.ln(10)
pdf.set_font('Times','',10.0)
pdf.cell(page_width, 0.0, '- end of report -')
pdf.output(mimisbrunnr_export_pdf_1, 'F')
elif self.key == db_secondary:
Mimisbrunnr_2.to_csv()
QMessageBox.information(
QMessageBox(), "File export", "Mimir: Exporting...\n\nCSV: Done\n\nHTML: Done\n\nPDF: Done\n\nMimir: Export Complete!")
except Exception:
QMessageBox.warning(QMessageBox(), "Error",
"Could not export to csv")
finally:
pass
def wipe_mimir(self):
reply = QMessageBox.question(self, 'Wipe Mimir', 'Delete all Entries?',
QMessageBox.StandardButton.Yes | QMessageBox.StandardButton.No, QMessageBox.StandardButton.Yes)
if reply == QMessageBox.StandardButton.Yes:
if os.path.isfile(inventory_db):
os.remove(inventory_db)
self.refresh()
else:
pass
def update_pdf(self):
export_dir = True
if export_dir:
mimisbrunnr_export_csv_1 = (mimisbrunnr_export_dir + "CSV/" + "Mimisbrunnr_1.csv")
mimisbrunnr_export_html_1 = (mimisbrunnr_export_dir + "HTML/" + "Mimisbrunnr_1.html")
mimisbrunnr_export_pdf_1 = (mimisbrunnr_export_dir + "PDF/" + "Mimisbrunnr_1.pdf")
mimisbrunnr_export_csv_2 = (mimisbrunnr_export_dir + "CSV/Mimisbrunnr_2.csv")
try:
if self.key == db_primary:
loaded_export = pd.read_csv(mimisbrunnr_export_csv_1)
loaded_export.to_html(mimisbrunnr_export_html_1)
with open(mimisbrunnr_export_csv_1, newline='') as f:
reader = csv.reader(f)
pdf = FPDF()
pdf.add_page(orientation = 'L')
page_width = pdf.w - 8 * pdf.l_margin
pdf.set_font('Times','B',14.0)
pdf.cell(page_width, 0.0, 'Mimisbrunnr Export')
pdf.ln(6)
pdf.set_font('Times','',10.0)
pdf.cell(page_width, 0.0, f'Date: {date_today}')
pdf.ln(10)
pdf.set_font('Courier', '', 6.5)
col_width = page_width/6.4
pdf.ln(1)
th = pdf.font_size * 2
bold = True
for row in reader:
if bold:
pdf.set_font('Helvetica', 'B', 9.5)
bold = not bold
else:
pdf.set_font('Times', '', 8.5)
pdf.cell(col_width, th, str(row[0]), border=1, align='C')
pdf.cell(col_width, th, row[1], border=1, align='C')
pdf.cell(col_width, th, row[2], border=1, align='C')
pdf.cell(col_width, th, row[3], border=1, align='C')
pdf.cell(col_width, th, row[4], border=1, align='C')
pdf.cell(col_width, th, row[5], border=1, align='C')
pdf.cell(col_width, th, row[6], border=1, align='C')
pdf.cell(col_width, th, row[9], border=1, align='C')
pdf.ln(th)
pdf.ln(10)
pdf.set_font('Times','',10.0)
pdf.cell(page_width, 0.0, '- end of report -')
pdf.output(mimisbrunnr_export_pdf_1, 'F')
elif self.key == db_secondary:
Mimisbrunnr_2.to_csv()
QMessageBox.information(
QMessageBox(), "File export", "Mimir: Exporting...\n\nHTML: Done\n\nPDF: Done\n\nMimir: Export Complete!")
except Exception:
QMessageBox.warning(QMessageBox(), "Error",
"Could not export to csv")
finally:
pass
def quit(self):
reply = QMessageBox.question(self, 'Exit', 'Do you want to quit?',
QMessageBox.StandardButton.Yes | QMessageBox.StandardButton.No, QMessageBox.StandardButton.Yes)
if reply == QMessageBox.StandardButton.Yes:
sys.exit()
else:
pass
## TYR INFO #######################################################
class AboutDialog(QDialog):
def __init__(self, *args, **kwargs):
super(AboutDialog, self).__init__(*args, **kwargs)
self.setFixedWidth(500)
self.setFixedHeight(245)
QBtn = QDialogButtonBox.StandardButton.Ok
self.buttonBox = QDialogButtonBox(QBtn)
self.buttonBox.accepted.connect(self.accept)
self.buttonBox.rejected.connect(self.reject)
layout = QVBoxLayout()
self.setWindowTitle(info_title)
title = QLabel(app_title)
font = title.font()
font.setPointSize(65)
title.setFont(font)
labelpic = QLabel()
pixmap = QPixmap(png_logo)
pixmap = pixmap.scaledToWidth(325)
labelpic.setPixmap(pixmap)
labelpic.setFixedHeight(150)
layout.addWidget(title)
layout.addWidget(QLabel(app_version))
layout.addWidget(QLabel(app_description))
layout.addWidget(QLabel(app_framework))
layout.addWidget(QLabel(app_contact))
# layout.addWidget(labelpic)
layout.addWidget(self.buttonBox)
self.setLayout(layout)
## TYR INITIALIZE #################################################
class EntryWindow(QWidget):
def __init__(self):
super().__init__()
layout = QHBoxLayout()
sub_layout = QVBoxLayout()
self.setLayout(layout)
# Label
self.Mimisbrunnr_label = QLabel("Mimisbrunnr")
self.Mimisbrunnr_label.setFont(QFont("Arial", 14))
self.Mimisbrunnr_label.setFixedSize(100, 30)
self.Mimisbrunnr_label.hide()
self.item_label_db1 = QLabel("Item Information")
self.item_label_db1.setFont(QFont("Arial", 14))
self.item_label_db1.setFixedSize(250, 40)
self.item_label_db1.hide()
self.item_label_db2 = QLabel("Item Information")
self.item_label_db2.setFont(QFont("Arial", 14))
self.item_label_db2.setFixedSize(250, 40)
self.item_label_db2.hide()
self.picLabel = QLabel()
self.pixmap = QPixmap(png_db_primary)
self.pixmap = self.pixmap.scaled(300, 300, Qt.AspectRatioMode.KeepAspectRatio, Qt.TransformationMode.FastTransformation)
# self.pixmap = self.pixmap.scaled(300, 200, QtCore.Qt.AspectRatioMode)
# ERRORMESSAGE
# self.pixmap = self.pixmap.scaledToWidth(300)
self.picLabel.setPixmap(self.pixmap)
# self.picLabel.setFixedSize(300, 150)
# self.picLabel.setFixedHeight(300)
# Create and connect the combo box to switch between different inventory Mimisbrunnr
self.pageCombo = QComboBox()
self.pageCombo.addItems(
[db_primary, db_secondary, db_tertiary])
self.pageCombo.hide()
self.pageCombo.activated.connect(self.switchPage)
# Layouts
self.stackedLayout = QStackedLayout()
sub_layout.addWidget(self.Mimisbrunnr_label)
sub_layout.addWidget(self.pageCombo)
sub_layout.addWidget(self.picLabel)
layout.addLayout(sub_layout)
layout.addLayout(self.stackedLayout)
# -------------------------- #
# Primary Mimisbrunnr #
# -------------------------- #
self.page_db1 = QWidget()
self.page_db1_layout = QVBoxLayout()
self.form_layout_db1 = QFormLayout()
self.item_db1_id = ""
self.item_db1_id_label = QLabel(f"Týr - {app_version}" + self.item_db1_id)
self.page_db1_layout.addWidget(self.item_label_db1)
self.page_db1_layout.addWidget(self.item_db1_id_label)
self.site_db1 = QComboBox()
self.site_db1.addItem(lb_aesir)
self.site_db1.addItem(lb_vanir)
self.site_db1.addItem(lb_midgard)
self.form_layout_db1.addRow(lb_site, self.site_db1)
global product_selection
global search_selection
search_selection = "Search..."
self.product_db1 = QComboBox()
self.product_db1.addItem(lb_default_dropdown)
self.product_db1.addItem(search_selection)
self.product_db1.addItem("")
self.product_db1.addItem(lb_desktop, (lb_brands_desktops))
self.product_db1.addItem(lb_dvr, (lb_brands_dvr))
self.product_db1.addItem(lb_chromebooks, (lb_brands_chromebook))
self.product_db1.addItem(lb_winlaptops, (lb_brands_laptop))
self.product_db1.addItem(lb_locprinters, (lb_brands_printer))
self.product_db1.addItem(lb_netprinters, (lb_brands_printer))
self.product_db1.addItem(lb_toner, (lb_brands_printer))
self.product_db1.currentIndexChanged.connect(self.updatemanufacturerInput)
self.form_layout_db1.addRow(lb_product, self.product_db1)
product_selection = self.product_db1
product_selection.activated.connect(self.pass_Net_Adap) # Passes active selection
global starting_page
starting_page = True
if starting_page:
starting_page = not starting_page
self.shortcut_search = QShortcut(QKeySequence('Ctrl+s'), self)
self.shortcut_search.activated.connect(self.pass_Net_Adap)
self.manufacturer_db1 = QComboBox()
self.assettag_db1 = QLineEdit()
self.package_db1 = QLineEdit()
self.page_db1_layout.addLayout(self.form_layout_db1)
self.page_db1.setLayout(self.page_db1_layout)
self.stackedLayout.addWidget(self.page_db1)
# -------------------------- #
# Secondary Mimisbrunnr #
# -------------------------- #
self.page_db2 = QWidget()
self.page_db2_layout = QVBoxLayout()
self.form_layout_db2 = QFormLayout()
self.item_db2_id = ""
self.item_db2_id_label = QLabel(lb_id+" " + self.item_db2_id)
self.page_db2_layout.addWidget(self.item_label_db2)
self.page_db2_layout.addWidget(self.item_db2_id_label)
self.description_db2 = QLineEdit()
self.form_layout_db2.addRow(lb_product, self.description_db2)
self.package_db2 = QLineEdit()
self.form_layout_db2.addRow(lb_make, self.package_db2)
self.location_db2 = QLineEdit()
self.form_layout_db2.addRow(
lb_location, self.location_db2)
self.product_db2 = QComboBox()
self.product_db2.addItem(lb_default_dropdown)
self.product_db2.addItem(lb_desktop, ["Black", "Blue", "Brown", "Green", "Grey", "Yellow", "White"])
self.product_db2.addItem(lb_dvr, ["Black", "Blue", "Brown", "Green", "Grey", "Yellow", "White"])
self.product_db2.currentIndexChanged.connect(self.updatemanufacturerInput_2)
self.form_layout_db2.addRow(lb_asset, self.product_db2)
self.manufacturer_db2 = QComboBox()
self.manufacturer_db2.addItems([lb_default_dropdown])
self.form_layout_db2.addRow(lb_reference, self.manufacturer_db2)
self.assigned_db2 = QLineEdit()
self.form_layout_db2.addRow(lb_assigned, self.assigned_db2)
self.status_db2 = QLineEdit()
self.form_layout_db2.addRow(lb_status, self.status_db2)
self.dates_db2 = QLineEdit()
self.form_layout_db2.addRow(lb_date, self.dates_db2)
self.notes_db2 = QLineEdit()
self.form_layout_db2.addRow(lb_info, self.notes_db2)
self.page_db2_layout.addLayout(self.form_layout_db2)
self.page_db2.setLayout(self.page_db2_layout)
self.stackedLayout.addWidget(self.page_db2)
self.db_id = 0
## FLOATING TOOLS
global tool_Scan_Mode
tool_Scan_Mode = QCheckBox('Scan Mode', self)
tool_Scan_Mode.move(80, 10)
tool_Scan_Mode.hide()
tool_Scan_Mode.stateChanged.connect(self.enable_Auto_Tag)
def focus_AT(self):
# global_Serial_Number.clearFocus()
try:
global_Asset_Tag.disconnect()
global_Asset_Tag.setFocus()
global_Asset_Tag.returnPressed.connect(btn_add.click)
        except Exception:
global_Asset_Tag.setFocus()
global_Asset_Tag.returnPressed.connect(btn_add.click)
def enable_Auto_Tag(self, state):
if state == Qt.CheckState.Checked.value:
global_Serial_Number.setFocus()
global_Serial_Number.returnPressed.connect(self.focus_AT)
else:
try:
global_Asset_Tag.disconnect()
global_Serial_Number.disconnect()
            except Exception:
pass
# When called, takes the input and checks which lb_drop# was selected
# and launches a unique follow-up window if additional information is required
def pass_Net_Adap(self):
if starting_page == False:
self.shortcut_search = QShortcut(QKeySequence('Ctrl+s'), self)
self.shortcut_search.activated.connect(btn_search.click)
if search_selection in (str(product_selection.currentText())):
self.manufacturer_db1.clear()
self.assettag_db1.clear()
self.manufacturer_db1.addItem(search_selection)
# calling method
self.UiComponents()
# showing all the widgets
self.show()
pass
elif lb_dvr in (str(product_selection.currentText())):
super().__init__()
# setting title
self.setWindowTitle("Python ")
# setting geometry
self.setGeometry(500, 350, 225, 90)
# calling method
self.UiComponents()
# showing all the widgets
self.show()
pass
        elif any(x in str(product_selection.currentText()) for x in (lb_chromebooks, lb_winlaptops, lb_desktop)):
super().__init__()
# setting title
self.setWindowTitle("Python ")
# setting geometry
self.setGeometry(500, 350, 250, 90)
# calling method
self.UiComponents()
# showing all the widgets
self.show()
pass
elif lb_locprinters in (str(product_selection.currentText())):
pass
elif lb_netprinters in (str(product_selection.currentText())):
super().__init__()
# setting title
self.setWindowTitle("Python ")
# setting geometry
self.setGeometry(500, 350, 225, 90)
# calling method
self.UiComponents()
# showing all the widgets
self.show()
pass
elif lb_toner in (str(product_selection.currentText())):
print(lb_netprinters)
super().__init__()
# setting title
self.setWindowTitle("Python ")
# setting geometry
self.setGeometry(500, 350, 225, 90)
# calling method
self.UiComponents()
# showing all the widgets
self.show()
pass
else:
pass
# The follow-up window, unique to each selection
def UiComponents(self):
# creating a combo box widget
self.line = QLineEdit(self)
self.nameLabel = QLabel(self)
if search_selection in (str(product_selection.currentText())):
print("UI Element")
self.find()
pass
elif lb_dvr in (str(product_selection.currentText())):
self.setWindowTitle("Enter IP Address")
self.nameLabel.setText('192.168.')
self.nameLabel.move(25, 25)
# setting geometry of combo box
self.line.setGeometry(85, 18, 120, 30)
            # creating a label to display the result
self.label = QLabel(self)
# setting geometry of the label
self.label.setGeometry(25, 50, 200, 30)
elif lb_netprinters in (str(product_selection.currentText())):
self.setWindowTitle("Enter IP Address")
self.nameLabel.setText('192.168.')
self.nameLabel.move(25, 25)
# setting geometry of combo box
self.line.setGeometry(85, 18, 120, 30)
            # creating a label to display the result
self.label = QLabel(self)
# setting geometry of the label
self.label.setGeometry(25, 50, 200, 30)
elif lb_locprinters in (str(product_selection.currentText())):
self.setWindowTitle("Enter Printer Purpose")
self.nameLabel.setText('Purpose: ')
self.nameLabel.move(20, 25)
# setting geometry of combo box
self.line.setGeometry(85, 18, 120, 30)
            # creating a label to display the result
self.label = QLabel(self)
# setting geometry of the label
self.label.setGeometry(25, 50, 200, 30)
elif lb_toner in (str(product_selection.currentText())):
self.setWindowTitle("Enter Toner Type")
self.nameLabel.setText("Toner Type:")
self.nameLabel.move(5, 25)
self.line.setGeometry(85, 18, 150, 30)
            # creating a label to display the result
self.label = QLabel(self)
# setting geometry of the label
self.label.setGeometry(25, 50, 200, 30)
        elif any(x in str(product_selection.currentText()) for x in (lb_chromebooks, lb_winlaptops, lb_desktop)):
self.setWindowTitle("Enter Service Tag")
self.nameLabel.setText("Service Tag:")
self.nameLabel.move(5, 25)
self.line.setGeometry(85, 18, 150, 30)
            # creating a label to display the result
self.label = QLabel(self)
# setting geometry of the label
self.label.setGeometry(25, 50, 200, 30)
if tutorial == QMessageBox.StandardButton.Yes:
simpleaudio.stop_all()
if lb_netprinters in (str(product_selection.currentText())) or lb_dvr in (str(product_selection.currentText())):
tyr_IP.play()
else:
tyr_serial.play()
self.line.returnPressed.connect(self.find)
# Inserts the information from the previous window, into our main window
def find(self):
# finding the content of current item in combo box
product_selection.disconnect()
btn_add.show()
search_bar.show()
btn_search.show()
search_bar_asset_tag.show()
btn_search_asset_tag.show()
search_bar_general.show()
btn_search_general.show()
btn_delete.show()
btn_clear.show()
btn_update.show()
self.manufacturer_db1.addItems([lb_default_dropdown])
self.form_layout_db1.addRow(lb_make, self.manufacturer_db1)
selected_product = (str(product_selection.currentText()))
user_text_input = self.line.text()
self.package_db1.close()
self.form_layout_db1.addRow(lb_asset, self.assettag_db1)
global global_Asset_Tag
global_Asset_Tag = self.assettag_db1
self.package_db1 = QLineEdit()
global global_Serial_Number
global_Serial_Number = self.package_db1
self.assigned_db1 = QComboBox()
        if any(x in selected_product for x in (lb_dvr, lb_netprinters, lb_locprinters)):
self.assigned_db1.addItem("To Realm")
self.assigned_db1.addItems(user_list)
else:
self.assigned_db1.addItems(user_list)
self.form_layout_db1.addRow(lb_assigned, self.assigned_db1)
self.location_db1 = QComboBox()
if lb_aesir in str(self.site_db1.currentText()):
self.location_db1.addItems(lb_locations_aesir)
elif lb_vanir in str(self.site_db1.currentText()):
self.location_db1.addItems(lb_locations_vanir)
elif lb_aesir not in str(self.site_db1.currentText()) and lb_vanir not in str(self.site_db1.currentText()):
self.location_db1.addItem("Midgard")
self.form_layout_db1.addRow(lb_location, self.location_db1)
self.status_db1 = QComboBox()
#self.status_db1.addItems([lb_default_dropdown])
self.status_db1.addItem(lb_deployed)
self.status_db1.addItem(lb_instock)
self.status_db1.addItem(lb_onorder)
self.status_db1.addItem(lb_oos_repair)
self.status_db1.addItem(lb_oos_obsolete)
self.form_layout_db1.addRow(lb_status, self.status_db1)
if lb_dvr in selected_product:
            # showing content on the screen through a label
self.label.setText("IP Address : " + "192.168." + user_text_input)
self.form_layout_db1.addRow("IP Address:", self.package_db1)
self.package_db1.insert("//192.168."+ user_text_input)
elif lb_netprinters in selected_product:
            # showing content on the screen through a label
self.label.setText("IP Address : " + "192.168." + user_text_input)
self.form_layout_db1.addRow("IP Address:", self.package_db1)
self.package_db1.insert("//192.168."+ user_text_input)
elif lb_locprinters in selected_product:
self.label.setText("Printer Purpose:" + user_text_input)
self.form_layout_db1.addRow("Purpose:", self.package_db1)
self.package_db1.insert("* "+ user_text_input)
elif lb_toner in selected_product:
GPR_37 = ["GPR-37", "GPR37", "37"]
GPR_38 = ["GPR-38", "GPR38", "38"]
if any(x in user_text_input.upper() for x in GPR_37):
user_text_input = "GPR-37"
elif any(x in user_text_input.upper() for x in GPR_38):
user_text_input = "GPR-38"
else:
pass
self.label.setText("Type: " + user_text_input)
self.form_layout_db1.addRow("Model of Toner:", self.package_db1)
self.package_db1.insert("Type: "+ user_text_input)
        elif any(x in selected_product for x in (lb_chromebooks, lb_winlaptops, lb_desktop)):
            # showing content on the screen through a label
user_text_input = user_text_input.upper()
self.label.setText("Service Tag : " + user_text_input)
self.form_layout_db1.addRow("Service Tag:", self.package_db1)
self.package_db1.insert("SN: " + user_text_input)
self.notes_db1 = QLineEdit()
global global_Info
global_Info = self.notes_db1
if "GPR" in user_text_input:
self.manufacturer_db1.clear()
self.manufacturer_db1.addItem("Canon")
self.assettag_db1.insert("N/A")
self.status_db1.clear()
for x in range(1, 10):
self.status_db1.addItem("Quantity: " + str(x))
if "GPR-37" in user_text_input:
self.notes_db1.insert("Compatibility: 8085 / 8095 / 8105")
elif "GPR-38" in user_text_input:
self.notes_db1.insert("Compatibility: 6075 / 6265 / 6025")
self.form_layout_db1.addRow(lb_info, self.notes_db1)
self.dates_db1 = QLineEdit()
self.dates_db1.insert(today)
self.form_layout_db1.addRow(lb_date, self.dates_db1)
self.notes_db1.returnPressed.connect(btn_add.click)
self.close()
# SETS ASSET TAG LINE IN FOCUS
self.assettag_db1.setFocus()
if tutorial == QMessageBox.StandardButton.Yes:
simpleaudio.stop_all()
tyr_initialize.play()
def updatemanufacturerInput(self, index):
self.manufacturer_db1.clear()
categories = self.product_db1.itemData(index)
if categories:
self.manufacturer_db1.addItems(categories)
def updatemanufacturerInput_2(self, index):
self.manufacturer_db2.clear()
categories = self.product_db2.itemData(index)
if categories:
self.manufacturer_db2.addItems(categories)
def switchPage(self):
self.stackedLayout.setCurrentIndex(self.pageCombo.currentIndex())
self.db_id = self.pageCombo.currentIndex()
return self.db_id
## TYR SYSTEM LOG #################################################
def AppLog(log, type, make, asset, serial):
if log == "main":
with open(tyr_log, 'a') as f:
if type == "start":
f.write(f'{user} started Tyr ::: {today}\n')
else:
f.write(f'{user} added a {type} ::: {today}\n')
f.write(f":: {make} - ")
f.write(f"{asset} - ")
f.write(f"{serial}\n")
## MIMIR IMPORT ###################################################
class mainWin(QMainWindow):
def __init__(self, parent = None):
super(mainWin, self).__init__(parent)
self.setupUI()
def setupUI(self):
self.setGeometry(0, 0, 800, 600)
self.setContentsMargins(10, 5, 10, 5)
self.lb = QTableWidget()
self.setCentralWidget(self.lb)
# self.create_toolbar()
self.csv_file = ""
self.csv_file_name = ""
def open_file(self):
fname,_ = QFileDialog.getOpenFileName(self, 'Open file', '',
"CSV Files (*.csv *.tsv *.txt);;All Files (*.*)")
if fname:
self.csv_file = fname
self.load_csv(self.csv_file)
self.statusbar.showMessage(f"{fname} loaded")
def save_file(self):
if self.lb.rowCount() < 1:
return
if self.csv_file != "":
file_name = self.csv_file
else:
file_name = "*.csv"
fname,_ = QFileDialog.getSaveFileName(self, 'Save file', file_name,
"CSV Files (*.csv *.tsv *.txt);;All Files (*.*)")
if fname:
self.save_csv(fname)
self.csv_file = fname
def save_csv(self, filename):
rowtext = ""
for row in range(self.lb.rowCount()-1):
for column in range(self.lb.columnCount()-1):
celltext = self.lb.item(row, column).text()
rowtext += f"{celltext}\t"
# ERROR MESSAGE
rowtext = rowtext.rstrip("\t")
rowtext += "\n"
with open(filename, "w") as f:
f.write(rowtext)
def load_csv(self, filename):
self.csv_text = open(filename, "r").read()
### count tab / comma
tab_counter = self.csv_text.splitlines()[0].count("\t")
comma_counter = self.csv_text.splitlines()[0].count(",")
if tab_counter > comma_counter:
self.lb.setColumnCount(tab_counter + 1)
delimiter = "\t"
else:
self.lb.setColumnCount(comma_counter + 1)
delimiter = ","
row = 0
for listrow in self.csv_text.splitlines():
self.lb.insertRow(row)
rowtext = listrow.split(delimiter)
column = 0
for cell in rowtext:
celltext = QTableWidgetItem(cell)
self.lb.setItem(row, column, celltext)
column += 1
row += 1
## MIMISBRUNNR ####################################################
if __name__ == "__main__":
mimir_exists = os.path.isfile(inventory_db)
    if mimir_exists:
        open(inventory_db, "r+").close()
    else:
        open(inventory_db, "w").close()
Mimisbrunnr_1.create_table_db1()
Mimisbrunnr_2.create_table_db2()
app = QApplication(sys.argv)
if platform.system() == "Windows":
app.setStyleSheet(qdarktheme.load_stylesheet())
if QDialog.accepted:
window = MainWindow()
window.show()
window.key = db_primary # Default Mimisbrunnr to load
window.load_data()
# NEW
win = mainWin()
win.setWindowTitle("CSV Example")
#win.show()
# /NEW
sys.exit(app.exec())
|
need4swede/ODIN
|
Tyr/Týr.py
|
Týr.py
|
py
| 98,781 |
python
|
en
|
code
| 0 |
github-code
|
50
|
72158899356
|
""" Module to adjust general SolydXK settings """
#!/usr/bin/env python3
import os
from os.path import exists, dirname, isdir
from utils import get_apt_force, is_package_installed, \
get_apt_cache_locked_program, get_debian_version
# --force-yes is deprecated in stretch
APT_FORCE = get_apt_force()
# Fix some programs:
# [package, what to fix, options(1), exec from debian version(2)
# (1): touch/mkdir/purge/install|owner:group|permissions
# (2): check /etc/debian_version or 0 = all
fix_progs = [['login', '/var/log/btmp', 'touch|root:utmp|600', 0],
['login', '/var/log/lastlog', 'touch|root:utmp|664', 0],
['login', '/var/log/faillog', 'touch|root:utmp|664', 0],
['login', '/var/log/wtmp', 'touch|root:utmp|664', 0],
['apache2', '/var/log/apache2', 'mkdir|root:adm|755', 0],
['mysql-client', '/var/log/mysql', 'mkdir|mysql:adm|755', 0],
['clamav', '/var/log/clamav', 'mkdir|clamav:clamav|755', 0],
['clamav', '/var/log/clamav/freshclam.log', 'touch|clamav:clamav|644', 0],
['samba', '/var/log/samba', 'mkdir|root:adm|755', 0],
['consolekit', '/var/log/ConsoleKit', 'mkdir|root:root|755', 0],
['exim4-base', '/var/log/exim4', 'mkdir|Debian-exim:adm|755', 0],
['lightdm', '/var/lib/lightdm/data', 'mkdir|lightdm:lightdm|755', 0],
['usbguard', '/etc/usbguard/rules.conf', 'touch|root:root|600', 0],
['ntpsec', '/var/log/ntpsec', 'mkdir|ntpsec:ntpsec|755', 0]]
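# e.g. the first 'login' entry recreates /var/log/btmp (touch) owned root:utmp
# with mode 600 whenever the login package is installed but the file is missing.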
try:
ver = get_debian_version()
for prog in fix_progs:
if ver >= prog[3] or prog[3] == 0:
if is_package_installed(prog[0]):
options = prog[2].split('|')
if options[0] == 'purge' or options[0] == 'install':
if not get_apt_cache_locked_program():
os.system(
f"apt-get {options[0]} {APT_FORCE} {prog[1]}")
elif options[0] == 'touch' and not exists(prog[1]):
dir_name = dirname(prog[1])
if not isdir(dir_name):
os.system(
f"mkdir -p {dir_name}; chown {options[1]} {dir_name}; chmod {options[2]} {dir_name}")
os.system(
f"touch {prog[1]}; chown {options[1]} {prog[1]}; chmod {options[2]} {prog[1]}")
elif options[0] == 'mkdir' and not isdir(prog[1]):
os.system(
f"mkdir -p {prog[1]}; chown {options[1]} {prog[1]}; chmod {options[2]} {prog[1]}")
except Exception as detail:
print(detail)
def get_info_line(par_name):
""" Return the info per line """
matches = [match for match in info if par_name in match]
return '' if not matches else matches[0]
if exists('/usr/share/solydxk/info'):
with open(file='/usr/share/solydxk/info', mode='r', encoding='utf-8') as f:
info = f.readlines()
codename = get_info_line("CODENAME")
edition = get_info_line("EDITION")
release = get_info_line("RELEASE")
distrib_id = get_info_line("DISTRIB_ID")
description = get_info_line("DESCRIPTION")
pretty_name = get_info_line("PRETTY_NAME")
home_url = get_info_line("HOME_URL")
support_url = get_info_line("SUPPORT_URL")
bug_report_url = get_info_line("BUG_REPORT_URL")
try:
# Restore LSB information
with open(file="/etc/lsb-release", mode="w", encoding="utf-8") as f:
f.writelines([distrib_id,
"DISTRIB_" + release,
"DISTRIB_" + codename,
"DISTRIB_" + description])
except Exception as detail:
print(detail)
try:
with open(file="/usr/lib/os-release", mode="w", encoding="utf-8") as f:
f.writelines([pretty_name,
codename.replace("CODENAME", "NAME"),
release.replace("RELEASE", "VERSION_ID"),
distrib_id.replace("DISTRIB_ID", "ID"),
release.replace("RELEASE", "VERSION"),
codename.replace("CODENAME", "VERSION_CODENAME"),
home_url,
support_url,
bug_report_url])
except Exception as detail:
print(detail)
try:
# Restore /etc/issue and /etc/issue.net
issue = description.replace("DESCRIPTION=", "").replace("\"", "")
with open(file="/etc/issue", mode="w", encoding="utf-8") as f:
f.writelines(issue.strip() + " \\n \\l\n")
with open(file="/etc/issue.net", mode="w", encoding="utf-8") as f:
f.writelines(issue)
except Exception as detail:
print(detail)
|
abalfoort/solydxk-system
|
usr/lib/solydxk/system/adjust.py
|
adjust.py
|
py
| 4,805 |
python
|
en
|
code
| 0 |
github-code
|
50
|
38349211457
|
import unittest
# returns an array from 1 to max_number with values according to the fizzbuzz rules
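# e.g. fizzbuzz(5) -> [1, 2, "Fizz", 4, "Buzz"]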
def fizzbuzz(max_number):
result = []
for n in range(1, max_number+1):
if (n % 5 == 0) and (n % 3 == 0):
result.append("FizzBuzz")
elif (n % 3 == 0):
result.append("Fizz")
elif (n % 5 == 0):
result.append("Buzz")
else:
result.append(n)
return result
###
### Tests
###
class FizzBuzz_Test(unittest.TestCase):
def test_fizzbuzz(self):
self.assertEqual(fizzbuzz(15), [1, 2, "Fizz", 4, "Buzz", "Fizz", 7, 8, "Fizz", "Buzz", 11, "Fizz", 13, 14, "FizzBuzz"])
unittest.main()
|
hackjoy/fizzbuzz
|
python/fizzbuzz.py
|
fizzbuzz.py
|
py
| 618 |
python
|
en
|
code
| 0 |
github-code
|
50
|
71316604955
|
import numpy as np
import torch
EPS = 1e-3
def torch_sgd_linear(dots: torch.tensor, batch_size, start=None, lr=1e-6, epoch_limit=100, method="SGD", log=False,
dtype=torch.float64):
if start is None:
start = [0., 0.]
n = len(dots)
    # fit the linear regression y = a*x + b, i.e. search for the parameters a and b
model = torch.nn.Linear(1, 1, dtype=dtype)
model.weight.data = torch.tensor([[float(start[0])]], dtype=dtype)
model.bias.data = torch.tensor([[float(start[1])]], dtype=dtype)
loss_fn = torch.nn.MSELoss(reduction='sum')
if method == 'SGD':
optimizer = torch.optim.SGD(model.parameters(), lr=lr)
elif method == 'Momentum':
optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=0.9)
elif method == 'Nesterov':
optimizer = torch.optim.SGD(model.parameters(), lr=lr, nesterov=True, momentum=0.9)
elif method == 'AdaGrad':
optimizer = torch.optim.Adagrad(model.parameters(), lr=lr)
elif method == 'RMSProp':
optimizer = torch.optim.RMSprop(model.parameters(), lr=lr)
elif method == 'Adam':
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
else:
raise RuntimeError("Unsupported method")
converged = False
way = [start]
for epoch in range(epoch_limit):
if converged:
break
if log:
print(f"Epoch {epoch + 1}\n-------------------------------")
dots_perm = torch.randperm(n)
for i in range((len(dots) + batch_size - 1) // batch_size):
indexes = dots_perm[i * batch_size: (i + 1) * batch_size]
shuffled_batch = dots[indexes]
x_batch, y_batch = shuffled_batch[:, 0], shuffled_batch[:, 1]
y_batch_pred = model(x_batch.unsqueeze(-1))
loss = loss_fn(y_batch_pred, y_batch.unsqueeze(-1))
optimizer.zero_grad()
loss.backward()
if all(abs(param.grad.item()) * lr < EPS for param in model.parameters()):
converged = True
break
optimizer.step()
ans_a, ans_b = model.weight.item(), model.bias.item()
way.append((ans_a, ans_b))
if log and i % 5 == 0:
loss, current = loss.item(), (i + 1) * len(x_batch)
print(f"loss: {loss:>7f} [{current:>5d}/{n:>5d}]")
return converged, np.array(way)
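# Usage sketch (illustrative, not from the original project): fit y = 2x + 1.
# The dots tensor must be float64 to match the default model dtype.
#   dots = torch.tensor([[0., 1.], [1., 3.], [2., 5.], [3., 7.]], dtype=torch.float64)
#   converged, way = torch_sgd_linear(dots, batch_size=2, lr=1e-2, method="Adam")
#   a, b = way[-1]   # approximate slope and intercept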
|
Marlesss/nonlinear-regression
|
pytorch_tool.py
|
pytorch_tool.py
|
py
| 2,426 |
python
|
en
|
code
| 0 |
github-code
|
50
|
22191118521
|
import numpy as np
from openmdao.api import ExplicitComponent
class BatteryCostComp(ExplicitComponent):
def setup(self):
self.add_input('kwh')
self.add_input('kwhcost')
self.add_input('quantity')
self.add_output('BatteryCost')
        self.declare_partials('BatteryCost', ['kwh', 'kwhcost', 'quantity'])
def compute(self, inputs, outputs):
kwh = inputs['kwh']
kwhcost = inputs['kwhcost']
quantity = inputs['quantity']
outputs['BatteryCost'] = kwh * kwhcost * quantity
def compute_partials(self, inputs, partials):
kwh = inputs['kwh']
kwhcost = inputs['kwhcost']
quantity = inputs['quantity']
partials['BatteryCost', 'quantity'] = kwh * kwhcost
partials['BatteryCost', 'kwh'] = quantity * kwhcost
partials['BatteryCost', 'kwhcost'] = kwh * quantity
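# Usage sketch (illustrative values; standard OpenMDAO driver code, not part of the component):
#   from openmdao.api import Problem
#   prob = Problem()
#   prob.model.add_subsystem('battery', BatteryCostComp(), promotes=['*'])
#   prob.setup()
#   prob['kwh'], prob['kwhcost'], prob['quantity'] = 100.0, 150.0, 2.0
#   prob.run_model()
#   prob['BatteryCost']   # 100 * 150 * 2 = 30000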
|
UAM-Team-Optimization-Problem/UAM-Team-Optimization
|
UAM_team_optimization/components/Economics/batterycost_comp.py
|
batterycost_comp.py
|
py
| 886 |
python
|
en
|
code
| 0 |
github-code
|
50
|
10832897608
|
import argparse
import constants
def get_train_args():
"""
Retrieves and parses the command line arguments provided by the user when
they run the training program from a terminal window. This function uses Python's
    argparse module to create and define these command line arguments.
This function returns these arguments as an ArgumentParser object.
Parameters:
None - simply using argparse module to create & store command line arguments
Returns:
parse_args() - data structure that stores the command line arguments object
"""
parser = argparse.ArgumentParser()
parser.add_argument("data_dir", type = str, help = "path to images i.e. 'flowers'")
parser.add_argument("--save_dir", type = str, default="checkpoints", help = "path to save model in i.e. 'checkpoints'")
parser.add_argument("--arch", type = str, default="vgg16", choices = constants.supported_arch_list, help = "CNN model architecture")
parser.add_argument("--learning_rate", type = float, default="0.001", help = "learning rate")
parser.add_argument("--epochs", type = int, default="15", help = "number of epochs")
parser.add_argument("--hidden_units", type = int, default="4096", help = "number of hidden layer units")
parser.add_argument("--gpu", action="store_true", dest="gpu", help = "train on GPU if one is available")
parser.add_argument("--continue_training", type = str, help = "path to a model to continue training it")
parser.add_argument("--save_threshold", type = float, default=0.75, help = "0.xx accuracy threshold to save model at")
parser.add_argument("--optimizer_name", type = str, default="adam", help = "optimizer to use for training model")
return parser.parse_args()
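# Example invocations (script name and paths are illustrative):
#   python train.py flowers --arch vgg16 --learning_rate 0.001 --epochs 15 --gpu
#   python train.py flowers --continue_training checkpoints/model.pth --save_threshold 0.8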
def get_predict_args():
"""
Retrieves and parses the command line arguments provided by the user when
they run the prediction program from a terminal window. This function uses Python's
    argparse module to create and define these command line arguments.
This function returns these arguments as an ArgumentParser object.
Parameters:
None - simply using argparse module to create & store command line arguments
Returns:
parse_args() - data structure that stores the command line arguments object
"""
parser = argparse.ArgumentParser()
parser.add_argument("input", type = str, help = "path to image")
parser.add_argument("checkpoint", type = str, help = "path to model checkpoint")
parser.add_argument("--topk", type = int, default=3, help = "top K predicted classes")
parser.add_argument("--category_names", type = str, default="cat_to_name.json", help = "filename of mapping from classes to names")
parser.add_argument("--gpu", action="store_true", dest="gpu", help = "perform inference on GPU if one is available")
return parser.parse_args()
|
felixglush/FlowerImageClassifier
|
get_input_args.py
|
get_input_args.py
|
py
| 2,889 |
python
|
en
|
code
| 0 |
github-code
|
50
|
13777409153
|
# B - Counterclockwise Rotation
# https://atcoder.jp/contests/abc259/tasks/abc259_b
# NOTE: Uses the coordinate rotation formula. https://keisan.casio.jp/exec/system/1496883774
import math
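# Counterclockwise rotation of (a, b) by angle θ about the origin:
#   x = a*cos(θ) - b*sin(θ)
#   y = a*sin(θ) + b*cos(θ)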
a, b, d = map(int, input().split())
rad = math.radians(d)
x = a * math.cos(rad) - b * math.sin(rad)
y = a * math.sin(rad) + b * math.cos(rad)
print(x, y)
|
ryu-0729/AtCoder-Beginners-Selection
|
AimingForBrownCoders/atcoder/b_counterclockwise-rotation.py
|
b_counterclockwise-rotation.py
|
py
| 343 |
python
|
en
|
code
| 0 |
github-code
|
50
|
26437267464
|
import re
pattern = r"(#|\|)(?P<item>[A-Za-z\s]+)\1(?P<date>[0-9]{2}/[0-9]{2}/[0-9]{2})\1(?P<calories>[0-9][0-9]{0,3}|10000)\1"
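# Pattern breakdown: group 1 is the delimiter (# or |) and each \1 demands the same
# delimiter for the whole entry: <d>item<d>dd/dd/dd<d>calories<d>, where calories
# is 0-9999 (one to four digits) or exactly 10000.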
string = input()
data = re.finditer(pattern, string)
items = []
total_calories = 0
for match in data:
result = match.groupdict()
items.append(result)
total_calories += int(result["calories"])
days = total_calories // 2000
print(f"You have food to last you for: {days} days!")
for item in items:
print(f"Item: {item['item']}, Best before: {item['date']}, Nutrition: {item['calories']}")
|
konsatanasoff/python-fundameltals-2020
|
final_exam_preparations/ad_astra.py
|
ad_astra.py
|
py
| 564 |
python
|
en
|
code
| 0 |
github-code
|
50
|
73445158556
|
# coding: utf-8
# In[4]:
import pylab as plt
import numpy as np
import pickle
# In[10]:
with open('abs_sp', 'rb') as handle:
abs_spectrum = pickle.load(handle)
with open('emis_sp', 'rb') as handle:
emis_spectrum = pickle.load(handle)
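# Each pickle is assumed to hold an (x, y) pair of arrays:
# index 0 is the frequency axis, index 1 the spectral intensity.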
# In[13]:
# plot the spectra
ft = 14
plt.figure(figsize=(3.5, 2.5))
plt.plot(abs_spectrum[0], abs_spectrum[1], label = 'abs')
plt.plot(emis_spectrum[0], emis_spectrum[1], label = 'emis')
plt.xlim(0.0, 2.0)
plt.ylim(0, 1)
plt.legend(loc = [0.6, 0.62], fontsize=14, handlelength = 1)
plt.xlabel('$\omega_c$', size=ft)
plt.xticks(fontsize = ft, rotation=0)
plt.yticks(fontsize = ft, rotation=0)
x = [0.0, 2.0]
y = [0.0, 1.0]
plt.xticks(np.arange(min(x), max(x)+0.1, 0.4))
plt.yticks(np.arange(min(y), max(y)+0.1, 0.2))
'''
plt.savefig('abs_emis__S' + str(S).replace(".","") +
'__N_' + str(N) + '.svg', bbox_inches='tight', transparent=True)
'''
plt.show(block=False)
|
ArtemStrashko/Organic_Polariton_Lasing
|
data_and_scripts_producing_figures/1_Plotting_abs_emis_spectra__fig1d.py
|
1_Plotting_abs_emis_spectra__fig1d.py
|
py
| 980 |
python
|
en
|
code
| 2 |
github-code
|
50
|
16106421915
|
from flask import Flask, render_template, request
from flask_mysqldb import MySQL
app = Flask(__name__)
app.config['MYSQL_USER'] = 'sql9365288'
app.config['MYSQL_PASSWORD'] = 'RLtjMbHeDA'
app.config['MYSQL_HOST'] = 'sql9.freemysqlhosting.net'
app.config['MYSQL_DB'] = 'sql9365288'
mysql= MySQL(app)
## home page is used to create tables when the server is first run.
## Commented since the sql database is already connected.
@app.route("/")
def home():
cur = mysql.connection.cursor()
# cur.execute('''
# CREATE TABLE individuals (id INTEGER AUTO_INCREMENT PRIMARY KEY,
# gender VARCHAR(8), date_of_birth DATE ,social_security VARCHAR(9) UNIQUE,smoking_status VARCHAR(5),
# allergies VARCHAR(100),medical_conditions VARCHAR(100))''')
# cur.execute('''
# CREATE TABLE events (event_id INTEGER AUTO_INCREMENT PRIMARY KEY,
# user_id INTEGER,
# date_of_incidence DATE, type_of_issue VARCHAR(50) ,billed_amount DECIMAL(8,2),covered_amount DECIMAL(8,2),
# FOREIGN KEY (user_id) REFERENCES individuals(id))''')
# cur.execute('''DROP TABLE events''')
# cur.execute('''DROP TABLE individuals''')
return render_template("home.html")
## Used to add individuals along with their details.
##Routes to: Individual.html to display the id of individual added
@app.route("/AddIndividuals", methods=['GET','POST'])
def AddIndividuals():
if request.method== "POST":
result=request.form
gender=result['gender']
date_of_birth=result['date_of_birth']
social_security=result['social_security']
smoking_status=result['smoking_status']
allergies=result['allergies']
medical_conditions=result['medical_conditions']
cur = mysql.connection.cursor()
try:
cur.execute("INSERT INTO individuals (gender,date_of_birth,social_security,smoking_status,allergies,medical_conditions) VALUES(%s,%s,%s,%s,%s,%s)",(gender,date_of_birth,social_security,smoking_status,allergies,medical_conditions))
        except Exception:
return "Cannot Add Individual"
mysql.connection.commit()
cur.close()
cur = mysql.connection.cursor()
try:
addedIndividual= cur.execute("SELECT id FROM individuals ORDER BY id DESC LIMIT 1")
except (mysql.connection.Error, mysql.connection.Warning) as e:
print(e)
if addedIndividual > 0:
addedIndividual= cur.fetchone()
cur.close()
return render_template('Individual.html', addedIndividual=addedIndividual)
return render_template("AddIndividuals.html")
@app.route("/Individual")
def Individual():
return render_template("Individual.html")
## Used to add events along with their details.
## events are added per individual(policy holder)
##Routes to: Event.html to display the id of individual added
@app.route("/AddEvents", methods=['GET','POST'])
def AddEvents():
if request.method== "POST":
result=request.form
user_id=result['user_id']
date_of_incidence=result['date_of_incidence']
type_of_issue=result['type_of_issue']
billed_amount=result['billed_amount']
covered_amount=result['covered_amount']
cur = mysql.connection.cursor()
try:
cur.execute("INSERT INTO events (user_id,date_of_incidence,type_of_issue,billed_amount,covered_amount) VALUES(%s,%s,%s,%s,%s)",(user_id,date_of_incidence,type_of_issue,billed_amount,covered_amount))
        except Exception:
return "Cannot Add Event: Check if user id is present"
mysql.connection.commit()
cur.close()
cur = mysql.connection.cursor()
try:
addedEvent= cur.execute("SELECT event_id FROM events ORDER BY event_id DESC LIMIT 1")
except (mysql.connection.Error, mysql.connection.Warning) as e:
print(e)
if addedEvent > 0:
addedEvent= cur.fetchone()
cur.close()
return render_template('Event.html', addedEvent=addedEvent)
return render_template("AddEvents.html")
@app.route("/Event")
def Event():
return render_template("Event.html")
## Display all the individuals in the database along with their details
@app.route("/AllIndividuals")
def AllIndividuals():
cur= mysql.connection.cursor()
try:
resultIndividuals= cur.execute("SELECT * FROM individuals")
except (mysql.connection.Error, mysql.connection.Warning) as e:
print(e)
if resultIndividuals > 0:
resultIndividuals= cur.fetchall()
return render_template("AllIndividuals.html",resultIndividuals=resultIndividuals)
return "Individuals Table is Empty"
## Display all the events in the database along with their details
@app.route("/AllEvents")
def AllEvents():
cur= mysql.connection.cursor()
try:
resultEvents= cur.execute("SELECT * FROM events")
except (mysql.connection.Error, mysql.connection.Warning) as e:
print(e)
if resultEvents > 0:
resultEvents= cur.fetchall()
return render_template("AllEvents.html",resultEvents=resultEvents)
return "Events Table is Empty"
## Display average age of all individuals in the database
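## Age in SQL: YEAR(NOW()) - YEAR(date_of_birth), minus 1 if the MMDD of the
## birthday has not yet occurred this year (the DATE_FORMAT comparison yields 1/0).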
@app.route("/AverageAge")
def AverageAge():
cur= mysql.connection.cursor()
try:
resultAverageAge= cur.execute("SELECT CAST( AVG((YEAR(NOW()) - YEAR(date_of_birth) - (DATE_FORMAT(date_of_birth, '%m%d') < DATE_FORMAT(NOW(), '%m%d')))) AS DECIMAL(8,0)) as avg FROM individuals")
except (mysql.connection.Error, mysql.connection.Warning) as e:
print(e)
if resultAverageAge > 0:
resultAverageAge= cur.fetchall()
print(resultAverageAge)
return render_template("AverageAge.html",resultAverageAge=resultAverageAge)
return "Individuals Table is Empty: No Age Information"
## Display Sum of coveregAmount all claims in the database
@app.route("/TotalCoveredAmount")
def TotalCoveredAmount():
cur= mysql.connection.cursor()
try:
resultTotalCoveredAmount= cur.execute("SELECT SUM(covered_amount) as TotalCoveredAmount FROM events")
except (mysql.connection.Error, mysql.connection.Warning) as e:
print(e)
if resultTotalCoveredAmount > 0:
resultTotalCoveredAmount= cur.fetchall()
print(resultTotalCoveredAmount)
return render_template("TotalCoveredAmount.html",resultTotalCoveredAmount=resultTotalCoveredAmount)
return "Event Table is Empty: No Covered Amount Information"
## displays number of claims per year.
## year is determined from date_of_incidence
@app.route("/ClaimsPerYear")
def ClaimsPerYear():
cur= mysql.connection.cursor()
try:
resultClaimsPerYear= cur.execute("SELECT COUNT(*),YEAR(date_of_incidence) FROM events GROUP BY YEAR(date_of_incidence) ")
except (mysql.connection.Error, mysql.connection.Warning) as e:
print(e)
if resultClaimsPerYear > 0:
resultClaimsPerYear= cur.fetchall()
print(resultClaimsPerYear)
return render_template("ClaimsPerYear.html",resultClaimsPerYear=resultClaimsPerYear)
return "Event Table is Empty: No Claims Information"
##Display all the events with respect to the unique id of policy holder
@app.route("/EventPerUser", methods=['GET','POST'])
def EventPerUser():
if request.method== "POST":
result=request.form
user_id=result['user_id']
cur = mysql.connection.cursor()
try:
try:
EventPerUser= cur.execute("SELECT * FROM events where user_id = %s",[user_id])
            except Exception:
return None
if EventPerUser > 0:
EventPerUser= cur.fetchall()
return render_template("EventPerUserDetails.html",EventPerUser=EventPerUser)
return "User id does not esist"
finally:
cur.close()
return render_template("EventPerUser.html")
if __name__ == "__main__":
app.run(debug=True)
|
naveendayakar/health-insurance-project
|
app.py
|
app.py
|
py
| 7,972 |
python
|
en
|
code
| 0 |
github-code
|
50
|
23334035042
|
#!/usr/bin/python3
import argparse
import logging
import binpack
import uuid
import statistics
import time
import os.path
import pprint
logr = logging.getLogger()
console_handler = logging.StreamHandler()
formatter = logging.Formatter( '%(levelname)s [%(filename)s %(lineno)d] %(message)s' )
console_handler.setFormatter( formatter )
logr.addHandler( console_handler )
logr.setLevel( logging.WARNING )
def process_cmdline():
desc = 'Split filelist based on thresholds.'
parser = argparse.ArgumentParser( description=desc )
parser.add_argument( '-s', '--size_max', type=int,
help='Max size, in bytes, of sum of all file sizes in each output file' )
parser.add_argument( '-n', '--numfiles_max', type=int,
help='Max number of files in each output file' )
parser.add_argument( '-o', '--outdir',
help='Output directory' )
parser.add_argument( 'infile', type=argparse.FileType('r') )
parser.add_argument( '--with_summary', action='store_true' )
parser.add_argument( '-v', '--verbose', action='store_true' )
parser.add_argument( '-d', '--debug', action='store_true' )
group_sep = parser.add_mutually_exclusive_group()
group_sep.add_argument( '-F', '--field_sep' )
group_sep.add_argument( '-0', '--null_sep', action='store_true' )
parser.set_defaults(
size_max = 1073741824,
numfiles_max = 1048576,
outdir = '.',
field_sep = None
)
args = parser.parse_args()
if args.null_sep:
args.field_sep = '\x00'
if args.verbose:
logr.setLevel( logging.INFO )
if args.debug:
logr.setLevel( logging.DEBUG )
if not os.path.isdir( args.outdir ):
raise UserWarning( "Output directory '{0}' does not exist".format( args.outdir ) )
return args
def run():
args = process_cmdline()
outfn_count = 1
active_bins = {}
donelist = []
final_bins = {}
linecount = 0
starttime = time.time()
# count line in input
total_linecount = sum( 1 for line in args.infile )
args.infile.seek(0)
# PROCESS INPUT
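    # First-fit packing: each (size, filename) item goes into the first active bin
    # that accepts it; bins that reach their size/count limit move to final_bins.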
for line in args.infile:
logr.debug( "Processing line: {0}".format( line ) )
parts = line.strip().split( args.field_sep, 1 )
logr.debug( pprint.pformat( parts ) )
item = binpack.File( filename=parts[1], size=int( parts[0] ) )
# Try to fit into an existing bin
for key, bin in active_bins.items():
            if bin.insert( item ):
                if bin.is_full():
                    final_bins[ key ] = bin
                    donelist.append( key )
                break
else:
# Doesn't fit in any existing bins, make a new one
newbin = binpack.Bin( maxsize=args.size_max, maxcount=args.numfiles_max )
if not newbin.insert( item ):
raise UserWarning( 'Failed to insert item into bin: {0}'.format( item ) )
active_bins[ uuid.uuid4() ] = newbin
logr.debug( "New bin: {0}".format( newbin ) )
# Remove full bins from active list
for k in donelist:
logr.debug( "Full bin: {0}".format( active_bins[k] ) )
del active_bins[ k ]
donelist = []
# Progress report
linecount += 1
if linecount % 100000 == 0:
elapsed = time.time() - starttime
line_rate = linecount / elapsed
eta = ( total_linecount - linecount ) / line_rate
bincount = len( active_bins )
logr.info( "Lines:{L} ActiveBins:{B} Secs:{S:2.0f} Rate:{R:5.0f} ETA:{E:3.1f}".format(
L=linecount,
S=elapsed,
R=line_rate,
E=eta,
B=bincount ) )
# Create final bin dict
endtime = time.time()
final_bins.update( active_bins )
bins = final_bins
# SAVE BINS TO FILES and SUMMARIZE BINS
sizes = []
lengths = []
percents_full = []
for key, bin in bins.items():
sizes.append( bin.size )
lengths.append( len( bin.items ) )
percents_full.append( float( bin.size ) / bin.maxsize * 100 )
with open( "{0}/{1}.filelist".format( args.outdir, key ), 'w' ) as f:
for fn in bin:
f.write( "{0}\n".format( fn ) )
totalbins = len( bins )
if len( sizes ) != totalbins:
raise UserWarning( "num sizes doesn't match num bins" )
if len( lengths ) != totalbins:
raise UserWarning( "num lengths doesn't match num bins" )
if args.with_summary:
print( "Runtime: {0:2.0f} secs".format( endtime - starttime ) )
print( "Total number of bins: {0}".format( totalbins ) )
# Sizes
print( "SIZES" )
print( "Max: {0}".format( max( sizes ) ) )
print( "Min: {0}".format( min( sizes ) ) )
print( "PERCENT FULL STATS" )
for stat in [ "mean", "median", "pstdev", "pvariance" ]:
f = getattr( statistics, stat )
print( "{0}: {1:3.2f}".format( stat.title(), f( percents_full ) ) )
# Lenths
print( "LENGTH STATS" )
print( "Max: {0}".format( max( lengths ) ) )
print( "Min: {0}".format( min( lengths ) ) )
for stat in [ "mean", "median", "pstdev", "pvariance" ]:
f = getattr( statistics, stat )
print( "{0}: {1:3.2f}".format( stat.title(), f( lengths ) ) )
print( "Num 1-length bins: {0}".format( lengths.count(1) ) )
if __name__ == '__main__':
run()
|
ncsa/pdbkup
|
bin/split_filelist.py
|
split_filelist.py
|
py
| 5,466 |
python
|
en
|
code
| 0 |
github-code
|
50
|
18725053601
|
print('\033[36m~' * 7)
print('PURCHASES')
print('~' * 7)
n = int(input('''\033[31m[1]\033[m MICROWAVE
\033[31m[2]\033[m BICYCLE
\033[1mEnter the number of the product you want:\033[m'''))
if n == 1:
    print('The microwave costs R$500.00!')
elif n == 2:
    print('The bicycle costs R$350.00!')
else:
    print('Invalid option!')
print('\033[36m~~'*10)
print('PAYMENT METHODS')
print('~~'*10)
a = int(input('''\033[33m[1]\033[m UPFRONT (CASH/CHECK)
\033[33m[2]\033[m UPFRONT (CARD)
\033[33m[3]\033[m 2 CARD INSTALLMENTS
\033[33m[4]\033[m 3 OR MORE CARD INSTALLMENTS (UP TO 10)
\033[1mWhich payment method do you want?\033[m'''))
if a == 1:
    if n == 1:
        print('You got a 10% discount! Pay R$450.00.')
    if n == 2:
        print('You got a 10% discount! Pay R$315.00.')
if a == 2:
    if n == 1:
        print('You got a 5% discount! Pay R$475.00.')
    if n == 2:
        print('You got a 5% discount! Pay R$332.50.')
if a == 3:
    if n == 1:
        print('No discount! Pay 2 installments of R$250.00.')
    if n == 2:
        print('No discount! Pay 2 installments of R$175.00.')
if a == 4:
    v = int(input('\033[1mHow many installments do you want?\033[m'))
    y = 420.00 / v
    x = 600.00 / v
    if 3 <= v <= 10:
        if n == 1:
            print('This payment method carries 20% interest! Each installment will be R${:.2f}'.format(x))
        if n == 2:
            print('This payment method carries 20% interest! Each installment will be R${:.2f}'.format(y))
    else:
        print('Invalid option!')
|
luhpazos/Exercicios-Python
|
Pacote download/Desafios/44.Gerenciador de Pagamentos.py
|
44.Gerenciador de Pagamentos.py
|
py
| 1,772 |
python
|
pt
|
code
| 0 |
github-code
|
50
|
3797541546
|
import RPi.GPIO as GPIO
import httplib2
import socks
import sys,os
import time
from datetime import datetime
import re
import linecache
from apiclient.discovery import build
from oauth2client import tools, client, file
from oauth2client.file import Storage
from oauth2client.client import AccessTokenRefreshError
from oauth2client.client import OAuth2WebServerFlow
from apscheduler.schedulers.background import BackgroundScheduler #this will let us check the calender on a regular interval
# argparse module needed for flag settings used in oaut2 autetication process
# by tools.run_flow procedure
try:
import argparse
flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
flags = None
currPath = os.path.dirname(os.path.realpath(__file__)) + '/'
#LOG ON LOGFILE status
def logStatus(text):
with open("/var/log/gcalendar/status.log","a+") as f:
f.write(str(datetime.now())+" "+text+"\n")
#LOG ON LOGFILE error
def logError():
exc_type, exc_obj, tb = sys.exc_info()
f = tb.tb_frame
lineno = tb.tb_lineno
filename = f.f_code.co_filename
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, f.f_globals)
text = "EXCEPTION IN (" + str(filename) + ", LINE " + str(lineno) + " '" + str(line.strip()) + "'):" + str(exc_obj);
with open("/var/log/gcalendar/error.log","a+") as f:
f.write(str(datetime.now())+" "+text+"\n")
# The scope URL for read/write access to a user's calendar data
SCOPES = 'https://www.googleapis.com/auth/calendar'
# Replace CAL_ID with your current google calendar Id
CAL_ID = '[email protected]'
# proxy settings to be used in case we are under firewall
# httpclient must be also used when calling tools.run_flow
#
# Uncomment the following 4 lines and fill the PROXY_IP & PROXY_PORT vars
# in case you are using a proxy
#PROXY_IP='xxx.xxx.xxx.xxx'
#PROXY_PORT=xxxx
#socks.setdefaultproxy(socks.PROXY_TYPE_HTTP, PROXY_IP, PROXY_PORT)
#socks.wrapmodule(httplib2)
# Create an httplib2.Http object to handle our HTTP requests
# httpclient must be also used when calling tools.run_flow in case of proxy usage
httpclient = httplib2.Http()
# Create a Storage object. This object holds the credentials that your
# application needs to authorize access to the user's data. The name of the
# credentials file is provided. If the file does not exist, it is
# created. This object can only hold credentials for a single user, so
# as-written, this script can only handle a single user.
store = file.Storage(currPath + 'storage.json')
print('\nGetting calendar credentials (Oauth2 authorization process)')
logStatus('\nGetting calendar credentials (Oauth2 authorization process)')
# The get() function returns the credentials for the Storage object. If no
# credentials were found, None is returned.
creds = store.get()
if not creds or creds.invalid:
# Create a flow object. This object holds the client_id, client_secret, and
# scope. It assists with OAuth 2.0 steps to get user authorization and
# credentials.
print('No Credentials were found\n Starting OAuth Process to get them ...')
logStatus('No Credentials were found\n Starting OAuth Process to get them ...')
flow = client.flow_from_clientsecrets(currPath + 'client_secret.json',SCOPES)
# If no credentials are found or the credentials are invalid due to
# expiration, new credentials need to be obtained from the authorization
# server. The oauth2client.tools.run_flow() function attempts to open an
# authorization server page in your default web browser. The server
# asks the user to grant your application access to the user's data.
# If the user grants access, the run_flow() function returns new credentials.
# The new credentials are also stored in the supplied Storage object,
# which updates the credentials.dat file.
creds = tools.run_flow(flow,store,flags,httpclient) \
if flags else tools.run(flow,store,httpclient)
else:
print('Valid Credentials were found...')
logStatus('Valid Credentials were found...')
# authorize http object
# using the credentials.authorize() function.
print('Authorizing...')
logStatus('Authorizing...')
httpclient = creds.authorize(httpclient)
print('...Done\n')
logStatus('...Done\n')
# The apiclient.discovery.build() function returns an instance of an API service
# object that can be used to make API calls. The object is constructed with
# methods specific to the calendar API. The arguments provided are:
# name of the API ('calendar')
# version of the API you are using ('v3')
# authorized httplib2.Http() object that can be used for API calls
service = build('calendar', 'v3', http=httpclient)
# settings for GPIOs
GPIO.setmode(GPIO.BCM)
# init list with pin numbers
pinList = [2, 3, 4, 5, 6, 7, 8, 9]
# loop through pins and set mode and state to 'low'
for i in pinList:
GPIO.setup(i, GPIO.OUT)
GPIO.output(i, GPIO.HIGH)
def runEvent(event,duration):
print('Event: %s scheduled to run at this time'%event.get('summary','No Summary'))
#logStatus('Event: %s scheduled to run at this time'%event.get('summary','No Summary'))
#managing Google calendar event
curr_event_descr = event.get('description','NO DESCRIPTION')
if re.search('.*--DONE--.*',curr_event_descr):
print('\tevent already managed')
logStatus('\tevent already managed')
else:
event['description'] = curr_event_descr + '\n--DONE--'
updated_event = service.events().update(calendarId=CAL_ID, eventId=event['id'], body=event).execute()
print('\trun event for %s seconds...'%duration)
logStatus('\trun event for %s seconds...'%duration)
#managing physical IO PINs
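    # Event summaries are expected to look like "GPIO-<bcm_pin>-<ON|OFF>",
    # e.g. "GPIO-4-ON" drives BCM pin 4 low for the event's duration
    # (the outputs are wired active-low: HIGH at rest, LOW while on).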
if re.search('.*GPIO*',event.get('summary','No Summary')):
print('\tGPIO event')
logStatus('\tGPIO event')
res=event['summary'].split('-')
gpio=res[1]
op=res[2]
        print('\tIO Id %s, operation %s'%(gpio,op))
        logStatus('\tIO Id %s, operation %s'%(gpio,op))
if op.upper() == 'ON':
GPIO.output(int(gpio), GPIO.LOW)
time.sleep(duration)
GPIO.output(int(gpio), GPIO.HIGH)
else:
GPIO.output(int(gpio), GPIO.HIGH)
time.sleep(duration)
GPIO.output(int(gpio), GPIO.LOW)
def todayEvent(event,currdate):
#check if event is scheduled for current day
res = False
dateStart = event.get('start').get('date','NODATE')
dateTimeStart = event.get('start').get('dateTime','NOTIME')
#print ('Date Start: %s'%dateStart)
#print ('Time Start: %s'%dateTimeStart)
if dateTimeStart != 'NOTIME':
#print ('valid Start Time found: %s'%dateTimeStart)
if dateTimeStart.split('T')[0] == currdate: return True
if dateStart != 'NODATE':
#print ('valid start Date found %s'%dateStart)
if dateStart == currdate: return True
return res
def manageEvent(event):
#manage Event scheduled for current day
if event.get('start').get('dateTime','NOTIME') == 'NOTIME':
#the event is a full day event
runEvent(event,86400)
else:# the event is scheduled for a particular start time and duration
#check if we have to run it (based on starttime comparation)
ts = time.strptime(event.get('start').get('dateTime').split('+')[0], '%Y-%m-%dT%H:%M:%S')
te = time.strptime(event.get('end').get('dateTime').split('+')[0], '%Y-%m-%dT%H:%M:%S')
duration = time.mktime(te)-time.mktime(ts)
lt = time.localtime()
if lt.tm_hour == ts.tm_hour and time.mktime(ts) <= time.mktime(lt) and time.mktime(te) > time.mktime(lt):
runEvent(event,duration)
#for i in range(lt.tm_min-1,lt.tm_min):
# if ts.tm_min == i:
# runEvent(event,duration)
# break
#else:
#print('scheduled starting minute not corresponding, skipping event ...')
#else:
#print('scheduled starting hour not corresponding, skipping event ...')
def myloop():
#print('\n\n\nGetting Calendar event list...\n')
try:
# The Calendar API's events().list method returns paginated results, so we
# have to execute the request in a paging loop. First, build the
# request object. The arguments provided are:
# primary calendar for user
currdate=time.strftime('%Y-%m-%d')# get current date
# Getting Event list starting from current day
request = service.events().list(calendarId=CAL_ID,timeMin=currdate+'T00:00:00Z')
# Loop until all pages have been processed.
while request != None:
# Get the next page.
response = request.execute()
# Accessing the response like a dict object with an 'items' key
# returns a list of item objects (events).
print('\nCurrent time: %s'%time.strftime('%Y-%m-%dT%H:%M'))
logStatus('')
for event in response.get('items', []):
# The event object is a dict object with a 'summary' key.
#print ('\nEvent Summary : %s\n'%repr(event.get('summary', 'NO SUMMARY')))
if todayEvent(event,currdate):
manageEvent(event)
#else:
#print('NOT Today Event, skipping ...')
#print ('Start Time: %s \n'%repr(event.get('start','NO DATE').get('dateTime')))
#print ('End Time at: %s \n'%repr(event.get('end','NO DATE').get('dateTime')))
# Get the next request object by passing the previous request object to
# the list_next method.
request = service.events().list_next(request, response)
except AccessTokenRefreshError:
# The AccessTokenRefreshError exception is raised if the credentials
# have been revoked by the user or they have expired.
        print ('The credentials have been revoked or expired, please re-run '
            'the application to re-authorize')
        logStatus ('The credentials have been revoked or expired, please re-run '
            'the application to re-authorize')
def loopRequest():
#print('looping...')
myloop()
if __name__ == '__main__':
#loopRequest()
scheduler = BackgroundScheduler(standalone=True)
scheduler.add_job(loopRequest, 'interval', seconds=60, id='loopRequest_id',max_instances=8)
    scheduler.start() #runs the program indefinitely, polling on a 1-minute interval
print('Start main loop polling request... ( 1 minute interval)')
print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))
logStatus('Start main loop polling request... ( 1 minute interval)')
try:
# This is here to simulate application activity (which keeps the main thread alive).
while True:
time.sleep(2)
except (KeyboardInterrupt, SystemExit):
# Not strictly necessary if daemonic mode is enabled but should be done if possible
scheduler.shutdown()
GPIO.cleanup()
print ("Good bye!")
logStatus ("Good bye!")
|
flavioipp/Gcalendar
|
daemon/gcalendar.py
|
gcalendar.py
|
py
| 10,794 |
python
|
en
|
code
| 0 |
github-code
|
50
|
39796606309
|
device = "cuda"
import torch
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('t5-small')
# Load model.bin
print("Please enter the input text:")
input_text = input()
model = torch.load(r"D:\DL5-SmartSense\output-model\output\final\pytorch_model.bin")
model.to(device)
model.eval()
with torch.no_grad():
tokenized_text = tokenizer(input_text, truncation=True, padding=True, return_tensors='pt')
source_ids = tokenized_text['input_ids'].to(device, dtype = torch.long)
source_mask = tokenized_text['attention_mask'].to(device, dtype = torch.long)
generated_ids = model.generate(
input_ids = source_ids,
attention_mask = source_mask,
max_length=512,
num_beams=5,
repetition_penalty=1,
length_penalty=1,
early_stopping=True,
no_repeat_ngram_size=2
)
pred = tokenizer.decode(generated_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=True)
print("\noutput:\n" + pred)
|
sbthycode/DL5-SmartSense
|
src/inference.py
|
inference.py
|
py
| 975 |
python
|
en
|
code
| 0 |
github-code
|
50
|
26799336116
|
import os
import requests
TELEGRAM_BOT_TOKEN = os.getenv('TELEGRAM_BOT_TOKEN')
TELEGRAM_CHAT_ID = os.getenv('TELEGRAM_CHAT_ID')
def send(msg, pictures=[]):
if isinstance(pictures, list) and len(pictures) > 0:
telegram_url = 'https://api.telegram.org/bot{0}/sendPhoto'.format(
TELEGRAM_BOT_TOKEN,
)
        for idx, pic_path in enumerate(pictures):
            # Close each file handle after upload; only the first photo carries the caption.
            with open(pic_path, 'rb') as photo:
                res = requests.post(telegram_url, files={
                    'photo': photo,
                }, data = {
                    'chat_id' : TELEGRAM_CHAT_ID,
                    'caption' : msg if idx <= 0 else None,
                })
    else:
        telegram_url = 'https://api.telegram.org/bot{0}/sendMessage'.format(
            TELEGRAM_BOT_TOKEN,
        )
        # requests URL-encodes the query parameters, so special characters in msg are safe.
        res = requests.get(telegram_url, params={'chat_id': TELEGRAM_CHAT_ID, 'text': msg})
if __name__ == '__main__' :
import sys
send(sys.argv[1])
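
# Illustrative usage (hypothetical file path): send a captioned photo.
#   send("Smoke report for today", ["smoke_map.png"])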
|
ApaRiauSedangBerasap/bot
|
send_telegram.py
|
send_telegram.py
|
py
| 914 |
python
|
en
|
code
| 0 |
github-code
|
50
|
38138119166
|
from IPython.display import clear_output
def display_board(board):
clear_output()
print(board[7] + '|' + board[8] + '|' + board[9])
print('-|-|-')
print(board[4] + '|' + board[5] + '|' + board[6])
print('-|-|-')
print(board[1] + '|' + board[2] + '|' + board[3])
def player_input():
marker = ''
    while marker != 'X' and marker != 'O':
        marker = input('Player 1, choose X or O: ').upper()
player1 = marker
player2 = 'X' if player1 == 'O' else 'O'
return player1, player2
player1_marker, player2_marker = player_input()
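
# Quick visual check (illustrative board; index 0 is an unused placeholder,
# since display_board addresses cells 1-9):
#   test_board = ['#','X','O','X','O','X','O','X','O','X']
#   display_board(test_board)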
|
david2999999/Python
|
Complete Python BootCamp - Udemy/Section 7 - Milestone Project 1/tic-tac-toe.py
|
tic-tac-toe.py
|
py
| 564 |
python
|
en
|
code
| 0 |
github-code
|
50
|
946787109
|
"""
2019.06.29
"""
### Recursive
class Solution(object):
def maxDepth(self, root):
"""
:type root: TreeNode
:rtype: int
"""
if not root:
return 0
return max(self.maxDepth(root.right),self.maxDepth(root.left))+1
### Iterative (non-recursive)
class Solution(object):
def maxDepth(self, root):
"""
:type root: TreeNode
:rtype: int
"""
if not root:
return 0
stack=[[1,root]]
res = 0
while stack:
cur,node = stack.pop()
if node.right:
stack.append([cur+1,node.right])
if node.left:
stack.append([cur+1,node.left])
res = max(res,cur)
return res
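
# Usage sketch (TreeNode is assumed to follow LeetCode's definition):
#   class TreeNode(object):
#       def __init__(self, val=0, left=None, right=None):
#           self.val, self.left, self.right = val, left, right
#
#   root = TreeNode(1, TreeNode(2), TreeNode(3, TreeNode(4)))
#   print(Solution().maxDepth(root))  # -> 3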
|
jiechenyi/jyc-record-
|
Algorithm/leetcode/二叉树的最大深度.py
|
二叉树的最大深度.py
|
py
| 794 |
python
|
en
|
code
| 0 |
github-code
|
50
|
7485240991
|
class subjectt:
def __init__(self, name, mark):
self.name = name
self.mark = mark
subject1 = subjectt("english", 90)
subject2 = subjectt("math", 85)
subject3 = subjectt("It", 92)
print(subject1.name)
print(subject1.mark)
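
# Illustrative extension (not in the original module): iterate over the
# subjects defined above and print a simple report.
for subject in (subject1, subject2, subject3):
    print(subject.name, subject.mark)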
|
SamahElShareef/task_test
|
subjectt_module.py
|
subjectt_module.py
|
py
| 241 |
python
|
en
|
code
| 0 |
github-code
|
50
|
11664680458
|
#!/usr/bin/env python3
import multiprocessing
from multiprocessing.sharedctypes import RawArray, RawValue
import array
import time
import spidev
class Driver():
MEM_SIZE = 3 * 64
RED_OFFSET = 0
GREEN_OFFSET = 64
BLUE_OFFSET = 128
def __init__(self, bam_bits):
self.bam_bits = bam_bits
self.buf = RawArray('B', Driver.MEM_SIZE * bam_bits)
self.sp = None
self.quit = RawValue('B', 0)
    @classmethod
    def _mainloop(cls, buf, quit, bam_bits):
t0 = time.time()
fr = 0
tf0 = 0
spi = spidev.SpiDev()
spi.open(0, 0)
spi.max_speed_hz = 8000000
bb_timeslot = 0
try:
            while not quit.value:
                bam_offset = bb_timeslot * cls.MEM_SIZE
for i in range(0, 64, 8):
red_i = bam_offset + Driver.RED_OFFSET + i
green_i = bam_offset + Driver.GREEN_OFFSET + i
blue_i = bam_offset + Driver.BLUE_OFFSET + i
spi.xfer(list(buf[red_i:red_i+8]) + list(buf[green_i:green_i+8]) + list(buf[blue_i:blue_i+8]) + list([1 << (i >> 3)]))
#time.sleep(0.00001)
bb_timeslot = (bb_timeslot + 1) % bam_bits
fr += 1
if fr == 2000:
t = time.time() - t0
fps = fr / (t - tf0)
print('Driver FPS', fps / bam_bits)
fr = 0
tf0 = t
        except KeyboardInterrupt:
            quit.value = 1
spi.xfer2([0] * 25)
def fill(self, frame):
buf = bytearray(self.MEM_SIZE * self.bam_bits)
data = frame.data
for x in range(8):
for y in range(8):
for z in range(8):
wholebyte = (x*64)+(y*8)+z
whichbyte = int((wholebyte) >> 3)
posInByte = wholebyte-(8*whichbyte)
redValue = data[x,y,z,0]
greenValue = data[x,y,z,1]
blueValue = data[x,y,z,2]
self._setBits(buf, redValue, greenValue, blueValue, whichbyte, posInByte)
self.buf[:] = buf
def run(self):
        if self.sp is None:
            # Write through .value so the shared flag object is not rebound to a plain int.
            self.quit.value = 0
self.sp = multiprocessing.Process(target=self._mainloop, args=(self.buf, self.quit, self.bam_bits))
self.sp.daemon = True
self.sp.start()
def stop(self):
        if self.sp is not None:
            self.quit.value = 1
self.sp.join()
self.sp = None
def _setBits(self, buf, r, g, b, whichbyte, posInByte):
r = int((r + 0.05) * self.bam_bits)
r = min(self.bam_bits, max(0, r))
g = int((g + 0.05) * self.bam_bits)
g = min(self.bam_bits, max(0, g))
b = int((b + 0.05) * self.bam_bits)
b = min(self.bam_bits, max(0, b))
for bb_timeslot in range(self.bam_bits):
bam_offset = bb_timeslot * self.MEM_SIZE
buf[bam_offset + self.RED_OFFSET + whichbyte] |= get_bam_value(self.bam_bits, bb_timeslot, r) << posInByte
buf[bam_offset + self.GREEN_OFFSET + whichbyte] |= get_bam_value(self.bam_bits, bb_timeslot, g) << posInByte
buf[bam_offset + self.BLUE_OFFSET + whichbyte] |= get_bam_value(self.bam_bits, bb_timeslot, b) << posInByte
BAM_BITS = {
2: (
(0, 0),
(1, 0),
(1, 1),
),
4: (
(0, 0, 0, 0),
(0, 0, 1, 0),
(1, 0, 1, 0),
(1, 1, 1, 0),
(1, 1, 1, 1),
),
8: (
(0, 0, 0, 0, 0, 0, 0, 0),
(0, 0, 0, 0, 0, 0, 0, 1),
(0, 0, 1, 0, 0, 0, 1, 0),
(0, 1, 0, 1, 0, 1, 0, 0),
(1, 0, 1, 0, 0, 1, 0, 1),
(1, 0, 1, 1, 0, 1, 0, 1),
(1, 0, 1, 0, 1, 1, 1, 0),
(1, 1, 0, 1, 1, 1, 1, 0),
(1, 1, 1, 1, 1, 1, 1, 1),
)
}
def get_bam_value(bam_bits, timeslot, val):
return BAM_BITS[bam_bits][val][timeslot]
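
# Worked example (values taken from the BAM_BITS table above): with 4-bit
# bit-angle modulation, intensity 2 is lit on timeslots 0 and 2:
#   get_bam_value(4, 0, 2) -> 1
#   get_bam_value(4, 1, 2) -> 0
#   get_bam_value(4, 2, 2) -> 1
#   get_bam_value(4, 3, 2) -> 0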
|
peterpdj/Py3-RGBCube
|
cubedriver.py
|
cubedriver.py
|
py
| 3,324 |
python
|
en
|
code
| 0 |
github-code
|
50
|
36497450432
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File : get_file.py
# @Author : Norah C.IV
# @Time : 2022/4/22 15:54
# @Software: PyCharm
import os
def find_file(search_path, include_str=None, filter_strs=None):
"""
    Recursively find all files under a directory, or only those matching the
    filters; every matching path is returned.
    :param search_path: directory to search
    :param include_str: keep only names containing this substring
    :param filter_strs: drop names containing any of these substrings
"""
if filter_strs is None:
filter_strs = []
files = []
    # List every entry under the path
names = os.listdir(search_path)
for name in names:
path = os.path.abspath(os.path.join(search_path, name))
if os.path.isfile(path):
            # Skip names that do not contain the required substring
if include_str is not None and include_str not in name:
continue
            # If the for loop completes without break, the name contains none of the filter strings
for filter_str in filter_strs:
if filter_str in name:
break
else:
files.append(path)
else:
files += find_file(path, include_str=include_str, filter_strs=filter_strs)
return files
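
# Illustrative usage (hypothetical directory): collect .py files while
# skipping anything with "test" in the name.
#   find_file("./src", include_str=".py", filter_strs=["test"])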
|
RoziSec/sectools
|
scan_start/get_file.py
|
get_file.py
|
py
| 1,318 |
python
|
en
|
code
| 3 |
github-code
|
50
|
41826730600
|
import os, shutil
import tkinter as tk
from tkinter import *
directorya = r"path/to/png"
dirpng = r"path/to/Photo"
dirmp4 = r"path/to/video"
direv3 = r"path/to/EV3"
dirlego = r"path/to/lego_digital_designer"
dirmc = r"path/to/Minecraft"
dirmus = r"path/to/Music"
dirscra = r"path/to/Scratch"
bin = r"path/to/bin"
dirmkv = r"path/to/Film"
dirdoc = r"path/to/cours"
dirzip = r"path/to/Zip"
direxe = r"path/to/Computer_stuff"
dirpy = r"path/to/python"
moved = 0
deleted = 0
window = tk.Tk()
window.title("Sorting Program")
window.geometry('800x50')
lbl = Label(window, text="Directory to sort:")
lbl.grid(column=0, row=0)
txt = Entry(window,width=100)
txt.grid(column=1, row=0)
log1 = Label(window, text="Log :")
log1.grid(column=0, row=1)
log = Label(window, text="test", bg='white',fg='black')
log.grid(column=1, row=1)
directorya = txt.get()
def sort():
    directorya = txt.get()
    txt.delete(0, END)
    folders = [folder for folder in os.listdir(directorya) if os.path.isdir(os.path.join(directorya, folder))]
    files = [file for file in os.listdir(directorya) if os.path.isfile(os.path.join(directorya, file))]
    moved = 0
    deleted = 0
deleted = 0
    # Map lowercase file extensions to their destination directories.
    ext_dirs = {
        '.png': dirpng, '.jpg': dirpng, '.jpeg': dirpng, '.webp': dirpng, '.dng': dirpng,
        '.mp4': dirmp4, '.avi': dirmp4, '.gif': dirmp4,
        '.ev3': direv3,
        '.lxf': dirlego, '.io': dirlego,
        '.mcworld': dirmc, '.mcpack': dirmc,
        '.mp3': dirmus,
        '.sb2': dirscra,
        '.mkv': dirmkv, '.mts': dirmkv,
        '.doc': dirdoc, '.pdf': dirdoc, '.odt': dirdoc, '.odp': dirdoc,
        '.zip': dirzip, '.gz': dirzip, '.tar': dirzip,
        '.exe': direxe, '.html': direxe, '.css': direxe,
        '.py': dirpy, '.whl': dirpy,
    }
    # These extensions always go straight to the bin.
    bin_exts = {'.thm', '.nes', '.sfc'}
    for file in files:
        try:
            ext = os.path.splitext(file)[1].lower()
            if ext in bin_exts:
                print(file, "is moving to bin")
                shutil.move(os.path.join(directorya, file), bin)
                deleted = deleted + 1
            elif ext in ext_dirs:
                target = ext_dirs[ext]
                if not os.path.exists(os.path.join(target, file)):
                    print(file, "is", ext[1:])
                    shutil.move(os.path.join(directorya, file), target)
                    moved = moved + 1
                else:
                    # A file with the same name already exists at the destination.
                    print(file, "is moving to bin")
                    shutil.move(os.path.join(directorya, file), bin)
                    deleted = deleted + 1
        except Exception as e:
            print("error while moving", file, ":", e)
print("moved",moved,"files and deleted",deleted,"files in the directory :", directorya)
final = "moved",moved,"files and deleted",deleted,"files in the directory :", directorya
log.config(text=final)
os.path
btn = Button(window, text="Sort", command=sort, bg='black',fg='white')
btn.grid(column=2, row=0)
window.mainloop()
|
0CT0PUCE/Basic-python-sorting-algorithm
|
sorting.py
|
sorting.py
|
py
| 8,114 |
python
|
en
|
code
| 1 |
github-code
|
50
|
25136176368
|
import argparse
import os
import tarfile
import urllib.request
from urllib.request import urlopen as uReq
from zipfile import ZipFile
from bs4 import BeautifulSoup as soup
from tqdm import tqdm
def create_directory(dir_path):
if os.path.exists(dir_path):
print(f"Directory for {dir_path} already exists.")
else:
os.makedirs(dir_path)
print(f"Directory {dir_path} created")
def scrape_uspto(patent_year):
# Define the USPTO web page according to the year:
patent_url = os.path.join("https://bulkdata.uspto.gov/data/patent/grant/redbook/", patent_year)
# Open a connection and download the webpage:
uClient = uReq(patent_url)
# Read the html of the page:
page_html = uClient.read()
# Close the webpage:
uClient.close()
# Parse the html using Beautiful Soup:
page_soup = soup(page_html, "html.parser")
# Get all the attributes of the page, containing the link to the weekly patent files
patent_weekly_releases = page_soup.findAll("a")
# Here, the link are selected from containers (ex: 'I20180102.tar').
# They can be used to complete the url for download, e.g.
# https://bulkdata.uspto.gov/data/patent/grant/redbook/2018/I20180102.tar
patent_link = []
for release in patent_weekly_releases:
match = [s for s in release if ".tar" in s]
matchzip = [s for s in release if ((".zip" in s) or (".ZIP" in s))]
if match != []:
if match[0].endswith(".tar"):
patent_link.append(match)
if matchzip != []:
if (
matchzip[0].lower().endswith(".zip")
and ("SUPP" not in matchzip[0])
and ("Grant" not in matchzip[0])
and ("Red Book Viewer Public" not in matchzip[0])
):
patent_link.append(matchzip)
print(f"Found {len(patent_link)} releases from {patent_year}")
return patent_link
class DownloadProgressBar(tqdm):
# Progression bar for the download
def update_to(self, b=1, bsize=1, tsize=None):
if tsize is not None:
self.total = tsize
self.update(b * bsize - self.n)
def download_url(url, output_path):
with DownloadProgressBar(unit="B", unit_scale=True, miniters=1, desc=url.split("/")[-1]) as t:
urllib.request.urlretrieve(url, filename=output_path, reporthook=t.update_to)
def get_parser():
parser = argparse.ArgumentParser(description="Downloads patent files from USPTO website")
parser.add_argument(
"--years",
type=str,
nargs="+",
required=True,
help="Year(s) of patent files to download (or 'all')",
)
parser.add_argument(
"--data_dir",
type=str,
default=".",
help="Path where all data is be stored (e.g. /data/patents_data/)",
)
parser.add_argument(
"--force_redownload",
action="store_true",
help="Use if you want to redownload files that already exist",
)
parser.add_argument(
"--no_uncompress",
action="store_true",
help="Use if you want to skip the untar/unzip step",
)
parser.add_argument(
"--remove_compressed",
action="store_true",
help="Use if you want to remove the original .tar and .zip files for the weekly releases",
)
return parser
def main(args):
# Change to directory that will contain all years
os.chdir(args.data_dir)
# Construct years list from parsed args
if args.years == ["all"]:
print("Preparing to download all USPTO patents from 2001 to 2023...")
years = list(map(str, range(2001, 2024)))
else:
print("Preparing to download all USPTO patents from", ", ".join(args.years), "...")
years = args.years
# Iterate through years
for patent_year in years:
# Get list of weekly releases from web scraper
patent_link = scrape_uspto(patent_year)
# Create the directory for the year if it doesn't exist yet
create_directory(patent_year)
# Iterate through weekly releases
for link in patent_link:
year_link = os.path.join(patent_year, link[0])
target_dir = year_link[:-4]
# Start downloading
if not os.path.isfile(year_link) or args.force_redownload:
download_url(
f"https://bulkdata.uspto.gov/data/patent/grant/redbook/{year_link}",
year_link,
)
else:
# TODO: Add `--force_download_most_recent` for cases where download was interrupted in the middle of a file
print(
f"File {year_link} already exists. Use option '--force_redownload' if you want to download anyway."
)
# Once downloaded, untar/unzip in the corresponding directory and remove the original tar/zip file
if not args.no_uncompress:
# Create target directory to untar/unzip the files
create_directory(target_dir)
if link[0].endswith(".tar"):
tar = tarfile.open(name=year_link, mode="r")
tar.extractall(target_dir)
tar.close()
elif link[0].lower().endswith(".zip"):
with ZipFile(year_link, "r") as zf:
zf.extractall(path=target_dir)
if args.remove_compressed:
os.remove(year_link)
return
if __name__ == "__main__":
args = get_parser().parse_args()
if args.no_uncompress and args.remove_compressed:
raise ValueError(
"You cannot use the options '--no_uncompress' and '--remove_compressed' at the same time. This would remove the files immediately after downloading them."
)
main(args)
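
# Example invocations (illustrative; paths are hypothetical):
#   python download.py --years 2018 2019 --data_dir /data/patents_data --remove_compressed
#   python download.py --years all --no_uncompress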
|
learningmatter-mit/PatentChem
|
download.py
|
download.py
|
py
| 5,874 |
python
|
en
|
code
| 26 |
github-code
|
50
|
1020423096
|
import time
import keyboard
from const import *
class Tamagucci:
def __init__(self, name="Person_name", health=100, eat=140, type="REAL"):
self.name = name
self.age = 0
self.health = health
self.type = type
self.is_living = True
self.sleep = MAX_SLEEP
self.eat = eat
self.happiness = 100
    def eating(self):
        if self.eat < EAT:
            self.eat += 10
            print("Thanks for the treat")
        else:
            print("I don't want to eat")
    def life(self):
        if self.health <= 0:
            self.is_living = False
        if self.happiness <= MAX_HAPPINESS // 2:
            print("I'm bored...")
        if self.eat <= 5:
            print("I'm starving, feed me")
            self.eat = 0
        else:
            self.eat -= 5 * 2 if self.happiness <= MAX_HAPPINESS // 5 else 1
        if self.sleep <= 2:
            print("I'm exhausted, let's get some sleep")
            self.sleep = 0
        else:
            self.sleep -= 2 * 2 if self.happiness <= MAX_HAPPINESS // 5 else 1
        if self.eat <= 0 or self.sleep == 0:
            self.health -= 10 if self.health > 0 else 0
        self.happiness -= 2
    def __str__(self):
        return f"Info about {self.name}\nHealth {self.health}\nSleep {self.sleep}\nFood {self.eat}\nHappiness {self.happiness}"
    def walk(self):
        print("Yay, we're going for a walk")
        print("Hold CTRL to go home")
        while True:
            if keyboard.is_pressed("ctrl"):
                print("Okay, let's go home")
                break
            self.happiness += 10 if self.happiness <= MAX_HAPPINESS else 0
            time.sleep(2)
    def play(self):
        print("Let's play")
        print("Hold CTRL to end the game")
        while True:
            if keyboard.is_pressed("ctrl"):
                print("That was fun")
                break
            self.happiness += 15 if self.happiness <= MAX_HAPPINESS else 0
            time.sleep(2)
    def sleeping(self):
        # Check this creature's own sleep level
        if self.sleep == MAX_SLEEP:
            print("I don't want to sleep")
        else:
            print("Hold CTRL to wake me up")
            while True:
                print("Zzz...")
                if keyboard.is_pressed("ctrl"):
                    print("Almost rested")
                    break
                if self.sleep >= MAX_SLEEP:
                    self.sleep = MAX_SLEEP
                    print("I'm well rested")
                    break
                self.sleep += 10
                time.sleep(2)
    def ability(self):
        print("I have an ability, but I haven't unlocked it yet :(")
class Unreal(Tamagucci):
    def __init__(self, name="Person_name", health=150, eat=140):
        super().__init__(name, health, eat, "MAGIC")
    def do_magic(self):
        print("I can do anything")
class Real(Tamagucci):
    def __init__(self, name="Person_name", health=100, eat=140):
        super().__init__(name, health, eat)
class Dragon(Unreal):
    def __init__(self, name="Person_name"):
        super().__init__(name)
        self.magic = 100
    def do_magic(self):
        if self.magic >= 10:
            print("Poof, and it's gone")
            self.magic -= 10
        else:
            print("I'm out of mana!")
    def ability(self):
        print("I can burn things")
class Cat(Real):
    def __init__(self, name="Person_name"):
        super().__init__(name)
    def ability(self):
        print("Purr")
class Dog(Real):
    def __init__(self, name="Person_name"):
        super().__init__(name)
    def ability(self):
        print("WOOF")
|
VladimirSpe/Tamagucci1
|
game.py
|
game.py
|
py
| 4,134 |
python
|
ru
|
code
| 0 |
github-code
|
50
|
21711720254
|
"""
Program to log and display crop plantings.
crop, quantity
date, outcome
notes
User can:
view all records
search for an entry
add an entry
update an entry
delete an entry
exit
"""
from tkinter import *
from backend import Database
database=Database("plantings.db")
def get_selected_row(event):
global selected_tuple
if list1.curselection():
index=list1.curselection()[0]
selected_tuple=list1.get(index)
e1.delete(0,END)
e1.insert(END,selected_tuple[1])
e2.delete(0,END)
e2.insert(END,selected_tuple[2])
e3.delete(0,END)
e3.insert(END,selected_tuple[3])
e4.delete(0,END)
e4.insert(END,selected_tuple[4])
e5.delete(0,END)
e5.insert(END,selected_tuple[5])
print(index)
def view_command():
list1.delete(0,END)
for row in database.view():
list1.insert(END,row)
def search_command():
list1.delete(0,END)
for row in database.search(crop_text.get(),quantity_text.get(),date_text.get(),outcome_text.get()):
list1.insert(END,row)
def insert_command():
database.insert(crop_text.get(),quantity_text.get(),date_text.get(),outcome_text.get(),notes_text.get())
view_command()
def delete_command():
database.delete(selected_tuple[0])
view_command()
def update_command():
database.update(selected_tuple[0],crop_text.get(),quantity_text.get(),date_text.get(),outcome_text.get(),notes_text.get())
view_command()
window=Tk()
window.wm_title("Planting Log")
l1=Label(window,text="Crop")
l1.grid(row=0, column=0)
crop_text=StringVar()
e1=Entry(window,textvariable=crop_text)
e1.grid(row=0, column=1)
l2=Label(window,text="Quantity")
l2.grid(row=0, column=2)
quantity_text=StringVar()
e2=Entry(window,textvariable=quantity_text)
e2.grid(row=0, column=3)
l3=Label(window,text="Date")
l3.grid(row=1, column=0)
date_text=StringVar()
e3=Entry(window,textvariable=date_text)
e3.grid(row=1, column=1)
l4=Label(window,text="Outcome")
l4.grid(row=1, column=2)
outcome_text=StringVar()
e4=Entry(window,textvariable=outcome_text)
e4.grid(row=1, column=3)
l5=Label(window,text="Notes")
l5.grid(row=2, column=0)
notes_text=StringVar()
e5=Entry(window,textvariable=notes_text)
e5.grid(row=2, column=1)
list1=Listbox(window, height=10,width=55)
list1.grid(row=3, column=0, rowspan=10,columnspan=2)
sb1=Scrollbar(window)
sb1.grid(row=4, column=2, rowspan=3)
list1.configure(yscrollcommand=sb1.set)
sb1.configure(command=list1.yview)
list1.bind('<<ListboxSelect>>',get_selected_row)
b1=Button(window,text="View All", width=12, command=view_command)
b1.grid(row=3,column=3)
b2=Button(window,text="Search", width=12, command=search_command)
b2.grid(row=4,column=3)
b3=Button(window,text="Add Entry", width=12, command=insert_command)
b3.grid(row=5,column=3)
b4=Button(window,text="Update Selected", width=12, command=update_command)
b4.grid(row=6,column=3)
b5=Button(window,text="Delete Selected", width=12, command=delete_command)
b5.grid(row=7,column=3)
b6=Button(window,text="Close", width=12, command=window.destroy)
b6.grid(row=8,column=3)
window.mainloop()
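
# backend.py is not included in this excerpt. Below is a minimal sqlite3
# sketch of the Database interface this frontend calls (view/search/insert/
# update/delete); the column layout (id, crop, quantity, date, outcome, notes)
# is inferred from the index positions used in get_selected_row above.
#
# import sqlite3
#
# class Database:
#     def __init__(self, db):
#         self.conn = sqlite3.connect(db)
#         self.cur = self.conn.cursor()
#         self.cur.execute("CREATE TABLE IF NOT EXISTS plantings (id INTEGER PRIMARY KEY, "
#                          "crop TEXT, quantity TEXT, date TEXT, outcome TEXT, notes TEXT)")
#         self.conn.commit()
#     def view(self):
#         self.cur.execute("SELECT * FROM plantings")
#         return self.cur.fetchall()
#     def search(self, crop="", quantity="", date="", outcome=""):
#         self.cur.execute("SELECT * FROM plantings WHERE crop=? OR quantity=? OR date=? OR outcome=?",
#                          (crop, quantity, date, outcome))
#         return self.cur.fetchall()
#     def insert(self, crop, quantity, date, outcome, notes):
#         self.cur.execute("INSERT INTO plantings VALUES (NULL,?,?,?,?,?)",
#                          (crop, quantity, date, outcome, notes))
#         self.conn.commit()
#     def update(self, id, crop, quantity, date, outcome, notes):
#         self.cur.execute("UPDATE plantings SET crop=?, quantity=?, date=?, outcome=?, notes=? WHERE id=?",
#                          (crop, quantity, date, outcome, notes, id))
#         self.conn.commit()
#     def delete(self, id):
#         self.cur.execute("DELETE FROM plantings WHERE id=?", (id,))
#         self.conn.commit()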
|
sethgerou/PlantingLog
|
frontend.py
|
frontend.py
|
py
| 3,125 |
python
|
en
|
code
| 0 |
github-code
|
50
|
72137620956
|
from collections import defaultdict
def transform_into_distinct_wordlist(wordlist):
"""Eliminate duplicates"""
words_dict = dict()
distinct_wordlist = []
for word in wordlist:
# no need in dealing with duplicates
if word in words_dict:
continue
distinct_wordlist.append(word)
words_dict[word] = 0
return distinct_wordlist
def generate_groups_for_wordlist(indexed_wordlist, debug=False):
"""
    Scan each word to collect its starting, middle and ending character
    sequences at every scale from 1 to len(word) - 1.
"""
starting_groups = defaultdict(list)
middle_groups = defaultdict(list)
ending_groups = defaultdict(list)
words_to_starting_groups = dict()
words_to_ending_groups = dict()
for index, word in indexed_wordlist.items():
if debug:
print(f"\nW {word}")
# scan word for groups
for scale in range(1, len(word)):
if debug:
print(f"SCALE: {scale}")
# starting
start_group = word[:scale]
starting_groups[start_group].append(index)
try:
word_index_groups = words_to_starting_groups[index]
except KeyError:
word_index_groups = defaultdict(set)
words_to_starting_groups[index] = word_index_groups
word_index_groups[scale].add(start_group)
# middle
for offset in range(1, len(word) - scale):
mid_group = word[offset:offset+scale]
middle_groups[mid_group].append(index)
if debug:
print(f"M {mid_group}")
# ending
end_group = word[-scale:]
ending_groups[end_group].append(index)
try:
word_index_groups = words_to_ending_groups[index]
except KeyError:
word_index_groups = defaultdict(set)
words_to_ending_groups[index] = word_index_groups
word_index_groups[scale].add(end_group)
if debug:
print(f"S {start_group} E {end_group}")
return (
starting_groups,
middle_groups,
ending_groups,
words_to_starting_groups,
words_to_ending_groups
)
def compute_degrees(groups):
    """
    Group the character groups by degree (their length).
    """
    # Sets are required here because union() is applied below.
    degree_pairs = defaultdict(set)
    for group, word_indexes in groups.items():
        degree = len(group)
        group_wi_pairs = [(group, wi) for wi in word_indexes]
        degree_pairs[degree] = degree_pairs[degree].union(group_wi_pairs)
    return degree_pairs
def cross_combine(starting_degree_pairs, ending_groups, indexed_wordlist):
combination_pairs = dict()
reverse_combination_track = dict()
    used_groups = dict()
unmatched = set()
while len(starting_degree_pairs) > 0:
# always pick the biggest
max_degree = max(starting_degree_pairs.keys())
print(f"\nSTA DEG {max_degree}")
for group_word_pair in starting_degree_pairs[max_degree]:
print(f"WI {group_word_pair}")
current_word_group = group_word_pair[0]
current_word_index = group_word_pair[1]
current_word = indexed_wordlist[current_word_index]
# check if the word is fully included in other
if (
current_word in starting_groups or
current_word in middle_groups or
current_word in ending_groups
):
print(f"Included, skip {current_word}")
continue
# if not included in other words then attempt to match it
is_unmatched = True
if (
current_word_group in ending_groups and
current_word_index not in combination_pairs and
current_word_index not in reverse_combination_track
):
# match found for ending -> attempt at combining it
print('-> matched')
while len(ending_groups[current_word_group]) > 0:
# fetch an word ending with this group
other_word_index = ending_groups[current_word_group].pop(0)
if current_word_index == other_word_index:
continue
if (
other_word_index not in combination_pairs and
other_word_index not in reverse_combination_track
):
combination_pairs[current_word_index] = (
other_word_index, max_degree,
)
reverse_combination_track[other_word_index] = current_word_index
is_unmatched = False
break # no need to attempt with another word from the ending set
# if the processing resulted in depletion of the specific set
# for an ending group, no need to keep it there anymore
if len(ending_groups[current_word_group]) == 0:
del ending_groups[current_word_group]
if is_unmatched:
unmatched.add((current_word_index, current_word))
print("-> not matched")
del starting_degree_pairs[max_degree]
# post process endings
print("\n--- ENDINGS POST PROCESSING")
# for end_group, end_group_words_indexes in ending_groups.items():
while len(ending_groups) > 0:
end_group, end_group_words_indexes = ending_groups.popitem()
for word_index in end_group_words_indexes:
current_word = indexed_wordlist[word_index]
if (
current_word in starting_groups or
current_word in middle_groups or
current_word in ending_groups
):
continue
unmatched.add((word_index, current_word))
print("\n--- UNMATCHED POST PROCESSING")
unmatched_refined = []
for x in unmatched:
# if the word was already used drop it
if x[0] in combination_pairs or x[0] in reverse_combination_track:
print(f"{x} already used")
continue
print(f"{x} is lonely")
unmatched_refined.append(x)
print("\n--- UNMATCHED")
for x in unmatched_refined:
print(f"{x}")
print("\n--- ENDINGS LEFT")
for x, y in ending_groups.items():
print(f"{x}, {y}")
print("\n--- COMBINATIONS")
for combo, pair in combination_pairs.items():
print(f"({combo}, {pair[0]}): {pair[1]}")
|
AndreiHondrari/various_test
|
python/imc_challenge/s2_func1bkp.py
|
s2_func1bkp.py
|
py
| 6,665 |
python
|
en
|
code
| 0 |
github-code
|
50
|
26448213104
|
#!/usr/bin/env python
# -*- encoding:utf-8 -*-
# function: connect to the FusionCompute server API
# date: 2020-08-16
# Author: Timbaland
import json
import time
import requests
requests.packages.urllib3.disable_warnings()
class Fusionvm():
    # Initialize the FusionCompute login parameters
def __init__(self,login_head,login_url,login_data):
self.login_head = login_head
self.login_url = login_url
self.login_data = login_data
    # Log in to FusionCompute
def loginfusion(self):
try:
resond_list = []
s = requests.Session()
r = s.post(self.login_url, data=json.dumps(self.login_data), headers=self.login_head, verify=False)
# print(r.text)
# print(r.cookies)
m = requests.utils.dict_from_cookiejar(r.cookies)
for j in m.keys():
cookie_key = j
for k in m.values():
cookie_value = k
csfr_token = r.text.split(',')[0].split(':')[1].split('"')[1]
# print(cookie_key, cookie_value, csfr_token)
resond_list.append(s)
resond_list.append(cookie_key)
resond_list.append(cookie_value)
resond_list.append(csfr_token)
except Exception as e:
print(e)
return resond_list
    # Reset a VM (API POST request)
def vmreset(self,s_session=None,vm_args=None,reset_url=None,loadHeader=None):
try:
m = s_session.post(reset_url, data=json.dumps(vm_args), headers=loadHeader, verify=False)
print(m.json())
except Exception as e:
print(e)
return m.json()
    # Power off the VDI VM (API POST request)
@staticmethod
def vmshutdown(s_session=None,vm_args=None, vm_id=None,url=None, loadHeader=None,prix_url=None):
try:
status_url = f'{prix_url}/service/sites/39A107AF/vms/{vm_id}'
# print(33333,status_url,vm_id)
status = Fusionvm.vmstatus(s_session=s_session,url=status_url,loadHeader=loadHeader)
# print(8888,status)
task_url = None
if status == "running":
m = s_session.post(url=url, data=json.dumps(vm_args), headers=loadHeader, verify=False)
status_result = m.json()
task_url = f"{prix_url}{status_result['taskUri']}"
                Fusionvm.waittask(s_session,task_url,loadHeader,flag_1=f'{vm_id} is powered off',flag_2=f'{vm_id} is shutting down')
# print(666, task_url)
except Exception as e:
print(e)
return 1
@staticmethod
def waittask(s_session,url,loadHeader,flag_1,flag_2):
try:
m = s_session.get(url, headers=loadHeader, verify=False)
clone_status = m.json()
# print(clone_status)
except Exception as e:
print(e)
if clone_status['status'] == 'success':
print(f'{flag_1}')
return clone_status
else:
time.sleep(1)
print(f'=========={flag_2}==========')
return Fusionvm.waittask(s_session,url,loadHeader,flag_1,flag_2)
    # Clone a VM from a template
def clonevm(self,s_session=None,url=None,vm_args=None,loadHeader=None,prix_url=None):
try:
m = s_session.post(url, data=json.dumps(vm_args), headers=loadHeader, verify=False)
clone_result = m.json()
url = f"{prix_url}{clone_result['taskUri']}"
except Exception as e:
print(e)
        clone_status = Fusionvm.waittask(s_session,url,loadHeader,'Cache disk created ========success!!!','Cache disk is being created')
return clone_result
    # Rename the cache disk
def recache(self,s_session=None,url=None,vm_args=None,loadHeader=None,prix_url=None):
try:
m = s_session.put(url, data=json.dumps(vm_args), headers=loadHeader, verify=False)
update_result = m.json()
except Exception as e:
print(e)
return update_result
    # Storage volume lookup
def vmvolume(self,s_session=None,url=None,loadHeader=None):
try:
m = s_session.get(url,headers=loadHeader, verify=False)
volume = m.json()
# print(volume)
vm_id_value = None
if len(volume['vmConfig']['disks']) == 1:
if volume['vmConfig']['disks'][0]['quantityGB'] == 20:
vm_id_value = volume['vmConfig']['disks'][0]['volumeUrn'].split(':')[4]
if len(volume['vmConfig']['disks']) == 2:
if volume['vmConfig']['disks'][0]['quantityGB'] == 20:
vm_id_value = volume['vmConfig']['disks'][0]['volumeUrn'].split(':')[4]
else:
vm_id_value = volume['vmConfig']['disks'][1]['volumeUrn'].split(':')[4]
except Exception as e:
print(e)
return vm_id_value
    # Query VM status
@staticmethod
def vmstatus(s_session=None,url=None,loadHeader=None):
try:
# print(5555,url,loadHeader)
            # Query the VM status:
m = s_session.get(url=url,headers=loadHeader, verify=False)
vm_info = m.json()
vm_status = vm_info['status']
except Exception as e:
print('vmstatus:',e)
return vm_status
    # Detach a bound disk (the VM is force-stopped first)
    def detachdisk(self,s_session=None,vm_args=None,url=None,loadHeader=None,vm_id=None,prix_url=None,type_vm='Cache'):
        try:
            shut_down_args = {"mode": "force"}
            shut_down_url = f'{prix_url}/service/sites/39A107AF/vms/{vm_id}/action/stop'
            # print(1111,shut_down_url,vm_id)
            Fusionvm.vmshutdown(s_session=s_session,vm_args=shut_down_args, vm_id=vm_id,url=shut_down_url, loadHeader=loadHeader,prix_url=prix_url)
m = s_session.post(url, data=json.dumps(vm_args), headers=loadHeader, verify=False)
detach_info = m.json()
# print(detach_info)
url = f"{prix_url}{detach_info['taskUri']}"
            Fusionvm.waittask(s_session,url,loadHeader,f'{type_vm} disk detached ========success!!!',f'{type_vm} disk is being detached')
except Exception as e:
print('error:',e)
return detach_info
def delevm(self,s_session=None,vm_args=None,url=None,loadHeader=None,prix_url=None):
try:
m = s_session.delete(url, headers=loadHeader, verify=False)
del_vm_info = m.json()
url = f"{prix_url}{del_vm_info['taskUri']}"
# print(99999,url)
            Fusionvm.waittask(s_session,url,loadHeader,'New cache VM deleted ========success!!!','Deleting the new cache VM')
except Exception as e:
print(e)
return del_vm_info
    def attachcache(self,s_session=None,vdi_value=None,vm_vdi=None,vm_args=None,url=None,loadHeader=None,prix_url=None):
try:
# print(vm_args)
m = s_session.post(url=url, data=json.dumps(vm_args),headers=loadHeader, verify=False)
attach_info = m.json()
attach_url = f"{prix_url}{attach_info['taskUri']}"
            Fusionvm.waittask(s_session, attach_url, loadHeader, 'Cache disk attached ========success!!!', 'Attaching the cache disk')
            del_volume_url = f'{prix_url}/service/sites/39A107AF/volumes/{vdi_value}'
            # Delete the VDI's old disk: DELETE
            # https://10.10.20.10:8443/service/sites/39A107AF/volumes/13376
n = s_session.delete(url=del_volume_url,headers=loadHeader, verify=False)
del_volume_info = n.json()
del_volume_url = f"{prix_url}{del_volume_info['taskUri']}"
            Fusionvm.waittask(s_session, del_volume_url, loadHeader, f'VDI--{vm_vdi} old disk removed ========success!!!', f'{vm_vdi} old disk is being deleted----')
            # Prepare to power on: POST
            # https://10.10.20.10:8443/service/sites/39A107AF/vms/i-00000627/action/start
star_vm_url = f'{prix_url}/service/sites/39A107AF/vms/{vm_vdi}/action/start'
k = s_session.post(url=star_vm_url, headers=loadHeader, verify=False)
start_vm_info = k.json()
star_vm_url = f"{prix_url}{start_vm_info['taskUri']}"
            Fusionvm.waittask(s_session, star_vm_url, loadHeader, 'VDI started ========success!!!', 'VDI is powering on')
except Exception as e:
            print('attachcache',e)
return 1
if __name__ == '__main__':
index_head = {"Accept": "application/json;version=6.5;charset=UTF-8",
"Content-Type": "application/json; charset=UTF-8",
"Host": "10.10.20.10:8443",
}
index_url = "https://10.10.20.10:8443/service/login/form"
login_data = {"acceptLanguage": "zh-CN",
"authKey": "nhgs@2019",
"authType": "0",
"authUser": "admin",
"userType": "0",
"verification": ""}
t1 = Fusionvm(index_head,index_url,login_data)
# print(t1.loginfusion())
respond_args = t1.loginfusion()
    # Reboot the VM (POST request)
    Vm_Reset_Url = f'https://10.10.20.10:8443/service/sites/39A107AF/vms/i-00000644/action/reboot'
    # VM reset parameters
    Vm_reset_Data = {'mode': "force"}
    # Request headers
loadHeader = {'Accept': 'application/json;version=6.5;charset=UTF-8',
'Content-Type': 'application/json; charset=UTF-8',
'Cookie': f'{respond_args[1]}={respond_args[2]}',
'CSRF-HW': f'{respond_args[3]}',
'Host': '10.10.20.10:8443',
}
# t1.vmreset(s_session=respond_args[0],vm_args=Vm_reset_Data,reset_url=Vm_Reset_Url,loadHeader=loadHeader)
    # Linked clone (POST request)
Clone_Vm_Post = r'https://10.10.20.10:8443/service/sites/39A107AF/vms/i-00000644/action/clone'
Clone_Vm_Data = {
"name": "新增cache",
"description": "",
"isBindingHost": 'false',
"parentObjUrn": "urn:sites:39A107AF",
"location": "urn:sites:39A107AF:clusters:117",
"hasSetStoreResAssignState": 'false',
"isTemplate": 'false',
"group": "",
"osOptions": {
"osType": "Windows",
"osVersion": 1050,
"password": "DhJf9ZGZ"
},
"isMultiDiskSpeedup": 'false',
"autoBoot": 'false',
"vmConfig": {
"cpu": {
"cpuHotPlug": 0,
"cpuPolicy": "shared",
"cpuThreadPolicy": "prefer",
"weight": 1000,
"reservation": 0,
"quantity": 2,
"limit": 0,
"slotNum": 2,
"coresPerSocket": 1
},
"memory": {
"memHotPlug": 0,
"unit": "GB",
"quantityMB": 4096,
"weight": 40960,
"reservation": 0,
"limit": 0,
"hugePage": "4K"
},
"numaNodes": 0,
"properties": {
"clockMode": "freeClock",
"bootFirmware": "BIOS",
"bootFirmwareTime": 0,
"bootOption": "disk",
"evsAffinity": 'false',
"vmVncKeymapSetting": 7,
"isAutoUpgrade": 'true',
"attachType": 'false',
"isEnableMemVol": 'false',
"isEnableFt": 'false',
"consoleLogTextState": 'false',
"isAutoAdjustNuma": 'false',
"secureVmType": "",
"dpiVmType": "",
"consolelog": 1
},
"graphicsCard": {
"type": "cirrus",
"size": 4
},
"disks": [{
"datastoreUrn": "urn:sites:39A107AF:datastores:37",
"name": "SASLun1",
"quantityGB": 20,
"sequenceNum": 2,
'systemVolume': False,
"indepDisk": 'false',
"persistentDisk": 'true',
"isThin": 'true',
"pciType": "VIRTIO",
"volType": 0
}],
"nics": [{
"sequenceNum": 0,
"portGroupUrn": "urn:sites:39A107AF:dvswitchs:3:portgroups:6",
"virtIo": 1,
"nicConfig": {
"vringbuf": 256,
"queues": 1
},
"enableSecurityGroup": 'false'
}]
},
"vmCustomization": {
"isUpdateVmPassword": 'false',
"osType": "Windows"
},
"floppyProtocol": "automatch",
"floppyFile": ""
}
prix_url = r'https://10.10.20.10:8443'
    # Clone the VM
    clone_result = t1.clonevm(s_session=respond_args[0],url=Clone_Vm_Post,vm_args=Clone_Vm_Data,loadHeader=loadHeader,prix_url=prix_url)
    # ID of the VM once the clone completes
    vm_id = clone_result['urn'].split(':')[4]
    # The VDI virtual machine
    vm_vdi = 'i-00000627'
    # Parameters for renaming the cache disk
    Cache_Name_Data = {"indepDisk": 'false',
                       "name": "Cache=" + vm_id,
                       "persistentDisk": 'true'}
    # VM information
    Vm_Info_Url = f'https://10.10.20.10:8443/service/sites/39A107AF/vms/{vm_id}'
    # VM disk volume
    vm_id_value = t1.vmvolume(s_session=respond_args[0],url=Vm_Info_Url,loadHeader=loadHeader)
    # Cache rename request URL
    Cache_Name_Url = f'https://10.10.20.10:8443/service/sites/39A107AF/volumes/{vm_id_value}'
    # Rename the disk
t1.recache(s_session=respond_args[0],url=Cache_Name_Url,vm_args=Cache_Name_Data,loadHeader=loadHeader,prix_url=prix_url)
    # Parameters for detaching the disk and formatting it
    Detach_Disk_Data = {"isFormat": "true",  # format the disk
                        "volUrn": f"urn:sites:39A107AF:volumes:{vm_id_value}"
                        }
    # Detach the disk created on the cache VM (POST request)
    Detach_Disk_Url = f'https://10.10.20.10:8443/service/sites/39A107AF/vms/{vm_id}/action/detachvol'
    t1.detachdisk(s_session=respond_args[0],vm_args=Detach_Disk_Data,vm_id=vm_id,url=Detach_Disk_Url,loadHeader=loadHeader,prix_url=prix_url)
    # URL for deleting the cache VM
    Delete_Cache_Vm_Url = f'https://10.10.20.10:8443/service/sites/39A107AF/vms/{vm_id}'
    # Delete the cache VM (prix_url is needed to build the task URL inside delevm)
    t1.delevm(s_session=respond_args[0],url=Delete_Cache_Vm_Url,loadHeader=loadHeader,prix_url=prix_url)
    # Detach and re-attach the cache disk on the in-use desktop VM i-00000627
    Detach_Vdi_Url = f'https://10.10.20.10:8443/service/sites/39A107AF/vms/{vm_vdi}/action/detachvol'
    # VDI VM information
    Vdi_Info_Url = f'https://10.10.20.10:8443/service/sites/39A107AF/vms/{vm_vdi}'
    # VDI VM disk volume
    vdi_value = t1.vmvolume(s_session=respond_args[0], url=Vdi_Info_Url, loadHeader=loadHeader)
    Detach_Vdi_Data = {"isFormat": "false",  # do not format the disk
                       "volUrn": f"urn:sites:39A107AF:volumes:{vdi_value}"
                       }
    # print(vdi_value,7777777)
    # Remove the VDI's disk
    if vdi_value:
        t1.detachdisk(s_session=respond_args[0],vm_args=Detach_Vdi_Data,url=Detach_Vdi_Url,loadHeader=loadHeader,vm_id=vm_vdi,prix_url=prix_url,type_vm=vm_vdi)
    # Parameters for attaching the new cache disk
Mount_Disk_Data = {
"pciType": "VIRTIO",
"volUrn": f"urn:sites:39A107AF:volumes:{vm_id_value}",
"ioMode": "dataplane",
"accessMode": 0
}
    # Attach the cache disk to the in-use VDI VM (POST request)
    # https://10.10.20.10:8443/service/sites/39A107AF/vms/i-00000627/action/attachvol
    Attach_Disk_Url = f"https://10.10.20.10:8443/service/sites/39A107AF/vms/{vm_vdi}/action/attachvol"
    if vm_id_value:
        t1.attachcache(s_session=respond_args[0],vdi_value=vdi_value,vm_vdi=vm_vdi,vm_args=Mount_Disk_Data,url=Attach_Disk_Url,loadHeader=loadHeader,prix_url=prix_url)
|
Timbaland8888/QYNHGS
|
huawei/fusion_api.py
|
fusion_api.py
|
py
| 16,025 |
python
|
en
|
code
| 0 |
github-code
|
50
|
17437702994
|
#!/usr/bin/python
import rospy
import numpy as np
import sys
from velocity_controller import VelocityController, vector
from geometry_msgs.msg import Twist, PoseStamped, Pose, Point, Quaternion
import tf.transformations as tft
def controller(marker_num, self_num, master_num):
vel_control = VelocityController(marker_num, self_num, master_num)
pub_target = rospy.Publisher('move_base_simple/goal', PoseStamped, queue_size=10, latch=True)
#pub_rel_vel = rospy.Publisher(vel_control.turtlebot+'/relative_velocity', Twist, queue_size=10)
rate = rospy.Rate(1) #subject to change
pose_target = Pose()
pose_target.position.x = 0.55
pose_target.position.z = -0.35
# pose_target.position.y = 0
# post_target.position.z = 0
# post_target.orientation.x
pose_target.orientation.y = 1/np.sqrt(2)
# post_target.orientation.z
pose_target.orientation.w = 1/np.sqrt(2)
target_stamped = PoseStamped()
target_stamped.header.stamp = rospy.Time.now()
target_stamped.header.frame_id = "map"
target_stamped.pose = None
while not target_stamped.pose:
target_stamped.pose = vel_control.transform_pose(pose_target, "map", vel_control.marker_frame)
target_stamped.pose.position.z = 0
angles = tft.euler_from_quaternion(vector(target_stamped.pose.orientation))
quatern = tft.quaternion_from_euler(0, 0, angles[2])
target_stamped.pose.orientation.x = quatern[0]
target_stamped.pose.orientation.y = quatern[1]
target_stamped.pose.orientation.z = quatern[2]
target_stamped.pose.orientation.w = quatern[3]
#position = Point(1.266, 0.0, 0.0)
#orientation = Quaternion(0, 0, 0.127, 0.991)
#target_stamped.pose.position = position
#target_stamped.pose.orientation = orientation
print(target_stamped)
pub_target.publish(target_stamped)
rospy.spin()
if __name__ == '__main__':
rospy.init_node('turtlebot_controller', anonymous=True)
try:
controller(sys.argv[1], sys.argv[2], sys.argv[3])
    except Exception as e:
        print(e)
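
# Example launch (hypothetical marker/robot ids; the three arguments are
# marker_num, self_num and master_num as parsed in __main__ above):
#   rosrun move_turtlebot set_turtlebot_goal.py 4 2 1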
|
toby-l-baker/robotics-project
|
src/move_turtlebot/src/set_turtlebot_goal.py
|
set_turtlebot_goal.py
|
py
| 1,969 |
python
|
en
|
code
| 0 |
github-code
|
50
|